reponame: string (lengths 2–39)
files: list
median_score: float64 (0–11.5)
arpitremarkable
[ { "content": "import os\n\nfrom settings.base import * # pylint: disable=wildcard-import,unused-wildcard-import\n\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': os.environ.get('DATABASE_NAME', 'dynamic_models_test'),\n 'USER': os.environ.get('DATABASE_USER', 'postgres'),\n 'PASSWORD': os.environ.get('DATABASE_PASSWORD', ''),\n 'HOST': 'localhost',\n 'PORT': 5432,\n }\n}\n", "id": "6274125", "language": "Python", "matching_score": 1.5104575157165527, "max_stars_count": 2, "path": "settings/postgres.py" }, { "content": "\"\"\"\nDummy database for generating app migrations.\n\"\"\"\n# pylint: disable=W0614\nfrom .base import *\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.dummy'\n }\n}", "id": "11956492", "language": "Python", "matching_score": 0.8602345585823059, "max_stars_count": 2, "path": "settings/dummy.py" }, { "content": "import os\n\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nSECRET_KEY = '<PASSWORD>'\n\nINSTALLED_APPS = [\n 'tests',\n 'dynamic_models',\n 'django.contrib.contenttypes',\n]\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.dummy'\n }\n}\n\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'\n }\n}\n", "id": "6274180", "language": "Python", "matching_score": 1.992047667503357, "max_stars_count": 2, "path": "settings/base.py" }, { "content": "import os\n\nPROJECT_DIR = os.path.dirname(os.path.dirname(__file__))\ndb_file = os.environ.get(\n 'DYNAMIC_MODELS_DB',\n os.path.join(PROJECT_DIR, 'dynamic_models.db')\n)\nopen(db_file, 'w').close()\n", "id": "1117391", "language": "Python", "matching_score": 0.4910286068916321, "max_stars_count": 2, "path": "scripts/setup_sqlite_db.py" }, { "content": "from django.contrib import admin\nfrom school import models\n\nadmin.site.unregister(models.User)\nfor model in models.__dict__.values():\n\tif isinstance(model, models.models.base.ModelBase):\n\t\tadmin.site.register(model)\n", "id": "4927539", "language": "Python", "matching_score": 1.7603996992111206, "max_stars_count": 0, "path": "school/admin.py" }, { "content": "from django.conf.urls import patterns, include, url\n\nfrom school import api\nfrom tastypie.api import Api\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\n\nv1_api = Api(api_name='v1')\nv1_api.register(api.UserResource())\nv1_api.register(api.AttendanceResource())\n\nurlpatterns = patterns('',\n url(r'^api/', include(v1_api.urls)),\n\n url(r'^admin/', include(admin.site.urls)),\n)\n", "id": "3518412", "language": "Python", "matching_score": 1.565131664276123, "max_stars_count": 0, "path": "school/urls.py" }, { "content": "from tastypie import fields\nfrom tastypie.resources import ModelResource\n\nfrom school import models\n\nimport datetime\n\n\nclass BehaviourResource(ModelResource):\n class Meta:\n queryset = models.Behaviour.objects.all()\n resource_name = 'behaviour'\n excludes = ['id']\n\n\nclass PointResource(ModelResource):\n behaviour = fields.ForeignKey(BehaviourResource, 'behaviour', full=True, null=True, blank=True)\n class Meta:\n queryset = models.Point.objects.all()\n resource_name = 'behaviour'\n fields = ['behaviour']\n\n\nclass AttendanceResource(ModelResource):\n point = fields.ToManyField(PointResource, 'point_set', full=True, null=True, blank=True)\n class Meta:\n queryset = models.Attendance.objects.all()\n resource_name = 'attendance'\n excludes = ['id']\n\n\ndef attendance_set(bundle):\n try:\n date = 
datetime.date(year=int(bundle.request.GET['year']), month=int(bundle.request.GET['month']), day=int(bundle.request.GET['day']))\n except (KeyError, ValueError) as e:\n date = datetime.date.today()\n return bundle.obj.attendance_set.filter(date=date)\n\nclass UserResource(ModelResource):\n\n attendances = fields.ToManyField(AttendanceResource, attendance_set, full=True, null=True, blank=True)\n class Meta:\n queryset = models.User.objects.all()\n resource_name = 'user'\n fields = ['first_name', 'last_name', 'username']\n", "id": "3444574", "language": "Python", "matching_score": 2.1401207447052, "max_stars_count": 0, "path": "school/api.py" }, { "content": "from django.contrib.auth.models import User\nfrom django.db import models\n\nfrom djangotoolbox.fields import ListField\n\n\nclass Attendance(models.Model):\n user = models.ForeignKey(User, on_delete=models.PROTECT)\n date = models.DateField(auto_now_add=True, auto_now=True)\n time = models.DateTimeField(auto_now_add=True, auto_now=True)\n\n def __unicode__(self):\n return \"%s - %s\" % (self.user, self.date)\n\n class Meta:\n unique_together = (\"user\", \"date\", )\n\n\nclass Behaviour(models.Model):\n name = models.CharField(max_length=50, unique=True)\n value = models.IntegerField(verbose_name='point')\n\n def __unicode__(self):\n return \"%s - %s\" % (self.name, self.value)\n\n\nclass Point(models.Model):\n attendance = models.ForeignKey(Attendance, on_delete=models.PROTECT)\n behaviour = models.ForeignKey(Behaviour, on_delete=models.PROTECT)\n time = models.DateTimeField(auto_now_add=True, auto_now=True)\n\n def __unicode__(self):\n return \"%s - %s\" % (self.attendance, self.behaviour)\n", "id": "9964770", "language": "Python", "matching_score": 1.9423565864562988, "max_stars_count": 0, "path": "school/models.py" }, { "content": "import pytest\nfrom django.db import models\nfrom dynamic_models import utils\n\n\ndef test_default_fields_setting(settings):\n \"\"\"Should return a dict of default fields defined in settings.\"\"\"\n default_fields = {'default_integer': models.IntegerField()}\n setattr(settings, 'DYNAMIC_MODELS', {'DEFAULT_FIELDS': default_fields})\n assert utils.default_fields() == default_fields\n\ndef test_default_max_length_setting(settings):\n \"\"\"Should return the DEFAULT_MAX_LENGTH setting or a default.\"\"\"\n assert utils.default_max_length() == utils.DEFAULT_MAX_LENGTH\n default_max_length = {'DEFAULT_MAX_LENGTH': 64}\n setattr(settings, 'DYNAMIC_MODELS', default_max_length)\n assert utils.default_max_length() == 64\n\n\nclass TestModelRegistry:\n pass\n\nclass TestLastModifiedCache:\n pass\n", "id": "3780214", "language": "Python", "matching_score": 1.0906199216842651, "max_stars_count": 2, "path": "tests/test_utils.py" }, { "content": "# Generated by Django 2.1.3 on 2018-12-07 14:45\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('contenttypes', '0002_remove_content_type_name'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ModelFieldSchema',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('model_id', models.PositiveIntegerField()),\n ('field_id', models.PositiveIntegerField()),\n ('null', models.BooleanField(default=False)),\n ('unique', models.BooleanField(default=False)),\n ('max_length', models.PositiveIntegerField(null=True)),\n ('field_content_type', 
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),\n ('model_content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='model_field_columns', to='contenttypes.ContentType')),\n ],\n ),\n migrations.AlterUniqueTogether(\n name='modelfieldschema',\n unique_together={('model_content_type', 'model_id', 'field_content_type', 'field_id')},\n ),\n ]\n", "id": "6950084", "language": "Python", "matching_score": 5.101057052612305, "max_stars_count": 2, "path": "dynamic_models/migrations/0001_initial.py" }, { "content": "# Generated by Django 2.1.3 on 2018-12-18 01:49\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='FieldSchema',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=16)),\n ('data_type', models.CharField(choices=[('character', 'character'), ('text', 'text'), ('integer', 'integer'), ('float', 'float'), ('boolean', 'boolean'), ('date', 'date')], editable=False, max_length=16)),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='ModelSchema',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('_modified', models.DateTimeField(auto_now=True)),\n ('name', models.CharField(max_length=32, unique=True)),\n ],\n options={\n 'abstract': False,\n },\n ),\n ]\n", "id": "6609562", "language": "Python", "matching_score": 1.8824236392974854, "max_stars_count": 2, "path": "tests/migrations/0001_initial.py" }, { "content": "import pytest\nfrom django.utils import timezone\nfrom dynamic_models import utils\nfrom dynamic_models import exceptions\nfrom .models import ModelSchema, FieldSchema\n\n\n# pylint: disable=redefined-outer-name,invalid-name,unused-argument\n\n\[email protected]\ndef model_registry(model_schema):\n return utils.ModelRegistry(model_schema.app_label)\n\[email protected]\ndef unsaved_model_schema(db):\n return ModelSchema(name='unsaved model')\n\[email protected]\ndef model_schema(db):\n return ModelSchema.objects.create(name='simple model')\n\[email protected]\ndef another_model_schema(db):\n return ModelSchema.objects.create(name='another model')\n\[email protected]\ndef field_schema(db):\n return FieldSchema.objects.create(name='field', data_type='integer')\n\[email protected]\ndef existing_column(db, model_schema, field_schema):\n model_schema.add_field(field_schema)\n\n\[email protected]_db\nclass TestModelSchema:\n\n def test_is_current_schema_checks_last_modified(self, model_schema):\n assert model_schema.is_current_schema()\n model_schema.last_modified = timezone.now()\n assert not model_schema.is_current_schema()\n\n def test_is_current_model(self, model_schema, another_model_schema):\n model = model_schema.as_model()\n another_model = another_model_schema.as_model()\n assert model_schema.is_current_model(model)\n with pytest.raises(ValueError):\n model_schema.is_current_model(another_model)\n\n def test_model_is_registered_on_create(self, model_registry, unsaved_model_schema):\n assert not model_registry.is_registered(unsaved_model_schema.model_name)\n unsaved_model_schema.save()\n assert model_registry.is_registered(unsaved_model_schema.model_name)\n\n def test_model_table_is_created_on_create(self, unsaved_model_schema):\n table_name = unsaved_model_schema.db_table\n assert 
not utils.db_table_exists(table_name)\n unsaved_model_schema.save()\n assert utils.db_table_exists(table_name)\n\n def test_model_registry_is_updated_on_update(self, model_registry, model_schema):\n assert model_registry.is_registered('SimpleModel')\n assert not model_registry.is_registered('NewName')\n model_schema.name = 'new name'\n model_schema.save()\n assert not model_registry.is_registered('SimpleModel')\n assert model_registry.is_registered('NewName')\n\n def test_model_table_is_updated_on_update(self, model_schema):\n assert utils.db_table_exists('tests_simple_model')\n assert not utils.db_table_exists('tests_new_name')\n model_schema.name = 'new name'\n model_schema.save()\n assert utils.db_table_exists('tests_new_name')\n assert not utils.db_table_exists('tests_simple_model')\n\n def test_model_table_is_dropped_on_delete(self, model_schema):\n assert utils.db_table_exists(model_schema.db_table)\n model_schema.delete()\n assert not utils.db_table_exists(model_schema.db_table)\n\n def test_model_is_unregistered_on_delete(self, model_registry, model_schema):\n assert model_registry.is_registered(model_schema.model_name)\n model_schema.delete()\n assert not model_registry.is_registered(model_schema.model_name)\n\n def test_add_field_creates_column(self, model_schema, field_schema):\n table_name = model_schema.db_table\n column_name = field_schema.db_column\n assert not utils.db_table_has_field(table_name, column_name)\n model_schema.add_field(field_schema)\n assert utils.db_table_has_field(table_name, column_name)\n\n @pytest.mark.usefixtures('existing_column')\n def test_update_field_updates_column(self, model_schema, field_schema):\n table_name = model_schema.db_table\n column_name = field_schema.db_column\n assert not utils.db_field_allows_null(table_name, column_name)\n model_schema.update_field(field_schema, null=True)\n assert utils.db_field_allows_null(table_name, column_name)\n\n @pytest.mark.usefixtures('existing_column')\n def test_remove_field_drops_column(self, model_schema, field_schema):\n table_name = model_schema.db_table\n column_name = field_schema.db_column\n assert utils.db_table_has_field(table_name, column_name)\n model_schema.remove_field(field_schema)\n assert not utils.db_table_has_field(table_name, column_name)\n\n\nclass TestFieldSchema:\n\n def test_cannot_save_with_prohibited_name(self):\n prohibited_name = '__module__'\n with pytest.raises(exceptions.InvalidFieldNameError):\n FieldSchema.objects.create(name=prohibited_name, data_type='integer')\n\n def test_cannot_change_null_to_not_null(self, model_schema, field_schema):\n model_field = model_schema.add_field(field_schema, null=True)\n with pytest.raises(exceptions.NullFieldChangedError):\n model_field.null = False\n model_field.save()\n\n def test_related_model_schema_notified_on_update(\n self, model_schema, another_model_schema, field_schema):\n\n model_schema.add_field(field_schema)\n another_model_schema.add_field(field_schema)\n\n model = model_schema.as_model()\n another_model = another_model_schema.as_model()\n\n assert model_schema.is_current_model(model)\n assert another_model_schema.is_current_model(another_model)\n field_schema.update_last_modified()\n assert not model_schema.is_current_model(model)\n assert not another_model_schema.is_current_model(another_model)\n\n\[email protected]_db\nclass TestDynamicModels:\n\n @pytest.fixture\n def dynamic_model(self, model_schema, existing_column):\n return model_schema.as_model()\n\n def test_can_create(self, dynamic_model):\n assert 
dynamic_model.objects.create(field=2)\n\n def test_can_get(self, dynamic_model):\n obj = dynamic_model.objects.create(field=-3)\n assert dynamic_model.objects.get(pk=obj.pk)\n\n def test_can_update(self, dynamic_model):\n obj = dynamic_model.objects.create(field=4)\n dynamic_model.objects.filter(pk=obj.pk).update(field=6)\n obj.refresh_from_db()\n assert obj.field == 6\n\n def test_can_delete(self, dynamic_model):\n obj = dynamic_model.objects.create(field=3)\n obj.delete()\n with pytest.raises(dynamic_model.DoesNotExist):\n dynamic_model.objects.get(pk=obj.pk)\n\n def test_cannot_save_with_outdated_model(self, model_schema, dynamic_model):\n model_schema.name = 'new name'\n model_schema.save()\n with pytest.raises(exceptions.OutdatedModelError):\n dynamic_model.objects.create(field=4)\n", "id": "5347310", "language": "Python", "matching_score": 4.288914680480957, "max_stars_count": 2, "path": "tests/test_models.py" }, { "content": "import pytest\nfrom django.apps import apps\nfrom django.core.cache import cache\nfrom dynamic_models import utils\nfrom dynamic_models.models import ModelFieldSchema\nfrom .models import ModelSchema, FieldSchema\n\n# pylint: disable=unused-argument,invalid-name\n\n\nTEST_APP_LABEL = 'tests'\nMODEL_REGISTRY = utils.ModelRegistry(TEST_APP_LABEL)\nSTATIC_MODELS = (ModelSchema, FieldSchema)\n\n\ndef raise_on_save(*args, **kwargs):\n raise AssertionError('save method should not be called')\n\[email protected]\ndef prevent_save(monkeypatch):\n monkeypatch.setattr(ModelSchema, 'save', raise_on_save)\n monkeypatch.setattr(FieldSchema, 'save', raise_on_save)\n monkeypatch.setattr(ModelFieldSchema, 'save', raise_on_save)\n\n\[email protected](autouse=True)\ndef cleanup_cache():\n yield\n cache.clear()\n\[email protected](autouse=True)\ndef cleanup_registry():\n \"\"\"\n The app registry bleeds between tests. This fixture removes all dynamically\n declared models after each test.\n \"\"\"\n try:\n yield\n finally:\n test_app_config = apps.get_app_config(TEST_APP_LABEL)\n registered_models = test_app_config.get_models()\n models_to_remove = [\n model for model in registered_models if model not in STATIC_MODELS\n ]\n for model in models_to_remove:\n MODEL_REGISTRY.unregister_model(model.__name__)\n", "id": "5857157", "language": "Python", "matching_score": 1.7223787307739258, "max_stars_count": 2, "path": "tests/conftest.py" }, { "content": "\"\"\"\nSet up the test models inlcuding subclassed verisons of the Sbstract schema\nmodels with additional common field types\n\"\"\"\nfrom dynamic_models.models import AbstractModelSchema, AbstractFieldSchema\n\n\nclass ModelSchema(AbstractModelSchema):\n pass\n\n\nclass FieldSchema(AbstractFieldSchema):\n pass", "id": "9137558", "language": "Python", "matching_score": 1.559430480003357, "max_stars_count": 2, "path": "tests/models.py" } ]
1.741389
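Each row above pairs a reponame with its files list — one record per file carrying content, id, language, matching_score, max_stars_count, and path — followed by the repo's median_score. The snippet below is a minimal, assumed sketch of how one such row might be consumed; the row_files variable and the loading step are illustrative and not part of the dump itself, only the field names are taken from the records shown here.

    import json

    # Hypothetical: assume one row's "files" column has been captured as a JSON string.
    row_files = '[{"content": "import os\\n", "id": "6274125", "language": "Python", "matching_score": 1.51, "max_stars_count": 2, "path": "settings/postgres.py"}]'

    files = json.loads(row_files)  # -> list of per-file records
    for record in files:
        # Each record carries the raw file text plus light metadata about that file.
        print(record["path"], record["language"], record["matching_score"])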
christopher-roelofs
[ { "content": "import os\n\nrbfpath = \"\"\ncommand = f'echo \"load_core {rbfpath}\" >/dev/MiSTer_cmd'", "id": "6030885", "language": "Python", "matching_score": 3.2867190837860107, "max_stars_count": 0, "path": "load_core.py" }, { "content": "import os\n\nrbfpath = \"\"\ncommand = 'echo \"load_core {}\" >/dev/MiSTer_cmd'.format(rbfpath)", "id": "7783060", "language": "Python", "matching_score": 0.30376821756362915, "max_stars_count": 8, "path": "load_core.py" }, { "content": "import paramiko\nfrom time import sleep\nimport json\nimport config\nimport ssh\nimport mister\nimport cores\nimport logger\nimport discord\n\nSETTINGS = config.get_config()\nmaps = cores.read_file_map()\n\n\nRECENTS_FOLDER = '/media/{}/config/'.format(SETTINGS['core_storage'])\n\n\nlast_game = None\nlast_core = None\n\n\ndef replace_text(core, game, displayname, text):\n return text.replace(\"{core}\", core).replace(\"{game}\", game).replace(\"{displayname}\", displayname)\n\n\nwhile True:\n try:\n core = mister.get_running_core()\n map_core = cores.get_map(core)\n game = mister.get_last_game(core)\n\n displayname = core\n state_text = SETTINGS['state_text']\n details_text = SETTINGS['details_text']\n small_text = SETTINGS['small_text']\n small_image = SETTINGS['small_image']\n large_image = SETTINGS['large_image']\n large_text = SETTINGS['large_text']\n buttons = None\n\n if \"buttons\" in SETTINGS:\n if SETTINGS['buttons'] != \"\":\n buttons = SETTINGS['buttons']\n\n if \"buttons\" in map_core:\n if map_core[\"buttons\"] != \"\":\n buttons = map_core[\"buttons\"]\n\n if \"state_text\" in map_core:\n state_text = map_core[\"details_text\"]\n\n if \"details_text\" in map_core:\n details_text = map_core[\"details_text\"]\n\n if \"display_name\" in map_core:\n displayname = map_core[\"display_name\"]\n\n if \"small_text\" in map_core:\n if map_core[\"small_text\"] != \"\":\n small_text = map_core[\"small_text\"]\n \n if \"small_image\" in map_core:\n if map_core[\"small_image\"] != \"\":\n small_image = map_core[\"small_image\"]\n\n if \"large_text\" in map_core:\n if map_core[\"large_text\"] != \"\":\n large_text = map_core[\"large_text\"]\n\n if \"large_image\" in map_core:\n if map_core[\"large_image\"] != \"\":\n large_image = map_core[\"large_image\"]\n\n state_text = replace_text(core,game,displayname,state_text)\n details_text = replace_text(core,game,displayname,details_text)\n\n if game != \"\" and game != last_game:\n discord.update_activity(details_text,state_text,large_image,large_text,small_image,small_text,buttons)\n \n if core != last_core:\n discord.update_activity(details_text,state_text,large_image,large_text,small_image,small_text,buttons)\n\n last_core = core\n last_game = game\n\n except Exception as e:\n logger.error(repr(e))\n sleep(int(SETTINGS[\"refresh_rate\"]))\n\nclient.close()\n", "id": "5902426", "language": "Python", "matching_score": 2.529557943344116, "max_stars_count": 0, "path": "main.py" }, { "content": "\nfrom pypresence import Presence\nimport time\nimport config\nimport event_manager\nimport logger\nfrom string_util import replace_text\nfrom time import sleep\nSETTINGS = config.get_config()\n\nclient_id = SETTINGS[\"discord\"]['application_id']\nconnected = False\nretries = 0\nmax_retries = 3\n\nRPC = None\n\ndef initialize():\n global RPC\n global retries\n global connected\n if retries < max_retries:\n try:\n logger.info(\"Attempting to connect to Discord ...\")\n RPC = Presence(client_id, pipe=0) # Initialize the client class\n RPC.connect() # Start the handshake loop\n 
connected = True\n logger.info(\"Connected to Discord\")\n except Exception as e:\n logger.error(\"Failed to connect to Discord: {}\".format(repr(e)))\n retries += 1\n sleep(1)\n initialize()\n\ndef update_activity(details, state, large_image=None, large_text=None, small_image=None, small_text=None, buttons=None):\n if state == \"\":\n state = None\n if large_image == \"\":\n large_image = None\n if large_text == \"\":\n large_text = None\n if small_image == \"\":\n small_image = None\n if small_text == \"\":\n small_text = None\n try:\n RPC.update(details=details, state=state, start=time.time(), large_image=large_image, large_text=large_text,\n small_image=small_image, small_text=small_text, buttons=buttons) # Set the presence\n except Exception as e:\n logger.error(f\"Faild to update Discord status: {e}\")\n if \"reconnect\" in SETTINGS[\"discord\"]:\n if SETTINGS[\"discord\"][\"reconnect\"]:\n global retries\n retries = 0\n initialize()\n\n\ndef handle_event(event, action):\n if connected:\n buttons = None\n if \"buttons\" in action:\n buttons = action[\"buttons\"]\n update_activity(replace_text(action[\"details_text\"], event.tokens), replace_text(action[\"state_text\"], event.tokens), replace_text(action[\"large_image\"], event.tokens).lower(\n ), replace_text(action[\"large_text\"], event.tokens), replace_text(action[\"small_image\"], event.tokens).lower(), replace_text(action[\"small_text\"], event.tokens), buttons)\n\n\nevent_manager.subscribers[\"Discord\"] = {}\nevent_manager.subscribers[\"Discord\"][\"initialize\"] = lambda: initialize()\nevent_manager.subscribers[\"Discord\"][\"handle_event\"] = {\n 'function': handle_event, 'arg': \"args\"}\n\nif __name__ == \"__main__\":\n while True:\n buttons = [{\"label\": \"Button 1\", \"url\": \"https://www.google.com\"},\n {\"label\": \"Button 2\", \"url\": \"https://www.google.com\"}]\n update_activity(\"Console\", \"Game\", \"segacd\", None, None, None, buttons)\n time.sleep(100000)\n", "id": "3323928", "language": "Python", "matching_score": 3.954991579055786, "max_stars_count": 0, "path": "discord.py" }, { "content": "\nfrom pypresence import Presence\nimport time\nimport config\n\nSETTINGS = config.get_config()\n\nclient_id = SETTINGS['application_id']\n\nRPC = Presence(client_id,pipe=0) # Initialize the client class\nRPC.connect() # Start the handshake loop\n\ndef update_activity(details,state,large_image=None,large_text=None,small_image=None,small_text=None,buttons=None):\n if state == \"\":\n state = None\n if large_image == \"\":\n large_image = None\n if large_text == \"\":\n large_text = None\n if small_image == \"\":\n small_image = None\n if small_text == \"\":\n small_text = None\n RPC.update(details=details, state=state, start=time.time(),large_image=large_image,large_text=large_text,small_image=small_image,small_text=small_text,buttons=buttons) # Set the presence\n\n\nif __name__ == \"__main__\":\n while True:\n buttons = [{\"label\": \"Button 1\", \"url\": \"https://www.google.com\"},{\"label\": \"Button 2\", \"url\": \"https://www.google.com\"}]\n update_activity(\"Console\",\"Game\",\"segacd\",None,None,None,buttons)\n time.sleep(100000)", "id": "6316276", "language": "Python", "matching_score": 0.5019208788871765, "max_stars_count": 0, "path": "discord.py" }, { "content": "#!/usr/bin/python\n\nimport config\nimport gui\nimport telconn\nimport threading\nimport commands\n\n\n\n\ntelconn.initialize()\nthreading._start_new_thread(commands.update_player_objects_timed, ())\ngui.start()\n\n\n\n\n", "id": "688678", 
"language": "Python", "matching_score": 1.1397769451141357, "max_stars_count": 0, "path": "main.pyw" }, { "content": "__author__ = 'christopher'\n\nimport time\nimport telconn\nimport runtime\nimport logger\nimport memorydb\n\n\n\ndef teleport(player,location):\n telconn.write_out(\"tele \" + player + \" \" + location)\n time.sleep(1)\n telconn.write_out(\"tele \" + player + \" \" + location)\n time.sleep(1)\n telconn.write_out(\"tele \" + player + \" \" + location)\n\ndef p2p_teleport(player1,player2):\n teleport(player1,player2)\n\ndef say(message):\n telconn.write_out(\"say \" + '\"' + message + '\"')\n\ndef pm(player,message):\n try:\n telconn.write_out( \"pm \" + player + \" \" + '\"' + message + '\"')\n except Exception as e:\n print \"pm error: \"+e.message\n\n\ndef kill_player(player):\n telconn.write_out(\"kill \" + player)\n\n\ndef help(player):\n pm(player,\"The following are the available commands\")\n pm(player,\"/home : Teleports you to your set home location\")\n pm(player,\"/setpoi <name> : Creates a new poi at the given location\")\n pm(player,\"/poi <name> : Teleports you to the named poi\")\n pm(player,\"/rpoi <name> : Removes the named poi\")\n pm(player,\"/listpoi or /lpoi : Lists all of your pois\")\n pm(player,\"/clearpoi : Clears all of your pois\")\n pm(player,\"/killme : Instantly kills you\")\n pm(player,\"/goto <player> : Teleports you to the named player\")\n pm(player,\"/bag : Teleports you your last death location\")\n pm(player,\"/where : Gives your position on the map\")\n pm(player,\"/drop : Displays a list of airdrops that have not been claimed\")\n pm(player,\"/claim : claims any airdrop in your radius\")\n\n\ndef send_motd(player):\n try:\n time.sleep(5)\n pm(player,runtime.motd)\n pm(player,\"Type /help for a list of available commands\")\n except Exception as e:\n logger.log_debug(\"send_motd error: \" + e.message)\n\ndef update_players():\n telconn.write_out(\"lp\")\n\ndef update_player_objects_timed():\n time.sleep(5)\n update_players()\n while runtime.run:\n if len(memorydb.online_players)>0:\n update_players()\n time.sleep(1)", "id": "12509184", "language": "Python", "matching_score": 2.3515944480895996, "max_stars_count": 0, "path": "commands.py" }, { "content": "#!/usr/bin/python\n\nimport telnetlib\nimport thread\nimport threading\nimport re\nimport os\nimport pickle\nimport random\nimport string\nimport socket\nimport time\nimport ConfigParser\nfrom time import strftime, sleep\nfrom Tkinter import *\nfrom ttk import *\n\nVERBOSE = False\n\nout = \"\"\nrun = True\n\n#http://steamcommunity.com/profiles/steamid\n\nSessionLog = []\nPlayerObjectArray = []\nOnlinePlayerList = []\nPlayerUpdateList = []\nAirDrops = []\n\ndef create_config():\n config = ConfigParser.RawConfigParser()\n config.add_section('Configuration')\n config.set('Configuration', 'host', 'localhost')\n config.set('Configuration', 'port', '81')\n config.set('Configuration', 'password', '<PASSWORD>')\n config.set('Configuration', 'motd', '')\n config.set('Configuration', 'gui', 'false')\n config.set('Configuration', 'server', 'true')\n config.set('Configuration', 'verbose', 'false')\n with open('config.cfg', 'wb') as configfile:\n config.write(configfile)\n\ndef read_config():\n global HOST, PORT, PASSWORD, MOTD, GUI, SERVER, VERBOSE\n config = ConfigParser.RawConfigParser()\n config.read('config.cfg')\n HOST = config.get('Configuration', 'host')\n PORT = config.get('Configuration', 'port')\n PASSWORD = config.get('Configuration', 'password')\n MOTD = config.get('Configuration', 
'motd')\n GUI = config.getboolean('Configuration', 'gui')\n SERVER = config.getboolean('Configuration', 'server')\n VERBOSE = config.getboolean('Configuration', 'verbose')\n\nDesktopNotify = False\nDebug = True\n\n\nroot = Tk()\nroot.title(\"7dtd Telnet Client\")\nroot.columnconfigure(0, weight=1)\nroot.rowconfigure(0, weight=1)\nnote = Notebook(root)\nnote.columnconfigure(0, weight=1)\nnote.rowconfigure(0, weight=1)\nnote.grid(sticky=NSEW)\n\n\n\ntab1 = Frame(note)\ntab1.columnconfigure(0, weight=1)\ntab1.columnconfigure(1, weight=1)\ntab1.rowconfigure(0, weight=1)\ntextbox = Text(tab1, height=20, width=80)\ntextbox.columnconfigure(0, weight=1)\ntextbox.rowconfigure(0, weight=1)\nplayerbox = Text(tab1, height=20, width=20)\ncommandLabel = Label(tab1, width=10, text=\"Command:\")\ninput = Entry(tab1, width=80)\n\n\ntab2 = Frame(note)\ntab2.columnconfigure(0, weight=1)\ntab2.rowconfigure(0, weight=1)\nplayerlist = Listbox(tab2, height=20, width=20)\ninfobox = Text(tab2, height=10, width=80)\n\n\ndef getPlayerObject(name):\n\tfor player in PlayerObjectArray:\n\t\tif player.name == name:\n\t\t\treturn player\n\ndef show_player_info(name):\n infobox.delete(\"1.0\", END)\n player = getPlayerObject(name)\n infobox.insert(END, \"Name:\"+ player.name+ \"\\n\")\n infobox.insert(END, \"SteamID:\"+ player.steamid+ \"\\n\")\n infobox.insert(END, \"IP:\"+ player.ip+ \"\\n\")\n infobox.insert(END, \"Last Location:\"+ player.position+ \"\\n\")\n\ndef addInfo(info):\n\ttextbox.insert(END,info + '\\n')\n\ttextbox.see(END)\n\ndef refreshPlayerList():\n\tplayerbox.delete(\"1.0\", END)\n\tfor player in OnlinePlayerList:\n\t\tplayerbox.insert(\"1.0\",player+ \"\\n\")\ndef refreshInfoList():\n playerlist.delete(1, END)\n for player in PlayerObjectArray:\n playerlist.insert(1, player.name)\n\ndef func(event):\n\tsend_command(input.get())\n\tinput.delete(0, END)\n\ndef listclick(e):\n show_player_info(str(playerlist.get(playerlist.curselection())))\n\n\ndef handler():\n global run\n run = False\n root.destroy()\n sys.exit()\n\n\ntextbox.grid(row=0, column=0, sticky=NSEW, columnspan=2)\nplayerbox.grid(row=0, column=3, sticky=N+S)\ncommandLabel.grid(row=1, column=0, sticky=W)\ninput.grid(row=1, column=1, sticky=E+W, columnspan=3)\ninput.bind('<Return>', func)\nnote.add(tab1, text=\"Console\", compound=TOP)\n\nplayerlist.grid(row=0,column=1,sticky=N+S)\nplayerlist.bind('<<ListboxSelect>>', listclick)\ninfobox.grid(row=0, column=0, sticky=N+E+W)\nnote.add(tab2, text = \"Players\")\n\nroot.protocol(\"WM_DELETE_WINDOW\", handler)\n\ndef log(info):\n print info\n if GUI:\n addInfo(info)\n\n\n\ndef decode_players():\n player_export = open(\"player_export.csv\", \"wb\")\n poi_export= open(\"poi_export.csv\", \"wb\")\n try:\n with open(\"players.pickle\", \"rb\") as f:\n log(\"Trying to load player information...\")\n while True:\n try:\n PlayerObjectArray.append(pickle.load(f))\n except EOFError:\n break\n log(\"The following players have been loaded...\")\n for player in PlayerObjectArray:\n player_export.write(player.name + \",\" + player.steamid +\",,,\" + player.home+ \",,,,,\" +\"\\n\")\n log(\"-------------\")\n log(\"Name:\" + player.name)\n log(\"SteamID:\" + str(player.steamid))\n log(\"IP:\" + str(player.ip))\n log(\"EntityID:\" + str(player.entityid))\n log(\"Home:\" + str(player.home))\n log(\" \")\n for poi in player.pois:\n poi_export.write(player.name +\",\" +player.steamid+\",\"+poi.split(\",\")[1]+\",\"+poi.split(\",\")[0]+\",\"+player.steamid+poi.split(\",\")[1]+\"\\n\")\n f.close()\n 
player_export.close()\n except:\n log(\"Error opening players pickle file\")\n\ndef encode_players():\n with open(\"players.pickle\", \"wb\") as f:\n for player in PlayerObjectArray:\n pickle.dump(player, f, -1)\n f.close()\n\ndef send_command(cmd):\n out.write(cmd + \"\\n\")\n\ndef delayed_teleport(tp):\n out.write(tp)\n time.sleep(1)\n out.write(tp)\n time.sleep(1)\n out.write(tp)\t\n\ndef pm(message,player):\n out.write( \"pm \" + player + \" \" + '\"' + message + '\"' + \"\\n\")\n\ndef say(message):\n out.write(\"say \" + '\"' + message + '\"' + \"\\n\")\n\n\n\ndef shutdown():\n out.write(\"shutdown \" + \"\\n\")\n\ndef kick(player, reason=\"\"):\n out.write(\"kick \" + player + \" \" + reason + \"\\n\")\n\ndef whitelistUpdate(player, operation, level=0):\n if level > 0:\n out.write(\"whitelist \" + operation + \" \" + player + \" \" + level + \"\\n\")\n else:\n out.write(\"whitelist \" + operation + \" \" + player + \"\\n\")\n\ndef airDrop():\n out.write(\"spawnairdrop \" + \"\\n\")\n\ndef warn(player, reason=\"\"):\n for idx, playerobject in enumerate(PlayerObjectArray):\n if player in playerobject.name:\n out.write(\"pm \" + player + \" You have been issued a warning:\" + reason + \"\\n\")\n playerobject.warned += 1\n out.write(\"pm \" + player + \" You have been warned \" + str(playerobject.warned) + \" times\" + \"\\n\")\n\ndef update_player_objects_timed():\n out.write(\"lp\\n\")\n while True:\n if len(OnlinePlayerList)>0:\n out.write(\"lp\\n\")\n sleep(1)\ndef update_player_objects():\n out.write(\"lp\\n\")\n\ndef send_motd(player):\n sleep(5)\n pm(MOTD,player)\n pm(\"Type /help for a list of available commands\",player)\n\n\ndef notify(msg):\n if DesktopNotify:\n os.system(\"notify-send \"+msg)\n\nclass player_object(object):\n def __init__(self):\n self.name = \"\"\n self.entityid = 0\n self.steamid = 0\n self.ip= \"\"\n self.lastlogon = \"\"\n self.home = \"\"\n self.warned = 0\n self.location = \"\"\n self.home = \"\"\n self.health = 0\n self.deaths = 0\n self.zombies = 0\n self.players = 0\n self.score = 0\n self.ping = 0\n self.position = \"\"\n self.pois = []\n self.tprequests = []\n self.admin = False\n self.adminlevel = 0\n self.mod = False\n self.revive = \"\"\n\n def adminAdd(self):\n pass\n\n def adminRemove(self):\n pass\n\n def adminUpdate(self,level):\n pass\n def modAdd(self):\n pass\n\n def modRemove(self):\n pass\n\n def modUpdate(self,level):\n pass\n\n\n\ndef parse_players_update(line):\n try:\n position = str(int(round(float(line.split(\" \")[3].replace(\"pos=(\", \"\").replace(\",\", \"\"))))) + \" \" + str(int(round(float(line.split(\" \")[4].replace(\",\", \"\"))))) + \" \" + str(int(round(float(line.split(\" \")[5].replace(\"),\", \"\")))))\n entityid = line.split(\",\")[0].split(\"=\")[1]\n name = line.split(\",\")[1].replace(\" \", \"\")\n health = line.split(\",\")[9].split(\"=\")[1]\n death = line.split(\",\")[10].split(\"=\")[1]\n zombies = line.split(\",\")[11].split(\"=\")[1]\n players = line.split(\",\")[12].split(\"=\")[1]\n score = line.split(\",\")[13].split(\"=\")[1]\n steamid = line.split(\",\")[15].split(\"=\")[1]\n ip = line.split(\",\")[16].split(\"=\")[1]\n ping = line.split(\",\")[17].split(\"=\")[1].rstrip(string.whitespace)\n\n if name not in OnlinePlayerList:\n OnlinePlayerList.append(name)\n if GUI:\n refreshPlayerList()\n\n if len(PlayerObjectArray) < 1:\n log(\" object array is empty.creating new player object\")\n player = player_object()\n player.health = health\n player.deaths = death\n player.zombies = zombies\n player.players 
= players\n player.score = score\n player.ping = ping\n player.position = position\n player.name = name\n player.steamid = steamid\n player.ip = ip\n player.entityid = entityid\n PlayerObjectArray.append(player)\n log(\"-------------\")\n log(\"Name:\" + player.name)\n log(\"SteamID:\" + str(player.steamid))\n log(\"IP:\" + str(player.ip))\n log(\"EntityID:\" + str(player.entityid))\n encode_players()\n\n found = 0\n for player in PlayerObjectArray:\n if steamid == player.steamid:\n found = 1\n player.health = health\n player.deaths = death\n player.zombies = zombies\n player.players = players\n player.score = score\n player.ping = ping\n player.position = position\n player.name = name\n player.steamid = steamid\n player.ip = ip\n player.entityid = entityid\n\n if found == 0:\n log(\"new player joined.creating new player object\")\n player = player_object()\n player.health = health\n player.deaths = death\n player.zombies = zombies\n player.players = players\n player.score = score\n player.ping = ping\n player.position = position\n player.name = name\n player.steamid = steamid\n player.ip = ip\n player.entityid = entityid\n PlayerObjectArray.append(player)\n log(\"-------------\")\n log(\"Name:\" + player.name)\n log(\"SteamID:\" + str(player.steamid))\n log(\"IP:\" + str(player.ip))\n log(\"EntityID:\" + str(player.entityid))\n encode_players()\n if GUI:\n refreshInfoList()\n\n except:\n log(\"error parsing player lp\")\n\ndef readsession(line):\n # SessionLog.append(line)# necessary for stats and time?\n if VERBOSE:\n log(line)\n STATS = re.search(\"STATS\", line)\n CONNECTED = re.search(\"Player connected\", line)\n JOINED = re.search(\"joined the game\", line)\n DISCONNECTED = re.search(\"left the game\", line)\n MESSAGE = re.search(\"GMSG: \", line)\n NIGHTHORDE = re.search(\"Spawning Night Horde for day\", line)\n WANDERINGHORDE = re.search(\"Spawning Wandering Horde\", line)\n WAVESPAWN = re.search(\"Spawning this wave\", line)\n LISTPLAYERS = re.search(\"([0-9][.]\\sid=)\", line)\n AIRDROP = re.search(\"INF AIAirDrop: Spawned supply crate\",line)\n\n if NIGHTHORDE:\n log(line)\n\n if WAVESPAWN:\n log(line)\n\n if WANDERINGHORDE:\n log(line)\n\n if CONNECTED:\n playername = line.split(\",\")[2].split(\"=\")[1]\n update_player_objects()\n log(strftime(\"%m-%d-%y %I:%M:%S %p\") + \":Player connected: \" + playername)\n notify(\"Connected:\" + playername)\n if SERVER:\n send_motd(playername)\n\n\n if DISCONNECTED:\n playername = line.split(' ')[4]\n log(strftime(\"%m-%d-%y %I:%M:%S %p\") + \":Player disconnected: \" + playername)\n notify(\"Disconnected:\" + playername)\n try:\n OnlinePlayerList.remove(playername)\n if GUI:\n refreshPlayerList()\n except:\n pass\n\n\n if AIRDROP:\n try:\n line = line.replace(\"(\",\"\").replace(\")\",\"\").replace(\",\",\"\").split()\n x = line[8]\n y = line[10]\n z = line[9]\n location = str(x)+\",\"+str(64)+\",\"+str(y)\n AirDrops.append(location)\n say(\"Airdrop at \" + location)\n except:\n log(\"Error parsing airdrop info\")\n\n if MESSAGE:\n log(line)\n if SERVER:\n playername = line.split(\" \")[4].rstrip(':')\n DEBUG = re.search(\"/debug\", line)\n SETHOME = re.search(\"/sethome\", line)\n HOME = re.search(\"/home\", line)\n RANDOM = re.search(\"/random\", line)\n SETPOI = re.search(\"/setpoi\", line)\n POI = re.search(\"/poi\", line)\n RPOI = re.search(\"/rpoi\", line)\n LISTPOI = re.search(\"/listpoi\", line)\n LPOI = re.search(\"/lpoi\", line)\n CLEARPOI = re.search(\"/clearpoi\", line)\n KILLME = re.search(\"/killme\", line)\n GOTO = 
re.search(\"/goto\", line)\n HELP = re.search(\"/help\", line)\n REVIVE = re.search(\"/bag\",line)\n DIED = re.search(\"died\",line)\n\n if DIED:\n try:\n playername = line.split(\" \")[5]\n #log(playername + \" has died\")\n for player in PlayerObjectArray:\n if playername == player.name:\n #log(player.position)\n player.revive = player.position\n log(\"Setting \" + player.name + \" revive point to: \" + player.position)\n pm(\"Setting your revive point to: \"+ player.position,player.name )\n except:\n log(\"Failed to parse player death output (line 436)\")\n\n\n if HELP:\n pm(\"The following are the available commands\",playername)\n pm(\"/sethome <name> Sets your home\",playername)\n pm(\"/home <name> Teleports you to your set home location\",playername)\n pm(\"/setpoi <name> Creates a new poi at the given location\",playername)\n pm(\"/rpoi <name> Removes the named poi\",playername)\n pm(\"/listpoi or /lpoi Lists all of your pois\",playername)\n pm(\"/clearpoi Clears all of your pois\",playername)\n pm(\"/killme Instantly kills you\",playername)\n pm(\"/goto <player> Teleports you to the named player\",playername)\n pm(\"/home <name> Teleports you to your set home location\",playername)\n\n\n if KILLME:\n out.write(\"kill \" + playername + \"\\n\")\n\n if GOTO:\n try:\n player2 = line.split(\" \")[6]\n #out.write(\"tele \" + playername + \" \" + player2 + \"\\n\")\n try:\n thread.start_new_thread(delayed_teleport, (\"tele \" + playername + \" \" + player2 + \"\\n\",))\n except Exception as errtxt:\n print errtxt\n\n except:\n pm(\"Teleport failed. Check target name and try again\",playername)\n\n if SETHOME:\n for player in PlayerObjectArray:\n if playername in player.name:\n player.home = player.position\n pm(\"Home has been set to \" + player.home,player.name)\n log(player.name + \" Home has been set to \" + player.home)\n encode_players()\n\n if HOME:\n for player in PlayerObjectArray:\n if playername in player.name:\n #out.write(\"tele \" + playername + \" \" + player.home + \"\\n\")\n try:\n thread.start_new_thread(delayed_teleport, (\"tele \" + playername + \" \" + player.home + \"\\n\",))\n except Exception as errtxt:\n print errtxt\n\t\t\t\t\n if RANDOM:\n randomx = random.randint(-1000, 1000)\n randomy = random.randint(-1000, 1000)\n #out.write(\"tele \" + playername + \" \" + str(randomx) + \" 64 \" + str(randomy) + \"\\n\")\n\n if SETPOI:\n for player in PlayerObjectArray:\n if playername in player.name:\n try:\n found = 0\n poiname = line.split(\" \")[6].rstrip(string.whitespace)\n poistring = player.position + \",\" + poiname\n for poi in player.pois:\n if poiname == poi.split(\",\")[1]:\n player.pois.remove(poi)\n player.pois.append(poistring)\n found = 1\n\n if found == 0:\n player.pois.append(poistring)\n pm(player.position + \" has been set as \" + poiname,player.name)\n encode_players()\n except:\n pm(\"Failed to set poi\",player.name)\n\n if POI:\n error = 0\n try:\n poiname = line.split(\" \")[6].rstrip(string.whitespace)\n except:\n error = 1\n for player in PlayerObjectArray:\n if playername in player.name:\n if error != 1:\n found = 0\n for poi in player.pois:\n poilocation = poi.split(\",\")[0]\n if poiname == poi.split(\",\")[1]:\n #out.write(\"tele \" + playername + \" \" + poilocation + \"\\n\")\n try:\n thread.start_new_thread(delayed_teleport, (\"tele \" + playername + \" \" + poilocation + \"\\n\",))\n except Exception as errtxt:\n print errtxt\n found = 1\n if found == 0:\n pm( \"Could not find poi.Please check spelling and try again\",player.name)\n 
else:\n pm(\"Please use the following format /poi poiname\",player.name)\n\n if RPOI:\n try:\n found = 0\n poiname = line.split(\" \")[6].rstrip(string.whitespace)\n for player in PlayerObjectArray:\n if playername in player.name:\n for poi in player.pois:\n if poiname == poi.split(\",\")[1]:\n player.pois.remove(poi)\n pm(poiname + \" has been remove from list\",player.name)\n encode_players()\n found = 1\n\n if found == 0:\n pm(\"Failed to remove poi\",playername)\n except:\n pm(\"Failed to remove poi\",playername)\n if LISTPOI or LPOI:\n for player in PlayerObjectArray:\n if playername in player.name:\n if len(player.pois) < 1:\n pm( \"You have no pois\",player.name)\n else:\n pm(\"The following are your pois:\",player.name)\n for poi in player.pois:\n pm(poi.split(\",\")[1] + \":\"+ poi.split(\",\")[0],player.name)\n\n if CLEARPOI:\n for player in PlayerObjectArray:\n if playername in player.name:\n del player.pois[:]\n encode_players()\n pm(\"All pois have been removed\",player.name)\n\n if REVIVE:\n for player in PlayerObjectArray:\n if playername in player.name:\n if player.revive != \"\":\n thread.start_new_thread(delayed_teleport, (\"tele \" + playername + \" \" + player.revive + \"\\n\",))\n else:\n pm(\"There is no revive point\",player.name)\n\n\n\n if LISTPLAYERS:\n parse_players_update(line)\n\nclass telnet_connect_telnetlib(threading.Thread):\n def __init__(self, threadID, name, counter):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.name = name\n self.counter = counter\n\n def run(self):\n try:\n global out\n out = telnetlib.Telnet(HOST, PORT,5)\n out.read_until(\"password:\")\n out.write(PASSWORD + \"\\n\")\n thread.start_new_thread(update_player_objects_timed, ())\n while run:\n #line = out.expect([\"\\r\\n\"],5)[2].strip()\n line = out.read_until(\"\\r\\n\").strip()\n if line != \"\":\n readsession(line)\n except Exception as e:\n if run:\n log(\"unable to connect : \" + e.message )\n\nread_config()\ndecode_players()\n\ntry:\n t = telnet_connect_telnetlib(1, \"Thread-1\", 1)\n t.start()\nexcept (KeyboardInterrupt, SystemExit):\n run = False\n sys.exit()\n\n\nif GUI:\n refreshInfoList()\n root.mainloop()\n\n\n", "id": "5359979", "language": "Python", "matching_score": 3.5328164100646973, "max_stars_count": 0, "path": "telnet.pyw" }, { "content": "__author__ = 'christopher'\n\n\nimport threading\nimport telnetlib\nimport director\nimport runtime\nimport logger\n\n\n\nclass telnet_connect_telnetlib(threading.Thread):\n def __init__(self, threadID, name, counter):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.name = name\n self.counter = counter\n\n def run(self):\n try:\n global out\n out = telnetlib.Telnet(runtime.host, runtime.port,5)\n out.read_until(\"password:\")\n out.write(runtime.password + \"\\n\")\n while runtime.run:\n line = out.read_until(\"\\r\\n\").strip()\n if line != \"\":\n try:\n director.route(line)\n #print line\n except Exception as e:\n logger.log_debug(e.message)\n except Exception as e:\n if runtime.run:\n logger.log(\"unable to connect : \" + e.message )\n\n\ndef write_out(cmd):\n try:\n out.write(cmd + \"\\n\")\n except Exception as e:\n logger.log_debug(e.message)\n\ndef initialize():\n try:\n t = telnet_connect_telnetlib(1, \"Thread-1\", 1)\n t.start()\n except Exception as e:\n logger.log_debug(e.message)\n\n\n", "id": "9248910", "language": "Python", "matching_score": 0.7137873768806458, "max_stars_count": 0, "path": "telconn.py" }, { "content": "import paramiko\nfrom paramiko import 
SSHException\nimport logger\n\n\nclass SshConnection:\n def __init__(self,ipaddress,port,username,password):\n self.client = None\n self.ipaddress = ipaddress\n self.port = port\n self.username = username \n self.password = password\n\n def is_connected(self):\n try:\n transport = self.client.get_transport()\n transport.send_ignore()\n return True\n except EOFError as e:\n return False\n\n\n def connect(self):\n logger.info(f\"Attempting to connect to {self.ipaddress} ...\")\n self.client=paramiko.SSHClient()\n self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n self.client.connect(self.ipaddress,self.port,self.username,self.password)\n\n\n def send_command(self,command):\n stdin, stdout, stderr = self.client.exec_command(command)\n stdout=stdout.readlines()\n return stdout\n\n\nif __name__ == \"__main__\":\n pass", "id": "9964841", "language": "Python", "matching_score": 3.974055290222168, "max_stars_count": 0, "path": "ssh.py" }, { "content": "import paramiko\nfrom paramiko import SSHException\nimport config\nimport logger\nimport sys\n\nSETTINGS = config.get_config()\nclient = None\n\n\ndef send_command(command):\n stdout = []\n try:\n stdin, stdout, stderr = client.exec_command(command)\n stdout=stdout.readlines()\n return stdout\n except SSHException as e:\n logger.error(repr(e))\n sys.exit()\n\ntry:\n logger.info(\"Attempting to connect to Mister ...\")\n client=paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect(SETTINGS['main']['mister_ip'],22, username=SETTINGS['main']['mister_username'], password=SETTINGS['main']['mister_password'])\nexcept Exception as e:\n logger.error(repr(e))\n sys.exit()\n\nif __name__ == \"__main__\":\n pass", "id": "6051016", "language": "Python", "matching_score": 1.341381311416626, "max_stars_count": 8, "path": "ssh.py" }, { "content": "import json\nimport os\n\nconfig_file = 'config.json'\n\nconfig = {}\n\ndef create_config():\n config = {}\n\n main = {}\n main[\"mister_ip\"] = \"000.000.00.000\"\n main[\"mister_username\"] = \"root\"\n main[\"mister_password\"] = \"1\"\n main[\"change_scenes\"] = True\n main[\"debug\"] = False\n main[\"custom_text_sources\"] = { \"GameName\": \"Playing {game} on {core}\" }\n main[\"refresh_rate\"] = \"1\"\n main[\"core_storage\"] = \"fat\"\n main[\"pause_scenes\"] = [ \"Pause Scene\" ]\n config[\"main\"] = main\n\n obs = {}\n obs['host'] = \"localhost\"\n obs['port'] = \"4444\"\n obs['password'] = \"\"\n config[\"obs\"] = obs\n\n with open(config_file, \"w\") as write_file:\n json.dump(config, write_file, indent=4)\n\n \n\ndef load_config():\n global config\n if os.path.exists(config_file):\n with open(config_file) as config_text:\n config = json.load(config_text)\n else:\n print(\"Creating config. 
Update new conif with proper details\")\n create_config()\n \ndef get_config():\n return config\n\nload_config()\n\nif __name__ == \"__main__\":\n print(get_config())", "id": "10428007", "language": "Python", "matching_score": 2.4561522006988525, "max_stars_count": 8, "path": "config.py" }, { "content": "import paramiko\nfrom time import sleep\nimport json\nimport config\nimport ssh\nimport mister\nimport cores\nimport logger\nimport obs\n\n\nSETTINGS = config.get_config()\nmaps = cores.read_file_map()\n\n\nRECENTS_FOLDER = '/media/{}/config/'.format(SETTINGS['main']['core_storage'])\n\n\nlast_game = \"\"\nlast_core = \"\"\n\n\ndef replace_text(core, game, displayname, text):\n return text.replace(\"{core}\", core).replace(\"{game}\", game).replace(\"{displayname}\", displayname)\n\n\nwhile True:\n try:\n core = mister.get_running_core()\n map_core = cores.get_map(core)\n game = mister.get_last_game(core)\n\n sources = None\n displayname = \"\"\n\n if \"display_name\" in map_core:\n displayname = map_core[\"display_name\"]\n\n if \"custom_text_sources\" in map_core:\n sources = map_core[\"custom_text_sources\"]\n elif \"custom_text_sources\" in SETTINGS[\"main\"]:\n sources = SETTINGS[\"main\"][\"custom_text_sources\"]\n\n if obs.get_current_scene() != map_core['scene'] and obs.is_in_scene_list(map_core[\"scene\"]):\n pause_enabled = False\n\n if \"pause_scenes\" in SETTINGS[\"main\"]:\n pause_enabled = True\n if pause_enabled and obs.get_current_scene() not in SETTINGS[\"main\"][\"pause_scenes\"]:\n if SETTINGS[\"main\"][\"change_scenes\"]:\n obs.change_scene(map_core['scene'])\n\n if game != \"\" and game != last_game:\n pause_enabled = False\n\n if \"pause_scenes\" in SETTINGS[\"main\"]:\n pause_enabled = True\n\n if pause_enabled and obs.get_current_scene() in SETTINGS[\"main\"][\"pause_scenes\"]:\n for source in SETTINGS[\"main\"][\"custom_text_sources\"]:\n obs.setSourceText(source, replace_text(\n core, game, displayname, sources[source]))\n else:\n for source in sources:\n obs.setSourceText(source, replace_text(\n core, game, displayname, sources[source]))\n \n if core != last_core:\n if SETTINGS[\"main\"]['change_scenes']:\n current_scene = obs.get_current_scene()\n\n if \"pause_scenes\" in SETTINGS[\"main\"]:\n pause_enabled = True\n\n if pause_enabled and current_scene in SETTINGS[\"main\"][\"pause_scenes\"]:\n for source in SETTINGS[\"main\"][\"custom_text_sources\"]:\n obs.setSourceText(source,\"\")\n else:\n for source in sources:\n obs.setSourceText(source, \"\")\n if game != last_game:\n for source in sources:\n obs.setSourceText(source, replace_text(\n core, game, displayname, sources[source]))\n if current_scene != map_core[\"scene\"] and obs.is_in_scene_list(map_core[\"scene\"]):\n obs.change_scene(map_core['scene'])\n last_core = core\n last_game = game\n\n except Exception as e:\n logger.error(repr(e))\n sleep(int(SETTINGS[\"main\"][\"refresh_rate\"]))\n\nclient.close()\n", "id": "3756391", "language": "Python", "matching_score": 2.167799472808838, "max_stars_count": 8, "path": "main.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom urllib import request\nfrom obswebsocket import obsws, requests\nimport logger\nimport config\nimport event_manager\nfrom string_util import replace_text, replace_value\nimport images\nimport threading\nfrom time import sleep\n\nSETTINGS = config.get_config()\n\n\nws = None\nconnected = False\nmax_retries = 3\n\nretries = 0\n\ndef initialize():\n global retries\n global ws\n global connected\n if retries < 
max_retries:\n try:\n logger.info(\"Attempting to connect to OBS ...\")\n ws = obsws(SETTINGS['obs']['host'], int(\n SETTINGS['obs']['port']), SETTINGS['obs']['password'])\n ws.connect()\n connected = True\n logger.info(\"Connected to OBS\")\n except Exception as e:\n logger.error(\"Failed to connect to OBS: {}\".format(repr(e)))\n retries += 1\n sleep(1)\n initialize()\n\n\ndef obs_call(call):\n try:\n return ws.call(call)\n except Exception as e:\n logger.error(\"Error making OBS call: {}.\".format(repr(e)))\n\n\n\ndef disconnect():\n ws.disconnect()\n\ndef get_current_scene():\n scenes = obs_call(requests.GetSceneList())\n return scenes.getCurrentScene()\n\n\ndef get_scene_list():\n scenes = obs_call(requests.GetSceneList())\n return scenes.getScenes()\n\n\ndef is_in_scene_list(scene_name):\n scenes = obs_call(requests.GetSceneList())\n for scene in scenes.getScenes():\n if scene_name == scene['name']:\n return True\n return False\n\n\ndef is_in_source_list(source_name):\n sources = obs_call(requests.GetSourcesList())\n for source in sources.getSources():\n if source_name == source[\"name\"]:\n return True\n return False\n\n\ndef broadcast_message(realm, message):\n requests.BroadcastCustomMessage(realm, {\"message\": message})\n logger.event(f'Broadcasted message \"{message}\" to realm {realm}')\n\n\ndef setSourceText(name, text):\n try:\n source_settings = obs_call(\n requests.GetSourceSettings(name)).getSourceSettings()\n logger.event('Changing source text of \"{}\" to \"{}\"'.format(name, text))\n source_settings[\"text\"] = text\n obs_call(requests.SetSourceSettings(\n sourceName=name, sourceSettings=source_settings))\n except KeyError as e:\n logger.error(\n f'Unable to set source text for source \"{name}\". Source name not found')\n\n\ndef setSourceImage(name, file):\n try:\n source_settings = obs_call(\n requests.GetSourceSettings(name)).getSourceSettings()\n logger.event(\n 'Changing source image of \"{}\" to \"{}\"'.format(name, file))\n source_settings[\"file\"] = file\n obs_call(requests.SetSourceSettings(\n sourceName=name, sourceSettings=source_settings))\n except Exception as e:\n logger.error(\n \"Unable to set source image for {}: {}\".format(name, repr(e)))\n\n\ndef setBrowserSourceUrl(name, url):\n try:\n logger.event(\n 'Changing browser source url of \"{}\" to \"{}\"'.format(name, url))\n obs_call(requests.SetBrowserSourceProperties(name, url=url))\n except Exception as e:\n logger.error(\n \"Unable to set source url for {}: {}\".format(name, repr(e)))\n\n\ndef setSourceVolume(name, volume,useDecibel=True):\n try:\n logger.event(f'Changing volume of {name} to {volume}')\n obs_call(requests.SetVolume(name, volume,useDecibel))\n except Exception as e:\n logger.error(\n f\"Unable to set volume of {name} to {volume}: {e}\")\n\ndef SetSourceFilterSettings(source,filter,settings):\n try:\n logger.event(f'Setting {source} filter {filter} settings to {settings}')\n response = obs_call(requests.SetSourceFilterSettings(source,filter,settings))\n if not response.status:\n logger.error(\n f\"Unable to set {source} filter {filter} settings to {settings}: {response.datain}\")\n except Exception as e:\n logger.error(\n f\"Unable to set {source} filter {filter} settings to {settings}: {e}\")\n\n\ndef SetSourceFilterVisibility(source,filter,visible):\n try:\n logger.event(f'Setting {source} filter {filter} visibility to {visible}')\n obs_call(requests.SetSourceFilterVisibility(source,filter,visible))\n except Exception as e:\n logger.error(\n f\"Unable to set {source} filter 
{filter} visibility to {visible}: {e}\")\n\ndef setSceneItemProperty(name, property, value):\n try:\n logger.event(\n f'Changing source property {property} of \"{name}\" to \"{value}\"')\n args = {\"item\": name, property: value}\n obs_call(requests.SetSceneItemProperties(**args))\n except Exception as e:\n logger.error(\n f\"Unable to set source property {property} for {name}: {e}\")\n\n\ndef change_scene(name):\n try:\n logger.event(\"Switching to {}\".format(name))\n obs_call(requests.SetCurrentScene(name))\n except Exception as e:\n logger.error(\"Unable to scene to {}: {}\".format(name, repr(e)))\n\ndef handle_event(event, action):\n if connected:\n if action['action'] == \"ObsChangeSourceText\":\n for source in action[\"sources\"]:\n if is_in_source_list(replace_text(source, event.tokens)):\n threading.Thread(target=setSourceText, args=[replace_text(\n source, event.tokens), replace_text(action[\"sources\"][source], event.tokens)]).start()\n else:\n logger.error(\n f\"Source {replace_text(source,event.tokens)} not found\")\n\n if action['action'] == \"ObsChangeSourceImage\":\n for source in action[\"sources\"]:\n if is_in_source_list(source):\n if action[\"sources\"][source] == \"\":\n threading.Thread(target=setSourceImage,\n args=[source, \"\"]).start()\n else:\n game = \"\"\n release_name = \"\"\n system = \"\"\n if \"system\" in event.tokens:\n system = event.tokens[\"system\"]\n if \"rom\" in vars(event):\n game = event.rom[\"rom_extensionless_file_name\"]\n release_name = event.rom[\"release_name\"]\n threading.Thread(target=setSourceImage, args=[source, images.get_image(\n action[\"sources\"][source], system, game, release_name)]).start()\n else:\n logger.error(f\"Source {source} not found\")\n\n if action['action'] == \"ObsSetBrowserSourceUrl\":\n source = replace_text(action[\"source\"], event.tokens)\n url = replace_text(action[\"url\"], event.tokens)\n if is_in_source_list(source):\n threading.Thread(target=setBrowserSourceUrl,\n args=[source, url]).start()\n else:\n logger.error(f\"Source {source} not found\")\n\n if action['action'] == \"ObsSetSourceFilterSettings\":\n source = replace_text(action[\"source\"], event.tokens)\n filter = replace_text(action[\"filter\"], event.tokens)\n settings = replace_value(action[\"settings\"],event.tokens)\n if is_in_source_list(source):\n threading.Thread(target=SetSourceFilterSettings,\n args=[source,filter,settings]).start()\n else:\n logger.error(f\"Source {source} not found\")\n\n if action['action'] == \"ObsSetSourceFilterVisibility\":\n source = replace_text(action[\"source\"], event.tokens)\n filter = replace_text(action[\"filter\"], event.tokens)\n visible = action[\"visible\"]\n if is_in_source_list(source):\n threading.Thread(target=SetSourceFilterVisibility,\n args=[source,filter,visible]).start()\n else:\n logger.error(f\"Source {source} not found\")\n\n if action['action'] == \"ObsSetSourceVolume\":\n source = replace_text(action[\"source\"], event.tokens)\n volume = replace_text(action[\"volume\"], event.tokens)\n use_db = True\n if \"type\" in action:\n if action[\"type\"] != \"db\":\n use_db = False\n\n try:\n if is_in_source_list(source):\n threading.Thread(target=setSourceVolume, args=[\n source, float(volume),use_db]).start()\n else:\n logger.error(f\"Source {source} not found\")\n except Exception as e:\n logger.error(f\"Unable to set volume of {source} to {volume}: {e}\")\n\n if action['action'] == \"ObsSetItemProperty\":\n source = replace_text(action[\"source\"], event.tokens)\n if is_in_source_list(source):\n 
threading.Thread(target=setSceneItemProperty, args=[\n source, action[\"property\"], action[\"value\"]]).start()\n else:\n logger.error(f\"Source {source} not found\")\n\n if action['action'] == \"ObsChangeScene\":\n scene = replace_text(action[\"scene\"], event.tokens)\n if is_in_scene_list(scene):\n threading.Thread(target=change_scene, args=[scene]).start()\n else:\n logger.error(f\"Scene {scene} not found\")\n\nevent_manager.subscribers[\"OBS\"] = {}\nevent_manager.subscribers[\"OBS\"][\"initialize\"] = lambda: initialize()\nevent_manager.subscribers[\"OBS\"][\"handle_event\"] = {\n 'function': handle_event, 'arg': \"args\"}\n\nif __name__ == \"__main__\":\n initialize()\n", "id": "1834497", "language": "Python", "matching_score": 4.670456886291504, "max_stars_count": 0, "path": "obs.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport json\nfrom obswebsocket import obsws, requests # noqa: E402\nimport logger\nimport config\n\nSETTINGS = config.get_config()\n\n\nws = None\n\n\ntry:\n ws = obsws(SETTINGS['obs']['host'], int(\n SETTINGS['obs']['port']), SETTINGS['obs']['password'])\n ws.connect()\nexcept Exception as e:\n logger.error(\"Unable to connect to OBS: {}\".format(repr(e)))\n\n\ndef obs_call(call):\n try:\n return ws.call(call)\n except Exception as e:\n logger.error(\"Error making OBS call: {}.\".format(repr(e)))\n\n\ndef disconnect():\n ws.disconnect()\n\n\ndef get_current_scene():\n scenes = obs_call(requests.GetSceneList())\n return scenes.getCurrentScene()\n\n\ndef get_scene_list():\n scenes = obs_call(requests.GetSceneList())\n return scenes.getScenes()\n\n\ndef is_in_scene_list(scene_name):\n scenes = obs_call(requests.GetSceneList())\n for scene in scenes.getScenes():\n if scene_name == scene['name']:\n return True\n return False\n\n\ndef setSourceText(name, text):\n try:\n source_settings = obs_call(\n requests.GetSourceSettings(name)).getSourceSettings()\n logger.event('Changing source text of \"{}\" to \"{}\"'.format(name, text))\n source_settings[\"text\"] = text\n obs_call(requests.SetSourceSettings(sourceName=name, sourceSettings=source_settings))\n except Exception as e:\n logger.error(\"Unable to set source text for {}: {}\".format(name, repr(e)))\n\n\ndef change_scene(name):\n try:\n logger.event(\"Switching to {}\".format(name))\n ws.call(requests.SetCurrentScene(name))\n except Exception as e:\n logger.error(\"Unable to scene to {}: {}\".format(name, repr(e)))\n\n", "id": "5915784", "language": "Python", "matching_score": 0.28959155082702637, "max_stars_count": 8, "path": "obs.py" }, { "content": "import board\nimport displayio\nimport terminalio\nfrom adafruit_display_text import label\nfrom time import sleep\nfrom util import colors\nimport util\n\nclass Menu:\n def __init__(self,pet):\n self.currentSelection = 0\n self.display = board.DISPLAY\n self.font = terminalio.FONT\n self.color = colors.black\n self.showing = False\n self.debounce = 0\n self.debounceMax = .05\n self.pet = pet\n\n self.display_group = displayio.Group(max_size=20)\n\n color_bitmap = displayio.Bitmap(160, 128, 1)\n color_palette = displayio.Palette(1)\n color_palette[0] = colors.white \n bg_sprite = displayio.TileGrid(color_bitmap, pixel_shader=color_palette, x=0, y=0)\n\n self.display_group.append(bg_sprite)\n\n # Food label\n food_text = \"* Food\"\n self.food_label = label.Label(self.font, text=food_text, color=self.color)\n self.food_label.x = 10\n self.food_label.y = 10\n self.display_group.append(self.food_label)\n\n # Play label\n play_text = \"Play \"\n 
self.play_label = label.Label(self.font, text=play_text, color=self.color)\n self.play_label.x = 10\n self.play_label.y = 25\n self.display_group.append(self.play_label)\n\n # Bathroom label\n bathroom_text = \"Bathroom \"\n self.bathroom_label = label.Label(self.font, text=bathroom_text, color=self.color)\n self.bathroom_label.x = 10\n self.bathroom_label.y = 40\n self.display_group.append(self.bathroom_label)\n\n # Reset label\n reset_text = \"Reset \"\n self.reset_label = label.Label(self.font, text=reset_text, color=self.color)\n self.reset_label.x = 10\n self.reset_label.y = 80\n self.display_group.append(self.reset_label)\n\n\n\n def move(self):\n if self.currentSelection == 0:\n play_text = \"* Play\"\n self.play_label.text = play_text\n\n food_text = \"Food\"\n self.food_label.text = food_text\n\n self.currentSelection = 1 \n\n elif self.currentSelection == 1:\n bathroom_text = \"* Bathroom\"\n self.bathroom_label.text = bathroom_text\n \n play_text = \"Play\"\n self.play_label.text = play_text\n\n self.currentSelection = 2\n\n elif self.currentSelection == 2:\n bathroom_text = \"Bathroom\"\n self.bathroom_label.text = bathroom_text\n\n reset_text = \"* Reset\"\n self.reset_label.text = reset_text\n\n self.currentSelection = 3\n\n else:\n reset_text = \"Reset \"\n self.reset_label.text = reset_text\n\n food_text = \"* Food\"\n self.food_label.text = food_text\n\n self.currentSelection = 0\n\n\n def select(self):\n\n if self.currentSelection == 0:\n self.pet.update_hunger(-1)\n self.showing = False\n\n if self.currentSelection == 1:\n self.pet.update_happiness(1)\n self.showing = False\n\n if self.currentSelection == 2:\n self.pet.update_health(1)\n self.showing = False\n\n if self.currentSelection == 3:\n self.pet.reset()\n self.showing = False\n\n\n\n def update(self):\n pass\n\n def draw(self):\n self.showing = True\n self.update()\n self.display.show(self.display_group)", "id": "5386954", "language": "Python", "matching_score": 3.474438428878784, "max_stars_count": 0, "path": "menu.py" }, { "content": "import board\nimport displayio\nimport terminalio\nfrom adafruit_display_text import label\nimport adafruit_imageload\nfrom time import sleep\nfrom util import colors\nimport util\n\n\n\n\n\n\n\n\nclass Hud:\n def __init__(self,pet):\n self.pet = pet\n self.display = board.DISPLAY\n self.font = terminalio.FONT\n self.color = colors.black\n self.batter_check_cooldown = 1000\n self.batter_check_timeout = self.batter_check_cooldown\n\n color_bitmap = displayio.Bitmap(160, 128, 1)\n color_palette = displayio.Palette(1)\n color_palette[0] = colors.white \n bg_sprite = displayio.TileGrid(color_bitmap, pixel_shader=color_palette, x=0, y=0)\n\n self.sprite_sheet, self.palette = adafruit_imageload.load(\"/avatar-0.bmp\",bitmap=displayio.Bitmap,palette=displayio.Palette)\n self.palette.make_transparent(0)\n\n self.display_group = displayio.Group(max_size=20)\n\n self.display_group.append(bg_sprite)\n\n # Battery label\n battery_level = util.get_battery_level()\n battery_text = \"Battery: {}\".format(battery_level)\n self.battery_text = label.Label(self.font, text=battery_text, color=self.color)\n self.battery_text.x = 85\n self.battery_text.y = 120\n self.display_group.append(self.battery_text)\n\n # Name label\n name_text = \"Name: {} \".format(self.pet.get_name())\n self.name_label = label.Label(self.font, text=name_text, color=self.color)\n self.name_label.x = 10\n self.name_label.y = 10\n self.display_group.append(self.name_label)\n \n\n # Age label\n self.age_text = \"Age: {} 
\".format(self.pet.get_age())\n self.age_label = label.Label(self.font, text=self.age_text, color=self.color)\n self.age_label.x = 10\n self.age_label.y = 25\n self.display_group.append(self.age_label)\n\n\n # Health label\n health_text = \"Health: {} \".format(self.pet.health)\n self.health_label = label.Label(self.font, text=health_text, color=self.color)\n self.health_label.x = 10\n self.health_label.y = 40\n self.display_group.append(self.health_label)\n\n # Happiness Label\n happiness_text = \"Happiness: {} \".format(self.pet.happiness)\n self.happiness_label = label.Label(self.font, text=happiness_text, color=self.color)\n self.happiness_label.x = 10\n self.happiness_label.y = 55\n self.display_group.append(self.happiness_label)\n\n # Hunger label\n hunger_text = \"Hunger: {} \".format(self.pet.hunger)\n self.hunger_label = label.Label(self.font, text=hunger_text, color=self.color)\n self.hunger_label.x = 10\n self.hunger_label.y = 70\n self.display_group.append(self.hunger_label)\n\n self.sprite = displayio.TileGrid(self.sprite_sheet, pixel_shader=self.palette,width = 1,height = 1,tile_width = 16,tile_height = 16)\n self.sprite.x = 55\n self.sprite.y = 20\n self.sprite[0] = self.pet.get_avatar()\n self.sprite_group = displayio.Group(scale=2)\n self.sprite_group.append(self.sprite)\n self.display_group.append(self.sprite_group)\n\n def update(self):\n\n name_text = \"Name: {}\".format(self.pet.get_name())\n self.name_label.text = name_text\n \n age_text = \"Age: {}\".format(self.pet.get_age())\n self.age_label.text = age_text\n\n hunger_text = \"Hunger: {}\".format(self.pet.get_hunger())\n self.hunger_label.text = hunger_text\n\n happiness_text = \"Happiness: {}\".format(self.pet.get_happiness())\n self.happiness_label.text = happiness_text\n\n health_text = \"Health: {} \".format(self.pet.get_health())\n self.health_label.text = health_text\n\n if self.batter_check_timeout < 1:\n battery_level = util.get_battery_level()\n battery_text = \"Battery: {}\".format(battery_level)\n self.battery_text.text = battery_text\n self.batter_check_timeout = self.batter_check_cooldown\n else:\n self.batter_check_timeout -= 1\n\n self.sprite[0] = self.pet.get_avatar()\n\n\n\n\n def draw(self):\n self.update()\n self.display.show(self.display_group)\n \n ", "id": "4998409", "language": "Python", "matching_score": 2.136425495147705, "max_stars_count": 0, "path": "hud.py" }, { "content": "import math\nimport save\nimport json\nimport names\nimport random\n\nclass Pet:\n def __init__(self):\n self.name = names.get_random_name()\n self.age = 0\n self.happiness = 100\n self.hunger = 0\n self.health = 100\n self.avatar = 0\n\n def get_name(self):\n return self.name\n\n def get_avatar(self):\n return self.avatar\n\n def update_age(self,value):\n self.age += value\n \n def get_age(self):\n return int(self.age)\n \n def update_happiness(self,value):\n self.happiness += value\n \n def get_happiness(self):\n return int(self.happiness)\n\n def update_hunger(self,value):\n self.hunger += value\n\n def get_hunger(self):\n return int(self.hunger)\n\n def update_health(self,value):\n self.health += value\n\n def get_health(self):\n return int(self.health)\n\n def get_pet_data(self):\n data = {}\n data['name'] = self.name\n data['age'] = self.age\n data['happiness'] = self.happiness\n data['hunger'] = self.hunger\n data['health'] = self.health\n data['avatar'] = self.avatar\n return data\n\n def load_pet_data(self,data):\n self.name = data['name']\n self.age = data['age']\n self.happiness = data['happiness']\n 
self.hunger = data['hunger']\n self.health = data['health']\n self.avatar = data['avatar']\n \n def reset(self):\n self.__init__()\n save.save_state(json.dumps(self.get_pet_data()))\n\n def evaluate_age(self):\n if self.age < 1:\n self.avatar = 0\n if self.age >= 1:\n self.avatar = 1\n if self.age >= 5:\n self.avatar = 2\n if self.age >= 15 :\n self.avatar = 3\n if self.age > 30:\n self.avatar = 4\n\n def tick(self):\n self.update_age(.01)\n self.update_hunger(.01)\n self.update_happiness(-.001)\n self.update_health(-.001)\n self.evaluate_age()", "id": "9661031", "language": "Python", "matching_score": 0.9679081439971924, "max_stars_count": 0, "path": "pet.py" }, { "content": "import random\n\nname_list = [\n\"ABBEY\",\n\"ABBIE\",\n\"ABBY\",\n\"ABEL\",\n\"ABIGAIL\",\n\"ACE\",\n\"ADAM\",\n\"ADDIE\",\n\"ADMIRAL\",\n\"AGGIE\",\n\"AIRES\",\n\"AJ\",\n\"AJAX\",\n\"ALDO\",\n\"ALEX\",\n\"ALEXUS\",\n\"ALF\",\n\"ALFIE\",\n\"ALLIE\",\n\"ALLY\",\n\"AMBER\",\n\"AMIE\",\n\"AMIGO\",\n\"AMOS\",\n\"AMY\",\n\"ANDY\",\n\"ANGEL\",\n\"ANGUS\",\n\"ANNIE\",\n\"APOLLO\",\n\"APRIL\",\n\"ARCHIE\",\n\"ARGUS\",\n\"ARIES\",\n\"ARMANTI\",\n\"ARNIE\",\n\"ARROW\",\n\"ASHES\",\n\"ASHLEY\",\n\"ASTRO\",\n\"ATHENA\",\n\"ATLAS\",\n\"AUDI\",\n\"AUGIE\",\n\"AUSSIE\",\n\"AUSTIN\",\n\"AUTUMN\",\n\"AXEL\",\n\"AXLE\",\n\"BABBLES\",\n\"BABE\",\n\"BABY\",\n\"BABY-DOLL\",\n\"BABYKINS\",\n\"BACCHUS\",\n\"BAILEY\",\n\"BAM-BAM\",\n\"BAMBI\",\n\"BANDIT\",\n\"BANJO\",\n\"BARBIE\",\n\"BARCLAY\",\n\"BARKER\",\n\"BARKLEY\",\n\"BARLEY\",\n\"BARNABY\",\n\"BARNEY\",\n\"BARON\",\n\"BART\",\n\"BASIL\",\n\"BAXTER\",\n\"BB\",\n\"BEAMER\",\n\"BEANIE\",\n\"BEANS\",\n\"BEAR\",\n\"BEAU\",\n\"BEAUTY\",\n\"BEAUX\",\n\"BEBE\",\n\"BEETLE\",\n\"BELLA\",\n\"BELLE\",\n\"BEN\",\n\"BENJI\",\n\"BENNY\",\n\"BENSON\",\n\"BENTLEY\",\n\"BERNIE\",\n\"BESSIE\",\n\"BIABLO\",\n\"BIBBLES\",\n\"<NAME>\",\n\"<NAME>\",\n\"BIGGIE\",\n\"BILLIE\",\n\"BILLY\",\n\"BINGO\",\n\"BINKY\",\n\"BIRDIE\",\n\"BIRDY\",\n\"BISCUIT\",\n\"BISHOP\",\n\"BITS\",\n\"BITSY\",\n\"BIZZY\",\n\"BJ\",\n\"BLACKIE\",\n\"BLACK-JACK\",\n\"BLANCHE\",\n\"BLAST\",\n\"BLAZE\",\n\"BLONDIE\",\n\"BLOSSOM\",\n\"BLUE\",\n\"BO\",\n\"BO\",\n\"BOB\",\n\"BOBBIE\",\n\"BOBBY\",\n\"BOBO\",\n\"BODIE\",\n\"BOGEY\",\n\"BONES\",\n\"BONGO\",\n\"BONNIE\",\n\"BOO\",\n\"BOO-BOO\",\n\"BOOKER\",\n\"BOOMER\",\n\"BOONE\",\n\"BOOSTER\",\n\"BOOTIE\",\n\"BOOTS\",\n\"BOOZER\",\n\"BORIS\",\n\"BOSCO\",\n\"BOSLEY\",\n\"BOSS\",\n\"BOY\",\n\"BOZLEY\",\n\"BRADLEY\",\n\"BRADY\",\n\"BRAGGS\",\n\"BRANDI\",\n\"BRANDO\",\n\"BRANDY\",\n\"BRIDGETT\",\n\"BRIDGETTE\",\n\"BRIE\",\n\"BRINDLE\",\n\"BRIT\",\n\"BRITTANY\",\n\"BRODIE\",\n\"BROOK\",\n\"BROOKE\",\n\"BROWNIE\",\n\"BRUISER\",\n\"BRUNO\",\n\"BRUTUS\",\n\"BUBBA\",\n\"BUBBLES\",\n\"BUCK\",\n\"BUCKEYE\",\n\"BUCKO\",\n\"BUCKY\",\n\"BUD\",\n\"BUDDA\",\n\"BUDDIE\",\n\"BUDDY\",\n\"<NAME>\",\n\"BUFFIE\",\n\"BUFFY\",\n\"BUG\",\n\"BUGSEY\",\n\"BUGSY\",\n\"BULLET\",\n\"BULLWINKLE\",\n\"BULLY\",\n\"BUMPER\",\n\"BUNKY\",\n\"BUSTER\",\n\"BUSTER-BROWN\",\n\"BUTCH\",\n\"BUTCHY\",\n\"BUTTER\",\n\"BUTTERBALL\",\n\"BUTTERCUP\",\n\"BUTTERSCOTCH\",\n\"BUTTONS\",\n\"BUZZY\",\n\"CAESAR\",\n\"CALI\",\n\"CALLIE\",\n\"CALVIN\",\n\"CAMEO\",\n\"CAMILLE\",\n\"CANDY\",\n\"CAPONE\",\n\"CAPTAIN\",\n\"CARLEY\",\n\"CASEY\",\n\"CASPER\",\n\"CASSIE\",\n\"CASSIS\",\n\"<NAME>\",\n\"CHAD\",\n\"CHAMBERLAIN\",\n\"CHAMP\",\n\"CHANCE\",\n\"CHANEL\",\n\"CHAOS\",\n\"CHARISMA\",\n\"CHARLES\",\n\"CHARLIE\",\n\"<NAME>\",\n\"CHARMER\",\n\"CHASE\",\n\"CHAUNCEY\",\n\"CHAZ\",\n\"CHECKERS\",\n\"CHELSEA\",\n\"CHEROKEE\",\n\"CHESSIE\",\n\"CHESTER\",\n
\"CHEVY\",\n\"CHEWIE\",\n\"CHEWY\",\n\"CHEYENNE\",\n\"<NAME>\",\n\"CHIC\",\n\"CHICO\",\n\"CHIEF\",\n\"CHILI\",\n\"CHINA\",\n\"CHIP\",\n\"CHIPPER\",\n\"CHIPPY\",\n\"CHIPS\",\n\"CHIQUITA\",\n\"CHIVAS\",\n\"CHLOE\",\n\"CHOCOLATE\",\n\"CHRISSY\",\n\"CHUBBS\",\n\"CHUCKY\",\n\"CHYNA\",\n\"CINDER\",\n\"CINDY\",\n\"CINNAMON\",\n\"CISCO\",\n\"CLAIRE\",\n\"CLANCY\",\n\"CLEO\",\n\"CLEOPATRA\",\n\"CLICKER\",\n\"CLIFFORD\",\n\"CLOVER\",\n\"CLYDE\",\n\"COAL\",\n\"COBWEB\",\n\"COCO\",\n\"COCOA\",\n\"COCONUT\",\n\"CODI\",\n\"CODY\",\n\"COLE\",\n\"COMET\",\n\"COMMANDO\",\n\"CONAN\",\n\"CONNOR\",\n\"COOKIE\",\n\"COOPER\",\n\"COPPER\",\n\"CORKY\",\n\"COSMO\",\n\"COTTON\",\n\"COZMO\",\n\"CRACKERS\",\n\"CRICKET\",\n\"CRYSTAL\",\n\"CUBBY\",\n\"CUBS\",\n\"CUJO\",\n\"CUPCAKE\",\n\"CURLY\",\n\"CURRY\",\n\"CUTIE\",\n\"CUTIE-PIE\",\n\"CYRUS\",\n\"DAFFY\",\n\"DAISEY-MAE\",\n\"DAISY\",\n\"DAKOTA\",\n\"DALLAS\",\n\"DANDY\",\n\"DANTE\",\n\"DAPHNE\",\n\"DARBY\",\n\"DARCY\",\n\"DARWIN\",\n\"DASH\",\n\"DAVE\",\n\"DEACON\",\n\"DEE\",\n\"<NAME>\",\n\"DEMPSEY\",\n\"DESTINI\",\n\"DEWEY\",\n\"DEXTER\",\n\"DHARMA\",\n\"DIAMOND\",\n\"DICKENS\",\n\"DIEGO\",\n\"DIESEL\",\n\"DIGGER\",\n\"DILLON\",\n\"DINKY\",\n\"DINO\",\n\"DIVA\",\n\"DIXIE\",\n\"DOBIE\",\n\"DOC\",\n\"DODGER\",\n\"DOGGON’\",\n\"DOLLY\",\n\"DOMINO\",\n\"DOODLES\",\n\"DOOGIE\",\n\"DOTS\",\n\"DOTTIE\",\n\"DOZER\",\n\"DRAGSTER\",\n\"DREAMER\",\n\"DUCHESS\",\n\"DUDE\",\n\"DUDLEY\",\n\"DUFFY\",\n\"DUKE\",\n\"DUNCAN\",\n\"DUNN\",\n\"DUSTY\",\n\"DUTCHES\",\n\"DUTCHESS\",\n\"DYLAN\",\n\"EARL\",\n\"EBONY\",\n\"ECHO\",\n\"EDDIE\",\n\"EDDY\",\n\"EDGAR\",\n\"EDSEL\",\n\"EIFEL\",\n\"EINSTEIN\",\n\"ELLIE\",\n\"ELLIOT\",\n\"ELMO\",\n\"ELVIS\",\n\"ELWOOD\",\n\"EMBER\",\n\"EMILY\",\n\"EMMA\",\n\"EMMY\",\n\"ERIN\",\n\"ERNIE\",\n\"EVA\",\n\"FAITH\",\n\"FANCY\",\n\"FELIX\",\n\"FERGIE\",\n\"FERRIS\",\n\"FIDO\",\n\"FIFI\",\n\"FIGARO\",\n\"FINNEGAN\",\n\"FIONA\",\n\"FLAKE\",\n\"FLAKEY\",\n\"FLASH\",\n\"FLINT\",\n\"FLOPSY\",\n\"FLOWER\",\n\"FLOYD\",\n\"FLUFFY\",\n\"FONZIE\",\n\"FOXY\",\n\"FRANCAIS\",\n\"FRANKIE\",\n\"FRANKY\",\n\"FRECKLES\",\n\"FRED\",\n\"FREDDIE\",\n\"FREDDY\",\n\"FREEDOM\",\n\"FREEWAY\",\n\"FRESIER\",\n\"FRIDAY\",\n\"FRISCO\",\n\"FRISKY\",\n\"FRITZ\",\n\"FRODO\",\n\"FROSTY\",\n\"FURBALL\",\n\"FUZZY\",\n\"GABBY\",\n\"GABRIELLA\",\n\"GARFIELD\",\n\"GASBY\",\n\"GATOR\",\n\"GAVIN\",\n\"GENIE\",\n\"GEORGE\",\n\"GEORGIA\",\n\"GEORGIE\",\n\"GIANT\",\n\"GIBSON\",\n\"GIDGET\",\n\"GIGI\",\n\"GILBERT\",\n\"GILDA\",\n\"GINGER\",\n\"GINNY\",\n\"GIRL\",\n\"GIZMO\",\n\"GODIVA\",\n\"GOLDIE\",\n\"GOOBER\",\n\"GOOSE\",\n\"GORDON\",\n\"GRACE\",\n\"GRACE\",\n\"GRACIE\",\n\"GRACIE\",\n\"GRADY\",\n\"GREENIE\",\n\"GRETA\",\n\"GRETCHEN\",\n\"GRETEL\",\n\"GRETTA\",\n\"GRIFFEN\",\n\"GRINGO\",\n\"GRIZZLY\",\n\"GROMIT\",\n\"GROVER\",\n\"GUCCI\",\n\"GUIDO\",\n\"GUINNESS\",\n\"GUNNER\",\n\"GUNTHER\",\n\"GUS\",\n\"GUY\",\n\"GYPSY\",\n\"HAILEY\",\n\"HALEY\",\n\"HALLIE\",\n\"HAMLET\",\n\"HAMMER\",\n\"HANK\",\n\"HANNA\",\n\"HANNAH\",\n\"HANS\",\n\"HAPPYT\",\n\"HARDY\",\n\"HARLEY\",\n\"HARPO\",\n\"HARRISON\",\n\"HARRY\",\n\"HARVEY\",\n\"HEATHER\",\n\"HEIDI\",\n\"HENRY\",\n\"HERCULES\",\n\"HERSHEY\",\n\"HIGGINS\",\n\"HOBBES\",\n\"HOLLY\",\n\"HOMER\",\n\"HONEY\",\n\"HONEY-BEAR\",\n\"HOOCH\",\n\"HOOVER\",\n\"HOPE\",\n\"HOUDINI\",\n\"HOWIE\",\n\"HUDSON\",\n\"HUEY\",\n\"HUGH\",\n\"HUGO\",\n\"HUMPHREY\",\n\"HUNTER\",\n\"INDIA\",\n\"INDY\",\n\"IRIS\",\n\"ISABELLA\",\n\"ISABELLE\",\n\"ITSY\",\n\"ITSY-BITSY\",\n\"IVORY\",\n\"IVY\",\n\"IZZY\",\n\"JACK\",\n\"JACKIE\",\n\"JACKPOT\",\n\"JACKSON\",\n\"JADE\",\n\"JAGGER\",
\n\"JAGS\",\n\"JAGUAR\",\n\"JAKE\",\n\"JAMIE\",\n\"JASMINE\",\n\"JASPER\",\n\"JAXSON\",\n\"JAZMIE\",\n\"JAZZ\",\n\"JELLY\",\n\"JELLY-BEAN\",\n\"JENNA\",\n\"JENNY\",\n\"JERRY\",\n\"JERSEY\",\n\"JESS\",\n\"JESSE\",\n\"<NAME>\",\n\"JESSIE\",\n\"JESTER\",\n\"JET\",\n\"JETHRO\",\n\"JETT\",\n\"JETTA\",\n\"JEWEL\",\n\"JEWELS\",\n\"JIMMUY\",\n\"JINGLES\",\n\"JJ\",\n\"JOE\",\n\"JOEY\",\n\"JOHNNY\",\n\"JOJO\",\n\"JOKER\",\n\"JOLIE\",\n\"JOLLY\",\n\"JORDAN\",\n\"JOSIE\",\n\"JOY\",\n\"JR\",\n\"JUDY\",\n\"JULIUS\",\n\"JUNE\",\n\"JUNIOR\",\n\"JUSTICE\",\n\"KALI\",\n\"KALLIE\",\n\"KANE\",\n\"KARMA\",\n\"KASEY\",\n\"KATIE\",\n\"KATO\",\n\"KATZ\",\n\"KAYLA\",\n\"KC\",\n\"KEESHA\",\n\"KELLIE\",\n\"KELLY\",\n\"KELSEY\",\n\"KENYA\",\n\"KERRY\",\n\"KIBBLES\",\n\"KID\",\n\"KIKI\",\n\"KILLIAN\",\n\"KING\",\n\"KIPPER\",\n\"KIRA\",\n\"KIRBY\",\n\"KISMET\",\n\"KISSY\",\n\"KITTY\",\n\"KIWI\",\n\"KLAUS\",\n\"KOBA\",\n\"KOBE\",\n\"KODA\",\n\"KOKO\",\n\"KONA\",\n\"KOSMO\",\n\"KOTY\",\n\"KRAMER\",\n\"KUJO\",\n\"KURLY\",\n\"KYRA\",\n\"LACEY\",\n\"LADDIE\",\n\"LADY\",\n\"LADYBUG\",\n\"LANEY\",\n\"LASSIE\",\n\"LATTE\",\n\"LAYLA\",\n\"LAZARUS\",\n\"LEFTY\",\n\"LEO\",\n\"LEVI\",\n\"LEXI\",\n\"LEXIE\",\n\"LEXUS\",\n\"LIBBY\",\n\"LIGHTNING\",\n\"LILI\",\n\"LILLY\",\n\"LILY\",\n\"LINCOLN\",\n\"LINUS\",\n\"LITTLE BIT\",\n\"LITTLE-GUY\",\n\"LITTLE-ONE\",\n\"LITTLE-RASCAL\",\n\"LIZZY\",\n\"LOGAN\",\n\"LOKI\",\n\"LOLA\",\n\"LOU\",\n\"LOUIE\",\n\"LOUIS\",\n\"LOVEY\",\n\"LUCAS\",\n\"LUCI\",\n\"LUCIFER\",\n\"LUCKY\",\n\"LUCY\",\n\"LUKE\",\n\"LULU\",\n\"LUNA\",\n\"LYNX\",\n\"MAC\",\n\"MACHO\",\n\"MACINTOSH\",\n\"MACK\",\n\"MACKENZIE\",\n\"MACY\",\n\"MADDIE\",\n\"MADDY\",\n\"MADISON\",\n\"MAGGIE\",\n\"MAGGIE-MAE\",\n\"MAGGIE-MOO\",\n\"MAGGY\",\n\"MAGIC\",\n\"MAGNOLIA\",\n\"MAJOR\",\n\"MANDI\",\n\"MANDY\",\n\"MANGO\",\n\"MARBLE\",\n\"MARIAH\",\n\"MARLEY\",\n\"MARY\",\n\"<NAME>\",\n\"MASON\",\n\"MATTIE\",\n\"MAVERICK\",\n\"MAX\",\n\"MAXIMUS\",\n\"MAXINE\",\n\"MAXWELL\",\n\"MAY\",\n\"MAYA\",\n\"MCDUFF\",\n\"MCKENZIE\",\n\"MEADOW\",\n\"MEGAN\",\n\"MEGGIE\",\n\"MERCEDES\",\n\"MERCLE\",\n\"MERLIN\",\n\"MIA\",\n\"MIASY\",\n\"MICHAEL\",\n\"MICKEY\",\n\"MIDNIGHT\",\n\"MIKEY\",\n\"MIKO\",\n\"MILES\",\n\"MILLER\",\n\"MILLIE\",\n\"MILO\",\n\"MIMI\",\n\"MINDY\",\n\"MING\",\n\"MINI\",\n\"MINNIE\",\n\"MISCHIEF\",\n\"MISHA\",\n\"<NAME>\",\n\"<NAME>\",\n\"MISSIE\",\n\"MISSY\",\n\"MISTER\",\n\"MISTY\",\n\"MITCH\",\n\"MITTENS\",\n\"MITZI\",\n\"MITZY\",\n\"MO\",\n\"MOCHA\",\n\"MOJO\",\n\"MOLLIE\",\n\"MOLLY\",\n\"MONA\",\n\"MONKEY\",\n\"MONSTER\",\n\"MONTANA\",\n\"MONTGOMERY\",\n\"MONTY\",\n\"MOOCHER\",\n\"MOOCHIE\",\n\"MOOKIE\",\n\"MOONSHINE\",\n\"MOOSE\",\n\"MORGAN\",\n\"MOSES\",\n\"MOUSE\",\n\"<NAME>\",\n\"MUFFIN\",\n\"MUFFY\",\n\"MUGSY\",\n\"MULLIGAN\",\n\"MUNCHKIN\",\n\"MURPHY\",\n\"NAKITA\",\n\"NALA\",\n\"NANA\",\n\"NAPOLEON\",\n\"NATASHA\",\n\"NATHAN\",\n\"NELLIE\",\n\"NEMO\",\n\"NENA\",\n\"NERO\",\n\"NESTLE\",\n\"NEWT\",\n\"NEWTON\",\n\"NIBBLES\",\n\"NIBBY\",\n\"NIBBY-NOSE\",\n\"NICK\",\n\"NICKERS\",\n\"NICKIE\",\n\"NICKY\",\n\"NICO\",\n\"NIKE\",\n\"NIKI\",\n\"NIKITA\",\n\"NIKKI\",\n\"NIKO\",\n\"NINA\",\n\"NITRO\",\n\"NOBEL\",\n\"NOEL\",\n\"NONA\",\n\"NOODLES\",\n\"NORTON\",\n\"NOSEY\",\n\"NUGGET\",\n\"NUTMEG\",\n\"OAKLEY\",\n\"OBIE\",\n\"ODIE\",\n\"<NAME>\",\n\"OLIVE\",\n\"OLIVER\",\n\"OLIVIA\",\n\"OLLIE\",\n\"ONIE\",\n\"ONYX\",\n\"OPIE\",\n\"OREO\",\n\"OSCAR\",\n\"OTIS\",\n\"OTTO\",\n\"OZ\",\n\"OZZIE\",\n\"OZZY\",\n\"PABLO\",\n\"PACO\",\n\"PADDINGTON\",\n\"PADDY\",\n\"PANDA\",\n\"PANDORA\",\n\"PANTHER\",\n\"PAPA\",\n\"PARIS\",\n\"PARKER\",\n\"PASHA
\",\n\"PATCH\",\n\"PATCHES\",\n\"PATRICKY\",\n\"PATSY\",\n\"PATTY\",\n\"PEACHES\",\n\"PEANUT\",\n\"PEANUTS\",\n\"PEARL\",\n\"PEBBLES\",\n\"PEDRO\",\n\"PENNY\",\n\"PEPE\",\n\"PEPPER\",\n\"PEPPY\",\n\"PEPSI\",\n\"PERSY\",\n\"PETE\",\n\"PETER\",\n\"PETEY\",\n\"PETIE\",\n\"PHANTOM\",\n\"PHOEBE\",\n\"PHOENIX\",\n\"PICASSO\",\n\"PICKLES\",\n\"PIERRE\",\n\"PIGGY\",\n\"PIGLET\",\n\"<NAME>\",\n\"PINKY\",\n\"PINTO\",\n\"PIPER\",\n\"PIPPIN\",\n\"PIPPY\",\n\"PIP-SQUEEK\",\n\"PIRATE\",\n\"PIXIE\",\n\"PLATO\",\n\"PLUTO\",\n\"POCKETS\",\n\"POGO\",\n\"POKEY\",\n\"POLLY\",\n\"PONCHO\",\n\"PONGO\",\n\"POOCH\",\n\"POOCHIE\",\n\"POOH\",\n\"POOH-BEAR\",\n\"POOKIE\",\n\"POOKY\",\n\"POPCORN\",\n\"POPPY\",\n\"PORCHE\",\n\"PORKCHOP\",\n\"PORKY\",\n\"PORTER\",\n\"POWDER\",\n\"PRANCER\",\n\"PRECIOUS\",\n\"PRESLEY\",\n\"PRETTY\",\n\"PRETTY-GIRL\",\n\"PRINCE\",\n\"PRINCESS\",\n\"PRISSY\",\n\"PUCK\",\n\"PUDDLES\",\n\"PUDGE\",\n\"PUFFY\",\n\"PUGSLEY\",\n\"PUMPKIN\",\n\"PUNKIN\",\n\"PUPPY\",\n\"PURDY\",\n\"QUEEN\",\n\"QUEENIE\",\n\"QUINCY\",\n\"QUINN\",\n\"RAGS\",\n\"RAISON\",\n\"RALPH\",\n\"RALPHIE\",\n\"RAMBLER\",\n\"RAMBO\",\n\"RANGER\",\n\"RASCAL\",\n\"RAVEN\",\n\"REBEL\",\n\"RED\",\n\"REGGIE\",\n\"REILLY\",\n\"REMY\",\n\"REX\",\n\"REXY\",\n\"RHETT\",\n\"RICKY\",\n\"RICO\",\n\"RIGGS\",\n\"RILEY\",\n\"<NAME>\",\n\"RINGO\",\n\"RIPLEY\",\n\"ROCCO\",\n\"ROCK\",\n\"ROCKET\",\n\"ROCKO\",\n\"ROCKY\",\n\"ROLAND\",\n\"ROLEX\",\n\"ROLLIE\",\n\"ROMAN\",\n\"ROMEO\",\n\"ROSA\",\n\"ROSCOE\",\n\"ROSEBUD\",\n\"ROSIE\",\n\"ROSY\",\n\"ROVER\",\n\"ROWDY\",\n\"ROXANNE\",\n\"ROXIE\",\n\"ROXY\",\n\"RUBY\",\n\"RUCHUS\",\n\"RUDY\",\n\"RUFFE\",\n\"RUFFER\",\n\"RUFFLES\",\n\"RUFUS\",\n\"RUGER\",\n\"RUSTY\",\n\"RUTHIE\",\n\"RYDER\",\n\"SABINE\",\n\"SABLE\",\n\"SABRINA\",\n\"SADIE\",\n\"SAGE\",\n\"SAILOR\",\n\"SALEM\",\n\"SALLY\",\n\"SALTY\",\n\"SAM\",\n\"SAMANTHA\",\n\"SAMMY\",\n\"SAMPSON\",\n\"SAMSON\",\n\"SANDY\",\n\"SARA\",\n\"SARAH\",\n\"SARGE\",\n\"SASHA\",\n\"SASSIE\",\n\"SASSY\",\n\"SAVANNAH\",\n\"SAWYER\",\n\"SCARLETT\",\n\"SCHOTZIE\",\n\"SCHULTZ\",\n\"SCOOBIE\",\n\"SCOOBY\",\n\"SCOOBY-DOO\",\n\"SCOOTER\",\n\"SCOTTIE\",\n\"SCOUT\",\n\"SCRAPPY\",\n\"SCRUFFY\",\n\"SEBASTIAN\",\n\"SHADOW\",\n\"SHADY\",\n\"SHAGGY\",\n\"SHASTA\",\n\"SHEBA\",\n\"SHEENA\",\n\"SHELBY\",\n\"SHELLY\",\n\"SHERMAN\",\n\"SHILOH\",\n\"SHINER\",\n\"SHORTY\",\n\"SIENNA\",\n\"SIERRA\",\n\"SILKY\",\n\"SILVER\",\n\"SILVESTER\",\n\"SIMBA\",\n\"SIMON\",\n\"SIMONE\",\n\"SISSY\",\n\"SKEETER\",\n\"SKINNY\",\n\"SKIP\",\n\"SKIPPER\",\n\"SKIPPY\",\n\"SKITTLES\",\n\"SKY\",\n\"SKYE\",\n\"SKYLER\",\n\"SLICK\",\n\"SLINKY\",\n\"SLY\",\n\"SMARTY\",\n\"SMOKE\",\n\"SMOKEY\",\n\"SMUDGE\",\n\"SNEAKERS\",\n\"SNICKERS\",\n\"SNOOP\",\n\"SNOOPY\",\n\"SNOWBALL\",\n\"SNOWFLAKE\",\n\"SNOWY\",\n\"SNUFFLES\",\n\"SNUGGLES\",\n\"SOLOMON\",\n\"SONNY\",\n\"SOPHIA\",\n\"SOPHIE\",\n\"SOX\",\n\"SPANKY\",\n\"SPARKLE\",\n\"SPARKY\",\n\"SPEED\",\n\"SPEEDO\",\n\"SPEEDY\",\n\"SPENCER\",\n\"SPIKE\",\n\"SPIRIT\",\n\"SPOOKEY\",\n\"SPOT\",\n\"SPOTTY\",\n\"SPUD\",\n\"SPUNKY\",\n\"SQUEEKY\",\n\"SQUIRT\",\n\"STANLEY\",\n\"STAR\",\n\"STARR\",\n\"STELLA\",\n\"STERLING\",\n\"STICH\",\n\"STINKY\",\n\"STORMY\",\n\"STUART\",\n\"SUGAR\",\n\"SUGAR-BABY\",\n\"SUMMER\",\n\"SUMO\",\n\"SUNDANCE\",\n\"SUNDAY\",\n\"SUNNY\",\n\"SUNSHINE\",\n\"SUSIE\",\n\"SUSIE-Q\",\n\"SUZY\",\n\"SWEETIE\",\n\"SWEETIE-PIE\",\n\"SWEET-PEA\",\n\"SYDNEY\",\n\"TABBY\",\n\"TABETHA\",\n\"TACO\",\n\"TAFFY\",\n\"TALLY\",\n\"TAMMY\",\n\"TANGLES\",\n\"TANGO\",\n\"TANK\",\n\"TANNER\",\n\"TARA\",\n\"TASHA\",\n\"TAYLOR\",\n\"TAZ\",\n\"T-BIRD\",\n\"T-BONE\",\n\"T
EDDY\",\n\"TEDDY-BEAR\",\n\"TEQUILA\",\n\"TESS\",\n\"TESSA\",\n\"TESSIE\",\n\"TEX\",\n\"THELMA\",\n\"THOR\",\n\"THUMPER\",\n\"THUNDER\",\n\"THYME\",\n\"TIFFANY\",\n\"TIGER\",\n\"TIGGER\",\n\"TIGGY\",\n\"TIKI\",\n\"TILLY\",\n\"TIMBER\",\n\"TIMMY\",\n\"TINKER\",\n\"TINKER-BELL\",\n\"TINKY\",\n\"TINY\",\n\"TIPPY\",\n\"TIPR\",\n\"TITAN\",\n\"TITO\",\n\"TITUS\",\n\"TOBIE\",\n\"TOBY\",\n\"TOFFEE\",\n\"TOM\",\n\"TOMMY\",\n\"TOMMY-BOY\",\n\"TONI\",\n\"TONY\",\n\"TOOTS\",\n\"TOOTSIE\",\n\"TOPAZ\",\n\"TORI\",\n\"TOTO\",\n\"TRACKER\",\n\"TRAMP\",\n\"TRAPPER\",\n\"TRAVIS\",\n\"TRIGGER\",\n\"TRINITY\",\n\"TRIPOD\",\n\"TRISTAN\",\n\"TRIXIE\",\n\"TROOPER\",\n\"TROUBLE\",\n\"TROY\",\n\"TRUFFLES\",\n\"TUCK\",\n\"TUCKER\",\n\"TUESDAY\",\n\"TUFFY\",\n\"TURBO\",\n\"TURNER\",\n\"TUX\",\n\"TWIGGY\",\n\"TWINKLE\",\n\"TY\",\n\"TYLER\",\n\"TYSON\",\n\"VALINTO\",\n\"VAVA\",\n\"VEGAS\",\n\"VELVET\",\n\"VINNIE\",\n\"VINNY\",\n\"VIOLET\",\n\"VITO\",\n\"VOLVO\",\n\"WADDLES\",\n\"WAGS\",\n\"WALDO\",\n\"WALLACE\",\n\"WALLY\",\n\"WALTER\",\n\"WAYNE\",\n\"WEAVER\",\n\"WEBSTER\",\n\"WESLEY\",\n\"WESTIE\",\n\"WHISKERS\",\n\"WHISKEY\",\n\"WHISPY\",\n\"WHITIE\",\n\"WHIZ\",\n\"WIGGLES\",\n\"WILBER\",\n\"WILLIE\",\n\"WILLOW\",\n\"WILLY\",\n\"WILSON\",\n\"WINNIE\",\n\"WINSTON\",\n\"WINTER\",\n\"WIZ\",\n\"WIZARD\",\n\"WOLFGANG\",\n\"WOLFIE\",\n\"WOODY\",\n\"WOOFIE\",\n\"WRIGLEY\",\n\"WRINKLES\",\n\"WYATT\",\n\"XENA\",\n\"YAKA\",\n\"YANG\",\n\"YELLER\",\n\"YELLOW\",\n\"YIN\",\n\"YODA\",\n\"YOGI\",\n\"YOGI-BEAR\",\n\"YUKON\",\n\"ZACK\",\n\"ZEKE\",\n\"ZENA\",\n\"ZEUS\",\n\"ZIGGY\",\n\"ZIPPY\",\n\"ZOE\",\n\"ZOEY\",\n\"ZOIE\",\n\"ZORRO\"\n]\n\n\ndef get_random_name():\n return random.choice(name_list)", "id": "779871", "language": "Python", "matching_score": 0.858363687992096, "max_stars_count": 0, "path": "names.py" }, { "content": "import xmltodict\nimport json\ndoc = None\nwith open('nes.dat') as fd:\n doc = xmltodict.parse(fd.read())\n\nprint(json.dumps(doc[\"datafile\"][\"game\"][0]))", "id": "2562575", "language": "Python", "matching_score": 0.09888564795255661, "max_stars_count": 0, "path": "nointro.py" }, { "content": "import requests\nimport json\nimport time\nfrom urllib.parse import urlparse\nfrom urllib.parse import parse_qs\nimport datetime\nfrom datetime import timezone\nimport os\n\nfrom requests.models import Response\n\nnetworks = []\nchannels = []\nuser = []\naudio_token = None\nsession_key = None\n\nnetwork_url = \"https://www.di.fm\"\n\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0\",\n \"Accept-Encoding\": \"*\",\n \"Connection\": \"keep-alive\"\n}\n\ndef update_audio_token():\n global audio_token\n global channels\n global session_key\n global user\n global networks\n # There is probbaly a better way to get this that also allows for using credentials.\n response = requests.post(f\"{network_url}/login\",headers=headers)\n #print(response.text.encode('utf8'))\n result = json.loads(response.text.split(\"di.app.start(\")[1].split(\");\")[0])\n user = result[\"user\"]\n networks = []\n for network in result[\"networks\"]:\n # figure out what's wrong with parsing ClassicalRadio.com thml\n if network[\"name\"] != \"ClassicalRadio.com\":\n networks.append(network)\n \n audio_token = result[\"user\"][\"audio_token\"]\n session_key = result[\"user\"][\"session_key\"]\n channels = result[\"channels\"]\n\ndef get_all_channels():\n channel_list = []\n response = requests.post(f\"{network_url}/login\",headers=headers)\n result = 
json.loads(response.text.split(\"di.app.start(\")[1].split(\");\")[0])\n for network in result[\"networks\"]:\n if network[\"name\"] != \"ClassicalRadio.com\":\n response = requests.post(f\"{network['url']}/login\",headers=headers)\n result = json.loads(response.text.split(\"di.app.start(\")[1].split(\");\")[0])\n for channel in result[\"channels\"]:\n if channel not in channel_list:\n channel_list.append(channel)\n return channel_list\n\ndef get_url_expiration(url):\n parsed_url = urlparse(url)\n exp = parse_qs(parsed_url.query)['exp'][0]\n return exp\n\ndef is_url_expired(url):\n # This puts the burden on difm endpoint to validate\n # make request and see if \"URL expired\" is returned\n # Invalid signature if token is invalid\n #response = requests.options(url)\n # if response.text == \"URL expired\":\n # return True\n # else:\n # return False\n\n # This is less overhead since we are doing it before playing each track.\n # Expired track is reall only an issue if the user keeps the app open for 24+ hrs.\n # return true if expiration date is less than current date\n current_time = datetime.datetime.now(timezone.utc).replace(tzinfo=timezone.utc)\n expiration_time = datetime.datetime.strptime(\n get_url_expiration(url), '%Y-%m-%dT%H:%M:%S%z').replace(tzinfo=timezone.utc)\n return expiration_time < current_time\n\ndef get_channels():\n return channels\n\ndef get_networks():\n return networks\n\ndef set_network_url(url):\n global network_url\n network_url = url\n update_audio_token()\n\ndef download_track(track,channel,url,directory=\"tracks\"):\n path = os.path.join(directory,channel)\n if not os.path.exists(path):\n os.makedirs(path)\n r = requests.get(url, allow_redirects=True,headers=headers)\n open(os.path.join(path,f\"{track}.mp4\"), 'wb').write(r.content)\n\ndef get_tracks_by_channel_id(id):\n tracks = []\n epoch = time.time()\n channel_url = f'{network_url}/_papi/v1/di/routines/channel/{id}?tune_in=false&audio_token={audio_token}&_={epoch}'\n channel_repsonse = requests.get(channel_url,headers=headers)\n for track in json.loads(channel_repsonse.text)[\"tracks\"]:\n tracks.append(track)\n return tracks\n\ndef generate_playlist(channel_id,channel_name,playlist_directory=\"playlists\"):\n print(f\"Generating pls file for {channel_name} in {playlist_directory} ...\")\n index = 0\n pls_tracks = {}\n for n in range(4):\n tracks = get_tracks_by_channel_id(channel_id)\n for track in tracks:\n pls_tracks[track[\"track\"]] = f'https:{track[\"content\"][\"assets\"][0][\"url\"]}'\n expiration = get_url_expiration(list(pls_tracks.items())[0][1]).replace(\":\",\"-\")\n if not os.path.exists(playlist_directory):\n os.makedirs(playlist_directory)\n with open(os.path.join(playlist_directory,f\"{channel_name} - Expires {expiration}.pls\"), 'a', encoding=\"utf-8\") as f:\n f.write(\"[playlist]\\n\")\n f.write(f\"NumberOfEntries={len(pls_tracks)}\\n\")\n for track in pls_tracks:\n index += 1\n f.write(f\"File{index}={pls_tracks[track]}\\n\")\n f.write(f\"Title{index}={track} Expires: {get_url_expiration(pls_tracks[track])}\\n\")\n print(\"Done. 
See playlists directory for file\")\n\nupdate_audio_token()\n\nif __name__ == \"__main__\":\n print(get_all_channels())\n", "id": "2200345", "language": "Python", "matching_score": 2.1238436698913574, "max_stars_count": 0, "path": "difm.py" }, { "content": "import datetime\nfrom time import sleep\nimport difm\nimport os\nimport audio\nimport json\nimport vlc\n\ncurrent_page = 1\ncurrent_channel = None\ncurrent_channel_id = None\ncurrent_network = \"DI.FM\"\ncurent_network_id = None\nvolume = None\ncurrent_track = \"\"\ncurrent_track_index = -1\ncurrent_tracklist = {}\nfavorite_channels = {}\nconfig = {\"playlist_directory\":\"playlists\",\"track_directory\":\"tracks\",\"vlc_log\":False}\nplayer = None\nstop_input = False\nlast_channel = {}\nlogs = []\n\ndef log(message,type):\n global logs\n log = {}\n log[\"timestamp\"] = datetime.datetime.now()\n log[\"message\"] = message\n log[\"type\"] = type\n logs.append(log)\n\ndef screen_clear():\n # for mac and linux(here, os.name is 'posix')\n if os.name == 'posix':\n _ = os.system('clear')\n else:\n # for windows platfrom\n _ = os.system('cls')\n\ndef save_last_channel():\n with open('last_channel.json', 'w') as outjson:\n json.dump(last_channel, outjson)\n log(f\"Saved last channel\",\"info\")\n\ndef load_last_channel():\n global last_channel\n if os.path.exists('last_channel.json'):\n with open('last_channel.json') as json_file:\n last_channel = json.load(json_file)\n log(\"Loaded last channel\",\"info\")\n\ndef update_last_channel():\n global last_channel\n last_channel = {}\n last_channel[current_channel] = current_channel_id\n log(f\"Updated last channel to {current_channel}\",\"info\")\n save_last_channel()\n\ndef save_favorites():\n with open('favorites.json', 'w') as outjson:\n json.dump(favorite_channels, outjson)\n log(\"Saved favorites\",\"info\")\n\ndef load_favorites():\n global favorite_channels\n if os.path.exists('favorites.json'):\n with open('favorites.json') as json_file:\n favorite_channels = json.load(json_file)\n log(\"Loaded favorites\",\"info\")\n \ndef load_config():\n global config\n if os.path.exists('config.json'):\n with open('config.json') as json_file:\n config = json.load(json_file)\n log(\"Loaded config\",\"info\")\n\ndef save_config():\n with open('config.json', 'w') as outjson:\n json.dump(config, outjson)\n log(\"Saved config\",\"info\")\n\ndef update_favorites(channel,id):\n global favorite_channels\n if channel in favorite_channels:\n del favorite_channels[channel]\n else:\n favorite_channels[channel] = id\n log(\"Upated favorites\",\"info\")\n save_favorites()\n\ndef update_current_tracks():\n global current_track_index\n global current_tracklist\n current_tracklist = {}\n current_track_index = -1\n for n in range(1):\n for track in difm.get_tracks_by_channel_id(current_channel_id):\n current_tracklist[track[\"track\"]] = \"https:\" + track['content'][\"assets\"][0][\"url\"]\n log(f\"Refreshed tracks for channel id {current_channel_id}\",\"info\")\n\ndef draw_player():\n screen_clear()\n print(\"--------------------------------------------------------------\")\n print(f\"Channel: {current_channel}\")\n print(f\"Favorite: {current_channel in favorite_channels}\")\n print(f\"Track: {current_track}\")\n print(f\"Status: {player.get_status()}\")\n print(f\"Volume: {player.get_volume()}\")\n print(\"--------------------------------------------------------------\")\n print(\"P: Play/Pause | S: Stop | N: Next | R: Previous | Q: Back\")\n print(\"--------------------------------------------------------------\")\n 
print(\"V: Volume | D: Download Track | F: Favorite/Unfavorite Channel\")\n print(\"--------------------------------------------------------------\")\n\ndef play_next_track(event=None):\n global player\n global current_track\n global current_track_index\n current_track_index += 1\n if current_track_index >= len(current_tracklist): \n log(\"Last song in tracklist\",\"info\")\n update_current_tracks()\n current_track_index += 1 \n if difm.is_url_expired(list(current_tracklist.items())[current_track_index][1]):\n log(\"Track is expired\",\"info\")\n update_current_tracks()\n current_track_index += 1\n if player != None:\n if event == None:\n player.stop_audio()\n else:\n if vlc.EventType._enum_names_[event.type] == \"MediaPlayerEncounteredError\":\n log(\"Issue playing track, skipping to next track.\",\"error\")\n \n del player\n player = audio.Player(vlc_log=config[\"vlc_log\"])\n player.set_event_callback(play_next_track)\n player.play_audio(list(current_tracklist.items())[current_track_index][1])\n current_track = list(current_tracklist.items())[current_track_index][0]\n log(f\"Playing next track: {current_track}\",\"info\")\n draw_player()\n\ndef play_previous_track():\n global player\n global current_track\n global current_track_index\n current_track_index -= 1\n log(\"Playing previous track\")\n if current_track_index > 0: \n if difm.is_url_expired(list(current_tracklist.items())[current_track_index][1]):\n log(\"Track is expired\",\"info\")\n update_current_tracks()\n current_track_index += 1\n player.stop_audio()\n del player\n player = audio.Player(vlc_log=config[\"vlc_log\"])\n player.set_event_callback(play_next_track)\n player.play_audio(list(current_tracklist.items())[current_track_index][1])\n current_track = list(current_tracklist.items())[current_track_index][0]\n draw_player()\n\ndef config_menu():\n quit_config = False\n global config\n while not quit_config:\n screen_clear()\n print(\"----------------------\")\n print(\"Edit Config\")\n print(\"----------------------\")\n print(\"1: Track Directory\")\n print(\"2: Playlist Directory\")\n print(\"3: Enable VLC Log to File\")\n print(\"Q: Back\")\n print(\"----------------------\")\n val = input().lower()\n if val == \"q\":\n quit_config = True\n if val == \"1\":\n screen_clear()\n print(\"Edit Track Directory. Q to Quit\")\n print(f'Current Track Directory: {config[\"track_directory\"]}')\n val = input(\"New Track Directory: \")\n if val.lower() == \"q\":\n pass\n else:\n config[\"track_directory\"] = val\n log(f\"Updated track directory to {val}\",\"info\")\n save_config()\n if val == \"2\":\n screen_clear()\n print(\"Edit Playlist Directory. 
Q to Quit\")\n print(f'Current Playlist Directory: {config[\"playlist_directory\"]}')\n val = input(\"New Playlist Directory: \")\n if val.lower() == \"q\":\n pass\n else:\n config[\"playlist_directory\"] = val\n log(f\"Updated playlist directory to {val}\",\"info\")\n save_config()\n if val == \"3\":\n print(\"Enable VLC logging to file\")\n print(f\"Current set to {config['vlc_log']}\")\n val = input(\"New state True/False: \")\n if val.lower() == \"q\":\n pass\n elif val.lower() in [\"true\",\"t\",\"false\",\"f\"]:\n if val.lower() in [\"true\",\"t\"]:\n config[\"vlc_log\"] = True\n else:\n config[\"vlc_log\"] = False\n save_config()\n\ndef play_last_channel(generate_playlist=False):\n global current_channel\n global current_channel_id\n global current_tracklist\n global current_track_index\n if len(last_channel) > 0:\n id = list(last_channel.items())[0][1]\n channel= list(last_channel.items())[0][0]\n current_channel = channel\n current_channel_id = id\n try:\n if generate_playlist:\n difm.generate_playlist(id,channel,config[\"playlist_directory\"])\n val = input(\"Press enter to continue\")\n else:\n current_tracklist = {}\n for n in range(1):\n for track in difm.get_tracks_by_channel_id(id):\n current_tracklist[track[\"track\"]] = \"https:\" + track['content'][\"assets\"][0][\"url\"]\n current_track_index = -1\n play_next_track()\n player_menu()\n except Exception as e:\n log(f\"Error: {e}\",\"error\")\n\ndef player_menu():\n quit_player = False\n while not quit_player:\n draw_player()\n val = input().lower()\n if val == \"q\":\n player.stop_audio()\n quit_player = True\n if val == \"p\":\n player.pause_audio()\n if val == \"r\":\n play_previous_track()\n if val == \"s\":\n player.stop_audio()\n if val == \"n\":\n play_next_track()\n if val == \"b\":\n player.stop_audio()\n if val == \"v\":\n volume = input(\"Volume: \")\n try:\n player.set_volume(int(volume))\n log(f\"Updated volume to {volume}\",\"info\")\n except Exception as e:\n log(f\"Failed to set volume: {e}\",\"error\")\n if val == \"d\":\n track = list(current_tracklist.items())[current_track_index][0]\n url = list(current_tracklist.items())[current_track_index][1]\n difm.download_track(track,current_channel,url,config[\"track_directory\"])\n log(f\"Downloaded track: {track}\",\"info\")\n if val ==\"f\":\n update_favorites(current_channel,current_channel_id)\n log(f\"Updated favorites: {current_channel}:{current_channel_id}\",\"info\")\n sleep(1)\n\ndef favorites_menu(generate_playlist=False):\n global current_channel\n global current_channel_id\n global current_track_index\n global stop_input\n global current_tracklist\n quit_favorites = False\n favorite_page = 1\n while not quit_favorites:\n favorites = list(favorite_channels.items())\n favorite_index = 0\n screen_clear()\n print(\"------------------------------------------\")\n print(\" Favorite Channels \")\n print(\"------------------------------------------\")\n max = favorite_page * 10\n if len(favorites[max - 10:max]) < 1:\n max = (favorite_page - 1) * 10\n favorite_page -= 1\n for channel in favorites[max - 10:max]:\n print(f\"{favorite_index}: {channel[0]}\")\n favorite_index += 1\n print(\"------------------------------------------\")\n print(\"N: Next Page | P: Previous Page | Q: Back \")\n print(\"------------------------------------------\")\n val = input().lower()\n if val == \"n\" or val == \"\":\n if favorite_page <= (len(favorites) / 10):\n favorite_page += 1\n elif val == \"p\":\n if favorite_page > 1:\n favorite_page -= 1\n elif val == \"q\":\n 
quit_favorites = True\n elif val in \"0123456789\":\n try:\n channel = favorites[max - 10:max][int(val)]\n id = channel[1]\n current_channel = channel[0]\n current_channel_id = id\n if generate_playlist:\n difm.generate_playlist(id,channel[0],config[\"playlist_directory\"])\n val = input(\"Press enter to continue\")\n else:\n current_tracklist = {}\n for n in range(1):\n for track in difm.get_tracks_by_channel_id(id):\n current_tracklist[track[\"track\"]] = \"https:\" + track['content'][\"assets\"][0][\"url\"]\n current_track_index = -1\n update_last_channel()\n play_next_track()\n player_menu()\n except Exception as e:\n log(f\"Error: {e}\",\"error\")\n\ndef all_channels_menu(generate_playlist=False):\n quit = False\n while not quit:\n global current_page\n global current_channel\n global current_channel_id\n global current_tracklist\n global current_track\n global stop_input\n global current_track_index\n screen_clear()\n print(\"------------------------------------------\")\n print(\" All Channels \")\n print(\"------------------------------------------\")\n index = 0\n max = current_page * 10\n for channel in difm.channels[max - 10:max]:\n print(f\"{index}: {channel['name']}\")\n index += 1\n print(\"------------------------------------------\")\n print(\"N: Next Page | P: Previous Page | Q: Quit \")\n print(\"------------------------------------------\")\n val = input()\n if val.lower() == \"n\" or val == \"\":\n if current_page <= (len(difm.channels) / 10):\n current_page += 1\n if val.lower() == \"p\":\n if current_page > 1:\n current_page -= 1\n if val.lower() == \"q\":\n quit = True\n if val in \"0123456789\":\n try:\n channel = difm.channels[max - 10:max][int(val)]\n id = channel[\"id\"]\n if generate_playlist:\n difm.generate_playlist(id,channel[\"name\"],config[\"playlist_directory\"])\n val = input(\"Press enter to continue\")\n else:\n current_channel = channel[\"name\"]\n current_channel_id = id\n current_tracklist = {}\n for n in range(1):\n for track in difm.get_tracks_by_channel_id(id):\n current_tracklist[track[\"track\"]] = \"https:\" + track['content'][\"assets\"][0][\"url\"]\n current_track_index = -1\n update_last_channel()\n play_next_track()\n player_menu()\n except Exception as e:\n log(f\"Error: {e}\",\"error\") \n\ndef log_menu():\n quit = False\n log_page = 1\n sorted_logs = sorted(logs, key=lambda k: k['timestamp'], reverse=True)\n while not quit:\n screen_clear()\n print(\"------------------------------------------\")\n print(\" Logs \")\n print(\"------------------------------------------\")\n index = 0\n max = log_page * 10\n for log in sorted_logs[max - 10:max]:\n print(f\"{log['timestamp']} - {log['type']} - {log['message']}\")\n index += 1\n print(\"------------------------------------------\")\n print(\"N: Next Page | P: Previous Page | Q: Quit \")\n print(\"------------------------------------------\")\n val = input()\n if val.lower() == \"n\" or val == \"\":\n if log_page <= (len(sorted_logs) / 10):\n log_page += 1\n if val.lower() == \"p\":\n if log_page > 1:\n log_page -= 1\n if val.lower() == \"q\":\n quit = True\n\ndef playlist_menu():\n global current_channel\n global current_channel_id\n global current_tracklist\n global current_track_index\n global last_channel\n quit_playlist = False\n while not quit_playlist:\n screen_clear()\n print(\"--------------------------------------------\")\n print(\" Generate Playlist(.pls) from Channel \")\n print(\"--------------------------------------------\")\n print(\"1: All Channels\")\n print(\"2: Favorite 
Channels\")\n print(\"3: Last Channel (default)\")\n print(\"Q: Quit\")\n print(\"--------------------------------------------\")\n val = input().lower() or \"3\"\n if val == \"1\":\n all_channels_menu(generate_playlist=True)\n if val == \"2\":\n favorites_menu(generate_playlist=True)\n if val == \"3\":\n play_last_channel(generate_playlist=True)\n if val == \"q\":\n quit_playlist = True \n\ndef network_menu():\n quit_network = False\n global current_network\n global current_page\n while not quit_network:\n screen_clear()\n print(\"---------------\")\n print(\"Select Network\")\n print(\"---------------\")\n index = 0\n networks = difm.get_networks()\n for network in networks:\n print(f\"{index}: {network['name']}\")\n index += 1 \n print(\"---------------\") \n print(\"Q: Back\")\n print(\"---------------\") \n val = input().lower()\n if val == \"\":\n pass\n elif val in \"0123456789\":\n if int(val) <= len(networks) - 1:\n network = networks[int(val)]\n current_network = network[\"name\"]\n difm.set_network_url(network[\"url\"])\n current_page = 1\n quit_network = True\n elif val == \"q\":\n quit_network = True\n\ndef menu():\n global current_channel\n global current_channel_id\n global current_tracklist\n global current_track_index\n global last_channel\n quit_menu = False\n while not quit_menu:\n screen_clear()\n print(\"-------------------\")\n print(f\" DI.FM Term \")\n print(\"-------------------\")\n print(\"1: All Channels\")\n print(\"2: Favorite Channels\")\n print(\"3: Last Channel (default)\")\n print(\"4: Generate Playlist\")\n print(\"5: Change Stream Network\")\n print(\"6: Edit Config\")\n print(\"7: Show logs\")\n print(\"Q: Quit\")\n print(\"-------------------\")\n val = input().lower() or \"3\"\n if val == \"1\":\n all_channels_menu()\n if val == \"2\":\n favorites_menu()\n if val == \"3\":\n play_last_channel()\n if val == \"4\":\n playlist_menu()\n if val == \"5\":\n network_menu()\n if val == \"6\":\n config_menu()\n if val == \"7\":\n log_menu()\n if val == \"q\":\n quit_menu = True\n\nload_config()\nload_favorites()\nload_last_channel()\nmenu()\n", "id": "7185789", "language": "Python", "matching_score": 4.508063793182373, "max_stars_count": 0, "path": "main.py" }, { "content": "import database\nimport os\nimport datetime\nimport podcast as _podcast\nfrom time import sleep\nimport audio\n\n\nlogs = []\nquit = False\nplayer = None\nepisode_list = []\nepisode_index = -1\n\ndef log(message,type):\n global logs\n log = {}\n log[\"timestamp\"] = datetime.datetime.now()\n log[\"message\"] = message\n log[\"type\"] = type\n logs.append(log)\n\ndef screen_clear():\n # for mac and linux(here, os.name is 'posix')\n if os.name == 'posix':\n _ = os.system('clear')\n else:\n # for windows platfrom\n _ = os.system('cls')\n\ndef draw_player():\n global player\n screen_clear()\n print(\"--------------------------------------------------------------\")\n print(f\"Podcast: {episode_list[episode_index]['podcast']}\")\n print(f\"Episode: {episode_list[episode_index]['title']}\")\n print(f\"Date: {datetime.datetime.fromtimestamp(episode_list[episode_index]['date'])}\")\n print(f\"Listened: {database.get_episode(episode_list[episode_index]['title'])['listened']}\")\n print(f\"Status: {player.get_status()}\")\n print(f\"Volume: {player.get_volume()}\")\n print(f\"Description: {episode_list[episode_index]['description']}\")\n print(\"--------------------------------------------------------------\")\n print(\"P: Play/Pause | S: Stop | N: Next | R: Previous | Q: Back\")\n 
print(\"--------------------------------------------------------------\")\n print(\"V: Volume | D: Download Episode | L: Mark As Listened\")\n print(\"--------------------------------------------------------------\")\n \ndef error_callback(event):\n log(\"Issue playing episode, skipping to next episode.\",\"error\")\n\ndef play_next_episode(event=None):\n global player\n global episode_index\n episode_index += 1\n if player != None:\n player.stop_audio()\n del player \n if episode_index + 1 > len(episode_list):\n episode_index -= 1\n else:\n player = audio.Player()\n #player.set_end_callback(play_next_episode)\n player.set_error_callback(error_callback)\n player.play_audio(episode_list[episode_index][\"url\"])\n draw_player()\n\ndef update_podcast(podcast):\n current = database.get_podcast(podcast)\n new =_podcast.get_podcast(current[\"feed\"])\n if current[\"updated\"] != new.updated:\n for episode in new.episodes:\n database.add_episode(episode)\n\ndef log_menu():\n quit = False\n log_page = 1\n sorted_logs = sorted(logs, key=lambda k: k['timestamp'], reverse=True)\n while not quit:\n screen_clear()\n print(\"------------------------------------------\")\n print(\" Logs \")\n print(\"------------------------------------------\")\n index = 0\n max = log_page * 10\n for log in sorted_logs[max - 10:max]:\n print(f\"{log['timestamp']} - {log['type']} - {log['message']}\")\n index += 1\n print(\"------------------------------------------\")\n print(\"N: Next Page | P: Previous Page | Q: Quit \")\n print(\"------------------------------------------\")\n val = input()\n if val.lower() == \"n\" or val == \"\":\n if log_page <= (len(sorted_logs) / 10):\n log_page += 1\n if val.lower() == \"p\":\n if log_page > 1:\n log_page -= 1\n if val.lower() == \"q\":\n quit = True\n\ndef player_menu():\n global player\n quit = False\n while not quit:\n draw_player()\n val = input().lower()\n if val == \"q\":\n player.stop_audio()\n quit = True\n if val == \"x\":\n speed = input(\"Speed: \")\n try:\n player.set_speed(float(speed))\n except Exception as e:\n pass\n if val == \"p\":\n player.pause_audio()\n if val == \"l\":\n database.set_as_listened(episode_list[episode_index][\"title\"])\n if val == \"r\":\n pass\n #play_previous_track()\n if val == \"s\":\n player.stop_audio()\n if val == \"n\":\n play_next_episode()\n if val == \"b\":\n player.stop_audio()\n if val == \"v\":\n volume = input(\"Volume: \")\n try:\n player.set_volume(int(volume))\n log(f\"Updated volume to {volume}\",\"info\")\n except Exception as e:\n log(f\"Failed to set volume: {e}\",\"error\")\n sleep(1)\n\ndef add_podcast_menu():\n quit = False\n while not quit:\n screen_clear()\n print(\"------------------------------\")\n print(\" Add Podcast \")\n print(\"------------------------------\")\n print(\"Enter podcast rss feed\")\n val = input()\n if val != \"\":\n print(\"Importing episodes...\")\n podcast = _podcast.get_podcast(val)\n database.add_podcast(podcast)\n for episode in podcast.episodes:\n database.add_episode(episode)\n quit = True\n\n\ndef episode_menu(podcast):\n quit = False\n current_page = 1\n reverse_order = False\n global show_player\n while not quit:\n global episode_list\n episodes = database.get_episodes(podcast)\n if reverse_order:\n episodes.reverse()\n screen_clear()\n print(\"-----------------------------------------------------------------\")\n print(\" Episodes \")\n print(\"-----------------------------------------------------------------\")\n max = current_page * 10\n index = 0\n for episode in episodes[max 
- 10:max]:\n print(f\"{index}: {episode['title']}\")\n index +=1\n print(\"-----------------------------------------------------------------\")\n print(\"N: Next Page | P: Previous Page | Q: Quit | L: Hide/Show Listened | R: Reverse Order\")\n print(\"-----------------------------------------------------------------\")\n val = input()\n if val.lower() == \"n\" or val == \"\":\n if current_page <= (len(episodes) / 10):\n current_page += 1\n if val.lower() == \"p\":\n if current_page > 1:\n current_page -= 1\n if val.lower() == \"q\":\n quit = True\n if val.lower() == \"r\":\n reverse_order = not reverse_order\n if val in \"0123456789\":\n title = episodes[max - 10:max][int(val)][\"title\"]\n episode = database.get_episode(title)\n episode_list = []\n episode_list.append(episode)\n show_player = True\n play_next_episode()\n player_menu()\n\n\ndef podcast_menu():\n quit = False\n podcasts = database.get_podcasts()\n current_page = 1\n while not quit:\n screen_clear()\n print(\"------------------------------\")\n print(\" Podcasts \")\n print(\"------------------------------\")\n index = 0\n max = current_page * 10\n for podcast in podcasts[max - 10:max]:\n print(f\"{index}: {podcast['title']}\")\n index += 1\n print(\"------------------------------------------\")\n print(\"N: Next Page | P: Previous Page | Q: Quit \")\n print(\"------------------------------------------\")\n val = input()\n if val.lower() == \"n\" or val == \"\":\n if current_page <= (len(podcasts) / 10):\n current_page += 1\n if val.lower() == \"p\":\n if current_page > 1:\n current_page -= 1\n if val.lower() == \"q\":\n quit = True\n if val in \"0123456789\":\n podcast = podcasts[max - 10:max][int(val)][\"title\"]\n update_podcast(podcast)\n episode_menu(podcast)\n \n \n\ndef main_menu():\n quit = False\n while not quit:\n screen_clear()\n print(\"------------------------------\")\n print(\" Term Cast \")\n print(\"------------------------------\")\n print(\"1: Podcasts\")\n print(\"2: Add Podcast\")\n print(\"3: Logs\")\n print(\"4: Quit\")\n val = input().lower() or \"1\"\n if val == \"1\":\n podcast_menu()\n if val == \"2\":\n add_podcast_menu()\n if val == \"3\":\n log_menu()\n if val == \"4\":\n screen_clear()\n quit = True\n\n\n\nmain_menu()", "id": "6146349", "language": "Python", "matching_score": 2.346418857574463, "max_stars_count": 0, "path": "main.py" }, { "content": "import vlc\nimport datetime\n\nclass Player:\n def __init__(self,vlc_log=False):\n params = \"--quiet\"\n if vlc_log:\n params = f'--verbose=2 --file-logging --logfile=vlc-log_{datetime.datetime.now().strftime(\"%m%d%Y\")}.txt'\n self.instance = vlc.Instance(params) # --verbose 2 --quiet\n self.player = self.instance.media_player_new()\n self.listPlayer = self.instance.media_list_player_new()\n self.status = \"Not Playing\"\n\n def get_status(self):\n return self.status\n\n def get_volume(self):\n volume = self.player.audio_get_volume()\n if int(volume) < 0:\n return \"0\"\n else:\n return volume\n \n def set_volume(self,volume):\n return self.player.audio_set_volume(volume)\n\n def set_event_callback(self, callback):\n events = self.player.event_manager()\n events.event_attach(vlc.EventType.MediaPlayerEndReached, callback)\n events.event_attach(vlc.EventType.MediaPlayerEncounteredError, callback)\n\n def play_audio(self, url):\n media = self.instance.media_new(url)\n self.player.set_media(media)\n self.player.play()\n self.status = \"Playing\"\n\n def stop_audio(self):\n self.player.stop()\n self.status = \"Stopped\"\n\n def pause_audio(self):\n 
if self.status == \"Playing\":\n self.player.pause()\n self.status = \"Paused\"\n elif self.status == \"Paused\":\n self.player.play()\n self.status = \"Playing\"\n elif self.status == \"Stopped\":\n self.player.play()\n self.status = \"Playing\"\n\nif __name__ == \"__main__\":\n pass", "id": "3842337", "language": "Python", "matching_score": 0.7755823731422424, "max_stars_count": 0, "path": "audio.py" }, { "content": "import colored\nfrom colored import stylize\n\n# See https://pypi.org/project/colored/ for colors\n\nlogs = []\n\n\ndef error(log):\n global logs\n log = \"Error: {}\".format(log)\n print(stylize(log, colored.fg(\"red\")))\n logs.append(log)\n\ndef debug(log):\n global logs\n log = \"Debug: {}\".format(log)\n print(stylize(log, colored.fg(\"orange_1\")))\n logs.append(log)\n\ndef info(log):\n global logs\n log = \"Info: {}\".format(log)\n print(stylize(log, colored.fg(\"yellow\")))\n logs.append(log)\n\ndef event(log):\n global logs\n log = \"Event: {}\".format(log)\n print(stylize(log, colored.fg(\"green\")))\n logs.append(log)\n\nif __name__ == \"__main__\":\n pass", "id": "8771084", "language": "Python", "matching_score": 0.21950381994247437, "max_stars_count": 0, "path": "logger.py" }, { "content": "import logger\nimport sys\nimport event_manager\nimport mister\nimport os\nimport sys\nimport json\nimport schedule\nimport time\nimport threading\nimport obs\nimport filesystem\nimport script\nimport discord\nimport dashboard\nimport retroarch\nimport config\nimport steam\n\n\nif __name__ == \"__main__\":\n pubsub_file = \"pubsub.json\"\n\n if len(sys.argv) > 1:\n new_file = sys.argv[1]\n if os.path.exists(new_file):\n pubsub_file = new_file\n logger.info(f\"Using {new_file} instead of pubsub.json\")\n\n SETTINGS = config.get_config()\n\n pubsub = {}\n\n if os.path.exists(pubsub_file):\n with open(pubsub_file) as pubsub_json:\n pubsub = json.load(pubsub_json)\n\n for publisher in pubsub[\"publishers\"]:\n if pubsub[\"publishers\"][publisher][\"status\"] == \"enabled\":\n if publisher in event_manager.publishers:\n if \"initialize\" in event_manager.publishers[publisher]:\n threading.Thread(target=event_manager.publishers[publisher][\"initialize\"]).start()\n refresh_rate = SETTINGS[publisher.lower()]['refresh_rate']\n schedule.every(int(refresh_rate)).seconds.do(event_manager.publishers[publisher][\"publish\"])\n\n for subscriber in event_manager.subscribers:\n if subscriber in pubsub[\"subscribers\"]:\n if pubsub[\"subscribers\"][subscriber][\"status\"] == \"enabled\":\n if \"initialize\" in event_manager.subscribers[subscriber]:\n if \"type\" in pubsub[\"subscribers\"][subscriber]:\n if pubsub[\"subscribers\"][subscriber][\"type\"] == \"async\":\n event_manager.subscribers[subscriber][\"initialize\"]()\n else:\n threading.Thread(target=event_manager.subscribers[subscriber][\"initialize\"]).start()\n \n\n\n\n while True:\n try:\n schedule.run_pending()\n time.sleep(1)\n except KeyboardInterrupt:\n # quit\n try:\n dashboard.shutdown()\n except Exception as e:\n os._exit(0)\n sys.exit()", "id": "12736585", "language": "Python", "matching_score": 2.15966796875, "max_stars_count": 0, "path": "main.py" }, { "content": "import os\nimport json\nimport logger\n\npublishers = {}\nsubscribers = {}\n\npubsub = {}\n\nif os.path.exists(\"pubsub.json\"):\n with open(\"pubsub.json\") as subsub_json:\n pubsub = json.load(subsub_json)\n\ndef manage_event(event):\n logger.event(f\"Managing {event.event} from {event.publisher}\")\n for subscriber in pubsub[\"subscribers\"]:\n if 
pubsub[\"subscribers\"][subscriber][\"status\"] == \"enabled\" and subscriber in subscribers:\n subscriptions = pubsub[\"subscribers\"][subscriber][\"subscribed_events\"]\n for subscription in subscriptions:\n for key in subscription.keys():\n if key == event.event:\n for action in subscription[key]:\n if action[\"status\"] == \"enabled\":\n subscribers[subscriber][\"handle_event\"][\"function\"](event,action)\n\n\nif __name__ == \"__main__\":\n pass\n", "id": "2163923", "language": "Python", "matching_score": 1.5787826776504517, "max_stars_count": 0, "path": "event_manager.py" }, { "content": "import event_manager\nimport os\nimport json\n\n\n\n\n\ndef handle_event(event,action):\n pass\n\n\n\n\n\n\nevent_manager.subscribers[\"Filesystem\"] = {}\nevent_manager.subscribers[\"Filesystem\"][\"handle_event\"] = {'function': handle_event, 'arg': \"args\"}", "id": "7022112", "language": "Python", "matching_score": 2.082263469696045, "max_stars_count": 0, "path": "filesystem.py" }, { "content": "import event_manager\nimport os\nimport sys\nimport logger\n\n\ndef handle_event(event,action):\n if os.path.exists(action[\"script\"]):\n logger.event(f\"Executing custom script {action['script']}: {action['description']}\")\n script = open(action[\"script\"]).read()\n #sys.argv = [action[\"script\"], event, action ]\n exec(script)\n else:\n logger.error(f\"Script not found: {action['script']}\")\n\nevent_manager.subscribers[\"Script\"] = {}\nevent_manager.subscribers[\"Script\"][\"handle_event\"] = {'function': handle_event, 'arg': \"args\"}", "id": "9246150", "language": "Python", "matching_score": 0.0799640566110611, "max_stars_count": 0, "path": "script.py" }, { "content": "from tinydb import TinyDB, Query\n\n\npodcast_db = TinyDB('podcast.json', indent=4, separators=(',', ': '))\nepisode_db = TinyDB('episode.json', indent=4, separators=(',', ': '))\n\ndef add_podcast(podcast):\n global podcast_db\n podcasts = Query()\n if len(podcast_db.search(podcasts.title == podcast.title)) < 1:\n podcast_db.insert({\"title\": podcast.title,\"description\": podcast.description,\"author\": podcast.author,\"feed\": podcast.feed,\"link\": podcast.link,\"updated\":podcast.updated})\n\ndef remove_podcast(podcast):\n pass\n\ndef get_podcasts():\n return podcast_db.all()\n\ndef get_podcast(title):\n podcasts = Query()\n return podcast_db.search(podcasts.title == title)[0]\n\ndef add_episode(episode):\n global episode_db\n episodes = Query()\n if len(episode_db.search(episodes.title == episode.title)) < 1:\n episode_db.insert({\"title\": episode.title,\"podcast\":episode.podcast, \"url\": episode.url,\"episode\": episode.episode,\"duration\": episode.duration,\"description\": episode.description,\"date\": episode.date, \"listened\": episode.listened})\n\ndef set_as_listened(title):\n global episode_db\n episodes = Query()\n episode_db.update({\"listened\": True },episodes.title == title)\n\n\ndef get_episodes(podcast,filter_listened=True):\n episodes = Query()\n if filter_listened:\n return episode_db.search((episodes.podcast == podcast) & (episodes.listened != True))\n else:\n return episode_db.search(episodes.podcast == podcast)\n\ndef get_episode(title):\n episodes = Query()\n return episode_db.search(episodes.title == title)[0]\n\nif __name__ == \"__main__\":\n pass", "id": "10706658", "language": "Python", "matching_score": 1.9598685503005981, "max_stars_count": 0, "path": "database.py" }, { "content": "import feedparser\nimport json\nfrom html.parser import HTMLParser\n\nfrom requests.api import head\nimport 
podcastparser\nimport urllib.request\n\n\nclass Podcast:\n def __init__(self):\n self.title = \"\"\n self.description = \"\"\n self.author = \"\"\n self.feed = \"\" \n self.link = \"\"\n self.updated = \"\"\n self.episodes = []\n\nclass Episode:\n def __init__(self):\n self.podcast = \"\"\n self.listened = False\n self.title = \"\"\n self.url = \"\"\n self.episode = \"\" \n self.duration = \"\"\n self.description = \"\"\n self.date = \"\"\n\ndef filter_html(text):\n class HTMLFilter(HTMLParser):\n text = \"\"\n def handle_data(self, data):\n self.text += data\n f = HTMLFilter()\n f.feed(text)\n return f.text\n \ndef get_podcast(url):\n opener = urllib.request.build_opener()\n headers = {\"User-agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36\"}\n request = urllib.request.Request(url, headers=headers)\n parsed = podcastparser.parse(url,opener.open(request))\n podcast = Podcast()\n podcast.title = parsed[\"title\"]\n podcast.author = parsed[\"itunes_owner\"][\"name\"]\n podcast.feed = url\n podcast.link = parsed[\"link\"]\n podcast.description = parsed[\"description\"]\n podcast.updated = parsed[\"episodes\"][0][\"published\"]\n for episode in parsed[\"episodes\"]:\n ep = Episode()\n ep.title = episode[\"title\"]\n ep.podcast = parsed[\"title\"]\n ep.description = episode[\"description\"]\n ep.date = episode[\"published\"]\n if \"number\" in episode:\n ep.episode = episode[\"number\"]\n for item in episode[\"enclosures\"]:\n if \"audio\" in item[\"mime_type\"]:\n ep.url = item[\"url\"]\n podcast.episodes.append(ep)\n return podcast\n\n\ndef get_podcast_old(url):\n f = feedparser.parse(url)\n #print(json.dumps(f))\n podcast = Podcast()\n feed = f[\"feed\"]\n podcast.title = feed[\"title\"]\n podcast.author = feed[\"author\"]\n podcast.feed = feed[\"title_detail\"][\"base\"]\n podcast.link = feed[\"link\"]\n if \"subtitle\" in feed:\n podcast.description = filter_html(feed[\"subtitle\"])\n if \"summary\" in feed:\n podcast.description = feed[\"summary\"]\n if \"updated\" in f:\n podcast.updated = f[\"updated\"]\n if \"updated\" in feed:\n podcast.updated = feed[\"updated\"]\n for entry in f[\"entries\"]:\n episode = Episode()\n episode.title = entry['title']\n episode.podcast = feed[\"title\"]\n episode.description = filter_html(entry[\"summary\"])\n episode.date = entry[\"published\"]\n if \"itunes_episode\" in entry:\n episode.episode = entry[\"itunes_episode\"]\n for link in entry[\"links\"]:\n if \"type\" in link:\n if \"audio\" in link[\"type\"]:\n episode.url = link[\"href\"]\n podcast.episodes.append(episode)\n \n return podcast\n\nif __name__ == \"__main__\":\n url = \"https://feeds.buzzsprout.com/1850247.rss\"\n url = \"https://audioboom.com/channels/5060313.rss\"\n opener = urllib.request.build_opener()\n headers = {\"User-agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36\"}\n request = urllib.request.Request(url, headers=headers)\n parsed = podcastparser.parse(url,opener.open(request))\n print(json.dumps(parsed))\n\n", "id": "6753019", "language": "Python", "matching_score": 0.8577157258987427, "max_stars_count": 0, "path": "podcast.py" }, { "content": "from glob import glob\nfrom tkinter import * # Python interface to the Tk GUI toolkit\nfrom tkinter import filedialog # open file\nfrom tkinter import ttk\nfrom tkinter.messagebox import showinfo\nimport tkinter\nimport database\nimport database_tools\n\nselected_uuid = \"\"\n\nroot 
= Tk()\nroot.title('Database Manager')\n\n#menu itmes removed for space\n\n\nroot.grid_rowconfigure(0, weight=1)\nroot.grid_columnconfigure(0, weight=1)\n\nframe1 = Frame(root)\nframe2 = Frame(root)\nframe3 = Frame(root)\nscrollbar = Scrollbar(frame1) # put a scrolbar widget on the right side of the window\nscrollbar.pack(side = RIGHT, fill = Y)\n\n# put a treeview widget on the window with stylized column headers and use show=\"headings\" to hide the first hierarchy column\ncolumn_headers=['uuid','release_name', 'system', 'region']\nstyle = ttk.Style()\nstyle.configure(\"Treeview.Heading\", font=(\"Verdana\", 11))\ntv = ttk.Treeview(frame1, height=30, columns=column_headers, show=\"headings\", yscrollcommand = scrollbar.set) \n\ndef refresh_games(search=\"\"):\n for item in tv.get_children():\n tv.delete(item)\n for game in database.search_roms_by_name(search):\n tv.insert('', tkinter.END, values=(game['uuid'],game['release_name'],game[\"system\"],game[\"region\"]))\n\ntv.pack(side=LEFT, fill=BOTH, expand=TRUE)\nscrollbar.config(command = tv.yview)\n\n\n\ndef search_games(e):\n for item in tv.get_children():\n tv.delete(item)\n for game in database.search_roms_by_name(search_entry.get()):\n tv.insert('', tkinter.END, values=(game['uuid'],game['release_name'],game[\"system\"],game[\"region\"]))\n\ndef clear_search():\n search_entry.delete(0, END)\n refresh_games()\n\nclear_search_button = ttk.Button(frame2, text = \"Clear\", width=15,command=clear_search)\nclear_search_button.pack(side = RIGHT, anchor =NE , padx=10, pady=5)\n\nsearch_label = Label(frame2, text= \"Search:\")\nsearch_label.pack(side = LEFT, anchor = W, padx=1, pady=1)\n\nsearch_entry = Entry(frame2,width=60)\nsearch_entry.pack(side = RIGHT, anchor = W, padx=1, pady=1)\nsearch_entry.bind('<KeyRelease>', search_games)\n\n\n\ndef treeview_sort_column(tv, col, text, reverse):\n l = [(tv.set(k, col), k) for k in tv.get_children('')]\n l.sort(reverse=reverse)\n\n # rearrange items in sorted positions\n for index, (val, k) in enumerate(l):\n tv.move(k, '', index)\n\n # reverse sort next time\n tv.heading(col, text=text, command=lambda _col=col: \\\n treeview_sort_column(tv, _col, text,not reverse))\n \n\ntv.heading('uuid', text='UUID')\ntv.column('uuid', anchor='w',width=50)\n\ntv.heading('release_name', text='Release Name',command=lambda _col=\"release_name\":treeview_sort_column(tv, _col, 'Release Name', False))\ntv.column('release_name', anchor='w', width = 400)\n\ntv.heading('system', text='System',command=lambda _col=\"system\":treeview_sort_column(tv, _col,\"System\",False))\ntv.column('system', anchor='w',width=180)\n\ntv.heading('region', text='Region',command=lambda _col=\"region\":treeview_sort_column(tv, _col,\"Region\",False))\ntv.column('region', anchor='w',width = 90)\n\nfor game in database.search_roms_by_name(\"\"):\n tv.insert('', tkinter.END, values=(game['uuid'],game['release_name'],game[\"system\"],game[\"region\"]))\n\n\nrn_label = Label(frame3, text= \"Release Name\")\nrn_label.pack( anchor = SW,padx=1, pady=1)\n\nrn_entry = Entry(frame3,width=60)\nrn_entry.pack( anchor = SW,padx=1, pady=1)\n\nromn_label = Label(frame3, text= \"Rom Extensionless File Name\")\nromn_label.pack( anchor = SW,padx=1, pady=1)\n\nromn_entry = Entry(frame3,width=60)\nromn_entry.pack( anchor = SW,padx=1, pady=1)\n\nsha1_label = Label(frame3, text= \"SHA1\")\nsha1_label.pack( anchor = SW,padx=1, pady=1)\n\nsha1_entry = Entry(frame3,width=60)\nsha1_entry.pack( anchor = SW,padx=1, pady=1)\n\ndeveloper_label = Label(frame3, text= 
\"Developer\")\ndeveloper_label.pack( anchor = SW,padx=1, pady=1)\n\ndeveloper_entry = Entry(frame3,width=60)\ndeveloper_entry.pack( anchor = SW,padx=1, pady=1)\n\npublisher_label = Label(frame3, text= \"Publisher\")\npublisher_label.pack( anchor = SW,padx=1, pady=1)\n\npublisher_entry = Entry(frame3,width=60)\npublisher_entry.pack( anchor = SW,padx=1, pady=1)\n\ngenre_label = Label(frame3, text= \"Genre\")\ngenre_label.pack( anchor = SW,padx=1, pady=1)\n\ngenre_entry = Entry(frame3,width=60)\ngenre_entry.pack( anchor = SW,padx=1, pady=1)\n\ndate_label = Label(frame3, text= \"Date\")\ndate_label.pack( anchor = SW,padx=1, pady=1)\n\ndate_entry = Entry(frame3,width=60)\ndate_entry.pack( anchor = SW,padx=1, pady=1)\n\nreference_label = Label(frame3, text= \"Reference URL\")\nreference_label.pack( anchor = SW,padx=1, pady=1)\n\nreference_entry = Entry(frame3,width=60)\nreference_entry.pack( anchor = SW,padx=1, pady=1)\n\nmanual_label = Label(frame3, text= \"Manual URL\")\nmanual_label.pack( anchor = SW,padx=1, pady=1)\n\nmanual_entry = Entry(frame3,width=60)\nmanual_entry.pack( anchor = SW,padx=1, pady=1)\n\nregion_label = Label(frame3, text= \"Region\")\nregion_label.pack( anchor = SW,padx=1, pady=1)\n\nregion_combo = ttk.Combobox(frame3)\nregion_combo[\"values\"] = database_tools.regions\nregion_combo.current(1)\nregion_combo.pack(anchor = SW,padx=1, pady=1)\n\nsystem_label = Label(frame3, text= \"System\")\nsystem_label.pack( anchor = SW,padx=1, pady=1)\n\nsystem_combo = ttk.Combobox(frame3)\nsystem_combo[\"values\"] = database_tools.systems\nsystem_combo.current(1)\nsystem_combo.pack(anchor = SW,padx=1, pady=1)\n\n\ndescription_label = Label(frame3, text= \"Description\")\ndescription_label.pack(anchor = SW,padx=1, pady=1)\n\ndescription_text = Text(frame3,height=10, width=60)\ndescription_text.pack(anchor = SW,padx=1, pady=1)\n\n\n\n\ndef clear():\n global selected_uuid\n selected_uuid = \"\"\n rn_entry.delete(0, END)\n romn_entry.delete(0, END)\n region_combo.current(1)\n system_combo.current(1)\n developer_entry.delete(0, END)\n sha1_entry.delete(0, END)\n publisher_entry.delete(0, END)\n genre_entry.delete(0, END)\n date_entry.delete(0, END)\n reference_entry.delete(0, END)\n manual_entry.delete(0, END)\n description_text.delete(\"1.0\", END)\n\ndef delete():\n if selected_uuid != \"\":\n MsgBox = tkinter.messagebox.askquestion ('Delete Record','Are you sure you want to delete the record',icon = 'warning')\n if MsgBox == 'yes':\n database.delete_rom(selected_uuid)\n clear()\n refresh_games()\n\n\n\ndef submit():\n rom = {}\n rom['uuid'] = selected_uuid\n rom[\"release_name\"] = rn_entry.get()\n rom[\"region\"] = region_combo.get()\n rom[\"system\"] = system_combo.get()\n rom['sha1'] = sha1_entry.get().upper()\n rom[\"rom_extensionless_file_name\"] = romn_entry.get()\n rom[\"publisher\"] = publisher_entry.get()\n rom[\"date\"] = date_entry.get()\n rom[\"developer\"] = developer_entry.get()\n rom[\"genre\"] = genre_entry.get()\n rom[\"description\"] = description_text.get(\"1.0\",\"end\")\n rom[\"reference_url\"] = reference_entry.get()\n rom[\"manual_url\"] = manual_entry.get()\n if selected_uuid != \"\":\n test = database.get_rom_by_uuid(rom[\"uuid\"])\n # rom already exist so we will update\n if len(test) > 0:\n database.update_rom(rom)\n clear()\n refresh_games()\n else:\n if database.is_new_rom(rom):\n database.update_rom(rom)\n clear()\n refresh_games()\n else:\n tkinter.messagebox.showerror(title=\"Error\", message=\"Either Sha1 or Rom Extensionless File Name need to be 
unique\")\n \n\n\ndef duplicate():\n global selected_uuid\n if selected_uuid != \"\":\n selected_uuid = database_tools.generate_uuid()\n tkinter.messagebox.showinfo(title=\"Creating Duplicate\", message=\"You are now edidting a duplicate record \\n a new uuid has been generated \\n SHA1 AND OR Rom Name should be changed \\n submit to add new record\")\n\n\nexport_button = ttk.Button(frame3, text = \"Submit\", width=15,command=submit)\nexport_button.pack(side = LEFT, anchor = NE, padx=10, pady=5)\n\nduplicate_button = ttk.Button(frame3, text = \"Duplicate\", width=15,command=duplicate)\nduplicate_button.pack(side = LEFT, anchor = NE, padx=10, pady=5)\n\n\nclear_button = ttk.Button(frame3, text = \"Clear\", width=15,command=clear)\nclear_button.pack(side = LEFT, anchor = NE, padx=10, pady=5)\n\nclose_button = ttk.Button(frame3, text = \"Delete\", width=15,command=delete)\nclose_button.pack(side = LEFT, anchor = NE, padx=10, pady=5)\n\n\n\n\n\nframe1.grid(column=0, row=0, sticky=\"nsew\")\nframe2.grid(column=0, row=1, sticky=\"n\")\nframe3.grid(column=1, row=0, sticky=\"nsew\")\nroot.rowconfigure(0, weight=1)\nroot.columnconfigure(0, weight=1)\nroot.columnconfigure(1, weight=1)\n\ndef item_selected(event):\n global selected_uuid\n for selected_item in tv.selection():\n item = tv.item(selected_item)\n record = item['values']\n details = database.get_rom_by_uuid(record[0])\n selected_uuid = details[\"uuid\"]\n rn_entry.delete(0, END)\n rn_entry.insert(0, details[\"release_name\"])\n romn_entry.delete(0, END)\n romn_entry.insert(0, details[\"rom_extensionless_file_name\"])\n region_combo.set(details[\"region\"])\n system_combo.set(details[\"system\"])\n developer_entry.delete(0, END)\n developer_entry.insert(0, details[\"developer\"])\n sha1_entry.delete(0, END)\n sha1_entry.insert(0, details[\"sha1\"])\n publisher_entry.delete(0, END)\n publisher_entry.insert(0, details[\"publisher\"])\n genre_entry.delete(0, END)\n genre_entry.insert(0, details[\"genre\"])\n date_entry.delete(0, END)\n date_entry.insert(0, details[\"date\"])\n reference_entry.delete(0, END)\n reference_entry.insert(0, details[\"reference_url\"])\n manual_entry.delete(0, END)\n manual_entry.insert(0, details[\"manual_url\"])\n description_text.delete(\"1.0\", END)\n description_text.insert(\"1.0\", details[\"description\"])\n\n\ntv.bind('<<TreeviewSelect>>', item_selected)\n\nroot.mainloop()", "id": "3442764", "language": "Python", "matching_score": 3.22833251953125, "max_stars_count": 0, "path": "database_manager.py" }, { "content": "import json\nimport sys\nfrom tinydb import TinyDB, Query\nimport re\n\n\nclass Rom():\n def __init__(self):\n self.uuid = \"\"\n self.release_name = \"\"\n self.region = \"\"\n self.system = \"\"\n self.sha1 = \"\"\n self.rom_extensionless_file_name = \"\"\n self.developer = \"\"\n self.publisher = \"\"\n self.genre = \"\"\n self.date = \"\"\n self.description = \"\"\n self.reference_url = \"\"\n self.manual_url = \"\"\n\ndatabase = TinyDB('database.json', indent=4, separators=(',', ': '))\n\n\ndef get_rom_by_hash(hash):\n rom = Query()\n result = database.search(rom.sha1 == hash)\n if len(result) > 0:\n return result[0]\n else:\n return {}\n\ndef delete_rom(uuid):\n query = Query()\n database.remove(query.uuid == uuid)\n\ndef get_rom_by_uuid(uuid):\n rom = Query()\n result = database.search(rom.uuid == uuid)\n if len(result) > 0:\n return result[0]\n else:\n return {}\n\ndef search_roms_by_name(name):\n rom = Query()\n result = database.search(rom.rom_extensionless_file_name.matches(name, 
flags=re.IGNORECASE))\n return result\n\ndef is_new_rom(rom):\n query = Query()\n result = database.search((query.sha1 == rom[\"sha1\"]) & (query.release_name == rom[\"release_name\"]) & (query.rom_extensionless_file_name == rom[\"rom_extensionless_file_name\"]))\n if len(result) > 0:\n return False\n else:\n return True\n\n\ndef get_rom_by_name(name,system):\n rom = Query()\n result = database.search((rom.rom_extensionless_file_name == name) & (rom.system == system))\n if len(result) > 0:\n return result[0]\n else:\n return {}\n\ndef get_rom_by_hash_or_name(hash,name):\n rom = Query()\n result = database.search((rom.sha1 == hash) | (rom.rom_extensionless_file_name == name))\n if len(result) > 0:\n return result[0]\n else:\n return {}\n\ndef update_rom(item):\n rom = Query()\n database.upsert({\"uuid\":item['uuid'],\"release_name\": item['release_name'], \"region\":item['region'],\"system\":item['system'] ,\"sha1\":item['sha1'].upper(),\"rom_extensionless_file_name\":item['rom_extensionless_file_name'],\"developer\":item['developer'],\"publisher\":item['publisher'],\"genre\":item['genre'],\"date\":item['date'],\"description\":item[\"description\"],\"reference_url\":item[\"reference_url\"],\"manual_url\":item[\"manual_url\"]}, rom.uuid==item[\"uuid\"])\n\n\nif __name__ == \"__main__\":\n print(get_rom_by_hash(\"56FE858D1035DCE4B68520F457A0858BAE7BB16\"))", "id": "2913839", "language": "Python", "matching_score": 3.818445920944214, "max_stars_count": 0, "path": "database.py" }, { "content": "import os\nimport xmltodict\nimport json\nimport sqlite3\nfrom tinydb import TinyDB, Query\nimport pathlib\nimport uuid\nfrom database import Rom\n\n\nsystems = [\"Arcade\", \"Atari 2600\", \"Atari 5200\", \"Atari 7800\", \"Atari Lynx\", \"Bandai WonderSwan\", \"Bandai WonderSwan Color\", \"Coleco ColecoVision\", \"GCE Vectrex\", \"Intellivision\",\n \"NEC PC Engine/TurboGrafx-16\", \"NEC PC Engine CD/TurboGrafx-CD\", \"NEC PC-FX\", \"NEC SuperGrafx\", \"Nintendo Famicom Disk System\", \"Nintendo Game Boy\", \"Nintendo Game Boy Advance\",\n \"Nintendo Game Boy Color\", \"Nintendo Entertainment System\", \"Nintendo Super Nintendo Entertainment System\", \"Sega Game Gear\", \"Sega Master System\", \"Sega CD/Mega-CD\", \"Sega Genesis/Mega Drive\",\n \"Sega Saturn\", \"Sega SG-1000\", \"Sony PlayStation\", \"SNK Neo Geo Pocket\", \"SNK Neo Geo Pocket Color\", \"Magnavox Odyssey2\", \"Commodore 64\", \"Microsoft MSX\", \"Microsoft MSX2\"]\n\nregions = [\"Australia\", \"Asia\", \"Brazil\", \"Canada\", \"China\", \"Denmark\", \"Europe\", \"Finland\", \"France\", \"Germany\", \"Hong Kong\", \"Italy\", \"Japan\", \"Korea\", \"Netherlands\", \"Russia\", \"Spain\", \"Sweden\",\n \"Taiwan\", \"USA\", \"World\", \"Asia, Australia\", \"Brazil, Korea\", \"Japan, Europe\", \"Japan, Korea\", \"Japan, USA\", \"USA, Australia\", \"USA, Europe\", \"USA, Korea\", \"Europe, Australia\", \"Greece\", \"Ireland\", \"Norway\",\n \"Portugal\", \"Scandinavia\", \"UK\", \"USA, Brazil\", \"Poland\"]\n\n\ndef generate_uuid():\n return uuid.uuid4().hex\n\n\ndef replace_none(string):\n if string == None:\n return \"\"\n else:\n return string\n\n\ndef create_patch(new_database, old_database, out_patch):\n updated = TinyDB(new_database, indent=4, separators=(',', ': ')).all()\n roms = []\n index = 1\n for item in updated:\n database = TinyDB(old_database, indent=4, separators=(',', ': '))\n query = Query()\n result = database.search(query.uuid == item[\"uuid\"])\n if len(result) > 0:\n if sorted(item.items()) != 
sorted(result[0].items()):\n roms.append(item)\n print(\n f'{index}/{len(updated)} - added patch for existing rom: {item[\"rom_extensionless_file_name\"]}')\n else:\n print(\n f'{index}/{len(updated)} - no patch added for existing rom: {item[\"rom_extensionless_file_name\"]}')\n else:\n result = database.search((query.sha1 == item[\"sha1\"]) & (query.release_name == item[\"release_name\"]) & (\n query.rom_extensionless_file_name == item[\"rom_extensionless_file_name\"]))\n if result < 1:\n roms.append(item)\n print(\n f'{index}/{len(updated)} - added patch for new rom: {item[\"rom_extensionless_file_name\"]}')\n\n index += 1\n with open(out_patch, 'w') as outjson:\n json.dump(roms, outjson, indent=4)\n\ndef import_patch(file):\n database = TinyDB('database.json', indent=4, separators=(',', ': '))\n with open(file) as json_file:\n data = json.load(json_file)\n index = 1\n for item in data:\n query = Query()\n database.upsert({\"uuid\": item['uuid'], \"release_name\": item['release_name'], \"region\": item['region'], \"system\": item['system'], \"sha1\": item['sha1'], \"rom_extensionless_file_name\": item['rom_extensionless_file_name'], \"developer\": item['developer'], \"publisher\": item['publisher'], \"genre\": item['genre'],\n \"date\": item['date'], \"description\": item[\"description\"], \"reference_url\": item[\"reference_url\"], \"manual_url\": item[\"manual_url\"]}, (query.sha1 == item[\"sha1\"]) & (query.release_name == item[\"release_name\"]) & (query.rom_extensionless_file_name == item[\"rom_extensionless_file_name\"]))\n print(f\"{index}/{len(data)} - {item['release_name']}\")\n index += 1\n\ndef import_patch2(file):\n database = TinyDB('database.json', indent=4, separators=(',', ': '))\n with open(file) as json_file:\n data = json.load(json_file)\n index = 1\n for item in data:\n query = Query()\n result = database.search((query.release_name == item[\"release_name\"]) & (query.rom_extensionless_file_name == item[\"rom_extensionless_file_name\"]) & (query.system == item[\"system\"]))\n if len(result) < 1:\n database.upsert({\"uuid\": item['uuid'], \"release_name\": item['release_name'], \"region\": item['region'], \"system\": item['system'], \"sha1\": item['sha1'], \"rom_extensionless_file_name\": item['rom_extensionless_file_name'], \"developer\": item['developer'], \"publisher\": item['publisher'], \"genre\": item['genre'],\n \"date\": item['date'], \"description\": item[\"description\"], \"reference_url\": item[\"reference_url\"], \"manual_url\": item[\"manual_url\"]}, (query.sha1 == item[\"sha1\"]) & (query.release_name == item[\"release_name\"]) & (query.rom_extensionless_file_name == item[\"rom_extensionless_file_name\"]))\n print(f\"{index}/{len(data)} - {item['release_name']}\")\n index += 1\n else:\n print(f\"{index}/{len(data)} - skipping {item['release_name']}\")\n index += 1\n\n\n# https://github.com/OpenVGDB/OpenVGDB\ndef openvgdb_to_patch():\n\n def get_release_by_rom_id(id):\n db_connection = sqlite3.connect(db_file)\n cur = db_connection.cursor()\n cur.execute(f\"SELECT * FROM RELEASES WHERE romID=? 
\", (id,))\n rows = cur.fetchall()\n db_connection.close()\n if len(rows) > 0:\n return rows[0]\n else:\n return (\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\")\n\n db_file = \"openvgdb.sqlite\"\n db_connection = sqlite3.connect(db_file)\n roms = db_connection.cursor().execute(f\"SELECT * FROM ROMS\",).fetchall()\n index = 1\n modb = []\n for item in roms:\n release = get_release_by_rom_id(item[0])\n print(f\"{index}/{len(roms)} - {replace_none(release[2])}\")\n rom = {}\n rom['uuid'] = generate_uuid()\n rom[\"release_name\"] = replace_none(release[2])\n rom[\"region\"] = replace_none(release[4])\n rom[\"system\"] = replace_none(release[6])\n rom['sha1'] = replace_none(item[5])\n rom[\"rom_extensionless_file_name\"] = replace_none(item[8])\n rom[\"publisher\"] = replace_none(release[13])\n rom[\"date\"] = replace_none(release[15])\n rom[\"developer\"] = replace_none(release[12])\n rom[\"genre\"] = replace_none(release[14])\n rom[\"description\"] = replace_none(release[11])\n rom[\"reference_url\"] = replace_none(release[16])\n rom[\"manual_url\"] = \"\"\n modb.append(rom)\n index += 1\n\n with open('import/seed/openvgdb.json', 'w') as outjson:\n json.dump(modb, outjson, indent=4)\n\ndef no_into_game_gear_to_patch():\n f = open('import\\seed\\openvgdb.json')\n f = json.load(f)\n roms = []\n with open(f'dats/Sega - Game Gear (20220110-000545).dat') as fd:\n doc = xmltodict.parse(fd.read())\n for game in doc[\"datafile\"][\"game\"]:\n sha1 = \"\"\n if \"@sha1\" in game[\"rom\"]:\n sha1 = game[\"rom\"][\"@sha1\"]\n rom = vars(Rom())\n rom[\"rom_extensionless_file_name\"] = game[\"@name\"]\n rom[\"sha1\"] = sha1.upper()\n rom[\"uuid\"] = generate_uuid()\n rom[\"release_name\"] = game[\"@name\"]\n rom[\"system\"] = \"Sega Game Gear\"\n for item in f:\n if sha1.upper() == item[\"sha1\"]:\n rom = item\n roms.append(rom)\n \n with open('import/seed/no_into_game_gear.json', 'w') as outjson:\n json.dump(roms, outjson, indent=4)\n\n\n \n\n\ndef mra_to_patch():\n roms = []\n index = 1\n files = os.listdir(\"dats/mra/\")\n for mra in files:\n with open(f'dats/mra/{mra}') as fd:\n try:\n doc = xmltodict.parse(fd.read())\n name = doc['misterromdescription']['name']\n rbf = doc['misterromdescription']['rbf']\n year = \"\"\n if 'year' in doc['misterromdescription']:\n year = doc['misterromdescription']['year']\n manufacturer = \"\"\n if 'manufacturer' in doc['misterromdescription']:\n manufacturer = doc['misterromdescription']['manufacturer']\n category = \"\"\n if \"category\" in doc['misterromdescription']:\n category = doc['misterromdescription']['category']\n region = \"\"\n if 'region' in doc['misterromdescription']:\n region = doc['misterromdescription']['region']\n author = \"\"\n if \"about\" in doc['misterromdescription']:\n if doc['misterromdescription']['about'] != None:\n if \"author\" in doc['misterromdescription']['about']:\n author = doc['misterromdescription']['about']['@author']\n if 'mraauthor' in doc['misterromdescription']:\n author = doc['misterromdescription']['mraauthor']\n\n rom = {}\n rom[\"uuid\"] = generate_uuid()\n rom[\"release_name\"] = name\n rom[\"region\"] = region\n rom[\"system\"] = \"Arcade\"\n rom['sha1'] = \"\"\n rom[\"rom_extensionless_file_name\"] = pathlib.Path(mra).stem\n rom[\"publisher\"] = manufacturer\n rom[\"date\"] = year\n rom[\"developer\"] = manufacturer\n rom[\"genre\"] = category\n rom[\"description\"] = \"\"\n rom[\"reference_url\"] = \"\"\n rom[\"manual_url\"] = \"\"\n roms.append(rom)\n 
print(f\"{index}/{len(files)} - {pathlib.Path(mra).stem}\")\n index += 1\n except Exception as e:\n print(f\"{index}/{len(files)} - Failed to parse {mra}\")\n print(e)\n index += 1\n\n with open('import/seed/mra.josn', 'w') as outjson:\n json.dump(roms, outjson, indent=4)\n\n# https://github.com/libretro/libretro-database/tree/master/metadat\n\n\ndef libretro_dat_to_patch(filename, system, outname):\n file = open(filename, 'r')\n lines = file.readlines()\n roms = []\n rom = {}\n for line in lines:\n line = line.strip()\n if 'name \"' in line and \"size\" not in line:\n rom = {}\n rom[\"uuid\"] = generate_uuid()\n rom[\"system\"] = system\n rom[\"region\"] = \"\"\n rom[\"sha1\"] = \"\"\n release_name = line.replace('name \"', \"\").replace('\"', \"\")\n rom[\"release_name\"] = release_name\n rom[\"developer\"] = \"\"\n rom[\"genre\"] = \"\"\n rom[\"date\"] = \"\"\n rom[\"publisher\"] = \"\"\n rom[\"reference_url\"] = \"\"\n rom[\"manual_url\"] = \"\"\n if 'rom ( name' in line:\n #rom_name = line.split('\" size ')[0].replace('rom ( name \"',\"\")\n #rom[\"rom_extensionless_file_name\"] = pathlib.Path(rom_name).stem\n rom[\"rom_extensionless_file_name\"] = release_name\n if \"sha1\" in line:\n sha1 = line.split('sha1 ')[1].replace(' )', \"\").upper()\n rom[\"sha1\"] = sha1\n roms.append(rom)\n else:\n roms.append(rom)\n if 'description \"' in line:\n rom['description'] = line.replace(\n 'description \"', \"\").replace('\"', \"\")\n with open(f'import/seed/libretro_{outname}.json', 'w') as outjson:\n json.dump(roms, outjson, indent=4)\n\n\n# https://github.com/libretro/libretro-database/blob/master/metadat/fbneo-split/FBNeo_romcenter.dat\ndef neogeo_to_patch():\n roms = []\n with open('dats/FBNeo_romcenter.dat') as fd:\n doc = xmltodict.parse(fd.read())\n for game in doc['datafile'][\"game\"]:\n if \"@romof\" in game:\n if game['@romof'] == 'neogeo':\n rom = {}\n rom['uuid'] = generate_uuid()\n rom[\"release_name\"] = game['description']\n rom[\"region\"] = \"\"\n rom[\"system\"] = 'SNK Neo Geo'\n rom['sha1'] = \"\"\n rom[\"rom_extensionless_file_name\"] = game['@name']\n rom[\"publisher\"] = game['manufacturer']\n rom[\"date\"] = game['year']\n rom[\"developer\"] = game['manufacturer']\n rom[\"genre\"] = \"\"\n rom[\"description\"] = \"\"\n rom[\"reference_url\"] = \"\"\n rom[\"manual_url\"] = \"\"\n roms.append(rom)\n print(f\"{game['description']}\")\n\n for item in roms:\n if \"@cloneof\" in game:\n if item[\"rom_extensionless_file_name\"] == game[\"@cloneof\"]:\n rom = {}\n rom['uuid'] = generate_uuid()\n rom[\"release_name\"] = game['description']\n rom[\"region\"] = \"\"\n rom[\"system\"] = 'SNK Neo Geo'\n rom['sha1'] = \"\"\n rom[\"rom_extensionless_file_name\"] = game['@name']\n rom[\"publisher\"] = game['manufacturer']\n rom[\"date\"] = game['year']\n rom[\"developer\"] = game['manufacturer']\n rom[\"genre\"] = \"\"\n rom[\"description\"] = \"\"\n rom[\"reference_url\"] = \"\"\n rom[\"manual_url\"] = \"\"\n roms.append(rom)\n print(f\"{game['description']}\")\n \n\n with open('import/seed/neogeo.json', 'w') as outjson:\n json.dump(roms, outjson, indent=4)\n\n\nif __name__ == \"__main__\":\n import_patch2(\"import/seed/no_into_game_gear.json\")\n #no_into_game_gear_to_patch()", "id": "1574829", "language": "Python", "matching_score": 3.6388072967529297, "max_stars_count": 0, "path": "database_tools.py" }, { "content": "import sqlite3\n\ndb_file = \"openvgdb.sqlite\"\n\ndef get_rom_by_hash(hash):\n db_connection = sqlite3.connect(db_file)\n cur = db_connection.cursor()\n 
cur.execute(f\"SELECT * FROM ROMS WHERE romHashSHA1=?\", (hash,))\n rows = cur.fetchall()\n db_connection.close()\n if len(rows) > 0:\n return rows[0]\n else:\n return (\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\")\n\ndef get_release_by_rom_id(id):\n db_connection = sqlite3.connect(db_file)\n cur = db_connection.cursor()\n cur.execute(f\"SELECT * FROM RELEASES WHERE romID=? \", (id,))\n rows = cur.fetchall()\n db_connection.close()\n if len(rows) > 0:\n return rows[0]\n else:\n return (\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\")\n\ndef get_rom_by_name_or_hash(name,hash):\n db_connection = sqlite3.connect(db_file)\n cur = db_connection.cursor()\n cur.execute(f\"SELECT * FROM ROMS WHERE romHashSHA1=? OR romFileName like ?\", (hash,'%'+name+'%',))\n rows = cur.fetchall()\n db_connection.close()\n if len(rows) > 0:\n return rows[0]\n else:\n return (\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\")\n\n\n\nif __name__ == \"__main__\":\n #print(get_rom_by_hash(\"56FE858D1035DCE4B68520F457A0858BAE7BB16D\"))\n print(get_release_by_rom_id(\"1\"))", "id": "8532397", "language": "Python", "matching_score": 0.4642752408981323, "max_stars_count": 0, "path": "openvgdb.py" }, { "content": "import json\nimport paramiko\nimport shutil\nimport os\nimport config\nimport logger\nimport mister\nimport ssh\n\nSETTINGS = config.get_config()\n\nipaddress = SETTINGS['main']['mister_ip']\nusername = SETTINGS['main']['mister_username']\npassword = SETTINGS['main']['mister_password']\n\nmap_file = \"cores.json\"\ncores = {}\n\n\ndef build_system_map():\n stdout = ssh.send_command('find /media/fat -type f -name \"*.rbf\"')\n stdout.sort()\n cores = {}\n for line in stdout:\n line = line.split('/')[-1].strip()\n corename = line.replace(\".rbf\",\"\")\n if \"_\" in line:\n corename = line.split(\"_\")[0]\n \n core = {}\n core['description'] = \"{} core\".format(corename)\n core['scene'] = \"{} Scene\".format(corename)\n\n cores[corename] = core\n return cores\n\ndef get_cores():\n return cores\n\ndef read_file_map():\n global map_file\n if os.path.exists(map_file):\n with open(map_file) as map_json:\n maps = json.load(map_json)\n return maps\n else:\n return {}\n\ndef merge_maps():\n if os.path.exists(map_file):\n logger.info(\"Backing up {} ...\".format(map_file))\n shutil.copyfile(map_file, '{}.bak'.format(map_file))\n else:\n logger.info(\"No map file exists, only using map from system.\")\n system_map = build_system_map()\n file_map = read_file_map()\n merged_map = system_map\n for map in file_map:\n merged_map[map] = file_map[map]\n added_maps = []\n for map in system_map:\n if map not in file_map:\n added_maps.append(map)\n if len(added_maps) > 0:\n logger.info(\"The following cores have been added to the cores.json: {}\".format(added_maps))\n \n with open(map_file, \"w\") as write_file:\n json.dump(merged_map, write_file, indent=4)\n\ndef load_map_to_memory():\n global map_file\n global cores\n if os.path.exists(map_file):\n with open(map_file) as map_json:\n maps = json.load(map_json)\n cores = maps\n\ndef get_map(corename):\n if corename in cores:\n return cores[corename]\n else:\n return {}\n\nmerge_maps()\nload_map_to_memory() \n\nif __name__ == \"__main__\":\n pass", "id": "12674725", "language": "Python", "matching_score": 2.491809129714966, "max_stars_count": 8, "path": "cores.py" }, { "content": "import json\nimport os\nimport pathlib\nfrom time import sleep\nimport time\nimport hashlib\nimport 
database\nimport zipfile\nimport pathlib\nfrom os import listdir\nfrom os.path import isfile, join\nimport logger\nimport shutil\nimport event_manager\nimport threading\nimport config\n\nSETTINGS = config.get_config()\n\nmap_file = \"retroarch.json\"\ncores = {}\n\nretroatch_path = SETTINGS[\"retroarch\"][\"install_path\"]\ncores_folder = os.path.join(retroatch_path,\"cores\")\nrecents = os.path.join(retroatch_path,\"content_history.lpl\")\nlast_details = {}\n\n\nclass RetroarchGameChange():\n def __init__(self, system, core, rom,tokens):\n self.publisher = \"Retroarch\"\n self.event = \"RetroarchGameChange\"\n self.system = system\n self.core = core\n self.rom = rom\n self.tokens = tokens\n\n def toJSON(self):\n return json.dumps(self, default=lambda o: o.__dict__,\n sort_keys=True, indent=4)\n\ndef get_core_list():\n cores = {}\n onlyfiles = [pathlib.Path(f).stem.replace(\"_libretro\",\"\") for f in listdir(cores_folder) if isfile(join(cores_folder, f))]\n for item in onlyfiles:\n core = {}\n cores[item] = core\n return cores\n\ndef read_file_map():\n global map_file\n if os.path.exists(map_file):\n with open(map_file) as map_json:\n maps = json.load(map_json)\n return maps\n else:\n return {}\n\ndef merge_maps():\n if os.path.exists(map_file):\n logger.info(\"Backing up {} ...\".format(map_file))\n shutil.copyfile(map_file, '{}.bak'.format(map_file))\n else:\n logger.info(\"No map file exists, only using map from system.\")\n system_map = get_core_list()\n file_map = read_file_map()\n merged_map = system_map\n for map in file_map:\n merged_map[map] = file_map[map]\n added_maps = []\n for map in system_map:\n if map not in file_map:\n added_maps.append(map)\n if len(added_maps) > 0:\n logger.info(\"The following cores have been added to the retroarch.json: {}\".format(added_maps))\n \n with open(map_file, \"w\") as write_file:\n json.dump(merged_map, write_file, indent=4)\n\ndef load_map_to_memory():\n global map_file\n global cores\n if os.path.exists(map_file):\n with open(map_file) as map_json:\n maps = json.load(map_json)\n cores = maps\n\ndef hash_zip(file):\n archive = zipfile.ZipFile(file)\n blocksize = 1024**2 #1M chunks\n for fname in archive.namelist():\n entry = archive.open(fname)\n sha1 = hashlib.sha1()\n while True:\n block = entry.read(blocksize)\n if not block:\n break\n sha1.update(block)\n return sha1.hexdigest().upper()\ndef hash_file(file):\n buf_size = 65536\n sha1 = hashlib.sha1()\n with open(file, 'rb') as f:\n while True:\n data = f.read(buf_size)\n if not data:\n break\n sha1.update(data)\n return format(sha1.hexdigest().upper())\n\ndef initialize():\n logger.info(\"Initializing Retroarch publisher ...\")\n merge_maps()\n load_map_to_memory()\n\ndef publish():\n global last_details\n if os.path.exists(recents):\n access = time.time() - os.path.getmtime(recents)\n if access < int( SETTINGS[\"retroarch\"][\"refresh_rate\"]):\n with open(recents) as recents_json:\n details = json.load(recents_json)\n if \"items\" in details:\n if len(details[\"items\"]) > 0:\n if details != last_details:\n file = details[\"items\"][0][\"path\"]\n core = pathlib.Path(details[\"items\"][0][\"core_path\"]).stem.replace(\"_libretro\",\"\")\n game = pathlib.Path(file).stem\n system = core\n try:\n if \"system\" in cores[core]:\n system = cores[core][\"system\"]\n except Exception as e:\n pass\n hash = \"\"\n if zipfile.is_zipfile(file):\n hash = hash_zip(file)\n else:\n hash = hash_file(file)\n rom = {}\n if hash != \"\":\n rom = database.get_rom_by_hash(hash)\n if len(rom) != 0:\n 
logger.info(f\"Hash: {hash} matched in database\")\n else:\n logger.info(f\"Hash: {hash} not matched in database\")\n if len(rom) == 0:\n rom = database.get_rom_by_name(game,system)\n if \"rom_extensionless_file_name\" in rom:\n logger.info(f\"Rom name match in database for Game: {game}, System: {system}\")\n system = rom[\"system\"]\n else:\n logger.info(f\"Game {game} not found in database, defaulting to game\")\n\n rom = vars(database.Rom())\n rom[\"release_name\"] = game\n rom[\"rom_extensionless_file_name\"] = game\n rom[\"system\"] = system\n\n\n try:\n tokens = cores[core]\n tokens[\"core\"] = core\n tokens.update(rom)\n event = RetroarchGameChange(system,core,rom,tokens)\n threading.Thread(target=event_manager.manage_event, args=[event]).start()\n except Exception as e:\n logger.error(f\"Unabled to publish MisterGameChange event\")\n last_details = details\n\nevent_manager.publishers[\"Retroarch\"] = {}\nevent_manager.publishers[\"Retroarch\"][\"initialize\"] = lambda:initialize()\nevent_manager.publishers[\"Retroarch\"][\"publish\"] = lambda:publish()\n\n", "id": "5611777", "language": "Python", "matching_score": 4.708185195922852, "max_stars_count": 0, "path": "retroarch.py" }, { "content": "from lib2to3.pgen2 import token\nimport pathlib\nimport config\nimport sys\nimport os\nimport json\nimport shutil\nimport logger\nimport ssh\nimport threading\nimport event_manager\nimport database\nfrom time import sleep\n\nSETTINGS = config.get_config()\nRECENTS_FOLDER = '/media/{}/config/'.format(SETTINGS['mister']['core_storage'])\n\nconnected = False\nssh_session = None\nretries = 0\nmax_retries = 3\n\nlast_game = \"\"\nlast_core = \"\"\n\nmap_file = \"mister.json\"\ncores = {}\n\n\nclass MisterCoreChange():\n def __init__(self, system, core,tokens):\n self.publisher = \"MiSTer\"\n self.event = \"MisterCoreChange\"\n self.system = system\n self.core = core\n self.tokens = tokens\n \n\n def toJSON(self):\n return json.dumps(self, default=lambda o: o.__dict__,\n sort_keys=True, indent=4)\n\n\nclass MisterGameChange():\n def __init__(self, system, core, rom,tokens):\n self.publisher = \"MiSTer\"\n self.event = \"MisterGameChange\"\n self.system = system\n self.core = core\n self.rom = rom\n self.tokens = tokens\n\n def toJSON(self):\n return json.dumps(self, default=lambda o: o.__dict__,\n sort_keys=True, indent=4)\n\ndef initialize():\n global connected\n global ssh_session\n global retries\n if retries < max_retries:\n try:\n ipaddress = SETTINGS['mister']['ipaddress']\n username = SETTINGS['mister']['username']\n password = SETTINGS['<PASSWORD>']['password']\n port = SETTINGS['mister']['port']\n ssh_session = ssh.SshConnection(ipaddress,port,username,password)\n ssh_session.connect()\n logger.info(\"Connected to MiSTer\")\n merge_maps()\n load_map_to_memory()\n connected = True\n except Exception as e:\n logger.error(f\"Failed to connect to MiSTer - {e}\")\n retries += 1\n initialize()\n\ndef reconnect():\n global retries\n retries = 0\n initialize()\n\ndef send_command(command):\n try:\n return ssh_session.send_command(command)\n except Exception as e:\n if \"reconnect\" in SETTINGS['mister']:\n if SETTINGS['mister'][\"reconnect\"]:\n reconnect()\n else:\n return \"\"\n else:\n return \"\"\n\ndef build_system_map():\n stdout = send_command('find /media/fat -type f -name \"*.rbf\"')\n stdout.sort()\n cores = {}\n for line in stdout:\n line = line.split('/')[-1].strip()\n corename = line.replace(\".rbf\",\"\")\n if \"_\" in line:\n corename = line.split(\"_\")[0]\n \n core = {}\n \n\n 
cores[corename] = core\n return cores\n\ndef get_cores():\n return cores\n\ndef read_file_map():\n global map_file\n if os.path.exists(map_file):\n with open(map_file) as map_json:\n maps = json.load(map_json)\n return maps\n else:\n return {}\n\ndef merge_maps():\n if os.path.exists(map_file):\n logger.info(\"Backing up {} ...\".format(map_file))\n shutil.copyfile(map_file, '{}.bak'.format(map_file))\n else:\n logger.info(\"No map file exists, only using map from system.\")\n system_map = build_system_map()\n file_map = read_file_map()\n merged_map = system_map\n for map in file_map:\n merged_map[map] = file_map[map]\n added_maps = []\n for map in system_map:\n if map not in file_map:\n added_maps.append(map)\n if len(added_maps) > 0:\n logger.info(\"The following cores have been added to the mister.json: {}\".format(added_maps))\n \n with open(map_file, \"w\") as write_file:\n json.dump(merged_map, write_file, indent=4)\n\ndef load_map_to_memory():\n global map_file\n global cores\n if os.path.exists(map_file):\n with open(map_file) as map_json:\n maps = json.load(map_json)\n cores = maps\n\ndef get_map(corename):\n if corename in cores:\n return cores[corename]\n else:\n return {}\n\n\ndef get_running_core():\n try:\n stdout = send_command(\"ps aux | grep [r]bf\")\n current_core = 'menu'\n for line in stdout:\n if '.rbf' in line:\n for part in line.split(\" \"):\n if \".rbf\" in part:\n line = part\n core_name = line.split('/')[-1].replace('.rbf','').strip()\n if \"_\" in core_name:\n base_name = core_name.split('_')[0]\n current_core = base_name\n\n else:\n core_name = core_name.replace('.rbf','').strip()\n current_core = core_name\n return current_core\n except Exception as e:\n logger.error(repr(e))\n return \"\"\n\ndef get_file_hash(filepath, filename):\n stdout = \"\"\n if \".zip\" in filepath:\n stdout = send_command(f'unzip -p \"../media/{filepath}\" \"{filename}\" | sha1sum')\n else:\n stdout = send_command(f'sha1sum \"../media/{filepath}/{filename}\"')\n if len(stdout) > 0:\n return stdout[0].split()[0].upper()\n return \"\"\n\ndef get_last_game(core):\n def ignore(line):\n ignore = [\"cores_recent.cfg\",\"_shmask\",\"_scaler\",\"_gamma\"]\n for item in ignore:\n if item in line:\n return True\n return False\n last_game = \"\",\"\",\"\"\n try:\n processes = \"\"\n processes = send_command(\"ps aux | grep [r]bf\")\n\n for line in processes:\n if \".mra\" in line:\n last_game = line.split('/')[-1].replace('.mra','').strip()\n filename = line.split('/')[-1].strip()\n # adding ../ to path to match the format of the console recents file. 
Should probbaly not do this\n filepath = line.split(' /media/')[-1].strip().replace(\"/\"+filename,\"\")\n return last_game,filepath,filename\n else:\n timeframe = 0.15 * int(SETTINGS['mister']['refresh_rate'])\n last_changed = send_command(f'find /media/fat/config/ -mmin -{timeframe}')\n if len(last_changed) > 0:\n for line in last_changed:\n if not ignore(line):\n recent = send_command('strings {}'.format(line.strip()))\n if len(recent) > 0:\n if \".ini\" not in recent[1]:\n return pathlib.Path(recent[1].strip()).stem,recent[0].strip()[3:],recent[1].strip()\n return last_game\n\n except Exception as e:\n logger.error(repr(e))\n return \"\",\"\",\"\"\n\ndef publish():\n if connected:\n global last_core\n global last_game\n\n core = get_running_core()\n game,filepath,filename = get_last_game(core)\n system = core\n try:\n if \"system\" in cores[core]:\n system = cores[core][\"system\"]\n except Exception as e:\n pass\n\n if core != \"\" and core != last_core:\n \n last_core = core\n if \"system\" in cores[core]:\n system = cores[core][\"system\"]\n tokens = cores[core]\n tokens[\"core\"] = core\n event = MisterCoreChange(system,core,tokens)\n threading.Thread(target=event_manager.manage_event, args=[event]).start()\n\n \n if game != \"\" and game != last_game:\n hash = get_file_hash(filepath,filename)\n rom = {}\n if hash != \"\":\n rom = database.get_rom_by_hash(hash)\n if len(rom) != 0:\n logger.info(f\"Hash: {hash} matched in database\")\n system = rom[\"system\"]\n else:\n logger.info(f\"Hash: {hash} not matched in database\")\n if len(rom) == 0:\n rom = database.get_rom_by_name(game,system)\n if \"rom_extensionless_file_name\" in rom:\n logger.info(f\"Rom name match in database for Game: {game}, System: {system}\")\n system = rom[\"system\"]\n else:\n logger.info(f\"Game {game} not found in database, defaulting to game\")\n\n rom = vars(database.Rom())\n rom[\"release_name\"] = game\n rom[\"rom_extensionless_file_name\"] = game\n rom[\"system\"] = system\n\n\n try:\n tokens = cores[core]\n tokens[\"core\"] = core\n tokens.update(rom)\n event = MisterGameChange(system,core,rom,tokens)\n last_game = game\n threading.Thread(target=event_manager.manage_event, args=[event]).start()\n except Exception as e:\n logger.error(f\"Unable to publish MisterGameChange event\")\n \nevent_manager.publishers[\"MiSTer\"] = {}\nevent_manager.publishers[\"MiSTer\"][\"initialize\"] = lambda:initialize()\nevent_manager.publishers[\"MiSTer\"][\"publish\"] = lambda:publish()\n\n\n\nif __name__ == \"__main__\":\n initialize()\n while True:\n publish()\n sleep(1)\n", "id": "11389654", "language": "Python", "matching_score": 4.965366840362549, "max_stars_count": 0, "path": "mister.py" }, { "content": "import logger\nimport config\nimport cores\nimport ssh\nimport os\n\nSETTINGS = config.get_config()\nRECENTS_FOLDER = '/media/{}/config/'.format(SETTINGS['core_storage'])\n\ndef get_running_core():\n try:\n stdout = ssh.send_command(\"ps aux | grep [r]bf\")\n current_core = 'menu'\n for line in stdout:\n if '.rbf' in line:\n #logger.info(line.strip())\n for part in line.split(\" \"):\n if \".rbf\" in part:\n line = part\n core_name = line.split('/')[-1].replace('.rbf','').strip()\n if \"_\" in core_name:\n base_name = core_name.split('_')[0]\n current_core = base_name\n\n else:\n core_name = core_name.replace('.rbf','').strip()\n current_core = core_name\n return current_core\n except Exception as e:\n logger.error(repr(e))\n return \"\"\n\n\ndef get_last_game(core):\n ignore = [\"cores_recent.cfg\"]\n last_game = 
\"\"\n try:\n processes = ssh.send_command(\"ps aux | grep [r]bf\")\n for line in processes:\n if \".mra\" in line:\n last_game = line.split('/')[-1].replace('.mra','').strip()\n return last_game\n else:\n timeframe = 0.15 * int(SETTINGS['refresh_rate'])\n last_changed = ssh.send_command(f'find /media/fat/config/ -mmin -{timeframe}')\n if len(last_changed) > 0:\n for line in last_changed:\n if \"cores_recent.cfg\" not in line:\n recent = ssh.send_command('strings {}'.format(line.strip()))\n if len(recent) > 0:\n return os.path.splitext(recent[2].strip().split('/')[-1])[0]\n return last_game\n\n except Exception as e:\n logger.error(repr(e))\n return \"\"\n\n\n\nif __name__ == \"__main__\":\n pass", "id": "6205773", "language": "Python", "matching_score": 1.0975773334503174, "max_stars_count": 0, "path": "mister.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport json\nimport os\nimport shutil\n\njson_file = \"splitcores.json\"\n\nrbf_files = []\n\nrbf_location = \"/media/fat\"\n\n# fid all rbf files\nprint(\"Scanning {} for core files ...\".format(rbf_location))\nfor root,d_names,f_names in os.walk(rbf_location):\n for filename in f_names:\n if \".rbf\" in filename:\n rbf_files.append(os.path.join(root, filename))\n\ndef copy_core(src,dest):\n shutil.copy(src,dest)\n\ndef get_datestamp(name):\n name = name.split(\"/\")[-1]\n if \"_\" in name:\n return name.split(\"_\")[1].replace(\".rbf\",\"\")\n else:\n return \"\"\n\n\ndef match_core(core):\n matches = []\n for syscore in rbf_files:\n filename = syscore.split(\"/\")[-1]\n corename = filename.replace(\".rbf\",\"\")\n if \"_\" in filename:\n corename = filename.split(\"_\")[0]\n datestamp = filename.split(\"_\")[1]\n if corename == core:\n matches.append(syscore)\n return matches\n\n\nif os.path.exists(json_file):\n with open(json_file) as splitcore_json:\n cores = json.load(splitcore_json)\n for core in cores:\n print(\"Cleaning up old fores for {}.\".format(core))\n for copy in cores[core]:\n matches = match_core(copy)\n for match in matches:\n print(\"Removing old core {}.\".format(match))\n os.remove(match)\n print(\"Done.\")\n matches = match_core(core)\n print(\"Splitting cores ...\")\n for match in matches:\n for newcore in cores[core]:\n rbf_path = os.path.dirname(match)\n new_rbf_name = \"{}_{}.rbf\".format(newcore,get_datestamp(match))\n new_rbf = os.path.join(rbf_path,new_rbf_name)\n try:\n print(\"copying {} to new {}\".format(match,new_rbf))\n copy_core(match, new_rbf)\n except Exception as e:\n print(repr(e))\n", "id": "8567819", "language": "Python", "matching_score": 0.7911738753318787, "max_stars_count": 8, "path": "splitcores.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport shutil\n\ntry:\n shutil.rmtree('dist')\nexcept Exception as e:\n pass\nos.system('pyinstaller -i icon.ico -n test --clean -y --add-data \"templates;templates\" --add-data \"cores.json;.\" --add-data \"config.json;.\" --add-data \"static;static\" --add-data \"openvgdb.sqlite;.\" test.py')\nshutil.make_archive(os.path.join(\"release\",\"test\"), 'zip', \"dist/test\")\nshutil.rmtree('dist')\nos.system('pyinstaller -i icon.ico -n MiSTerDash --clean -y -F --add-data \"templates;templates\" --add-data \"static;static\" main.py')\nshutil.copyfile('cores.json', 'dist/cores.json')\nshutil.copyfile('config.json', 'dist/config.json')\nshutil.copyfile('openvgdb.sqlite', 'dist/openvgdb.sqlite')\nshutil.make_archive(os.path.join(\"release\",\"MiSTerDash\"), 'zip', \"dist\")\n", "id": "722707", 
"language": "Python", "matching_score": 2.1713523864746094, "max_stars_count": 0, "path": "build.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport shutil\n\ntry:\n shutil.rmtree('dist')\nexcept Exception as e:\n pass\nos.system('pyinstaller -i icon.ico -n DatabaseManager --clean -y -F --windowed database_manager.py')\nos.system('pyinstaller -i icon.ico -n DatabaseTool --clean -y -F database_tool.py')\nshutil.copyfile('Create Patch.bat', '{0}/Create Patch.bat'.format(\"dist\"))\nshutil.copyfile('Import Patch.bat', '{0}/Import Patch.bat'.format(\"dist\"))\nshutil.copyfile('splitcores.json', '{0}/splitcores.json'.format(\"dist\"))\nshutil.copyfile('splitcores.py', '{0}/splitcores.py'.format(\"dist\"))\nshutil.copyfile('splitcores.sh', '{0}/splitcores.sh'.format(\"dist\"))\nshutil.copyfile('update_and_copy.sh', '{0}/update_and_copy.sh'.format(\"dist\"))\nshutil.make_archive(os.path.join(\"release\",\"extra\"), 'zip', \"dist\")", "id": "9602921", "language": "Python", "matching_score": 1.5737583637237549, "max_stars_count": 0, "path": "build_extra.py" }, { "content": "import database_tools\nfrom os.path import exists\nimport sys\n\ndef create_patch():\n print(\"-------------------------------------\")\n print(\"Creating patch ...\")\n print(\"This process wil create a database.patch file by comparing current database to an older version\")\n input(\"Press enter to continue\")\n print(\"Locating database.json\")\n if not exists(\"database.json\"):\n print(\"database.json not found.\")\n input(\"Press enter to quit\")\n sys.exit()\n print(\"database.json found\")\n print(\"Locating database.old\")\n if not exists(\"database.old\"):\n print(\"database.old not found.\")\n input(\"Press enter to quit\")\n sys.exit()\n print(\"database.old found\")\n print(\"\")\n print(\"-------------------------------------\")\n database_tools.create_patch(\"database.json\",\"database.old\",\"database.patch\")\n\n\ndef import_patch():\n print(\"-------------------------------------\")\n print(\"Importing database.patch ...\")\n if not exists(\"database.patch\"):\n print(\"database.patch not found.\")\n input(\"Press enter to quit\")\n sys.exit()\n print(\"-------------------------------------\")\n print(\"\")\n database_tools.import_patch(\"database.patch\")\n\nif len(sys.argv) > 1:\n if sys.argv[1] == \"-i\" or sys.argv[1] == \"-import\":\n import_patch()\n elif sys.argv[1] == \"-c\" or sys.argv[1] == \"-createpatch\":\n create_patch()\n else:\n print(\"Use -c or -createpatch to create a database.patch file\")\n print(\"Use -i or -import to import a database.patch file\")\nelse:\n print(\"Use -c or -createpatch to create a database.patch file\")\n print(\"Use -i or -import to import a database.patch file\")", "id": "2456994", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "database_tool.py" }, { "content": "import pyttsx3\nimport configuration\n\n\nconfig = configuration.get_config()\n\nengine = pyttsx3.init('sapi5')\n\n# Set Rate\nengine.setProperty('rate', 190)\n\n# Set Volume\nengine.setProperty('volume', 1.0)\n\n# Set Voice (Female)\nvoices = engine.getProperty('voices')\nengine.setProperty('voice', voices[config['voice']].id)\n\n\ndef speak_text(text):\n engine.say(text)\n engine.runAndWait()\n\n\n\nif __name__ == \"__main__\":\n pass", "id": "1703837", "language": "Python", "matching_score": 2.2523441314697266, "max_stars_count": 0, "path": "speak.py" }, { "content": "from speak import speak_text\n\nimport configuration\n\nconfig = 
configuration.get_config()\n\nif __name__ == \"__main__\":\n text = f'Hello {config[\"owner\"]}'\n speak_text(text)", "id": "10095820", "language": "Python", "matching_score": 1.1805394887924194, "max_stars_count": 0, "path": "test_speech.py" }, { "content": "import json\nfrom speak import speak_text\nfrom listen import listen_to_microphone\nimport random\nimport configuration\nfrom processor import process_command\nfrom time import sleep\n\nconfig = configuration.get_config()\n\ndef replace_token(string):\n return string.replace(\"{self}\",config['self']).replace(\"{owner}\",config[\"owner\"])\n\n\n\nwhile True:\n activate = listen_to_microphone()\n print(activate)\n for phrase in config[\"activation_phrases\"]:\n if activate != \"\":\n if replace_token(phrase) in activate:\n response = replace_token(random.choice(config[\"activation_responses\"]))\n speak_text(f'{replace_token(response)}')\n command = listen_to_microphone()\n if command != \"None\":\n process_command(command)\n\n\n\n ", "id": "12201819", "language": "Python", "matching_score": 1.4269554615020752, "max_stars_count": 0, "path": "main.py" }, { "content": "import requests\nimport json\nfrom speak import speak_text\n\n\n\n\ndef get_joke_of_day():\n response = requests.get(\"https://api.jokes.one/jod\")\n if response.status_code != 200:\n return json.loads(response.text)[\"error\"][\"message\"]\n else:\n return(json.loads(response.text)[\"contents\"][\"jokes\"][0][\"joke\"][\"text\"])\n\ndef get_norris_joke():\n response = requests.get(\"https://api.chucknorris.io/jokes/random\")\n return json.loads(response.text)[\"value\"]\n\ndef process_command(command):\n if \"norris\" in command or \"chuck norris\" in command or \"chuck\" in command:\n joke = get_norris_joke()\n speak_text(joke)\n if \"of the day\" in command:\n joke = get_joke_of_day()\n speak_text(joke)\n\n\nif __name__ == \"__main__\":\n print(get_norris_joke())", "id": "11932498", "language": "Python", "matching_score": 1.0441186428070068, "max_stars_count": 0, "path": "jokes.py" }, { "content": "import jokes\nimport configuration\nimport sys\n\nactions = configuration.get_actions()\nsubscriptions = configuration.get_config()[\"subscriptions\"]\naction_threashold = .8\n\ndef process_command(command):\n print(command)\n for speech in actions:\n tokenized = speech.split()\n total = 0\n for token in tokenized:\n if token in command:\n total += 1\n positivity = total/(len(tokenized))\n if positivity >= action_threashold:\n print(f'Action match positivity: {positivity} for \"{command}\"')\n action = actions[speech][\"action\"]\n if action in subscriptions:\n if action == \"joke\":\n jokes.process_command(command)\n if action == \"stop\":\n sys.exit()\n break\n\nif __name__ == \"__main__\":\n for speech in actions:\n print(speech)", "id": "9938380", "language": "Python", "matching_score": 1.3788055181503296, "max_stars_count": 0, "path": "processor.py" }, { "content": "import json\n\n\n\nconfig_json = open(\"config.json\")\nconfig = json.load(config_json)\n\nactions_json = open(\"actions.json\")\nactions = json.load(actions_json)\n\ndef get_actions():\n global actions\n return actions\n\ndef get_config():\n global config\n return config", "id": "11069849", "language": "Python", "matching_score": 0.7868920564651489, "max_stars_count": 0, "path": "configuration.py" }, { "content": "__author__ = 'christopher'\n\nimport ConfigParser\nimport runtime\nimport logger\n\n\n\n\ndef create_config():\n config = ConfigParser.RawConfigParser()\n config.add_section('Configuration')\n 
config.set('Configuration', 'host', 'localhost')\n config.set('Configuration', 'port', '81')\n config.set('Configuration', 'password', '<PASSWORD>')\n config.set('Configuration', 'motd', '')\n config.set('Configuration', 'gui', 'false')\n config.set('Configuration', 'server', 'true')\n config.set('Configuration', 'verbose', 'false')\n config.set('Configuration', 'debug', 'false')\n config.set('Configuration', 'drop_claim_radius', '20')\n with open('config.cfg', 'wb') as configfile:\n config.write(configfile)\n\ndef read_config():\n config = ConfigParser.RawConfigParser()\n config.read('config.cfg')\n runtime.host = config.get('Configuration', 'host')\n runtime.port = config.get('Configuration', 'port')\n runtime.password = config.get('Configuration', 'password')\n runtime.motd = config.get('Configuration', 'motd')\n runtime.gui = config.getboolean('Configuration', 'gui')\n runtime.server = config.getboolean('Configuration', 'server')\n runtime.verbose = config.getboolean('Configuration', 'verbose')\n runtime.debug = config.getboolean('Configuration', 'debug')\n runtime.drop_claim_radius = config.get('Configuration', 'drop_claim_radius')\n\ndef save_config():\n config = ConfigParser.RawConfigParser()\n config.add_section('Configuration')\n config.set('Configuration', 'host',runtime.host)\n config.set('Configuration', 'port', runtime.port)\n config.set('Configuration', 'password', runtime.password)\n config.set('Configuration', 'motd', runtime.motd)\n config.set('Configuration', 'gui', runtime.gui)\n config.set('Configuration', 'server', runtime.server)\n config.set('Configuration', 'verbose', runtime.verbose)\n config.set('Configuration', 'debug', runtime.debug)\n config.set('Configuration', 'drop_claim_radius', runtime.drop_claim_radius)\n with open('config.cfg', 'wb') as configfile:\n config.write(configfile)\n\ntry:\n with open('config.cfg') as file:\n read_config()\nexcept IOError as e:\n logger.log_debug(\"No config file, so one is being created\")\n logger.log_debug(\"Pleas edit the config file and try again\")\n create_config()", "id": "1877540", "language": "Python", "matching_score": 1.8254315853118896, "max_stars_count": 0, "path": "config.py" }, { "content": "__author__ = 'christopher'\n\nimport runtime\nimport event\n\n\n\n\ndef add_log(info):\n print(info)\n if runtime.gui:\n log_event = []\n log_event.append(\"Log\")\n log_event.append(info)\n event.gui_event.insert(0,log_event)\n\n\ndef log_verbose(info):\n if runtime.verbose:\n add_log(info)\n\ndef log_debug(info):\n if runtime.debug:\n add_log(info)\n\ndef log(info):\n add_log(info)\n\n\n\n", "id": "6907024", "language": "Python", "matching_score": 2.163743734359741, "max_stars_count": 0, "path": "logger.py" }, { "content": "__author__ = 'christopher'\n\nglobal gui_event\n\ngui_event = []", "id": "3962281", "language": "Python", "matching_score": 0.09126893430948257, "max_stars_count": 0, "path": "event.py" }, { "content": "__author__ = 'christopher'\n\ndef is_coor_formatted(coor):\n if \"/\" in coor:\n return True\n else:\n return False\n\ndef format_coor(coor): #used to go from 100 64 100 format to 100 S / 100 N format\n x = coor.split()[0]\n y = coor.split()[2]\n z = coor.split()[1]\n\n if float(x) > 0:\n x = x.replace(\"-\", \"\") + \" E\"\n else:\n x = x.replace(\"-\", \"\") + \" W\"\n if float(y) > 0:\n y = y.replace(\"-\", \"\") + \" N\"\n else:\n y = y.replace(\"-\", \"\") + \" S\"\n\n formatted = y + \" / \" + x\n return formatted\n\ndef convert_coor(coor): #used to go from 100 S / 100 E format to 100 54 100 
format\n split_coor = coor.split()\n x = split_coor[3]\n y = split_coor[0]\n\n if split_coor[1] == \"S\":\n y = \"-\" + y\n\n if split_coor[4] == \"W\":\n x = \"-\" + x\n converted = x + \" \" + \"64\" + \" \" + y\n return converted\n\n\n\n\ndef in_radius(obj1,obj2,radius):\n if is_coor_formatted(obj1):\n obj1 = convert_coor(obj1)\n if is_coor_formatted(obj2):\n obj2 = convert_coor(obj2)\n obj1x = obj1.split(\" \")[0]\n obj1y = obj1.split(\" \")[2]\n obj2x = obj2.split(\" \")[0]\n obj2y = obj2.split(\" \")[2]\n if float(obj2x) - int(radius) <= float(obj1x) <= float(obj2x) + int(radius) and float(obj2y) - int(radius) <= float(obj1y) <= float(obj2y)+ int(radius):\n return True\n else:\n return False\n\n", "id": "665760", "language": "Python", "matching_score": 0.0387716181576252, "max_stars_count": 0, "path": "util.py" }, { "content": "__author__ = 'christopher'\n\n\nglobal player_array\nglobal last_airdrop\n\nplayer_array = []\nonline_players = []\nlast_airdrop = \"\"\nairdrops= []\nimport logger\n\nclass player_object(object):\n def __init__(self):\n self.name = \"\"\n self.entityid = 0\n self.steamid = 0\n self.ip= \"\"\n self.lastlogon = \"\"\n self.home = \"\"\n self.warned = 0\n self.location = \"\"\n self.home = \"\"\n self.health = 0\n self.deaths = 0\n self.zombies = 0\n self.players = 0\n self.score = 0\n self.ping = 0\n self.position = \"\"\n self.pois = []\n self.tprequests = []\n self.admin = False\n self.adminlevel = 0\n self.mod = False\n self.bag = \"\"\n\n\ndef player_exists(steamid):\n for person in player_array:\n if str(person.steamid) == steamid:\n return True\n return False\n\ndef player_exists_from_name(name):\n for person in player_array:\n if person.name == name:\n return True\n return False\n\ndef poi_exists(player,poiname):\n person = get_player_from_name(player)\n for poi in person.pois:\n if poiname == poi.split(\",\")[0]:\n return True\n return False\n\n\ndef add_online_player(player):\n if player not in online_players:\n online_players.append(player)\n\ndef get_online_players():\n player_list = []\n for player in online_players:\n player_list.append(player)\n return player_list\n\ndef remove_online_player(player):\n online_players.remove(player)\n\ndef get_player_from_steamid(steamid):\n for person in player_array:\n if steamid == str(person.steamid):\n return person\n\ndef get_player_from_name(name):\n for person in player_array:\n if name == person.name:\n return person\n\ndef get_poi(player,poiname):\n person = get_player_from_name(player)\n for poi in person.pois:\n if poiname == poi.split(\",\")[0]:\n poilocation = poi.split(\",\")[1]\n return poilocation\n return \"\"\n\ndef add_player(name,entityid,steamid,ip,):\n logger.log(\"memorydb adding new player\")\n player = player_object()\n player.name = name\n player.entityid = entityid\n player.steamid = steamid\n player.ip = ip\n player_array.append(player)\n\ndef update_player(pl):\n player = get_player_from_steamid(pl.steamid)\n player.location = pl.position\n player.name = pl.name\n player.health = pl.health\n player.deaths = pl.deaths\n player.zombies = pl.zombies\n player.players= pl.players\n player.score = pl.score\n player.ping = pl.ping\n\n\n\ndef add_poi(player,name):\n person = get_player_from_name(player)\n if poi_exists(player,name):\n for poi in person.pois:\n if name == poi.split(\",\")[0]:\n person.pois.remove(poi)\n person.pois.append(name + \",\" + person.location)\n else:\n person.pois.append(name + \",\" + person.location)\n\ndef remove_poi(player,name):\n for person in player_array:\n 
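# find the owning player by name, then drop the poi entry whose name matches\n        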
if player == person.name:\n for poi in person.pois:\n if name == poi.split(\",\")[0]:\n person.pois.remove(poi)\n\n\ndef remove_all_pois(player):\n person = get_player_from_name(player)\n del person.pois[:]\n\ndef set_player_home(player):\n person = get_player_from_name(player)\n person.home = person.location", "id": "11919100", "language": "Python", "matching_score": 2.005559206008911, "max_stars_count": 0, "path": "memorydb.py" }, { "content": "import sqlite3\nimport os\n\nimport memorydb\nimport logger\n\n\ndb_filename = 'players.db'\ndb_exists = os.path.exists(db_filename)\n\ndef log(info):\n print info\n\n\n\ndef create_database():\n try:\n conn = sqlite3.connect(db_filename)\n with conn:\n cur = conn.cursor()\n cur.execute(\"CREATE TABLE Players(Name TEXT,Steamid INT PRIMARY KEY,Entityid INT,IP TEXT,Home TEXT,Zombies INT,Players INT,Score INT,Bag TEXT)\")\n\n cur.execute(\"CREATE TABLE Poi(Player TEXT,Steamid INT,Name TEXT,Location TEXT,Uniqueid TEXT PRIMARY KEY)\")\n\n cur.execute(\"CREATE TABLE Airdrop(Airdrop TEXT PRIMARY KEY)\")\n conn.commit()\n except sqlite3.Error as e:\n if conn:\n conn.rollback()\n logger.log(\"Failed to create database \" + e.message)\n\n finally:\n if conn:\n conn.close()\n logger.log(\"Creating database\")\n\n\ndef load_players():\n try:\n conn = sqlite3.connect(db_filename)\n with conn:\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM Players\")\n rows = cur.fetchall()\n logger.log(\"Loading players...\")\n for row in rows:\n player = memorydb.player_object()\n player.name = row[0]\n player.steamid = row[1]\n player.entityid = row[2]\n player.ip = row[3]\n player.home = str(row[4])\n player.zombies = row[5]\n player.players = row[6]\n player.score = row[7]\n player.bag = str(row[8])\n cur.execute(\"SELECT * FROM Poi WHERE Steamid = \" + str(player.steamid))\n pois = cur.fetchall()\n for poi in pois:\n player.pois.append(str(poi[2])+\",\"+str(poi[3]))\n memorydb.player_array.append(player)\n logger.log(\"---------------------\")\n logger.log(\"Name: \" + player.name)\n logger.log(\"Steamid: \" + str(player.steamid))\n logger.log(\"IP: \" + str(player.ip))\n logger.log(\" \")\n\n except sqlite3.Error as e:\n if conn:\n conn.rollback()\n logger(e)\n\n finally:\n if conn:\n conn.close()\n\ndef load_airdrops():\n try:\n conn = sqlite3.connect(db_filename)\n with conn:\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM Airdrop\")\n rows = cur.fetchall()\n if len(rows)>0:\n logger.log(\"Loading airdrops...\")\n for row in rows:\n drop = str(row[0])\n memorydb.airdrops.append(drop)\n logger.log_verbose(drop)\n\n except sqlite3.Error as e:\n if conn:\n conn.rollback()\n logger(e)\n\n finally:\n if conn:\n conn.close()\n\n\ndef save_player(player):\n try:\n conn = sqlite3.connect(db_filename)\n with conn:\n cur = conn.cursor()\n name = player.name\n steamid = player.steamid\n entityid = player.entityid\n ip = player.ip\n home = player.home\n zombies = player.zombies\n players = player.players\n score = player.score\n bag = player.bag\n try:\n cur.execute(\"INSERT INTO Players(Name,Steamid,Entityid,IP,Home,Zombies,Players,Score,Bag) VALUES (?,?,?,?,?,?,?,?,?)\",(name,steamid,entityid,ip,home,zombies,players,score,bag))\n logger.log_verbose(\"Creating new player record\")\n conn.commit()\n except Exception as e:\n if \"UNIQUE constraint failed\" in e.message:\n logger.log_debug(\"Player record exists,updatig it\")\n cur.execute(\"UPDATE Players SET Name = ?, IP = ?,Home= ?,Zombies = ?,Players =?,Score=?,Bag=? WHERE Steamid =? 
\",(name,ip,home,zombies,players,score,bag,steamid))\n conn.commit()\n else:\n logger.log_debug(e.message)\n\n except Exception as e:\n logger.log_debug(e.message)\n\n\ndef save_poi(player,poiname,location):\n try:\n conn = sqlite3.connect(db_filename)\n with conn:\n cur = conn.cursor()\n name = player.name\n steamid = player.steamid\n try:\n cur.execute(\"INSERT INTO Poi (Player,Steamid,Name,Location,Uniqueid) VALUES (?,?,?,?,?)\",(name,steamid,poiname,location,str(steamid)+poiname))\n logger.log_verbose(\"Creating new poi record\")\n conn.commit()\n except Exception as e:\n if \"UNIQUE constraint failed\" in e.message:\n cur.execute(\"UPDATE Poi SET Player = ?, Steamid = ?,Name= ?,Location = ? WHERE Uniqueid = ? \",(name,steamid,poiname,location,str(steamid)+poiname))\n logger.log_verbose(\"Updating poi record\")\n conn.commit()\n except Exception as e:\n logger.log_debug(e.message)\n\ndef save_airdrop(drop):\n try:\n conn = sqlite3.connect(db_filename)\n with conn:\n cur = conn.cursor()\n try:\n cur.execute(\"INSERT INTO Airdrop (Airdrop) VALUES (?)\",(drop,))\n logger.log_verbose(\"Creating new airdrop record\")\n conn.commit()\n except Exception as e:\n if \"UNIQUE constraint failed\" in e.message:\n cur.execute(\"UPDATE Airdrop SET Airdrop = ?\",(drop,))\n logger.log_verbose(\"Updating airdrop record\")\n conn.commit()\n\n except Exception as e:\n logger.log_debug(e.message)\n\ndef delete_poi(player,poiname):\n try:\n conn = sqlite3.connect(db_filename)\n with conn:\n cur = conn.cursor()\n name = player.name\n steamid = player.steamid\n try:\n cur.execute(\"DELETE from Poi where Uniqueid = ? \",(str(steamid)+poiname,))\n logger.log_verbose(\"Deleting poi record \" + poiname + \" for \"+ name)\n conn.commit()\n except Exception as e:\n logger.log_debug(e.message)\n\n except Exception as e:\n logger.log_debug(e.message)\n\ndef delete_airdrop(drop):\n try:\n conn = sqlite3.connect(db_filename)\n with conn:\n cur = conn.cursor()\n try:\n cur.execute(\"DELETE from Airdrop where Airdrop = ? \",(drop,))\n logger.log_verbose(\"Deleting airdrop record \" + drop)\n conn.commit()\n except Exception as e:\n logger.log_debug(e.message)\n\n except Exception as e:\n logger.log_debug(e.message)\n\ndef delete_all_poi(player):\n try:\n conn = sqlite3.connect(db_filename)\n with conn:\n cur = conn.cursor()\n name = player.name\n steamid = player.steamid\n try:\n cur.execute(\"DELETE FROM Poi WHERE Steamid = ? 
\",(str(steamid),))\n logger.log_verbose(\"Deleteing all poi records for \" + name)\n conn.commit()\n except Exception as e:\n logger.log_debug(e.message)\n\n except Exception as e:\n logger.log_debug(e.message)\n\n\n\nif not db_exists:\n create_database()\nload_players()\nload_airdrops()\n\n\n", "id": "10102923", "language": "Python", "matching_score": 2.8110780715942383, "max_stars_count": 0, "path": "sqlitedb.py" }, { "content": "__author__ = 'christopher'\n\n\nimport memorydb\nimport sqlitedb as db\n\n\ndef save_player(name):\n player = memorydb.get_player_from_name(name)\n db.save_player(player)\n\ndef save_poi(name,poiname,location):\n player = memorydb.get_player_from_name(name)\n db.save_poi(player,poiname,location)\n\ndef delete_poi(name,poiname):\n player = memorydb.get_player_from_name(name)\n db.delete_poi(player,poiname)\n\ndef delete_all_poi(name):\n player = memorydb.get_player_from_name(name)\n db.delete_all_poi(player)\n\ndef save_airdrop(drop):\n db.save_airdrop(drop)\n\ndef delete_airdrop(drop):\n db.delete_airdrop(drop)", "id": "7970648", "language": "Python", "matching_score": 1.5226019620895386, "max_stars_count": 0, "path": "playerdb.py" }, { "content": "__author__ = 'christopher'\n\nimport parse\nimport thread\nimport commands\nimport memorydb\nimport playerdb\nimport logger\nimport runtime\nimport event\nimport util\n\n\n\n\n\ndef route(line):\n try:\n p = parse.parse_log(line)\n\n if p.type == \"Filtered\":\n pass\n\n if p.type == \"GMSG\":\n logger.log(p.formatted_text)\n\n if p.type == \"SystemEvent\":\n if p.event == \"Stats\":\n runtime.time = p.time\n runtime.fps = p.fps\n runtime.heap = p.heap\n runtime.max = p.max\n runtime.chunks = p.chunks\n\n runtime.cgo = p.cgo\n runtime.ply = p.ply\n runtime.zom = p.zom\n runtime.ent = p.ent\n runtime.items = p.items\n\n if runtime.gui:\n system_event = []\n system_event.append(\"SystemUpdate\")\n event.gui_event.append(system_event)\n\n if p.event == \"Version\":\n runtime.version = p.version\n if runtime.gui:\n system_event = []\n system_event.append(\"SystemUpdate\")\n event.gui_event.append(system_event)\n\n if p.event == \"Port\":\n runtime.server_port = p.port\n if runtime.gui:\n system_event = []\n system_event.append(\"SystemUpdate\")\n event.gui_event.append(system_event)\n\n if p.event == \"MaxPlayers\":\n runtime.max_players = p.max_players\n if runtime.gui:\n system_event = []\n system_event.append(\"SystemUpdate\")\n event.gui_event.append(system_event)\n\n if p.event == \"GameMode\":\n runtime.game_mode = p.game_mode\n if runtime.gui:\n system_event = []\n system_event.append(\"SystemUpdate\")\n event.gui_event.append(system_event)\n\n if p.event == \"World\":\n runtime.world = p.world\n if runtime.gui:\n system_event = []\n system_event.append(\"SystemUpdate\")\n event.gui_event.append(system_event)\n\n if p.event == \"GameName\":\n runtime.game_name = p.game_name\n if runtime.gui:\n system_event = []\n system_event.append(\"SystemUpdate\")\n event.gui_event.append(system_event)\n\n if p.event == \"Difficulty\":\n runtime.difficulty = p.difficulty\n if runtime.gui:\n system_event = []\n system_event.append(\"SystemUpdate\")\n event.gui_event.append(system_event)\n\n if p.type == \"GameEvent\":\n if p.event == \"Airdrop\":\n location = util.format_coor(p.location)\n memorydb.airdrops.append(p.location)\n logger.log(\"Airdrop: \" + location)\n playerdb.save_airdrop(p.location)\n if runtime.server:\n commands.say(\"Airdrop: \" + location)\n\n if p.type == \"GameEvent\":\n if p.event == \"Horde\":\n 
logger.log(\"Spawning Wandering Horde\")\n if runtime.server:\n commands.say(\"Spawning Wandering Horde\")\n\n if p.type == \"PlayerEvent\":\n if p.event == \"Connected\":\n #logger.log(\"Player Connected: \" + p.name)\n memorydb.add_online_player(p.name)\n player_event = []\n player_event.append(\"PlayerUpdate\")\n event.gui_event.append(player_event)\n if runtime.server:\n thread.start_new_thread(commands.send_motd,(p.name,))\n\n\n if p.event == \"Disconnected\":\n #logger.log(\"Player Disconnected: \" + p.name)\n memorydb.remove_online_player(p.name)\n player_event = []\n player_event.append(\"PlayerUpdate\")\n event.gui_event.append(player_event)\n\n if p.event == \"Died\":\n player = memorydb.get_player_from_name(p.name)\n player.bag = player.location\n playerdb.save_player(p.name)\n logger.log_verbose(\"Setting \" + player.name + \" revive point to: \" + util.format_coor(player.location))\n logger.log(p.formatted_text)\n if runtime.server:\n commands.pm(player.name, \"Setting your revive point to: \"+ util.format_coor(player.location))\n\n if p.event == \"Update\":\n memorydb.add_online_player(p.name)\n player_event = []\n player_event.append(\"PlayerUpdate\")\n event.gui_event.append(player_event)\n if memorydb.player_exists_from_name(p.name):\n memorydb.update_player(p)\n else:\n memorydb.add_player(p.name, p.entityid, p.steamid, p.ip)\n logger.log_verbose(\"Adding new player: \" + p.name)\n playerdb.save_player(p.name)\n\n if p.type == \"PlayerCommand\":\n if p.event == \"Sethome\":\n logger.log(p.formatted_text)\n memorydb.set_player_home(p.name)\n player = memorydb.get_player_from_name(p.name)\n logger.log(\"Setting \"+util.format_coor(player.home) + \" as home for \" + player.name)\n playerdb.save_player(p.name)\n if runtime.server:\n commands.pm(player.name,\"Home has been set to: \" + util.format_coor(player.home))\n\n\n if p.event == \"Home\":\n player = memorydb.get_player_from_name(p.name)\n logger.log(p.formatted_text)\n if player.home == \"\":\n logger.log_verbose(\"No home set for: \" + player.name)\n if runtime.server:\n commands.pm(player.name, \"You need to set a home first\")\n else:\n logger.log_verbose(\"Teleporting \"+player.name + \" to \" + util.format_coor(player.home))\n if runtime.server:\n commands.teleport(player.name,player.home)\n\n\n if p.event == \"Setpoi\":\n logger.log(p.formatted_text)\n player = memorydb.get_player_from_name(p.name)\n playerdb.save_poi(p.name,p.poiname,player.location)\n memorydb.add_poi(p.name,p.poiname)\n logger.log(\"Poi set for \"+p.name +\" with name \"+ p.poiname +\" at: \" + util.format_coor(player.location))\n if runtime.server:\n commands.pm(player.name,\"Poi \" + p.poiname + \" set: \"+ util.format_coor(player.location))\n\n\n if p.event == \"Poi\":\n logger.log(p.formatted_text)\n location = memorydb.get_poi(p.name,p.poiname)\n if location == \"\":\n if runtime.server:\n commands.pm(p.name,\"No poi with that name.\")\n else:\n logger.log(\"Teleporting \"+p.name + \" to \" + util.format_coor(location))\n if runtime.server:\n commands.teleport(p.name,location)\n\n\n if p.event == \"Listpoi\":\n logger.log(p.formatted_text)\n if runtime.server:\n player = memorydb.get_player_from_name(p.name)\n if len(player.pois) == 0:\n commands.pm(p.name,\"No pois to list\")\n for poi in player.pois:\n name = poi.split(\",\")[0]\n location = poi.split(\",\")[1]\n commands.pm(player.name,name + \": \" + util.format_coor(location))\n\n if p.event == \"Removepoi\":\n logger.log(p.formatted_text)\n if memorydb.poi_exists(p.name,p.poiname):\n 
memorydb.remove_poi(p.name,p.poiname)\n playerdb.delete_poi(p.name,p.poiname)\n if runtime.server:\n commands.pm(p.name,\"Poi \" + p.poiname+ \" has been removed\")\n\n else:\n if runtime.server:\n commands.pm(p.name,\"No poi with that name\")\n\n if p.event == \"Clearpoi\":\n logger.log(p.formatted_text)\n memorydb.remove_all_pois(p.name)\n playerdb.delete_all_poi(p.name)\n if runtime.server:\n commands.pm(p.name,\"All pois have been removed\")\n\n if p.event == \"Killme\":\n logger.log(p.formatted_text)\n if runtime.server:\n commands.kill_player(p.name)\n\n if p.event == \"Help\":\n logger.log(p.formatted_text)\n if runtime.server:\n commands.help(p.name)\n\n if p.event == \"Bag\":\n logger.log(p.formatted_text)\n if runtime.server:\n player = memorydb.get_player_from_name(p.name)\n if player.bag != \"\":\n commands.teleport(p.name,player.bag)\n\n if p.event == \"Goto\":\n logger.log(p.formatted_text)\n if runtime.server:\n if memorydb.player_exists_from_name(p.othername):\n commands.teleport(p.name,p.othername)\n else:\n commands.pm(p.name,\"Player does not exist: \" + p.othername)\n\n if p.event == \"Where\":\n logger.log(p.formatted_text)\n if runtime.server:\n player = memorydb.get_player_from_name(p.name)\n commands.pm(p.name,\"Current location: \" + util.format_coor(player.location))\n\n if p.event == \"Drop\":\n logger.log(p.formatted_text)\n if runtime.server:\n for drop in memorydb.airdrops:\n if util.is_coor_formatted(drop):\n commands.pm(p.name,\"Airdrop: \" + drop)\n else:\n commands.pm(p.name,\"Airdrop: \" + util.format_coor(drop))\n\n if p.event == \"Claim\":\n logger.log(p.formatted_text)\n found = 0\n if runtime.server:\n player = memorydb.get_player_from_name(p.name)\n obj1 = player.location\n for drop in memorydb.airdrops:\n if util.in_radius(obj1,drop,runtime.drop_claim_radius):\n memorydb.airdrops.remove(drop)\n playerdb.delete_airdrop(drop)\n if util.is_coor_formatted(drop):\n commands.pm(p.name,\"You have claimed the airdrop at: \" + str(drop))\n else:\n commands.pm(p.name,\"You have claimed the airdrop at: \" + str(util.format_coor(drop)))\n found = 1\n if found == 0:\n commands.pm(p.name,\"You need to be in a \" + str(runtime.drop_claim_radius) + \" block radius of an airdrop to claim\")\n\n if p.type == \"\":\n logger.log_verbose(p.formated_text)\n\n except Exception as e:\n print(e.message)", "id": "588471", "language": "Python", "matching_score": 4.441958427429199, "max_stars_count": 0, "path": "director.py" }, { "content": "__author__ = 'christopher'\n\n#parses the log and returns a parsed_log object\n\nimport re\nimport string\nimport memorydb\nimport logger\nfrom time import strftime\n\n\n\n\nclass ParsedLog(object):\n def __init__(self):\n self.type = \"\"\n self.event = \"\"\n self.full_text = \"\"\n\n\ndef parse_log(line):\n pl = ParsedLog()\n pl.full_text = line\n\n INF = re.search(\"INF\", line)\n WRN = re.search(\"WRN\", line)\n ERR = re.search(\"ERR\", line)\n LISTPLAYERS = re.search(\"([0-9][.]\\sid=)\", line)\n F2 = re.search(\"Total of\", line) #message from player, system or admin\n\n VERSION = re.search(\"Server version:\",line)\n PORT = re.search(\"Server port:\",line)\n MAXP = re.search(\"Max players:\",line)\n GAMEMODE = re.search(\"Game mode:\",line)\n WORLD = re.search(\"World:\",line)\n GAMENAME = re.search(\"Game name:\",line)\n DIFFICULTY = re.search(\"Difficulty:\",line)\n\n\n\n if INF:\n seperated_line = line.split(\" \")\n F1 = re.search(\"Executing command\", line) #message from player, system or admin\n GMSG = re.search(\"GMSG\", 
line) #message from player, system or admin\n CONNECTED = re.search(\"Player connected\", line) #message from player, system or admin\n DICONNECTED = re.search(\"Player disconnected\", line) #message from player, system or admin\n TIME = re.search(\"INF Time\", line)\n HORDE = re.search(\"INF Spawning Wandering Horde\",line)\n AIRDROP = re.search(\"AIAirDrop: Spawned supply crate\",line)\n\n\n\n if CONNECTED:\n player = seperated_line[6].split(\"=\")[1][:-1]\n steamid = seperated_line[7].split(\"=\")[1][:-1]\n pl.type = \"PlayerEvent\"\n pl.event = \"Connected\"\n pl.name = player\n pl.steamid = steamid\n return pl\n\n elif DICONNECTED:\n player = seperated_line[8].split(\"=\")[1][:-1].replace(\"'\",\"\")\n steamid = seperated_line[7].split(\"=\")[1][:-1].replace(\"'\",\"\")\n pl.type = \"PlayerEvent\"\n pl.event = \"Disconnected\"\n pl.name = player\n pl.steamid = steamid\n return pl\n\n elif TIME:\n try:\n pl.type = \"SystemEvent\"\n pl.event = \"Stats\"\n pl.time = seperated_line[4]\n pl.fps = seperated_line[6]\n pl.heap = seperated_line[8]\n pl.max = seperated_line[10]\n pl.chunks = seperated_line[12]\n pl.cgo = seperated_line[14]\n pl.ply = seperated_line[16]\n pl.zom = seperated_line[18]\n pl.ent = seperated_line[20] + \" \" + seperated_line[21]\n pl.items = seperated_line[23]\n return pl\n except Exception as e:\n return pl\n logger.log_debug(\"Error parsing stats update: \"+e.message)\n\n elif AIRDROP:\n try:\n pl.type = \"GameEvent\"\n pl.event = \"Airdrop\"\n seperated_line = line.replace(\"(\",\"\").replace(\")\",\"\").replace(\",\",\"\").split()\n pl.x = seperated_line[8]\n pl.y = seperated_line[10]\n pl.z = seperated_line[9]\n pl.location = seperated_line[8] + \" \" + seperated_line[9] + \" \" + seperated_line[10]\n return pl\n\n except Exception as e:\n logger.log(line)\n logger.log_debug(\"Error parsing airdrop info\")\n return pl\n\n elif HORDE:\n pl.type = \"GameEvent\"\n pl.event = \"Horde\"\n return pl\n\n elif F1:\n pl.type = \"Filtered\"\n return pl\n\n\n\n elif GMSG:\n player = seperated_line[4][:-1]\n SETHOME = re.search(\"/sethome\", line)\n HOME = re.search(\"/home\", line)\n RANDOM = re.search(\"/random\", line)\n SETPOI = re.search(\"/setpoi\", line)\n POI = re.search(\"/poi\", line)\n RPOI = re.search(\"/rpoi\", line)\n LISTPOI = re.search(\"/listpoi\", line)\n LPOI = re.search(\"/lpoi\", line)\n CLEARPOI = re.search(\"/clearpoi\", line)\n KILLME = re.search(\"/killme\", line)\n GOTO = re.search(\"/goto\", line)\n HELP = re.search(\"/help\", line)\n BAG = re.search(\"/bag\",line)\n DIED = re.search(\"died\",line)\n WHERE = re.search(\"/where\",line)\n DROP = re.search(\"/drop\",line)\n CLAIM = re.search(\"/claim\",line)\n\n if DIED:\n try:\n player = seperated_line[5]\n pl.formatted_text = \" \".join(pl.full_text.split()[4:])\n pl.type = \"PlayerEvent\"\n pl.event = \"Died\"\n pl.name = player\n return pl\n except Exception as e:\n logger.log(\"Error parsing died system message: \"+ e.message)\n return pl\n\n elif HELP:\n pl.formatted_text = \" \".join(pl.full_text.split()[4:])\n pl.type = \"PlayerCommand\"\n pl.event = \"Help\"\n pl.name = player\n return pl\n\n elif SETHOME:\n pl.formatted_text = \" \".join(pl.full_text.split()[4:])\n pl.type = \"PlayerCommand\"\n pl.event = \"Sethome\"\n pl.name = player\n return pl\n\n elif HOME:\n pl.formatted_text = \" \".join(pl.full_text.split()[4:])\n pl.type = \"PlayerCommand\"\n pl.event = \"Home\"\n pl.name = player\n return pl\n\n elif SETPOI:\n try:\n pl.formatted_text = \" \".join(pl.full_text.split()[4:])\n 
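# the poi name is taken as token index 6 of the space-split log line\n                    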
poiname = seperated_line[6]\n pl.type = \"PlayerCommand\"\n pl.event = \"Setpoi\"\n pl.name = player\n pl.poiname = poiname\n return pl\n except Exception as e:\n logger.log_verbose(\"Error parsing setpoi command: \"+ e.message)\n return pl\n\n elif POI:\n try:\n pl.formatted_text = \" \".join(pl.full_text.split()[4:])\n poiname = seperated_line[6]\n pl.type = \"PlayerCommand\"\n pl.event = \"Poi\"\n pl.name = player\n pl.poiname = poiname\n return pl\n except Exception as e:\n logger.log_verbose(\"Error parsing poi command: \"+ e.message)\n return pl\n\n\n elif LISTPOI or LPOI:\n pl.formatted_text = \" \".join(pl.full_text.split()[4:])\n pl.type = \"PlayerCommand\"\n pl.event = \"Listpoi\"\n pl.name = player\n return pl\n\n elif RPOI:\n try:\n pl.formatted_text = \" \".join(pl.full_text.split()[4:])\n poiname = seperated_line[6]\n pl.type = \"PlayerCommand\"\n pl.event = \"Removepoi\"\n pl.name = player\n pl.poiname = poiname\n return pl\n except Exception as e:\n logger.log(\"Error parsing rpoi command: \"+ e.message)\n return pl\n\n elif CLEARPOI:\n pl.formatted_text = \" \".join(pl.full_text.split()[4:])\n pl.type = \"PlayerCommand\"\n pl.event = \"Clearpoi\"\n pl.name = player\n return pl\n\n elif GOTO:\n try:\n pl.formatted_text = \" \".join(pl.full_text.split()[4:])\n othername = seperated_line[6]\n pl.type = \"PlayerCommand\"\n pl.event = \"Goto\"\n pl.name = player\n pl.othername = othername\n return pl\n except Exception as e:\n logger.log(\"Error parsing goto command: \"+ e.message)\n return pl\n\n elif BAG:\n pl.formatted_text = \" \".join(pl.full_text.split()[4:])\n pl.type = \"PlayerCommand\"\n pl.event = \"Bag\"\n pl.name = player\n return pl\n\n elif KILLME:\n pl.formatted_text = \" \".join(pl.full_text.split()[4:])\n pl.type = \"PlayerCommand\"\n pl.event = \"Killme\"\n pl.name = player\n return pl\n\n elif WHERE:\n pl.formatted_text = \" \".join(pl.full_text.split()[4:])\n pl.type = \"PlayerCommand\"\n pl.event = \"Where\"\n pl.name = player\n return pl\n\n elif DROP:\n pl.formatted_text = \" \".join(pl.full_text.split()[4:])\n pl.type = \"PlayerCommand\"\n pl.event = \"Drop\"\n pl.name = player\n return pl\n\n elif CLAIM:\n pl.formatted_text = \" \".join(pl.full_text.split()[4:])\n pl.type = \"PlayerCommand\"\n pl.event = \"Claim\"\n pl.name = player\n #pl.drop = seperated_line[6] +\" \" + seperated_line[7] + \" \" + seperated_line[8] + \" \" + seperated_line[9] + \" \" + seperated_line[10]\n return pl\n\n else:\n pl.type = \"GMSG\"\n pl.event = \"Msg\"\n pl.formatted_text = \" \".join(pl.full_text.split()[4:])\n return pl\n\n else:\n pl.formated_text = \" \".join(pl.full_text.split()[3:])\n return pl\n\n elif LISTPLAYERS:\n try:\n position = str(int(round(float(line.split(\" \")[3].replace(\"pos=(\", \"\").replace(\",\", \"\"))))) + \" \" + str(int(round(float(line.split(\" \")[4].replace(\",\", \"\"))))) + \" \" + str(int(round(float(line.split(\" \")[5].replace(\"),\", \"\")))))\n entityid = line.split(\",\")[0].split(\"=\")[1]\n name = line.split(\",\")[1].replace(\" \", \"\")\n health = line.split(\",\")[9].split(\"=\")[1]\n deaths = line.split(\",\")[10].split(\"=\")[1]\n zombies = line.split(\",\")[11].split(\"=\")[1]\n players = line.split(\",\")[12].split(\"=\")[1]\n score = line.split(\",\")[13].split(\"=\")[1]\n steamid = line.split(\",\")[15].split(\"=\")[1]\n ip = line.split(\",\")[16].split(\"=\")[1]\n ping = line.split(\",\")[17].split(\"=\")[1].rstrip(string.whitespace)\n\n pl.type = \"PlayerEvent\"\n pl.event = \"Update\"\n pl.entityid = entityid\n 
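# copy the remaining parsed listplayers fields onto the update event\n            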
pl.position = position\n pl.name = name\n pl.health = health\n pl.deaths = deaths\n pl.zombies = zombies\n pl.players= players\n pl.score = score\n pl.steamid = steamid\n pl.ip = ip\n pl.ping = ping\n return pl\n\n\n except Exception as e:\n logger.log_debug(\"Error parsing player update: \"+ e.message)\n return pl\n\n elif WRN:\n pl.formated_text = \" \".join(pl.full_text.split()[3:])\n return pl\n\n elif ERR:\n pl.formated_text = \" \".join(pl.full_text.split()[3:])\n return pl\n\n elif F2:\n pl.type = \"Filtered\"\n return pl\n\n elif VERSION:\n pl.type = \"SystemEvent\"\n pl.event = \"Version\"\n pl.version = line.split()[3] + \" \" + line.split()[4] + \" \"+ line.split()[5]\n return pl\n\n elif PORT:\n pl.type = \"SystemEvent\"\n pl.event = \"Port\"\n pl.port = line.split()[2]\n return pl\n\n elif MAXP:\n pl.type = \"SystemEvent\"\n pl.event = \"MaxPlayers\"\n pl.max_players = line.split()[2]\n return pl\n\n elif GAMEMODE:\n pl.type = \"SystemEvent\"\n pl.event = \"GameMode\"\n pl.game_mode = line.split()[2]\n return pl\n\n elif WORLD:\n pl.type = \"SystemEvent\"\n pl.event = \"World\"\n pl.world = line.split()[1]\n return pl\n\n elif GAMENAME:\n pl.type = \"SystemEvent\"\n pl.event = \"GameName\"\n pl.game_name = line.split()[2]\n return pl\n\n elif DIFFICULTY:\n pl.type = \"SystemEvent\"\n pl.event = \"Difficulty\"\n pl.difficulty = line.split()[1]\n return pl\n\n else:\n pl.formated_text = pl.full_text\n return pl\n\n\n\n\n\n", "id": "5819034", "language": "Python", "matching_score": 0.9991661310195923, "max_stars_count": 0, "path": "parse.py" }, { "content": "__author__ = 'christopher'\n\n\n\nfrom Tkinter import *\nfrom ttk import *\nimport memorydb\nimport telconn\nimport threading\nimport logger\nimport event\nimport runtime\nimport time\nimport config\n\n\n\nselected_player = \"\"\n\ndef toggle_verbose():\n if verbose_chk.get() == 1:\n runtime.verbose = True\n else:\n runtime.verbose = False\n\ndef toggle_debug():\n if debug_chk.get() == 1:\n runtime.debug = True\n else:\n runtime.debug = False\n\ndef toggle_server():\n if server_chk.get() == 1:\n runtime.server = True\n else:\n runtime.server = False\n\ndef save_settings():\n runtime.host = host_input.get()\n runtime.port = port_input.get()\n runtime.drop_claim_radius = claim_input.get()\n config.save_config()\n \nroot = Tk()\nroot.title(\"7dtd Telnet Client\")\nroot.columnconfigure(0, weight=1)\nroot.rowconfigure(0, weight=1)\nnote = Notebook(root)\nnote.columnconfigure(0, weight=1)\nnote.rowconfigure(0, weight=1)\nnote.grid(sticky=NSEW)\n\n#console_tab stuff here\nconsole_tab = Frame(note)\nconsole_tab.columnconfigure(0, weight=1)\nconsole_tab.columnconfigure(1, weight=1)\nconsole_tab.rowconfigure(0, weight=1)\ntextbox = Text(console_tab, height=20, width=80)\ntextbox.grid(row=0, column=0, sticky=NSEW, columnspan=2)\ntextbox.columnconfigure(0, weight=1)\ntextbox.rowconfigure(0, weight=1)\nplayerbox = Text(console_tab, height=20, width=20)\nplayerbox.grid(row=0, column=3, sticky=N+S)\ncommandLabel = Label(console_tab, width=10, text=\"Command:\")\ncommandLabel.grid(row=1, column=0, sticky=W)\ninput = Entry(console_tab, width=80)\ninput.grid(row=1, column=1, sticky=E+W, columnspan=3)\n\n#players_tab stuff here\nplayers_tab = Frame(note)\nplayers_tab.columnconfigure(0, weight=1)\nplayers_tab.rowconfigure(0, weight=1)\nplayerlist = Listbox(players_tab, height=20, width=20)\nplayerlist.grid(row=0,column=1,sticky=N+S)\ninfobox = Text(players_tab, height=10, width=80)\ninfobox.grid(row=0, column=0, sticky=N+E+W)\n\n#settings_tab 
stuff here\nsettings_tab = Frame(note)\nsettings_tab.rowconfigure(0, weight=1)\nsettings_tab.rowconfigure(1, weight=1)\nsettings_tab.rowconfigure(2, weight=1)\nsettings_tab.rowconfigure(3, weight=1)\nsettings_tab.rowconfigure(4, weight=1)\nsettings_tab.rowconfigure(5, weight=1)\nsettings_tab.rowconfigure(6, weight=1)\n\nverbose_chk = IntVar()\nverbose_checkbox = Checkbutton(settings_tab,text = \"Verbose Logging\",command = toggle_verbose,variable=verbose_chk)\nverbose_checkbox.grid(row=0, column=0, sticky=W)\nif runtime.verbose:\n verbose_chk.set(1)\n\ndebug_chk = IntVar()\ndebug_checkbox = Checkbutton(settings_tab,text = \"Debug Logging\",command = toggle_debug,variable=debug_chk)\ndebug_checkbox.grid(row=1, column=0, sticky=W)\nif runtime.debug:\n debug_chk.set(1)\n\nserver_chk = IntVar()\nserver_checkbox = Checkbutton(settings_tab,text = \"Server\",command = toggle_server,variable=server_chk)\nserver_checkbox.grid(row=2, column=0, sticky=W)\nif runtime.server:\n server_chk.set(1)\n\nmotd_label = Label(settings_tab, width=10, text=\"MOTD: \")\nmotd_label.grid(row=3, column=0)\nmotd_input = Entry(settings_tab, width=30)\nmotd_input.grid(row=3, column=1, sticky=W)\nmotd_input.insert(0,runtime.motd)\n\nhost_label = Label(settings_tab, width=10, text=\"Host: \")\nhost_label.grid(row=4, column=0)\nhost_input = Entry(settings_tab, width=15)\nhost_input.grid(row=4, column=1, sticky=W)\nhost_input.insert(0,runtime.host)\n\nport_label = Label(settings_tab, width=10, text=\"Port: \")\nport_label.grid(row=5, column=0)\nport_input = Entry(settings_tab, width=15)\nport_input.grid(row=5, column=1, sticky=W)\nport_input.insert(0,runtime.port)\n\nclaim_label = Label(settings_tab, width=10, text=\"Claim Radius: \")\nclaim_label.grid(row=6, column=0)\nclaim_input = Entry(settings_tab, width=15)\nclaim_input.grid(row=6, column=1, sticky=W)\nclaim_input.insert(0,runtime.drop_claim_radius)\n\n\n\n\n\nsave_btn = Button(settings_tab,text = \"Save\",command = save_settings)\nsave_btn.grid(row=7, column=0, sticky=E)\nspacer = Label(settings_tab).grid(row=8,column=0)\n\n#system_tab stuff here\nsystem_tab = Frame(note)\n\ntime_var = StringVar()\ntime_var.set(\"Time: 0m\")\ntime_label = Label(system_tab, width=15, textvariable = time_var)\ntime_label.grid(row=0, column=0)\n\nfps_var = StringVar()\nfps_var.set(\"FPS: 0\")\nfps_label = Label(system_tab, width=15, textvariable = fps_var)\nfps_label.grid(row=1, column=0)\n\nheap_var = StringVar()\nheap_var.set(\"Heap: 0MB\")\nheap_label = Label(system_tab, width=15, textvariable = heap_var)\nheap_label.grid(row=2, column=0)\n\nmax_var = StringVar()\nmax_var.set(\"Max: 0MB\")\nmax_label = Label(system_tab, width=15, textvariable = max_var)\nmax_label.grid(row=3, column=0)\n\nchunks_var = StringVar()\nchunks_var.set(\"Chunks: 0\")\nchunks_label = Label(system_tab, width=15, textvariable = chunks_var)\nchunks_label.grid(row=4, column=0)\n\ncgo_var = StringVar()\ncgo_var.set(\"CGO: 0M\")\ncgo_label = Label(system_tab, width=15, textvariable = cgo_var)\ncgo_label.grid(row=5, column=0)\n\nply_var = StringVar()\nply_var.set(\"PLY: 0\")\nply_label = Label(system_tab, width=15, textvariable = ply_var)\nply_label.grid(row=6, column=0)\n\nzom_var = StringVar()\nzom_var.set(\"Zom: 0\")\nzom_label = Label(system_tab, width=15, textvariable = zom_var)\nzom_label.grid(row=7, column=0)\n\nent_var = StringVar()\nent_var.set(\"ENT: 0\")\nent_label = Label(system_tab, width=15, textvariable = ent_var)\nent_label.grid(row=8, column=0)\n\nitems_var = StringVar()\nitems_var.set(\"Items: 
0\")\nitems_label = Label(system_tab, width=15, textvariable = items_var)\nitems_label.grid(row=9, column=0)\n\nversion_var = StringVar()\nversion_var.set(\"Version: 0\")\nversion_label = Label(system_tab, width=30, textvariable = version_var)\nversion_label.grid(row=0, column=1)\n\nport_var = StringVar()\nport_var.set(\"Port: 0\")\nport_label = Label(system_tab, width=30, textvariable = port_var)\nport_label.grid(row=1, column=1)\n\nmax_players_var = StringVar()\nmax_players_var.set(\"Max Players: 0\")\nmax_players_label = Label(system_tab, width=30, textvariable = max_players_var)\nmax_players_label.grid(row=2, column=1)\n\ngame_mode_var = StringVar()\ngame_mode_var.set(\"Game Mode: 0\")\ngame_mode_label = Label(system_tab, width=30, textvariable = game_mode_var)\ngame_mode_label.grid(row=3, column=1)\n\nworld_var = StringVar()\nworld_var.set(\"World: 0\")\nworld_label = Label(system_tab, width=30, textvariable = world_var)\nworld_label.grid(row=4, column=1)\n\ngame_name_var = StringVar()\ngame_name_var.set(\"Game Name: 0\")\ngame_name_label = Label(system_tab, width=30, textvariable = game_name_var)\ngame_name_label.grid(row=5, column=1)\n\ndifficulty_var = StringVar()\ndifficulty_var.set(\"Difficulty: 0\")\ndifficulty_label = Label(system_tab, width=30, textvariable = difficulty_var)\ndifficulty_label.grid(row=6, column=1)\n\n\n\n\ndef show_player_info(name):\n infobox.delete(\"1.0\", END)\n player = memorydb.get_player_from_name(name)\n infobox.insert(END, \"Name:\"+ player.name+ \"\\n\")\n infobox.insert(END, \"SteamID:\"+ str(player.steamid)+ \"\\n\")\n infobox.insert(END, \"IP:\"+ player.ip+ \"\\n\")\n infobox.insert(END, \"Last Location:\"+ player.location+ \"\\n\")\n\ndef addInfo(info):\n\ttextbox.insert(END,info + '\\n')\n\ttextbox.see(END)\n\ndef refreshPlayerList():\n if int(playerbox.index('end-1c').split(\".\")[0])-1 != len(memorydb.online_players):\n playerbox.delete(\"1.0\", END)\n online_players = memorydb.get_online_players()\n for player in online_players:\n playerbox.insert(\"1.0\",player+ \"\\n\")\n\ndef refreshInfoList():\n if int(playerlist.index('end')) != len(memorydb.player_array):\n playerlist.delete(0, END)\n for player in memorydb.player_array:\n playerlist.insert(1, player.name)\n\ndef func(event):\n cmd = input.get()\n telconn.write_out(cmd)\n logger.log(\"Command sent: \" + cmd)\n input.delete(0, END)\n\ndef listclick(e):\n show_player_info(str(playerlist.get(playerlist.curselection())))\n\ndef set_motd(e):\n runtime.motd = motd_input.get()\n\ndef refresh_system_stats():\n time_var.set(\"Time: \" + str(runtime.time))\n fps_var.set(\"FPS: \" + str(runtime.fps))\n heap_var.set(\"Heap: \" + str(runtime.heap))\n max_var.set(\"Max: \" + str(runtime.max))\n chunks_var.set(\"Chunks: \" + str(runtime.chunks))\n cgo_var.set(\"CGO: \" + str(runtime.cgo))\n ply_var.set(\"PLY: \" + str(runtime.ply))\n zom_var.set(\"Zom: \" + str(runtime.zom))\n ent_var.set(\"Ent: \" + str(runtime.ent))\n items_var.set(\"Items: \" + str(runtime.items))\n\n version_var.set(\"Version: \" + str(runtime.version))\n port_var.set(\"Port: \" + runtime.server_port )\n max_players_var.set(\"Max Players: \" + runtime.max_players)\n game_mode_var.set(\"Game Mode: \" +runtime.game_mode )\n world_var.set(\"World: \" +runtime.world )\n game_name_var.set(\"Game Name: \" + runtime.game_name)\n difficulty_var.set(\"Difficulty: \" + runtime.difficulty)\n\n\ndef handler():\n runtime.run = False\n root.destroy()\n telconn.write_out(\"exit\")\n\n\ninput.bind('<Return>', func)\nnote.add(console_tab, 
text=\"Console\", compound=TOP)\n\nplayerlist.bind('<<ListboxSelect>>', listclick)\nnote.add(players_tab, text = \"Players\")\n\nmotd_input.bind('<KeyRelease>',set_motd)\n\nnote.add(settings_tab, text = \"Settings\")\n\nnote.add(system_tab, text = \"System\")\n\nroot.protocol(\"WM_DELETE_WINDOW\", handler)\n\n\n\n\ndef update():\n while runtime.run:\n time.sleep(.1)\n for event_record in event.gui_event: # this needs to be fixed but works for now\n if event.gui_event[-1][0] == \"Log\":\n addInfo(event.gui_event[-1][1])\n event.gui_event.pop()\n\n if event_record[0] == \"PlayerUpdate\":\n refreshPlayerList()\n refreshInfoList()\n event.gui_event.pop()\n\n if event_record[0] == \"SystemUpdate\":\n refresh_system_stats()\n event.gui_event.pop()\n\n\n\n\ndef start():\n if runtime.gui:\n refreshInfoList()\n threading._start_new_thread(update, ())\n root.mainloop()\n\n\n", "id": "5731073", "language": "Python", "matching_score": 4.7910661697387695, "max_stars_count": 0, "path": "gui.py" }, { "content": "__author__ = 'christopher'\n\nglobal host\nglobal port\nglobal password\nglobal motd\nglobal gui\nglobal server\nglobal verbose\nglobal debug\nglobal run\n\nglobal time\nglobal fps\nglobal heap\nglobal max\nglobal chunks\nglobal cgo\nglobal ply\nglobal zom\nglobal ent\nglobal items\n\nglobal version\nglobal server_port\nglobal max_players\nglobal game_mode\nglobal world\nglobal game_name\nglobal difficulty\n\nglobal drop_claim_radius\n\nhost = \"localhost\"\nport = 81\npassword = \"\"\nmotd = \"\"\ngui = False\nserver = True\nverbose = False\ndebug = True\nrun = True\n\n\ntime = 0\nfps = 0\nheap = 0\nmax = 0\nchunks=0\ncgo = 0\nply = 0\nzom = 0\nent = 0\nitems = 0\n\nversion = \"\"\nserver_port = \"\"\nmax_players = \"\"\ngame_mode = \"\"\nworld = \"\"\ngame_name = \"\"\ndifficulty = \"\"\n\ndrop_claim_radius = 20", "id": "6526619", "language": "Python", "matching_score": 0.2531437277793884, "max_stars_count": 0, "path": "runtime.py" }, { "content": "from cmath import log\nfrom platform import release\nimport re\nimport time\nimport config\nimport os\nfrom os.path import exists\nimport json\nfrom fuzzywuzzy import process\nimport logger\n\nSETTINGS = config.get_config()\n\nfuzzy_match_threshold = SETTINGS['main']['fuzzy_match_threshold']\n\nuse_fuzzy_match = SETTINGS['main']['fuzzy_match_images']\nimages_folder = \"\"\nif \"images_folder\" in SETTINGS['main']:\n images_folder = SETTINGS['main']['images_folder']\n\n# https://github.com/orgs/libretro-thumbnails/repositories\n\nfolder_map = {}\n\nif os.path.exists('folder_map.json'):\n with open('folder_map.json') as map_json:\n folder_map = json.load(map_json)\n\ndef fuzzy_match(name,folder):\n files = os.listdir(folder)\n if len(files) > 0:\n highest = process.extractOne(name,files)\n if highest[1] < fuzzy_match_threshold:\n logger.info(f\"Closest match {highest[0]}, match {highest[1]} under threshold of {fuzzy_match_threshold}\")\n return \"\"\n else:\n logger.info(f\"Closest match {highest[0]}, match {highest[1]} at or above threshold of {fuzzy_match_threshold}\")\n return(highest[0])\n else:\n return \"\"\n\ndef get_boxart(system,game,release_name=\"\"):\n if system in folder_map:\n boxart = os.path.join(images_folder, folder_map[system], folder_map['boxart_folder'], f'{game}.png')\n if not exists(boxart):\n logger.info(f\"No boxart art found for {game}\")\n if use_fuzzy_match and game != \"\":\n folder = os.path.join(images_folder, folder_map[system],folder_map['boxart_folder'])\n logger.info(f\"Fuzzy match enabled, attempting to match {game} 
for boxart in {folder}\")\n matched = \"\"\n try:\n matched = fuzzy_match(game,folder)\n except Exception as e:\n pass\n if matched == \"\" and release_name != \"\":\n logger.info(f\"Fuzzy match on {game} failed, attempting to match {release_name} for title in {folder}\")\n try:\n matched = fuzzy_match(release_name,folder)\n except Exception as e:\n pass\n if matched != \"\":\n boxart = os.path.join(images_folder, folder_map[system], folder_map['boxart_folder'], matched)\n if not exists(boxart):\n boxart = os.path.join(images_folder, folder_map[system], folder_map['boxart_folder'], f'default.png')\n else:\n logger.info(f\"Match {matched} found\")\n else:\n logger.info(f\"No match {matched} found\")\n boxart = os.path.join(images_folder, folder_map[system], folder_map['boxart_folder'], f'default.png')\n else:\n boxart = os.path.join(images_folder, folder_map[system], folder_map['boxart_folder'], f'default.png')\n else:\n logger.info(f\"Boxart art found for {game}\")\n return boxart\n else:\n logger.info(f'Boxart: {system} not found in folder map. Returning \"\"')\n return \"\"\ndef get_snap(system,game,release_name=\"\"):\n if system in folder_map:\n snap = os.path.join(images_folder, folder_map[system],folder_map['snap_folder'], f'{game}.png')\n if not exists(snap):\n logger.info(f\"No boxart art found for {game}\")\n if use_fuzzy_match and game != \"\":\n folder = os.path.join(images_folder, folder_map[system],folder_map['snap_folder'])\n logger.info(f\"Fuzzy match enabled, attempting to match {game} for snap in {folder}\")\n matched = \"\"\n try:\n matched = fuzzy_match(game,folder)\n except Exception as e:\n pass\n if matched == \"\" and release_name != \"\":\n logger.info(f\"Fuzzy match on {game} failed, attempting to match {release_name} for title in {folder}\")\n try:\n matched = fuzzy_match(release_name,folder)\n except Exception as e:\n pass\n if matched != \"\":\n snap = os.path.join(images_folder, folder_map[system], folder_map['snap_folder'], matched)\n if not exists(snap):\n snap = os.path.join(images_folder, folder_map[system], folder_map['snap_folder'], f'default.png')\n else:\n logger.info(f\"Match {matched} found\")\n else:\n logger.info(f\"No match {matched} found\")\n snap = os.path.join(images_folder, folder_map[system], folder_map['snap_folder'], f'default.png')\n else:\n snap = os.path.join(images_folder, folder_map[system], folder_map['snap_folder'], f'default.png')\n else:\n logger.info(f\"Snap art found for {game}\")\n return snap\n else:\n logger.info(f'Snap: {system} not found in folder map. 
Returning \"\"')\n return \"\"\n\ndef get_title(system,game,release_name=\"\"):\n if system in folder_map:\n title = os.path.join(images_folder, folder_map[system],folder_map['title_folder'], f'{game}.png')\n if not exists(title):\n logger.info(f\"No title art found for {game}\")\n if use_fuzzy_match and game != \"\":\n folder = os.path.join(images_folder, folder_map[system],folder_map['title_folder'])\n logger.info(f\"Fuzzy match enabled, attempting to match {game} for title in {folder}\")\n matched = \"\"\n try:\n matched = fuzzy_match(game,folder)\n except Exception as e:\n pass\n if matched == \"\" and release_name != \"\":\n logger.info(f\"Fuzzy match on {game} failed, attempting to match {release_name} for title in {folder}\")\n try:\n matched = fuzzy_match(release_name,folder)\n except Exception as e:\n pass\n if matched != \"\":\n title = os.path.join(images_folder, folder_map[system], folder_map['title_folder'], matched)\n if not exists(title):\n title = os.path.join(images_folder, folder_map[system],folder_map['title_folder'], f'default.png')\n else:\n logger.info(f\"Match {matched} found\")\n else:\n logger.info(f\"No match {matched} found\")\n title = os.path.join(images_folder, folder_map[system],folder_map['title_folder'], f'default.png')\n else:\n title = os.path.join(images_folder, folder_map[system], folder_map['title_folder'], f'default.png')\n else:\n logger.info(f\"Title art found for {game}\")\n return title\n else:\n logger.info(f'Title: {system} not found in folder map. Returning \"\"')\n return \"\"\n\ndef get_system(system):\n if system in folder_map:\n system_image = os.path.join(images_folder,folder_map['system_folder'], f'{system.replace(\"/\",\"-\")}.png')\n if not exists(system_image):\n system_image = os.path.join(images_folder, folder_map['system_folder'], f'default.png')\n logger.info(f\"System art found for {system}\")\n return system_image\n else:\n logger.info(f'System: {system} not found in folder map. Returning \"\"')\n return \"\"\n\ndef get_image(image_type,system,game,release_name=\"\"):\n if image_type == \"boxart\":\n return get_boxart(system,game,release_name)\n if image_type == \"snap\":\n return get_snap(system,game,release_name)\n if image_type == \"title\":\n return get_title(system,game,release_name)\n if image_type == \"system\":\n return get_system(system)\n return image_type\n\nif __name__ == \"__main__\":\n #print(get_game_images(\"Nintendo Game Boy Advance\", \"Punisher, The (USA)\")[0])\n #fuzzy_match(\"Arkanoid (Unl. 
Lives, slower) [hb]\")\n fuzzy_match(\"Street Fighter Alpha 2 (EU, 960229)\")", "id": "2303714", "language": "Python", "matching_score": 2.369981527328491, "max_stars_count": 0, "path": "images.py" }, { "content": "from glob import glob\nfrom time import sleep\nimport json\nfrom wsgiref.util import request_uri\nimport config\nimport time\nfrom flask import Flask, request, Response, redirect, send_file, render_template\nimport threading\nimport event_manager\nimport logger\nfrom PIL import Image\nfrom io import BytesIO\nimport os\nimport base64\nimport images\nimport sys\n\nif getattr(sys, 'frozen', False):\n template_folder = os.path.join(sys._MEIPASS, 'templates')\n static_folder = os.path.join(sys._MEIPASS, 'static')\n app = Flask(__name__, template_folder=template_folder, static_folder=static_folder)\nelse:\n app = Flask(__name__)\n\nSETTINGS = config.get_config()\n\ndetails = {}\nlast_details = {}\n\n\ndef image_to_base64(image):\n with Image.open(image) as img:\n img = Image.open(image) \n im_file = BytesIO()\n img.convert('RGB').save(im_file, format=\"PNG\")\n im_bytes = im_file.getvalue()\n im_b64 = base64.b64encode(im_bytes).decode('utf-8')\n bas64String = \"data:image/png;base64,\" + im_b64\n return bas64String\n\ndef shutdown():\n func = request.environ.get('werkzeug.server.shutdown')\n if func is None:\n raise RuntimeError('Not running with the Werkzeug Server')\n func()\n\[email protected]('/')\ndef index():\n return render_template('details.html')\n\[email protected]('/details')\ndef game_details():\n def get_game_details():\n json_data = json.dumps(details)\n yield f\"data:{json_data}\\n\\n\"\n time.sleep(int(SETTINGS[\"dashboard\"][\"refresh_rate\"]))\n\n return Response(get_game_details(), mimetype='text/event-stream')\n\ndef initialize():\n threading.Thread(target=lambda: app.run(host=SETTINGS[\"dashboard\"][\"host\"], threaded=True, port=int(SETTINGS[\"dashboard\"][\"port\"]), debug=False, use_reloader=False)).start()\n \n\n\n\ndef handle_event(event,action):\n global details\n details = event.tokens.copy()\n details[\"boxart_image\"] = \"\"\n details[\"snap_image\"] = \"\"\n details[\"title_image\"] = \"\"\n details[\"system_image\"] = \"\"\n \n boxart = \"\"\n snap = \"\"\n title = \"\"\n system = \"\"\n\n if \"system\" in details:\n system = details[\"system\"]\n if \"rom_extensionless_file_name\" in details:\n boxart = images.get_boxart(system,details[\"rom_extensionless_file_name\"],details[\"release_name\"])\n snap = images.get_snap(system,details[\"rom_extensionless_file_name\"],details[\"release_name\"])\n title = images.get_title(system,details[\"rom_extensionless_file_name\"],details[\"release_name\"])\n system = images.get_system(system)\n\n if os.path.exists(boxart):\n details[\"boxart_image\"] = image_to_base64(boxart)\n if os.path.exists(snap):\n details[\"snap_image\"] = image_to_base64(snap)\n if os.path.exists(title):\n details[\"title_image\"] = image_to_base64(title)\n if os.path.exists(system):\n details[\"system_image\"] = image_to_base64(system)\n \n\n\nevent_manager.subscribers[\"Dashboard\"] = {}\nevent_manager.subscribers[\"Dashboard\"][\"initialize\"] = lambda:initialize()\nevent_manager.subscribers[\"Dashboard\"][\"handle_event\"] = {'function': handle_event, 'arg': \"args\"}\n\n\n", "id": "6111328", "language": "Python", "matching_score": 3.6470847129821777, "max_stars_count": 0, "path": "dashboard.py" }, { "content": "from time import sleep\nimport json\nimport config\nimport mister\nimport cores\nimport time\nfrom flask import Flask, request, 
Response, redirect, send_file, render_template\nimport openvgdb\n\napp = Flask(__name__)\nquit = False\nSETTINGS = config.get_config()\nmaps = cores.read_file_map()\nRECENTS_FOLDER = '/media/{}/config/'.format(SETTINGS['core_storage'])\n\ndetails = {}\n\n\[email protected]('/')\ndef index():\n return render_template('details.html')\n\n\[email protected]('/details')\ndef game_details():\n def get_game_details():\n while True:\n global details\n core = mister.get_running_core()\n #map_core = cores.get_map(core)\n game,filepath,filename = mister.get_last_game(core)\n if game != \"\": \n hash = mister.get_file_hash(filepath,filename)\n rom = openvgdb.get_rom_by_hash(hash)\n details[\"rom_id\"] = rom[0]\n details[\"system_id\"] = rom[1]\n #details[\"name\"] = rom[8]\n #details[\"region\"] = rom[13]\n release = openvgdb.get_release_by_rom_id(rom[0])\n details[\"name\"] = release[2]\n details[\"region\"] = release[4]\n details[\"front_cover\"] = release[7]\n details[\"back_cover\"] = release[8]\n details[\"description\"] = release[11]\n details[\"developer\"] = release[12]\n details[\"publisher\"] = release[13]\n details[\"genre\"] = release[14]\n details[\"release_date\"] = release[15]\n details[\"gamefaqs\"] = release[16]\n json_data = json.dumps(details)\n yield f\"data:{json_data}\\n\\n\"\n time.sleep(1)\n\n return Response(get_game_details(), mimetype='text/event-stream')\n\n\napp.run(threaded=True,host='0.0.0.0', port=8080)\n\n", "id": "4808327", "language": "Python", "matching_score": 5.221597671508789, "max_stars_count": 0, "path": "main.py" }, { "content": "\nfrom time import sleep\nimport json\nimport config\nfrom flask import Flask, Response, render_template\nimport time\n\napp = Flask(__name__)\nquit = False\nSETTINGS = config.get_config()\nRECENTS_FOLDER = '/media/{}/config/'.format(SETTINGS['core_storage'])\n\ndetails = {}\n\[email protected]('/')\ndef index():\n return render_template('details.html')\n\n\[email protected]('/details')\ndef game_details():\n def get_game_details():\n while True:\n json_data = json.dumps(\n {'rom_id': 5815, 'system_id': 20, 'name': '007: Everything or Nothing', 'region': 'Europe', 'front_cover': 'https://gamefaqs.gamespot.com/a/box/5/0/6/53506_front.jpg', 'back_cover': 'https://gamefaqs.gamespot.com/a/box/5/0/6/53506_back.jpg', 'description': \"Think like Bond, act like Bond, and experience an entirely new Bond adventure.<NAME>, the world's greatest secret agent, returns in Everything or Nothing with new guns and gadgets, combat skills, and clever tricks--and it's up to you to put them to good use.Travel through four exciting continents including the Valley of the Kings in Egypt and the French Quarter in New Orleans.The game also features two-player co-op missions and four-player multiplayer arena modes.\", 'developer': 'Griptonite Games', 'publisher': None, 'genre': 'Action,Shooter,Third-Person,Modern', 'release_date': 'Nov 17, 2003', 'gamefaqs': 'http://www.gamefaqs.com/gba/914854-007-everything-or-nothing'})\n yield f\"data:{json_data}\\n\\n\"\n time.sleep(1)\n\n return Response(get_game_details(), mimetype='text/event-stream')\n\n\n\napp.run(threaded=True,host='0.0.0.0', port=8080)\n\n", "id": "114342", "language": "Python", "matching_score": 1.621649980545044, "max_stars_count": 0, "path": "test.py" }, { "content": "from glob import glob\nfrom urllib import response\nfrom pkg_resources import DEVELOP_DIST\nimport requests\nimport json\nimport config\nimport logger\nimport event_manager\nfrom mister import publish\nimport threading\n\n\nSETTINGS = 
config.get_config()\n\nlast_details = {}\n\n\nclass SteamGameChange():\n def __init__(self, game, tokens):\n self.publisher = \"Steam\"\n self.event = \"SteamGameChange\"\n self.game = game\n self.tokens = tokens\n\n def toJSON(self):\n return json.dumps(self, default=lambda o: o.__dict__,\n sort_keys=True, indent=4)\n\n\napi_key = SETTINGS[\"steam\"][\"api_key\"]\nsteam_id = SETTINGS[\"steam\"][\"steam_id\"]\n\n\ndef get_user_details():\n url = f\"http://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/?key={api_key}&steamids={steam_id}\"\n response = requests.get(url)\n return json.loads(response.text)[\"response\"][\"players\"][0]\n\n\ndef get_game_details_by_id(id):\n url = f\"https://store.steampowered.com/api/appdetails?appids={id}&currency=usd\"\n response = requests.get(url)\n return json.loads(response.text)[id][\"data\"]\n\n\ndef initialize():\n logger.info(\"Initializing Steam publisher ...\")\n\n\ndef publish():\n try:\n global last_details\n user_details = get_user_details()\n if last_details != user_details and \"gameid\" in user_details:\n id = user_details[\"gameid\"]\n game_details = get_game_details_by_id(id)\n genres = []\n for item in game_details[\"genres\"]:\n genres.append(item[\"description\"])\n genre = \",\".join(genres)\n tokens = {}\n tokens[\"name\"] = game_details[\"name\"]\n tokens[\"game_id\"] = id\n tokens[\"description\"] = game_details[\"short_description\"]\n tokens[\"reference_url\"] = game_details[\"website\"]\n tokens[\"developer\"] = \",\".join(game_details[\"developers\"])\n tokens[\"publisher\"] = \",\".join(game_details[\"publishers\"])\n tokens[\"genre\"] = genre\n event = SteamGameChange(game_details[\"name\"], tokens)\n threading.Thread(target=event_manager.manage_event,\n args=[event]).start()\n last_details = user_details\n except Exception as e:\n logger.error(f\"Unable to publish SteamGameChange event\")\n\n\nevent_manager.publishers[\"Steam\"] = {}\nevent_manager.publishers[\"Steam\"][\"initialize\"] = lambda: initialize()\nevent_manager.publishers[\"Steam\"][\"publish\"] = lambda: publish()\n", "id": "8714235", "language": "Python", "matching_score": 1.9782168865203857, "max_stars_count": 0, "path": "steam.py" }, { "content": "\"\"\"\n\n[\n {\n \"MisterCoreChange\": [\n {\n \"description\": \"custom script on core change\",\n \"status\": \"enabled\",\n \"file\": \"details.txt\",\n \"script\": \"scripts/write_details_kruizcontrol.py\",\n \"format\":\"\"\n }\n ]\n },\n {\n \"MisterGameChange\": [\n {\n \"description\": \"custom script on game change\",\n \"status\": \"enabled\",\n \"file\": \"details.txt\",\n \"script\": \"scripts/write_details_kruizcontrol.py\",\n \"format\":\"KruizControl\"\n }\n ]\n }\n ]\n\n\"\"\"\n\n\ndef rom_to_kruiz_control(rom):\n content = \"\"\n if rom == \"\":\n return \"\"\n for detail in rom:\n if detail == \"release_name\":\n if rom[\"release_name\"] !=\"\":\n content = content + \"name\\n\"\n content = content + f'{rom[\"release_name\"]} \\n'\n if rom[\"region\"] !=\"\":\n if detail == \"region\":\n content = content + \"region\\n\"\n content = content + f'{rom[\"region\"]} \\n'\n if rom[\"system\"] !=\"\":\n if detail == \"system\":\n content = content + \"system\\n\"\n content = content + f'{rom[\"system\"]} \\n'\n if rom[\"developer\"] !=\"\":\n if detail == \"developer\":\n content = content + \"developer\\n\"\n content = content + f'{rom[\"developer\"]} \\n'\n if rom[\"publisher\"] !=\"\":\n if detail == \"publisher\":\n content = content + \"publisher\\n\"\n content = content + 
f'{rom[\"publisher\"]} \\n'\n if rom[\"genre\"] !=\"\":\n if detail == \"genre\":\n content = content + \"genre\\n\"\n content = content + f'{rom[\"genre\"]} \\n'\n if rom[\"date\"] !=\"\":\n if detail == \"date\":\n content = content + \"date\\n\"\n content = content + f'{rom[\"date\"]} \\n'\n if rom[\"description\"] !=\"\":\n if detail == \"description\":\n content = content + \"description\\n\"\n content = content + f'{rom[\"description\"]} \\n'\n if rom[\"reference_url\"] !=\"\":\n if detail == \"reference_url\":\n content = content + \"url\\n\"\n content = content + f'{rom[\"reference_url\"]} \\n'\n if rom[\"manual_url\"] !=\"\":\n if detail == \"manual_url\":\n content = content + \"manual\\n\"\n content = content + f'Manual: {rom[\"manual_url\"]} \\n'\n return(content)\n\ndef write_to_file(content,filename):\n try:\n with open(filename, 'w') as f:\n f.write(content)\n logger.event(f\"Rom details writtent to {filename}\")\n except Exception as e:\n logger.error(f\"Failed to write content file to {filename}\")\n\n\n\nif action[\"format\"] == \"\":\n write_to_file(\"\",action[\"file\"])\nif action[\"format\"] == \"KruizControl\":\n content = rom_to_kruiz_control(event.rom)\n write_to_file(content,action[\"file\"])\n", "id": "4052266", "language": "Python", "matching_score": 1.024757742881775, "max_stars_count": 0, "path": "examples/scripts/write_details_kruizcontrol.py" }, { "content": "\nprint(event.tokens)\nprint(action)", "id": "4624199", "language": "Python", "matching_score": 0.9731698632240295, "max_stars_count": 0, "path": "examples/scripts/print_event.py" }, { "content": "\n\ndef replace_value(value,tokens):\n for token in tokens:\n if value == f\"{{{token}}}\":\n value = tokens[token]\n return value\n\n\ndef replace_text(text,tokens):\n for token in tokens:\n if type(tokens[token]) == str:\n text = text.replace(f\"{{{token}}}\",tokens[token])\n return text\n", "id": "3176216", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "string_util.py" }, { "content": "from listen import listen_to_microphone\n\nstop_commands = [\"quit\",\"exit\", \"stop listening\"]\n\nstop = False \n\nwhile not stop:\n speech = listen_to_microphone()\n if speech != None:\n print(\"speech: \"+ speech)\n for command in stop_commands:\n if command in speech:\n stop = True", "id": "11923937", "language": "Python", "matching_score": 1.7049013376235962, "max_stars_count": 0, "path": "test_listen.py" }, { "content": "import speech_recognition as sr\n\ndef listen_to_microphone():\n r=sr.Recognizer()\n with sr.Microphone() as source:\n r.adjust_for_ambient_noise(source,duration=.5)\n # r.energy_threshold()\n while True:\n try:\n print(\"Listening for command...\")\n audio= r.listen(source)\n text = r.recognize_google(audio)\n return(text.lower())\n except Exception as e:\n return(\"\")\n\n\n\nif __name__ == \"__main__\":\n pass", "id": "12056070", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "listen.py" }, { "content": "import board\nimport analogio\nimport math\n\nimport os\nimport busio\nimport digitalio\nimport board\nimport storage\nimport adafruit_sdcard\n\nclass Colors:\n red = 0xff0000\n orange = 0xffa500\n yellow = 0xffff00\n green = 0x008000\n blue = 0x0000FF\n purple = 0x800080\n pink = 0xffc0cb\n white = 0xFFFFFF\n black = 0x000000\n \n \n \ndef get_battery_level():\n voltage_pin = analogio.AnalogIn(board.A6)\n battery_level = (((voltage_pin.value * 3.3) / 65536 * 2)/4)* 100\n voltage_pin.deinit()\n return math.ceil(battery_level)\n\ndef 
writeFile(filename,data):\n SD_CS = board.SD_CS # setup for M0 Adalogger; change as needed\n spi = busio.SPI(board.SCK, board.MOSI, board.MISO)\n cs = digitalio.DigitalInOut(SD_CS)\n sdcard = adafruit_sdcard.SDCard(spi, cs)\n vfs = storage.VfsFat(sdcard)\n storage.mount(vfs, \"/sd\")\n try:\n os.remove(\"/sd/\" + filename)\n except Exception as e:\n print('Unable to delete previous save: {}'.format(e))\n\n with open(\"/sd/\" + filename, \"w+\") as f:\n f.write(data)\n storage.umount(\"/sd\")\n spi.deinit()\n cs.deinit()\n \ndef readFile(filename):\n data = ''\n SD_CS = board.SD_CS # setup for M0 Adalogger; change as needed\n spi = busio.SPI(board.SCK, board.MOSI, board.MISO)\n cs = digitalio.DigitalInOut(SD_CS)\n sdcard = adafruit_sdcard.SDCard(spi, cs)\n vfs = storage.VfsFat(sdcard)\n storage.mount(vfs, \"/sd\")\n with open(\"/sd/\" + filename, \"r\") as f:\n data = f.read()\n storage.umount(\"/sd\")\n spi.deinit()\n cs.deinit()\n return data\n\n\n\n\n\n\ncolors = Colors()\n", "id": "317697", "language": "Python", "matching_score": 4.668594837188721, "max_stars_count": 0, "path": "util.py" }, { "content": "import board\nimport analogio\nimport math\n\nimport os\nimport busio\nimport digitalio\nimport board\nimport storage\nimport adafruit_sdcard\n\n\n\ndef save_state(data):\n SD_CS = board.SD_CS # setup for M0 Adalogger; change as needed\n spi = busio.SPI(board.SCK, board.MOSI, board.MISO)\n cs = digitalio.DigitalInOut(SD_CS)\n sdcard = adafruit_sdcard.SDCard(spi, cs)\n vfs = storage.VfsFat(sdcard)\n storage.mount(vfs, \"/sd\")\n if 'save.json' in os.listdir(\"/sd/\"):\n os.remove(\"/sd/save.json\")\n with open(\"/sd/\" + 'save.json', \"w+\") as f:\n f.write(data)\n storage.umount(\"/sd\")\n spi.deinit()\n cs.deinit()\n \ndef get_sate():\n data = ''\n SD_CS = board.SD_CS # setup for M0 Adalogger; change as needed\n spi = busio.SPI(board.SCK, board.MOSI, board.MISO)\n cs = digitalio.DigitalInOut(SD_CS)\n sdcard = adafruit_sdcard.SDCard(spi, cs)\n vfs = storage.VfsFat(sdcard)\n storage.mount(vfs, \"/sd\")\n with open(\"/sd/save.json\", \"r\") as f:\n data = f.read()\n storage.umount(\"/sd\")\n spi.deinit()\n cs.deinit()\n return data\n\n\n", "id": "9426746", "language": "Python", "matching_score": 0.7861113548278809, "max_stars_count": 0, "path": "save.py" }, { "content": "import board\nimport terminalio\nimport displayio\nfrom adafruit_display_text import label\nimport os\nfrom gamepadshift import GamePadShift\nimport digitalio\nfrom time import sleep\n\n\nclass SytemBoard:\n def __init__(self,board):\n self.board = board\n self.a_button = 2\n self.b_button = 1\n self.start_button = 4\n self.select_button = 8\n\n if self.board == \"pygamer\":\n self.a_button = 2\n self.b_button = 1\n self.start_button = 4\n self.select_button = 8\n\nred = 0xff0000\norange = 0xffa500\nyellow = 0xffff00\ngreen = 0x008000\nblue = 0x0000FF\npurple = 0x800080\npink = 0xffc0cb\nwhite = 0xFFFFFF\nblack = 0x000000\n\nmyBoard = SytemBoard('pygamer') \n\napplist = []\nmenuindex = 0\n\nfor filename in os.listdir():\n if 'app_' in filename or 'main.py' in filename or 'game_' in filename:\n applist.append(filename)\napplist = sorted(applist)\n\nbc = digitalio.DigitalInOut(board.BUTTON_CLOCK)\nbo = digitalio.DigitalInOut(board.BUTTON_OUT)\nbl = digitalio.DigitalInOut(board.BUTTON_LATCH)\n\ngamepad = GamePadShift(bc,bo,bl)\n\ndisplay_group = displayio.Group(max_size=20)\n\n\nfile_text = ' ' \nfile_label = label.Label(terminalio.FONT, text=file_text)\nfile_label.x = 10\nfile_label.y = 40\nfile_label.text = 
applist[0]\ndisplay_group.append(file_label)\n\nheader_text = \"Program Launcher v1\"\nheader_label = label.Label(terminalio.FONT, text=header_text)\nheader_label.x = 25\nheader_label.y = 10\ndisplay_group.append(header_label)\n\ncount_text = \" \"\ncount_label = label.Label(terminalio.FONT, text=count_text)\ncount_label.x = 10\ncount_label.y = 60\ncount_label.text = 'File {} / {}'.format(str(menuindex + 1),str(len(applist)))\ndisplay_group.append(count_label)\n\ndirections_text = \"<- File Up File Down ->\"\ndirections_label = label.Label(terminalio.FONT, text=directions_text)\ndirections_label.x = 0\ndirections_label.y = 110\ndisplay_group.append(directions_label)\n\n\n\nboard.DISPLAY.show(display_group)\n\nwhile True:\n pressed = gamepad.get_pressed()\n\n if pressed == myBoard.select_button and menuindex > 0:\n menuindex -=1\n file_label.text = applist[menuindex]\n\n\n if pressed == myBoard.start_button and menuindex < len(applist) -1:\n menuindex +=1\n file_label.text = applist[menuindex]\n\n if pressed == myBoard.a_button:\n board.DISPLAY.show(None)\n bc.deinit()\n bo.deinit()\n bl.deinit()\n sleep(.1)\n __import__(applist[menuindex].strip('.py'))\n\n count_label.text = 'File {} / {}'.format(str(menuindex + 1),str(len(applist)))\n\n while pressed:\n # Wait for all buttons to be released.\n pressed = gamepad.get_pressed()\n sleep(0.1)\n \n\n\n\n", "id": "1698390", "language": "Python", "matching_score": 2.6130828857421875, "max_stars_count": 0, "path": "code.py" }, { "content": "import board\nimport displayio\nimport terminalio\nfrom adafruit_display_text import label\nfrom gamepadshift import GamePadShift\nimport digitalio\nfrom time import sleep\nimport time\nimport pet\nimport hud\nimport menu\nfrom util import colors\nimport util\nimport systemboard\nimport json\nimport save\n\nsaveDebounce = 0\nsaveDebounceMax = 428\n\n\n\n\n# prepare for gamepad buttons\npad = GamePadShift(digitalio.DigitalInOut(board.BUTTON_CLOCK),\n digitalio.DigitalInOut(board.BUTTON_OUT),\n digitalio.DigitalInOut(board.BUTTON_LATCH))\n\nmyPet = pet.Pet()\n \ntry:\n petData = json.loads(save.get_sate())\n myPet.load_pet_data(petData)\n print('loaded pet data')\nexcept Exception as e:\n print('Failed to load pet data: {}'.format(e))\n save.save_state(json.dumps(myPet.get_pet_data()))\n\nmyHud = hud.Hud(myPet)\nmyMenu = menu.Menu(myPet)\nmyBoard = systemboard.SytemBoard('pygamer') \n\n#import wifi_test\n\nwhile True:\n pressed = pad.get_pressed()\n myPet.tick()\n if pressed == myBoard.select_button and not myMenu.showing:\n myMenu.showing = True\n\n if pressed == myBoard.start_button and myMenu.showing:\n if myMenu.debounce < myMenu.debounceMax:\n myMenu.debounce += 1\n else:\n myMenu.move()\n myMenu.debounce = 0\n\n if pressed == myBoard.a_button and myMenu.showing:\n myMenu.select()\n \n if pressed == myBoard.b_button and myMenu.showing:\n myMenu.showing = False\n \n if not myMenu.showing:\n myHud.draw()\n\n if myMenu.showing:\n myMenu.draw()\n\n if saveDebounce == saveDebounceMax:\n save.save_state(json.dumps(myPet.get_pet_data()))\n saveDebounce = 0\n print('saving pet data')\n else:\n saveDebounce += 1\n\n sleep(.1)\n\n\n ", "id": "8824856", "language": "Python", "matching_score": 2.597304344177246, "max_stars_count": 0, "path": "microgotchi.py" }, { "content": "import board\nimport analogio\n\n\n\nclass SytemBoard:\n def __init__(self,board):\n self.board = board\n self.a_button = 2\n self.b_button = 1\n self.start_button = 4\n self.select_button = 8\n\n if self.board == \"pygamer\":\n self.a_button = 2\n 
self.b_button = 1\n self.start_button = 4\n self.select_button = 8\n\n", "id": "1130094", "language": "Python", "matching_score": 0.7441700100898743, "max_stars_count": 0, "path": "systemboard.py" } ]
1.825432
AmerJod
[ { "content": "from math import log1p, sqrt\n\n# https://en.wikipedia.org/wiki/Birthday_attack\ndef birthdayProbability(masterset, hits):\n exponent = hits * (hits - 1) / 2\n probability = 1 - pow( 1 - (1 / masterset), exponent)\n return probability\n\ndef birthday2(probability_exponent, bits):\n probability = 10.0**probability_exponent\n outputs = 2.0**bits\n print(sqrt(2.0*outputs*-log1p(-probability)))\n\nif __name__ == '__main__':\n probabilityHB = birthdayProbability(694967296, 60000) * 100.00\n print('probabilityHB of Success: %f %%' % probabilityHB)\n", "id": "3545158", "language": "Python", "matching_score": 0.08263969421386719, "max_stars_count": 2, "path": "TOR/ConnectionsHandler/Tools/BirthdayAttackCalculator.py" }, { "content": "#! /usr/bin/env python3\n\nimport queue\nimport threading\nimport dns.resolver\n\nclass BirhtdayAttak2(threading.Thread):\n def __init__(self,q,dnsIP,domain,numberOfTries, loop_time = 1.0/60):\n self.q = q\n self.timeout = loop_time\n self.dnsIP = dnsIP\n self.domain = domain\n self.res = dns.resolver.Resolver()\n self.res.nameservers = [self.dnsIP] # ['172.16.17.32','172.16.17.32','8.8.8.8']\n self.res.lifetime = 20\n self.numberOfTries = numberOfTries\n super(BirhtdayAttak2, self).__init__()\n\n def onThread(self, function, *args, **kwargs):\n self.q.put((function, args, kwargs))\n\n def run(self):\n while True:\n try:\n function, args, kwargs = self.q.get(timeout=self.timeout)\n function(*args, **kwargs)\n\n except queue.Empty:\n self.idle()\n\n def idle(self):\n pass\n # put the code you would have put in the `run` loop here\n\n def mountAttackAsycn(self):\n try:\n for i in range(1,self.numberOfTries):\n answers = self.res.query(self.domain, 'a')\n for rdata in answers:\n print(rdata.address)\n except Exception as ex:\n print(ex)\n\n\nif __name__ == '__main__':\n\n birhtdayAttak = BirhtdayAttak2(dnsIP='8.8.8.8',domain='google.com',numberOfTries=10)\n birhtdayAttak.start()\n birhtdayAttak.onThread(birhtdayAttak.mountAttackAsycn())\n birhtdayAttak.onThread(birhtdayAttak.mountAttackAsycn())\n", "id": "10185749", "language": "Python", "matching_score": 0.5325166583061218, "max_stars_count": 2, "path": "TOR/Attack/BirthdayAttack.py" }, { "content": "data =\"python <EMAIL>, <EMAIL>, <EMAIL>, amer <EMAIL>, <EMAIL>, \" \\\n \"<EMAIL>, <EMAIL>, company <EMAIL>, <EMAIL>, \" \\\n \"<EMAIL>, online at nationalgeographicexpeditions.com,\" \\\n \" <EMAIL>, google <EMAIL>, <EMAIL>, <EMAIL>, \" \\\n \"<EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>, \" \\\n \"<EMAIL> facebook , nationalgeographic, orgooo\"", "id": "1013404", "language": "Python", "matching_score": 0.6952491402626038, "max_stars_count": 0, "path": "_tests/fexture/file_3.py" }, { "content": "import unittest\nimport emails_scraper\nfrom _tests.fexture import file_1, file_2, file_3\n\n# Expected results\nexpected_emails_1 = \"<EMAIL>, <EMAIL>, <EMAIL>, info [at] legalist [dot] com, christian @ legalist . com, b r i a n @ l e g a l i s t . 
c o m , benjamin/at/legalist/dot/com\"\nexpected_emails_2 = \"<EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>, online at nationalgeographicexpeditions.com, <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>\"\nexpected_emails_3 = \"<EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>, online at nationalgeographicexpeditions.com, <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>\"\n\n\nclass EmailFinderTest(unittest.TestCase):\n def test_get_emails(self):\n print(\"** get_emails method - Test 3 scenarios **\")\n # test a actual data\n emails = emails_scraper.get_emails(file_1.data)\n self.assertEqual(emails, expected_emails_1)\n\n # test a actual data\n emails = emails_scraper.get_emails(file_2.data)\n self.assertEqual(emails, expected_emails_2)\n\n # test a clean data\n emails = emails_scraper.get_emails(file_3.data)\n self.assertEqual(emails, expected_emails_3)\n\n\nif __name__ == \"__main__\":\n suite = unittest.TestLoader().loadTestsFromTestCase(EmailFinderTest)\n unittest.TextTestRunner(verbosity=2).run(suite)\n", "id": "3085485", "language": "Python", "matching_score": 1.2164738178253174, "max_stars_count": 0, "path": "_tests/_test_emails_finders.py" }, { "content": "import argparse\nimport os.path\nimport re\n\n# Regular expression\nregex_rules= r'(\\w+|(\\s\\w)+)\\s*(@|\\Wat\\W)\\s*(\\w+|(\\w\\s)+)\\s*(\\.|\\Wdot\\W)\\s*((\\w\\s)+|\\w+)'\n\nregex = re.compile(regex_rules)\n\n\ndef _file_content_to_str(file_path):\n \"\"\"\n Converts a file content to a string.\n Args:\n file_path (str): the path of the file.\n Return:\n str: file content as string\n \"\"\"\n\n with open(file_path) as file:\n return file.read()\n\n\ndef get_emails(string):\n \"\"\"\n Returns a list of emails as string separated by coma.\n Args:\n string (str): file content as string\n Return:\n str: list of emails as string\n \"\"\"\n\n # convert it the full text to lower case to prevent regex mismatches.\n string = string.lower()\n\n list_of_emails = re.finditer(regex, string)\n\n # Remove the duplications\n unique_emails = list(dict.fromkeys([email[0] for email in list_of_emails]))\n print(f\"({len(unique_emails)}) emails have been found.\")\n\n return \", \".join(unique_emails)\n\n\nif __name__ == \"__main__\":\n\n # create the parser\n parser = argparse.ArgumentParser(\n description=\"script for a finding all the email in file :)\"\n )\n\n # add the argument\n parser.add_argument(\"path\", metavar=\"path\", type=str, help=\"the path to file\")\n args = parser.parse_args()\n\n try:\n if os.path.isfile(args.path):\n file_content_as_string = _file_content_to_str(args.path)\n emails = get_emails(file_content_as_string)\n print(emails)\n else:\n raise Exception\n\n except Exception:\n print(f\"args {args.path} is not a file or a valid path.\")\n parser.print_usage()\n", "id": "9798009", "language": "Python", "matching_score": 0.6474350690841675, "max_stars_count": 0, "path": "emails_scraper.py" }, { "content": "from sqlalchemy import create_engine\nfrom sqlalchemy.orm.exc import *\nfrom sqlalchemy.orm import sessionmaker\n\nfrom spiderlib.db.db_modules import Base\nfrom spiderlib.db.db_modules import Quote, Tag, Author\n\nfrom spiderlib.db import logger\n\n\nclass Database(object):\n \"\"\"\n Database Class that handles all the db queries, connections\n \"\"\"\n\n # The values of those depend on your setup\n def __init__(self, **config):\n\n __db_conn_string = 
self.__construct_connection_string(**config)\n try:\n self.engine = create_engine(__db_conn_string)\n self.connection = self.engine.connect()\n self._session = sessionmaker(bind=self.engine)()\n\n # Enable and disable lazy_load\n self._lazy_load = config.get(\"DATABASE_LAZY_LOAD\", False)\n self._create_tables()\n logger.debug(\"DB Instance connected\")\n\n except Exception as error:\n logger.critical(\"Something went wrong while connecting to the postgres db,\"\n \"make sure that the database server is up running\")\n\n def __construct_connection_string(self, **config):\n \"\"\" Construct connection string \"\"\"\n\n # TODO: rasie an error incase one of is are empty\n # Get the db connection details\n POSTGRES_URL = config.get(\"POSTGRES_URL\")\n POSTGRES_USER = config.get(\"POSTGRES_USER\")\n POSTGRES_PW = config.get(\"POSTGRES_PW\")\n POSTGRES_DB = config.get(\"POSTGRES_DB\")\n\n logger.debug(\"Connection string has been constructed\")\n\n conn_string = f\"postgresql://{POSTGRES_USER}:{POSTGRES_PW}@{POSTGRES_URL}/{POSTGRES_DB}\"\n return conn_string\n\n # Not in used yet\n # using property decorator\n # a getter function\n @property\n def lazy_load(self):\n return self._lazy_load\n\n def _create_tables(self):\n Base.metadata.create_all(self.engine)\n logger.debug(\"DB Instance has been created\")\n\n def _drop_databae(self):\n Base.metadata.drop_all(self.engine)\n logger.debug(\"DB Instance has been dropped\")\n\n def _recreate_database(self):\n Base.metadata.drop_all(self.engine)\n Base.metadata.create_all(self.engine)\n logger.debug(\"DB Instance has been recreated\")\n\n def add(self, obj):\n \"\"\" Adds db object to the database then return the object \"\"\"\n\n try:\n self._session.add(obj)\n self._session.commit()\n logger.debug(\"db instance has has been added to the database\")\n return obj\n except Exception as error:\n logger.error(\n f\"db instance has not been added to the database, Error: {error}\"\n )\n\n def query(self, obj, **kwargs):\n \"\"\"\n Returns db objects\n Args:\n **kwargs\n Return:\n db objects\n \"\"\"\n try:\n db_objs = self._session.query(obj).filter_by(**kwargs).all()\n logger.debug(\"db has been queried\")\n return db_objs\n\n except NoResultFound as error:\n logger.error(\"Query selects no rows\")\n return None\n\n except MultipleResultsFound as error:\n logger.error(\"Multiple rows are returned for a query that returns\")\n return None\n\n except Exception as error:\n logger.error(\n f\"db instance has has been added to the database, Error: {error}\"\n )\n\n def query_one(self, obj, **kwargs):\n \"\"\"\n Returns only one object if exist.\n Args:\n **kwargs\n Return:\n db objects\n \"\"\"\n\n try:\n db_obj = self._session.query(obj).filter_by(**kwargs).one()\n logger.debug(\"db has been queried\")\n return db_obj\n\n except NoResultFound as error:\n return None\n\n except MultipleResultsFound as error:\n logger.error(\"Multiple rows are returned for a query that returns\")\n return None\n\n except Exception as error:\n logger.error(f\"Something went wrong, Error: {error}\")\n\n def exist(self, obj, **kwargs):\n \"\"\"\n Returns only one object if exist.\n Args:\n **kwargs\n Return:\n (Boolean, db_object)\n \"\"\"\n\n try:\n db_obj = self._session.query(obj).filter_by(**kwargs).one()\n logger.debug(\"db has been queried\")\n return (True, db_obj)\n\n except NoResultFound as error:\n return (False, None)\n\n except Exception as error:\n logger.error(f\"Something went wrong, Error: {error}\")", "id": "2221172", "language": "Python", "matching_score": 
2.2597622871398926, "max_stars_count": 0, "path": "spiderlib/db/database.py" }, { "content": "# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\nimport json\nimport datetime\n\n# Load modules from the common lib\nfrom spiderlib.db.database import Database\nfrom spiderlib.db.db_modules.tag import Tag\nfrom spiderlib.db.db_modules.author import Author\nfrom spiderlib.db.db_modules.quote import Quote\n\nfrom . import config\nimport re\n\n\nclass CleanDataPipeline(object):\n \"\"\" Clean the data \"\"\"\n\n def process_item(self, item, spider):\n # list_obj = []\n author_details = item.get(\"author_details\")\n\n # Could use a list of\n if author_details:\n author_details[\"author_name\"] = author_details.get(\"author_name\").strip()\n author_details[\"date_of_birth\"] = author_details.get(\"date_of_birth\").strip()\n\n # Get city and Country\n location = author_details.get(\"location\", \"\").strip()\n location = re.sub(\"in\\s\", \"\", location)\n locations_list = location.split(\",\")\n author_details[\"city\"] = ', '.join(locations_list[:-1]).strip()\n author_details[\"country\"] = locations_list[-1].strip()\n author_details[\"description\"] = author_details.get(\"description\").strip()\n\n item[\"author_details\"] = author_details\n\n\n item[\"author\"] = item.get(\"author\").strip()\n # TODO: Risky, might through an error\n item[\"text\"] = re.findall('“(.*?)”', item.get(\"text\"))[0]\n item[\"tags\"] = item.get(\"tags\")\n\n return item\n\n\nclass JsonWriterPipeline(object):\n \"\"\" JSON Writer - write JSON in file \"\"\"\n\n def open_spider(self, spider):\n dt = str(datetime.datetime.now())\n file_name = \"quotes_\" + dt + \".json\"\n self.file = open(f\"{file_name}\", \"w+\")\n\n def close_spider(self, spider):\n self.file.close()\n\n def process_item(self, item, spider):\n line = json.dumps(item) + \"\\n\"\n self.file.write(line)\n return item\n\n\nclass DatabaseWriterPipeline(object):\n \"\"\" Write into the database \"\"\"\n\n conn = None\n dict_authors = {}\n dict_tags = {}\n\n def process_item(self, item, spider):\n # Check if the Author Details if it has data\n author_details = item.get(\"author_details\")\n if author_details:\n author_name = author_details.get(\"author_name\")\n date_of_birth = author_details.get(\"date_of_birth\")\n city = author_details.get(\"city\")\n country = author_details.get(\"country\")\n description = author_details.get(\"description\")\n\n # check if the author is in the db\n author_obj = self.conn.query_one(Author, author_name=author_name)\n if not author_obj:\n # If not exist add it and get the obj\n author_obj = Author(\n author_name=author_name,\n date_of_birth=date_of_birth,\n city=city,\n country=country,\n description=description,\n )\n\n author_id = self.conn.add(author_obj).author_id\n self.dict_authors[author_name] = author_id\n\n # Get the quote data\n author = item.get(\"author\")\n quote = item.get(\"text\")\n tags = item.get(\"tags\")\n\n # Using simple caching mechanism - dict\n # Check if it is in the cache\n author_id = self.dict_authors.get(author)\n\n if not author_id:\n # Check if the author is already exist in the db\n author_obj = self.conn.query_one(Author, author_name=author)\n\n if not author_obj:\n # If not exist add it and get the obj\n author_db = Author(author_name=author)\n author_id = self.conn.add(author_db).author_id\n else:\n author_id = author_obj.author_id\n\n # Add 
it to the cache: kks\n self.dict_authors[author] = author_id\n\n # TODO: check if the quote has any updates in regards to the tags - FUTURE WORK\n # Check it the quote is already in the db\n # We can do smart cashing using the dict\n quote_obj = self.conn.query_one(Quote, text=quote)\n if not quote_obj:\n # If not exist add it and return the obj\n quote_obj = Quote(text=quote, author_id=author_id)\n\n for _tag in tags:\n tag_obj = self.conn.query_one(Tag, tag=_tag)\n if not tag_obj:\n tag_obj = Tag(tag=_tag)\n\n quote_obj.tags.append(tag_obj)\n\n # Add the Quote object\n self.conn.add(quote_obj)\n\n def open_spider(self, spider):\n self.conn = Database(**config.POSTGRES_CONN)\n\n def close_spider(self, spider):\n self.conn._session.close()\n", "id": "2272474", "language": "Python", "matching_score": 3.268944263458252, "max_stars_count": 0, "path": "legalist_spider/pipelines.py" }, { "content": "from spiderlib.db import Column, Integer, String, relationship\nfrom spiderlib.db.db_modules import Base\nfrom spiderlib.db.utils import to_json\n\n\nclass Author(Base):\n \"\"\"\n Author table\n - One to many with Quote table\n \"\"\"\n\n __tablename__ = \"authors\"\n\n author_id = Column(Integer, primary_key=True)\n author_name = Column(String(50))\n date_of_birth = Column(String)\n city = Column(String(50))\n country = Column(String(50))\n description = Column(String)\n\n quotes = relationship(\"Quote\", back_populates=\"author\", lazy=False)\n\n def __repr__(self):\n return (\n f\"<Author(author_name='{self.author_name}', date_of_birth='{self.date_of_birth}',\"\n f\" city='{self.city}' , country='{self.country}' ,description ='{self.description}' )>\"\n )\n\n # It is an easy way to convert it to json\n @property\n def to_dict(self):\n return {\n \"author_id\": self.author_id,\n \"author_name\": self.author_name,\n \"date_of_birth\": self.date_of_birth,\n \"city\": self.city,\n \"country\": self.country,\n \"description\": self.description,\n }\n\n # Not in use at the moment - kept for future work\n @property\n def json(self):\n return to_json(self, self.__class__)\n", "id": "1053162", "language": "Python", "matching_score": 4.7870097160339355, "max_stars_count": 0, "path": "spiderlib/db/db_modules/author.py" }, { "content": "from spiderlib.db import Column, Integer, String, ForeignKey, relationship\nfrom spiderlib.db.db_modules import association_table, Base\nfrom spiderlib.db.utils import to_json\n\n\nclass Quote(Base):\n \"\"\"\n Quote table\n - Many to many Tag table\n - Many to one with Author table\n \"\"\"\n\n __tablename__ = \"quotes\"\n\n quote_id = Column(Integer, primary_key=True)\n text = Column(String)\n\n author_id = Column(Integer, ForeignKey(\"authors.author_id\"))\n author = relationship(\"Author\", back_populates=\"quotes\")\n\n tags = relationship(\"Tag\", secondary=association_table)\n\n def __repr__(self):\n return f\"<Quote(text='{self.text}', author_id={self.text})>\"\n\n # It is an easy way to convert it to json\n @property\n def to_dict(self):\n return {\n \"quote_id\": self.quote_id,\n \"text\": self.text,\n \"author_id\": self.author_id,\n }\n\n # Not in use at the moment - kept for future work\n @property\n def json(self):\n return to_json(self, self.__class__)\n", "id": "1272723", "language": "Python", "matching_score": 4.6696062088012695, "max_stars_count": 0, "path": "spiderlib/db/db_modules/quote.py" }, { "content": "from spiderlib.db import Column, Integer, String, Boolean\nfrom spiderlib.db.db_modules import Base\nfrom spiderlib.db.utils import to_json\n\n\nclass 
Tag(Base):\n \"\"\"\n Tag table\n - Many to many with Quote table\n \"\"\"\n\n __tablename__ = \"tags\"\n tag_id = Column(Integer, primary_key=True)\n tag = Column(String(64))\n top_ten = Column(Boolean, default=False)\n\n # Not in use at the moment - kept for future work\n @property\n def json(self):\n return to_json(self, self.__class__)\n\n # It is an easy way to convert it to json\n @property\n def to_dict(self):\n return {\"tag_id\": self.tag_id, \"tag\": self.tag, \"top_ten\": self.top_ten}\n\n def __repr__(self):\n return f\"<Tag(title='{self.tag}')>\"\n", "id": "5488007", "language": "Python", "matching_score": 2.845665216445923, "max_stars_count": 0, "path": "spiderlib/db/db_modules/tag.py" }, { "content": "from sqlalchemy.ext.declarative import declarative_base\n\nfrom spiderlib.db import Table, Column, Integer, ForeignKey\n\nBase = declarative_base()\n\n\n# Many to many relationship table 'quotes_tags'\nassociation_table = Table(\n \"quotes_tags\",\n Base.metadata,\n Column(\"quote_id\", Integer, ForeignKey(\"quotes.quote_id\")),\n Column(\"tag_id\", Integer, ForeignKey(\"tags.tag_id\")),\n)\n\n# Need to be imported here after the association_table being declared\nfrom spiderlib.db.db_modules.author import Author\nfrom spiderlib.db.db_modules.tag import Tag\nfrom spiderlib.db.db_modules.quote import Quote\n", "id": "2221855", "language": "Python", "matching_score": 1.2912358045578003, "max_stars_count": 0, "path": "spiderlib/db/db_modules/__init__.py" }, { "content": "from sqlalchemy import Column, Integer, String, Date, ForeignKey, Table, PrimaryKeyConstraint, Boolean\nfrom sqlalchemy.orm import relationship\n\nfrom .utils import DBEncoderJson, DBEncoderDict\n\n# Initialize logger for the DB module\n# could be optmise ..\nfrom spiderlib.logging import NewLogger\nlogger = NewLogger(logger_name='database', store_flag=True).get_logger()", "id": "12650181", "language": "Python", "matching_score": 2.234644651412964, "max_stars_count": 0, "path": "spiderlib/db/__init__.py" }, { "content": "from app import db\n\nclass Request(db.Model):\n req_id = db.Column(db.Integer, primary_key=True)\n ip = db.Column(db.String)\n datetime = db.Column(db.String)\n\n", "id": "11007268", "language": "Python", "matching_score": 0.8040565848350525, "max_stars_count": 2, "path": "WebServer/app/models.py" }, { "content": "import setuptools\n\nwith open(\"requirements.txt\", \"r\") as rf:\n reqs = [i for i in rf.readlines()]\n\nsetuptools.setup(\n name=\"spiderlib\",\n version=\"0.0.1\",\n author=\"<NAME>\",\n packages=setuptools.find_packages(),\n requirements=reqs\n)", "id": "7746200", "language": "Python", "matching_score": 0.19418752193450928, "max_stars_count": 0, "path": "setup.py" }, { "content": "import json\nimport os\n\nfrom flask import render_template, redirect, url_for, request\nfrom datetime import datetime\nfrom app import app, db\nfrom .models import Request\nfrom Helper.Helper import Helper\nfrom Helper.Helper import TIME_FORMAT\n\nJsonRequestsPATH = 'JSON/NormalRequests/HTTPRequestNodes'\nJsonRequestsPATHCheck = 'JSON/CheckingRequests/HTTPCheckingRequestNodes' # store all the sendRequests about checkoing if the dns supports 0x20 code\n\n\[email protected]('/', methods=['GET', 'POST'])\ndef index():\n req = Request(ip=request.remote_addr, datetime=datetime.now())\n # store it in json file\n storeHTTPRequestJSON(time=str(datetime.now()),srcIP=request.remote_addr, baseUrl=request.base_url)\n # storing in the database, so much data\n #db.session.add(req)\n #db.session.commit()\n return 
render_template(\"index.html\")\n\n\[email protected]('/thesis', methods=['GET', 'POST'])\ndef thesis():\n return render_template(\"thesis.html\")\n\[email protected]('/aboutUs', methods=['GET', 'POST'])\ndef aboutUs():\n return render_template(\"aboutUs.html\")\n\[email protected]('/check', methods=['GET', 'POST'])\ndef check():\n req = Request(ip=request.remote_addr, datetime=datetime.now())\n # store it in json file\n storeHTTPRequestJSON(time=str(datetime.now()),srcIP=request.remote_addr ,baseUrl=request.base_url, mode='check')\n # storing in the database, so much data\n #db.session.add(req)\n #db.session.commit()\n return render_template(\"check.html\")\n\n\n#TODO: need to implmment a class for it\ndef storeHTTPRequestJSON(time,srcIP,baseUrl,mode='none'):\n \"\"\"Help for the bar method of Foo classes\"\"\"\n date = Helper.getTime(TIME_FORMAT.DATE)\n if mode == 'check':\n file = JsonRequestsPATHCheck + '_' + date + '.json'\n else:\n # TODO: need refactoring - make it more abstract\n file = JsonRequestsPATH + '_' + date + '.json'\n\n jsons = {}\n\n if (os.path.exists(file)) != True: # check if the file exist, if not create it.\n with open(file, 'w+') as jsonfile:\n json.dump(' ', jsonfile)\n else:\n with open(file, 'r') as jsonfile:\n jsons = json.load(jsonfile)\n\n with open(file,'w') as jsonfile:\n DNSRequestNodes = {\n 'Request': {\n 'ID': str(len(jsons)+1),\n 'Time': time,\n 'SrcIP': srcIP,\n 'Url': baseUrl,\n }\n }\n jsons[str(len(jsons)+1)] = DNSRequestNodes\n # Write into Json file\n json.dump(jsons, jsonfile)\n\n\n\n", "id": "9204817", "language": "Python", "matching_score": 2.708159923553467, "max_stars_count": 2, "path": "WebServer/app/views.py" }, { "content": "#! /usr/bin/env python3\n\nimport datetime\n\nfrom enum import Enum\nfrom stem.util import term\n\n\n#\nclass MODE_TYPES(Enum):\n printing = '-out'\n none = '-none'\n\n#\nclass MSG_TYPES(Enum):\n RESULT = term.Color.GREEN\n ERROR = term.Color.RED\n YELLOW = term.Color.YELLOW\n ANY = term.Color.WHITE\n\n#\nclass TIME_FORMAT(Enum):\n FULL = 'full'\n DATE = 'date'\n TIME = 'time'\n\n#\nclass Helper:\n\n def __init__(self,mode='-none'):\n self.mode = ''\n\n def printOnScreen(msg, color=MSG_TYPES.ANY, mode='-none'):\n if mode == '-out':\n print(term.format(msg, color.value))\n\n def printOnScreenAlways(msg, color=MSG_TYPES.ANY):\n try:\n print(term.format(msg, color.value))\n\n except:\n print(msg) # could be like this\n\n def getTime(format= TIME_FORMAT.FULL):\n date = datetime.datetime.now()\n try:\n if format == TIME_FORMAT.FULL: # full\n return (((str(date)).split('.')[0]).split(' ')[1] + ' ' + ((str(date)).split('.')[0]).split(' ')[0])\n if format == TIME_FORMAT.DATE: # date\n return (((str(date)).split('.')[0]).split(' ')[0])\n if format == TIME_FORMAT.TIME: # time\n return (((str(date)).split('.')[0]).split(' ')[1])\n\n except Exception as ex:\n print('Helper - getTime: %s' % ex)\n", "id": "2974068", "language": "Python", "matching_score": 0.9650487303733826, "max_stars_count": 2, "path": "WebServer/Helper/Helper.py" }, { "content": "#! 
/usr/bin/env python3\n\n'''\nDNS Server - UCL - <NAME>\nMini DNS server for resolving our website 'dnstestsuite.space'.\nhttps://bitbucket.org/AmerJoudiah/dns_project/\nNote: Still private project.\n'''\n\nimport logging\nimport socket\nimport traceback\nimport os\nimport time\nimport argparse\n\nfrom Helper import DNSFunctions\nfrom Helper.Helper import Helper\nfrom Helper.Helper import MODE_TYPES\nfrom Helper.Helper import MSG_TYPES\nfrom Helper.Helper import ADVERSARY_TASK_MODE\n\n\nVERSION = '1.15 b'\n#MODIFY_DATE = '- Last modified: 11/08/2018'\nIP_ADDRESS_LOCAL = '127.0.0.1'\nIP_ADDRESS_SERVER = '172.31.16.226'\nDEBUG = False\nPORT = 53 # 53 Default port\n\nRANDOMIZE_PORT = False # True # try all the possible port\nRANDOMIZE_REQUEST_ID = False # False # try all the possible request ID\nRANDOMIZE_BOTH = False\nNUMBER_OF_TRIES = 10000 # bruteforcing\n\ndef printPortAndIP(ip,port):\n print(\"\\n Host: %s | Port: %s \\n\" % (ip, port))\n\ndef printNcase():\n Helper.printOnScreenAlways(\" ***** Randomise Domain Name Letters Capitalisation is ACTIVATED *****\",MSG_TYPES.YELLOW)\n\ndef printModifiedDate():\n try:\n filename = os.path.basename(__file__)\n os.path.abspath(os.path.dirname(__file__))\n (mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = os.stat(filename)\n return time.ctime(mtime)\n\n except Exception as ex:\n Helper.printOnScreenAlways(\"run_DNS -printModifedDate: %s\"%ex , MSG_TYPES.ERROR)\n\n\n\ndef setAdversaryModetask(value):\n\n global RANDOMIZE_PORT\n global RANDOMIZE_BOTH\n global RANDOMIZE_REQUEST_ID\n if value == ADVERSARY_TASK_MODE.RRANDOMIZE_PORT_NUMBER.value:\n RANDOMIZE_PORT = True\n elif value == ADVERSARY_TASK_MODE.RRANDOMIZE_REQUEST_ID.value:\n RANDOMIZE_REQUEST_ID = True\n elif value == ADVERSARY_TASK_MODE.RRANDOMIZE_BOTH.value:\n RANDOMIZE_BOTH = True\n\n\ndef main(argv, IP):\n\n global FORCE_NOT_RESPONSE_MEG\n letterCaseRandomize = argv.rcase\n port = argv.port\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.bind((IP, port))\n\n printPortAndIP(IP, port)\n if letterCaseRandomize:\n printNcase()\n ADVERSARY_Mode = argv.adversary\n FORCE_NOT_RESPONSE_MODE = argv.dont\n if FORCE_NOT_RESPONSE_MODE:\n Helper.printOnScreenAlways(\n ' ***** NO RESPONSE MODE IS ACTIVATED *****', MSG_TYPES.YELLOW)\n DNSFunctions.loadRealZone()\n try:\n if not ADVERSARY_Mode:\n # keep listening\n #DNSFunctions.loadRealZone()\n\n while 1:\n data, addr = sock.recvfrom(512)\n response, allowResponse = DNSFunctions.getResponse(data, addr, letterCaseRandomize, forceNotResponseMode=FORCE_NOT_RESPONSE_MODE )\n if allowResponse:\n sock.sendto(response, addr)\n\n elif ADVERSARY_Mode: # attacking mode\n Helper.printOnScreenAlways(\n ' ***** ADVERSARY MODE IS ACTIVATED *****', MSG_TYPES.YELLOW)\n DNSFunctions.loadFakeZone()\n setAdversaryModetask(argv.task)\n # keep listening\n while 1:\n data, addr = sock.recvfrom(512)\n if RANDOMIZE_PORT is True: ## try all the possible Port Number 1 to 65556\n response = DNSFunctions.getResponse(data, addr, case_sensitive=False,adversaryMode=ADVERSARY_Mode,withoutRequestId=False) # we get the correct response.\n DNSFunctions.generateResponseWithPortNumber(response, sock, addr, NUMBER_OF_TRIES) # brute force all the possible port number\n\n elif RANDOMIZE_REQUEST_ID is True: ## try all the possible request IDs 1 to 65556\n response = DNSFunctions.getResponse(data, addr, case_sensitive=False,adversaryMode=ADVERSARY_Mode,withoutRequestId=True) # forge response without request ID, later we forge the ID and combine it with the whole 
response\n DNSFunctions.generateResponseWithRequestId(response, sock, addr, NUMBER_OF_TRIES) # brute force # we get the response once without Tre_id\n\n elif RANDOMIZE_BOTH:\n # response = DNSFunctions.getResponse(data, addr, case_sensitive=False, adversaryMode=ADVERSARY_Mode,\n # withoutRequestId=True) # forge response without request ID, later we forge the ID and combine it with the whole response\n # DNSFunctions.generat\n pass\n\n except Exception as ex:\n Helper.loggingError(str('ERROR: main ' + traceback.format_exc()))\n Helper.printOnScreenAlways(\"\\nERROR: Terminated!!! :\" + str(ex),MSG_TYPES.ERROR)\n\ndef run(argv):\n\n modifiedDate = printModifiedDate()\n DNSFunctions.makeDirectories()\n Helper.initLogger(level=logging.ERROR, enableConsole=False)\n DNSFunctions.printLogo(version=VERSION, modifyDate=modifiedDate)\n DNSFunctions.killprocess(PORT)\n DNSFunctions.setDebuggingMode(DEBUG)\n\n if argv.s is True:\n ip = socket.gethostbyname(socket.gethostname())\n else:\n ip = IP_ADDRESS_LOCAL\n\n main(argv, ip)\n\n\nif __name__ == '__main__':\n try: # on the server\n setArgs = argparse.Namespace(l=True, adversary=False, port=53, rcase=True, s=False, task='rport', dont=True)\n run(setArgs)\n\n except Exception as ex: # locally\n print('ERROR:o argv.... %s' % ex)\n\n\n# TODO: need to be refactored\ndef main_test():\n # gather Zone info and store it into memory\n\n print(\"Testing .... \")\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.bind((IP_ADDRESS_LOCAL, PORT))\n print(\"Host: %s | Port: %s \" % (IP_ADDRESS_LOCAL,PORT ))\n # open socket and\n # keep listening\n while 1:\n data, addr = sock.recvfrom(512)\n\n# TODO: need to be deleted\ndef main_test_local():\n '''\n Gather Zone info and store it into memory\n '''\n\n DNSFunctions.loadZone()\n print(\"Testing .... \")\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.bind((IP_ADDRESS_LOCAL, PORT))\n print(\"\\n Host: %s | Port: %s \" % (IP_ADDRESS_LOCAL,PORT ))\n # testing\n BYTES =b'\\\\$\\x00\\x10\\x00\\x01\\x00\\x00\\x00\\x00\\x00\\x01\\x02ns\\x0cdnStEstSuITE \\x05SpACe\\x00\\x00\\x1c\\x00\\x01\\x00\\x00)\\x10\\x00\\x00\\x00\\x80\\x00\\x00\\x00'\n response = DNSFunctions.getResponse(BYTES, '127.0.0.2')\n print(\"response:\")\n print(str(response))\n", "id": "8383936", "language": "Python", "matching_score": 1.908576250076294, "max_stars_count": 2, "path": "DNS/DNSServer.py" }, { "content": "#! /usr/bin/env python3\n\nimport sys\nimport os\nfrom app import app\nfrom stem.util import term\n\napp.debug = True\napp.static_folder = 'static'\n\nVERSION = '4.02'\nSTARTS = False\n\n#\ndef main(argv):\n try:\n if argv.__len__() > 0:\n if argv[0] == '-s': # on the server\n app.run(host=\"0.0.0.0\",port=\"80\",debug=False, threaded=True)\n else:\n print('Wrong parameters...')\n else:\n app.run()\n except Exception as ex:\n print(ex)\n # run it locally\n print('Running locally... 
')\n app.run()\n\n#\ndef makeDirectories():\n\n if not os.path.exists('JSON'):\n os.makedirs('JSON/CheckingRequests')\n os.makedirs('JSON/NormalRequests')\n else:\n if not os.path.exists('JSON/CheckingRequests'):\n os.makedirs('JSON/CheckingRequests')\n if not os.path.exists('JSON/NormalRequests'):\n os.makedirs('JSON/NormalRequests')\n\n if not os.path.exists('Logs'):\n os.makedirs('Logs')\n\n#\ndef printLogo():\n try:\n with open('Logo/logo.txt', 'r') as f:\n lineArr = f.read()\n print(term.format(str(lineArr),term.Color.GREEN))\n with open('Logo/logo2.txt', 'r') as f:\n lineArr = f.read()\n print(term.format((str(lineArr) % str(VERSION)), term.Color.YELLOW))\n except Exception as ex:\n print('ERROR: printLogo - ' + str(ex))\n\n#\nif __name__ == '__main__':\n makeDirectories()\n printLogo()\n main(sys.argv[1:])\n\n", "id": "3541553", "language": "Python", "matching_score": 2.2513320446014404, "max_stars_count": 2, "path": "WebServer/run.py" }, { "content": "#! /usr/bin/env python3\n\n# RUN IT ONLY ON ANY UNIX DISTRIBUTION BUT NOT WINDOWS\n\nimport json\nimport os\nimport sys\nimport traceback\nimport stem.descriptor.remote\n\nfrom enum import Enum\nfrom pprint import pprint\nfrom stem.util import term\nfrom pathlib import Path\n\nfrom TOR.ConnectionsHandler import TORConnector\nfrom TOR.Helper.Helper import MSG_TYPES\nfrom TOR.Helper.Helper import Helper\nfrom TOR.Helper.Helper import MODE_TYPES\nfrom TOR.NodeHandler import NodesHandler\n\nVERSION = 1.8\n\n#\ndef printLogo():\n #\n print(term.format(('\\n Starting TOR MAPPER.. v%s' % VERSION), term.Color.YELLOW))\n with open('Logo/logo.txt', 'r') as f:\n line_Arr = f.read()\n print(term.format((line_Arr % str(VERSION)), term.Color.GREEN))\n print('\\n')\n\n#\ndef makeDirectories():\n '''\n Make the directories in case they are missing\n '''\n\n try:\n # TODO: Directories need more abtracte\n if not os.path.exists('GatheredFiles'):\n os.makedirs('GatheredFiles')\n os.makedirs('GatheredFiles/Logs')\n os.makedirs('GatheredFiles/JSON')\n else:\n if not os.path.exists('GatheredFiles/Logs'):\n os.makedirs('GatheredFiles/Logs')\n if not os.path.exists('GatheredFiles/JSON'):\n os.makedirs('GatheredFiles/JSON')\n\n\n except Exception as ex:\n Helper.printOnScreenAlways(ex, term.Color.RED)\n\n#\ndef main(argv):\n\n mode = '-none'\n required_Nodes = 0\n printLogo()\n makeDirectories()\n if argv[1:] != []: # on the server\n try:\n required_Nodes= -1\n opt = argv[1]\n error = False\n if len(argv) == 3: # mode printing\n if argv[2] == MODE_TYPES.printing.value or argv[2] == MODE_TYPES.none.value:\n mode = argv[2]\n\n elif len(argv) == 5:\n if argv[2] == '-n': # stop after certain nodes number\n required_Nodes = argv[3]\n else:\n error = True\n if argv[4] == MODE_TYPES.printing.value or argv[4] == MODE_TYPES.none.value:\n mode = argv[4]\n else:\n error = True\n\n if error is True:\n Helper.printOnScreen('WRONG ......',color=MSG_TYPES.ERROR)\n sys.exit(2)\n else:\n Helper.printOnScreen('WRONG Too Many arguments.', color=MSG_TYPES.ERROR)\n\n ###---------------------------------------\n\n try:\n Helper.printOnScreenAlways(\"Gathering Info ... \", MSG_TYPES.RESULT)\n nodes = NodesHandler.NodesHandler(mode=mode)\n nodes_Number = nodes.run()\n Helper.printOnScreenAlways((\"DONE, %s nodes have been gathered\" % str(nodes_Number)),MSG_TYPES.RESULT)\n except Exception as ex:\n Helper.printOnScreenAlways((\"Exit nodes are not gathered.. 
:(, ERROR : %s\" % str(ex)),MSG_TYPES.ERROR)\n sys.exit()\n\n if opt == '-r' or opt == '-c' or opt == '-cd' or opt=='-drc': # check the connections\n if(int(required_Nodes) > 0):\n con = TORConnector.TORConnections(opt,mode,required_Nodes)\n con.run()\n else:\n con = TORConnector.TORConnections(opt,mode)\n con.run()\n\n except Exception as ex:\n print(ex)\n sys.exit()\n\n\nif __name__ == '__main__':\n\n try: # on the server\n if len(sys.argv) != 1:\n main(sys.argv[1:])\n else:\n print('ERROR: argv....')\n main(['', '-drc','-n','9','-out'])\n sys.exit()\n\n except Exception as ex: # locally\n print('ERROR: argv.... OR %s' % str(ex))\n main(['','-drc','-n','9','-out'])\n sys.exit()\n\n\n", "id": "5194452", "language": "Python", "matching_score": 3.4255478382110596, "max_stars_count": 2, "path": "TOR/MainController.py" }, { "content": "#! /usr/bin/env python3\n\nimport sys\nfrom TOR.ConnectionsHandler.TORConnector import TORConnections\n\nif __name__ == '__main__':\n\n con = TORConnections('-c', 'out', 6)\n con.run()\n", "id": "7705421", "language": "Python", "matching_score": 0.07405313104391098, "max_stars_count": 2, "path": "TOR/ConnectionsHandler/runDedug.py" }, { "content": "#! /usr/bin/env python3\n\nimport psutil\nfrom subprocess import Popen\n\nfor process in psutil.process_iter():\n if process.cmdline() == ['python', 'run_DNS.py']:\n print('Process found. Terminating it.')\n process.terminate()\n break\n else:\n print('Process not found: starting it.')\n Popen(['python', 'StripCore.py'])", "id": "11943609", "language": "Python", "matching_score": 0.07197532057762146, "max_stars_count": 2, "path": "DNS/KillDns.py" }, { "content": "from cadetlib.sourcecode.repository_type import * #### DON'T DELETE THIS LINE ####\n\nfrom cadetlib.logging import logger\n\n\nclass SourceCodeFactory(object):\n \"\"\"\n SourceFactory Factory\n \"\"\"\n\n sourcecode_dict = {}\n\n @staticmethod\n def create(source_code_type, uri, credentials, branch):\n \"\"\"\n Jsonify object depends on the class name\n Args:\n source_code_type (string): source code type\n uri (string): url of the repository\n credentials (dict): could be token || username&password\n branch (string): branch name, default value: master\n Return:\n source_code object\n \"\"\"\n\n if source_code_type is not SourceCodeFactory.sourcecode_dict:\n try:\n # add it to the ChartFactory list\n SourceCodeFactory.sourcecode_dict[source_code_type] = eval(\n source_code_type + \".Factory()\"\n )\n\n except:\n\n logger.debug(f\"model: ChartFactory, {source_code_type}, Not found\")\n return None\n\n logger.debug(f\"model: Source code created instance of {source_code_type}\")\n sourcecode_repo_class = SourceCodeFactory.sourcecode_dict[source_code_type]\n sourcecode_repo_object = sourcecode_repo_class.create(uri=uri, credentials=credentials, branch=branch)\n\n return sourcecode_repo_object\n\n\n\n# TODO: for testing- delete later- Amer OR moved to a testing\nif __name__ == \"__main__\":\n\n # sourcecode_list = [\"AzureDev\"]\n # uri = \"dev.azure.com/BTL-SecOps/Cadet%20Core/_git/Cadet%20Core\"\n # credentials = {\"token\": \"<KEY>\"}\n\n sourcecode_list = [\"AzureDev\"]\n uri = \"dev.azure.com/BTL-SecOps/Cadet%20Core/_git/Cadet%20Core\"\n credentials = {\"token\": \"kgubxdu6lyfha3h35whpemkdbksgs3unkzoncxgmh6tdzf5cl35q\"}\n\n for source_code in sourcecode_list:\n repo = SourceCodeFactory.create(source_code_type=source_code, uri=uri, credentials=credentials, branch='dev')\n if source_code == \"AzureDev\":\n print(repo.download(\"Token\",'/'))\n else:\n 
repo.download('')\n", "id": "4590426", "language": "Python", "matching_score": 1.2636264562606812, "max_stars_count": 0, "path": "spiderlib/db/utils/Jsonify/jsonify_factory.py" }, { "content": "import abc\n\n\nclass JsonifyPlatform(metaclass=abc.ABCMeta):\n def __init__(self):\n pass\n\n # TODO: maybe you have to change the function name.\n # @abc.abstractmethod\n # def get_key(self):\n # \"\"\" Abstract get key method \"\"\"\n # pass\n #\n # @abc.abstractmethod\n # def get_oauth(self):\n # \"\"\" Abstract get oauth method \"\"\"\n # pass\n", "id": "6023237", "language": "Python", "matching_score": 0.27178066968917847, "max_stars_count": 0, "path": "spiderlib/db/utils/Jsonify/base_jsonify.py" }, { "content": "\n\nfrom sqlalchemy.ext.declarative import DeclarativeMeta\nfrom sqlalchemy.orm.collections import InstrumentedList\n\nfrom spiderlib.db.db_modules.quote import Quote\nfrom spiderlib.db.db_modules.author import Author\nfrom spiderlib.db.db_modules.tag import Tag\n\n\nimport json\n\nclass DBEncoderJson(json.JSONEncoder):\n \"\"\"\n Helper class to convert SQLAlchemy db objects into json\n \"\"\"\n\n def default(self, obj):\n # if\n if isinstance(obj.__class__, DeclarativeMeta):\n # an SQLAlchemy class\n fields = {}\n _excluded_fields = [\"metadata\", \"json\", \"dict\", \"to_dict\"]\n\n # filter the field\n for field in [x for x in dir(obj) if not x.startswith('_') and x not in _excluded_fields]:\n data = obj.__getattribute__(field)\n try:\n json.dumps(data) # this will fail on non-encodable values, like other classes\n fields[field] = data\n except TypeError:\n # object needs its own method (.to_dict)\n if not isinstance(data, InstrumentedList):\n fields[field] = data.to_dict\n else:\n # list of object\n # NOTE: it goes down one level only,\n fields[field] = []\n for item in data:\n fields[field].append(item.to_dict)\n\n # a json-encodable dict\n return fields\n\n return json.JSONEncoder.default(self, obj)\n\n\nclass DBEncoderDict(object):\n \"\"\"\n Helper class to convert SQLAlchemy nested db objects into dict\n \"\"\"\n\n @staticmethod\n def encode(obj) -> dict:\n \"\"\"\n Converts SQLAlchemy nested db objects into dict\n \"\"\"\n\n # if\n\n\n if isinstance(obj.__class__, DeclarativeMeta):\n # an SQLAlchemy class\n _dict = {}\n _excluded_fields = [\"metadata\", \"json\", \"dict\", \"to_dict\"]\n\n # filter the field\n for field in [x for x in dir(obj) if not x.startswith('_') and x not in _excluded_fields]:\n data = obj.__getattribute__(field)\n try:\n json.dumps(data) # this will fail on non-encodable values, like other classes\n _dict[field] = data\n except TypeError:\n # object needs its own method (.to_dict)\n if not isinstance(data, InstrumentedList):\n _dict[field] = data.to_dict\n else:\n # list of object\n # NOTE: it goes down one level only,\n _dict[field] = []\n for item in data:\n _dict[field].append(item.to_dict)\n return _dict\n\n\n @staticmethod\n def list_to_dict(list_obj) -> dict:\n \"\"\"\n Converts a list fof SQLAlchemy nested db objects into dict.\n \"\"\"\n _dict = dict()\n for index, obj in enumerate(list_obj):\n _dict[index] = DBEncoderDict.encode(obj)\n\n return _dict\n\n", "id": "9015207", "language": "Python", "matching_score": 2.6164653301239014, "max_stars_count": 0, "path": "spiderlib/db/utils/db_obj_encoder.py" }, { "content": "from .tools import to_json\nfrom .tools import to_dict\n\n\nfrom .db_obj_encoder import DBEncoderDict, DBEncoderJson", "id": "8932736", "language": "Python", "matching_score": 0.018445787951350212, "max_stars_count": 0, "path": 
"spiderlib/db/utils/__init__.py" }, { "content": "\nimport json\nfrom enum import Enum\nfrom TOR.Helper.Helper import Helper\nfrom TOR.Helper.Helper import MSG_TYPES\nfrom TOR.Helper.Helper import MODE_TYPES\nfrom TOR.Helper.Helper import EnumEncoder\n\n\n\n# We could marge them on one Enum\nclass CONNECTION_STATUS(Enum):\n CONNECTED = True\n NOT_CONNECTED = False\n\n\nclass DOMAIN_STATUS(Enum):\n STATELESS = 'Stateless'\n ACCESSIBLE = 'Accessible'\n RE_ACCESSIBLE = 'Re_Accessible'\n NOT_ACCESSIBLE = 'NotAccessible'\n\n\n\nclass Result:\n #\n def __init__(self,connectionStatus, requestingDomainStatus):\n self.connectionStatus = connectionStatus.value\n self.requestingDomainStatus = requestingDomainStatus.value\n\n #\n def reprJSON(self):\n return dict(ConnectionStatus=self.connectionStatus, RequestingDomainStatus=self.requestingDomainStatus)\n\n\nclass FinalResult:\n #\n def __init__(self,resultList,nodesCount,timetaken):\n self.resultList = resultList\n self.timetaken = timetaken\n self.nodesCount = nodesCount\n\n #\n def printCheckedResult(self):\n '''\n This function print the result of checking if the DNS support 0x20 bit encoding(Capitalization)\n '''\n connectionFailed =0\n connectionSucceeded=0\n connectedAndAccessible = 0\n connectedAndReAccessible =0\n connectedButNotAccessible =0\n connectedFailed =0\n\n print(\"\\n--------------------------\")\n Helper.printOnScreenAlways('Finished in %0.2f seconds' % (self.timetaken))\n\n Helper.printOnScreenAlways('Found ' + str(self.nodesCount) + ' Exit nodes', MSG_TYPES.RESULT)\n for result in self.resultList:\n if result.connectionStatus == CONNECTION_STATUS.CONNECTED.value:\n if result.requestingDomainStatus == DOMAIN_STATUS.ACCESSIBLE.value:\n connectedAndAccessible += 1\n elif result.requestingDomainStatus == DOMAIN_STATUS.RE_ACCESSIBLE.value:\n connectedAndReAccessible += 1\n elif result.requestingDomainStatus == DOMAIN_STATUS.NOT_ACCESSIBLE.value:\n connectedButNotAccessible += 1\n connectionSucceeded += 1\n if result.connectionStatus == CONNECTION_STATUS.NOT_CONNECTED.value:\n connectedFailed += 1\n\n Helper.printOnScreenAlways(str(connectionSucceeded) + ': were connected successfully',MSG_TYPES.RESULT)\n Helper.printOnScreenAlways(' ' +str(connectedAndAccessible) + ': were connected and checked successfully.',MSG_TYPES.RESULT)\n Helper.printOnScreenAlways(' ' +str(connectedAndReAccessible) + ': were connected and re-checked successfully.',MSG_TYPES.RESULT)\n Helper.printOnScreenAlways(' ' +str(connectedButNotAccessible) + ': were connected successfully but checked failed.',MSG_TYPES.RESULT)\n Helper.printOnScreenAlways(str(connectionFailed) + ': failed ', MSG_TYPES.RESULT)\n Helper.printOnScreenAlways(\"\\n--------------------------\")\n\n Helper.printOnScreenAlways('Checking Success rate: ' + str(connectionSucceeded / self.nodesCount * 100) + '%',MSG_TYPES.RESULT)\n Helper.printOnScreenAlways('Checking Failed rate: ' + str(connectedButNotAccessible / self.nodesCount * 100) + '%',MSG_TYPES.RESULT)\n Helper.printOnScreenAlways('Failed Connections rate: ' + str(connectionFailed / self.nodesCount * 100) + '%',MSG_TYPES.RESULT)\n Helper.printOnScreenAlways('\\n***********************************================END===============***********************************\\n',MSG_TYPES.RESULT)\n\n\n #\n def writeCheckedResult(self):\n connectionFailed = 0\n connectionSucceeded = 0\n connectedAndAccessible = 0\n connectedAndReAccessible = 0\n connectedButNotAccessible = 0\n connectedFailed = 0\n\n print(\"\\n--------------------------\")\n 
Helper.printOnScreenAlways('Finished in %0.2f seconds' % (self.timetaken))\n\n Helper.printOnScreenAlways('Found ' + str(self.nodesCount) + ' Exit nodes', MSG_TYPES.RESULT)\n for result in self.resultList:\n if result.connectionStatus == CONNECTION_STATUS.CONNECTED.value:\n if result.requestingDomainStatus == DOMAIN_STATUS.ACCESSIBLE.value:\n connectedAndAccessible += 1\n elif result.requestingDomainStatus == DOMAIN_STATUS.RE_ACCESSIBLE.value:\n connectedAndReAccessible += 1\n elif result.requestingDomainStatus == DOMAIN_STATUS.NOT_ACCESSIBLE.value:\n connectedButNotAccessible += 1\n connectionSucceeded += 1\n if result.connectionStatus == CONNECTION_STATUS.NOT_CONNECTED.value:\n connectedFailed += 1\n\n data = ''\n with open(self.OUTPUT_FILE, 'r') as file:\n data = file.read()\n with open(self.OUTPUT_FILE, 'w+') as file:\n file.write(data)\n file.write(\"\\n--------------------------\\n\")\n file.write(\"\\n--------------------------\\n\")\n file.write('Finished in %0.2f seconds\\n' % (self.time_taken))\n file.write('Found ' + str(self.nodesCount) + ' Exit nodes:\\n')\n file.write(' ' + str(connectionSucceeded) + ': were connected successfully\\n')\n\n file.write(' ' + str(connectionSucceeded) + ': were connected successfully')\n file.write(' ' + str(connectedAndAccessible) + ': were connected and checked successfully.')\n file.write(' ' + str(connectedAndReAccessible) + ': were connected and re-checked successfully.')\n file.write(' ' + str(connectedButNotAccessible) + ': were connected successfully but checked failed.')\n file.write(' ' + str(connectionFailed) + ': failed ')\n\n file.write('\\n--------------------------\\n')\n file.write('Checking Success rate: ' + str(connectionSucceeded / self.nodesCount * 100) + '%')\n file.write('Checking Failed rate: ' + str(connectedButNotAccessible / self.nodesCount * 100) + '%')\n file.write('Failed Connections rate: ' + str(connectionFailed / self.nodesCount * 100) + '%')\n file.write('\\n***********************************================END===============***********************************\\n')\n\n\n", "id": "12067304", "language": "Python", "matching_score": 4.355445861816406, "max_stars_count": 2, "path": "TOR/ConnectionsHandler/Models/Results.py" }, { "content": "#! 
/usr/bin/env python3\n\nimport datetime\nimport functools\nimport getopt\nimport io\nimport json\nimport os\nimport pickle\nimport pycurl\nimport sys,traceback\nimport time\nimport certifi\nimport stem.process\nfrom tqdm import tqdm\n\n\nimport random\nimport argparse\n\nfrom threading import Thread\nfrom stem import StreamStatus, process\nfrom stem.util import term\nfrom enum import Enum\nfrom tqdm import tqdm\nimport shlex\n\nfrom TOR.ConnectionsHandler.Models.Results import CONNECTION_STATUS\nfrom TOR.Helper.Helper import MSG_TYPES\nfrom TOR.Helper.Helper import Helper\nfrom TOR.Helper.Helper import MODE_TYPES\nfrom TOR.Helper.Helper import TASK_MODE\nfrom TOR.NodeHandler import NodesHandler\n\nfrom TOR.ConnectionsHandler.Models import Results\nfrom TOR.ConnectionsHandler.Models.Connection import Connection\nfrom TOR.ConnectionsHandler.Models.ExitNode import ExitNode\nfrom TOR.ConnectionsHandler import TORFunctions\n\nclass TORConnections:\n\n def __init__(self, opt='-r', mode='-none',requiredNodes=10000,runManyTimeMode=False):\n self.mode = mode\n self.opt = opt\n\n self.REQUIRED_NODES = requiredNodes\n self.SOCKS_PORT = 7000\n self.CONTROL_PORT = 9051\n\n self.DOMAIN_URL = 'dnstestsuite.space'\n self.DOMAIN_URL_CHECK = 'dnstestsuite.space/check' # uses to check if the dns is supporting the 0x20 coding\n self.DOMAIN__CORRECT_MESSAGE_RESULT = 'Works [email protected]' # should be the same message in check.html page\n self.TOR_CHECK_CONNECTION = 'https://icanhazip.com'\n self.FORCE_NOT_RESPONSE_MSG = 'tor_dont_response' # MUST BE THE SAME IN THE DNS SERVER\n self.OUTPUT_FILE = 'result.txt'\n self.GATHERED_NODES_PATH = 'Nodes/GatheredExitNodesJSON.json' # gathered by NodeHandler class\n self.PROCESSED_NODES_PATH = 'Nodes/ProcessedExitNodesJSON.json' # gathered by NodeHandler class\n self.TOR_CONNECTION_TIMEOUT = 30 # timeout before we give up on a circuit\n self.PYCURL_TIMEOUT = 40\n self.REQUEST_TIMES = 100\n self.RUN_MANYTIMES_MODE = runManyTimeMode\n\n self.CONSUMER_KEY = \"\"\n self.CONSUMER_SECRET = \"\"\n self.ACCESS_TOKEN = \"\"\n self.ACCESS_TOKEN_SECRET = \"\"\n self.Result_List = []\n self.ExitNodes_List = []\n\n #\n def loadExitNodesFromJSON(self):\n cur_path = os.path.dirname(__file__)\n cwd = os.getcwd()\n os.chdir(cur_path)\n # read all the nodes\n new_path = os.path.relpath(self.GATHERED_NODES_PATH, cur_path)\n\n with open(new_path) as f:\n json_Objects= json.load(f)\n # Random\n random.shuffle(json_Objects)\n return json_Objects\n\n # check Tor connection only, if the node is accessable\n def checkTorConnection(self, numberOfNodes=10000): # check Tor connection\n start_time = time.time()\n nodes_Count = 0\n successfully_Connections = 0\n successfully_Connections_Checking_Failed = 0\n failed_Connections = 0\n\n # load\n json_Objects = self.loadExitNodesFromJSON()\n\n result = 3 # assume that the connection has failed\n\n if stem.util.system.is_windows():\n # Terminate tor.exe in case if it is still running\n TORFunctions.ProcesskillForWindows('tor.exe')\n\n print('\\n')\n total_Nodes=len(json_Objects)\n number_Of_Nodes = int(numberOfNodes)\n for obj in json_Objects:\n\n fingerprint = str(obj['ExitNode']['Fingerprint'].encode(\"ascii\"),'utf-8')\n\n # total number of nodes # debugging prupuses\n nodes_Count = nodes_Count + 1\n if nodes_Count <= number_Of_Nodes:\n break\n result = self.connectToTORExitNode(fingerprint, ip, nodes_Count, TASK_MODE.TOR_CONNECTION_CHECKING)\n self.Result_List.append(result)\n\n\n time_taken = time.time() - start_time\n finalResult = 
Results.FinalResult(time_taken, nodes_Count, successfully_Connections, successfully_Connections_Checking_Failed, failed_Connections)\n finalResult.printResult()\n\n data = ''\n with open(self.OUTPUT_FILE,'r') as file:\n data = file.read()\n with open(self.OUTPUT_FILE,'w+') as file:\n file.write(data)\n file.write(\"\\n--------------------------\\n\")\n file.write(\"\\n--------------------------\\n\")\n file.write('Finished in %0.2f seconds\\n' % (time_taken))\n file.write('Found ' + str(nodes_Count) + ' Exit nodes:\\n')\n file.write(' '+str(successfully_Connections) + ': were connected successfully\\n')\n file.write(' '+str(successfully_Connections_Checking_Failed) + ': were connected successfully, but checking failed.\\n')\n file.write(' '+str(failed_Connections) + ': failed\\n')\n file.write('\\n--------------------------\\n')\n file.write('Checking Success rate: '+str(successfully_Connections/nodes_Count * 100)+'% \\n')\n file.write('Checking Failed rate: '+str(successfully_Connections_Checking_Failed/nodes_Count * 100)+'% \\n')\n file.write('Failed Connections rate: '+str(failed_Connections/nodes_Count * 100)+'% \\n')\n\n # check DNS if it supports domain name 0x20 coding.\n def checkWebsiteConnection(self, numberOfNodes=10000):\n start_time = time.time()\n nodesCount = 0\n successfully_Connections = 0\n successfully_Connections_Checking_Failed = 0\n\n re_successfully_Connections = 0\n re_successfully_Connections_Checking_Failed = 0\n failed_Connections = 0\n\n # load\n json_Objects = self.loadExitNodesFromJSON()\n result = 3 # assume that connection failed\n\n if stem.util.system.is_windows():\n # Terminate the tor in case if it is still running\n TORFunctions.ProcesskillForWindows('tor.exe')\n\n print('\\n')\n total_Nodes = len(json_Objects)\n number_Of_Nodes = int(numberOfNodes)\n nodesCount = 0\n\n for obj in json_Objects:\n ip = str(obj['ExitNode']['Address'].encode(\"ascii\"), 'utf-8')\n fingerprint = str(obj['ExitNode']['Fingerprint'].encode(\"ascii\"), 'utf-8')\n nickname = str(obj['ExitNode']['Nickname'].encode(\"ascii\"), 'utf-8')\n or_port = str(obj['ExitNode']['Or_port'])\n dir_port = str(obj['ExitNode']['Dir_port'])\n\n # total number of nodes\n if nodesCount >= number_Of_Nodes:\n break\n nodesCount = nodesCount + 1\n result = self.connectToTORExitNode(fingerprint, ip, nodesCount, TASK_MODE.DNS_0x20_CHECKING) # check if the website is accessible / we use this method for check if the DNS support 0x20 coding for the domain name.\n exitNode = ExitNode(ipaddress=ip, fingerprint=fingerprint, nickname=nickname, or_port=or_port,\n dir_port=dir_port, status=result)\n self.ExitNodes_List.append(exitNode)\n self.Result_List.append(result)\n\n time_taken = time.time() - start_time\n finalResult = Results.FinalResult(self.Result_List, nodesCount, time_taken)\n\n curpath = os.path.dirname(__file__)\n os.chdir(curpath)\n newJSONPath = os.path.join(curpath,self.PROCESSED_NODES_PATH)\n Helper.storeExitNodesJSON(objects=self.ExitNodes_List, path=newJSONPath)\n\n # resolve our domain via our DNS\n # findTORDNSResolver(self)\n def requestDomainViaTor(self):\n Helper.printOnScreenAlways('Requesting %s via TOR ' % self.DOMAIN_URL)\n start_time = time.time()\n nodesCount = 0\n successfully_Connections = 0\n successfully_Connections_Checking_Failed = 0\n failed_Connections = 0\n\n json_Objects = self.loadExitNodesFromJSON()\n if stem.util.system.is_windows():\n # Terminate the tor in case if it is still running\n TORFunctions.ProcesskillForWindows('tor.exe')\n\n for obj in tqdm(json_Objects, 
ncols=80, desc='Requesting Domain via our DNS'):\n ip = str(obj['ExitNode']['Address'].encode(\"ascii\"), 'utf-8')\n fingerprint = str(obj['ExitNode']['Fingerprint'].encode(\"ascii\"), 'utf-8')\n nickname = str(obj['ExitNode']['Nickname'].encode(\"ascii\"), 'utf-8')\n or_port = str(obj['ExitNode']['Or_port'].encode(\"ascii\"))\n dir_port = str(obj['ExitNode']['Dir_port'].encode(\"ascii\"))\n result = self.connectToTORExitNode(fingerprint, ip, nodesCount + 1, TASK_MODE.REQUEST_DOMAIN)\n exitNode = ExitNode(ipaddress=ip,fingerprint=fingerprint,nickname=nickname,or_port=or_port,dir_port=dir_port,status=result)\n self.ExitNodes_List.append(exitNode)\n self.Result_List.append(result)\n\n time_taken = time.time() - start_time\n finalResult = Results.FinalResult(self.Result_List, nodesCount, time_taken)\n\n cur_path = os.path.dirname(__file__)\n os.chdir(cur_path)\n new_path = os.path.relpath(self.PROCESSED_NODES_PATH, cur_path)\n Helper.storeExitNodesJSON(object=self.ExitNodes_List,path=new_path)\n\n\n # resolve our domain via our DNS\n def countDNSRequest(self):\n Helper.printOnScreenAlways('Requesting %s via TOR ' % self.DOMAIN_URL)\n start_time = time.time()\n nodesCount = 0\n\n\n json_Objects = self.loadExitNodesFromJSON()\n if stem.util.system.is_windows():\n # Terminate the tor in case if it is still running\n TORFunctions.ProcesskillForWindows('tor.exe')\n\n for obj in tqdm(json_Objects, ncols=80, desc='Requesting Domain via our DNS'):\n\n ip = str(obj['ExitNode']['Address'].encode(\"ascii\"), 'utf-8')\n fingerprint = str(obj['ExitNode']['Fingerprint'].encode(\"ascii\"), 'utf-8')\n nickname = str(obj['ExitNode']['Nickname'].encode(\"ascii\"), 'utf-8')\n or_port = str(obj['ExitNode']['Or_port'])\n dir_port = str(obj['ExitNode']['Dir_port'])\n self.connectToTORExitNode(fingerprint, ip, nodesCount + 1, TASK_MODE.DNS_RESOLVER_COUNTER)\n\n time_taken = time.time() - start_time\n\n def startTorConnection(self, exitFingerprint, ip,mode=TASK_MODE.DNS_0x20_CHECKING):\n # Start an instance of Tor configured to only exit through Russia. This prints\n # Tor's bootstrap information as it starts. Note that this likely will not\n # work if you have another Tor instance running.\n result = self.connectToTORExitNode(exitFingerprint, ip, 3, mode) # check if the website i\n\n\n def start1(self,exitFingerprint, ip):\n # Start an instance of Tor configured to only exit through Russia. This prints\n # Tor's bootstrap information as it starts. 
Note that this likely will not\n # work if you have another Tor instance running.\n\n # Terminate the tor in case if it is still running\n if not (stem.util.system.get_pid_by_port(self.CONTROL_PORT)):\n print(term.format(\"Starting Tor, connecting to: %s \\n\", term.Attr.BOLD) % ip)\n tor_process = stem.process.launch_tor_with_config(\n timeout=90,\n completion_percent=100,\n config={\n 'SocksPort': str(self.SOCKS_PORT),\n 'ExitNodes': '$' + exitFingerprint,\n 'ControlPort': str(self.CONTROL_PORT),\n 'DataDirectory': 'Connection_info',\n },\n )\n else:\n pass\n\n Helper.printOnScreen(\"\\nChecking our endpoint: \\n\", MSG_TYPES.RESULT, mode=self.mode)\n url = 'http://'+str(ip).replace('.','-')+'.'+self.DOMAIN_URL\n result = self.query(url)\n if result is True:\n Helper.printOnScreen(('Successfully connected over TOR: %S' % url), MSG_TYPES.RESULT, mode=self.mode)\n\n def wirteIntoFile(self,raw):\n data = ''\n with open(self.OUTPUT_FILE,'r') as file:\n data = file.read()\n with open(self.OUTPUT_FILE,'w+') as file:\n file.write(data)\n file.write(raw+'\\n')\n\n def wirteIntoFileJOSN(self,json):\n count = 0\n exit_Nodes = []\n stem_Nodes=stem.descriptor.remote.get_server_descriptors()\n\n\n for desc in stem_Nodes:\n # CheckingRequest if the Node is an exit one\n if desc.exit_policy.is_exiting_allowed():\n count = count + 1\n # Print nodes\n Helper.printOnScreen(' %s %s' % (desc.nickname, desc.address) ,MSG_TYPES.RESULT.value, self.mode)\n exit_Nodes.append({\n 'ExitNode': {\n 'Address': desc.address,\n 'Fingerprint': desc.fingerprint,\n 'Nickname': desc.nickname,\n 'Dir_port': desc.or_port,\n 'Or_port': desc.dir_port\n }\n })\n\n # For testing purposes\n '''if nodeCount == 0:\n break'''\n # Write into Json file\n with open(self.GATHERED_NODES_PATH, 'w') as outfile:\n json.dump(exit_Nodes, outfile)\n\n #\n def connectToTORExitNode(self, exitNodeFingerprint, exitNodeIp, index, mode):\n # Start an instance of Tor configured to only exit through Russia. This prints\n # Tor's bootstrap information as it starts. 
Note that this likely will not\n # work if you have another Tor instance running.\n\n # Return values\n # 1 : Connection succussed\n # 2 : Connected but failed to check it\n # 3 : Connection failed\n\n if stem.util.system.is_windows():\n self.TOR_CONNECTION_TIMEOUT=90 ## MUST be 90 - DO NOT CHANGE IT\n\n start_time = time.time()\n result = False\n\n torConnection = Connection(mode=self.mode, pycurlTimeout=self.PYCURL_TIMEOUT, socksPort=self.SOCKS_PORT, controlPort=self.CONTROL_PORT,\n torConnectionTimeout=self.TOR_CONNECTION_TIMEOUT, domainUrl =self.DOMAIN_URL, domainUrlCheck = self.DOMAIN_URL_CHECK,\n domainCorrectMessageResult= self.DOMAIN__CORRECT_MESSAGE_RESULT, torCheckConnection =self.TOR_CHECK_CONNECTION,\n forceNotResponseMsg= self.FORCE_NOT_RESPONSE_MSG,\n exitNodeFingerprint=exitNodeFingerprint, exitNodeIp=exitNodeIp)\n result = torConnection.connect(index)\n try:\n url = ''\n if result.connectionStatus == CONNECTION_STATUS.CONNECTED.value:\n if mode == TASK_MODE.REQUEST_DOMAIN: #\n # RUN_MANYTIMES_MODE to send many sendRequests to the DNS so will have alot of information(port/id they use) about TOR DNS solver.\n result = torConnection.sendRequests(self.RUN_MANYTIMES_MODE, self.REQUEST_TIMES)\n elif mode ==TASK_MODE.TOR_CONNECTION_CHECKING: #'check': # check the connection reliability of the Tor exit node only\n result = torConnection.checkTORConnection()\n elif mode == TASK_MODE.DNS_0x20_CHECKING: #'check-domain': # check if the website is accessible\n result = torConnection.checkDNSFor0x20Encoding()\n elif mode == TASK_MODE.DNS_RESOLVER_COUNTER: #'check-domain': # check if the website is accessible\n result = torConnection.sendRequestsWithResponseMode(self.RUN_MANYTIMES_MODE, self.REQUEST_TIMES,responseMode=False)\n\n return result\n\n except Exception as ex:\n torConnection.killConnection()\n traceback.print_exc(file=sys.stdout)\n print('Error.... 
400000 - %s', str(ex))\n\n return result\n\n #\n def showArgu(self):\n parser = argparse.ArgumentParser(description='Enumerate all the exit nodes in TOR network -> CheckingRequest TOR connection via them || Request website.')\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\"-l\", \"--verbose\", action=\"store_true\")\n group.add_argument(\"-q\", \"--quiet\", action=\"store_true\")\n parser.add_argument(\"x\", type=int, help=\"the base\")\n parser.add_argument(\"y\", type=int, help=\"the exponent\")\n args = parser.parse_args()\n answer = args.x ** args.y\n print(args)\n\n #\n def maintest(self,argv):\n if argv[1:] != []: # on the server\n try:\n number_OF_Nodes = -1\n opt1 = argv[1]\n if len(argv) > 2:\n number_OF_Nodes = int(argv[2])\n if opt1 == '-r': # check the connections\n self.requestDomainViaTor()\n elif opt1 == '-c':\n self.checkTorConnection(number_OF_Nodes)\n elif opt1 == '-cd': # check the domain name connection\n self.checkWebsiteConnection(number_OF_Nodes)\n\n except Exception as ex:\n print('maintest :' + str(ex))\n sys.exit(2)\n\n #\n def run(self):\n try:\n if self.opt == '-r': # check the connections\n self.requestDomainViaTor()\n elif self.opt == '-c':\n self.checkTorConnection(self.REQUIRED_NODES)\n elif self.opt == '-cd': # check the domain name connection\n self.checkWebsiteConnection(self.REQUIRED_NODES)\n elif self.opt == '-drc': # check the domain name connection\n self.countDNSRequest()\n\n except Exception as ex:\n Helper.printOnScreenAlways('TORConnector - run %s'%str(ex),MSG_TYPES.ERROR)\n sys.exit(2)\n #maintest(['', '-c', '3'])\n\n#\nif __name__ == '__main__':\n TORFunctions.ProcesskillForWindows('tor.exe')\n con = TORConnections('-cd','-out', 5,runManyTimeMode=True)\n con.startTorConnection('8ED84B53BD9556CCBB036073A1AD508EC27CBE52', '172.16.31.10')", "id": "2707051", "language": "Python", "matching_score": 8.2506103515625, "max_stars_count": 2, "path": "TOR/ConnectionsHandler/TORConnector.py" }, { "content": "\n\"\"\"\nTOR connection class\n\"\"\"\n\nimport io\nimport os\nimport pycurl\nimport sys\nimport time\nimport traceback\nimport certifi\nimport stem.process\nimport random\n\nfrom async import thread\nfrom tqdm import tqdm\nfrom stem.util import term\nfrom TOR.Helper.Helper import MSG_TYPES\nfrom TOR.Helper.Helper import Helper\nfrom TOR.Helper.Helper import MODE_TYPES\nfrom TOR.NodeHandler import NodesHandler\nfrom TOR.ConnectionsHandler import TORFunctions\nfrom TOR.ConnectionsHandler.Models.Results import Result\nfrom TOR.ConnectionsHandler.Models.Results import DOMAIN_STATUS\nfrom TOR.ConnectionsHandler.Models.Results import CONNECTION_STATUS\nfrom multiprocessing.dummy import Pool\n\n\nclass Connection:\n OUTPUT_FILE = 'result.txt'\n\n #\n def __init__(self,mode,pycurlTimeout,socksPort,controlPort,torConnectionTimeout,domainUrl,domainUrlCheck,domainCorrectMessageResult,torCheckConnection,\n forceNotResponseMsg, exitNodeFingerprint,exitNodeIp) :\n\n self.torConnectionTimeouT = torConnectionTimeout\n self.pycurlTimeout = pycurlTimeout\n self.socksPort = socksPort\n self.controlPort = controlPort\n self.domainUrl = domainUrl\n self.domainUrlCheck = domainUrlCheck\n self.domainCorrectMessageResult = domainCorrectMessageResult\n self.torCheckConnection = torCheckConnection\n self.exitNodeFingerprint = exitNodeFingerprint\n self.exitNodeIp = exitNodeIp\n self.forceNotResponseMsg = forceNotResponseMsg\n self.mode = mode\n\n ''' # OTHER WAY TO IMPLEMENT THE CONSTRUCTOR\n def __init__(self, **kwargs):\n valid_keys = [\"mode\", 
\"pycurlTimeout\", \"socksPort\", \"controlPort\", \"torConnectionTimeout\", \"domainUrl\", \"domainUrlCheck\", \"domainCorrectResult\", \"torCheckConnection\", \"exitNodeFingerprint\", \"exitNodeIp\"]\n for key in valid_keys:\n self.__dict__[key] = kwargs.get(key)\n '''\n\n #\n def connect(self,index):\n Helper.printOnScreen((term.format(\"\\n\\n%d- Starting Tor, connecting to: %s\", term.Attr.BOLD) % (index, self.exitNodeIp)),\n mode=self.mode)\n Helper.printOnScreen('Fingerprint: ' + self.exitNodeFingerprint, MSG_TYPES.RESULT, mode=self.mode)\n self.wirteIntoFile('\\n%d- Starting Tor, connecting to: %s' % (index, self.exitNodeIp))\n self.wirteIntoFile('Fingerprint: ' + self.exitNodeFingerprint)\n try:\n self.tor_process = stem.process.launch_tor_with_config(\n timeout = self.torConnectionTimeouT,\n completion_percent = 100,\n config = {\n 'SocksPort': str(self.socksPort),\n 'ExitNodes': '$' + self.exitNodeFingerprint,\n 'ControlPort': str(self.controlPort),\n },\n )\n Helper.printOnScreen('Connected, Checking...', color=MSG_TYPES.YELLOW, mode=self.mode)\n self.wirteIntoFile('Connected, Checking...')\n\n return Result(CONNECTION_STATUS.CONNECTED,DOMAIN_STATUS.STATELESS)\n\n except Exception as ex:\n Helper.printOnScreen(('Connection - connect: ' + str(ex)), color=MSG_TYPES.ERROR, mode=self.mode)\n Helper.printOnScreen('Connection failed! - Timed out', color=MSG_TYPES.ERROR, mode=self.mode)\n self.wirteIntoFile('Connection failed! - Timed out')\n return Result(CONNECTION_STATUS.NOT_CONNECTED,DOMAIN_STATUS.STATELESS)\n\n #\n def killConnection(self):\n self.tor_process.kill() # stops tor\n\n #\n def query(self,url):\n # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=849845;msg=127\n # https://stackoverflow.com/questions/29876778/tor-tutorial-speaking-to-russia-stuck-at-45-50\n # Uses pycurl to fetch a site using the proxy on the SOCKS_PORT.\n output = io.BytesIO()\n query = pycurl.Curl()\n query.getinfo(pycurl.PRIMARY_IP)\n query.setopt(pycurl.CAINFO, certifi.where())\n query.setopt(pycurl.URL, url)\n\n query.setopt(pycurl.VERBOSE, False)\n query.setopt(pycurl.TIMEOUT, self.pycurlTimeout)\n query.setopt(pycurl.PROXY, '127.0.0.1')\n query.setopt(pycurl.PROXYPORT, self.socksPort)\n query.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4)\n query.setopt(pycurl.USERAGENT, 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:8.0) Gecko/20100101 Firefox/8.0')\n query.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_SOCKS5_HOSTNAME)\n query.setopt(pycurl.WRITEFUNCTION, output.write)\n\n try:\n query.perform()\n temp = output.getvalue()\n return str(temp.decode('utf-8')).strip()\n\n except Exception as ex:\n Helper.printOnScreen((\"Unable to reach %s (%s)\" % (url, ex)), MSG_TYPES.ERROR, mode=self.mode)\n return b'unreachable'\n\n except pycurl.error as exc:\n Helper.printOnScreen((\"Unable to reach %s (%s)\" % (url, exc)),MSG_TYPES.ERROR,mode=self.mode)\n return b'unreachable'\n\n #\n def sendRequests(self, runManytimesMode, requestTimes):\n try:\n\n Helper.printOnScreen((term.format(\"Requesting our webiste:\\n\", term.Attr.BOLD)), color=MSG_TYPES.RESULT,\n mode=self.mode)\n domain = str(self.exitNodeIp).replace('.', '-') + '.' 
+ self.domainUrl\n url = 'http://' + domain\n result = self.query(url)\n\n # requesting many times/ testing the same node 100 times./ testing how random is the PORT number and Requset ID\n if runManytimesMode is True:\n self.requestDomain(domain=domain, times=requestTimes)\n\n self.killConnection()\n if result is True:\n Helper.printOnScreenAlways(('Successfully connected over TOR: %s' % url), color=MSG_TYPES.RESULT)\n return Result(CONNECTION_STATUS.CONNECTED, DOMAIN_STATUS.ACCESSIBLE)\n else:\n return Result(CONNECTION_STATUS.CONNECTED, DOMAIN_STATUS.NOT_ACCESSIBLE)\n\n except Exception as ex:\n self.killConnection()\n TORFunctions.loggingError('Connection - sendRequests: %s' % traceback.format_exc())\n return Result(CONNECTION_STATUS.CONNECTED, DOMAIN_STATUS.STATELESS) # or something went wrong\n\n #\n def sendRequestsWithResponseMode(self, runManytimesMode, requestTimes, responseMode=True):\n try:\n Helper.printOnScreen((term.format(\"Requesting our webiste:\\n\", term.Attr.BOLD)), color=MSG_TYPES.RESULT,\n mode=self.mode)\n domain = str(self.exitNodeIp).replace('.', '-') + '.' + self.domainUrl\n url = 'http://' + domain\n # requesting many times/ testing the same node 100 times./ testing how random is the PORT number and Requset ID\n self.requestDomain(domain=domain, times=requestTimes, responseMode=responseMode,\n addtionname=self.forceNotResponseMsg, )\n self.killConnection()\n except Exception as ex:\n self.killConnection()\n TORFunctions.loggingError('Connection - sendRequests: %s' % traceback.format_exc())\n\n #\n def requestDomainThread(self, domain, times, responseMode, addtionname= None):\n # to avoid cashing\n pool = Pool(times)\n results = []\n try:\n for i in range(1, times):\n randNumber = random.randint(1, 10000)\n if addtionname is None:\n sub_Domain = (\"%d_%d_%d_%s\" % (randNumber,times, i, domain))\n else:\n sub_Domain = (\"%d_%s_%s\" % (times,addtionname, domain))\n\n url = 'http://' + sub_Domain\n Helper.printOnScreen(('%d- Requesting: %s' %(i, url)),\n color=MSG_TYPES.RESULT,\n mode=self.mode)\n\n # TODO: need to be solved\n result_ =results.append(pool.apply_async(self.query(url))) # no need to wait for the reponcse\n print(result_)\n if result_ is not None:\n if 'sock' in result_.lower() and i > 3:\n print(result_)\n pool.close()\n pool.join()\n break\n\n except Exception as ex:\n print(\"requestDomain\")\n print(ex)\n\n pool.close()\n pool.join()\n\n #\n def requestDomain(self, domain, times,responseMode,addtionname= None):\n # to avoid cashing\n results = []\n try:\n randNumber = random.randint(1, 10000)\n for i in range(1, times):\n\n if addtionname is None:\n sub_Domain = (\"%d_%d_%d_%s\" % (randNumber,times, i, domain))\n else:\n sub_Domain = (\"%d_%s_%s\" % (randNumber,addtionname, domain))\n\n url = 'http://' + sub_Domain\n Helper.printOnScreen(('%d- Requesting: %s' %(i, url)),\n color=MSG_TYPES.RESULT,\n mode=self.mode)\n # TODO: need to be solved\n result_ = self.query(url) # no need to wait for the reponcse\n\n if result_ is not None:\n if 'sock' in result_.lower() and i > 3:\n print(result_)\n break\n except Exception as ex:\n print(\"requestDomain\")\n print(ex)\n\n\n #\n def checkTORConnection(self):\n '''\n Check if is establishing connection over TOR working.\n '''\n\n try:\n url = self.torCheckConnection\n if self.exitNodeIp == self.query(url):\n Helper.printOnScreen('Connected Successfully', color=MSG_TYPES.RESULT, mode=self.mode)\n self.wirteIntoFile('Connected Successfully')\n self.killConnection()\n return Result(CONNECTION_STATUS.CONNECTED, 
DOMAIN_STATUS.ACCESSIBLE)\n\n else:\n Helper.printOnScreen('Checking Failed ', color=MSG_TYPES.ERROR, mode=self.mode)\n self.wirteIntoFile('Checking Failed ')\n self.killConnection()\n return Result(CONNECTION_STATUS.CONNECTED, DOMAIN_STATUS.NOT_ACCESSIBLE)\n\n except Exception as ex:\n self.killConnection()\n TORFunctions.loggingError('Connection - checkTORConnection: %s' % traceback.format_exc())\n return Result(CONNECTION_STATUS.CONNECTED, DOMAIN_STATUS.STATELESS) # or something went wrong\n\n #\n def checkDNSFor0x20Encoding(self):\n '''\n Check for 0x20 bit encoding\n '''\n\n try:\n randNumber = random.randint(1, 10000) # to avoid cashing\n domain = (str(self.exitNodeIp).replace('.', '-') + '.' + self.domainUrlCheck).strip()\n subDomain = '%d_check_%s' % (randNumber, domain) # 768_check_192.168.3.11.dnstestsuite.space/check\n url = 'http://' + subDomain\n message = self.domainCorrectMessageResult\n result_message = 'none'\n try:\n result_message = self.query(url)\n\n except:\n result_message = 'unreachable'\n\n if message == result_message: # matches\n Helper.printOnScreen(('Connected Successfully to : %s' % subDomain), color=MSG_TYPES.RESULT,\n mode=self.mode)\n self.wirteIntoFile('Connected Successfully to : %s' % subDomain)\n result = Result(CONNECTION_STATUS.CONNECTED, DOMAIN_STATUS.ACCESSIBLE)\n\n else:\n Helper.printOnScreen(('Checking Failed : %s' % subDomain), color=MSG_TYPES.ERROR, mode=self.mode)\n self.wirteIntoFile('Checking Failed : %s' % subDomain)\n # re-checking\n Helper.printOnScreenAlways('re-Checking...',color=MSG_TYPES.ANY)\n subDomain = '%d_re_check_%s' % (randNumber, domain) # 12321_re_check_192.168.3.11.dnstestsuite.space/check\n url = 'http://' + subDomain\n result_message = 'none'\n\n try:\n result_message = self.query(url)\n\n except:\n result_message = 'unreachable'\n\n if message == result_message:\n Helper.printOnScreen(('re-Checking Successful : %s' % subDomain), color=MSG_TYPES.RESULT,\n mode=self.mode)\n self.wirteIntoFile('re-Checking Successful : %s' % subDomain)\n result = Result(CONNECTION_STATUS.CONNECTED, DOMAIN_STATUS.RE_ACCESSIBLE)\n\n else:\n Helper.printOnScreen(('re-Checking Failed : %s' % subDomain), color=MSG_TYPES.ERROR,\n mode=self.mode)\n self.wirteIntoFile('re-Checking Failed : %s' % subDomain)\n result = Result(CONNECTION_STATUS.CONNECTED, DOMAIN_STATUS.NOT_ACCESSIBLE)\n\n self.killConnection()\n\n return result\n\n except Exception as ex:\n TORFunctions.loggingError('Connection - checkDNSFor0x20Encoding: %s' % traceback.format_exc())\n self.killConnection()\n\n return Result(CONNECTION_STATUS.CONNECTED, DOMAIN_STATUS.NOT_ACCESSIBLE)\n\n #\n\n\n #\n def wirteIntoFile(self, raw):\n data = ''\n with open(self.OUTPUT_FILE, 'r') as file:\n data = file.read()\n\n with open(self.OUTPUT_FILE, 'w+') as file:\n file.write(data)\n file.write(raw + '\\n')", "id": "9948753", "language": "Python", "matching_score": 2.782583475112915, "max_stars_count": 2, "path": "TOR/ConnectionsHandler/Models/Connection.py" }, { "content": "import json\nimport os\n\nfrom TOR.Helper.Helper import Helper\nfrom TOR.Helper.Helper import MODE_TYPES\nfrom TOR.Helper.Helper import MSG_TYPES\nfrom TOR.ConnectionsHandler.Models.Results import DOMAIN_STATUS\n\n\nclass ExitNodeFilter:\n\n listDNSConnected = []\n listTORConnected = []\n listTORAccessWebsite = []\n listTORREAccessWebsite = []\n listTORFullyAccessWebsite = []\n listTORNOTAccessWebsite = []\n listDNSNotConnected = []\n listDNSSupport0x20BitEncoding = []\n\n #\n def __init__(self, nodePath='',mode=''):\n 
self.nodePath =nodePath\n self.nodePath2 =\"C:/Users/<NAME>/Desktop/UCL/Term 2/DS/DNS_Project/TOR/ConnectionsHandler/Nodes/ProcessedExitNodesJSON.json\"\n\n #\n def filterExitNode(self):\n\n allExitnodes = self.loadExitNodesFromJSON()\n total_Nodes = len(allExitnodes)\n print('Total nodes found: %d' % total_Nodes)\n\n for exitnode in allExitnodes:\n ip = str(exitnode['ExitNode']['Address'].encode(\"ascii\"), 'utf-8')\n fingerprint = str(exitnode['ExitNode']['Fingerprint'].encode(\"ascii\"), 'utf-8')\n nickname = str(exitnode['ExitNode']['Nickname'].encode(\"ascii\"), 'utf-8')\n orPort = str(exitnode['ExitNode']['Or_port'])\n dirPort = str(exitnode['ExitNode']['Dir_port'])\n connectionStatus = exitnode['ExitNode']['Status']['ConnectionStatus']\n requestingDomainStatus= str(exitnode['ExitNode']['Status']['RequestingDomainStatus'])\n\n if connectionStatus is True:\n self.listTORConnected.append(exitnode)\n if requestingDomainStatus == DOMAIN_STATUS.ACCESSIBLE.value:\n self.listTORAccessWebsite.append(exitnode)\n\n elif requestingDomainStatus == DOMAIN_STATUS.RE_ACCESSIBLE.value:\n self.listTORREAccessWebsite.append(exitnode)\n\n elif requestingDomainStatus == DOMAIN_STATUS.NOT_ACCESSIBLE.value:\n self.listTORNOTAccessWebsite.append(exitnode)\n\n elif connectionStatus is False:\n self.listDNSNotConnected.append(exitnode)\n\n self.listTORFullyAccessWebsite = self.listTORAccessWebsite + self.listTORREAccessWebsite\n\n return (self.listTORConnected, self.listTORAccessWebsite,\n self.listTORREAccessWebsite, self.listTORFullyAccessWebsite, self.listTORNOTAccessWebsite)\n\n #\n def loadExitNodesFromJSON(self):\n\n cur_path = os.path.dirname(__file__)\n cwd = os.getcwd()\n os.chdir(cur_path)\n # read all the nodes\n new_path = os.path.relpath(self.nodePath2, cur_path)\n\n with open(new_path) as f:\n json_Objects = json.load(f)\n\n return json_Objects\n\n\nif __name__ == '__main__':\n filter = ExitNodeFilter()\n filter.filterExitNode()", "id": "12158997", "language": "Python", "matching_score": 4.6543097496032715, "max_stars_count": 2, "path": "TOR/ConnectionsHandler/Tools/TORNodeFilter.py" }, { "content": "\n\nfrom enum import Enum\n\nfrom TOR.ConnectionsHandler.Tools.DnsResolver import DNSResolver\nfrom TOR.ConnectionsHandler.Tools import TORNodeFilter\nfrom TOR.Helper.Helper import Helper\nfrom TOR.Helper.Helper import MSG_TYPES\n\n\n\nclass DNS_CATEGORY(Enum):\n\n ALL = 'All'\n SUPPORT0X20ENCODING = '0x20'\n DONOTSUPPORT0X20ENCODING = 'NO0x20'\n\nif __name__ == '__main__':\n\n exitNodeFilter = TORNodeFilter.ExitNodeFilter()\n (listTORConnected,listTORAccessWebsite, listTORREAccessWebsite,\n listTORFullyAccessWebsite, listTORNOTAccessWebsite) = exitNodeFilter.filterExitNode()\n\n DNSFilter = DNSResolver()\n DNSlist = DNSFilter.Normalize(show='no')\n resolverList = []\n for resolver in DNSlist:\n exitnodeIPList = []\n\n index =0\n for exitnode in resolver.ExitNodelist:\n exitnodeIPList.append(resolver.ExitNodelist[index].exitNodeIP) # for dns\n index += 1\n\n for exitnode in listTORREAccessWebsite: # nodes that needed to reconnect\n nodeIP = exitnode['ExitNode']['Address']\n if nodeIP in exitnodeIPList:\n if resolver.DNSIP not in resolverList:\n resolverList.append(resolver.DNSIP)\n\n # print Resolvers have implemented 0x20bit encoding\n Helper.printOnScreenAlways('%d Resolvers found that have implemented 0x20bit encoding '% len(resolverList),MSG_TYPES.RESULT)\n for dns in resolverList:\n Helper.printOnScreenAlways(dns,MSG_TYPES.RESULT)\n\n\n\n\n\n", "id": "1581623", "language": "Python", 
"matching_score": 1.7771469354629517, "max_stars_count": 2, "path": "TOR/ConnectionsHandler/Tools/DNSResolverFilter.py" }, { "content": "\n\"\"\"\nThis class is for each exitNode which delong to DNS resolver\n\"\"\"\n\nclass DNSExitNode():\n def __init__(self,nodeIP,nodeDomain,nodeModifiedDomainfull):\n self.exitNodeIP = nodeIP\n self.nodeDomian = nodeDomain\n self.nodeModifiedDomian = nodeModifiedDomainfull\n self.JSON = self.reprExitNodelistJSON()\n\n def reprExitNodelistJSON(self):\n return dict(nodeIP=self.exitNodeIP, nodeDomian=self.nodeDomian, nodeModifiedDomian= self.nodeModifiedDomian)", "id": "846481", "language": "Python", "matching_score": 2.2700226306915283, "max_stars_count": 2, "path": "TOR/ConnectionsHandler/Models/DNSExitNode.py" }, { "content": "'''\n This class is for DNS Resolvers information\n DNS IP and its exitnodes\n'''\n\n#\nclass DNSResolverInfo():\n #\n def __init__(self,DNSIP):\n self.DNSIP = DNSIP\n self.ExitNodelist = []\n self.ExitNodelistJSON =[]\n self.nodeCount = 0\n\n #\n def insertNode(self,Node):\n self.ExitNodelist.append(Node)\n self.ExitNodelistJSON.append(Node.JSON)\n\n #\n def reprJSON(self):\n return dict(DNSIP=self.DNSIP, ExitNodelist=self.ExitNodelistJSON,count=self.nodeCount)", "id": "9018069", "language": "Python", "matching_score": 1.1906015872955322, "max_stars_count": 2, "path": "TOR/ConnectionsHandler/Models/DNSResolverInfo.py" }, { "content": "#! /usr/bin/env python3\n\nimport glob\nimport json\n\nfrom stem.util import term\n\nNODES_PATH = 'TOR/ConnectionsHandler/Nodes/ExitNodesJSON.json'\n\n#\nclass DNSObject():\n def __init__(self,DNSIP):\n self.DNSIP = DNSIP\n self.list = []\n self.count = 0\n def insertNode(self,NodeIp):\n self.list.append(NodeIp)\n\n#\nclass ExitNodeObject():\n def __init__(self,nodeIP,nodeDomain,nodeModifiedDomainfull):\n self.nodeIP = nodeIP\n self.nodeDomian = nodeDomain\n self.nodeModifiedDomian = nodeModifiedDomainfull\n\n#\ndef loadExitNodes(dir):\n jsonFiles = glob.glob(str('%s/*.json' % dir))\n with open(jsonFiles[0]) as f:\n jsonObjects = json.load(f)\n return jsonObjects\n\n#\ndef fun3(DNSObj,WEBObj):\n count = 0\n DNSouterList = []\n for obj in DNSObj: # get all the dns ip wittout repetation\n innerList = []\n\n dnsIP = DNSObj[obj]['Request']['SrcIP'] #.encode(\"ascii\")\n dnsDomainfull = DNSObj[obj]['Request']['Domain']#.encode(\"ascii\")\n dnsDomainfull = [x.strip() for x in dnsDomainfull.split('.')][0] # remove the domain: dnstestsuite.space\n temp = ''\n dnsExitnodeIP = [x.strip() for x in dnsDomainfull.split('_')][-1:][0] # get the ip of the exitnode\n if (dnsExitnodeIP.__contains__('-')):\n if dnsIP not in DNSouterList:\n DNSouterList.append(dnsIP)\n\n DNSouterList= set(DNSouterList)\n DNSList= []\n for obj in DNSouterList:\n\n node = DNSObject(obj)\n DNSList.append(node)\n\n for DnsNodeObj in DNSList:\n tempNodeList =[]\n count = 1 + count\n for Dns in DNSObj:\n dnsIP = DNSObj[Dns]['Request']['SrcIP']\n if dnsIP == DnsNodeObj.DNSIP:\n nodeDomainfull = DNSObj[Dns]['Request']['Domain']\n nodeModifiedDomainfull = DNSObj[Dns]['Request']['modifiedDomain']\n nodeDomain = [x.strip() for x in nodeDomainfull.split('.')][0] # remove the domain: dnstestsuite.space\n dnsExitnodeIP = [x.strip() for x in nodeDomain.split('_')][-1:][0] # get the ip of the exitnode\n if (dnsExitnodeIP.__contains__('-')):\n dnsExitnodeIP = dnsExitnodeIP.replace(\"-\", \".\")\n\n if dnsExitnodeIP not in tempNodeList:\n tempNodeList.append(dnsExitnodeIP)\n exitnode=ExitNodeObject(dnsExitnodeIP,nodeDomain,nodeModifiedDomainfull)\n 
DnsNodeObj.insertNode(exitnode)\n DnsNodeObj.count += 1\n\n for DnsNodeObj in DNSList:\n index = 0\n print(term.format('DNS Resolver IP: %s - Exitnode: %d ' % (DnsNodeObj.DNSIP, DnsNodeObj.count) ,term.Color.GREEN))\n for node1 in DnsNodeObj.list:\n index += 1\n print(term.format(\" %d - %s \" % (index, node1.nodeIP),term.Color.YELLOW))\n print()\n print(+count)\n\nif __name__ == '__main__':\n DNSObj = loadExitNodes('DNS')\n WEBObj = loadExitNodes('WEB')\n print('DNSObj len: %d'% len(DNSObj))\n print('WEBObj len: %d'% len(WEBObj))\n fun3(DNSObj,WEBObj)\n", "id": "2016741", "language": "Python", "matching_score": 6.586476802825928, "max_stars_count": 2, "path": "TORResolver/DnsResolver.py" }, { "content": "'''\n This class is for DNS resolver\n'''\n\nimport glob\nimport json\nimport os\n\nfrom stem.util import term\n\nfrom TOR.Helper.Helper import Helper\nfrom TOR.Helper.Helper import MSG_TYPES\nfrom TOR.ConnectionsHandler.Models.DNSResolverInfo import DNSResolverInfo\nfrom TOR.ConnectionsHandler.Models.DNSExitNode import DNSExitNode\n\nNODES_PATH = 'TOR/ConnectionsHandler/Nodes/ExitNodesJSON.json'\nPROCESSED_DNSDATA_PATH = 'ProcessedDNSDataJSON.json' # gathered by NodeHandler class\n\n#\nclass DNSResolver():\n #\n def __init__(self, dir='DNS'):\n self.DNSObj = self.loadExitNodes(dir)\n\n #\n def loadExitNodes(self, dir):\n jsonFiles = glob.glob(str('%s/*.json' % dir))\n with open(jsonFiles[0]) as f:\n jsonObjects = json.load(f)\n Helper.printOnScreenAlways('DNS records have been loaded\\n', MSG_TYPES.YELLOW)\n return jsonObjects\n\n #\n def Normalize(self,show='yes'):\n count = 0\n DNSouterList = []\n for obj in self.DNSObj: # get all the dns ip wittout repetation\n innerList = []\n\n dnsIP = self.DNSObj[obj]['Request']['SrcIP']\n dnsDomainfull = self.DNSObj[obj]['Request']['Domain']\n dnsDomainfull = [x.strip() for x in dnsDomainfull.split('.')][0] # remove the domain: dnstestsuite.space\n temp = ''\n dnsExitnodeIP = [x.strip() for x in dnsDomainfull.split('_')][-1:][0] # get the ip of the exitnode\n if (dnsExitnodeIP.__contains__('-')):\n if dnsIP not in DNSouterList:\n DNSouterList.append(dnsIP)\n\n DNSouterList= set(DNSouterList)\n DNSList= []\n for obj in DNSouterList:\n node = DNSResolverInfo(obj)\n DNSList.append(node)\n\n\n for DnsNodeObj in DNSList:\n tempNodeList =[]\n count = 1 + count\n for Dns in self.DNSObj:\n dnsIP = self.DNSObj[Dns]['Request']['SrcIP']\n if dnsIP == DnsNodeObj.DNSIP:\n nodeDomainfull = self.DNSObj[Dns]['Request']['Domain']\n nodeModifiedDomainfull = self.DNSObj[Dns]['Request']['modifiedDomain']\n nodeDomain = [x.strip() for x in nodeDomainfull.split('.')][0] # remove the domain: dnstestsuite.space\n dnsExitnodeIP = [x.strip() for x in nodeDomain.split('_')][-1:][0] # get the ip of the exitnode\n if (dnsExitnodeIP.__contains__('-')):\n dnsExitnodeIP = dnsExitnodeIP.replace(\"-\", \".\")\n\n if dnsExitnodeIP not in tempNodeList:\n tempNodeList.append(dnsExitnodeIP)\n exitnode=DNSExitNode(dnsExitnodeIP,nodeDomain,nodeModifiedDomainfull)\n DnsNodeObj.insertNode(exitnode)\n DnsNodeObj.nodeCount += 1\n\n if show =='yes':\n for DnsNodeObj in DNSList:\n index = 0\n Helper.printOnScreenAlways('DNS Resolver IP: %s - Exitnode: %d ' % (DnsNodeObj.DNSIP, DnsNodeObj.nodeCount), MSG_TYPES.RESULT)\n for node in DnsNodeObj.ExitNodelist:\n index += 1\n Helper.printOnScreenAlways(\" %d - %s \" % (index, node.exitNodeIP), MSG_TYPES.YELLOW)\n print()\n\n Helper.printOnScreenAlways(\"We found %d DNS Resolvers \" % count, MSG_TYPES.RESULT)\n\n curpath = 
os.path.dirname(__file__)\n os.chdir(curpath)\n newJSONPath = os.path.join(curpath,PROCESSED_DNSDATA_PATH)\n Helper.storeDNSResolverData(objects=DNSList, path=newJSONPath)\n\n return DNSList\n\nif __name__ == '__main__':\n DNS = DNSResolver()\n DNS.Normalize()\n", "id": "10980457", "language": "Python", "matching_score": 3.734703779220581, "max_stars_count": 2, "path": "TOR/ConnectionsHandler/Tools/DnsResolver.py" }, { "content": "#! /usr/bin/env python3\n\nimport json\nimport os\nimport time\nimport sys\nimport stem.descriptor.remote\n\nfrom enum import Enum\nfrom pprint import pprint\nfrom stem.util import term\nfrom pathlib import Path\nfrom tqdm import tqdm\n\nfrom TOR.Helper.Helper import Helper\n\n#\nclass Node_DATA(Enum):\n Address = 1\n AllData = 2\n\n#\nclass MSG_TYPES(Enum):\n RESULT = term.Color.GREEN\n ERROR = term.Color.RED\n YELLOW = term.Color.YELLOW\n\n\nclass NodesHandler:\n '''\n Gather EXIT nodes and store them in a JSON file\n '''\n\n #\n def __init__(self, mode='none'):\n script_dir = os.path.dirname(__file__) # <-- absolute dir the script is in\n self.NODES_PATH = ('../ConnectionsHandler/Nodes/GatheredExitNodesJSON.json')\n self.NODES_PATH = os.path.join(script_dir, self.NODES_PATH)\n self.mode = '-none' #mode\n\n #\n def run(self):\n self.ExitNode()\n node_Number = self.GetJOSNInfo(Node_DATA.Address)\n return node_Number\n\n def ExitNode(self):\n count = 0\n exit_Nodes = []\n stem_Nodes=stem.descriptor.remote.get_server_descriptors()\n\n\n for desc in stem_Nodes:\n\n # CheckingRequest if the Node is an exit one\n if desc.exit_policy.is_exiting_allowed():\n count = count + 1\n Helper.printOnScreen(' %s %s' % (desc.nickname, desc.address) ,MSG_TYPES.RESULT.value, self.mode)\n exit_Nodes.append({\n 'ExitNode': {\n 'Address': desc.address,\n 'Fingerprint': desc.fingerprint,\n 'Nickname': desc.nickname,\n 'Dir_port': desc.or_port,\n 'Or_port': desc.dir_port\n }\n })\n\n # Write into Json file\n with open(self.NODES_PATH, 'w') as outfile:\n json.dump(exit_Nodes, outfile)\n\n def GetJOSNInfo(self,exit_node):\n count = 0\n with open(self.NODES_PATH) as f:\n json_Objects = json.load(f)\n\n node_number =len(json_Objects)\n for obj in tqdm(json_Objects, ncols=80, desc='Storing ExitNodes'):\n if self.mode =='-out':\n if exit_node == Node_DATA.Address:\n pprint(obj['ExitNode']['Address'].encode(\"ascii\"))\n elif exit_node == Node_DATA.AllData:\n pprint(obj)\n\n # just for showing the progress bar\n time.sleep(0.005)\n count = count + 1\n time.sleep(1)\n\n return count\n", "id": "9137687", "language": "Python", "matching_score": 4.435762405395508, "max_stars_count": 2, "path": "TOR/NodeHandler/NodesHandler.py" }, { "content": "#! 
/usr/bin/env python3\n\n\nimport json\nfrom enum import Enum\nfrom stem.util import term\n\n#\nclass TASK_MODE(Enum):\n REQUEST_DOMAIN = '-r'\n TOR_CONNECTION_CHECKING = '-d'\n DNS_0x20_CHECKING = '-cd' # Capitalization\n DNS_RESOLVER_COUNTER = '-drc' #\n\n#\nclass MODE_TYPES(Enum):\n printing = '-out'\n none = '-none'\n\n#\nclass MSG_TYPES(Enum):\n RESULT = term.Color.GREEN\n ERROR = term.Color.RED\n YELLOW = term.Color.YELLOW\n ANY = term.Color.WHITE\n\n#\nclass ComplexEncoder(json.JSONEncoder):\n def default(self, obj):\n if hasattr(obj,'reprJSON'):\n return obj.reprJSON()\n else:\n return json.JSONEncoder.default(self, obj)\n\n#\nclass EnumEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, Enum):\n return obj.name\n return json.JSONEncoder.default(self, obj)\n\n#\nclass Helper:\n\n #\n def __init__(self,mode='-none'):\n self.mode = ''\n\n #\n def printOnScreen(msg,color=MSG_TYPES.ANY,mode='-none'):\n if mode == '-out':\n print(term.format(msg,color.value))\n\n #\n def printOnScreenAlways(msg, color=MSG_TYPES.ANY):\n try:\n print(term.format(msg, color.value))\n\n except:\n print(msg) # could be like this\n\n #\n def storeExitNodesJSON(objects,path):\n try:\n exitNodes = []\n for exitNode in objects:\n exitNodes.append({\n 'ExitNode': {\n 'Address': exitNode.ipaddress,\n 'Fingerprint': exitNode.fingerprint,\n 'Nickname': exitNode.nickname,\n 'Dir_port': exitNode.or_port,\n 'Or_port' : exitNode.dir_port,\n 'Status' : exitNode.status.reprJSON()\n }\n })\n with open(path, 'w+') as outfile:\n json.dump(exitNodes, outfile)\n\n except Exception as ex:\n print(ex)\n\n def storeDNSResolverData(objects,path):\n try:\n DNSNodes = []\n for DNSNode in objects:\n DNSNodes.append({\n 'DNSResolver': {\n 'DNSIP': DNSNode.DNSIP,\n 'nodeCount': DNSNode.nodeCount,\n 'ExitNodeList' : DNSNode.ExitNodelistJSON\n }\n })\n with open(path, 'w+') as outfile:\n json.dump(DNSNodes, outfile)\n Helper.printOnScreenAlways(\"\\n*******************************************\\n\"\n \"DNS Resolver Servers information has saved.\\n\"\n \"*******************************************\",MSG_TYPES.RESULT)\n\n except Exception as ex:\n Helper.printOnScreenAlways(\"Something went wrong.\",MSG_TYPES.ERROR)\n print(ex)\n\n\n def storeJSONMethod2(objects,path):\n try:\n exitNodes = []\n for exitNode in objects:\n exitNodes.append(json.loads(exitNode.reprJSON(), cls=ComplexEncoder))\n with open(path, 'w+') as outfile:\n json.dumps(exitNodes, outfile)\n\n except Exception as ex:\n print(ex)\n\n", "id": "3074799", "language": "Python", "matching_score": 2.3497486114501953, "max_stars_count": 2, "path": "TOR/Helper/Helper.py" }, { "content": "'''\nExit node class ....\n'''\n\nimport json\n\n#\nclass ExitNode:\n #\n def __init__(self,ipaddress,fingerprint,nickname,or_port,dir_port,status):\n self.ipaddress = ipaddress\n self.fingerprint = fingerprint\n self.nickname = nickname\n self.or_port = or_port\n self.dir_port = dir_port\n self.status = status\n\n #\n def reprJSON(self):\n return dict(IpAddress =self.ipaddress,Fingerprint=self.fingerprint,Nickname= self.nickname,\n Dir_port= self.or_port,\n Or_port= self.dir_port,\n Status =self.status)\n", "id": "11272715", "language": "Python", "matching_score": 0.039949432015419006, "max_stars_count": 2, "path": "TOR/ConnectionsHandler/Models/ExitNode.py" }, { "content": "import argparse\nimport traceback\n\nfrom TOR.MainController import main\n\n#\ndef parserArgs():\n\n parser = argparse.ArgumentParser(prog='TORMAPPER', description='TORMAPPER Tool',\n epilog=\"that's how 
my Tool works\")\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument('-r', action='store_true',\n help='Send DNS reuqests - Map all the DNS resolvers & Data generation')\n group.add_argument('-cr', action='store_true', help='Check the connection reliability of the Tor exit nodes')\n group.add_argument('-cd', action='store_true', help='Check for DNS-0x20 bit encoding. *')\n group.add_argument('-pa', action='store_true', help='Check for DNS publicly Accessible. *')\n group.add_argument('-drc', action='store_true', help='Fouce the not response mode. *')\n\n\n parser.add_argument('-g', action='store_true', help='Graph generator')\n parser.add_argument('-m', metavar='number of requests needed to be sent', type=int,\n help=\"how many request do you want to send over each exit node\")\n\n parser.add_argument('-n', metavar='Number of nodes needed', type=int,\n help=\"how many node do you want to try to connection through\")\n\n parser.add_argument('-out', action='store_true', help='Print in details - use for debugging usually.')\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n\n try:\n args = parserArgs()\n main(args)\n\n except Exception as ex:\n print(\" ........... Testing .........\")\n print(ex)\n print('runDns - MAIN: \\n%s ' % traceback.format_exc())\n setArgs = argparse.Namespace(r=True,cr =False,cd=False,pa=False,drc=False, g=True, m=100, n=100000, out= False)\n", "id": "4120330", "language": "Python", "matching_score": 3.468412399291992, "max_stars_count": 2, "path": "TOR/TORMAPPER.py" }, { "content": "#! /usr/bin/env python3\n\nimport argparse\nimport dnsServer\nfrom Helper.Helper import ADVERSARY_TASK_MODE\nimport traceback\n\ndef parserArgs():\n defaultPort = 53\n set_Adv_Required = set([e.value for e in ADVERSARY_TASK_MODE])\n\n parser = argparse.ArgumentParser(prog='DNS', description='DNS server for a special needs :)',\n epilog=\"And that's how my DNS server works\")\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument('-s', action='store_true', help='Run on the server IP')\n group.add_argument('-l', action='store_true', help='Run on the local IP')\n parser.add_argument('-lc','--rcase', action = 'store_true', help='For randomizing lettercase in the dns reply')\n parser.add_argument('-v','--adversary', action = 'store_true', help=\"Activate ADVERSARY mode, you can specify '-t' option\")\n parser.add_argument('-t','--task',nargs='?', choices=set_Adv_Required, default='rboth', const='rboth',\n help='ADVERSARY mode task: rport: randomize Port Number || ' +\n 'rid: randomize Request Id || rboth: randomise both' +\n ', default: rboth')\n parser.add_argument('-p', '--port', type=int, default=defaultPort, help=('Which port the DNS is going to use, default: %d' % defaultPort))\n parser.add_argument('-dont', action='store_true', help='Activate the DNS to not respond to particular requests if they contain specific words, '\n 'this is used to see how many queries the DNS resolver will issue per domain name when '\n 'there is no response.')\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n try:\n args = parserArgs()\n dnsServer.run(args)\n\n except Exception as ex:\n print(\" ........... 
Testing .........\")\n print(ex)\n print('runDns - MAIN: \\n%s ' % traceback.format_exc())\n setArgs = argparse.Namespace(l=True, adversary=False, port=53, rcase=False, s=False, task='rboth', dont=True)\n dnsServer.run(setArgs)\n", "id": "9343955", "language": "Python", "matching_score": 1.7221676111221313, "max_stars_count": 2, "path": "DNS/RunDns.py" }, { "content": "#! /usr/bin/env python3\n\nimport glob\nimport json\nimport random\nimport os\nimport time\nimport matplotlib.pyplot as plt\nimport socket\n\n\nfrom builtins import print\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom collections import Counter\nfrom enum import Enum\nfrom multiprocessing import Pool\n\nfrom TOR.Helper.Helper import Helper\nfrom TOR.Helper.Helper import MSG_TYPES\nfrom TOR.Helper.Helper import MODE_TYPES\n\nfrom subprocess import DEVNULL, STDOUT, check_call, check_output\n\nplt.style.use('seaborn')\nRequests = [] # to store all the requests from text file\nAllLINE = [] # to store all the lines from text file\nDNSs = []\nMAXNUMBER = 30000 ## -1 means parse all the files\nMINNUMBER_DrawGraph = 3000 #000 #2000 ## -1 means parse all the files\n\n# TODO: Need to be Dynamic\n#FILE_PATH ='C:\\\\DNS9_back_new_logo_6/*.txt'\nFILE_PATH =\"C:/Users/<NAME>/Desktop/UCL/Term 2/DS/DNS_Project/TOR/GatheredFiles/Logs/*.txt\"\n\n#\nclass GRAPHS(Enum):\n ALL = 0\n HISTOGRAM = 1\n SCATTER = 2\n\n#\nclass DNSInfo():\n def __init__(self,DNSIP):\n self.DNSIP = DNSIP\n self.listIDs = []\n self.listPortNumbers = []\n self.listPortNumberAndId = [] # find anyrelastionship between them\n self.count = 0\n\n def insertPortAndID(self,RequestId,PortNumber):\n RequestId_ = int(RequestId)\n PortNumber_ = int(PortNumber)\n self.listIDs.append(RequestId_)\n self.listPortNumbers.append(PortNumber_)\n self.listPortNumberAndId.append((RequestId_,PortNumber_))\n\n#\nclass RequestInfo():\n def __init__(self, requestId, srcIP, srcPort):\n self.requestId = requestId\n self.srcIP = srcIP\n self.srcPort = srcPort\n self.Mix = int(requestId) + int(srcPort)\n self.Min = int(requestId) - int(srcPort)\n\n#\ndef filterLine(info):\n RequestId = 're'\n # TODO: Fix the repeation in PORT NUMBER and REQUEST ID:\n previousPortNumber = ''\n previousRequestId = ''\n for item in info:\n if 'RequestId' in item:\n RequestId = item\n elif 'SrcIP' in item:\n SrcIP = item\n elif 'SrcPort' in item:\n SrcPort = item\n\n RequestId_ = findValue(RequestId)\n SrcIP_ = findValue(SrcIP)\n SrcPort_ = findValue(SrcPort)\n\n # Create instance form RequestInfo class\n request = RequestInfo(RequestId_,SrcIP_,SrcPort_)\n\n return request\n\n#\ndef findValue(value):\n info = value.split(':')\n return info[1].strip() # get the second part of the ExitNodelist, For exmaple: portNumber : 39879\n\n#\ndef getInfoFormTextFiles(PATH=FILE_PATH):\n '''\n Get info form text file and store it in ExitNodelist and return the total numbe\n '''\n\n temp_Requests =[]\n totalLines = 0\n first = True\n # TODO: this should be dynamic\n txtFiles = glob.glob(PATH)\n previousPortNumber = ''\n previousRequestId = ''\n for txtfile in txtFiles:\n with open(txtfile) as file:\n for line in file:\n if 'RecordType' in line or 'Domain' in line or 'RequestId' in line:\n # get all the legitimate/reasonable records/lines from the text fileS\n AllLINE.append(line) # all the records\n info = line.split('|')\n # filter line, get sendRequests, IP and port number form the text.\n for item in info:\n if 'RequestId' in item:\n RequestId = item\n elif 'SrcIP' in item:\n SrcIP = item\n elif 'SrcPort' in item:\n SrcPort = 
item\n\n RequestId_ = findValue(RequestId)\n SrcIP_ = findValue(SrcIP)\n SrcPort_ = findValue(SrcPort)\n if first is True: # To avoid sendRequests repetation\n previousPortNumber = SrcPort_\n previousRequestId =RequestId_\n first= False\n temp_Requests.append(filterLine(info))\n totalLines += 1\n\n elif previousPortNumber != SrcPort_ and previousRequestId != RequestId_:\n temp_Requests.append(filterLine(info))\n previousPortNumber = SrcPort_\n previousRequestId = RequestId_\n totalLines += 1\n\n return totalLines, temp_Requests,AllLINE\n\n#\ndef dumper(obj):\n try:\n return obj.toJSON()\n\n except:\n return obj.__dict__\n\n#\ndef normalizeDNSRequests(objects): #\n '''\n Plot Request Id 1/ Port Nnumber 2\n '''\n\n listDNSTemp = []\n listDNS = []\n graphName = ''\n graphTitle = ''\n\n print(1)\n for obj in objects:\n listDNSTemp.append(obj.srcIP)\n\n print(listDNSTemp.__len__())\n listDNSTemp = set(listDNSTemp)\n\n print(2)\n try:\n for IP in listDNSTemp:\n listDNS.append(DNSInfo(IP))\n\n print(3)\n index = 0\n for dns in listDNS:\n print(index)\n for obj in objects:\n if obj.srcIP == dns.DNSIP:\n dns.insertPortAndID(obj.requestId,obj.srcPort)\n if index == MAXNUMBER:\n break\n index += 1\n\n print(4)\n index = 0\n for dns in listDNS:\n dns.listIDs.sort()\n dns.listPortNumbers.sort()\n dns.listIDs = Counter(dns.listIDs)\n dns.listPortNumbers = Counter(dns.listPortNumbers)\n if index == MAXNUMBER:\n break\n print(0)\n\n except Exception as ex:\n print(ex) # add it to the log\n\n with open('JSON/DnsFilterList.json', 'w') as F:\n # Use the json dumps method to write the ExitNodelist to disk\n F.write(json.dumps(listDNS, default=dumper))\n print('writing listDNS is done')\n\n#\ndef drawGraphScattor(objects,option, mode=0):\n '''\n Draw Request options :Id 1/ Port Nnumber 2 ||| mode: 0:normal / 1: one DNS\n '''\n\n list = []\n graphName = ''\n graphTitle = ''\n requestCount = objects.__len__()\n store_Path = ''\n if requestCount > MINNUMBER_DrawGraph:\n if(option == 1): # ID graph\n for i in objects:\n list.append(i.requestId)\n graphName ='Request Id'\n graphTitle= 'Request IDs Distribution - Requests: ' + str(requestCount)\n\n elif(option == 2): # Port number graph\n for i in objects:\n list.append(i.srcPort)\n graphName ='Port Numbers'\n graphTitle = 'Port Number Distribution - Requests: ' + str(requestCount)\n elif (option == 3):\n for i in objects:\n list.append(i.Mix)\n graphName = 'Mix Numbers'\n graphTitle = ' Distribution'\n elif (option == 4):\n for i in objects:\n list.append(i.Min)\n graphName = 'Mix Numbers'\n graphTitle = ' Distribution'\n try:\n if mode == 1: # draw grphs for every DNS IP\n graphTitle += ' ' + objects[0].srcIP + ' Requests: ' + str(requestCount)\n if (option == 1):\n store_Path = \"Graphs/DNS_Graphs/ByID/%s.png\" % (graphName + '_' + objects[0].srcIP)\n elif (option == 2):\n store_Path = \"Graphs/DNS_Graphs/ByPort/%s.png\" % (graphName + '_' + objects[0].srcIP)\n else:\n store_Path = \"Graphs/%s.png\" % (graphName)\n\n list.sort()\n unique_List = Counter(list)\n set(unique_List)\n x = []\n y = []\n markersize = 1\n if requestCount > 1500:\n for i in unique_List:\n newVal = int(i)\n newValFeq = random.uniform(-0.5,0.9) + float(unique_List[i]) # add some noise to help to read the graph\n x.append(newVal)\n y.append(newValFeq)\n plt.plot(x, y, linestyle='', marker='o', markersize=0.7)\n else:\n for i in unique_List:\n newVal = int(i)\n newValFeq = float(unique_List[i]) # add some noise to help to read the graph\n x.append(newVal)\n y.append(newValFeq)\n plt.plot(x, y, 
linestyle='', marker='o', markersize=2)\n\n plt.xlim([-500, 70000]) # fix the x axis\n plt.xlabel(graphName)\n plt.ylabel(\"Frequency\")\n plt.title(graphTitle)\n if os.path.isfile(store_Path):\n print('found %s' % store_Path)\n os.remove(store_Path) # Opt.: os.system(\"rm \"+strFile)\n plt.savefig(store_Path)\n plt.clf()\n\n except Exception as ex:\n print('In drawGraph' + str(ex))\n\n#\ndef drawGraph(objects, option, mode=0,graphType=GRAPHS.ALL):\n '''\n Plot Request options :Id 1/ Port Nnumber 2 ||| mode: 0:normal / 1: one DNS\n '''\n\n list = []\n graphName = ''\n graphTitle = ''\n requestCount = objects.__len__()\n storePathHistogram = ''\n storePathScatter = ''\n if requestCount > MINNUMBER_DrawGraph:\n if(option == 1): # ID graph\n for i in objects:\n list.append(int(i.requestId))\n graphName ='Request Id'\n graphTitle= 'Request IDs Distribution - Requests: ' + str(requestCount)\n\n elif(option == 2): # Port number graph\n for i in objects:\n list.append(int(i.srcPort))\n graphName ='Port Numbers'\n graphTitle = 'Port Number Distribution - Requests: ' + str(requestCount)\n elif (option == 3):\n for i in objects:\n list.append(i.Mix)\n graphName = 'Mix Numbers'\n graphTitle = ' Distribution'\n elif (option == 4):\n for i in objects:\n list.append(i.Min)\n graphName = 'Mix Numbers'\n graphTitle = ' Distribution'\n try:\n srcIP = objects[0].srcIP\n if mode == 1: # draw grphs for every DNS IP\n graphTitle += ' - IP: ' + srcIP #+ ' Requests: ' + str(requestCount)\n if (option == 1):\n storePathHistogram = \"Graphs/DNS_Graphs/ByID/H_%s.png\" % (graphName + '_' + srcIP)\n storePathScatter = \"Graphs/DNS_Graphs/ByID/S_%s.png\" % (graphName + '_' + srcIP)\n elif (option == 2):\n storePathHistogram = \"Graphs/DNS_Graphs/ByPort/H_%s.png\" % (graphName + '_' + srcIP)\n storePathScatter = \"Graphs/DNS_Graphs/ByPort/S_%s.png\" % (graphName + '_' + srcIP)\n else:\n storePathHistogram = \"Graphs/H_%s.png\" % (graphName)\n storePathScatter = \"Graphs/S_%s.png\" % (graphName)\n\n list.sort()\n if graphType == GRAPHS.ALL:\n plt.hist(list, bins=10,rwidth=0.9)\n plt.xlim([-500, 70000]) # fix the x axis\n plt.xlabel(graphName)\n plt.ylabel(\"Frequency\")\n plt.title(graphTitle)\n if os.path.isfile(storePathHistogram):\n os.remove(storePathHistogram) # Opt.: os.system(\"rm \"+strFile)\n plt.savefig(storePathHistogram)\n Helper.printOnScreenAlways(' H_%s Saved' % srcIP, MSG_TYPES.RESULT)\n plt.clf()\n\n unique_List = Counter(list)\n set(unique_List)\n\n x = []\n y = []\n markersize = 1\n if requestCount > 1500:\n for i in unique_List:\n newVal = int(i)\n newValFeq = random.uniform(-0.5,0.9) + float(unique_List[i]) # add some noise to help to read the graph\n x.append(newVal)\n y.append(newValFeq)\n plt.plot(x, y, linestyle='', marker='o', markersize=0.7)\n else:\n for i in unique_List:\n newVal = int(i)\n newValFeq = float(unique_List[i]) # add some noise to help to read the graph\n x.append(newVal)\n y.append(newValFeq)\n plt.plot(x, y, linestyle='', marker='o', markersize=2)\n\n if os.path.isfile(storePathScatter):\n os.remove(storePathScatter) # Opt.: os.system(\"rm \"+strFile)\n\n plt.xlim([-500, 70000]) # fix the x axis\n plt.xlabel(graphName)\n plt.ylabel(\"Frequency\")\n plt.title(graphTitle)\n plt.savefig(storePathScatter)\n Helper.printOnScreenAlways(' S_%s Saved' % srcIP, MSG_TYPES.RESULT)\n plt.clf()\n\n elif graphType == GRAPHS.HISTOGRAM:\n plt.hist(list, bins=10,rwidth=0.9)\n plt.xlim([-500, 70000]) # fix the x axis\n plt.xlabel(graphName)\n plt.ylabel(\"Frequency\")\n if 
os.path.isfile(storePathHistogram):\n os.remove(storePathHistogram) # Opt.: os.system(\"rm \"+strFile)\n plt.savefig(storePathHistogram)\n Helper.printOnScreenAlways(' H_%s Saved' % srcIP,MSG_TYPES.RESULT)\n plt.clf()\n\n elif graphType == GRAPHS.SCATTER:\n unique_List = Counter(list)\n set(unique_List)\n x = []\n y = []\n markersize = 1\n if requestCount > 1500:\n for i in unique_List:\n newVal = int(i)\n newValFeq = random.uniform(-0.5, 0.9) + float(\n unique_List[i]) # add some noise to help to read the graph\n x.append(newVal)\n y.append(newValFeq)\n plt.plot(x, y, linestyle='', marker='o', markersize=0.7)\n\n else:\n for i in unique_List:\n newVal = int(i)\n newValFeq = float(unique_List[i]) # add some noise to help to read the graph\n x.append(newVal)\n y.append(newValFeq)\n\n plt.plot(x, y, linestyle='', marker='o', markersize=2)\n\n plt.xlim([-500, 70000]) # fix the x axis\n plt.xlabel(graphName)\n plt.ylabel(\"Frequency\")\n plt.title(graphTitle)\n if os.path.isfile(storePathScatter):\n print('found %s' % storePathScatter)\n os.remove(storePathScatter) # Opt.: os.system(\"rm \"+strFile)\n plt.savefig(storePathScatter)\n Helper.printOnScreenAlways(' H_%s Saved' % srcIP,MSG_TYPES.RESULT)\n plt.clf()\n\n except Exception as ex:\n print('In drawGraph' + str(ex))\n\n#\ndef drawGraphIDPORTNumber(objects,option, mode=0):\n '''\n Plot Request options :Id 1/ Port Nnumber 2 ||| mode: 0:normal / 1: one DNS\n '''\n list = []\n graphName = ''\n graphTitle = ''\n requestCount = objects.__len__()\n store_Path = ''\n if requestCount > MINNUMBER_DrawGraph:\n if(option == 1): # ID/PORT graph\n for obj in objects:\n list.append([obj.requestId,obj.srcPort])\n graphName ='Request Id-Port Number'\n graphTitle= 'Request ID and Port Number Distribution - Requests: ' + str(requestCount)\n\n try:\n graphName = graphName.replace(' ','_')\n store_Path = \"Graphs/%s.png\" % (graphName)\n list.sort()\n unique_List = list\n x = []\n y = []\n markersize = 0.167\n for obj in unique_List:\n portNumberVal = int(obj[1])\n requestIDVal = int(obj[0])\n x.append(portNumberVal)\n y.append(requestIDVal)\n\n plt.plot(x, y, linestyle='', marker='o', markersize=markersize)\n plt.ylim([-3000, 70000]) # fix the 8 axis\n plt.xlim([-2000, 70000]) # fix the x axis\n plt.xlabel(\"Port Number\")\n plt.ylabel(\"Request Id\")\n plt.title(graphTitle)\n if os.path.isfile(store_Path):\n print('found %s' % store_Path)\n os.remove(store_Path) # Opt.: os.system(\"rm \"+strFile)\n plt.savefig(store_Path)\n plt.clf()\n\n except Exception as ex:\n print('In drawGraph' + str(ex))\n\n#\ndef drawGraphIDPORTNumber3D(objects,option, mode=0):\n '''\n Plot Request options :Id 1/ Port Nnumber 2 ||| mode: 0:normal / 1: one DNS\n '''\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n\n list = []\n graphName = ''\n graphTitle = ''\n requestCount = objects.__len__()\n store_Path = ''\n if requestCount > MINNUMBER_DrawGraph:\n if(option == 1): # ID/PORT graph\n for obj in objects:\n list.append((obj.requestId,obj.srcPort))\n graphName ='Request Id/PORT Number'\n graphTitle= 'Request IDs and Port Number Distribution - Requests: ' + str(requestCount)\n\n try:\n Path = \"Graphs/%s.png\" % (graphName)\n list.sort()\n unique_List = Counter(list)\n x = []\n y = []\n z =[]\n markersize = 0.1\n index = 0\n for obj in unique_List:\n index += 1\n portNumberVal = int(obj[0])\n requestIDVal = int(obj[1])\n freg = int(unique_List[obj])\n if freg > 3:\n print(obj)\n x.append(portNumberVal)\n y.append(requestIDVal)\n z.append(freg)\n\n ax.scatter(x, 
y, z, c='r', marker='.' ,s=0.1) # markersize=markersize)\n print('savefig0')\n\n if os.path.isfile(store_Path):\n print('found %s' % store_Path)\n os.remove(store_Path) # Opt.: os.system(\"rm \"+strFile)\n print(store_Path)\n print('d')\n\n plt.show()\n except Exception as ex:\n print('In drawGraph: ' + str(ex))\n\n#\ndef writeAllTextFiles(all):\n '''\n Write/log all the files into json file - EVERYTHING\n '''\n\n with open('JSON/AllTextFiles.json', 'w') as F:\n # Use the json dumps method to write the ExitNodelist to disk\n F.write(json.dumps(all, default=dumper))\n print('writing all text files is done')\n\n#\ndef writeAllRequests(requests):\n '''\n Write/logs all the Requests into json file - SEMI-FILTERED\n '''\n\n with open('JSON/AllRequestsInfo.json', 'w') as F:\n # Use the json dumps method to write the ExitNodelist to disk\n print(requests.__len__())\n F.write(json.dumps(requests, default=dumper))\n print('writing all requests info is done')\n\n#\ndef getAllDNSIPs(requests):\n\n listDNSTemp = []\n for obj in requests:\n listDNSTemp.append(obj.srcIP)\n listDNSTemp = set(listDNSTemp)\n with open('JSON/AllDNSIPsFiles.txt', 'w') as F:\n for obj in listDNSTemp:\n F.writelines(obj+'\\n')\n\n print('writing all DNS IPs into a text file is done')\n\n# write the Requests into json file - for espicall port/Ip - for debugging purposes\n# unfortunately this method is not accurate, because we check if ID/PORT are in the text but we can't tell which one is the PORT or which one is ID\ndef writeInfoForSpicalIP(IP,requests,ID=None,PORT=None,DRAW=False,index=0):\n list = []\n temp_Requests = []\n\n txtFiles = glob.glob(FILE_PATH)\n if ID is not None and PORT is not None:\n\n filename = ('JSON/ByIP/IP_%s_ID_%s_PORT_%s.json' % (IP, ID, PORT))\n for line in requests:\n if IP in line.srcIP and ID in line.requestId and PORT in line.srcPort:\n list.append(line)\n\n elif ID is not None:\n filename = ('JSON/ByID/IP_%s_ID_%s.json' % (IP, ID))\n for line in requests:\n if IP in line.srcIP and ID in line.requestId:\n temp_Requests.append(line)\n list.append(line)\n\n elif PORT is not None:\n filename = ('JSON/ByPort/IP_%s_PORT_%s.json' % (IP, PORT))\n for line in requests:\n if IP in line.srcIP and PORT in line.srcPort:\n temp_Requests.append(line)\n list.append(line)\n else:\n filename = ('JSON/ByIP/IP_%s.json' % IP)\n for line in requests:\n if IP == line.srcIP:\n temp_Requests.append(line)\n list.append(line)\n\n if list.__len__() > MINNUMBER_DrawGraph:\n print(\"JSON file are stored %s\" % filename)\n with open(filename, 'w') as F:\n # Use the json dumps method to write the ExitNodelist to disk\n F.write(json.dumps(list, default=dumper))\n print('Writing All Requests Info is done : %s' % str(list.__len__()))\n\n if DRAW is True:\n # TODO: add enum\n requestCount = temp_Requests.__len__()\n if requestCount > MINNUMBER_DrawGraph:\n drawGraph(temp_Requests,option=1,mode=1) # Request Ids\n drawGraph(temp_Requests,option=2,mode=1) # port Number\n else:\n Helper.printOnScreenAlways('%d - Ignored: %s - Requests: %d' % (index,temp_Requests[0].srcIP, requestCount),\n MSG_TYPES.YELLOW)\n\n#\ndef DrawGraphsForAll(requests):\n listDNSTemp = []\n for obj in requests:\n listDNSTemp.append(obj.srcIP)\n listDNSTemp = set(listDNSTemp)\n index = 1;\n for ip in listDNSTemp:\n writeInfoForSpicalIP(ip, requests,DRAW=True,index=index)\n index= index +1\n\n#\ndef makeDirectories():\n '''\n Make the directories in case they are missing.\n '''\n\n if not os.path.exists('Graphs'):\n os.makedirs('Graphs/DNS_Graphs')\n 
os.makedirs('Graphs/DNS_Graphs/ByID')\n os.makedirs('Graphs/DNS_Graphs/ByPort')\n elif not os.path.exists('Graphs/DNS_Graphs'):\n os.makedirs('Graphs/DNS_Graphs')\n os.makedirs('Graphs/DNS_Graphs/ByID')\n os.makedirs('Graphs/DNS_Graphs/ByPort')\n else:\n if not os.path.exists('Graphs/DNS_Graphs/ByID'):\n os.makedirs('Graphs/DNS_Graphs/ByID')\n if not os.path.exists('Graphs/DNS_Graphs/ByPort'):\n os.makedirs('Graphs/DNS_Graphs/ByPort')\n\n if not os.path.exists('JSON'):\n os.makedirs('JSON/ByID')\n os.makedirs('JSON/ByIP')\n os.makedirs('JSON/ByPort')\n else:\n if not os.path.exists('JSON/ByID'):\n os.makedirs('JSON/ByID')\n if not os.path.exists('JSON/ByIP'):\n os.makedirs('JSON/ByIP')\n if not os.path.exists('JSON/ByPort'):\n os.makedirs('JSON/ByPort')\n\n#\ndef timing(f):\n def wrap(*args):\n time1 = time.time()\n ret = f(*args)\n time2 = time.time()\n print('{:s} function took {:.3f} ms'.format(f.__name__, (time2-time1)*1000.0))\n\n return ret\n return wrap\n\n#\nclass NodeObject():\n def __init__(self,DNSIP):\n self.DNSIP = DNSIP\n self.list = []\n self.count = 0\n def insertNode(self,NodeIp):\n self.list.append(NodeIp)\n\n#\ndef loadExitNodes(dir):\n jsonFiles = glob.glob(str('%s/*.json' % dir))\n with open(jsonFiles[0]) as f:\n jsonObjects = json.load(f)\n return jsonObjects\n\n#\ndef graphTask():\n # make the directories in case they are missing\n makeDirectories()\n\n DrawGraphsforALL = False\n # Helper.printOnScreenAlways(\"TEST\",MSG_TYPES.RESULT)\n print('Files Directory: %s' % FILE_PATH)\n total, requests,all = getInfoFormTextFiles()\n print('Found %d records: ' % total)\n\n # TODO: need to be refactored/renamed- be more clear\n writeAllTextFiles(all)\n # write all the Requests into json file\n writeAllRequests(requests)\n getAllDNSIPs(requests)\n\n # match DNS with its port/ID they used.\n # normalizeDNSRequests(Requests)\n\n # for getting info for especial ip, port and ID\n # draw graphs for all the DNS records\n if DrawGraphsforALL is True:\n # print('Draw Graphs for each DNS records that has more than %d records...' % MINNUMBER_DrawGraph)\n Helper.printOnScreenAlways('Draw Graphs for each DNS that has more than %d records...' 
% MINNUMBER_DrawGraph,\n MSG_TYPES.RESULT)\n DrawGraphsForAll(requests)\n Helper.printOnScreenAlways('Done, Graphs are stored in the following directory: Graphs/DNS_Graphs/',\n MSG_TYPES.RESULT)\n\n drawGraphIDPORTNumber(requests, 1)\n\n#\ndef GetAllResolversInfo():\n # list = json.loads(listDNSTemp)\n list\n index = 1\n with open('JSON/AllDNSResolversInfo.json') as f:\n json_Objects = json.load(f)\n # Random\n # random.shuffle(json_Objects)\n\n accessible = 0\n inaccessible = 0\n for node in json_Objects:\n accesse = node[1]\n if accesse == True:\n accessible +=1\n elif accesse == False:\n inaccessible +=1\n\n print('%d DNS resolver can be accessible directly' % accessible)\n print('%d DNS resolver cannot be accessible directly' % inaccessible)\n\n#\ndef ResolverTask():\n\n print('Files Directory: %s' % FILE_PATH)\n total, requests, all = getInfoFormTextFiles()\n print('Found %d records: ' % total)\n\n # TODO: need to be refactored/renamed- be more clear\n # write all the Requests into json file\n getAllDNSIPs(requests=requests)\n CheckForPubliclyAccessible()\n\n#\ndef CheckForPubliclyAccessible():\n\n listDNSTempIP = []\n listDNSTemp = []\n index = 1\n with open('JSON/AllDNSIPsFiles.txt', 'r') as file:\n for obj in file:\n obj= obj.rstrip()\n listDNSTempIP.append(obj)\n\n for IP in listDNSTempIP:\n connected = checkDNSIP(IP)\n print('%d - %s : %s' % (index, IP ,str(connected)))\n listDNSTemp.append([IP,connected])\n index += 1\n\n with open('JSON/AllDNSResolversInfo.json', 'w') as F:\n # Use the json dumps method to write the ExitNodelist to disk\n F.write(json.dumps(listDNSTemp, default=dumper))\n print('writing all requests info is done')\n print('writing all DNS IPs into a text file is done')\n\n#\ndef checkDNSIP(ip):\n #ip = '8.8.8.8'\n try:\n command = 'dig +short +tries=1 DNS_Checker.dnstestsuite.space @%s' % ip\n result = check_output(command, shell=True) #stdout=DEVNULL, stderr=STDOUT)\n if result.decode(\"utf-8\").rstrip() == '172.16.17.32': #ip address of the website\n return True\n else:\n return False\n except Exception as ex:\n return False\n\n return False\n\n# TODO: add options : 1- fetch the new files or process the old ones\nif __name__ == '__main__':\n graphTask()\n", "id": "4717070", "language": "Python", "matching_score": 8.744141578674316, "max_stars_count": 2, "path": "TORResolver/DNSInfoGraph.py" }, { "content": "'''\n This class is for DNS Resolvers information\n'''\n\n\nimport glob\nimport json\nimport random\nimport os\nimport time\n\nimport matplotlib.pyplot as plt\n\nfrom builtins import print\nfrom collections import Counter\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom enum import Enum\n\nfrom TOR.Helper.Helper import Helper\nfrom TOR.Helper.Helper import MSG_TYPES\nfrom TOR.Helper.Helper import MODE_TYPES\n\n\nplt.style.use('seaborn')\nRequests = [] # to store all the requests from text file\nAllLINE = [] # to store all the lines from text file\nDNSs = []\nMAXNUMBER = 30000 ## -1 means parse all the files\nMINNUMBER_DrawGraph = 3000 #000 #2000 ## -1 means parse all the files\n\nFILE_PATH =\"C:/Users/<NAME>/Desktop/UCL/Term 2/DS/DNS_Project/TOR/GatheredFiles/Logs/*.txt\"\n\n\n#\nclass DNSInfo():\n #\n def __init__(self,DNSIP):\n self.DNSIP = DNSIP\n self.listIDs = []\n self.listPortNumbers = []\n self.listPortNumberAndId = [] # find anyrelastionship between them\n self.count = 0\n #\n def insertPortAndID(self,RequestId,PortNumber):\n RequestId_ = int(RequestId)\n PortNumber_ = int(PortNumber)\n self.listIDs.append(RequestId_)\n 
self.listPortNumbers.append(PortNumber_)\n self.listPortNumberAndId.append((RequestId_,PortNumber_))\n\n#\nclass RequestInfo():\n def __init__(self, requestId, srcIP, srcPort, requestIP=None, domain=None, modifiedDomain=None):\n self.requestId = requestId\n self.srcIP = srcIP\n self.srcPort = srcPort\n self.requestIP =requestIP\n self.domain =domain\n self.modifiedDomain = modifiedDomain\n #self.Mix = int(requestId) + int(srcPort)\n #self.Min = int(requestId) - int(srcPort)\n\n#\ndef filterLine(info):\n RequestId = 're'\n # TODO: Fix the repetition in PORT NUMBER and REQUEST ID:\n previousPortNumber = ''\n previousRequestId = ''\n for item in info:\n if 'RequestId' in item:\n RequestId = item\n elif 'SrcIP' in item:\n SrcIP = item\n elif 'SrcPort' in item:\n SrcPort = item\n elif 'SrcIPs' in item:\n SrcIP = item\n elif 'SrcPorts' in item:\n SrcPort = item\n\n RequestId_ = findValue(RequestId)\n SrcIP_ = findValue(SrcIP)\n SrcPort_ = findValue(SrcPort)\n\n # Create an instance of the RequestInfo class\n request = RequestInfo(RequestId_,SrcIP_,SrcPort_)\n # Return the instance so it can be added to the Requests list\n return request\n\ndef findValue(value):\n info = value.split(':')\n return info[1].strip() # get the second part of the split value, For example: portNumber : 39879\n\n\n#\ndef writeAllTextFiles(all):\n '''\n Write/log all the files into json file - EVERYTHING\n '''\n\n with open('JSON/AllTextFiles.json', 'w') as F:\n # Use the json dumps method to write the ExitNodelist to disk\n F.write(json.dumps(all, default=dumper))\n print('writing all text files is done')\n\n#\ndef writeAllRequests(requests):\n '''\n Write/log all the Requests into json file - SEMI-FILTERED\n '''\n with open('JSON/AllRequestsInfo.json', 'w') as F:\n # Use the json dumps method to write the ExitNodelist to disk\n print(requests.__len__())\n F.write(json.dumps(requests, default=dumper))\n print('writing all requests info is done')\n\n#\ndef getInfoFormTextFiles(PATH=FILE_PATH):\n '''\n Get info from the text files, store it in a list and return the total number\n '''\n\n temp_Requests =[]\n totalLines = 0\n first = True\n # TODO: this should be dynamic\n txtFiles = glob.glob(PATH)\n previousPortNumber = ''\n previousRequestId = ''\n\n for txtfile in txtFiles:\n with open(txtfile) as file:\n\n for line in file:\n if ('RecordType' in line or 'Domain' in line or 'RequestId' in line) and 'check' in line.lower() :\n # get all the legitimate/reasonable records/lines from the text files\n AllLINE.append(line) # all the records\n info = line.split('|')\n # filter line, get the request ID, IP and port number from the text.\n for item in info:\n if 'RequestId' in item:\n RequestId = item\n elif 'SrcIP' in item:\n SrcIP = item\n elif 'SrcPort' in item:\n SrcPort = item\n\n RequestId_ = findValue(RequestId)\n SrcIP_ = findValue(SrcIP)\n SrcPort_ = findValue(SrcPort)\n\n if first is True: # To avoid request repetition\n previousPortNumber = SrcPort_\n previousRequestId =RequestId_\n first= False\n temp_Requests.append(filterLine(info))\n totalLines += 1\n\n elif previousPortNumber != SrcPort_ and previousRequestId != RequestId_:\n temp_Requests.append(filterLine(info))\n previousPortNumber = SrcPort_\n previousRequestId = RequestId_\n totalLines += 1\n\n return totalLines, temp_Requests,AllLINE\n#\ndef dumper(obj):\n try:\n return obj.toJSON()\n\n except:\n return obj.__dict__", "id": "8748301", "language": "Python", "matching_score": 2.718737840652466, "max_stars_count": 2, "path": "TOR/ConnectionsHandler/Models/DNSRecordParser.py" }, { "content": "#! 
/usr/bin/env python3\n\nimport datetime\nimport getopt\nimport json\nimport os\nimport random\nimport socket\nimport glob\nimport sys\nimport logging\n\nfrom _socket import SOL_SOCKET\nfrom enum import Enum\nfrom stem.util import term\n\nVERSION = '0.97 F b'\nDEBUG = False\nPORT = 53\nIP_ADDRESS_LOCAL = '127.0.0.1'\nIP_ADDRESS_SERVER = '172.31.16.226'\nJsonRequestsPATH = 'JSON/DNSRequestNodes'\nJsonRequestsPATHCheck = 'JSON/CheckingRequest/DNSRequestNodes'\n\nCOUNTER = 0\n# Fix\nFixPort = True #True # try all the possible port\nFixRequestId = False #False # try all the possible request ID\n\nclass RECORD_TYPES(Enum):\n A = b'\\x00\\x01' # specifies IP4 Address\n CNAME = b'\\x00\\x05' # aliases\n MX = b'\\x00\\x0f' # mail exchange server for DNS\n NS = b'\\x00\\x02' # authoritative name server\n TXT = b'\\x00\\x10' # arbitrary non-formatted text string.\n AAAA = b'\\x00\\x1c' # specifies IP6 Address\n ANY = b'\\x00\\xff'\n\n# <editor-fold desc=\"******************* Random functions *******************\">\n\ndef log_incoming(value):\n file = Log(filename='incoming_request', mode='out')\n file.wirteIntoFile(value)\n\n# TODO: Need refactor- NOT IMPORTANT\ndef printDebugMode(values):\n if DEBUG is True: # Debug mode only\n for string in values:\n print(string)\n\n# option: 1 full (time+date)\n# option: 2 date\n# option: 3 time\ndef getTime(opt=1):\n date = datetime.datetime.now()\n if opt == 1: # full\n return (((str(date)).split('.')[0]).split(' ')[1] + ' ' + ((str(date)).split('.')[0]).split(' ')[0])\n if opt == 2: # date\n return (((str(date)).split('.')[0]).split(' ')[0])\n if opt == 3: # time\n return (((str(date)).split('.')[0]).split(' ')[1])\n\n#\ndef int_to_hex(value):\n h = hex(value) # 300 -> '0x12c'\n h = h[2:].zfill((0) * 2) # '0x12c' -> '00012c' if zfill=3\n return h\n\n#\ndef bin_to_hex(value):\n # http://stackoverflow.com/questions/2072351/python-conversion-from-binary-string-to-hexadecimal/2072384#2072384\n # '0000 0100 1000 1101' -> '\\x04\\x8d'\n value = value.replace(' ', '')\n h = '%0*X' % ((len(value) + 3) // 4, int(value, 2))\n return h.decode('hex')\n\n#\nclass Log():\n def __init__(self, filename, mode='none'):\n\n date = getTime(2)\n self.mode = mode\n # TODO: need refactoring - make it more abstract\n self.file = 'Logs/' + filename + '_' + date + '_counter+.txt'\n if (os.path.exists(self.file)) != True:\n with open(self.file, 'w+') as file:\n file.write('Start - ' + date + '\\n')\n\n def wirteIntoFile(self,raw):\n if self.mode == 'out':\n data = ''\n raw = str(getTime(3)) + ': ' + raw\n with open(self.file, 'r') as file:\n data = file.read()\n with open(self.file, 'w+') as file:\n file.write(data)\n file.write(raw + '\\n')\n\n def counter(self):\n pass\n\n\n# TODO: need to implemant a class\ndef storeDNSRequestJSON(status, time, recordType, transactionID, srcIP, srcPort, domain, modifiedDomain='none', mode='none'):\n '''\n .\n '''\n\n date = getTime(2)\n if mode == 'check':\n file = JsonRequestsPATHCheck + '_' + date + '.json'\n else:\n # TODO: need refactoring - make it more abstract\n file = JsonRequestsPATH + '_' + date + '.json'\n jsons = {}\n\n if (os.path.exists(file)) != True: # check if the file exist, if not create it.\n with open(file, 'w+') as jsonfile:\n json.dump(' ', jsonfile)\n else:\n with open(file, 'r') as jsonfile:\n jsons = json.load(jsonfile)\n\n if domain[-1:] == '.':\n domain = domain[:-1]\n\n with open(file, 'w') as jsonfile:\n DNSRequestNodes = {\n 'Request': {\n 'ID': str(len(jsons) + 1),\n 'Time': time,\n 'Status': status,\n 'TransactionID': 
transactionID,\n 'RecordType': recordType,\n 'SrcIP': srcIP,\n 'SrcPort': srcPort,\n 'Domain': domain,\n 'modifiedDomain': modifiedDomain,\n }\n }\n jsons[str(len(jsons) + 1)] = DNSRequestNodes\n # Write into Json file\n json.dump(jsons, jsonfile)\n\n\n# </editor-fold>\n\n# <editor-fold desc=\"******************* Zone File *******************\">\n#\ndef loadZone():\n '''\n load all zones that we have when the DNS server starts up, and put them into memory\n '''\n\n jsonZone = {} # dictionary\n zoneFiles = glob.glob('Zones/*.zone')\n printDebugMode(zoneFiles) # Debug\n\n for zone in zoneFiles:\n with open(zone) as zonedata:\n data = json.load(zonedata)\n zoneName = data['$origin']\n jsonZone[zoneName] = data\n\n return jsonZone\n\n\ndef getZone(domain):\n global ZoneDATA\n try:\n zoneName = '.'.join(domain[-3:]).lower()\n return ZoneDATA[zoneName]\n except Exception as e:\n print()\n return ''\n\n\n# </editor-fold>\n\n# <editor-fold desc=\"******************* DNS Rspoonse *******************\">\n\ndef getFlags(flags):\n response_Flag = ''\n\n # First byte contains: QR: 1 bit | Opcode: 4 bits | AA: 1 bit | TC: 1 bit |RD: 1 bit\n byte1 = bytes(flags[:1])\n # Second byte contains: RA: 1 bit | Z: 3 bits | RCODE: 4 bit\n byte2 = bytes(flags[1:2])\n\n QR = '1' # query: 0 , response: 0.\n # OPCODE\n OPCODE = ''\n for bit in range(1, 5):\n OPCODE += str(ord(byte1) & (1 << bit)) # to get option 1/0\n\n # Authoritative Answer\n AA = '1' # Always 1\n # TrunCation\n TC = '0' # 0 because we always dealing with a short message\n # Recursion Desired\n RD = '0' # 0 if it is not supported recurring\n # Recursion Available\n RA = '0'\n\n # Reserved for future use. Must be zeros in all queries and responses.\n Z = '000'\n\n # Response code\n RCODE = '0000'\n\n response_Flag = int(QR + OPCODE + AA + TC + RD, 2).to_bytes(1, byteorder='big') + int(RA + Z + RCODE).to_bytes(1,\n byteorder='big')\n # response_Flag = int(QR + '0000' + AA + TC + RD, 2).to_bytes(1, byteorder='big') + int(RA + Z + RCODE).to_bytes(1,byteorder='big')\n\n return response_Flag\n\n\ndef getQuestionDomain(data):\n state = 1\n index = 0\n first = True\n\n domainParts = []\n domainString = ''\n domainTLD = ''\n\n expectedLength = 0\n TotalLength = 0\n parts = 0\n for byte in data:\n\n if byte == 0:\n break\n if state == 1: # 1 get the domain name\n if first is True: # first byte to get the length for the zone ~ 3 bytes\n first = False\n parts += 1\n expectedLength = byte\n continue\n domainString += chr(byte)\n index += 1\n if index == expectedLength:\n TotalLength += expectedLength\n state = 2\n index = 0\n domainParts.append(domainString)\n domainString = ''\n first = True\n\n elif state == 2: # 2 get the domain zone\n if first is True: # first byte to get the length for the zone ~ 3 bytes\n first = False\n expectedLength = byte\n parts += 1 # how many parts\n continue\n domainString += chr(byte)\n index += 1\n if index == expectedLength:\n TotalLength += expectedLength\n state = 1\n index = 0\n domainParts.append(domainString)\n domainString = ''\n first = True\n\n # get question type\n questionTypeStartingIndex = TotalLength + parts\n questionType = data[questionTypeStartingIndex + 1: questionTypeStartingIndex + 3]\n if DEBUG is True: # Debug mode only\n print('Question Type: ' + str(questionType))\n print('Domain: ' + domainString + '.' 
+ domainTLD)\n\n domainParts.append('')\n print(domainParts)\n\n return (domainParts, questionType)\n\n#\ndef getQuestionDomain_temp(data):\n state = 0\n expectedlength = 0\n domainstring = ''\n domainparts = []\n x = 0\n y = 0\n for byte in data:\n if state == 1:\n if byte != 0:\n domainstring += chr(byte)\n x += 1\n if x == expectedlength:\n domainparts.append(domainstring)\n domainstring = ''\n state = 0\n x = 0\n if byte == 0:\n domainparts.append(domainstring)\n break\n else:\n state = 1\n expectedlength = byte # get the lenght for the domain\n y += 1\n\n questiontype = data[y:y + 2]\n\n return (domainparts, questiontype)\n\n\ndef getLetterCaseSawped(dmoainParts):\n newParts = dmoainParts[:-3] # save all the elements but not the last 3 including ''\n dmoainParts = dmoainParts[-3:] # get only last 3 elemnets of the ExitNodelist exmaple.com.\n # modify randomly only in the domain and zone name\n for part in dmoainParts:\n part = \"\".join(random.choice([k.swapcase(), k]) for k in part)\n newParts.append(part)\n return newParts\n\n\ndef getRecs(data):\n try:\n domain, questionType = getQuestionDomain(data)\n qt = ''\n if questionType == RECORD_TYPES.A.value:\n qt = 'A'\n elif questionType == RECORD_TYPES.AAAA.value:\n qt = 'AAAA'\n elif questionType == RECORD_TYPES.CNAME.value:\n qt = 'CNAME'\n elif questionType == RECORD_TYPES.MX.value:\n qt = 'MX'\n elif questionType == RECORD_TYPES.NS.value:\n qt = 'NS'\n elif questionType == RECORD_TYPES.TXT.value:\n qt = 'TXT'\n elif questionType == RECORD_TYPES.ANY.value:\n qt = 'ANY'\n\n # print(domain)\n zone = getZone(domain)\n if DEBUG is True: # Debug mode only\n print('-------------7')\n\n print('Question Type: ' + str(qt))\n print('Zone: ' + str(zone[qt]))\n print('-------------5')\n print('Question Type: ' + str(qt))\n print('-------------6')\n\n return (zone[qt], qt, domain, 'OKAY')\n except Exception as ex:\n log_incoming(str(ex))\n return ('', qt, domain, 'ERROR')\n\n\ndef buildQuestion(domainName, recordType): # convert str into byte\n questionBytes = b''\n\n for part in domainName:\n length = len(part)\n questionBytes += bytes([length])\n\n for char in part:\n questionBytes += ord(char).to_bytes(1, byteorder='big')\n\n if recordType == RECORD_TYPES.A.name or recordType == RECORD_TYPES.AAAA.name:\n questionBytes += (1).to_bytes(2, byteorder='big')\n\n questionBytes += (1).to_bytes(2, byteorder='big')\n return questionBytes\n\n\ndef recordToBytes(domainName, recordType, recordTTL, recordValue):\n '''\n\n '''\n recordBytes = b'\\xc0\\x0c' # Pointer to domain name\n if recordType == RECORD_TYPES.A.name:\n recordBytes = recordBytes + bytes([0]) + bytes([1])\n\n # TODO: need to handle IP6-AAAA\n elif recordType == RECORD_TYPES.AAAA.name:\n recordBytes = recordBytes + bytes([0]) + bytes([1])\n\n recordBytes = recordBytes + bytes([0]) + bytes([1])\n recordBytes += int(recordTTL).to_bytes(4, byteorder='big')\n\n if recordType == RECORD_TYPES.A.name or recordType == RECORD_TYPES.AAAA.name:\n recordBytes = recordBytes + bytes([0]) + bytes([4])\n for part in recordValue.split('.'):\n recordBytes += bytes([int(part)])\n\n return recordBytes\n\n\ndef BruteFouceTransactionID(currentTransactionID):\n pass\n\ndef getForgedResponse(data, addr, case_sensitive=True):\n '''\n Build a DNS forged response.\n '''\n\n # DNS Header\n # Transaction ID\n TransactionID_Byte = data[:2]\n TransactionID = ''\n for byte in TransactionID_Byte:\n TransactionID += hex(byte)[2:]\n\n if DEBUG is True: # Debug mode only\n print('ID:')\n print(TransactionID)\n\n # FLAGS\n 
Flags = getFlags(data[2:4])\n if DEBUG is True: # Debug mode only\n print(Flags)\n\n # Question Count, how many questions in the zone file\n QDCOUNT = RECORD_TYPES.A.value # b'\\x00\\x01' # dns has one question\n\n records, recordType, domainName, recStatus = getRecs(data[12:])\n\n # Answer Count\n # ANCOUNT = len(getRecs(data[12:])[0]).to_bytes(2, byteorder='big') # 12 bytes to skip the header\n ANCOUNT = len(records).to_bytes(2, byteorder='big') # 12 bytes to skip the header\n\n # Name server nodeCount\n NSCOUNT = (0).to_bytes(2, byteorder='big')\n\n # Additional nodeCount\n ARCOUNT = (0).to_bytes(2, byteorder='big')\n\n Forged = True\n if Forged is True:\n pass\n #TransactionID_Byte = BruteFouceTransactionID(TransactionID_Byte)\n DNSHeader = Flags + QDCOUNT + ANCOUNT + NSCOUNT + ARCOUNT\n\n# DNSHeader = TransactionID_Byte + Flags + QDCOUNT + ANCOUNT + NSCOUNT + ARCOUNT\n if DEBUG is True:\n dnsH = ''\n print('DNS HEADER: ' + str(DNSHeader))\n for byte in DNSHeader:\n dnsH += hex(byte)[2:]\n print(dnsH)\n\n # ********************************** DNS Question\n\n # records, recordType, domainName = getRecs(data[12:])\n\n global COUNTER\n COUNTER += 1\n transactionID = str(int(TransactionID, 16))\n domain = '.'.join(map(str, domainName))[:-1]\n status = 'Okay'\n\n if case_sensitive is True:\n domainName = getLetterCaseSawped(domainName)\n modifiedDomain = '.'.join(map(str, domainName))[:-1]\n if recStatus == 'ERROR': # TODO: need to handle the exception in better way\n log_incoming(str(\n COUNTER) + ': ** ERROR ** : RecordType: ' + recordType + ' | RequestId: ' + transactionID + ' | SrcIP: ' +\n addr[0] + ' | SrcPort: ' + str(\n addr[1]) + ' | Domain: ' + domain + ' | Modified Domain: ' + modifiedDomain)\n status = 'ERROR'\n print(term.format(str(\n COUNTER) + ': ' + status + ' - RecordType: ' + recordType + ' - RequestId: ' + transactionID + ' From: IP ' +\n addr[0] + ' : Port: ' + str(\n addr[1]) + ' - Domain : ' + domain + ' | Modified Domain: ' + modifiedDomain + '\\n',\n term.Color.RED))\n else:\n log_incoming(\n str(COUNTER) + ': RecordType: ' + recordType + ' | RequestId: ' + transactionID + ' | SrcIP: ' + addr[\n 0] + ' | SrcPort: ' + str(\n addr[1]) + ' | Domain : ' + domain + ' | Modified Domain: ' + modifiedDomain)\n status = 'OKAY'\n print(term.format(str(\n COUNTER) + ': ' + status + ' - RecordType: ' + recordType + ' - RequestId: ' + transactionID + ' From: IP ' +\n addr[0] + ' : Port: ' + str(\n addr[1]) + ' - Domain : ' + domain + ' | Modified Domain: ' + modifiedDomain + '\\n',\n term.Color.GREEN))\n if 'Check_' in domain:\n storeDNSRequestJSON(status=status, time=getTime(3), recordType=recordType, transactionID=transactionID,\n srcIP=addr[0], srcPort=str(addr[1]), domain=domain, modifiedDomain=modifiedDomain, mode='check')\n else:\n storeDNSRequestJSON(status=status, time=getTime(3), recordType=recordType, transactionID=transactionID,\n srcIP=addr[0], srcPort=str(addr[1]), domain=domain, modifiedDomain=modifiedDomain)\n\n else:\n if recStatus == 'ERROR': # TODO: need to handle the exception in better way\n log_incoming(str(\n COUNTER) + ': ** ERROR ** : RecordType: ' + recordType + ' | RequestId: ' + transactionID + ' | SrcIP: ' +\n addr[0] + ' | SrcPort: ' + str(addr[1]) + ' | Domain: ' + domain)\n status = 'ERROR'\n print(term.format(str(\n COUNTER) + ': ' + status + ' - RecordType: ' + recordType + ' - RequestId: ' + transactionID + ' From: IP ' +\n addr[0] + ' : Port: ' + str(addr[1]) + ' - Domain : ' + domain + '\\n', term.Color.RED))\n else:\n log_incoming(\n 
str(COUNTER) + ': RecordType: ' + recordType + ' | RequestId: ' + transactionID + ' | SrcIP: ' + addr[\n 0] + ' | SrcPort: ' + str(addr[1]) + ' | Domain : ' + domain)\n status = 'OKAY'\n print(term.format(str(\n COUNTER) + ': ' + status + ' - RecordType: ' + recordType + ' - RequestId: ' + transactionID + ' From: IP ' +\n addr[0] + ' : Port: ' + str(addr[1]) + ' - Domain : ' + domain + '\\n',\n term.Color.GREEN))\n\n if 'Check_' in domain:\n storeDNSRequestJSON(status=status, time=getTime(3), recordType=recordType, transactionID=transactionID,\n srcIP=addr[0], srcPort=str(addr[1]), domain=domain, mode='check')\n else:\n storeDNSRequestJSON(status=status, time=getTime(3), recordType=recordType, transactionID=transactionID,\n srcIP=addr[0], srcPort=str(addr[1]), domain=domain)\n\n DNSQuestion = buildQuestion(domainName, recordType)\n if DEBUG is True:\n print('DNSQuestion: ' + str(DNSQuestion))\n\n # ** DNS Body\n # ** DNS Body\n\n DNSBody = b''\n\n for record in records:\n DNSBody += recordToBytes(domainName, recordType, record['ttl'], record['value'])\n\n if DEBUG is True:\n print(DNSBody)\n\n return DNSHeader + DNSQuestion + DNSBody\n\n\ndef getResponse(data, addr, case_sensitive=True):\n '''\n Build a DNS response.\n '''\n # ** DNS Header\n # Transaction ID\n TransactionID_Byte = data[:2]\n TransactionID = ''\n for byte in TransactionID_Byte:\n TransactionID += hex(byte)[2:]\n if DEBUG is True: # Debug mode only\n print('ID:')\n print(TransactionID)\n\n # FLAGS\n Flags = getFlags(data[2:4])\n if DEBUG is True: # Debug mode only\n print(Flags)\n\n # Question Count, how many questions in the zone file\n QDCOUNT = RECORD_TYPES.A.value # b'\\x00\\x01' # dns has one question\n\n records, recordType, domainName, recStatus = getRecs(data[12:])\n\n # Answer Count\n # ANCOUNT = len(getRecs(data[12:])[0]).to_bytes(2, byteorder='big') # 12 bytes to skip the header\n ANCOUNT = len(records).to_bytes(2, byteorder='big') # 12 bytes to skip the header\n\n # Name server nodeCount\n NSCOUNT = (0).to_bytes(2, byteorder='big')\n\n # Additional nodeCount\n ARCOUNT = (0).to_bytes(2, byteorder='big')\n\n Forged = True\n if Forged is True:\n pass\n #TransactionID_Byte = BruteFouceTransactionID(TransactionID_Byte)\n\n DNSHeader = TransactionID_Byte + Flags + QDCOUNT + ANCOUNT + NSCOUNT + ARCOUNT\n if DEBUG is True:\n dnsH = ''\n print('DNS HEADER: ' + str(DNSHeader))\n for byte in DNSHeader:\n dnsH += hex(byte)[2:]\n print(dnsH)\n\n # ********************************** DNS Question\n\n # records, recordType, domainName = getRecs(data[12:])\n\n global COUNTER\n COUNTER += 1\n transactionID = str(int(TransactionID, 16))\n domain = '.'.join(map(str, domainName))[:-1]\n status = 'Okay'\n\n if case_sensitive is True:\n domainName = getLetterCaseSawped(domainName)\n modifiedDomain = '.'.join(map(str, domainName))[:-1]\n if recStatus == 'ERROR': # TODO: need to handle the exception in better way\n log_incoming(str(\n COUNTER) + ': ** ERROR ** : RecordType: ' + recordType + ' | RequestId: ' + transactionID + ' | SrcIP: ' +\n addr[0] + ' | SrcPort: ' + str(\n addr[1]) + ' | Domain: ' + domain + ' | Modified Domain: ' + modifiedDomain)\n status = 'ERROR'\n print(term.format(str(\n COUNTER) + ': ' + status + ' - RecordType: ' + recordType + ' - RequestId: ' + transactionID + ' From: IP ' +\n addr[0] + ' : Port: ' + str(\n addr[1]) + ' - Domain : ' + domain + ' | Modified Domain: ' + modifiedDomain + '\\n',\n term.Color.RED))\n else:\n log_incoming(\n str(COUNTER) + ': RecordType: ' + recordType + ' | RequestId: ' + 
transactionID + ' | SrcIP: ' + addr[\n 0] + ' | SrcPort: ' + str(\n addr[1]) + ' | Domain : ' + domain + ' | Modified Domain: ' + modifiedDomain)\n status = 'OKAY'\n print(term.format(str(\n COUNTER) + ': ' + status + ' - RecordType: ' + recordType + ' - RequestId: ' + transactionID + ' From: IP ' +\n addr[0] + ' : Port: ' + str(\n addr[1]) + ' - Domain : ' + domain + ' | Modified Domain: ' + modifiedDomain + '\\n',\n term.Color.GREEN))\n\n if 'Check_' in domain:\n storeDNSRequestJSON(status=status, time=getTime(3), recordType=recordType, transactionID=transactionID,\n srcIP=addr[0], srcPort=str(addr[1]), domain=domain, modifiedDomain=modifiedDomain, mode = 'check')\n else:\n storeDNSRequestJSON(status=status, time=getTime(3), recordType=recordType, transactionID=transactionID,\n srcIP=addr[0], srcPort=str(addr[1]), domain=domain, modifiedDomain=modifiedDomain)\n\n else:\n if recStatus == 'ERROR': # TODO: need to handle the exception in better way\n log_incoming(str(\n COUNTER) + ': ** ERROR ** : RecordType: ' + recordType + ' | RequestId: ' + transactionID + ' | SrcIP: ' +\n addr[0] + ' | SrcPort: ' + str(addr[1]) + ' | Domain: ' + domain)\n status = 'ERROR'\n print(term.format(str(\n COUNTER) + ': ' + status + ' - RecordType: ' + recordType + ' - RequestId: ' + transactionID + ' From: IP ' +\n addr[0] + ' : Port: ' + str(addr[1]) + ' - Domain : ' + domain + '\\n', term.Color.RED))\n else:\n log_incoming(\n str(COUNTER) + ': RecordType: ' + recordType + ' | RequestId: ' + transactionID + ' | SrcIP: ' + addr[\n 0] + ' | SrcPort: ' + str(addr[1]) + ' | Domain : ' + domain)\n status = 'OKAY'\n print(term.format(str(\n COUNTER) + ': ' + status + ' - RecordType: ' + recordType + ' - RequestId: ' + transactionID + ' From: IP ' +\n addr[0] + ' : Port: ' + str(addr[1]) + ' - Domain : ' + domain + '\\n',\n term.Color.GREEN))\n if 'Check_' in domain:\n storeDNSRequestJSON(status=status, time=getTime(3), recordType=recordType, transactionID=transactionID,\n srcIP=addr[0], srcPort=str(addr[1]), domain=domain, mode='check')\n else:\n storeDNSRequestJSON(status=status, time=getTime(3), recordType=recordType, transactionID=transactionID,\n srcIP=addr[0], srcPort=str(addr[1]), domain=domain)\n\n DNSQuestion = buildQuestion(domainName, recordType)\n if DEBUG is True:\n print('DNSQuestion: ' + str(DNSQuestion))\n\n DNSBody = b''\n\n for record in records:\n DNSBody += recordToBytes(domainName, recordType, record['ttl'], record['value'])\n\n if DEBUG is True:\n print(DNSBody)\n\n return DNSHeader + DNSQuestion + DNSBody\n\n\ndef printOnScreenAlways(msg, color=term.Color.WHITE):\n print(term.format(msg, color))\n\n\ndef printLogo():\n try:\n print(term.format(('\\n Starting Mini DNS Server.. 
v%s \\n' % VERSION),\n term.Color.YELLOW))\n with open('Logo/logo.txt', 'r') as f:\n lineArr = f.read()\n print(term.format(lineArr, term.Color.GREEN))\n with open('Logo/logo2.txt', 'r') as f:\n lineArr = f.read()\n print(term.format(lineArr, term.Color.RED))\n\n except Exception as ex:\n log_incoming('ERROR: printLogo - ' + str(ex))\n\n\ndef killProcess(port):\n try:\n os.system('freeport %s' % port)\n\n except Exception as ex:\n log_incoming(str(ex))\n\n\n\n# </editor-fold>\n\n#\ndef generateResponseWithRequestId(response,sock,addr):\n '''\n Generate Request Id.\n '''\n try:\n r = 1\n while r <= 1:\n print(\"Round: \" + str(r))\n requestIds = []\n requestIds = [random.randint(1, 65536) for i in range(10000)]\n requestIds.sort()\n index = 0\n for requestId in requestIds: #range (1, 10000): # 1000 time should be enoght\n\n index+=1\n print('R: '+str(r)+' - '+str(index) +'- Transaction ID: ' + str(requestId))\n TransactionID_Byte = (requestId).to_bytes(2, byteorder='big')\n response = TransactionID_Byte + response\n sock.sendto(response, addr)\n r = r+1\n\n except Exception as ex:\n print(ex)\n\n#\ndef generateResponseWithPortNumber(response,sock,addr):\n '''\n Generate Port Number.\n '''\n try:\n portNumbers = []\n portNumbers = [random.randint(1, 65536) for i in range(10000)]\n portNumbers.sort()\n index=0\n for portNumber in portNumbers: # range (1, 10000): # 1000 time should be enoght\n index += 1\n print(str(index) +'- Port ' + str(portNumber))\n lst = list(addr)\n lst[1] = portNumber\n addr = tuple(lst)\n sock.sendto(response, addr)\n\n except Exception as ex:\n print(ex)\n\n\ndef main(argv, IP):\n # gather Zone info and store it into memory\n global ZoneDATA\n ZoneDATA = loadZone()\n print(\"\\n **Zone file has been loaded**\")\n\n case_sensitive = False\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n opts = argv\n\n if opts[1] == '-s':\n sock.bind((IP, PORT))\n if opts[2] == '-mcase':\n case_sensitive = True\n print(\"\\n Host: %s | Port: %s \\n\" % (IP, PORT))\n elif opts == '-l' or opts == '':\n sock.bind((IP_ADDRESS_LOCAL, PORT))\n print(\"\\n Host: %s | Port: %s \\n\" % (IP_ADDRESS_LOCAL, PORT))\n\n try:\n # keep listening\n while 1:\n data, addr = sock.recvfrom(512)\n response = getForgedResponse(data, addr, case_sensitive) # forge response without request ID, later we forge the ID and combine it with the whole response\n generateResponseWithRequestId(response,sock, addr) # brute force # we get the response once without Tre_id\n\n except Exception as ex:\n log_incoming('ERROR: main ' + str(ex))\n printOnScreenAlways(\"\\nERROR: Terminated!!! :\" + str(ex), term.Color.RED)\n\n\ndef main_test():\n # gather Zone info and store it into memory\n global ZoneDATA\n ZoneDATA = loadZone()\n\n print(\"testing .... 
\")\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.bind((IP_ADDRESS_LOCAL, PORT))\n\n print(\"Host: %s | Port: %s \" % (IP_ADDRESS_LOCAL, PORT))\n # open socket and\n\n # keep listening\n while 1:\n data, addr = sock.recvfrom(512)\n\n if FixRequestId is True: ## try all the possible Port Number 1 to 65556\n response = getResponse(data, addr) # we get the correct response.\n generateResponseWithPortNumber(response, sock, addr) # brute force all the possible port number\n\n elif FixPort is True: ## try all the possible request IDs 1 to 65556\n response = getForgedResponse(data, addr, True) # forge response without request ID, later we forge the ID and combine it with the whole response\n generateResponseWithRequestId(response, sock, addr) # brute force # we get the response once without Tre_id\n\ndef main_test_local():\n '''\n gather Zone info and store it into memory\n '''\n global ZoneDATA\n ZoneDATA = loadZone()\n print(\"Testing .... \")\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.bind((IP_ADDRESS_LOCAL, PORT))\n print(\"\\n Host: %s | Port: %s \" % (IP_ADDRESS_LOCAL, PORT))\n\n # testing\n BYTES = b'\\\\$\\x00\\x10\\x00\\x01\\x00\\x00\\x00\\x00\\x00\\x01\\x02ns\\x0cdnStEstSuITE\\x05SpACe\\x00\\x00\\x1c\\x00\\x01\\x00\\x00)\\x10\\x00\\x00\\x00\\x80\\x00\\x00\\x00'\n response = getResponse(BYTES, '127.0.0.2')\n print(\"response:\")\n print(str(response))\n\nif __name__ == '__main__':\n printLogo()\n killProcess(53)\n try: # on the server\n if len(sys.argv) != 1:\n ip = socket.gethostbyname(socket.gethostname())\n main(sys.argv[1:], ip)\n else:\n print('ERROR: argv....')\n main_test()\n \n except Exception as ex: # locally\n print('ERROR: argv....')\n print(ex)\n main_test()\n", "id": "760988", "language": "Python", "matching_score": 9.39214038848877, "max_stars_count": 2, "path": "DNS/DNSForged.py" }, { "content": "#! 
/usr/bin/env python3\n\n'''\n This file contains functions to DNS server to complete its tasks.\n'''\n\nimport datetime\nimport os\nimport json\nimport glob\nimport random\nimport logging\nimport logging.config\nimport traceback\n\nfrom stem.util import term\nfrom enum import Enum\n\nfrom Helper.Helper import Helper\nfrom Helper.Helper import MSG_TYPES\nfrom Helper.Helper import LogData\nfrom Helper.Helper import TIME_FORMAT\n\n\nJSON_REQUESTS_PATH = 'JSON/NormalRequests/NormalDNSRequestNodes'\nJSON_REQUESTS_PATH_CHECK = 'JSON/CheckingRequests/CheckingDNSRequestNodes' # store all the sendRequests about checkoing if the dns supports 0x20 code\nERRORS_LOG_PATH = 'Logs/Errors/'\nFORCE_NOT_RESPONSE_MEG = 'tor_dont_response' # if the sendRequests contains this in the sub-domain, DNS will not response to it\n\nDEBUG = False\nCOUNTER = 0\n\n\n#<editor-fold desc=\"******************* General Tools *******************\">\n\nclass RECORD_TYPES(Enum):\n A = b'\\x00\\x01' # specifies IP4 Address\n CNAME = b'\\x00\\x05' # aliases\n MX = b'\\x00\\x0f' # mail exchange server for DNS\n NS = b'\\x00\\x02' # authoritative name server\n TXT = b'\\x00\\x10' # arbitrary non-formatted text string.\n AAAA = b'\\x00\\x1c' # specifies IP6 Address\n ANY = b'\\x00\\xff'\n\n\ndef setDebuggingMode(debug):\n DEBUG = debug\n\n#\ndef setAdversaryMode(adversary_mode):\n '''\n to activate the adversary mode.\n '''\n\n ADVERSARY_MODE = adversary_mode\n\n#\ndef loggingData(value):\n file = LogData(filename='incoming_request', mode='out')\n file.wirteIntoFile(value)\n\n# Log all the incoming DNS sendRequests and return the logged row as string\ndef logDNSRequest(counter,status, recordType, requestId, srcIP, srcPort, domain, modifiedDomain='', mode='none'):\n '''\n Logging all the DNS requests.\n '''\n\n date = Helper.getTime(TIME_FORMAT.FULL)\n printedRow = ''\n\n if status =='ERROR':\n if modifiedDomain == '':\n printedRow = ('%s - %d: ** ERROR ** : | RecordType: %s | RequestId: %s | SrcIP: %s | SrcPort: %d | Domain: %s ' %\n (date, counter, recordType, requestId, srcIP, srcPort, domain))\n else:\n printedRow = ('%s - %d: ** ERROR ** : | RecordType: %s | RequestId: %s | SrcIP: %s | SrcPort: %d | Domain: %s | ModifiedDomain: %s' %\n (date, counter, recordType, requestId, srcIP, srcPort, domain, modifiedDomain))\n printStatus = MSG_TYPES.ERROR\n\n elif status =='OKAY':\n if modifiedDomain == '':\n printedRow = ('%s - %d: | RecordType: %s | RequestId: %s | SrcIP: %s | SrcPort: %d | Domain: %s ' %\n (date, counter, recordType, requestId, srcIP, srcPort, domain))\n else:\n printedRow = ('%s - %d: | RecordType: %s | RequestId: %s | SrcIP: %s | SrcPort: %d | Domain: %s | ModifiedDomain: %s' %\n (date, counter, recordType, requestId, srcIP, srcPort, domain, modifiedDomain))\n printStatus = MSG_TYPES.RESULT\n\n loggingData(printedRow)\n\n return (printedRow,printStatus)\n\n#\ndef killprocess(port):\n try:\n os.system('freeport %s' % port)\n\n except Exception as ex:\n logging.error('DNSFunctions - killprocess: %s' % ex)\n\n#\ndef printLogo(version, modifyDate):\n try:\n print(term.format(('\\n Starting Mini DNS Server.. 
v%s - Last modified: %s' % (version, modifyDate)), term.Color.YELLOW))\n with open('Logo/logo.txt', 'r') as f:\n lineArr = f.read()\n print(term.format(str(lineArr), term.Color.GREEN))\n with open('Logo/logo2.txt', 'r') as f:\n lineArr = f.read()\n print(term.format(str(lineArr), term.Color.RED))\n\n except Exception as ex:\n logging.error('printLogo - ' + str(ex))\n\ndef printDebugMode(values):\n if DEBUG is True: # Debug mode only\n for string in values:\n print(string)\n\n#\ndef makeDirectories():\n '''\n make the directories in case they are missing.\n '''\n\n try:\n if not os.path.exists('JSON'):\n os.makedirs('JSON')\n os.makedirs('JSON/CheckingRequests')\n os.makedirs('JSON/NormalRequests')\n else:\n if not os.path.exists('JSON/CheckingRequests'):\n os.makedirs('JSON/CheckingRequests')\n if not os.path.exists('JSON/NormalRequests'):\n os.makedirs('JSON/NormalRequests')\n\n if not os.path.exists('Logs'):\n os.makedirs('Logs')\n os.makedirs('Logs/Errors')\n\n if not os.path.exists('Logs/Errors'):\n os.makedirs('Logs/Errors')\n\n except Exception as ex:\n Helper.printOnScreenAlways(ex, term.Color.RED)\n logging.error('DNSFunctions - makeDirectories: %s' % traceback.format_exc())\n\n#\ndef int_to_hex(value, zfill=None):\n h = hex(value) # 300 -> '0x12c'\n h = h[2:].zfill((zfill or 0) * 2) # '0x12c' -> '00012c' if zfill=3\n\n return h.decode('hex')\n\n#\ndef bin_to_hex(value):\n # http://stackoverflow.com/questions/2072351/python-conversion-from-binary-string-to-hexadecimal/2072384#2072384\n # '0000 0100 1000 1101' -> '\\x04\\x8d'\n value = value.replace(' ', '')\n h = '%0*X' % ((len(value) + 3) // 4, int(value, 2))\n\n return h.decode('hex')\n\n# TODO: need to implement a class\ndef storeDNSRequestJSON(status, time, recordType, transactionID, srcIP, srcPort, domain, modifiedDomain='none', mode='none'):\n \"\"\"Help for the bar method of Foo classes\"\"\"\n date = Helper.getTime(TIME_FORMAT.DATE)\n pathDirt = ''\n if mode == 'check':\n path = JSON_REQUESTS_PATH_CHECK\n else:\n # TODO: need refactoring - make it more abstract\n path = JSON_REQUESTS_PATH\n\n pathFile = ('%s_%s.json' % (path,date))\n\n jsons = {}\n\n if (os.path.exists(pathFile)) != True: # check if the file exist, if not create it.\n with open(pathFile, 'w+') as jsonfile: # not exist\n json.dump(' ', jsonfile)\n else:\n try:\n with open(pathFile, 'r') as jsonfile:\n jsons = json.load(jsonfile)\n\n except ValueError as er:\n logging.error('DNSFunction - storeDNSRequestJSON - JSON invalid - file: %s : %s' % (path,str(er)))\n os.rename(pathFile, ('%s_%s_error_%d.json' % (path,date,random.randint(1,50))))\n\n with open(pathFile, 'a+') as jsonfile:\n json.dump(' ', jsonfile)\n\n if domain[-1:] == '.':\n domain = domain[:-1]\n\n with open(pathFile,'w') as jsonfile:\n DNSRequestNodes = {\n 'Request': {\n 'ID': str(len(jsons) + 1),\n 'Time': time,\n 'Status': status,\n 'TransactionID':transactionID,\n 'RecordType':recordType,\n 'SrcIP': srcIP,\n 'SrcPort': srcPort,\n 'Domain': domain,\n 'modifiedDomain' : modifiedDomain,\n }\n }\n\n jsons[ str(len(jsons)+1)] = DNSRequestNodes\n # Write into Json file\n json.dump(jsons, jsonfile)\n\n# TODO: need to handle storing json in a better way\ndef storeDNSRequestJSONText(status, time, recordType, transactionID, srcIP, srcPort, domain, modifiedDomain='none', mode='none'):\n '''\n Logging all the DNS request in json format\n '''\n\n date = Helper.getTime(TIME_FORMAT.DATE)\n\n if mode == 'check':\n file = JSON_REQUESTS_PATH_CHECK + '_' + date + '.json'\n else:\n # TODO: need refactoring - make 
it more abstract\n file = JSON_REQUESTS_PATH + '_' + date + '.json'\n\n jsons = {}\n\n if (os.path.exists(file)) != True: # check if the file exist, if not create it.\n with open(file, 'w+') as jsonfile:\n json.dump(' ', jsonfile)\n else:\n with open(file, 'r') as jsonfile:\n jsons = json.load(jsonfile)\n\n if domain[-1:] == '.':\n domain = domain[:-1]\n\n row ='\"%d\": { \"Request\" : {' \\\n '\"ID\" : str(len(jsons) + 1),\"Time\": %s,' \\\n '\"Status\": %s,' \\\n '\"TransactionID\": %s,' \\\n '\"RecordType\": %s,' \\\n '\"SrcIP\": %s,' \\\n '\"SrcPort\": %s,' \\\n '\"Domain\": %s,' \\\n '\"modifiedDomain\": %s } },' %(id, time, status, transactionID, recordType, srcIP, srcPort, domain, modifiedDomain)\n\n with open(file,'w') as jsonfile:\n DNSRequestNodes = {\n 'Request': {\n 'ID': str(len(jsons) + 1),\n 'Time': time,\n 'Status': status,\n 'TransactionID':transactionID,\n 'RecordType':recordType,\n 'SrcIP': srcIP,\n 'SrcPort': srcPort,\n 'Domain': domain,\n 'modifiedDomain' : modifiedDomain,\n }\n }\n\n jsons[ str(len(jsons)+1)] = DNSRequestNodes\n # Write into Json file\n json.dump(jsons, jsonfile)\n\n# </editor-fold>\n\n#<editor-fold desc=\"******************* Zone File *******************\">\n\n#\ndef loadRealZone():\n '''\n load all zones that we have when the DNS server starts up, and put them into memory\n '''\n\n global ZONEDATA\n jsonZone = {} # dictionary\n zFile = 'Zones/RealZone.zone'\n printDebugMode(zFile) # Debug\n with open(zFile) as zonedata:\n data = json.load(zonedata)\n zoneName = data['$origin']\n jsonZone[zoneName] = data\n\n ZONEDATA = jsonZone\n Helper.printOnScreenAlways(\"\\n =-----------------**Zone file has been loaded**------------------=\\n\",MSG_TYPES.RESULT)\n\ndef loadFakeZone():\n '''\n load all the fake zones that we have when the DNS server starts up, and put them into memory.\n '''\n global FAKEZONEDATA\n jsonZone = {} # dictionary\n zFile = 'Zones/FakeZone.zone'\n printDebugMode(zFile) # Debug\n with open(zFile) as zonedata:\n data = json.load(zonedata)\n zoneName = data['$origin']\n jsonZone[zoneName] = data\n FAKEZONEDATA = jsonZone\n Helper.printOnScreenAlways(\" =--------------**Fake Zone file has been loaded**--------------=\",MSG_TYPES.RESULT)\n\n#\ndef getZone(domain):\n '''\n get zone and domain name.\n '''\n global ZONEDATA\n try:\n zoneName = '.'.join(domain[-3:]).lower()\n return ZONEDATA[zoneName]\n except Exception as ex:\n logging.error('DNSFunctions - getZone: \\n%s ' % traceback.format_exc())\n return ''\n\ndef getFakeZone(domain):\n global FAKEZONEDATA\n try:\n zoneName = '.'.join(domain[-3:]).lower()\n return FAKEZONEDATA[zoneName]\n except Exception as ex:\n logging.error('DNSFunctions - getZone: \\n%s ' % traceback.format_exc())\n return ''\n# </editor-fold>\n\n#<editor-fold desc=\"******************* DNS Tools/Rspoonse *******************\">\n#\ndef getFlags(flags):\n\n response_Flag = ''\n\n # First byte contains: QR: 1 bit | Opcode: 4 bits | AA: 1 bit | TC: 1 bit |RD: 1 bit\n byte1 = bytes(flags[:1])\n\n # Second byte contains: RA: 1 bit | Z: 3 bits | RCODE: 4 bit\n byte2 = bytes(flags[1:2])\n\n QR = '1' # QR: indicates whether the packet is a sendRequests (0) or a response (1).\n\n # OPCODE\n OPCODE = ''\n for bit in range(1, 5):\n OPCODE += str(ord(byte1) & (1 << bit)) # to get option 1/0\n\n # Authoritative Answer\n AA = '1' # Always 1\n # TrunCation\n TC = '0' # 0 because we always dealing with a short message\n # Recursion Desired\n RD = '0' # 0 if it is not supported recurring\n # Recursion Available\n RA = '0'\n # 
Reserved for future use. Must be zeros in all queries and responses.\n Z = '000'\n # Response code\n RCODE = '0000'\n ('DNSFunctions - getFlags: OPCODE:%s\\n %s ' % (str(OPCODE), traceback.format_exc()))\n try:\n response_Flag = int(QR + OPCODE + AA + TC + RD, 2).to_bytes(1, byteorder='big') + int(RA + Z + RCODE).to_bytes(1,byteorder='big')\n exception = False\n\n except Exception as ex:\n TempOPCODE = '0000' # Query\n response_Flag = int(QR + TempOPCODE + AA + TC + RD, 2).to_bytes(1, byteorder='big') + int(RA + Z + RCODE).to_bytes(1,byteorder='big')\n print(response_Flag)\n\n return response_Flag\n\n#\ndef getQuestionDomain(data):\n '''\n .\n '''\n state = 1\n index = 0\n first = True\n domainParts = []\n domainString = ''\n domainTLD = ''\n\n expectedLength = 0\n TotalLength = 0\n parts = 0\n for byte in data:\n if byte == 0:\n break\n if state == 1: # 1 get the domain name\n if first is True: # first byte to get the length for the zone ~ 3 bytes\n first = False\n parts += 1\n expectedLength = byte\n continue\n domainString += chr(byte)\n index += 1\n if index == expectedLength:\n TotalLength += expectedLength\n state = 2\n index = 0\n domainParts.append(domainString)\n domainString = ''\n first = True\n\n elif state == 2: # 2 get the domain zone\n if first is True: # first byte to get the length for the zone ~ 3 bytes\n first = False\n expectedLength = byte\n parts += 1 # how many parts\n continue\n domainString += chr(byte)\n index += 1\n if index == expectedLength:\n TotalLength += expectedLength\n state = 1\n index = 0\n domainParts.append(domainString)\n domainString = ''\n first = True\n\n # get question type\n questionTypeStartingIndex = TotalLength + parts\n questionType = data[questionTypeStartingIndex+1: questionTypeStartingIndex+3]\n\n if DEBUG is True: # Debug mode only\n print('Question Type: ' + str(questionType))\n print('Domain: ' + domainString+'.'+domainTLD)\n\n domainParts.append('')\n\n return (domainParts, questionType)\n\n# TODO : Need to be deleted\ndef getQuestionDomain_temp(data):\n state = 0\n expectedlength = 0\n domainstring = ''\n domainparts = []\n x = 0\n y = 0\n for byte in data:\n if state == 1:\n if byte != 0:\n domainstring += chr(byte)\n x += 1\n if x == expectedlength:\n domainparts.append(domainstring)\n domainstring = ''\n state = 0\n x = 0\n if byte == 0:\n domainparts.append(domainstring)\n break\n else:\n state = 1\n expectedlength = byte # get the lenght for the domain\n y += 1\n\n questiontype = data[y:y + 2]\n return (domainparts, questiontype)\n\n#\ndef getLetterCaseSwapped(dmoainParts):\n newParts = dmoainParts[:-3] # save all the elements but not the last 3 including ''\n dmoainParts = dmoainParts[-3:] # get only last 3 elemnets of the ExitNodelist exmaple.com.\n # modify randomly only in the domain and zone name\n for part in dmoainParts:\n part = \"\".join(random.choice([k.swapcase(), k ]) for k in part )\n newParts.append(part)\n return newParts\n\n#\ndef getRecs(zone,domain, questionType):\n '''\n Get the record resources\n '''\n try:\n qt = ''\n if questionType == RECORD_TYPES.A.value:\n qt = 'A'\n elif questionType == RECORD_TYPES.AAAA.value:\n qt = 'AAAA'\n elif questionType == RECORD_TYPES.CNAME.value:\n qt = 'CNAME'\n elif questionType == RECORD_TYPES.MX.value:\n qt = 'MX'\n elif questionType == RECORD_TYPES.NS.value:\n qt = 'NS'\n elif questionType == RECORD_TYPES.TXT.value:\n qt = 'TXT'\n elif questionType == RECORD_TYPES.ANY.value:\n qt = 'ANY'\n\n if DEBUG is True: # Debug mode only\n print('-------------7')\n print('Question 
Type: ' + str(qt))\n print('Zone: ' + str(zone[qt]))\n print('-------------5')\n print('Question Type: ' + str(qt))\n print('-------------6')\n\n return (zone[qt], qt, domain, 'OKAY')\n\n except Exception as ex:\n if str(ex) != str(KeyError('AAAA')): # IPv6- if it's IPv6, is it not important\n logging.error('DNSFunctions - getRecs: \\n%s ' % traceback.format_exc())\n\n return ('', qt , domain, 'ERROR')\n\n\n#\ndef buildQuestion(domainName, recordType): # convert str into byte\n '''\n Build the record\n '''\n questionBytes = b''\n for part in domainName:\n length = len(part)\n questionBytes += bytes([length])\n for char in part:\n questionBytes += ord(char).to_bytes(1, byteorder='big')\n\n if recordType == RECORD_TYPES.A.name or recordType == RECORD_TYPES.AAAA.name:\n questionBytes += (1).to_bytes(2, byteorder='big')\n\n questionBytes += (1).to_bytes(2, byteorder='big')\n\n return questionBytes\n\n#\ndef recordToBytes(domainName, recordType, recordTTL, recordValue):\n '''\n .\n '''\n recordBytes = b'\\xc0\\x0c' # Pointer to domain name\n if recordType == RECORD_TYPES.A.name:\n recordBytes = recordBytes + bytes([0]) + bytes([1])\n\n # TODO: need to handle IPv6-AAAA\n elif recordType == RECORD_TYPES.AAAA.name:\n recordBytes = recordBytes + bytes([0]) + bytes([1])\n\n recordBytes = recordBytes + bytes([0]) + bytes([1])\n recordBytes += int(recordTTL).to_bytes(4, byteorder='big')\n if recordType == RECORD_TYPES.A.name or recordType == RECORD_TYPES.AAAA.name:\n recordBytes = recordBytes + bytes([0]) + bytes([4])\n for part in recordValue.split('.'):\n recordBytes += bytes([int(part)])\n\n return recordBytes\n\n#\ndef getResponse(data, addr,case_sensitive = False,adversaryMode=False,withoutRequestId=False ,forceNotResponseMode= False ):\n '''\n Build the DNS Response\n '''\n\n # ********************************** DNS Header\n # Transaction ID\n TransactionID_Byte = data[:2]\n TransactionID = ''\n for byte in TransactionID_Byte:\n TransactionID += hex(byte)[2:]\n if DEBUG is True: # Debug mode only\n print('ID:')\n print(TransactionID)\n\n # FLAGS\n Flags = getFlags(data[2:4])\n if DEBUG is True: # Debug mode only\n print(Flags)\n\n # Question Count, how many questions in the zone file\n QDCOUNT = RECORD_TYPES.A.value #b'\\x00\\x01' # dns has one question\n\n domain, questionType = getQuestionDomain(data[12:])\n if adversaryMode is True: # load the fake zone\n zone = getFakeZone(domain)\n else: #load the real zone\n zone = getZone(domain)\n\n records, recordType, domainName, recStatus = getRecs(zone=zone,domain=domain, questionType=questionType)\n\n # Answer Count\n #ANCOUNT = len(getRecs(data[12:])[0]).to_bytes(2, byteorder='big') # 12 bytes to skip the header\n ANCOUNT = len(records).to_bytes(2, byteorder='big') # 12 bytes to skip the header\n\n # Name server nodeCount\n NSCOUNT = (0).to_bytes(2, byteorder='big')\n\n # Additional nodeCount\n ARCOUNT = (0).to_bytes(2, byteorder='big')\n\n RealDNSHeader_Test = b' ' # for testing\n if withoutRequestId is False:\n DNSHeader = TransactionID_Byte + Flags + QDCOUNT + ANCOUNT + NSCOUNT + ARCOUNT\n else:\n # BUILD THE HEADER WITHOUT THE TRANSACTION ID/REQUEST ID, AFTER FORGE IT, WILL BE ADDED TO THE HEADER\n DNSHeader = Flags + QDCOUNT + ANCOUNT + NSCOUNT + ARCOUNT\n RealDNSHeader_Test = TransactionID_Byte + Flags + QDCOUNT + ANCOUNT + NSCOUNT + ARCOUNT\n\n if DEBUG is True:\n dnsH = ''\n print('DNS HEADER: ' + str(DNSHeader))\n print('DNS HEADER_test: ' + str(RealDNSHeader_Test))\n\n for byte in DNSHeader:\n dnsH += hex(byte)[2:]\n print('DNSHeader:' + 
dnsH)\n\n # ********************************** DNS Question\n\n #records, recordType, domainName = getRecs(data[12:])\n\n global COUNTER\n COUNTER += 1\n transactionID= str(int(TransactionID,16))\n srcIP = addr[0]\n srcPort = addr[1]\n domain = '.'.join(map(str, domainName))[:-1]\n status = 'Okay'\n\n time = Helper.getTime(TIME_FORMAT.TIME)\n\n # TODO: implement a method that distinguishes sendRequests if they have been called from TORMAPPER\n if case_sensitive is True and 'check_' in domain.lower(): # need to be more dynamic\n modifiedDomain = domain # without permutation\n if 're_check_' not in domain.lower(): # re_check without permutation\n domainName = getLetterCaseSwapped(domainName)\n modifiedDomain = '.'.join(map(str, domainName))[:-1]\n\n printedRow,printStatus = logDNSRequest(counter=COUNTER,status=recStatus, recordType=recordType, requestId=transactionID, srcIP=srcIP, srcPort=srcPort, domain=domain, modifiedDomain=modifiedDomain, mode='none')\n Helper.printOnScreenAlways(printedRow,printStatus)\n\n if 'check_' in domain.lower():\n storeDNSRequestJSON(status=status, time=time,recordType=recordType,transactionID=transactionID, srcIP=addr[0], srcPort=str(addr[1]), domain=domain, modifiedDomain=modifiedDomain,mode='check')\n else:\n storeDNSRequestJSON(status=status, time=time,recordType=recordType,transactionID=transactionID, srcIP=addr[0], srcPort=str(addr[1]), domain=domain, modifiedDomain=modifiedDomain)\n\n else:\n\n printedRow, printStatus = logDNSRequest(counter=COUNTER, status=recStatus, recordType=recordType,\n requestId=transactionID, srcIP=srcIP, srcPort=srcPort, domain=domain,\n mode='none')\n Helper.printOnScreenAlways(printedRow, printStatus)\n\n if 'check_' in domain.lower():\n storeDNSRequestJSON(status=status, time=time,recordType=recordType,transactionID=transactionID, srcIP=addr[0], srcPort=str(addr[1]), domain=domain, mode='check')\n else:\n storeDNSRequestJSON(status=status, time=time, recordType=recordType, transactionID=transactionID,\n srcIP=addr[0], srcPort=str(addr[1]), domain=domain)\n\n DNSQuestion = buildQuestion(domainName, recordType)\n if DEBUG is True:\n print('DNSQuestion: ' + str(DNSQuestion))\n\n response = True\n if forceNotResponseMode:\n if FORCE_NOT_RESPONSE_MEG in domain:\n response = False\n\n\n # ********************************** DNS Body\n DNSBody = b''\n for record in records:\n DNSBody += recordToBytes(domainName, recordType, record['ttl'], record['value'])\n\n if DEBUG is True:\n print('DNSBody: '+str(DNSBody))\n print(str(DNSHeader) + '\\n' + str(DNSQuestion)+'\\n' + str(DNSBody ))\n\n return ((DNSHeader + DNSQuestion + DNSBody), response) #, (RealDNSHeader_Test + DNSQuestion + DNSBody)\n\n\n# </editor-fold>\n\n#<editor-fold desc=\"******************* DNS Forged *******************\">\n#\n# TODO: need to be deleted\ndef getForgedResponse(data, addr, case_sensitive=True):\n # ********************************** DNS Header\n # Transaction ID\n TransactionID_Byte = data[:2]\n TransactionID = ''\n for byte in TransactionID_Byte:\n TransactionID += hex(byte)[2:]\n if DEBUG is True: # Debug mode only\n print('ID:')\n print(TransactionID)\n\n # FLAGS\n Flags = getFlags(data[2:4])\n if DEBUG is True: # Debug mode only\n print(Flags)\n\n # Question Count, how many questions in the zone file\n QDCOUNT = RECORD_TYPES.A.value # b'\\x00\\x01' # dns has one question\n\n records, recordType, domainName, recStatus = getRecs(data[12:])\n\n # Answer Count\n # ANCOUNT = len(getRecs(data[12:])[0]).to_bytes(2, byteorder='big') # 12 bytes to skip the 
header\n ANCOUNT = len(records).to_bytes(2, byteorder='big') # 12 bytes to skip the header\n\n # Name server nodeCount\n NSCOUNT = (0).to_bytes(2, byteorder='big')\n\n # Additional nodeCount\n ARCOUNT = (0).to_bytes(2, byteorder='big')\n\n #*****\n # BUILD THE HEADER WITHOUT THE TRANSACTION ID/REQUEST ID, AFTER FORGE IT, WILL BE ADDED TO THE HEADER\n #***** DNSHeader = TransactionID_Byte + Flags + QDCOUNT + ANCOUNT + NSCOUNT + ARCOUNT\n DNSHeader = Flags + QDCOUNT + ANCOUNT + NSCOUNT + ARCOUNT\n\n if DEBUG is True:\n dnsH = ''\n print('DNS HEADER: ' + str(DNSHeader))\n for byte in DNSHeader:\n dnsH += hex(byte)[2:]\n print(dnsH)\n\n # ********************************** DNS Question\n\n # records, recordType, domainName = getRecs(data[12:])\n\n global COUNTER\n COUNTER += 1\n transactionID = str(int(TransactionID, 16))\n domain = '.'.join(map(str, domainName))[:-1]\n srcIP = addr[0]\n srcPort = addr[1]\n status = 'Okay'\n\n time = Helper.getTime(TIME_FORMAT.TIME)\n if case_sensitive is True:\n domainName = getLetterCaseSwapped(domainName)\n modifiedDomain = '.'.join(map(str, domainName))[:-1]\n\n printedRow, printStatus = logDNSRequest(counter=COUNTER, status=recStatus, recordType=recordType,\n requestId=transactionID, srcIP=srcIP, srcPort=srcPort, domain=domain,\n modifiedDomain=modifiedDomain, mode='none')\n Helper.printOnScreenAlways(printedRow, printStatus)\n\n if 'check_' in domain.lower():\n storeDNSRequestJSON(status=status, time=time, recordType=recordType, transactionID=transactionID,\n srcIP=addr[0], srcPort=str(addr[1]), domain=domain, modifiedDomain=modifiedDomain, mode='check')\n else:\n storeDNSRequestJSON(status=status, time=time, recordType=recordType, transactionID=transactionID,\n srcIP=addr[0], srcPort=str(addr[1]), domain=domain, modifiedDomain=modifiedDomain)\n else:\n\n printedRow, printStatus = logDNSRequest(counter=COUNTER, status=recStatus, recordType=recordType,\n requestId=transactionID, srcIP=srcIP, srcPort=srcPort, domain=domain,\n mode='none')\n Helper.printOnScreenAlways(printedRow, printStatus)\n\n if 'check_' in domain.lower():\n storeDNSRequestJSON(status=status, time=time, recordType=recordType, transactionID=transactionID,\n srcIP=addr[0], srcPort=str(addr[1]), domain=domain, mode='check')\n else:\n storeDNSRequestJSON(status=status, time=time, recordType=recordType, transactionID=transactionID,\n srcIP=addr[0], srcPort=str(addr[1]), domain=domain)\n\n DNSQuestion = buildQuestion(domainName, recordType)\n if DEBUG is True:\n print('DNSQuestion: ' + str(DNSQuestion))\n\n # DNS Body\n\n DNSBody = b''\n\n for record in records:\n DNSBody += recordToBytes(domainName, recordType, record['ttl'], record['value'])\n\n if DEBUG is True:\n print(DNSBody)\n\n return DNSHeader + DNSQuestion + DNSBody\n\n#\ndef generateResponseWithRequestId(response,sock,addr,times): #,expectedID=0,resp=''):\n '''\n Generate the Request Id\n '''\n try:\n round_ = 1\n while round_ <= 1:\n Helper.printOnScreenAlways(\"Round: \" + str(round_),MSG_TYPES.RESULT)\n requestIds = [random.randint(1, 65536) for i in range(times)]\n requestIds.sort()\n index = 0\n hafltimes= times/2\n for requestId in requestIds: #range (1, 10000): # 1000 time should be enoght\n index+=1\n Helper.printOnScreenAlways(\"R: %d - %d- %d\" % (round_ ,index,requestId) , MSG_TYPES.YELLOW)\n TransactionID_Byte = (requestId).to_bytes(2, byteorder='big')\n finalResponse = TransactionID_Byte + response\n sock.sendto(finalResponse, addr)\n round_ = round_ + 1\n\n except Exception as ex:\n logging.error('DNSFunctions - 
generateResponseWithRequestId:\\n %s ' % traceback.format_exc())\n\n#\ndef generateResponseWithPortNumber(response,sock,addr,times):\n '''\n Generate the Port Number.\n '''\n\n try:\n portNumbers = [random.randint(1, 65536) for i in range(times)]\n portNumbers.sort()\n round_ = 1\n while round_ <= 1:\n Helper.printOnScreenAlways(\"Round: \" + str(round_), MSG_TYPES.RESULT)\n index=0\n for portNumber in portNumbers: # range (1, 10000): # 1000 time should be enoght\n index += 1\n Helper.printOnScreenAlways(\"R: %d - %d- %d\" % (round_, index, portNumber), MSG_TYPES.YELLOW)\n\n lst = list(addr)\n lst[1] = portNumber\n addr = tuple(lst)\n sock.sendto(response, addr)\n round_ = round_ + 1\n\n except Exception as ex:\n logging.error('DNSFunctions - generateResponseWithPortNumber: \\n %s ' % traceback.format_exc())\n\n# </editor-fold>", "id": "6522406", "language": "Python", "matching_score": 4.968429088592529, "max_stars_count": 2, "path": "DNS/Helper/DNSFunctions.py" }, { "content": "#! /usr/bin/env python3\n\nimport datetime\nimport io\nimport os\nimport logging\n\nfrom enum import Enum\nfrom stem.util import term\n\nERRORS_LOG_PATH = 'Logs/Errors/'\n\nclass ADVERSARY_TASK_MODE(Enum):\n RRANDOMIZE_PORT_NUMBER = 'rport'\n RRANDOMIZE_REQUEST_ID = 'rid'\n RRANDOMIZE_BOTH = 'both' # not implmented yet.\n\n\nclass MODE_TYPES(Enum):\n printing = '-out'\n none = '-none'\n\nclass TIME_FORMAT(Enum):\n FULL = 'full'\n DATE = 'date'\n TIME = 'time'\n\nclass MSG_TYPES(Enum):\n RESULT = term.Color.GREEN\n ERROR = term.Color.RED\n YELLOW = term.Color.YELLOW\n ANY = term.Color.WHITE\n\nclass Helper:\n\n def __init__(self,mode='-none'):\n self.mode = ''\n\n def printOnScreen(msg,color=MSG_TYPES.ANY,mode='-none'):\n if mode == '-out':\n print(term.format(msg,color.value))\n\n def printOnScreenAlways(msg, color=MSG_TYPES.ANY):\n try:\n print(term.format(msg, color.value))\n except:\n print(msg)\n\n def initLogger(level,enableConsole=False):\n\n date = Helper.getTime(TIME_FORMAT.DATE)\n file = (\"%sE-%s.log\" % (ERRORS_LOG_PATH, date))\n # set up logging to file - see previous section for more details\n logging.basicConfig(level=int(level),\n format='%(asctime)-s %(name)-8s %(levelname)-8s %(message)s',\n datefmt='%Y-%m-%d %H:%M',\n filename=file\n )\n # define a Handler which writes INFO messages or higher to the sys.stderr\n console = logging.StreamHandler(stream=None)\n console.setLevel(level)\n\n # set a format which is simpler for console use\n formatter = logging.Formatter('%(asctime)-s %(name)-12s: %(levelname)-8s %(message)s')\n # tell the handler to use this format\n console.setFormatter(formatter)\n if enableConsole is True:\n # add the handler to the root logger\n logger = logging.getLogger('').addHandler(console)\n\n def loggingError(error):\n logging.error(str(error))\n\n def getTime(format=TIME_FORMAT.FULL):\n date = datetime.datetime.now()\n try:\n if format == TIME_FORMAT.FULL: # full\n return (((str(date)).split('.')[0]).split(' ')[1] + ' ' + ((str(date)).split('.')[0]).split(' ')[0])\n if format == TIME_FORMAT.DATE: # date\n return (((str(date)).split('.')[0]).split(' ')[0])\n if format == TIME_FORMAT.TIME: # time\n return (((str(date)).split('.')[0]).split(' ')[1])\n\n except Exception as ex:\n print('Helper - getTime: %s' % ex)\n\n\nclass LogData():\n\n def __init__(self, filename, mode='none'):\n date = Helper.getTime(TIME_FORMAT.DATE)\n fullDate = Helper.getTime(TIME_FORMAT.FULL)\n self.mode = mode\n\n # TODO: need refactoring - make it more abstract\n self.file = 'Logs/' + filename + '_' + 
date + '_counter+.txt'\n if (os.path.exists(self.file)) != True:\n with open(self.file, 'w+') as file:\n file.write('Start - ' + fullDate + '\\n')\n\n def wirteIntoFile(self, raw):\n if self.mode == 'out':\n data = ''\n\n with open(self.file, 'r') as file:\n data = file.read()\n with open(self.file, 'w+') as file:\n file.write(data)\n file.write(raw + '\\n')\n\n def counter(self):\n pass\n\n", "id": "10561462", "language": "Python", "matching_score": 6.173241138458252, "max_stars_count": 2, "path": "DNS/Helper/Helper.py" }, { "content": "'''\n\nThis file contains functions to DNS server to complete its tasks.\n\n'''\n\n\nimport os\nimport logging\nimport datetime\nimport sys, traceback\n\nERRORS_LOG_PATH = 'Logs/Errors/'\n\nDEBUG = False\nCOUNTER = 0\n\n#<editor-fold desc=\"******************* General Tools *******************\">\ndef ProcesskillForWindows(process_name):\n try:\n killed = os.system('taskkill /f /im ' + process_name)\n\n except Exception as e:\n killed = 0\n\n return killed\n\nclass LogData():\n def __init__(self, filename, mode='none'):\n date = getTime(2)\n self.mode = mode\n # TODO: need refactoring - make it more abstract\n self.file = 'Logs/' + filename + '_' + date + '_counter+.txt'\n if (os.path.exists(self.file)) != True:\n with open(self.file, 'w+') as file:\n file.write('Start - ' + date + '\\n')\n\n def wirteIntoFile(self, raw):\n if self.mode == 'out':\n data = ''\n raw = str(getTime(3)) + ': ' + raw\n with open(self.file, 'r') as file:\n data = file.read()\n with open(self.file, 'w+') as file:\n file.write(data)\n file.write(raw + '\\n')\n\n def counter(self):\n pass\n\ndef initLogger():\n date = getTime(2)\n file = (\"%sE-%s.log\" % (ERRORS_LOG_PATH,date) )\n # set up logging to file - see previous section for more details\n logging.basicConfig(level=logging.ERROR,\n format='%(asctime)-s %(name)-8s %(levelname)-8s %(message)s',\n datefmt='%Y-%m-%d %H:%M',\n filename=file,\n filemode='w')\n # define a Handler which writes INFO messages or higher to the sys.stderr\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n # set a format which is simpler for console use\n formatter = logging.Formatter('%(asctime)-s %(name)-12s: %(levelname)-8s %(message)s')\n # tell the handler to use this format\n console.setFormatter(formatter)\n # add the handler to the root logger\n logging.getLogger('').addHandler(console)\n\ndef loggingError(fuctName,error):\n logging.error('DNSFunction - storeDNSRequestJSON - JSON invalid : %s' % str(error))\n\ndef getTime(opt = 1):\n date = datetime.datetime.now()\n if opt == 1: # full\n return (((str(date)).split('.')[0]).split(' ')[1] + ' ' + ((str(date)).split('.')[0]).split(' ')[0])\n if opt == 2: # date\n return (((str(date)).split('.')[0]).split(' ')[0])\n if opt == 3: # time\n return (((str(date)).split('.')[0]).split(' ')[1])\n\ndef loggingData(value):\n file = LogData(filename='incoming_request', mode='out')\n file.wirteIntoFile(value)\n\n\n# </editor-fold>\n\n#<editor-fold desc=\"******************* Zone File *******************\">\n\n# </editor-fold>\n", "id": "5588090", "language": "Python", "matching_score": 2.6091790199279785, "max_stars_count": 2, "path": "TOR/ConnectionsHandler/TORFunctions.py" }, { "content": "import logging\nimport os\nimport datetime\nfrom colorlog import ColoredFormatter\n\nclass config():\n LOGGER_NAME = \"log\"\n STORE_LOG = True\n\n\nclass Logger(object):\n \"\"\"\n Custom logger\n \"\"\"\n\n\n _logger = None\n\n def __init__(self, logger_name=config.LOGGER_NAME, level=logging.DEBUG, 
store_flag=False):\n \"\"\"\n Log construct method\n Args:\n logger_name (str): name of the log\n level (int): level_name, default(logging.DEBUG)\n store_flag (bool): write the log into file\n \"\"\"\n\n self._logger = logging.getLogger(logger_name)\n self._logger.setLevel(level)\n\n streamHandler = logging.StreamHandler()\n streamHandler.setFormatter(self.logger_format())\n\n self._logger.addHandler(streamHandler)\n\n # if config.STORE_LOG :\n if store_flag:\n self.__write_into_file(logger_name)\n\n def __write_into_file(self, logger_name):\n \"\"\"\n Writes the loggers into the log dir (all logger will be written in the same location)\n \"\"\"\n now = datetime.datetime.now()\n\n formatter = logging.Formatter(\n \"%(asctime)s \\t [%(levelname)s | %(filename)s:%(lineno)s] > %(message)s\"\n )\n\n # Get the folder path\n dir = os.path.dirname(os.path.abspath(__file__))\n\n dirname = \"log\"\n dir_path = os.path.join(dir, dirname)\n if not os.path.isdir(dir_path):\n os.mkdir(dir_path)\n\n fileHandler = logging.FileHandler(\n dir_path\n + \"/log_\"\n + \"_\"\n + logger_name\n + \"_\"\n + now.strftime(\"%Y-%m-%d\")\n + \".log\"\n )\n\n fileHandler.setFormatter(formatter)\n\n self._logger.addHandler(fileHandler)\n\n def logger_format(self):\n \"\"\"\n Format the logger stout\n \"\"\"\n\n # Style the logger\n format_str = (\n \"%(asctime)s \\t [%(levelname)s | %(filename)s:%(lineno)s] > %(message)s\"\n )\n\n date_format = \"%d/%m/%Y %H:%M:%S\"\n\n # Colors for logger different output\n log_colors = {\n \"DEBUG\": \"cyan\",\n \"INFO\": \"blue\",\n \"WARNING\": \"green\",\n \"ERROR\": \"yellow\",\n \"CRITICAL\": \"bold_red,bg_white\",\n }\n\n c_format = \"%(log_color)s\" + format_str\n\n colored_format = ColoredFormatter(\n c_format, reset=True, log_colors=log_colors, datefmt=date_format\n )\n\n return colored_format\n\n def get_logger(self):\n return self._logger\n\n\n# Method is used to get the default logger\ndef get_default_logger():\n \"\"\"\n Gets the default logger\n \"\"\"\n return Logger.__call__().get_logger()\n\n\n# For testing\nif __name__ == \"__main__\":\n logger = Logger('234123').get_logger()\n logger.info(\"testing, info\")\n logger.warning(\"testing, info\")\n logger.debug(\"testing, info\")\n # logger.error(\"testing, error\", True)\n logger.critical(\"testing, critical\")\n # logger.critical(\"testing, critical, true\", True)\n", "id": "7184471", "language": "Python", "matching_score": 2.604640483856201, "max_stars_count": 0, "path": "spiderlib/logging/_logger.py" }, { "content": "from ._logger import get_default_logger\nlogger = get_default_logger()\n\nfrom ._logger import Logger as NewLogger\n\nimport logging\n\n\n# TODO: Add the level names in the logger class\n# from logging import _levelToName\n# logger_levels = _levelToName", "id": "10442232", "language": "Python", "matching_score": 0.08922611176967621, "max_stars_count": 0, "path": "spiderlib/logging/__init__.py" }, { "content": "import os\n\n\nclass Config(object):\n # shard configurations will be here\n pass\n\n\nclass ProductionConfig(Config):\n\n run_with_scheduler = os.getenv(\"RUN_WITH_SCHEDULER\", True)\n spider_name = os.getenv(\"SPIDER_NAME\", None)\n\n POSTGRES_CONN = {\n \"POSTGRES_URL\": \"postgres\",\n \"POSTGRES_USER\": \"postgres\",\n \"POSTGRES_PW\": \"<PASSWORD>\",\n \"POSTGRES_DB\": \"postgres\",\n }\n\n\nclass DevelopmentConfig(Config):\n\n run_with_scheduler = os.getenv(\"RUN_WITH_SCHEDULER\", True)\n spider_name = os.getenv(\"SPIDER_NAME\", None)\n\n POSTGRES_CONN = {\n \"POSTGRES_URL\": 
\"localhost:54320\",\n \"POSTGRES_USER\": \"postgres\",\n \"POSTGRES_PW\": \"<PASSWORD>\",\n \"POSTGRES_DB\": \"postgres\",\n }\n\n\ndef get_config(testing=False):\n spiders_evn = os.getenv(\"SPIDERS_EVN\", \"false\")\n\n if not spiders_evn:\n print(\"No value set for SPIDERS_EVN, Development config loaded\")\n return ProductionConfig()\n elif spiders_evn == \"production\":\n print(\"Production config loaded\")\n return ProductionConfig()\n elif spiders_evn == \"development\":\n print(\"Development config loaded\")\n return DevelopmentConfig()\n else:\n print(\"No value match for SPIDERS_EVN, Production config loaded\")\n return ProductionConfig()\n", "id": "2634338", "language": "Python", "matching_score": 2.992011308670044, "max_stars_count": 0, "path": "legalist_spider/config.py" }, { "content": "from spiderlib.db.database import Database\n\n# For testing purposes\nPOSTGRES_CONN = {\n \"POSTGRES_URL\": \"localhost:54320\",\n \"POSTGRES_USER\": \"postgres\",\n \"POSTGRES_PW\": \"<PASSWORD>\",\n \"POSTGRES_DB\": \"postgres\"\n}\n\nif __name__ == '__main__':\n db = Database(**POSTGRES_CONN)\n print(POSTGRES_CONN)\n db._recreate_database()\n", "id": "53690", "language": "Python", "matching_score": 0.07394026219844818, "max_stars_count": 0, "path": "spiderlib/db/run.py" }, { "content": "import os\nfrom flask import Flask, request, redirect,url_for\nfrom flask_sqlalchemy import SQLAlchemy\nfrom config import basedir\n\napp = Flask(__name__, static_url_path='/static')\napp.config.from_object('config')\ndb=SQLAlchemy(app)\n\n\nfrom app import views, models\n\n", "id": "2088365", "language": "Python", "matching_score": 0.7271507382392883, "max_stars_count": 2, "path": "WebServer/app/__init__.py" }, { "content": "WTF_CSRF_ENABLED = True\nSECRET_KEY = 'bsajdfdsf$£^%$dsjkioe4895ry48fdf;E\"RFE\"RWE\"\"'\n\nimport os\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\nSQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'project.sqlite')\nSQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')\nSQLALCHEMY_TRACK_MODIFICATIONS = True\n", "id": "10225570", "language": "Python", "matching_score": 0.40102672576904297, "max_stars_count": 2, "path": "WebServer/config.py" }, { "content": "#! 
/usr/bin/env python3\n\n'''\n This to gather all the files and put them together in one folder\n'''\n\nimport os\nimport shutil\n\nfrom TOR.Helper.Helper import Helper\nfrom TOR.Helper.Helper import MSG_TYPES\n\n#\nclass MoveFiles():\n def __init__(self, dnsPath='none', webPath='none'):\n self.mode = ''\n self.SOURCE_PATH = 'FetchFiles'\n self.DESTINATION_PATH = 'GatheredFiles/'\n\n def findAllDNSFiles(self, folder: object = 'Logs') -> object:\n RootPath = '\\\\'.join(os.path.dirname(os.path.abspath(__file__)).split('\\\\')[:-1])\n newDestinationPath = os.path.join(RootPath, self.DESTINATION_PATH)\n\n # find all the logs folders for DNS\n files = [os.path.join(dp, f) for dp, dn, fn in os.walk(os.path.expanduser(self.SOURCE_PATH)) for f in fn]\n\n if folder =='Logs':\n newDestinationPath = os.path.join(newDestinationPath, \"Logs\")\n # find all the logs files for DNS\n filteredLogFiles = [filePath for filePath in files if\n \"logs\" in filePath.lower() and \"error\" not in filePath.lower() and \"DNS\" in filePath]\n elif folder == 'JSON': # TODO: need some more work\n newDestinationPath = os.path.join(newDestinationPath, \"JSON\")\n # find all the logs files for DNS\n filteredLogFiles = [filePath for filePath in files if\n \"JSON\" in filePath.lower() and \"error\" not in filePath.lower() and \"DNS\" in filePath and \"Checking\" in filePath]\n\n count = 0\n for filePath in filteredLogFiles:\n try:\n shutil.move(filePath, newDestinationPath)\n count += 1\n except shutil.Error as e:\n pass\n # os.rename(filePath, ('v%s _' % index) + filePath)\n # shutil.move(filePath, newDestinationPath)\n Helper.printOnScreenAlways('%d files has been moved.' % count, MSG_TYPES.RESULT)\n\n def findAllWebFiles(self, folder='Logs'):\n RootPath = '\\\\'.join(os.path.dirname(os.path.abspath(__file__)).split('\\\\')[:-1])\n newDestinationPath = os.path.join(RootPath, self.DESTINATION_PATH)\n\n # find all the logs folders for DNS\n files = [os.path.join(dp, f) for dp, dn, fn in os.walk(os.path.expanduser(self.SOURCE_PATH)) for f in fn]\n\n if folder == 'Logs':\n newDestinationPath = os.path.join(newDestinationPath, \"Logs\")\n # find all the logs files for DNS\n filteredLogFiles = [filePath for filePath in files if\n \"logs\" in filePath.lower() and \"error\" not in filePath.lower() and \"WEB\" in filePath]\n elif folder == 'JSON': # TODO: need some more work\n newDestinationPath = os.path.join(newDestinationPath, \"JSON\")\n # find all the logs files for DNS\n filteredLogFiles = [filePath for filePath in files if\n \"JSON\" in filePath.lower() and \"DNS\" in filePath and \"Checking\" in filePath]\n\n count = 0\n for filePath in filteredLogFiles:\n try:\n shutil.move(filePath, newDestinationPath)\n count += 1\n\n except shutil.Error as e:\n pass\n\n Helper.printOnScreenAlways('%d files has been moved.' % count, MSG_TYPES.RESULT)\n\nif __name__ == '__main__': # for debugging purpose\n movefiles = MoveFiles()\n movefiles.findAllDNSFiles(folder='Logs') # folder='JSON'\n", "id": "11639062", "language": "Python", "matching_score": 2.8286848068237305, "max_stars_count": 2, "path": "TOR/Helper/MoveFiles.py" }, { "content": "#! 
/usr/bin/env python3\n\n'''\nTo move the DNS files to DNS server.\nYou MUST have the KEY stored on your PC\n'''\n\nimport sys\nimport datetime\nimport os\n\nfrom enum import Enum\nfrom DNS.Helper.Helper import Helper\nfrom DNS.Helper.Helper import MSG_TYPES\n\nDNS_SERVERIP = '172.16.58.3'\nDNS_SERVER_PATH = 'dns_115_B'\nKEY_PATH = 'C:/pem/DNS_MSc_Thesis_amer.pem'\n\nclass TransferFiles:\n\n def __init__(self):\n self.mode = ''\n self.Key = KEY_PATH\n\n def TransferToDNS(self,folderName='none'):\n try:\n RootPath = '\\\\'.join(os.path.dirname(os.path.abspath(__file__)).split('\\\\')[:-1])\n com = 'scp -r -i %s \"%s\" ubuntu@%s:/home/ubuntu/%s/'\n if folderName == 'none':\n folderName = self.StoreDNSFilesPath\n\n com = (com % (self.Key, RootPath, DNS_SERVERIP,folderName ))\n os.system(com)\n\n except Exception as ex:\n print(ex)\n\n def getdate(self):\n date = datetime.datetime.now()\n date_ = (((str(date)).split('.')[0])).replace(':', '-').replace(' ','_')\n\n return date_\n\n def DeleteTempFiles(self):\n '''\n Delete json and log files\n '''\n pass\n\n def run(self,argv):\n Helper.printOnScreenAlways('Transferring files to DNS server .....', MSG_TYPES.RESULT)\n\n print('path: ' + str(os.chdir(os.path.dirname(os.path.realpath(__file__)))))\n try:\n if len(argv) != 1:\n if argv[1] == '-move': # DNS Server\n if argv.__len__() > 3:\n if argv[2] == '-n':\n self.TransferToDNS(folderName=argv[3])\n Helper.printOnScreenAlways('Transferring files is done', MSG_TYPES.RESULT)\n else:\n Helper.printOnScreenAlways('Missing parameters. Folder Name?! ', MSG_TYPES.ERROR)\n\n except Exception as ex:\n print(ex)\n\nif __name__ == '__main__':\n\n argv = sys.argv\n transfer = TransferFiles()\n\n try:\n if argv.__len__() > 1:\n transfer.run(argv)\n else:\n transfer.run(['', '-move', '-n', DNS_SERVER_PATH])\n\n except Exception as ex:\n print(ex)\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "id": "2181989", "language": "Python", "matching_score": 3.517749309539795, "max_stars_count": 2, "path": "DNS/Helper/TransferFiles.py" }, { "content": "#! 
/usr/bin/env python3\n\n'''\n This file is to fetch files from and to servers(DNS/WebServer)\n YOU MUST HAVE THE KEY STORE ON YOUR MACHINE.\n'''\n\nimport sys\nimport datetime\nimport os\nfrom enum import Enum\nfrom MoveFiles import MoveFiles\nfrom Helper import Helper\nfrom Helper import MSG_TYPES\n\n\nDNS_SERVERIP = '172.16.17.32'\nWEB_SERVERIP = '192.168.127.12'\nDNS_SERVER_KEY = \"C:/pem/DNS_MSc_Thesis_amer.pem\"\nWEB_SERVER_KEY = \"C:/pem/DNS_MSc_Thesis_amer.pem\"\n\n#\nclass FETCHFROM_OPT(Enum):\n FromDNS = 'scp -r -i scp -r -i %s ' + 'ubuntu@%s' % DNS_SERVERIP + ':/home/ubuntu/%s \"%s\" '\n FromDNSLogs = 'scp -r -i scp -r -i %s ' + 'ubuntu@%s' % DNS_SERVERIP + ':/home/ubuntu/%s/Logs \"%s\" '\n FromDNSJSON = 'scp -r -i scp -r -i %s ' + 'ubuntu@%s' % DNS_SERVERIP + ':/home/ubuntu/%s/JSON \"%s\" '\n FromWebServer = 'scp -r -i %s ' + 'ubuntu@%s' % WEB_SERVERIP + ':/home/ubuntu/%s \"%s\" '\n FromWebServerJOSNs = 'scp -r -i %s ' + 'ubuntu@%s' % WEB_SERVERIP + ':/home/ubuntu/%s/JSON \"%s\" '\n\n#\nclass FETCHFILE_OPT(Enum):\n All = 'all'\n Logs = 'logs'\n JSON = 'json'\n\n#\nclass FetchFiles:\n DnsServerPath = 'dns_114_B'\n WebServerPath = 'web402'\n\n def __init__(self, dnsPath='none',webPath='none'):\n self.mode = ''\n self.Key = DNS_SERVER_KEY\n if dnsPath == 'none':\n self.DnsPath = self.DnsServerPath\n else:\n self.DnsPath = dnsPath\n\n if webPath == 'none':\n self.WebPath = self.WebServerPath\n else:\n self.WebPath = webPath\n\n self.GeneratedDate = self.getdate()\n self.StorePath = \"FetchFiles\"\n self.StoreDNSFolderName = \"DNSServer%s\" % self.GeneratedDate\n self.StoreWebFolderName = \"WEBServer%s\" % self.GeneratedDate\n self.StoreDNSFilesPath = ('%s\\%s' % (self.StorePath, self.StoreDNSFolderName))\n self.StoreWebFilesPath = ('%s\\%s' % (self.StorePath, self.StoreWebFolderName))\n\n # fetch files from our DNS server - all files or just logs.\n def fetchFromDNS(self,folderName='none', mode = FETCHFILE_OPT.Logs.value):\n Helper.printOnScreenAlways('Fetching DNS files....',MSG_TYPES.YELLOW)\n try:\n if folderName == 'none':\n folderName = self.StoreDNSFilesPath\n dict = os.getcwd() + (\"\\%s\" % folderName) # the dirctory where we store the files\n com = ''\n if mode == FETCHFILE_OPT.All.value:\n com = FETCHFROM_OPT.FromDNS.value\n elif mode == FETCHFILE_OPT.Logs.value:\n com = FETCHFROM_OPT.FromDNSLogs.value\n elif mode == FETCHFILE_OPT.JSON.value:\n com = FETCHFROM_OPT.FromDNSJSON.value\n\n com = ( com % (self.Key,self.DnsPath,dict))\n os.system(com)\n Helper.printOnScreenAlways('Fetching file is done.',MSG_TYPES.RESULT)\n except Exception as ex:\n print(ex)\n\n # fetch files from our Web server - all files or just logs.\n def fetchFromServer(self,folderName='none',mode=FETCHFILE_OPT.Logs.value):\n Helper.printOnScreenAlways('Fetching WEb server files....',MSG_TYPES.YELLOW)\n try:\n if folderName == 'none':\n folderName = self.StoreWebFilesPath\n dict = os.getcwd() + (\"\\%s\" % folderName) # the directory where we store the files\n com = ''\n if mode == FETCHFILE_OPT.All.value:\n com = FETCHFROM_OPT.FromWebServer.value\n elif mode == FETCHFILE_OPT.JSON.value:\n com = FETCHFROM_OPT.FromWebServerJOSNs.value\n\n\n com = (com % (self.Key,self.WebPath,dict))\n os.system(com)\n Helper.printOnScreenAlways('Fetching file is done.', MSG_TYPES.RESULT)\n except Exception as ex:\n print(ex)\n\n #\n def getdate(self):\n date = datetime.datetime.now()\n date_ = (((str(date)).split('.')[0])).replace(':', '-').replace(' ','_')\n return date_\n\n # make the directories in case they are 
missing\n def makeDirectories(self):\n try:\n\n if not os.path.exists(self.StorePath):\n os.makedirs(self.StorePath)\n os.makedirs(self.StoreDNSFilesPath)\n os.makedirs(self.StoreWebFilesPath)\n else:\n if not os.path.exists(self.StoreDNSFilesPath):\n os.makedirs(self.StoreDNSFilesPath)\n\n if not os.path.exists(self.StoreWebFilesPath):\n os.makedirs(self.StoreWebFilesPath)\n\n except Exception as ex:\n print(ex)\n\n #\n def removeEmptyDirectories(self):\n Folders = [x for x in os.listdir('FetchFiles')]\n for folder in Folders:\n os.rmdir(folder)\n\n #\n def run(self,argv):\n self.makeDirectories()\n try:\n if len(argv) != 1:\n if argv[1] == '-dns': # DNS Server\n if argv[2] == '-fetch':\n if argv.__len__() > 3:\n if argv[3] == '-all':\n self.fetchFromDNS(mode=FETCHFILE_OPT.All.value)\n elif argv[3] == '-logs':\n self.fetchFromDNS( mode=FETCHFILE_OPT.Logs.value)\n elif argv[3] == '-json':\n self.fetchFromDNS(mode=FETCHFILE_OPT.JSON.value)\n else:\n self.fetchFromDNS()\n else:\n self.fetchFromDNS() # get all the file by defailt\n elif argv[1] == '-web': # Web Server\n if argv[2] == '-fetch':\n if argv[3] == '-all':\n self.fetchFromServer(mode=FETCHFILE_OPT.All.value)\n elif argv[3] == '-json':\n self.fetchFromServer(mode=FETCHFILE_OPT.JSON.value)\n else:\n self.fetchFromServer()\n else:\n self.fetchFromServer() # get Log files by defailt\n except Exception as ex:\n print(ex)\n\nif __name__ == '__main__': # for debugging purpose\n argv= sys.argv\n fetch = FetchFiles()\n fetch.run(['','-dns','-fetch', '-all'])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "id": "8834978", "language": "Python", "matching_score": 6.8148512840271, "max_stars_count": 2, "path": "TOR/Helper/FetchFiles.py" }, { "content": "#! /usr/bin/env python3\n\n'''\n this file is to move and fetch files from and to servers(DNS/WebServer)\n you have to have the KEY\n'''\n\nimport sys\nimport datetime\nimport os\n\nfrom enum import Enum\n\n#\nclass FETCH_OPT(Enum):\n ToDNS = 'scp -r -i %s %s [email protected]:/home/ubuntu/%s/'\n FromDNSLogs = 'scp -r -i scp -r -i %s [email protected]:/home/ubuntu/%s/Logs \"%s\" '\n ToWebServer = \"\"\n FromWebServerLogs = \"\"\n\n#\nclass FetchFile:\n oDnsPath = 'dns_0994Test'\n def __init__(self, dnsPath='none',webPath='none'):\n self.mode = ''\n self.Key = \"C:/pem/DNS_MSc_Thesis_amer.pem\"\n if dnsPath == 'none':\n self.DnsPath = self.oDnsPath\n else:\n self.DnsPath = dnsPath\n\n self.StorePath = \"FetchFiles\"\n self.StoreDNSFolderName = \"DNSLogs\"\n self.StoreWebFolderName = \"WEBLogs\"\n self.StoreDNSFilesPath = ('%s\\%s' % (self.StorePath, self.StoreDNSFolderName))\n self.StoreWEbFilesPath = ('%s\\%s' % (self.StorePath, self.StoreWebFolderName))\n self.WebPath = webPath\n\n #\n def moveFromDNS(self, dnsPath ,folderName='none'):\n try:\n if folderName == 'none':\n folderName = self.StoreDNSFilesPath\n dict= os.getcwd() + (\"\\%s\" % folderName) # the dirctory where we store the files\n com = ( FETCH_OPT.FromDNSLogs.value % (self.Key,self.DnsPath,dict))\n os.system(com)\n print('Moving file is done.')\n\n except Exception as ex:\n print(ex)\n\n #\n def moveToDNS(self, dnsPath , folderName='none'):\n try:\n if folderName == 'none':\n folderName = self.StoreDNSFilesPath\n dict= os.getcwd() + (\"\\%s\" % folderName) # the dirctory where we store the files\n com = (FETCH_OPT.ToDNS.value % (self.Key,dict,self.DnsPath))\n os.system(com)\n print('Fetching file is done.')\n except Exception as ex:\n print(ex)\n\n #\n def moveToWebServer(self, folderName='web'):\n os.system('scp -r -i 
C:/pem/DNS_MSc_Thesis_amer.pem C:/TOR_PRJ/TO/web [email protected]:/home/ubuntu/%s/' % folderName)\n\n #\n def getdate(self):\n date = datetime.datetime.now()\n date_ = (((str(date)).split('.')[0])).replace(':', '-').replace(' ','_')\n return date_\n\n #\n def makeDirectories(self):\n '''\n Make the directories in case they are missing\n '''\n try:\n\n if not os.path.exists(self.StorePath):\n os.makedirs(self.StorePath)\n os.makedirs(self.StoreDNSFilesPath)\n os.makedirs(self.StoreWEbFilesPath)\n else:\n if not os.path.exists(self.StoreDNSFilesPath):\n os.makedirs(self.StoreDNSFilesPath)\n\n if not os.path.exists(self.StoreWEbFilesPath):\n os.makedirs(self.StoreWEbFilesPath)\n\n except Exception as ex:\n print(ex)\n\n\n #\n def run(self,argv):\n self.makeDirectories()\n date=self.getdate()\n print('path: ' + str(os.chdir(os.path.dirname(os.path.realpath(__file__)))))\n print('ste:'+ str(os.getcwd()))\n try:\n if len(argv) != 1:\n if argv[1] == '-d': # DNS Server\n if argv[2] == 'to':\n self.moveToDNS(date)\n elif argv[2] == 'from':\n self.moveFromDNS(date)\n elif argv[1] == '-w': # Web Server\n if argv[2] =='to':\n self.moveToWebServer(date)\n elif argv[2] =='from':\n self.moveFromServer(date)\n print('DONE :)')\n\n except Exception as ex:\n print(ex)\n\n\nif __name__ == '__main__':\n argv= sys.argv\n fetch =FetchFile()\n fetch.run(['','-d','to'])\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "id": "5386096", "language": "Python", "matching_score": 6.513518333435059, "max_stars_count": 2, "path": "WebServer/Helper/FetchFiles.py" }, { "content": "#! /usr/bin/env python3\n\n'''\n This file moves file between local PC and DNS Server and Web Server.\n'''\n\n\nimport sys\nimport datetime\nimport os\n\ndef moveToDNS(dic_name='dns'):\n os.system('scp -r -i C:/pem/DNS_MSc_Thesis_amer.pem C:/TOR_PRJ/DNS/TO [email protected]:/home/ubuntu/%s/' % dic_name)\n\ndef moveFromDNS(dic_name='dns_back'):\n #\n dict = 'DNSlogs\\%s' % dic_name\n os.makedirs(dict)\n dict= os.getcwd() + (\"\\%s\" % dict)\n com = ('scp -r -i scp -r -i C:/pem/DNS_MSc_Thesis_amer.pem [email protected]:/home/ubuntu/dns9/Logs \"%s\" ' % dict)\n print(com)\n os.system(com)\n\ndef moveToWebServer(dic_name='web'):\n os.system('scp -r -i C:/pem/DNS_MSc_Thesis_amer.pem C:/TOR_PRJ/TO/web [email protected]:/home/ubuntu/%s/' % dic_name)\n\ndef moveFromServer(dic_name='web_back'):\n os.mkdir('C:/Web/FROM/%s' % dic_name)\n os.system('scp -r -i C:/pem/DNS_MSc_Thesis_amer.pem [email protected]:/home/ubuntu/web/ C:/Web/FROM/%s' % dic_name)\n\ndef getdate():\n date = datetime.datetime.now()\n date_ = (((str(date)).split('.')[0])).replace(':', '-').replace(' ','_')\n return date_\n\n\ndef makeDirectories():\n '''\n Make the directories in case they are missing.\n '''\n try:\n if not os.path.exists('DNSLogs'):\n os.makedirs('DNSLogs')\n if not os.path.exists('WEBLogs'):\n os.makedirs('WEBLogs')\n\n except Exception as ex:\n print(ex)\n\ndef run(argv):\n makeDirectories()\n date=getdate()\n print('path: ' + str(os.chdir(os.path.dirname(os.path.realpath(__file__)))))\n print('ste:'+ str(os.getcwd()))\n try:\n if len(argv) != 1:\n if argv[1] == '-d': # DNS Server\n if argv[2] == 'to':\n moveToDNS(date)\n elif argv[2] == 'from':\n moveFromDNS(date)\n elif argv[1] == '-w': # Web Server\n if argv[2] =='to':\n moveToWebServer(date)\n elif argv[2] =='from':\n moveFromServer(date)\n print('DONE :)')\n\n except Exception as ex:\n print(ex)\n\n\nif __name__ == '__main__':\n argv= sys.argv\n run(['','-d','from'])\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "id": "972512", "language": 
"Python", "matching_score": 0.14068341255187988, "max_stars_count": 2, "path": "OtherTools/MoveFiles.py" }, { "content": "from .spider import Spider\n\n\nclass DefaultQuotesSpider(Spider):\n \"\"\"\n A spider that query the default page http://quotes.toscrape.com/'\n and retrieves all the quotes, authors and tags.\n \"\"\"\n\n name = \"default_quotes\"\n base_url = \"http://quotes.toscrape.com/\"\n\n start_urls = [\n base_url,\n ]\n\n def parse(self, response):\n # Response is a selector obj\n # Select div tag with class quote\n for quote in response.css(\"div.quote\"):\n\n # Get the micro-data of the author\n author_href = quote.css(\"span a::attr(href)\").get()\n\n quote_dict = {\n \"text\": quote.css(\"span.text::text\").get(),\n \"author\": quote.css(\"span small::text\").get(),\n \"tags\": quote.css(\"div.tags a.tag::text\").getall(),\n }\n\n yield response.follow(\n author_href, callback=self.parse_author, cb_kwargs=quote_dict\n )\n\n # Find the next page anchor tag, then select the href\n next_page = response.css(\"li.next a::attr(href)\").get()\n # Send a request to the next page if exist\n if next_page is not None:\n # print(f\"next_page: {next_page}\")\n yield response.follow(next_page, callback=self.parse)\n\n def parse_author(self, response, **cb_kwargs):\n \"\"\" parse page \"\"\"\n\n author_details_div = response.css(\"div.author-details\")\n\n cb_kwargs[\"author_details\"] = {\n \"author_name\": author_details_div.css(\"h3.author-title::text\").get(),\n \"date_of_birth\": author_details_div.css(\n \"p span.author-born-date::text\"\n ).get(),\n \"location\": author_details_div.css(\n \"p span.author-born-location::text\"\n ).get(),\n \"description\": author_details_div.css(\"div.author-description::text\").get(),\n }\n\n yield cb_kwargs\n", "id": "12586314", "language": "Python", "matching_score": 4.286336898803711, "max_stars_count": 0, "path": "legalist_spider/spiders/default_spider.py" }, { "content": "from .spider import Spider\nfrom scrapy.http import FormRequest\n\n\nclass LoginQuotesSpider(Spider):\n \"\"\"\n A spider that query the page http://quotes.toscrape.com/ after login'\n and retrieves all the quotes, authors and tags.\n \"\"\"\n\n name = \"login_quotes\"\n base_url = \"http://quotes.toscrape.com/login\"\n\n start_urls = [\n base_url,\n ]\n\n def parse(self, response):\n # Get the csrf_token from the form\n token = response.css(\"form input::attr(value)\").extract_first()\n\n return FormRequest.from_response(\n response,\n formdata={\"csrf_token\": token, \"password\": \"<PASSWORD>\", \"username\": \"amer\"},\n callback=self.scrape_pages,\n )\n\n def scrape_pages(self, response):\n\n for quote in response.css(\"div.quote\"):\n\n quote_dict = {\n \"text\": quote.css(\"span.text::text\").get(),\n \"author\": quote.css(\"span small::text\").get(),\n \"tags\": quote.css(\"div.tags a.tag::text\").getall(),\n }\n\n yield quote_dict\n\n # Find the next page anchor tag, then select the href\n next_page = response.css(\"li.next a::attr(href)\").get()\n # Send a request to the next page if exist\n if next_page is not None:\n yield response.follow(next_page, callback=self.scrape_pages)\n", "id": "550722", "language": "Python", "matching_score": 3.4521095752716064, "max_stars_count": 0, "path": "legalist_spider/spiders/login_spider.py" }, { "content": "from .spider import Spider\n\n\nclass TableQuotesSpider(Spider):\n \"\"\"\n A spider that query the table page http://quotes.toscrape.com/tableful/'\n and retrieves all the quotes, authors and tags.\n \"\"\"\n\n name = 
\"table_quotes\"\n base_url = \"http://quotes.toscrape.com/tableful\"\n\n start_urls = [\n base_url,\n ]\n\n def parse(self, response):\n\n # First row and last row are blank\n table_rows = response.css(\"tr\")[1:]\n quotes_table_rows = table_rows[:-1]\n\n # Go through all the tables rows (tr)\n for i, tr in enumerate(quotes_table_rows):\n # Every even row will be a text and author\n if i % 2 == 0:\n # Text and author are in the same table column (td)\n # need to split them\n text_and_author = tr.css(\"td::text\").get()\n text_and_author = text_and_author.split(\" Author: \")\n\n text = text_and_author[0]\n author = text_and_author[1]\n\n # Every odd row will be tags\n else:\n tags = tr.css(\"a::text\").getall()\n\n info_dict = {\"text\": text, \"author\": author, \"tags\": tags}\n\n # Yield only in the odd iteration\n yield info_dict\n\n # Find the next page anchor tag, then select the href\n # B check if the last two td tags contain the word 'Next'\n for text in table_rows[-1].css(\"td a::text\").getall():\n if \"Next\" in text:\n next_page = table_rows[-1].css(\"td a::attr(href)\").getall()[-1]\n # Send a request to the next page if exist\n if next_page is not None:\n yield response.follow(next_page, callback=self.parse)\n", "id": "12429111", "language": "Python", "matching_score": 3.2084975242614746, "max_stars_count": 0, "path": "legalist_spider/spiders/table_spider.py" }, { "content": "import re\nfrom .spider import Spider\n\n\nclass JavascriptQuotesSpider(Spider):\n \"\"\"\n A spider that query the javascript page http://quotes.toscrape.com/js'\n and retrieves all the quotes, authors and tags.\n \"\"\"\n\n # URL query: \"http://quotes.toscrape.com/js/page/NUMBER/\"\n name = \"js_quotes\"\n base_url = \"http://quotes.toscrape.com/js/page/%s/\"\n page = 1\n\n # Construct the initial URL query\n start_urls = [\n base_url % page,\n ]\n\n def parse(self, response):\n # Response body is an HTML that includes js to\n # Render the quotes into the HTML\n js_text = response.selector.xpath(\"//script\").getall()[1].replace(\"\\n\", \"\")\n # Remove any instance of two spaces or more\n js_text = re.sub(\"\\s\\s+\", \"\", js_text)\n\n # Regular Expressions to extract the tags, authors, and texts\n # From the JSON obj\n tags_pattern = '\"tags\": \\[.*?\\]'\n author_pattern = '\"name\": \".*?\"'\n text_pattern = '\"text\": \".*?\"'\n\n # Extract the tags, authors, and texts\n tags_list = re.findall(tags_pattern, js_text)\n authors_list = re.findall(author_pattern, js_text)\n texts_list = re.findall(text_pattern, js_text)\n\n # To use the same pipeline we need to yield each item individually\n list_of_dicts = []\n\n for i in range(len(authors_list)):\n tags = tags_list[i]\n author = authors_list[i]\n text = texts_list[i]\n\n author = author.split(\": \")[1]\n text = text.split(\": \")[1]\n\n list_of_dicts.append({\"text\": text, \"author\": author, \"tags\": tags})\n\n # yield each item individually\n for info_dict in list_of_dicts:\n yield info_dict\n\n # Find the next page anchor tag, then select the href\n next_page = response.css(\"li.next a::attr(href)\").get()\n if next_page is not None:\n self.page += 1\n yield response.follow(self.base_url % self.page, callback=self.parse)\n", "id": "8547156", "language": "Python", "matching_score": 3.665860652923584, "max_stars_count": 0, "path": "legalist_spider/spiders/javascript_spider.py" }, { "content": "import scrapy\nimport json\nfrom .spider import Spider\n\n\nclass InfiniteScrollQuotesSpider(Spider):\n \"\"\"\n A spider that query the infinite 
scroll page http://quotes.toscrape.com/scroll'\n and retrieves all the quotes, authors and tags.\n \"\"\"\n\n # API URL: \"http://quotes.toscrape.com/api/quotes?page=NUMBER\"\n name = \"scroll_quotes\"\n base_url = \"http://quotes.toscrape.com/api/quotes?page=%s\"\n page = 1\n\n # Construct the initial api query\n start_urls = [\n base_url % page,\n ]\n\n def parse(self, response):\n # Response.body is a JSON obj as string\n response_body = response.body\n data = json.loads(response_body)\n quotes = data[\"quotes\"]\n\n # For each quote select the text, author, and tags.\n for quote in quotes:\n info_dict = {\n \"text\": quote[\"text\"],\n \"author\": quote[\"author\"][\"name\"],\n \"tags\": quote[\"tags\"],\n }\n\n yield info_dict\n\n # Check if there are more pages to query\n # And query the next page if exist\n if data[\"has_next\"]:\n self.page += 1\n yield response.follow(self.base_url % self.page, callback=self.parse)\n", "id": "4680453", "language": "Python", "matching_score": 1.482591152191162, "max_stars_count": 0, "path": "legalist_spider/spiders/infinite_scroll_spider.py" }, { "content": "import time\n\nfrom apscheduler.schedulers.blocking import BlockingScheduler\nfrom scrapy.crawler import CrawlerProcess\n\nfrom spiders.default_spider import DefaultQuotesSpider\nfrom spiders.infinite_scroll_spider import InfiniteScrollQuotesSpider\nfrom spiders.javascript_spider import JavascriptQuotesSpider\nfrom spiders.login_spider import LoginQuotesSpider\nfrom spiders.table_spider import TableQuotesSpider\n\nfrom config import get_config\nconfig = get_config()\n\ndef run_spider(spider_name='scroll_quotes'):\n # Run without scheduler\n from scrapy import cmdline\n cmdline.execute(f\"scrapy crawl {spider_name}\".split())\n\n\ndef run_all_spiders():\n process_default = CrawlerProcess()\n # We assume that the default spider has to run first and finish before the other spiders run.\n process_default.crawl(DefaultQuotesSpider)\n process_default.start()\n time.sleep(120)\n # process_default.stop()\n\n process = CrawlerProcess()\n process.join()\n active_spiders = [\n TableQuotesSpider,\n JavascriptQuotesSpider,\n LoginQuotesSpider,\n InfiniteScrollQuotesSpider,\n ]\n for spider in active_spiders:\n process.crawl(spider)\n process.start()\n\n\n# Scheduler the spiders\nscheduler = BlockingScheduler()\nscheduler.add_job(run_all_spiders, \"cron\", day=\"*\", hour=\"11,23\")\n\nif __name__ == \"__main__\":\n print (\"Trigger spider\")\n if config.run_with_scheduler is True:\n print(\"Trigger all spiders at certain time - with scheduler\")\n scheduler.start()\n else:\n print(\"Trigger one spider now, then trigger the scheduler\")\n run_spider(config.spider_name)\n scheduler.start()\n", "id": "11990586", "language": "Python", "matching_score": 3.1369946002960205, "max_stars_count": 0, "path": "legalist_spider/main.py" }, { "content": "from .config import get_config\n\nconfig = get_config()\n\n\n# from .spiders.default_spider import DefaultQuotesSpider\n# from .spiders.infinite_scroll_spider import InfiniteScrollQuotesSpider\n# from .spiders.javascript_spider import JavascriptQuotesSpider\n# from .spiders.login_spider import LoginQuotesSpider\n# from .spiders.table_spider import TableQuotesSpider\n#\n", "id": "10392164", "language": "Python", "matching_score": 1.2108280658721924, "max_stars_count": 0, "path": "legalist_spider/__init__.py" }, { "content": "import scrapy\n\n\nclass Spider(scrapy.Spider):\n \"\"\" Base spider class \"\"\"\n\n pass\n", "id": "5259232", "language": "Python", 
"matching_score": 0.9564781188964844, "max_stars_count": 0, "path": "legalist_spider/spiders/spider.py" } ]
2.477195
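A minimal, hedged sketch of running the quotes spider from the row above on its own, outside the APScheduler wiring shown in its main.py. The import path and the "quotes.json" feed location are assumptions for illustration (the project uses relative imports, so the actual package layout may differ); CrawlerProcess, crawl(), start(), and the FEEDS setting are standard Scrapy APIs.

from scrapy.crawler import CrawlerProcess
from legalist_spider.spiders.default_spider import DefaultQuotesSpider  # assumed import path

# Export scraped quotes to a local JSON file instead of the project's own pipeline.
process = CrawlerProcess(settings={
    "FEEDS": {"quotes.json": {"format": "json"}},  # assumed output file, chosen for the example
})
process.crawl(DefaultQuotesSpider)
process.start()  # blocks until the crawl finishes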
tschum
[ { "content": "from ctypes import *\r\nfrom ctypes.wintypes import *\r\nfrom enum import Enum\r\nimport sys\r\n\r\nclass xinput():\r\n def __init__(self,player=0):\r\n if player > 3 or player < 0:\r\n raise Exception(\"Player number must be in the range 0 to 3\")\r\n self.player = player\r\n try:\r\n self.xi_lib = cdll.LoadLibrary(r\"XInput1_4.dll\")\r\n except Exception as err:\r\n print(\"Could not read lib {}\".format(err))\r\n class XINPUT_GAMEPAD(Structure):\r\n _fields_ =[(\"wButtons\",WORD),(\"bLeftTrigger\",c_ubyte),\r\n (\"bRightTrigger\",c_ubyte),(\"sThumbLX\",SHORT),(\"sThumbLY\",SHORT),(\"sThumbRX\",SHORT),(\"sThumbRY\",SHORT)]\r\n class XINPUT_STATE(Structure):\r\n _fields_ =[(\"dwPacketNumber\",DWORD),\r\n (\"Gamepad\",XINPUT_GAMEPAD)]\r\n self.BUTTON_NAME_VALUE ={ 0:\"None\",1:'jpUp',2:'jpDn',4:'jpLeft',8:'jpRight',16:'start',32:'back',64:'LS',\r\n 128:'RS',256:'LB',512:'RB',1024:'Future1',2048:'Future2', 4096:'A',8192:'B',16384:'X',32768:'Y'}\r\n self.buttons_down =[]\r\n self.last_packet = -1\r\n self.thumbSlack = 3000\r\n self.state = XINPUT_STATE()\r\n self.poll()\r\n\r\n def connected(self):\r\n self.ret_val = self.xi_lib.XInputGetState(self.player,pointer(self.state))\r\n return self.ret_val==0\r\n\r\n def poll(self):\r\n self.connected()\r\n if self.ret_val == 0: # xbox stick is conected\r\n self.update_buttons()\r\n else:\r\n self.buttons_down = []\r\n return self.ret_val == 0,self.buttons_down\r\n\r\n def update_buttons(self):\r\n self.last_packet = self.state.dwPacketNumber\r\n self.buttons_down=[ {self.BUTTON_NAME_VALUE[key]: key}\r\n for key in self.BUTTON_NAME_VALUE.keys()\r\n if key & self.state.Gamepad.wButtons ]\r\n if self.state.Gamepad.bRightTrigger!=0:\r\n self.buttons_down.append({'RT':\r\n self.state.Gamepad.bRightTrigger})\r\n if self.state.Gamepad.bLeftTrigger!=0:\r\n self.buttons_down.append({'LT':\r\n self.state.Gamepad.bLeftTrigger})\r\n\r\n if True in [abs(i) - self.thumbSlack > 0 for i in [\r\n self.state.Gamepad.sThumbLX,\r\n self.state.Gamepad.sThumbLY]]:\r\n self.buttons_down.append( { 'LTS': (\r\n self.state.Gamepad.sThumbLX,\r\n self.state.Gamepad.sThumbLY )})\r\n\r\n if True in [ abs(i) - self.thumbSlack > 0 for i in [\r\n self.state.Gamepad.sThumbRX,\r\n self.state.Gamepad.sThumbRY]]:\r\n self.buttons_down.append({'RTS': (\r\n self.state.Gamepad.sThumbRX,\r\n self.state.Gamepad.sThumbRY )})\r\n\r\n def BatteryLevel (self):\r\n class XINPUT_BATTERY_INFORMATION(Structure):\r\n _fields_ =[(\"BatteryType\",BYTE),(\"BatteryLevel\",BYTE)]\r\n class BATTERY_DEVTYPE(Enum):\r\n _GAMEPAD=0x00\r\n _HEADSET=0x01\r\n class BATTERY_TYPE(Enum):\r\n _DISCONNECTED = 0x00 # This device is not connected\r\n _WIRED = 0x01 # Wired device, no battery\r\n _ALKALINE = 0x02 # Alkaline battery source\r\n _TYPE_NIMH = 0x03 # Nickel Metal Hydride battery source\r\n _TYPE_UNKNOWN = 0xFF # Cannot determine the battery type\r\n # These are only valid for wireless, connected devices, with known battery types\r\n # The amount of use time remaining depends on the type of device.\r\n class BATTERY_LEVEL(Enum):\r\n _EMPTY = 0x00\r\n _LOW = 0x01\r\n _MEDIUM = 0x02\r\n _FULL = 0x03\r\n class BATTERY_ERROR(Enum):\r\n _S_OK = 0\r\n _DEVICE_NOT_CONNECTED = 1167\r\n self.battery = XINPUT_BATTERY_INFORMATION()\r\n ret_val = self.xi_lib.XInputGetBatteryInformation(self.player,\r\n BYTE(BATTERY_DEVTYPE._GAMEPAD.value),\r\n pointer(self.battery))\r\n if ret_val == BATTERY_ERROR._S_OK.value:\r\n return ret_val, \"Battery type is {} level is {}.\".format(\r\n 
BATTERY_TYPE(self.battery.BatteryType).name,\r\n BATTERY_LEVEL(self.battery.BatteryLevel).name)\r\n else:\r\n return ret_val, \"Error reading battery {}.\".format(\r\n BATTERY_ERROR(ret_val).name)\r\n\r\n\r\nif '-test' in sys.argv:\r\n player = [xinput(i) for i in range(4)]\r\n for i in range(len(player)):\r\n er_code, bat_data = player[i].BatteryLevel()\r\n if (er_code != 1167):\r\n print(bat_data)\r\n\r\n while True in [i.connected() for i in player]:\r\n for i in range(len(player)):\r\n change,value = player[i].poll()\r\n if change and len(value)>0:\r\n print(\"player={}\".format(i),value)\r\n\r\n print(\"xinput shows no devices connected.\")\r\n", "id": "4256685", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "xinput.py" } ]
0
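A short, hedged usage sketch for the xinput wrapper in the tschum row above (Windows only, since it loads XInput1_4.dll). The polling interval is an arbitrary choice for the example, not something the module prescribes; the constructor, connected(), and poll() signatures follow the class as written in that row.

import time
from xinput import xinput  # the class defined in xinput.py above

pad = xinput(player=0)        # raises if the player index is outside 0..3
while pad.connected():        # loop ends when the controller disconnects
    ok, buttons = pad.poll()  # returns (connected, list of currently pressed buttons/axes)
    if ok and buttons:
        print(buttons)
    time.sleep(0.05)          # poll roughly 20 times per second (example value)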
liuya00
[ { "content": "#!/usr/bin/env python\n#\n# Examples from the talk:\n# <NAME> - Advanced Go Concurrency Patterns - Google I/O 2013\n# https://www.youtube.com/watch?v=QDDwwePbDtw\n# https://code.google.com/p/go/source/browse/2013/advconc?repo=talks\nimport argparse\nimport collections\nimport random\nimport threading\nimport time\n\nfrom chan import Chan, chanselect, quickthread\n\n\nMOCK_POSTS = [\n 'First post from ',\n 'This is the second post from ',\n 'A third post? How interesting. Thanks ',\n 'Woah! A fourth post from '\n]\n\nItem = collections.namedtuple('Item', ['channel', 'title'])\n\nclass MockFetcher(object):\n def __init__(self, domain):\n self.domain = domain\n self.posts = [p + self.domain for p in MOCK_POSTS]\n\n def fetch(self):\n if not self.posts:\n return [], time.time() + 1000.0\n item_list = [Item(self.domain, self.posts.pop(0))]\n next_time = time.time() + random.random() * 0.2 + 0.1\n return item_list, next_time\n\n\ndef timeout_after(delay):\n def thread(ch):\n time.sleep(delay)\n ch.put(None)\n c = Chan()\n t = threading.Thread(name='timeout', target=thread, args=(c,))\n t.daemon = True\n t.start()\n return c\n\n\nclass Subscription(object):\n def __init__(self, fetcher):\n self.fetcher = fetcher\n self.updates_chan = Chan()\n self.quit = Chan()\n self.thread = threading.Thread(\n name='Subscription',\n target=self._run)\n #self.thread.daemon = True\n self.thread.start()\n\n def _run(self):\n next_time = time.time()\n pending = [] # First is most recent. Should be a deque\n err = None\n\n while True:\n start_fetch = timeout_after(max(0.0, next_time - time.time()))\n\n # Does or doesn't wait on updates_chan depending on if we have\n # items ready.\n if pending:\n outchans = [(self.updates_chan, pending[0])]\n else:\n outchans = []\n\n ch, value = chanselect([self.quit, start_fetch], outchans)\n if ch == self.quit:\n errc = value\n self.updates_chan.close()\n errc.put(err)\n return\n elif ch == start_fetch:\n try:\n err = None\n item_list, next_time = self.fetcher.fetch()\n except Exception as ex:\n err = ex\n next_time = time.time() + 10.0\n continue\n pending.extend(item_list)\n else: # self.updates_chan\n pending.pop(0) # Pops the sent item\n\n def updates(self):\n return self.updates_chan\n\n def close(self):\n errc = Chan()\n self.quit.put(errc)\n result = errc.get()\n self.thread.join(0.2)\n assert not self.thread.is_alive()\n return result\n\n\nclass Merged(object):\n def __init__(self, subscriptions):\n self.subscriptions = subscriptions\n self.updates_chan = Chan()\n self.quit = Chan()\n\n self.thread = threading.Thread(\n name=\"Merged\",\n target=self._run)\n self.thread.start()\n\n def _close_subs_collect_errs(self):\n return [sub.close() for sub in self.subscriptions]\n\n def _run(self):\n subchans = [sub.updates() for sub in self.subscriptions]\n while True:\n c, value = chanselect(subchans + [self.quit], [])\n if c == self.quit:\n value.put(self._close_subs_collect_errs())\n self.updates_chan.close()\n return\n else:\n item = value\n\n c, _ = chanselect([self.quit], [(self.updates_chan, item)])\n if c == self.quit:\n value.put(self._close_subs_collect_errs())\n self.updates_chan.close()\n return\n else:\n pass # Send successful\n\n def updates(self):\n return self.updates_chan\n\n def close(self):\n errc = Chan()\n self.quit.put(errc)\n result = errc.get()\n self.thread.join(timeout=0.2)\n assert not self.thread.is_alive()\n return result\n\n\ndef main():\n FetcherCls = MockFetcher\n\n merged = Merged([\n Subscription(FetcherCls('blog.golang.org')),\n 
Subscription(FetcherCls('googleblog.blogspot.com')),\n Subscription(FetcherCls('googledevelopers.blogspot.com'))])\n\n # Close after a while\n def close_later():\n time.sleep(3)\n print(\"Closed: {}\".format(merged.close()))\n quickthread(close_later)\n\n for it in merged.updates():\n print(\"{} -- {}\".format(it.channel, it.title))\n\n time.sleep(0.1)\n print(\"Still active: (should only be _MainThread and timeouts)\")\n for active in threading._active.itervalues():\n print(\" {}\".format(active))\n\nif __name__ == '__main__':\n main()\n\n", "id": "2150629", "language": "Python", "matching_score": 3.2643280029296875, "max_stars_count": 20, "path": "examples/ajmani-adv-patt.py" }, { "content": "#!/usr/bin/env python\n#\n# Examples from the talk:\n# <NAME> - Google I/O 2012 - Go Concurrency Patterns\n# http://www.youtube.com/watch?v=f6kdp27TYZs\n# http://code.google.com/p/go/source/browse/2012/concurrency.slide?repo=talks\n\nfrom chan import Chan, chanselect, quickthread\n\nfrom collections import namedtuple\nfrom collections import OrderedDict\nimport time\nimport random\nimport sys\n\nEXAMPLES = OrderedDict()\n\n\n#---------------------------------------------------------------------------\n# Fan In\n#---------------------------------------------------------------------------\n\n\ndef example_fan_in():\n def boring(message):\n def sender(message, c):\n i = 0\n while True:\n c.put(\"%s: %d\" % (message, i))\n time.sleep(0.2 * random.random())\n i += 1\n\n c = Chan()\n quickthread(sender, message, c)\n return c\n\n def fan_in(input1, input2):\n def forwarder(input, output):\n while True:\n output.put(input.get())\n\n c = Chan()\n quickthread(forwarder, input1, c)\n quickthread(forwarder, input2, c)\n return c\n\n c = fan_in(boring(\"Joe\"), boring(\"Ann\"))\n for i in xrange(10):\n print c.get()\n print \"You're both boring; I'm leaving.\"\n\nEXAMPLES['fanin'] = example_fan_in\n\n\n#---------------------------------------------------------------------------\n# Sequence\n#---------------------------------------------------------------------------\n\ndef example_sequence():\n Message = namedtuple(\"Message\", ['string', 'wait'])\n\n def boring(msg):\n c = Chan()\n wait_for_it = Chan()\n\n def sender():\n i = 0\n while True:\n c.put(Message(\"%s: %d\" % (msg, i), wait_for_it))\n time.sleep(0.2 * random.random())\n wait_for_it.get()\n i += 1\n quickthread(sender)\n return c\n\n def fan_in(*input_list):\n def forward(input, output):\n while True:\n output.put(input.get())\n\n c = Chan()\n for input in input_list:\n quickthread(forward, input, c)\n return c\n\n c = fan_in(boring('Joe'), boring('Ann'))\n for i in xrange(5):\n msg1 = c.get(); print msg1.string\n msg2 = c.get(); print msg2.string\n msg1.wait.put(True)\n msg2.wait.put(True)\n print \"You're all boring; I'm leaving\"\n\nEXAMPLES['sequence'] = example_sequence\n\n\n#---------------------------------------------------------------------------\n# Select\n#---------------------------------------------------------------------------\n\ndef example_select():\n def boring(msg):\n c = Chan()\n\n def sender():\n i = 0\n while True:\n c.put(\"%s: %d\" % (msg, i))\n time.sleep(1.0 * random.random())\n i += 1\n quickthread(sender)\n return c\n\n def fan_in(input1, input2):\n c = Chan()\n\n def forward():\n while True:\n chan, value = chanselect([input1, input2], [])\n c.put(value)\n\n quickthread(forward)\n return c\n\n c = fan_in(boring(\"Joe\"), boring(\"Ann\"))\n for i in xrange(10):\n print c.get()\n print \"You're both boring; I'm 
leaving.\"\n\nEXAMPLES['select'] = example_select\n\n\n#---------------------------------------------------------------------------\n# Timeout\n#---------------------------------------------------------------------------\n\ndef timer(duration):\n def timer_thread(chan, duration):\n time.sleep(duration)\n chan.put(time.time())\n c = Chan()\n quickthread(timer_thread, c, duration)\n return c\n\n\ndef example_timeout():\n def boring(msg):\n c = Chan()\n\n def sender():\n i = 0\n while True:\n c.put(\"%s: %d\" % (msg, i))\n time.sleep(1.5 * random.random())\n i += 1\n quickthread(sender)\n return c\n\n c = boring(\"Joe\")\n while True:\n chan, value = chanselect([c, timer(1.0)], [])\n if chan == c:\n print value\n else:\n print \"You're too slow.\"\n return\n\nEXAMPLES['timeout'] = example_timeout\n\n\n#---------------------------------------------------------------------------\n# RCV Quit\n#---------------------------------------------------------------------------\n\ndef example_rcvquit():\n def boring(msg, quit):\n c = Chan()\n\n def sender():\n i = 0\n while True:\n time.sleep(1.0 * random.random())\n\n chan, _ = chanselect([quit], [(c, \"%s: %d\" % (msg, i))])\n if chan == quit:\n quit.put(\"See you!\")\n i += 1\n quickthread(sender)\n return c\n\n quit = Chan()\n c = boring(\"Joe\", quit)\n for i in xrange(random.randint(0, 10), 0, -1):\n print c.get()\n quit.put(\"Bye!\")\n print \"Joe says:\", quit.get()\n\nEXAMPLES['rcvquit'] = example_rcvquit\n\n\n#---------------------------------------------------------------------------\n# Daisy chain\n#---------------------------------------------------------------------------\n\ndef example_daisy():\n def f(left, right):\n left.put(1 + right.get())\n\n N = 1000 # Python's threads aren't that lightweight\n leftmost = Chan()\n rightmost = leftmost\n left = leftmost\n for i in xrange(N):\n right = Chan()\n quickthread(f, left, right)\n left = right\n\n def putter():\n right.put(1)\n quickthread(putter)\n\n print leftmost.get()\n\nEXAMPLES['daisy'] = example_daisy\n\n\ndef main():\n if len(sys.argv) < 2 or sys.argv[1] not in EXAMPLES:\n print \"Possible examples:\"\n for example in EXAMPLES.iterkeys():\n print \" %s\" % example\n return\n\n EXAMPLES[sys.argv[1]]()\n\nif __name__ == '__main__':\n main()\n", "id": "3313543", "language": "Python", "matching_score": 1.3373878002166748, "max_stars_count": 20, "path": "examples/pike-conc-patt.py" }, { "content": "import os\nimport os.path\nfrom setuptools import setup\n\nDIR = os.path.dirname(__file__)\n\nwith open(os.path.join(DIR, 'README.rst')) as f:\n README = f.read()\n\nimport chan\n\nsetup(\n name='chan',\n version=chan.__version__,\n description=\"Chan for Python, lovingly stolen from Go\",\n author='<NAME>',\n author_email='<EMAIL>',\n url='http://github.com/stuglaser/pychan',\n long_description=README,\n keywords='go chan channel select chanselect concurrency',\n license='BSD',\n packages=['chan'],\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'License :: OSI Approved :: BSD License',\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Natural Language :: English',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n ],\n)\n", "id": "7400179", "language": "Python", "matching_score": 0.6179019808769226, "max_stars_count": 20, "path": "setup.py" }, { "content": "import 
random\nimport threading\nimport time\nimport unittest\n\nfrom chan import Chan, chanselect, quickthread\nfrom chan import ChanClosed, Timeout\nfrom chan.chan import RingBuffer\n\n\ndef sayset(chan, phrases, delay=0.5):\n for ph in phrases:\n chan.put(ph)\n time.sleep(delay)\n chan.close()\n\n\ndef distributer(inchans, outchans, delay_max=0.5):\n inchans = inchans[:] # Copy. Will remove closed chans\n while True:\n try:\n _, value = chanselect(inchans, [])\n time.sleep(random.random() * delay_max)\n except ChanClosed as ex:\n inchans.remove(ex.which)\n continue\n _, _ = chanselect([], [(chan, value) for chan in outchans])\n\n\ndef accumulator(chan, into=None):\n if into is None:\n into = []\n for value in chan:\n into.append(value)\n\n\nclass RingBufferTests(unittest.TestCase):\n def test_pushpop(self):\n buf = RingBuffer(4)\n for i in range(12):\n buf.push(i)\n self.assertEqual(buf.pop(), i)\n\n def test_fillunfill(self):\n S = 4\n buf = RingBuffer(S)\n for i in range(12):\n for j in range(S):\n buf.push(100 * i + j)\n for j in range(S):\n self.assertEqual(buf.pop(), 100 * i + j)\n\n # Moves ahead one space\n buf.push('NaN')\n buf.pop()\n\n\nclass ChanTests(unittest.TestCase):\n def test_simple(self):\n chan = Chan()\n results = []\n quickthread(accumulator, chan, results)\n\n chan.put(\"Hello\")\n time.sleep(0.01) # Technically unsafe\n\n self.assertEqual(len(results), 1)\n self.assertEqual(results[0], \"Hello\")\n\n def test_nothing_lost(self):\n phrases = ['Hello_%03d' % x for x in range(1000)]\n firstchan = Chan()\n chan_layer1 = [Chan() for i in range(6)]\n lastchan = Chan()\n sayer = quickthread(sayset, firstchan, phrases, delay=0.001,\n __name='sayer')\n\n # Distribute firstchan -> chan_layer1\n for i in range(12):\n outchans = [chan_layer1[(i+j) % len(chan_layer1)]\n for j in range(3)]\n quickthread(distributer, [firstchan], outchans, delay_max=0.005,\n __name='dist_layer1_%02d' % i)\n\n # Distribute chan_layer1 -> lastchan\n for i in range(12):\n inchans = [chan_layer1[(i+j) % len(chan_layer1)]\n for j in range(0, 9, 3)]\n quickthread(distributer, inchans, [lastchan], delay_max=0.005,\n __name='dist_layer2_%02d' % i)\n\n results = []\n quickthread(accumulator, lastchan, results, __name='accumulator')\n sayer.join(10)\n self.assertFalse(sayer.is_alive())\n time.sleep(1) # Unsafe. 
Lets the data propagate to the accumulator\n\n # Checks that none are missing, and there are no duplicates.\n self.assertEqual(len(results), len(phrases))\n self.assertEqual(set(results), set(phrases))\n\n def test_iter_and_closed(self):\n c = Chan()\n quickthread(sayset, c, [1, 2, 3], delay=0)\n\n def listener():\n it = iter(c)\n self.assertEqual(next(it), 1)\n self.assertEqual(next(it), 2)\n self.assertEqual(next(it), 3)\n self.assertRaises(StopIteration, it.__next__)\n t = quickthread(listener)\n\n time.sleep(0.1)\n self.assertFalse(t.is_alive())\n\n def test_putget_timeout(self):\n c = Chan()\n self.assertRaises(Timeout, c.put, 'x', timeout=0)\n self.assertRaises(Timeout, c.put, 'x', timeout=0.01)\n self.assertRaises(Timeout, c.get, timeout=0)\n self.assertRaises(Timeout, c.get, timeout=0.01)\n self.assertRaises(Timeout, c.put, 'x', timeout=0)\n\n def test_chanselect_timeout(self):\n a = Chan()\n b = Chan()\n c = Chan()\n self.assertRaises(Timeout, chanselect, [a, b], [(c, 42)], timeout=0)\n self.assertRaises(Timeout, chanselect, [a, b], [(c, 42)], timeout=0.01)\n\n # Verifies that chanselect didn't leave any wishes lying around.\n self.assertRaises(Timeout, a.put, 12, timeout=0)\n self.assertRaises(Timeout, c.get, timeout=0)\n\n def test_select_and_closed(self):\n a, b, c = [Chan() for _ in range(3)]\n out = Chan()\n quickthread(sayset, a, [0, 1, 2], delay=0.01, __name='sayset1')\n quickthread(sayset, b, [3, 4, 5], delay=0.01, __name='sayset2')\n quickthread(sayset, c, [6, 7, 8], delay=0.01, __name='sayset2')\n\n def fanin_until_closed(inchans, outchan):\n inchans = inchans[:]\n while inchans:\n try:\n _, val = chanselect(inchans, [])\n out.put(val)\n except ChanClosed as ex:\n inchans.remove(ex.which)\n out.close()\n\n quickthread(fanin_until_closed, [a, b, c], out, __name='fanin')\n\n into = []\n acc = quickthread(accumulator, out, into)\n acc.join(10)\n self.assertFalse(acc.is_alive())\n\n results = set(into)\n self.assertEqual(len(results), 9)\n self.assertEqual(results, set(range(9)))\n\n def test_buf_simple(self):\n S = 5\n c = Chan(S)\n for i in range(S):\n c.put(i)\n c.close()\n\n results = list(c)\n self.assertEqual(results, list(range(S)))\n\n def test_buf_overfull(self):\n c = Chan(5)\n quickthread(sayset, c, list(range(20)), delay=0)\n time.sleep(0.1) # Fill up buffer\n\n results = list(c)\n self.assertEqual(results, list(range(20)))\n\n def test_buf_kept_empty(self):\n c = Chan(5)\n quickthread(sayset, c, list(range(20)), delay=0.02)\n results = list(c)\n self.assertEqual(results, list(range(20)))\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "2106298", "language": "Python", "matching_score": 1.4037452936172485, "max_stars_count": 20, "path": "tests/chan_tests.py" }, { "content": "from .chan import Error, ChanClosed, Timeout\nfrom .chan import Chan, chanselect\nfrom .chan import quickthread\n\n__version__ = '0.3.1'\n", "id": "697708", "language": "Python", "matching_score": 0.9865038990974426, "max_stars_count": 20, "path": "chan/__init__.py" } ]
1.337388
dawodx
[ { "content": "##The MIT License\n##\n##Copyright 2018 <NAME>. <EMAIL>\n##\n##Permission is hereby granted, free of charge, to any person obtaining a copy\n##of this software and associated documentation files (the \"Software\"), to deal\n##in the Software without restriction, including without limitation the rights\n##to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n##copies of the Software, and to permit persons to whom the Software is\n##furnished to do so, subject to the following conditions:\n##\n##The above copyright notice and this permission notice shall be included in\n##all copies or substantial portions of the Software.\n##\n##THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n##IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n##FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n##AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n##LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n##OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n##THE SOFTWARE.\n\n##\n##CORONA Corona Image Patcher_v1.5\n##\n\n##System\nimport subprocess\nimport os\nimport getpass\n##File dialog GUI\nfrom tkinter import *\nfrom tkinter import ttk\nfrom tkinter.filedialog import askopenfilename\nfrom time import sleep\n\n##Split Path\nimport ntpath\ndef path_leaf(path):\n head, tail = ntpath.split(path)\n return head, tail\n\n##Custome Functions\ndef my_range(start, end, step):\n while start <= end:\n yield start\n start += step\n\ndef OpenFile():\n root.fileName = askopenfilename(filetypes=((\"Corona Image\", \"*.cxr\"),(\"All files\", \"*.*\")))\n \n #v.set(\"Right!!\")\n print(\"Right!!\")\n openButton.config(state=\"disabled\")\n root.update()\n \n \n head , tail=path_leaf(root.fileName)\n folderPath= head\n fileName,extension = tail.split(\".\")\n\n outDir=folderPath+\"\\\\\"+fileName+\"_\"+\"output\\\\\"\n if not os.path.exists(outDir):\n os.makedirs(outDir)\n \n lightmixColors = '--set \"Vec3Array colorMap.lightmixColors = 1 1 1, 1 1 1, 1 1 1, 1 1 1, 1 1 1\"'\n lightmixEnabledLayers = '--set \"BoolArray colorMap.lightmixEnabledLayers = %d, %d, %d, %d, %d\"' %(1, 1, 1, 1, 1)\n renderElement='--element \"LightMix Interactive\"'\n \n i=0\n for x in my_range(0.01, 1, 0.5):\n j=0\n for y in my_range(0.01, 3, 0.9): #reception lights\n k=0\n for z in my_range(0.01, 1, 0.5):\n a=0\n for u in my_range(0.01, 1, 0.5):\n b=0\n for v in my_range(0.01, 1, 0.5):\n lightmixIntensities = '--set \" FloatArray colorMap.lightmixIntensities = %1.2f, %1.2f, %1.2f, %1.2f, %1.2f\"' %(x, y, z, u, v)\n #inputCXR= '1.cxr'\n outputName= 'image_%s_%d_%d_%d_%d_%d.png'%(fileName,i,j,k,a,b)\n command= coronaPath+\" \"+lightmixColors+\" \"+lightmixIntensities+\" \"+lightmixEnabledLayers+\" \"+renderElement+\" \"+root.fileName+\" \"+outDir+outputName\n #print(command)\n startupinfo = None\n if os.name == 'nt':\n startupinfo = subprocess.STARTUPINFO()\n startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW\n subprocess.call(command, startupinfo=startupinfo)\n print(outputName)\n print(\"done\")\n b+=1\n a+=1\n k+=1\n j+=1\n i+=1\n\n #v.set(\"WOHOO,Finished!!!\")\n print(\"WOHOO,Finished!!!\")\n #root.update()\n sleep(2)\n root.destroy()\n\n\n \n\n#####################START CODE########################\n \ncoronaPath= 'C:\\\\Progra~1\\\\Corona\\\\CoronaImageCmd.exe'\n\n#########################GUI###########################\nroot = Tk()\n#get user 
name\nuser=getpass.getuser().upper()\n#create title variable messege\nv = StringVar()\nv.set(\"Hey %s Load the Corona!\"%(user))\nTitle = root.title( \"CORONA Image Patcher v 1.5\")\nlabel = ttk.Label(root, textvariable=v,foreground=\"red\",font=(\"Helvetica\", 20))\nlabel.pack()\n\n#Menu Bar\nmenu = Menu(root)\nroot.config(menu=menu)\n\nopenButton = Button(root, text=\"Open\",font=(\"Helvetica\", 20), command=OpenFile,)\nopenButton.config(state=\"normal\")\nopenButton.pack()\n\nfile = Menu(menu)\nfile.add_command(label = 'Open', command = OpenFile)\nfile.add_command(label = 'Exit', command = lambda:exit())\n\nmenu.add_cascade(label = 'File', menu = file)\n\nroot.mainloop()\n\n", "id": "1069962", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "CoronaImagePatcher.py" } ]
0
lukemerrett
[ { "content": "import json\nimport pulumi\nimport pulumi_aws as aws\n\nbucket = aws.s3.Bucket(\n \"bucket-writer-sample\",\n force_destroy=True # Allows deletion even if objects exist in the bucket\n)\n\naccess_bucket_policy = aws.iam.Policy(\n \"access_bucket_policy\",\n policy=json.dumps({\n \"Version\": \"2012-10-17\",\n \"Statement\": [{\n \"Effect\": \"Allow\",\n \"Action\": [\"s3:*\"],\n \"Resource\": f\"arn:aws:s3:::bucket-writer-sample*\"\n }],\n })\n)\n\nrole_for_lambda = aws.iam.Role(\n \"lambda_exec\",\n assume_role_policy=json.dumps({\n \"Version\": \"2012-10-17\",\n \"Statement\": [{\n \"Effect\": \"Allow\",\n \"Principal\": {\"Service\": \"lambda.amazonaws.com\"},\n \"Action\": \"sts:AssumeRole\"\n }]\n }),\n managed_policy_arns=[access_bucket_policy.arn]\n)\n\naws_lambda = aws.lambda_.Function(\n \"bucket_writer\",\n code=pulumi.FileArchive(\"./src\"),\n role=role_for_lambda.arn,\n handler=\"lambda.handler\",\n runtime=\"python3.9\",\n environment=aws.lambda_.FunctionEnvironmentArgs(\n variables={\n \"BUCKET_NAME\": bucket.id\n }\n )\n)\n\n# Export the name of the bucket\npulumi.export(\"bucket_name\", bucket.id)\npulumi.export(\"lambda_name\", aws_lambda.id)\n", "id": "12407819", "language": "Python", "matching_score": 2.1819822788238525, "max_stars_count": 0, "path": "__main__.py" }, { "content": "\"\"\"\nHandler for the events received by the Lambda\n\"\"\"\n\nimport boto3\nimport datetime\nimport json\nimport os\n\nBUCKET_NAME = os.environ[\"BUCKET_NAME\"]\n\n\ndef get_timestamp_filename():\n \"\"\"\n Returns a filename constructed of the current timestamp in the format:\n '2020-02-04_07.46.29.315237.json'\n \"\"\"\n return str(datetime.datetime.now()).replace(\" \", \"_\").replace(\":\", \".\") + \".json\"\n\n\ndef save_to_s3(content):\n \"\"\"\n Saves the contents to S3 with a timestamped file name\n \"\"\"\n key = get_timestamp_filename()\n s3 = boto3.client(\"s3\")\n s3.put_object(Body=str(json.dumps(content, indent=2)), Bucket=BUCKET_NAME, Key=key)\n return key\n\n\ndef handler(event, context):\n content = json.dumps(event, indent=2)\n print(f\"Received event: {content}\")\n return save_to_s3(content)\n", "id": "2089651", "language": "Python", "matching_score": 0.19685229659080505, "max_stars_count": 0, "path": "src/lambda.py" }, { "content": "\"\"\"empty message\n\nRevision ID: 653d68312991\nRevises: <KEY>\nCreate Date: 2021-08-20 12:04:14.107041\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '6<PASSWORD>'\ndown_revision = '<KEY>'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('user', 'name',\n existing_type=sa.VARCHAR(length=128),\n nullable=False)\n op.drop_column('jim', 'id')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('user', sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False))\n op.alter_column('user', 'name',\n existing_type=sa.VARCHAR(length=128),\n nullable=True)\n # ### end Alembic commands ###\n", "id": "12390310", "language": "Python", "matching_score": 1.7719968557357788, "max_stars_count": 0, "path": "migrations/versions/653d68312991_.py" }, { "content": "from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\n\napp = Flask(__name__)\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = \"postgresql://postgres:postgres@localhost:6456/postgres\"\napp.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n\ndb = SQLAlchemy(app)\nmigrate = Migrate(app, db)\n\n\nclass User(db.Model):\n name = db.Column(db.String(128), primary_key=True)\n date = db.Column(db.DateTime())\n department = db.Column(db.String(128))\n", "id": "8429616", "language": "Python", "matching_score": 0.42620354890823364, "max_stars_count": 0, "path": "app.py" }, { "content": "sample_config_value = \"Wave\"\n", "id": "3062308", "language": "Python", "matching_score": 0.5419999957084656, "max_stars_count": 0, "path": "dags/dependencies/config.py" }, { "content": "print(\"Sample script\")\n", "id": "425481", "language": "Python", "matching_score": 0.2291979044675827, "max_stars_count": 0, "path": "main.py" }, { "content": "import socket\nimport unittest\n\nfrom kazoo.client import KazooClient\nfrom kafka import KafkaProducer, KafkaConsumer\n\n\nclass TestStringMethods(unittest.TestCase):\n def test_kafka_is_running(self):\n kafka_server = 'localhost:9092'\n zookeeper_server = 'localhost:2181'\n topic = 'integration_test_sample_topic'\n\n producer = KafkaProducer(bootstrap_servers=kafka_server)\n print(f'Sending dummy message')\n producer.send(topic, b'random_message')\n\n zk = KazooClient(hosts=zookeeper_server)\n zk.start()\n\n data = zk.get_children('/brokers/topics')\n self.assertIn(topic, data)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "2743515", "language": "Python", "matching_score": 1.0887353420257568, "max_stars_count": 20, "path": "test_scripts/test_connections.py" }, { "content": "import unittest\n\nif __name__ == \"__main__\":\n # Finds all tests in submodules ending in *tests.py and runs them\n suite = unittest.TestLoader().discover('.', pattern = \"*tests.py\")\n unittest.TextTestRunner().run(suite)\n", "id": "55402", "language": "Python", "matching_score": 1.0393685102462769, "max_stars_count": 2, "path": "runtests.py" }, { "content": "import pytest\n\n\ndef add(x, y):\n return x + y\n\n\ndef divide(x, y):\n if y == 0:\n raise Exception(\"Cannot divide by zero\")\n return x / y\n\n\ndef test_should_pass():\n assert add(1, 2) == 3\n\n\ndef test_should_throw():\n with pytest.raises(Exception):\n divide(1, 0)\n\n\nclass TestClass:\n def test_addition(self):\n assert add(3, 4) == 7\n", "id": "10350570", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "tests/test_basic.py" }, { "content": "", "id": "12297865", "language": "Python", "matching_score": 0, "max_stars_count": 2, "path": "evernoteclient/__init__.py" }, { "content": "\nclass Environment:\n __poop_level = None\n\n def __init__(self):\n self.__poop_level = 0\n\n def current_poop_level(self):\n return self.__poop_level\n\n def pet_has_pooped(self):\n self.__poop_level += 1\n\n def clean_up_poop(self):\n self.__poop_level = 0\n", "id": "7137477", "language": "Python", "matching_score": 1.6053483486175537, "max_stars_count": 0, "path": 
"EnvironmentObjects/environment.py" }, { "content": "\n\nclass Bladder:\n __bladder_level = None\n __bursting_level = 5\n __environment = None\n\n def __init__(self, environment):\n self.__bladder_level = 0\n self.__environment = environment\n\n def has_been_fed(self):\n self.__bladder_level += 1\n\n def needs_the_toilet(self):\n return self.__bladder_level >= self.__bursting_level\n\n def go_to_toilet(self):\n self.__bladder_level = 0\n self.__environment.pet_has_pooped()\n", "id": "6378414", "language": "Python", "matching_score": 1.595389485359192, "max_stars_count": 0, "path": "PetObjects/bladder.py" }, { "content": "__author__ = '<NAME>'\n\nfrom Helpers import datehelper\n\nclass HungerLevel:\n __seconds_until_hungry = 120 # 2 minutes\n __last_fed = None\n __bladder = None\n\n def __init__(self, bladder):\n self.__last_fed = datehelper.todays_date()\n self.__bladder = bladder\n\n def is_hungry(self):\n date_pet_is_hungry = datehelper.add_seconds_to_date(self.__last_fed, self.__seconds_until_hungry)\n return datehelper.is_date_earlier_than_today(date_pet_is_hungry)\n\n def feed_pet(self):\n self.__last_fed = datehelper.todays_date()\n self.__bladder.has_been_fed()\n print(\"Your pet has been fed\")", "id": "7771678", "language": "Python", "matching_score": 1, "max_stars_count": 0, "path": "PetObjects/hunger.py" }, { "content": "author = \"luke.merrett\"\n", "id": "5142117", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "tests/__init__.py" }, { "content": "from django.shortcuts import render\nfrom rango.models import Category\nfrom rango.models import Page\n\ndef index(request):\n category_list = Category.objects.order_by('-likes')[:5]\n context_dict = {'categories': category_list}\n\n return render(request, 'rango/index.html', context_dict)\n\ndef category(request, category_name_slug):\n context_dict = {}\n\n try:\n category = Category.objects.get(slug=category_name_slug)\n context_dict['category_name'] = category.name\n\n pages = Page.objects.filter(category=category)\n\n context_dict['pages'] = pages\n context_dict['category'] = category\n except Category.DoesNotExist:\n # We get here if we didn't find the specified category.\n # Don't do anything - the template displays the \"no category\" message for us.\n pass\n\n return render(request, 'rango/category.html', context_dict)", "id": "9909089", "language": "Python", "matching_score": 0.003893066430464387, "max_stars_count": 0, "path": "tango_with_django_project/rango/views.py" }, { "content": "import re\n\n\ndef convert_file_to_csv_using_regex_groups(\n grouping_regex, source_filename, destination_filename\n):\n \"\"\"\n Uses a regex containing groups to pull each line of a file into a csv. 
Particularly useful for parsing log files\n :param grouping_regex: The regex containing a list of groups to pull out.\n :param source_filename: The file to pull the data from.\n :param destination_filename: The destination file to create.\n \"\"\"\n if not isinstance(grouping_regex, str):\n raise TypeError(\"grouping_regex must be a string\")\n\n if not isinstance(source_filename, str):\n raise TypeError(\"source_filename must be a string\")\n\n if not isinstance(destination_filename, str):\n raise TypeError(\"destination_filename must be a string\")\n\n if not re.match(r\"\\(\\)\", grouping_regex):\n raise RegexContainsNoGroups()\n\n compiled_regex = re.compile(grouping_regex)\n output_lines = []\n\n with open(source_filename, \"r\") as f:\n file_contents = f.readlines()\n\n for line in file_contents:\n parsed_groups = compiled_regex.match(line)\n if parsed_groups:\n line = \",\".join(parsed_groups.groups())\n output_lines.append(line)\n\n with open(destination_filename, \"w\", newline=\"\") as f:\n f.writelines(output_lines)\n\n\nclass RegexContainsNoGroups(Exception):\n \"\"\"\n Exception raised when a provided regular expression contains no defined groups.\n \"\"\"\n\n def __init__(self):\n super(RegexContainsNoGroups, self).__init__(\n \"Provided grouping_regex does not contain any regex groups, e.g: (?'MyGroupName'.*)\"\n )\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--groupingregex\",\n dest=\"grouping_regex\",\n help=\"Regular expression containing a set of groups showing how to extract the data from each line of the file.\",\n )\n parser.add_argument(\n \"--sourcefile\",\n dest=\"source_filename\",\n help=\"Location of the source file to extract the data from.\",\n )\n parser.add_argument(\n \"--destinationfile\",\n dest=\"destination_filename\",\n help=\"Where the resulting csv should be outputted to.\",\n )\n args = parser.parse_args()\n\n convert_file_to_csv_using_regex_groups(\n args.grouping_regex, args.source_filename, args.destination_filename\n )\n", "id": "4513118", "language": "Python", "matching_score": 2.196614980697632, "max_stars_count": 0, "path": "src/poorly_formatted_file.py" }, { "content": "__author__ = '<NAME>'\n\nimport argparse\n\nclass CmdLineParser:\n __parser = None\n\n def __init__(self):\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n '-ln', '--list-notebooks',\n dest='list_notebooks', # Result stored in a variable of this name\n action='store_true', # Means we don't expect this flag key to come with a value\n help='Lists the current notebooks'\n )\n\n parser.add_argument(\n '-c', '--create-note',\n dest=\"create_note\",\n nargs=2,\n help='Creates a new note',\n metavar=('\"Title\"', '\"Body\"')\n )\n\n self.__parser = parser\n\n def parse_args(self):\n return self.__parser.parse_args()\n", "id": "5203323", "language": "Python", "matching_score": 3.0267205238342285, "max_stars_count": 0, "path": "cmdline/cmdlineparser.py" }, { "content": "__author__ = '<NAME>'\n\nfrom cmdline.cmdlineparser import CmdLineParser\nfrom evernoteclient.operations import EvernoteOperations\n\nif __name__ == \"__main__\":\n parser = CmdLineParser()\n operations = EvernoteOperations()\n\n args = parser.parse_args()\n\n if args.list_notebooks:\n operations.print_list_of_notebooks()\n\n if args.create_note:\n operations.create_new_note(args.create_note[0], args.create_note[1])", "id": "11994652", "language": "Python", "matching_score": 1.667952299118042, "max_stars_count": 0, "path": 
"program.py" }, { "content": "__author__ = '<NAME>'\n\nimport settings\nfrom evernote.api.client import EvernoteClient\nimport evernote.edam.type.ttypes as Types\n\nclass EvernoteOperations:\n __client = None\n\n def __init__(self):\n self.__client = EvernoteClient(token=settings.developer_token, sandbox=settings.sandbox)\n\n def print_list_of_notebooks(self):\n notebooks = self.get_list_of_notebooks()\n print(\"Found \", len(notebooks), \" notebooks:\")\n for notebook in notebooks:\n print(\" * \", notebook.name)\n\n def get_list_of_notebooks(self):\n note_store = self.__client.get_note_store()\n return note_store.listNotebooks()\n\n def create_new_note(self, title, body, *notebook_guid):\n note_store = self.__client.get_note_store()\n\n note = Types.Note()\n note.title = title\n if notebook_guid:\n note.notebookGuid = notebook_guid\n\n self.__write_note_content(note, body)\n\n created_note = note_store.createNote(note)\n\n print('\"' + title + '\" note has been created')\n\n return created_note\n\n @staticmethod\n def __write_note_content(note, body):\n note.content = '<?xml version=\"1.0\" encoding=\"UTF-8\"?>'\n note.content += '<!DOCTYPE en-note SYSTEM \"http://xml.evernote.com/pub/enml2.dtd\">'\n note.content += '<en-note>' + body + '</en-note>'", "id": "3792344", "language": "Python", "matching_score": 1.733575463294983, "max_stars_count": 0, "path": "evernoteclient/operations.py" }, { "content": "\nsandbox = True\ndeveloper_token = '<PASSWORD>'\n", "id": "9883953", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "settings.py" }, { "content": "__author__ = '<NAME>'\n\nfrom EnvironmentObjects.environment import Environment\nfrom PetObjects.pet import Pet\n\nconsole_options = None\nenvironment = None\nmyPet = None\n\ndef run_option(x):\n if x in console_options:\n console_options[x]()\n else:\n print(\"You didn\\'t enter an option\")\n wait()\n\ndef wait():\n print(\"Waiting\")\n\ndef console_loop():\n pets_status = myPet.get_pets_status()\n\n if pets_status.has_reached_its_lifespan:\n print('Your pet has died!')\n exit()\n\n print('\\n%s\\'s status' % myPet.Name)\n print('----------\\n')\n print(myPet.Age.current_age_string())\n print('Hungry: ' + ('Yes! Feed Me!' if pets_status.is_hungry else 'Not yet'))\n print('Needs the Toilet: ' + ('Bursting!' if pets_status.needs_the_toilet else 'Nope'))\n print('Poop in the Cage: ' + environment.current_poop_level().__str__())\n print('')\n print('What would you like to do:\\n')\n print('1. Feed Pet')\n print('2. Go to Toilet')\n print('3. Clean up the Poop')\n print('4. Wait')\n print('5. Exit')\n\n number_chosen = input('Choose a number (1,2,3 etc): ')\n\n run_option(number_chosen)\n\n# Entry Point\nif __name__ == '__main__':\n environment = Environment()\n myPet = Pet()\n\n print('What would you like to call your pet? 
')\n pets_name = input('Name: ')\n\n myPet.hatch(environment, pets_name)\n\n console_options = {\n '1': myPet.HungerLevel.feed_pet,\n '2': myPet.Bladder.go_to_toilet,\n '3': environment.clean_up_poop,\n '4': wait,\n '5': exit\n }\n\n print('Your pet has hatched; welcome it to the new world!')\n\n while(True):\n console_loop()\n\n exit()", "id": "3663871", "language": "Python", "matching_score": 4.396629333496094, "max_stars_count": 0, "path": "pythonpet.py" }, { "content": "__author__ = '<NAME>'\n\nfrom PetObjects.status import PetStatus\nfrom PetObjects.age import Age\nfrom PetObjects.bladder import Bladder\nfrom PetObjects.hunger import HungerLevel\n\nclass Pet:\n Age = None\n HungerLevel = None\n Name = None\n Bladder = None\n\n def __init__(self):\n pass\n\n def hatch(self, environment, name):\n \"\"\"\n Sets the original details for the pet\n \"\"\"\n self.Age = Age()\n self.Bladder = Bladder(environment)\n self.HungerLevel = HungerLevel(self.Bladder)\n self.Name = name\n\n def get_pets_status(self):\n \"\"\"\n Returns an object outlining the status of the pet\n :return: An object showing the status of the pet\n \"\"\"\n return PetStatus(\n self.Age.has_pet_reached_its_lifespan(),\n self.HungerLevel.is_hungry(),\n self.Bladder.needs_the_toilet(),\n self.Name)", "id": "442454", "language": "Python", "matching_score": 3.2087016105651855, "max_stars_count": 0, "path": "PetObjects/pet.py" }, { "content": "\nclass PetStatus:\n has_reached_its_lifespan = None\n is_hungry = None\n needs_the_toilet = None\n name = None\n\n def __init__(self, has_reached_its_lifespan, is_hungry, needs_the_toilet, name):\n self.has_reached_its_lifespan = has_reached_its_lifespan\n self.is_hungry = is_hungry\n self.needs_the_toilet = needs_the_toilet\n self.name = name\n", "id": "1442166", "language": "Python", "matching_score": 0.6785779595375061, "max_stars_count": 0, "path": "PetObjects/status.py" }, { "content": "\nfrom random import randint\nfrom Helpers import datehelper\n\nclass Age:\n __minimum_potential_lifespan_in_seconds = 86400 # 1 day\n __total_potential_lifespan_in_seconds = 31536000 # 1 year\n __birth_date = None\n __lifespan_in_seconds = None\n\n def __init__(self):\n self.__birth_date = datehelper.todays_date()\n\n self.__lifespan_in_seconds = randint(\n self.__minimum_potential_lifespan_in_seconds,\n self.__total_potential_lifespan_in_seconds)\n\n def current_age_in_seconds(self):\n \"\"\"\n Gets the current age of the pet in seconds\n :return: The total age of the pet in seconds\n \"\"\"\n return datehelper.get_total_seconds_difference(datehelper.todays_date(),self.__birth_date)\n\n def current_age_string(self):\n \"\"\"\n Gets the age of the pet in a human readable string\n :return: A human readable form of the pets age\n \"\"\"\n current_age = datehelper.get_difference_as_relative_delta(datehelper.todays_date(), self.__birth_date)\n return \"Your pet is currently %d years %d months %d days %d hours %d minutes old\" % (\n current_age.years,\n current_age.months,\n current_age.days,\n current_age.hours,\n current_age.minutes)\n\n def has_pet_reached_its_lifespan(self):\n \"\"\"\n Returns a value indicating whether the pet has reached it's current lifespan\n :return: True if the pet is dead, false if the pet is still kicking around.\n \"\"\"\n time_of_death = datehelper.add_seconds_to_date(self.__birth_date, self.__lifespan_in_seconds)\n return datehelper.is_date_earlier_than_today(time_of_death)", "id": "8527431", "language": "Python", "matching_score": 2.268113374710083, "max_stars_count": 0, 
"path": "PetObjects/age.py" }, { "content": "\nfrom datetime import datetime, timedelta\nfrom dateutil.relativedelta import relativedelta\n\ndef add_seconds_to_date(date, seconds):\n return date + timedelta(seconds=seconds)\n\ndef is_date_earlier_than_today(date):\n return date <= todays_date()\n\ndef get_difference_as_relative_delta(latest_date, earliest_date):\n return relativedelta(latest_date, earliest_date)\n\ndef get_total_seconds_difference(latest_date, earliest_date):\n return (latest_date - earliest_date).total_seconds()\n\ndef todays_date():\n return datetime.now()\n", "id": "255999", "language": "Python", "matching_score": 0.14035259187221527, "max_stars_count": 0, "path": "Helpers/datehelper.py" }, { "content": "import logging\nfrom datetime import timedelta\n\n# Testing we can import PIP modules using the `requirements.txt` file\nimport pandas as pd\n\nfrom airflow import DAG\nfrom airflow.operators.python import PythonOperator\nfrom airflow.operators.dummy import DummyOperator\nfrom airflow.utils.dates import days_ago\n\n# Testing we can import a value from a package within the DAGs folder in S3\nfrom dependencies.config import sample_config_value\n\n\ndef _operation():\n logging.info(sample_config_value)\n df = pd.DataFrame(data={\n \"A\": [1, 2],\n \"B\": [3, 4]\n })\n logging.info(df[\"A\"])\n\n\ndefault_args = {\n 'owner': 'airflow',\n 'depends_on_past': False,\n 'start_date': days_ago(2),\n 'email_on_failure': False,\n 'email_on_retry': False,\n 'retries': 1,\n 'retry_delay': timedelta(minutes=1)\n}\ndag = DAG(\n 'dag_with_dependencies',\n default_args=default_args,\n description='A dag that depends on internal packages and external modules',\n schedule_interval='@once',\n tags=[\"simple\"]\n)\n\nstart = DummyOperator(\n task_id='start',\n dag=dag\n)\n\ncheck_dependencies = PythonOperator(\n task_id='check_dependencies',\n python_callable=_operation,\n dag=dag\n)\n\nend = DummyOperator(\n task_id='end',\n dag=dag\n)\n\n\nstart >> check_dependencies >> end\n", "id": "11734668", "language": "Python", "matching_score": 5.083563804626465, "max_stars_count": 0, "path": "dags/dag_with_dependencies.py" }, { "content": "import logging\nfrom datetime import timedelta\n\nfrom airflow import DAG\nfrom airflow.operators.python import PythonOperator\nfrom airflow.operators.dummy import DummyOperator\nfrom airflow.utils.dates import days_ago\n\n\ndef _operation():\n logging.info(\"Hello world!\")\n\n\ndefault_args = {\n 'owner': 'airflow',\n 'depends_on_past': False,\n 'start_date': days_ago(2),\n 'email_on_failure': False,\n 'email_on_retry': False,\n 'retries': 1,\n 'retry_delay': timedelta(minutes=1)\n}\ndag = DAG(\n 'simple_dag',\n default_args=default_args,\n description='A basic hello world template',\n schedule_interval='@once',\n tags=[\"simple\"]\n)\n\nstart = DummyOperator(\n task_id='start',\n dag=dag\n)\n\nsay_hello = PythonOperator(\n task_id='say_hello',\n python_callable=_operation,\n dag=dag\n)\n\nend = DummyOperator(\n task_id='end',\n dag=dag\n)\n\n\nstart >> say_hello >> end\n", "id": "10509096", "language": "Python", "matching_score": 0.17264191806316376, "max_stars_count": 0, "path": "dags/simple_dag.py" }, { "content": "from Graph import *\nimport Queue\n\n\"\"\"\n Construct the graph with a series of obstacles that can still be navigated around\n \n Source: http://www.redblobgames.com/pathfinding/a-star/introduction.html\n\"\"\"\n\nxSize = 5\nySize = 10\n\nobstacles = [(0,1), (2,4), (2,5), \n\t (2,6), (4,2), (4,3),\n\t (2,0), (2,1), (2,2), \n\t (2,3)]\n\ngraph = 
Graph(xSize, ySize, obstacles)\n\nhigherCostingNodes = [(3,5), (3,6)]\n\nfor node in higherCostingNodes:\n graph.setCost(node, 5)\n\n\"\"\"\n Breadth first search with Dijkstra's Algorithm\n \n Uses the flood fill mapping we've done previously but \n now calculates the shorted path between the startNode and goalNode\n \n This implementation allows us to assign cost to each node\n so you can define which areas are more or less efficient to walk through\n \n Dijkstra's algorithm is then used to work out the optimal path through the nodes\n\"\"\"\n\nstartNode = (0,2)\ngoalNode = (4,1)\n\nfrontier = Queue.PriorityQueue()\nfrontier.put(startNode, 0)\ncame_from = {}\ncost_so_far = {}\ncame_from[startNode] = None #Python version of \"null\"\ncost_so_far[startNode] = 0\n\n\n\n# Construct a map of all possible paths for the startNode across the map\nwhile not frontier.empty():\n current = frontier.get() # Get instead of peek, dequeues the item\n \n for neighbour in graph.getNeighbours(current):\n new_cost = cost_so_far[current] + graph.getCost(neighbour)\n if neighbour not in cost_so_far or new_cost < cost_so_far[neighbour]:\n cost_so_far[neighbour] = new_cost\n priority = new_cost\n frontier.put(neighbour, priority)\n came_from[neighbour] = current\n\n\n\n# Create the path between the startNode and goalNode\ncurrentNode = goalNode\npath = [currentNode]\nwhile currentNode != startNode:\n currentNode = came_from[currentNode]\n path.append(currentNode)\n\n\n# Output the resulting path graphically to the command line\nresultingGrid = \"\\n\"\n\nfor x in range(xSize):\n for y in range(ySize):\n if (x,y) in obstacles:\n resultingGrid += \" # \"\n elif (x,y) == startNode:\n resultingGrid += \" S \"\n elif (x,y) == goalNode:\n resultingGrid += \" G \"\n elif (x,y) in path:\n resultingGrid += \"---\"\n elif (x,y) in higherCostingNodes:\n resultingGrid += \"...\"\n else:\n resultingGrid += \" . 
\"\n resultingGrid +=\"\\n\"\n\nprint resultingGrid\n", "id": "10722227", "language": "Python", "matching_score": 5.023602485656738, "max_stars_count": 1, "path": "BreadthFirstWithCost.py" }, { "content": "from Graph import *\nimport Queue\n\n\"\"\"\n Construct the graph with a series of obstacles that can still be navigated around\n \n Source: http://www.redblobgames.com/pathfinding/a-star/introduction.html\n\"\"\"\n\nxSize = 5\nySize = 10\n\nobstacles = [(0,1), (2,4), (2,5), \n\t (2,6), (4,2), (4,3),\n\t (2,0), (2,1), (2,2),\n\t (2,3)]\n\ngraph = Graph(xSize, ySize, obstacles)\n\n\"\"\"\n Breadth first search\n \n Uses the flood fill mapping we've done previously but \n now calculates the shorted path between the startNode and goalNode\n \n This implementation assumes each space has equal cost in movement\n\"\"\"\n\nstartNode = (0,2)\ngoalNode = (4,1)\n\nfrontier = Queue.Queue()\nfrontier.put(startNode)\ncame_from = {}\ncame_from[startNode] = None #Python version of \"null\"\n\n\n\n# Construct a map of all possible paths for the startNode across the map\nwhile not frontier.empty():\n current = frontier.get() # Get instead of peek, dequeues the item\n \n # Early exit, we've found a valid path\n if current == goalNode:\n break\n \n for neighbour in graph.getNeighbours(current):\n if neighbour not in came_from:\n frontier.put(neighbour)\n came_from[neighbour] = current\n\n\n\n# Create the path between the startNode and goalNode\ncurrentNode = goalNode\npath = [currentNode]\nwhile currentNode != startNode:\n currentNode = came_from[currentNode]\n path.append(currentNode)\n\n\n\n# Output the resulting path graphically to the command line\nresultingGrid = \"\\n\"\n\nfor x in range(xSize):\n for y in range(ySize):\n if (x,y) in obstacles:\n resultingGrid += \" # \"\n elif (x,y) == startNode:\n resultingGrid += \" S \"\n elif (x,y) == goalNode:\n resultingGrid += \" G \"\n elif (x,y) in path:\n resultingGrid += \"---\"\n else:\n resultingGrid += \" . 
\"\n resultingGrid +=\"\\n\"\n\nprint resultingGrid\n", "id": "4623796", "language": "Python", "matching_score": 3.016279935836792, "max_stars_count": 1, "path": "BreadthFirstSimple.py" }, { "content": "\"\"\"\n Data structure for a node:\n __all_nodes[0][0] = X coord\n __all_nodes[0][1] = Y coord\n\"\"\"\n\n\"\"\"\n Constructs a graph of nodes and edges \n including obstacles that cannot be moved through.\n \n Source: http://www.redblobgames.com/pathfinding/grids/graphs.html\n\"\"\"\nclass Graph:\n __all_nodes = []\n __movement_costs = {}\n \n \"\"\"\n Graph with obstacles; movement only permitted around them.\n obstacles is an array of x,y coords, eg: [(0, 1), (0, 3)]\n obstacles is nullable, if you don't provide it no obstacles are placed\n \"\"\"\n def __init__(self, xSize, ySize, obstacles = []):\n self.__constructGrid(xSize, ySize)\n for obstacle in obstacles:\n self.__all_nodes.remove(obstacle)\n \n \"\"\"\n Constructs a grid of nodes using the provided x and y size.\n \"\"\"\n def __constructGrid(self, xSize, ySize):\n for x in range(xSize):\n for y in range(ySize):\n\tself.__all_nodes.append((x, y))\n\tself.__movement_costs[(x, y)] = 1 # Default all costs to 1\n \n \"\"\"\n Finds out all the edges for each node based on whether they can be moved into\n \"\"\"\n def getNeighbours(self, node):\n # I can move up, down, left or right, but not diagonally\n directions = [(1 ,0), (0, 1), (-1, 0), (0, -1)]\n neighbours = []\n for direction in directions:\n neighbour = (node[0] + direction[0], node[1] + direction[1])\n if neighbour in self.__all_nodes:\n\tneighbours.append(neighbour)\n return neighbours\n \n \"\"\"\n Gets the current cost of the node, the higher the cost the less efficient \n it is to move through that node \n \"\"\"\n def getCost(self, node):\n return self.__movement_costs[node]\n \n \"\"\"\n Sets the current cost of the node, the higher the cost the less efficient \n it is to move through that node\n \"\"\"\n def setCost(self, node, newCost):\n self.__movement_costs[node] = newCost\n \n \"\"\"\n Defines how close 2 nodes are together\n \"\"\"\n def heuristic(self, a, b):\n # Manhattan distance on a square grid\n return abs(a[0] - b[0]) + abs(a[1] - b[1])\n\n\n\n", "id": "6829437", "language": "Python", "matching_score": 2.19368052482605, "max_stars_count": 1, "path": "Graph.py" }, { "content": "from Graph import *\nimport Queue\n\n\"\"\"\n Construct the graph with a series of obstacles\n that can still be navigated around\n \n Source: http://www.redblobgames.com/pathfinding/a-star/introduction.html\n\"\"\"\nobstacles = [(0,1), (2,4), (2,5), (2,6), (2,7)]\n\ngraph = Graph(20, 10, obstacles)\n\n\"\"\"\n Flood fill\n \n Navigates every node in the graph using it's neighbours\n until it has mapped the whole graph excluding any obstacles\n\"\"\"\n\nstartNode = (0,2)\n\nfrontier = Queue.Queue()\nfrontier.put(startNode)\nvisited = {}\nvisited[startNode] = True\n\nneighbours = graph.getNeighbours(startNode)\n\nwhile not frontier.empty():\n current = frontier.get() # Get instead of peek, dequeues the item\n for neighbour in graph.getNeighbours(current):\n if neighbour not in visited:\n frontier.put(neighbour)\n visited[neighbour] = True", "id": "1642139", "language": "Python", "matching_score": 0.3641953468322754, "max_stars_count": 1, "path": "FloodFill.py" }, { "content": "\nimport json\nfrom urllib import request\nfrom builders import urlbuilder\n\nclass SteamApiClient:\n def get_player_summary(self):\n return self.__get_json_from_url(urlbuilder.get_player_summary())\n\n def 
get_player_owned_games(self):\n response = self.__get_json_from_url(urlbuilder.get_player_owned_games())\n return response['response']['games']\n\n @staticmethod\n def __get_json_from_url(url):\n data = request.urlopen(url)\n return json.loads(data.readall().decode('utf-8'))\n", "id": "6503508", "language": "Python", "matching_score": 2.44797682762146, "max_stars_count": 2, "path": "clients/steamapi.py" }, { "content": "__author__ = '<NAME>'\n\nimport settings\nimport unittest\nfrom clients.steamapi import SteamApiClient\n\nclass SteamAPIClientTests(unittest.TestCase):\n def setUp(self):\n self.client = SteamApiClient()\n\n def test_get_player_summary_returns_json_dictionary(self):\n json = self.client.get_player_summary()\n\n self.assertTrue(isinstance(json, dict))\n\n player = json['response']['players'][0]\n\n self.assertEqual(settings.steam_user_id, player['steamid'])\n\n def test_get_player_owned_games_returns_json_dictionary(self):\n games = self.client.get_player_owned_games()\n self.assertTrue(isinstance(games, list))\n self.assertTrue(isinstance(games[0], dict))\n", "id": "2931787", "language": "Python", "matching_score": 3.1464107036590576, "max_stars_count": 2, "path": "unittests/steamapiclientests.py" }, { "content": "__author__ = '<NAME>'\n\nimport settings\nimport unittest\nfrom builders import urlbuilder\n\nclass TestUrlBuilder(unittest.TestCase):\n def test_get_player_summary_returns_url(self):\n url = urlbuilder.get_player_summary()\n\n self.assertTrue(settings.steam_api_url in url)\n self.assertTrue('ISteamUser' in url)\n self.assertTrue('GetPlayerSummaries' in url)\n self.assertTrue('v0002' in url)\n self.assertTrue('key' in url)\n self.assertTrue(settings.steam_webapi_key in url)\n self.assertTrue('steamids' in url)\n self.assertTrue(settings.steam_user_id in url)\n\n def test_get_player_owned_games_returns_url(self):\n url = urlbuilder.get_player_owned_games()\n\n self.assertTrue(settings.steam_api_url in url)\n self.assertTrue('IPlayerService' in url)\n self.assertTrue('GetOwnedGames' in url)\n self.assertTrue('v0001' in url)\n self.assertTrue('key' in url)\n self.assertTrue(settings.steam_webapi_key in url)\n self.assertTrue('steamid' in url)\n self.assertTrue(settings.steam_user_id in url)\n self.assertTrue('include_appinfo' in url)\n self.assertTrue('include_played_free_games' in url)\n", "id": "6452580", "language": "Python", "matching_score": 4.417666435241699, "max_stars_count": 2, "path": "unittests/urlbuildertests.py" }, { "content": "__author__ = '<NAME>'\n\nimport settings\n\nvalid_interfaces = {\n 'ISteamUser': [\n 'GetPlayerSummaries'\n ],\n 'IPlayerService': [\n 'GetOwnedGames'\n ]\n}\n\ndef get_player_summary():\n return __get_url('ISteamUser', 'GetPlayerSummaries', 'v0002', {\n 'steamids': settings.steam_user_id\n })\n\ndef get_player_owned_games():\n return __get_url('IPlayerService', 'GetOwnedGames', 'v0001', {\n 'steamid': settings.steam_user_id,\n 'include_appinfo':'1',\n 'include_played_free_games':'1'\n })\n\ndef __get_url(interfaceName, methodName, version, parameters):\n if interfaceName not in valid_interfaces:\n raise Exception('The interfaceName provided is not supported')\n\n valid_methods = valid_interfaces[interfaceName]\n\n if methodName not in valid_methods:\n raise Exception('The methodName provided is not supported for this interfaceName')\n\n if not isinstance(parameters, dict):\n raise Exception('parameters must be a dictionary')\n\n url = settings.steam_api_url + interfaceName + '/' + methodName + '/' + version\n url += '?key=' + 
settings.steam_webapi_key + '&format=' + settings.response_format\n\n for key, value in parameters.items():\n url += '&' + key + '=' + value\n\n return url", "id": "10640926", "language": "Python", "matching_score": 2.410289764404297, "max_stars_count": 2, "path": "builders/urlbuilder.py" }, { "content": "__author__ = '<NAME>'\n\nsteam_webapi_key = 'Get your key here http://steamcommunity.com/dev'\nregistered_domain = 'The domain used to register above'\nsteam_user_id = 'Enter your user id here'\nsteam_user_folder = 'Enter your user folder here, eg C:\\\\Program Files (x86)\\\\Steam\\\\userdata\\\\22222222'\n\n# Maybe you brought some games you'll never play (how could you)\n# Add them in your local settings to exclude from the random game chooser\nignored_games = ['Game name to ignore']\n\n# How about games you've already completed?\n# These should go here to exclude them from the random game chooser\ncompleted_games = ['Completed game name']\n\n# API settings\n# Full template: http://api.steampowered.com/<interface name>/<method name>/v<version>/?key=<api key>&format=<format>.\nsteam_api_url = 'http://api.steampowered.com/'\nresponse_format = 'json'\n\n# Database settings\nsqlite_database_name = 'SteamProgress.db'\n\n# Charting settings\ntarget_chart_export_filename = 'stored_playtime.svg'\n\n# Override with your local settings, stops them being committed to GitHub\ntry:\n from local_settings import *\nexcept ImportError:\n pass\n\n# Do not modify, this just simplifies the code later on.\nexcluded_games = []\nexcluded_games.extend(ignored_games)\nexcluded_games.extend(completed_games)\n", "id": "8326678", "language": "Python", "matching_score": 1.7659657001495361, "max_stars_count": 2, "path": "settings.py" }, { "content": "import os\nimport settings\n\n__installed_games = os.listdir(settings.steam_user_folder)\n\ndef is_game_in_installed_games_list(game):\n return str(game['appid']) in __installed_games\n", "id": "1937141", "language": "Python", "matching_score": 0.5827558636665344, "max_stars_count": 2, "path": "clients/installed_games.py" }, { "content": "\nimport argparse\nfrom analytics import playtime\n\ndef get_cmd_arguments():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n '-np', '--never-played',\n dest='never_played',\n action='store_true',\n help='Only returns games you\\'ve never played'\n )\n\n parser.add_argument(\n '-i', '--installed',\n dest='installed',\n action='store_true',\n help='Only returns games that are currently installed'\n )\n\n return parser.parse_args()\n\nif __name__ == '__main__':\n args = get_cmd_arguments()\n\n game_found, game_to_play = playtime.choose_a_random_game_to_play(\n args.never_played,\n args.installed\n )\n\n if not game_found:\n print('Couldn\\'t find a game to play!')\n else:\n print('You should totally play: \"' + game_to_play + '\"')\n input()\n", "id": "8078362", "language": "Python", "matching_score": 2.6230006217956543, "max_stars_count": 2, "path": "random_game_chooser.py" }, { "content": "__author__ = '<NAME>'\n\nimport unittest\nfrom analytics import playtime\n\nclass PlaytimeTests(unittest.TestCase):\n def test_get_total_playtime_for_last_two_weeks_returns_playtime(self):\n playtime_in_minutes = playtime.get_total_playtime_for_last_two_weeks()\n self.assertTrue(playtime_in_minutes > 0)\n\n def test_get_total_playtime_ever_returns_playtime(self):\n playtime_in_minutes = playtime.get_total_playtime_ever()\n self.assertTrue(playtime_in_minutes > 0)\n\n def test_choose_a_random_game_to_play_returns_a_game(self):\n 
game_found, game_to_play = playtime.choose_a_random_game_to_play(False, False)\n if game_found:\n self.assertTrue(isinstance(game_to_play, str))\n else:\n self.assertIsNone(game_to_play)\n\n def test_can_retrieve_a_game_thats_never_been_played(self):\n game_found, game_to_play = playtime.choose_a_random_game_to_play(True, False)\n if game_found:\n self.assertTrue(isinstance(game_to_play, str))\n else:\n self.assertIsNone(game_to_play)\n\n def test_can_retrieve_an_installed_game(self):\n game_found, game_to_play = playtime.choose_a_random_game_to_play(False, True)\n if game_found:\n self.assertTrue(isinstance(game_to_play, str))\n else:\n self.assertIsNone(game_to_play)\n\n def test_can_retrieve_an_installed_game_thats_never_been_played(self):\n game_found, game_to_play = playtime.choose_a_random_game_to_play(True, True)\n if game_found:\n self.assertTrue(isinstance(game_to_play, str))\n else:\n self.assertIsNone(game_to_play)\n", "id": "9746604", "language": "Python", "matching_score": 2.9203314781188965, "max_stars_count": 2, "path": "unittests/playtimetests.py" }, { "content": "__author__ = '<NAME>'\n\nimport random\nimport settings\nfrom clients.steamapi import SteamApiClient\nfrom clients import installed_games\n\ndef get_total_playtime_for_last_two_weeks():\n \"\"\"\n Calculates the total time in minutes played in the last two weeks\n :return: The total playtime in minutes for the last two weeks\n \"\"\"\n games = SteamApiClient().get_player_owned_games()\n return __add_numeric_key_values(games, 'playtime_2weeks')\n\ndef get_total_playtime_ever():\n \"\"\"\n Calculates the total time in minutes played ever\n :return: The total playtime in minutes ever\n \"\"\"\n games = SteamApiClient().get_player_owned_games()\n return __add_numeric_key_values(games, 'playtime_forever')\n\ndef choose_a_random_game_to_play(choose_never_played, choose_installed):\n \"\"\"\n Gets a single random games from the list of games\n that you should really play now.\n :return: A game to play\n \"\"\"\n games = SteamApiClient().get_player_owned_games()\n\n if choose_never_played:\n games_never_played = []\n for game in games:\n if 'playtime_forever' in game:\n if game['playtime_forever'] == 0:\n games_never_played.append(game)\n games = games_never_played\n\n if choose_installed:\n games_installed = []\n for game in games:\n if installed_games.is_game_in_installed_games_list(game):\n games_installed.append(game)\n games = games_installed\n\n unexcluded_games = []\n for game in games:\n if game['name'] not in settings.excluded_games:\n unexcluded_games.append(game)\n games = unexcluded_games\n\n if games.__len__() == 0:\n return False, None\n\n random_int = random.randrange(games.__len__())\n return True, games[random_int]['name']\n\ndef __add_numeric_key_values(games, key_to_add):\n total = 0\n\n for game in games:\n if key_to_add in game:\n total += game[key_to_add]\n\n return total\n", "id": "8005503", "language": "Python", "matching_score": 3.352858304977417, "max_stars_count": 2, "path": "analytics/playtime.py" }, { "content": "from clients import installed_games, steamapi\n\nif __name__ == '__main__':\n api = steamapi.SteamApiClient()\n games = api.get_player_owned_games()\n for game in games:\n game['installed'] = installed_games.is_game_in_installed_games_list(game)\n\n for game in sorted(games, key=lambda game: game['name']):\n installed = 'Yes' if game['installed'] else 'No'\n print (game['name'])\n print(' Installed: ' + installed)\n print(' Playtime in Minutes: ' + str(game['playtime_forever']))\n 
print('')\n", "id": "4532959", "language": "Python", "matching_score": 1.5796722173690796, "max_stars_count": 2, "path": "list_all_games.py" }, { "content": "\nimport settings\nimport datetime\nimport pygal\nimport webbrowser\nfrom peewee import *\nfrom clients.steamapi import SteamApiClient\nfrom collections import defaultdict, OrderedDict\nfrom datetime import date\nfrom dateutil.relativedelta import relativedelta\n\ndb = SqliteDatabase(settings.sqlite_database_name, threadlocals=True)\n\nclass BaseModel(Model):\n class Meta:\n database = db\n\nclass PlaytimeInLast2Weeks(BaseModel):\n \"\"\"\n Table holding details on games and their playtime over the last 2 weeks\n \"\"\"\n game_name = TextField()\n playtime_in_minutes = IntegerField()\n date_captured = DateField(default=datetime.date.today())\n\nclass PlaytimeOperations:\n \"\"\"\n Gets all the playtime for the last 2 weeks and stores it in Sqlite\n \"\"\"\n def __init__(self):\n db.connect()\n db.create_table(PlaytimeInLast2Weeks, True)\n\n def get_and_store_playtime(self):\n records_for_today = PlaytimeInLast2Weeks.select().where(\n PlaytimeInLast2Weeks.date_captured == datetime.date.today()\n )\n\n if records_for_today.count() > 0:\n print('Playtime already captured for today, cancelling save operation')\n return\n\n games = SteamApiClient().get_player_owned_games()\n games_played = []\n\n for game in games:\n if 'playtime_2weeks' in game:\n games_played.append(game)\n\n for game in games_played:\n print('Saving playtime for ' + game['name'])\n record = PlaytimeInLast2Weeks.create(game_name=game['name'], playtime_in_minutes=game['playtime_2weeks'])\n record.save()\n\n def print_all_stored_playtime(self):\n playtime = PlaytimeInLast2Weeks.select().order_by(PlaytimeInLast2Weeks.date_captured.desc())\n\n print('\\nGame, Date Captured, Playtime in minutes')\n print('----------------------------------------')\n for time in playtime:\n output = ', '.join([time.game_name, str(time.date_captured), str(time.playtime_in_minutes)])\n print(output)\n\n def chart_stored_playtime(self):\n four_months_ago = date.today() + relativedelta(months=-4)\n\n playtime = (\n PlaytimeInLast2Weeks\n .select()\n .where(PlaytimeInLast2Weeks.date_captured >= four_months_ago)\n .order_by(PlaytimeInLast2Weeks.date_captured.desc())\n )\n\n bar_chart = pygal.HorizontalBar(title=\"Playtime in minutes over the last 2 weeks by date captured\")\n\n # Group by date captured\n time_by_date_captured = defaultdict(list)\n for time in playtime:\n if not str(time.date_captured) in time_by_date_captured.keys():\n time_by_date_captured[str(time.date_captured)] = time.playtime_in_minutes\n else:\n time_by_date_captured[str(time.date_captured)] += time.playtime_in_minutes\n\n time_by_date_captured = OrderedDict(sorted(time_by_date_captured.items(), key=lambda t: t[0], reverse=True))\n\n bar_chart.x_labels = time_by_date_captured.keys()\n\n bar_chart.add(\"Playtime\", time_by_date_captured.values())\n\n bar_chart.render_to_file(settings.target_chart_export_filename)\n\n webbrowser.open_new(settings.target_chart_export_filename)\n", "id": "153953", "language": "Python", "matching_score": 2.866377115249634, "max_stars_count": 2, "path": "database/playtime_operations.py" }, { "content": "\nfrom database.playtime_operations import PlaytimeOperations\n\nif __name__ == '__main__':\n s = PlaytimeOperations()\n\n s.chart_stored_playtime()\n", "id": "2385005", "language": "Python", "matching_score": 0, "max_stars_count": 2, "path": "graph_playtime.py" }, { "content": "\nfrom setuptools 
import setup, find_packages\n\nsetup(\n name=\"SteamProgress\",\n version=\"0.1\",\n packages=find_packages(),\n install_requires=['peewee', 'pygal', 'Pillow'],\n author=\"<NAME>\",\n author_email=\"<EMAIL>\",\n url='https://github.com/lukemerrett/SteamProgress',\n license=\"MIT\",\n)\n", "id": "3031179", "language": "Python", "matching_score": 2.8319337368011475, "max_stars_count": 2, "path": "setup.py" }, { "content": "from setuptools import setup, find_packages\n\nsetup(\n name=\"workflowy.automation\",\n packages=find_packages(),\n author=\"<NAME>\",\n description=\"Scripts for automating Workflowy tasks using Selenium\",\n license=\"MIT\",\n url=\"https://github.com/lukemerrett/Workflowy-Automation\",\n install_requires=['selenium']\n)\n", "id": "1521935", "language": "Python", "matching_score": 0.5181840658187866, "max_stars_count": 9, "path": "setup.py" }, { "content": "import time\nimport settings\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.wait import WebDriverWait\n\n\nclass WorkflowyScheduler(object):\n workflowy_url = \"https://workflowy.com\"\n browser = webdriver.Firefox()\n\n @classmethod\n def schedule_items_for_today(cls):\n todays_date_tag = cls.__get_todays_date_tag()\n\n cls.browser.get(cls.workflowy_url)\n\n cls.__login()\n cls.__search(todays_date_tag)\n cls.__mark_results_with_tag(\"#Focus\")\n cls.__save_changes()\n\n cls.browser.close()\n\n @classmethod\n def __login(cls):\n cls.__click_button(\"div.header-bar a.button--top-right\")\n cls.__wait_for_element_to_appear(\"#id_username\")\n cls.__fill_text_box(\"#id_username\", settings.workflowy_username)\n cls.__fill_text_box(\"#id_password\", settings.workflowy_password)\n cls.__click_button(\"input.button--submit\")\n\n @classmethod\n def __search(cls, search_term: str):\n cls.__wait_for_element_to_appear(\"#searchBox\")\n cls.__fill_text_box(\"#searchBox\", search_term)\n\n @classmethod\n def __mark_results_with_tag(cls, tag: str):\n for element in cls.browser.find_elements_by_css_selector(\"div.name.matches\"):\n text = element.text\n if tag not in text:\n text_box = element.find_element_by_css_selector(\"div.content\")\n text_box.click()\n text_box.send_keys(Keys.END)\n text_box.send_keys(\" \" + tag)\n\n @classmethod\n def __save_changes(cls):\n cls.browser.find_element_by_css_selector(\"div.saveButton\").click()\n cls.__wait_for_element_to_appear(\"div.saveButton.saved\")\n\n @classmethod\n def __click_button(cls, css_selector: str):\n cls.browser.find_element_by_css_selector(css_selector).click()\n\n @classmethod\n def __wait_for_element_to_appear(cls, css_selector):\n WebDriverWait(cls.browser, 10).until(lambda driver: driver.find_element_by_css_selector(css_selector))\n\n @classmethod\n def __fill_text_box(cls, css_selector: str, text_to_input: str):\n cls.browser.find_element_by_css_selector(css_selector).send_keys(text_to_input)\n\n @classmethod\n def __get_todays_date_tag(cls) -> str:\n return \"#%s\" % time.strftime(\"%Y-%m-%d\")\n\n\nif __name__ == \"__main__\":\n WorkflowyScheduler.schedule_items_for_today()\n", "id": "276040", "language": "Python", "matching_score": 2.2374281883239746, "max_stars_count": 9, "path": "flag_due_workflowy_items.py" }, { "content": "# Fill these in using your own local_settings.py file in the same directory\nworkflowy_username = \"\"\nworkflowy_password = \"\"\n\ntry:\n from local_settings import *\nexcept ImportError:\n pass\n", "id": "1369150", "language": "Python", "matching_score": 
1.0679315328598022, "max_stars_count": 9, "path": "settings.py" } ]
1.667952
narmin24
[ { "content": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport tweepy\nimport pandas as pd\nimport numpy as np\nimport webbrowser\nimport time\nfrom tweepy import OAuthHandler\nimport json\nimport csv\nimport re\nimport string\nimport os\n\n\n# In[ ]:\n\n\nkey = \"<KEY>\"\nsecret = \"<KEY>\"\ncallback_url = \"oob\"\nauth = tweepy.OAuthHandler(key, secret, callback_url)\nredirect_url = auth.get_authorization_url()\nwebbrowser.open(redirect_url)\npin_input = input(\"Enter Pin Value : \")\nauth.get_access_token(pin_input)\n\n\n# In[ ]:\n\n\napi = tweepy.API(auth)\n\n\n# In[ ]:\n\n\nlonely_list = 'need help OR lonely OR alone OR feeling lonely OR love me OR dead inside OR i want to die OR #Ineedtotalk OR i need OR all alone'\nanxiety_list = \"I just can’t OR I’m fine OR Overthinking OR I tried OR I'm okay OR Help me OR I'm fine OR I need OR Left out OR Worry OR Nervous\"\nstress_list = \"very hard OR incredibly OR stressed OR sad OR tired OR It's not easy being OR tension OR selfcare OR insomnia OR trauma OR awake\"\n\n\n# In[ ]:\n\n\nlonely_tweets = pd.DataFrame(columns = ['username', 'acctdesc', 'location', 'usercreatedts', 'tweetcreatedts',\n 'retweetcount', 'text', 'hashtags'])\nanxiety_tweets = pd.DataFrame(columns = ['username', 'acctdesc', 'location', 'usercreatedts', 'tweetcreatedts',\n 'retweetcount', 'text', 'hashtags'])\nstress_tweets = pd.DataFrame(columns = ['username', 'acctdesc', 'location', 'usercreatedts', 'tweetcreatedts',\n 'retweetcount', 'text', 'hashtags'])\n\n\n# In[ ]:\n\n\ndef scraptweets(search_words, numTweets, numRuns, db_tweets):\n \n program_start = time.time()\n for i in range(0, numRuns):\n start_run = time.time()\n \n tweets = tweepy.Cursor(api.search_30_day,environment_name='tweets30days',q=search_words).items(numTweets)\n \n tweet_list = [tweet for tweet in tweets if tweet.lang=='en'] \n noTweets = 0\n \n \n for tweet in tweet_list:\n username = tweet.user.screen_name\n acctdesc = tweet.user.description\n location = tweet.user.location\n usercreatedts = tweet.user.created_at\n tweetcreatedts = tweet.created_at\n retweetcount = tweet.retweet_count\n hashtags = tweet.entities['hashtags']\n try:\n text = tweet.retweeted_status.full_text\n except AttributeError: # Not a Retweet\n #text = tweet.full_text\n if tweet.truncated:\n text = tweet.extended_tweet['full_text']\n else:\n text = tweet.text\n \n ith_tweet = [username, acctdesc, location,\n usercreatedts, tweetcreatedts, retweetcount, text, hashtags]\n\n db_tweets.loc[len(db_tweets)] = ith_tweet \n noTweets += 1\n \n \n end_run = time.time()\n duration_run = round((end_run-start_run)/60, 2)\n \n print('no. 
of tweets scraped for run {} is {}'.format(i + 1, noTweets))\n print('time take for {} run to complete is {} mins'.format(i+1, duration_run))\n \n time.sleep(5) #15 minute sleep time\n\n \n program_end = time.time()\n print('Scraping has completed!')\n print('Total time taken to scrap is {} minutes.'.format(round(program_end - program_start)/60, 2))\n\n\n# In[ ]:\n\n\nnumTweets = 2500\nnumRuns = 1\n\n\n# In[ ]:\n\n\nscraptweets(lonely_list, numTweets, numRuns, lonely_tweets)\n\n\n# In[ ]:\n\n\nscraptweets(anxiety_list, numTweets, numRuns, anxiety_tweets)\n\n\n# In[ ]:\n\n\nscraptweets(stress_list, numTweets, numRuns, stress_tweets)\n\n\n# In[ ]:\n\n\nlonely_tweets['text'] = lonely_tweets['text'].str.replace(r'[^\\x00-\\x7F]+', '', regex=True)\n\n\n# In[ ]:\n\n\nanxiety_tweets['text'] = anxiety_tweets['text'].str.replace(r'[^\\x00-\\x7F]+', '', regex=True)\n\n\n# In[ ]:\n\n\nstress_tweets['text'] = stress_tweets['text'].str.replace(r'[^\\x00-\\x7F]+', '', regex=True)\n\n\n# In[ ]:\n\n\nlonely_tweets.to_csv('lonely_tweets.csv')\nanxiety_tweets.to_csv('anxiety_tweets.csv')\nstress_tweets.to_csv('stress_tweets.csv')\n\n\n# In[ ]:\n\n\nnormal_list = '-stress OR -lonely OR -anxious OR -alone OR -sad OR -tension OR -help OR -die OR -miss OR -need'\n\n\n# In[ ]:\n\n\nnormal_tweets = pd.DataFrame(columns = ['username', 'acctdesc', 'location', 'usercreatedts', 'tweetcreatedts',\n 'retweetcount', 'text', 'hashtags'])\n\n\n# In[ ]:\n\n\ndef scraprecenttweets(search_words, numTweets, numRuns, db_tweets):\n \n program_start = time.time()\n for i in range(0, numRuns):\n start_run = time.time()\n \n tweets = tweepy.Cursor(api.search,q=search_words,tweet_mode = 'extended',lang='en').items(numTweets)\n \n tweet_list = [tweet for tweet in tweets] \n noTweets = 0\n \n \n for tweet in tweet_list:\n username = tweet.user.screen_name\n acctdesc = tweet.user.description\n location = tweet.user.location\n usercreatedts = tweet.user.created_at\n tweetcreatedts = tweet.created_at\n retweetcount = tweet.retweet_count\n hashtags = tweet.entities['hashtags']\n try:\n text = tweet.retweeted_status.full_text\n except AttributeError: # Not a Retweet\n text = tweet.full_text\n #if tweet.truncated:\n # text = tweet.extended_tweet['full_text']\n #else:\n # text = tweet.text\n \n ith_tweet = [username, acctdesc, location,\n usercreatedts, tweetcreatedts, retweetcount, text, hashtags]\n\n db_tweets.loc[len(db_tweets)] = ith_tweet \n noTweets += 1\n \n \n end_run = time.time()\n duration_run = round((end_run-start_run)/60, 2)\n \n print('no. 
of tweets scraped for run {} is {}'.format(i + 1, noTweets))\n print('time take for {} run to complete is {} mins'.format(i+1, duration_run))\n \n time.sleep(5) #15 minute sleep time\n\n \n program_end = time.time()\n print('Scraping has completed!')\n print('Total time taken to scrap is {} minutes.'.format(round(program_end - program_start)/60, 2))\n\n\n# In[ ]:\n\n\nnumTweets_1 = 2000\nnumRuns_1 = 1\n\n\n# In[ ]:\n\n\nscraprecenttweets(normal_list, numTweets_1, numRuns_1, normal_tweets)\n\n\n# In[ ]:\n\n\nnormal_tweets.to_csv('normal_tweets.csv')\n\n\n# In[ ]:\n\n\nlonely_tweets.to_csv('lonely_tweets_2.csv')\nanxiety_tweets.to_csv('anxiety_tweets_2.csv')\nstress_tweets.to_csv('stress_tweets_2.csv')\n\n\n# In[ ]:\n\n\n\n\n", "id": "11856796", "language": "Python", "matching_score": 2.554206371307373, "max_stars_count": 4, "path": "Extracting Targeted Tweets.py" }, { "content": "#!/usr/bin/env python\n# coding: utf-8\n\n# ## Streamlit Web Deployment\n# Run this file as .ipynb notebook, cells division is specified\n\n# In[ ]:\n\n!pip install -q tf-models-official==2.3.0\n!pip install streamlit\n!pip install pyngrok\n\n\n# In[ ]:\n\n\nfrom google.colab import drive\ndrive.mount('/content/drive')\n\n\n# In[ ]:\n\n\n%%writefile utilss.py\nimport nltk\nnltk.download('punkt')\nnltk.download('stopwords')\nnltk.download('averaged_perceptron_tagger')\nnltk.download('wordnet')\nimport re\nimport string\nimport random\nfrom nltk.tokenize import WordPunctTokenizer\nfrom nltk.tag import pos_tag\nfrom nltk.stem.wordnet import WordNetLemmatizer \nfrom nltk.corpus import stopwords\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\nimport tweepy\nconsumerKey = \"VEyxpXLGHG9USYhM7spHVKl36\"\nconsumerSecret = \"<KEY>\"\naccessToken = \"<KEY>\"\naccessTokenSecret = \"<KEY>\"\n\nauthenticate = tweepy.OAuthHandler(consumerKey, consumerSecret) \nauthenticate.set_access_token(accessToken, accessTokenSecret) \napi = tweepy.API(authenticate, wait_on_rate_limit = True)\nfrom official.modeling import tf_utils\nfrom official import nlp\nfrom official.nlp import bert\n\n# Load the required submodules\nimport official.nlp.bert.bert_models\nimport official.nlp.bert.configs\nimport official.nlp.bert.tokenization as tokenization\nimport PIL\nimport pandas as pd\nimport numpy as np\nimport io\nimport tensorflow_hub as hub\n\nfrom keras.layers import Input, Dropout, Dense, Activation\nfrom tensorflow.keras.utils import to_categorical\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras.models import Sequential\nmodule_url = 'https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/2'\nbert_layer = hub.KerasLayer(module_url, trainable=True)\n\nvocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()\ndo_lower_case = bert_layer.resolved_object.do_lower_case.numpy()\ntokenizer = tokenization.FullTokenizer(vocab_file, do_lower_case)\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport streamlit as st\n\ndef bert_encode(texts, tokenizer, max_len=512):\n all_tokens = []\n all_masks = []\n all_segments = []\n \n for text in texts:\n text = tokenizer.tokenize(text)\n \n text = text[:max_len-2]\n input_sequence = [\"[CLS]\"] + text + [\"[SEP]\"]\n pad_len = max_len - len(input_sequence)\n \n tokens = tokenizer.convert_tokens_to_ids(input_sequence) + [0] * pad_len\n pad_masks = [1] * 
len(input_sequence) + [0] * pad_len\n segment_ids = [0] * max_len\n \n all_tokens.append(tokens)\n all_masks.append(pad_masks)\n all_segments.append(segment_ids)\n \n return np.array(all_tokens), np.array(all_masks), np.array(all_segments)\n\nmodel_lonely = keras.models.load_model('/content/drive/MyDrive/Utrack_Models/Utrack_Lonely')\n\ndef Show_Recent_Tweets(raw_text):\n posts = api.user_timeline(screen_name=raw_text, count = 100, lang =\"en\", tweet_mode=\"extended\") \n def get_tweets():\n column_names = ['tweet', 'time']\n user = pd.DataFrame(columns =column_names)\n \n tweet_time = []\n tweet_text = []\n for info in posts[:100]:\n tweet_time.append(info.created_at)\n tweet_text.append(info.full_text)\n \n user['time'] = tweet_time\n user['tweet'] = tweet_text\n \n return user\n \n recent_tweets=get_tweets() \n return recent_tweets\n \ndef tokenize_tweets(clown) :\n tweets = clown.tweet.tolist()\n tokenizer = WordPunctTokenizer() \n cleaned = []\n for i in range(0, len(tweets)):\n text = tweets[i]\n text = re.sub('^https?://.*[rn]*','', text)\n text = re.sub(r'\\w+:\\/{2}[\\d\\w-]+(\\.[\\d\\w-]+)*(?:(?:\\/[^\\s/]*))*', '', text)\n text = re.sub(\"(@[A-Za-z0-9_]+)\",\"\", text)\n text = re.sub(\"([^\\w\\s])\", \"\", text)\n text = re.sub(\"^RT\", \"\", text)\n text = tokenizer.tokenize(text)\n element = [text]\n cleaned.append(element)\n return cleaned\n\ndef lemmatize_sentence(tweet_tokens, stop_words = ()):\n lemmatizer = WordNetLemmatizer()\n cleaned_tokens = []\n for token, tag in pos_tag(tweet_tokens):\n if tag.startswith('NN'):\n pos = 'n'\n elif tag.startswith('V'):\n pos = 'v'\n else:\n pos = 'a'\n token = lemmatizer.lemmatize(token, pos)\n if len(token) > 0 and token not in string.punctuation and token.lower() not in stop_words:\n cleaned_tokens.append(token.lower())\n return cleaned_tokens\n\ndef create_lemmatized_sent(words):\n cleaned = []\n stop_words = stopwords.words('english')\n for i in range(0, len(words)):\n sent = lemmatize_sentence(words[i][0], stop_words)\n if len(sent) >= 0:\n element = [sent]\n cleaned.append(element)\n return cleaned\n\ndef remove_emoji(string):\n emoji_pattern = re.compile(\"[\"\n u\"\\U0001F600-\\U0001F64F\" # emoticons\n u\"\\U0001F300-\\U0001F5FF\" # symbols & pictographs\n u\"\\U0001F680-\\U0001F6FF\" # transport & map symbols\n u\"\\U0001F1E0-\\U0001F1FF\" # flags (iOS)\n u\"\\U00002702-\\U000027B0\"\n u\"\\U000024C2-\\U0001F251\"\n \"]+\", flags=re.UNICODE)\n return emoji_pattern.sub(r'', string)\n\ndef write_sent(clown, sent):\n cleaned = []\n for i in sent:\n s = \"\"\n for j in i[0]:\n j = str(j)\n j = j + \" \"\n s = s + j\n s = remove_emoji(s)\n element = [s]\n cleaned.append(element)\n df = pd.DataFrame(cleaned,columns = ['text'])\n df1 = clown\n df1 = df1['time']\n big = pd.concat([df, df1], axis = 1)\n return big\n \ndef import_and_predict(df, model):\n max_len = 150\n \n test_input = bert_encode(df[\"text\"].values, tokenizer, max_len=max_len)\n prediction = model.predict(test_input)\n\n return prediction\n\n\ndef output_dataframe(df, prediction):\n df2 = pd.concat([df, prediction], axis = 1)\n return df2\n\ndef visualisation(n, file):\n #constructing data\n df = file\n jscolumn = df.predictions\n\n df['final'] = jscolumn\n df['perc'] = 100*(df.final)\n new_df = df.drop(columns = ['predictions','final'])\n plt.figure(figsize=(40,15))\n n = int(input())\n temp_df = new_df[:n]\n final_df = temp_df.iloc[::-1]\n sns.lineplot(x='time', y='perc', data=final_df, linewidth=7, color = 'red')\n plt.title(\"Mental State vs Date\", fontsize= 
40,fontweight='bold')\n sns.set_style('white')\n\n plt.xlabel('Month',fontsize=30,fontweight='bold')\n plt.xticks(fontsize=20,rotation=90)\n plt.ylabel('Percentage',fontsize=30,fontweight='bold')\n plt.yticks(fontsize=25)\n plt.grid(axis='y', alpha=0.5)\n\n st.pyplot()\n\n monthdict = {\"January\":1, \"February\":2, \"March\":3, \"April\":4, \"May\":5, \"June\":6, \"July\":7, \"August\":8,\n \"September\":9, \"October\":10, \"November\":11, \"December\":12}\n values= []\n for month in df.months.unique():\n dftempo = df[pd.to_datetime(df['time']).dt.month == monthdict[month]]\n values.append(jsmean(dftempo.final))\n \n plt.figure(figsize=(15,10))\n x= df.months.unique()\n height = 100*np.array(values)\n plt.bar(x, height, width=0.5, bottom=None, align='center', color=['#78C850', # Grass\n '#f20a53', # Fire\n '#6890F0', # Water\n '#A8B820', # Bug\n '#A8A878', # Normal\n '#A040A0', # Poison\n '#F8D030', # Electric\n '#E0C068', # Ground\n '#EE99AC', # Fairy\n '#C03028', # Fighting\n '#6cf5d3', \n '#561191'\n ])\n\n\n sns.set_style('white')\n\n\n plt.xlabel('Month',fontsize=15,fontweight='bold')\n plt.xticks(fontsize=15,rotation=45)\n plt.ylabel('Percentage',fontsize=15,fontweight='bold')\n plt.yticks(fontsize=15)\n plt.grid(axis='y', alpha=0.5)\n plt.title('Average Percentage across months', fontsize=20)\n\n st.pyplot()\n\n df['months'] = df['time'].dt.month_name()\n plt.figure(figsize=(10,5))\n sns.set_style('white')\n sns.swarmplot(x='months', y='perc', data=df.iloc[::-1])\n #plt.xticks(rotation=90);\n plt.xlabel('Month',fontsize=15,fontweight='bold')\n plt.xticks(fontsize=15,rotation=0)\n plt.ylabel('Percentage',fontsize=15,fontweight='bold')\n plt.yticks(fontsize=15)\n plt.title('Percentage across months', fontsize=20)\n plt.grid(axis='y', alpha=0.5)\n\n st.pyplot()\n\n plt.figure(figsize=(10,6))\n \n sns.violinplot(x='months',\n y='perc', \n data=df.iloc[::-1], \n inner=None)\n \n sns.swarmplot(x='months', \n y='perc', \n data=df.iloc[::-1], \n color='k',\n alpha=1) \n plt.grid(axis='y', alpha=0.5)\n plt.xlabel('Month',fontsize=15,fontweight='bold')\n plt.xticks(fontsize=15,rotation=0)\n plt.ylabel('Percentage',fontsize=15,fontweight='bold')\n plt.yticks(fontsize=15)\n plt.title('Percentage across months', fontsize=20)\n\n st.pyplot()\n\n def make_pie(sizes, text, colors):\n import matplotlib.pyplot as plt\n import numpy as np\n sizes = [100-100*jsmean(df['final']), 100*jsmean(df['final'])]\n text = round(jsmean(df['final'])*100,2)\n col = [[i/255. 
for i in c] for c in colors]\n\n fig, ax = plt.subplots()\n ax.axis('equal')\n width = 0.30\n kwargs = dict(colors=col, startangle=90)\n outside, _ = ax.pie(sizes, radius=1, pctdistance=1-width/2,**kwargs)\n plt.setp( outside, width=width, edgecolor='white')\n\n kwargs = dict(size=20, fontweight='bold', va='center')\n ax.text(0, 0, text, ha='center', **kwargs)\n plt.show()\n\n c2 = (226,33,0)\n c1 = (40,133,4)\n\n make_pie([257,90], round(df['perc'].mean(), 2),[c1,c2])\n \n st.pyplot()\n\ndef probability_out(x):\n n=len(x)\n for i in range(n):\n if(x.iloc[i,0]<0): \n x.iloc[i,0]=0\n if(x.iloc[i,0]>=0 and x.iloc[i,0]<=1):\n x.iloc[i,0]=np.sin(x.iloc[i,0])\n if(x.iloc[i,0]>1):\n x.iloc[i,0]=(np.log(x.iloc[i,0])+(np.pi)*(np.pi)*(np.sin(1)))/((np.pi)**2)\n if(x.iloc[i,0]>1):\n x.iloc[i,0]=1\n return x\n\ndef tweets_conclusion(df):\n #compute weights\n def weight(x):\n return (np.exp(x)-1)/(np.exp(1)-1)\n\n def jsmean(arr):\n num = 0\n den = 0\n for i in arr:\n den = den + weight(i)\n num = num + i*weight(i)\n return (num/den)[0]\n\n new_df = df.values\n return jsmean(new_df)\n\ndef combine_all(user_name):\n #preprocessing input data\n raw_text = user_name\n recent_tweets=Show_Recent_Tweets(raw_text)\n words = tokenize_tweets(recent_tweets)\n sent = create_lemmatized_sent(words)\n df = write_sent(recent_tweets, sent)\n #loading models\n \n us = f\"Setting up models for analysing the profile of **{api.get_user(screen_name=raw_text).name}**\"\n st.markdown(us)\n\n st.text(\"Loading the model\")\n model_lonely = keras.models.load_model('/content/drive/MyDrive/Utrack_Models/Utrack_Lonely')\n model_stress = keras.models.load_model('/content/drive/MyDrive/Utrack_Models/Utrack_Stress')\n model_anxiety = keras.models.load_model('/content/drive/MyDrive/Utrack_Models/Utrack_Anxiety')\n \n intro = f\"Twitter Bio of the user => **{api.get_user(screen_name=raw_text).description}**\"\n st.markdown(intro)\n\n bio = f\"User lives in **{api.get_user(screen_name=raw_text).location}**\"\n st.markdown(bio)\n\n fol = f\"Number of Followers of the user => **{api.get_user(screen_name=raw_text).followers_count}**\"\n st.markdown(fol)\n\n st.text(\"Hold Up!! 
Working on Predictions...\")\n\n prediction_lonely = import_and_predict(df, model_lonely)\n prediction_stress = import_and_predict(df, model_stress)\n prediction_anxiety = import_and_predict(df, model_anxiety)\n\n st.text(\"Predictions Done\")\n \n col1, col2, col3 = st.beta_columns(3)\n\n prediction_lonely = pd.DataFrame(prediction_lonely, columns = ['Loneliness'])\n prediction_stress = pd.DataFrame(prediction_stress, columns = ['Stress'])\n prediction_anxiety = pd.DataFrame(prediction_anxiety, columns = ['Anxiety'])\n \n prediction_lonely = probability_out(prediction_lonely)\n prediction_stress = probability_out(prediction_stress)\n prediction_anxiety = probability_out(prediction_anxiety)\n \n df_total = output_dataframe(df,prediction_lonely)\n df_total = output_dataframe(df_total,prediction_stress)\n df_total = output_dataframe(df_total,prediction_anxiety)\n\n st.write(df_total)\n df_total = df_total.rename(columns={'time':'index'}).set_index('index')\n \n with col1:\n st.text(\"LONELINESS LEVELS\")\n st.success(tweets_conclusion(prediction_lonely))\n st.line_chart(data=df_total['Loneliness'])\n\n with col2:\n st.text(\"STRESS LEVELS\")\n st.success(tweets_conclusion(prediction_stress))\n st.line_chart(data=df_total['Stress'])\n \n with col3:\n st.text(\"ANXIETY LEVELS\")\n st.success(tweets_conclusion(prediction_anxiety))\n st.line_chart(data=df_total['Anxiety'])\n\n\n# In[ ]:\n\n\n%%writefile app.py\nfrom utilss import combine_all\nimport tensorflow as tf\nimport streamlit as st\nfrom tensorflow import keras\nst.set_option('deprecation.showfileUploaderEncoding', False)\nst.set_page_config(\n page_title=\"UTrack\",\n layout=\"wide\"\n)\nst.title(\"UTrack\")\n\nst.subheader('*Analysing Twitter Users on Tweet-to-Tweet basis to track levels of Loneliness, Stress & Anxiety*')\n\nraw_text = st.text_input(\"Enter the exact twitter handle of the Personality (without @)\")\nst.text(raw_text)\nif raw_text == '':\n st.text('OOPS!!!! Enter userID')\nelse:\n combine_all(raw_text)\n\n \n\n\n# ## Running localhost server for colab from ngrok\n\n# In[ ]:\n\n\n!ngrok authtoken 1pqPDOU30ORUzHtrlCA5DX7odxX_4N3in7gRue2ctUDTBYPun\n\n\n# In[ ]:\n\n\n!nohup streamlit run app.py --server.port 80 &\n\n\n# In[ ]:\n\n\nfrom pyngrok import ngrok\n\nurl = ngrok.connect(port=80)\nurl\n\n\n# In[ ]:\n\n\n!cat /content/nohup.out\n\n\n# In[ ]:\n\n\n# Uncomment this only to kill the terminals.\n## ! 
killall ngrok\n\n\n# In[ ]:\n\n\n\n\n", "id": "8747838", "language": "Python", "matching_score": 6.464303016662598, "max_stars_count": 0, "path": "Streamlit Deployment.py" }, { "content": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport pandas as pd\nimport nltk\nnltk.download('punkt')\nnltk.download('stopwords')\nnltk.download('averaged_perceptron_tagger')\nnltk.download('wordnet')\nimport re\nimport string\nimport random\nfrom nltk.tokenize import WordPunctTokenizer\nfrom nltk.tag import pos_tag\nfrom nltk.stem.wordnet import WordNetLemmatizer \nfrom nltk.corpus im pplport stopwords\n\n\n# In[ ]:\n\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\n\n\n# In[ ]:\n\n\nfrom google.colab import files \nuploaded = files.upload()\n\n\n# In[ ]:\n\n\ndef extract_csv():\n my_filtered_csv = pd.read_csv('clown_1.csv', usecols=['tweet'])\n return my_filtered_csv\n\ndef tokenize_tweets(clown_1) :\n tweets = clown_1.tweet.tolist()\n tokenizer = WordPunctTokenizer() \n cleaned = []\n for i in range(0, len(tweets)):\n text = tweets[i]\n text = re.sub('^https?://.*[rn]*','', text)\n text = re.sub(r'\\w+:\\/{2}[\\d\\w-]+(\\.[\\d\\w-]+)*(?:(?:\\/[^\\s/]*))*', '', text)\n text = re.sub(\"(@[A-Za-z0-9_]+)\",\"\", text)\n text = re.sub(\"([^\\w\\s])\", \"\", text)\n text = re.sub(\"^RT\", \"\", text)\n text = tokenizer.tokenize(text)\n element = [text]\n cleaned.append(element)\n return cleaned\n\ndef lemmatize_sentence(tweet_tokens, stop_words = ()):\n lemmatizer = WordNetLemmatizer()\n cleaned_tokens = []\n for token, tag in pos_tag(tweet_tokens):\n if tag.startswith('NN'):\n pos = 'n'\n elif tag.startswith('V'):\n pos = 'v'\n else:\n pos = 'a'\n token = lemmatizer.lemmatize(token, pos)\n if len(token) > 0 and token not in string.punctuation and token.lower() not in stop_words:\n cleaned_tokens.append(token.lower())\n return cleaned_tokens\n\ndef create_lemmatized_sent(words):\n cleaned = []\n stop_words = stopwords.words('english')\n for i in range(0, len(words)):\n sent = lemmatize_sentence(words[i][0], stop_words)\n if len(sent) >= 0:\n element = [sent]\n cleaned.append(element)\n return cleaned\n\ndef remove_emoji(string):\n emoji_pattern = re.compile(\"[\"\n u\"\\U0001F600-\\U0001F64F\" # emoticons\n u\"\\U0001F300-\\U0001F5FF\" # symbols & pictographs\n u\"\\U0001F680-\\U0001F6FF\" # transport & map symbols\n u\"\\U0001F1E0-\\U0001F1FF\" # flags (iOS)\n u\"\\U00002702-\\U000027B0\"\n u\"\\U000024C2-\\U0001F251\"\n \"]+\", flags=re.UNICODE)\n return emoji_pattern.sub(r'', string)\n\ndef write_sent(sent):\n cleaned = []\n for i in sent:\n s = \"\"\n for j in i[0]:\n j = str(j)\n j = j + \" \"\n s = s + j\n s = remove_emoji(s)\n element = [s]\n cleaned.append(element)\n df = pd.DataFrame(cleaned)\n # print(df.iloc[1])\n #df.to_csv('cleaned_clown_1.csv', index=False)\n df1 = pd.read_csv('clown_1.csv')\n df1 = df1['time']\n big = pd.concat([df, df1], axis = 1)\n big.to_csv('cleaned_clown_1.csv', index=False)\n\nclown_1 = extract_csv()\nwords = tokenize_tweets(clown_1)\nsent = create_lemmatized_sent(words)\nwrite_sent(sent)\n\n\n# In[ ]:\n\n\ndf = pd.read_csv('cleaned_clown_1.csv')\ndf = df.rename(columns={\"0\": \"clean_tweet\"})\ndf\n\n\n# In[ ]:\n\n\ndf.to_csv('cleaned_clown_1.csv', index=True) \nfiles.download('cleaned_clown_1.csv')\n\n\n# In[ ]:\n\n\n\n\n", "id": "674980", "language": 
"Python", "matching_score": 0.5753105878829956, "max_stars_count": 4, "path": "Cleaning Tweets.py" }, { "content": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport pandas as pd\nimport numpy as np\nimport tensorflow_hub as hub\nimport io\nfrom keras.layers import Input, Dropout, Dense, Activation\nfrom tensorflow.keras.utils import to_categorical\nimport tensorflow as tf\nimport logging\nlogging.basicConfig(level=logging.INFO)\n\n\n# In[ ]:\n\n\npip install -q tf-models-official==2.3.0\n\n\n# In[ ]:\n\n\nfrom official.modeling import tf_utils\nfrom official import nlp\nfrom official.nlp import bert\n\n# Load the required submodules\nimport official.nlp.bert.bert_models\nimport official.nlp.bert.configs\nimport official.nlp.bert.tokenization as tokenization\n\nfrom sklearn.preprocessing import LabelEncoder\n\n\n# In[ ]:\n\n\ndf = pd.read_csv('/content/finaldata.csv')\ndf = df.sample(frac=1, random_state = 111).reset_index(drop=True)\ntrain_data = df.sample(frac=0.8, random_state = 111)\ntest_data = df.drop(train_data.index)\ny_df = df['target']\nx_df = df.drop('target', axis=1)\nx_df.reset_index(drop = True, inplace=True)\ny_df.reset_index(drop=True, inplace=True)\n\n\n# In[ ]:\n\n\nlabelencoder = LabelEncoder()\ndf = df.copy()\ndf.target = labelencoder.fit_transform(df.target)\n\n\n# In[ ]:\n\n\nmodule_url = 'https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/2'\nbert_layer = hub.KerasLayer(module_url, trainable=True)\n\n\n# In[ ]:\n\n\nvocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()\ndo_lower_case = bert_layer.resolved_object.do_lower_case.numpy()\ntokenizer = tokenization.FullTokenizer(vocab_file, do_lower_case)\n\n\n# In[ ]:\n\n\ndef bert_encode(texts, tokenizer, max_len=512):\n all_tokens = []\n all_masks = []\n all_segments = []\n \n for text in texts:\n text = tokenizer.tokenize(text)\n \n text = text[:max_len-2]\n input_sequence = [\"[CLS]\"] + text + [\"[SEP]\"]\n pad_len = max_len - len(input_sequence)\n \n tokens = tokenizer.convert_tokens_to_ids(input_sequence) + [0] * pad_len\n pad_masks = [1] * len(input_sequence) + [0] * pad_len\n segment_ids = [0] * max_len\n \n all_tokens.append(tokens)\n all_masks.append(pad_masks)\n all_segments.append(segment_ids)\n \n return np.array(all_tokens), np.array(all_masks), np.array(all_segments)\n\n\n# In[ ]:\n\n\ndef build_model(bert_layer, max_len=512):\n input_word_ids = tf.keras.Input(shape=(max_len,), dtype=tf.int32, name=\"input_word_ids\")\n input_mask = tf.keras.Input(shape=(max_len,), dtype=tf.int32, name=\"input_mask\")\n segment_ids = tf.keras.Input(shape=(max_len,), dtype=tf.int32, name=\"segment_ids\")\n\n pooled_output, sequence_output = bert_layer([input_word_ids, input_mask, segment_ids])\n clf_output = sequence_output[:, 0, :]\n net = tf.keras.layers.Dense(64, activation='relu')(clf_output)\n net = tf.keras.layers.Dropout(0.2)(net)\n net = tf.keras.layers.Dense(32, activation='relu')(net)\n net = tf.keras.layers.Dropout(0.2)(net)\n out = tf.keras.layers.Dense(1, activation=None)(net)\n \n model = tf.keras.models.Model(inputs=[input_word_ids, input_mask, segment_ids], outputs=out)\n model.compile(tf.keras.optimizers.Adam(lr=1e-5), loss='binary_crossentropy', metrics=['accuracy'])\n \n return model\n\n\n# In[ ]:\n\n\nmax_len = 150\ntrain_input = bert_encode(df.cleaned_text.values, tokenizer, max_len=max_len)\ntrain_labels = np.array(df.target.values)\n\n\n# In[ ]:\n\n\nmodel = build_model(bert_layer, max_len=max_len)\nmodel.summary()\n\n\n# In[ ]:\n\n\ncheckpoint = 
tf.keras.callbacks.ModelCheckpoint('anxiety.h5', monitor='val_accuracy', save_best_only=True, verbose=1)\nearlystopping = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=5, verbose=1)\ntrain_history = model.fit(train_input, train_labels, validation_split = 0.25, epochs = 30, callbacks=[checkpoint, earlystopping], verbose = 1,batch_size = 32)\n\n\n# In[ ]:\n\n\nfrom google.colab import drive\ndrive.mount('/content/drive')\n\n\n# In[ ]:\n\n\nmodel.save('/content/drive/MyDrive/UTrack_Models/UTrack_Anxiety') \n\n", "id": "12234460", "language": "Python", "matching_score": 8.333146095275879, "max_stars_count": 4, "path": "Training Models/Anxiety Model.py" }, { "content": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport pandas as pd\nimport numpy as np\nimport tensorflow_hub as hub\nimport io\nfrom keras.layers import Input, Dropout, Dense, Activation\nfrom keras.utils import to_categorical\nimport tensorflow as tf\nimport logging\nlogging.basicConfig(level=logging.INFO)\n\n\n# In[ ]:\n\n\npip install -q tf-models-official==2.3.0\n\n\n# In[ ]:\n\n\nfrom official.modeling import tf_utils\nfrom official import nlp\nfrom official.nlp import bert\n\n# Load the required submodules\nimport official.nlp.bert.bert_models\nimport official.nlp.bert.configs\nimport official.nlp.bert.tokenization as tokenization\n\nfrom sklearn.preprocessing import LabelEncoder\n\n\n# In[ ]:\n\n\nnp.random.seed(321)\n\n\n# In[ ]:\n\n\nlonely_df = pd.read_csv(\"big_lonely.csv\")\nnormal_df = pd.read_csv(\"df_normal_edited.csv\")\n\nlonely_df.drop('Unnamed: 0', inplace=True, axis=1)\nnormal_df.drop('Unnamed: 0', inplace=True, axis=1)\n\nlonely_df['target'] = '1'\nnormal_df['target'] = '0'\n\nlonely_df.rename(columns={'0':'cleaned_text'}, inplace=True)\n\ncombo_df = pd.concat([lonely_df, normal_df])\ncombo_df.reset_index(drop=True, inplace=True)\n\ncombo_df = combo_df.sample(frac=1, random_state=321).reset_index(drop=True)\n\ntrain_data = combo_df.sample(frac = 0.8, random_state=321)\ntest_data = combo_df.drop(train_data.index) \n\ny_df = combo_df['target']\nx_df = combo_df.drop('target', axis=1)\n\nx_df.reset_index(drop=True, inplace=True)\ny_df.reset_index(drop=True, inplace=True)\n\n\n# In[ ]:\n\n\nlabelencoder = LabelEncoder()\ncombo_df= combo_df.copy()\ncombo_df.target = labelencoder.fit_transform(combo_df.target)\n\n\n# In[ ]:\n\n\nmodule_url = 'https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/2'\nbert_layer = hub.KerasLayer(module_url, trainable=True)\n\n\n# In[ ]:\n\n\nvocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()\ndo_lower_case = bert_layer.resolved_object.do_lower_case.numpy()\ntokenizer = tokenization.FullTokenizer(vocab_file, do_lower_case)\n\n\n# In[ ]:\n\n\ndef bert_encode(texts, tokenizer, max_len=512):\n all_tokens = []\n all_masks = []\n all_segments = []\n \n for text in texts:\n text = tokenizer.tokenize(text)\n \n text = text[:max_len-2]\n input_sequence = [\"[CLS]\"] + text + [\"[SEP]\"]\n pad_len = max_len - len(input_sequence)\n \n tokens = tokenizer.convert_tokens_to_ids(input_sequence) + [0] * pad_len\n pad_masks = [1] * len(input_sequence) + [0] * pad_len\n segment_ids = [0] * max_len\n \n all_tokens.append(tokens)\n all_masks.append(pad_masks)\n all_segments.append(segment_ids)\n \n return np.array(all_tokens), np.array(all_masks), np.array(all_segments)\n\n\n# In[ ]:\n\n\ndef build_model(bert_layer, max_len=512):\n input_word_ids = tf.keras.Input(shape=(max_len,), dtype=tf.int32, name=\"input_word_ids\")\n input_mask = 
tf.keras.Input(shape=(max_len,), dtype=tf.int32, name=\"input_mask\")\n segment_ids = tf.keras.Input(shape=(max_len,), dtype=tf.int32, name=\"segment_ids\")\n\n pooled_output, sequence_output = bert_layer([input_word_ids, input_mask, segment_ids])\n clf_output = sequence_output[:, 0, :]\n net = tf.keras.layers.Dense(64, activation='relu')(clf_output)\n net = tf.keras.layers.Dropout(0.2)(net)\n net = tf.keras.layers.Dense(32, activation='relu')(net)\n net = tf.keras.layers.Dropout(0.2)(net)\n out = tf.keras.layers.Dense(1, activation=None)(net)\n \n model = tf.keras.models.Model(inputs=[input_word_ids, input_mask, segment_ids], outputs=out)\n model.compile(tf.keras.optimizers.Adam(lr=1e-5), loss='binary_crossentropy', metrics=['accuracy'])\n \n return model\n\n\n# In[ ]:\n\n\nmax_len = 150\ntrain_input = bert_encode(combo_df.cleaned_text.values, tokenizer, max_len=max_len)\ntrain_labels = np.array(combo_df.target.values)\n\n\n# In[ ]:\n\n\nmodel = build_model(bert_layer, max_len=max_len)\nmodel.summary()\n\n\n# In[ ]:\n\n\ncheckpoint = tf.keras.callbacks.ModelCheckpoint('lonely.h5', monitor='val_accuracy', save_best_only=True, verbose=1)\nearlystopping = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=5, verbose=1)\ntrain_history = model.fit(train_input, train_labels, validation_split = 0.25, epochs = 10, callbacks=[checkpoint, earlystopping], verbose = 1,batch_size = 32)\n\n\n# In[ ]:\n\n\nfrom google.colab import drive\ndrive.mount('/content/drive')\n\n\n# In[ ]:\n\n\nmodel.save('/content/drive/MyDrive/UTrack_Models/UTrack_Lonely') \n\n", "id": "1946631", "language": "Python", "matching_score": 7.143367767333984, "max_stars_count": 4, "path": "Training Models/Lonely Model.py" } ]
6.464303
jifilho
[ { "content": "import math\n\nmath=sqrt(9)", "id": "11534924", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "bhaskara.py" }, { "content": "lado=float(input(\"Defina um lado do quadrado: \"))\n\narea=lado**2\nperimetro=lado*4\n\nprint(\"A área do quadrado é\", area, \"e o perímetro é\", perimetro)", "id": "4587446", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "lista/ex-01.py" }, { "content": "def main():\n a=int(12)\n b=int(17)\n c=int(4)\n d=int(-6)\n e=int(8)\n f=int(0)\n\n soma=a+b+c+d+e+f\n\n print(\"o valor final do resultado do código é\", soma)\nmain()", "id": "2033025", "language": "Python", "matching_score": 0.9827288389205933, "max_stars_count": 0, "path": "contador-03.py" }, { "content": "a=1\nb=98237493874\nsoma=a+b\nprint(\"A soma de\", a, \"com\", b, \"é igual a\", soma)", "id": "2939939", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "contador-01.py" }, { "content": "segundos_str=input(\"Por favor, entre com o número de segundos que deseja converter: \")\ntotal_segs=int(segundos_str)\n\ndias=total_segs//86400\ndias_rest=total_segs%86400\nhoras=dias_rest//3600\nsegs_restantes=total_segs%3600\nminutos=segs_restantes//60\nsegs_resantes_final=segs_restantes%60\n\nprint(dias, \"dias, \", horas, \"horas, \", minutos, \"minutos e\", segs_resantes_final, \"segundos. \")", "id": "8642261", "language": "Python", "matching_score": 0.7550125122070312, "max_stars_count": 0, "path": "conversor_segs_hrs.py" }, { "content": "def main():\n a=int(input(\"Digite o primeiro número: \"))\n b=int(input(\"Digite o segundo número: \"))\n\n soma=a+b\n print(\"A soma de \", a, \"+\", b, \"é igual a\", soma)\nmain()", "id": "8896297", "language": "Python", "matching_score": 1.4142135381698608, "max_stars_count": 0, "path": "contador-02.py" }, { "content": "temperaturaFarenheit=input(\"Digite uma temperatura em Farenheit:\")\n\ntemp=float(temperaturaFarenheit)\n\ntemperaturaCelsius=(temp-32)*5/9\n\nprint(\"A temperatura em celsius é\", temperaturaCelsius)", "id": "2248196", "language": "Python", "matching_score": 0.7370932698249817, "max_stars_count": 0, "path": "conversor_temperatura.py" }, { "content": "primeira=float(input(\"Digite sua primeira nota: \"))\nsegunda=float(input(\"Digite sua segunda nota: \"))\nterceira=float(input(\"Digite sua terceira nota: \"))\nquarta=float(input(\"Digite sua quarta nota: \"))\n\nresult=primeira+segunda+terceira+quarta\nresult_final=result/4\n\nprint(\"Sua média aritimética é\", result_final)", "id": "11763244", "language": "Python", "matching_score": 1.2202627658843994, "max_stars_count": 0, "path": "lista/ex-02.py" }, { "content": "peso=float(input(\"Digite seu peso: \"))\naltura=float(input(\"Digite sua altura: \"))\nIMC=peso//altura**2\nprint(\"Seu IMC é\", IMC)\n", "id": "973721", "language": "Python", "matching_score": 1.1000615358352661, "max_stars_count": 0, "path": "contador-de-IMC.py" }, { "content": "nomeDaMae=input(\"Qual o nome da sua mãe? \")\nnomeDoPai=input(\"Qual o nome do seu pai? \")\n\nprint(\"Bom dia Sra.\", nomeDaMae, \"e Sr.\", nomeDoPai)", "id": "364088", "language": "Python", "matching_score": 1.180996060371399, "max_stars_count": 0, "path": "nome_mae_pai.py" } ]
0.868871
mikkame
[ { "content": "import sys\nimport os\nimport shutil\nimport subprocess\nimport platform\nimport dukpy\nimport jsmin\n\ndef compile(build_dir,target_dir, option, license_js, effekseer_core_js, effekseer_src_js, effekseer_js, effekseer_min_js):\n if not os.path.exists(build_dir):\n os.mkdir(build_dir)\n os.chdir(build_dir)\n\n if platform.system() == \"Windows\":\n subprocess.check_call([\"cmd\", \"/c\", \"emcmake\", \"cmake\",\n \"-G\", \"MinGW Makefiles\", option, target_dir])\n subprocess.check_call([\"mingw32-make\"])\n else:\n subprocess.check_call([\"command\", \"emcmake\", \"cmake\", option, target_dir])\n subprocess.check_call([\"make\"])\n\n outfile_js = open(effekseer_js, \"w\")\n outfile_min_js = open(effekseer_min_js, \"w\")\n\n with open(license_js) as infile:\n data = infile.read()\n outfile_js.write(data)\n outfile_min_js.write(data)\n with open(effekseer_core_js) as infile:\n data = infile.read()\n outfile_js.write(data)\n outfile_min_js.write(data)\n with open(effekseer_src_js) as infile:\n data = infile.read()\n data_es5 = dukpy.babel_compile(data)[\"code\"]\n outfile_js.write(data_es5)\n outfile_min_js.write(jsmin.jsmin(data_es5))\n\n os.chdir('../')\n\n\ncompile('build_asmjs',\n '../src/',\n '-DAS_WASM=OFF',\n license_js = os.path.join(\"..\", \"src\", \"js\", \"license.js\"),\n effekseer_core_js = os.path.join(\".\", \"effekseer.core.js\"),\n effekseer_src_js = os.path.join(\"..\", \"src\", \"js\", \"effekseer.src.js\"),\n effekseer_js = os.path.join(\"..\", \"Release\", \"effekseer_asmjs.js\"),\n effekseer_min_js = os.path.join(\"..\", \"Release\", \"effekseer_asmjs.min.js\"))\n\ncompile('build_wasm',\n '../src/',\n '-DAS_WASM=ON',\n license_js = os.path.join(\"..\", \"src\", \"js\", \"license.js\"),\n effekseer_core_js = os.path.join(\".\", \"effekseer.core.js\"),\n effekseer_src_js = os.path.join(\"..\", \"src\", \"js\", \"effekseer.src.js\"),\n effekseer_js = os.path.join(\"..\", \"Release\", \"effekseer.js\"),\n effekseer_min_js = os.path.join(\"..\", \"Release\", \"effekseer.min.js\"))\n\nshutil.copy('build_wasm/effekseer.core.wasm', 'Release/effekseer.wasm')", "id": "3835151", "language": "Python", "matching_score": 0, "max_stars_count": 66, "path": "build.py" } ]
0
renatolotto
[ { "content": "import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport datetime\nimport urllib, json\nimport joblib\nimport googlemaps\nfrom PIL import Image\nfrom datetime import datetime\n\n\n\ncat_features = ['Município','Condições Climáticas']\nnum_features = ['Latitude', 'Longitude','Mes_sin', 'Mes_cos','dias_semana_sin','horario_sin','horario_cos']\n\ndef load_model():\n return joblib.load('Models/classifier_v0.joblib')\n\ndef load_encoder():\n return joblib.load('Models/encoder_v0.joblib')\n\ndef app():#isolar todos os campos gráficos aqui dentro dessa função app\n\n st.set_page_config(layout='wide',page_title = \"Motorcycle Risk Predictor - SP\",page_icon=\":vertical_traffic_light:\")\n\n image = Image.open('logo3.jpg')\n\n st.image(image)#,use_column_width=True\n st.title('Avaliação de risco para motociclistas na grande São Paulo')\n # st.markdown('descrição do app')\n st.sidebar.title('Parâmetros de entrada')\n\n\n\n # inputs sidebar\n input_adress = st.sidebar.text_input(\"Endereço da localização atual\")\n current=st.sidebar.checkbox('Dia e Horário Atual')\n if current == False:\n input_date = st.sidebar.text_input(\"Data escolhida - Formato DD/MM/YYYY\")\n input_time = st.sidebar.text_input(\"Horario escolhido - Formato HH:MM\")\n\n # definindo 2 colunas\n col1, col2 = st.beta_columns(2)\n\n #carregando artefatos\n encoder = load_encoder()\n classifier = load_model()\n\n #do Endereço busca na API -- Lat, Lon, Município ####### Google Maps API ########\n try:\n key='<KEY>'\n gmaps = googlemaps.Client(key)\n geocode_result = gmaps.geocode(input_adress)\n lat = geocode_result[0].get('geometry').get('viewport').get('southwest').get('lat')\n lon = geocode_result[0].get('geometry').get('viewport').get('southwest').get('lng')\n city = geocode_result[0].get('address_components')[3].get('long_name')\n map_data = pd.DataFrame({'lat': [lat], 'lon': [lon]})\n\n with col1:\n st.map(map_data)\n with col2:\n st.write('Informações:')\n st.write('Latitude:',round(lat,5),'Longitude:',round(lon,5))\n st.write('Município: ',city)\n\n except:\n pass\n\n #do horário calcula o horario_sen e horario_cos \n try:\n if current == False:\n list_h =input_time.split(':') # Cria lista com Hora, Minuto\n else:\n list_h = datetime.now().strftime(\"%H:%M\").split(':')\n hor_flt = float(list_h[0]) + float(list_h[1])/60 # Transforma hora e minuto em um float\n horario_sin=np.sin(2.*np.pi*hor_flt/24.) # Transforma o horário em sin.\n horario_cos=np.cos(2.*np.pi*hor_flt/24.) 
# Transforma o horário em sin.\n\n except:\n pass\n\n #da data calcula o mes_sen e mes_cos\n try:\n if current == False:\n list_d =input_date.split('/') # Cria lista com dia, mes e ano\n else:\n list_d = datetime.now().strftime(\"%d/%m/%Y\").split('/')\n dia = float(list_d[0])\n mes = float(list_d[1])\n ano = float(list_d[2])\n Mes_sin=np.sin(2.*np.pi*mes/12)\n Mes_cos=np.cos(2.*np.pi*mes/12)\n except:\n pass\n\n\n\n #da data calcula o dia_sem_sin e dia_sem_cos\n try:\n if current == False:\n date_chosen = datetime.strptime(input_date, '%d/%m/%Y')\n date_chosen = date_chosen.date()\n day_name = date_chosen.strftime(\"%A\")\n else:\n date_chosen = datetime.today()\n date_chosen = date_chosen.date()\n day_name = datetime.today().strftime(\"%A\")#retorna o nome do dia em inglês\n # date_chosen = datetime.date(ano, mes,dia)\n # day_name = datetime.date(int(ano), int(mes),int(dia)).strftime(\"%A\")#retorna o nome do dia em inglês\n if day_name=='Friday':\n day_num=5\n elif day_name=='Thursday':\n day_num=4\n elif day_name=='Wednesday':\n day_num=3\n elif day_name=='Tuesday':\n day_num=2\n elif day_name=='Monday':\n day_num=1\n elif day_name=='Sunday':\n day_num=7\n elif day_name=='Saturday':\n day_num=6\n day_num_norm = day_num/7 #normalizando o dia\n dias_semana_sin=np.sin(2.*np.pi*day_num_norm)\n dias_semana_cos=np.cos(2.*np.pi*day_num_norm)\n \n with col2:\n st.write('Data escolhida:',date_chosen)\n st.write('Hora:',int(list_h[0]),':',int(list_h[1]))\n except:\n pass\n\n\n\n # da data e horário busca na API a condição climática\n try:\n Weather_but = st.sidebar.button('Calcular Risco')\n key1='<KEY>'\n key2='<KEY>'\n url_weather = \"https://weather.visualcrossing.com/VisualCrossingWebServices/rest/services/timeline/{},{}/{}?key={}\".format(lat,lon,date_chosen,key2)\n\n def get_API_value():\n response = urllib.request.urlopen(url_weather)\n data = json.loads(response.read())\n condition = data.get('days')[0].get('hours')[int(list_h[0])].get('conditions')\n precip = data.get('days')[0].get('hours')[int(list_h[0])].get('precip')\n return [condition,precip]\n if Weather_but == True:\n cond_precip= get_API_value()\n if cond_precip[0] =='Partially cloudy':\n condition_por = 'NUBLADO'\n # image2 = Image.open('nublado.png')\n elif cond_precip[0] == 'Overcast':\n condition_por = 'NUBLADO'\n # image2 = Image.open('nublado.png')\n elif cond_precip[0] == 'Rain':\n condition_por = 'CHUVA'\n # image2 = Image.open('chuva.png')\n elif cond_precip[0] == 'Clear':\n condition_por = 'BOM'\n # image2 = Image.open('bom.png')\n elif cond_precip[0] == 'Rain, Partially cloudy':\n condition_por = 'CHUVA'\n # image2 = Image.open('chuva.png')\n elif cond_precip[0] == 'Rain, Overcast':\n condition_por = 'CHUVA'\n # image2 = Image.open('chuva.png') \n\n with col2: \n # st.write('Condição do tempo: ',cond_precip[0])\n st.write('Condição do tempo: ',condition_por) \n except:\n pass\n\n #CRIANDO DF COM TODOS INPUTS e calculando proba\n try:\n df = pd.DataFrame({'Município':[city], 'Latitude':[lat], 'Longitude':[lon], 'Condições Climáticas':[condition_por], 'Mes_sin':[Mes_sin],'Mes_cos':[Mes_cos], 'dias_semana_sin':[dias_semana_sin], 'horario_sin':[horario_sin], 'horario_cos':[horario_cos]})\n df.to_csv('predict.csv',index=False)\n df_proc = encoder.transform(df)\n y = classifier.predict_proba(df_proc)\n y=y[0]\n probability = y[1]# classe positiva\n with col2: \n st.write('Grau de Risco:',probability)\n\n if probability <0.2:\n output_msg = 'Você está sob RISCO BEM BAIXO, porém mantenha sempre a atenção!'\n elif probability 
>=0.2 and probability< 0.40 and horario_cos > 0 : #noite\n output_msg = 'Você está sob RISCO BAIXO, lembre-se de ligar as luzes'\n elif probability >=0.2 and probability< 0.40 and horario_cos < 0 : #dia\n output_msg = 'Mesmo com um RISCO BAIXO, lembre-se de usar capacete e dar a seta'\n elif probability >=0.4 and probability< 0.65:\n output_msg = 'Você está sob RISCO MODERADO, sempre cumpra as leis de trânsito!'\n elif probability >=0.65 and probability< 0.85 and condition_por != 'CHUVA':\n output_msg = 'Você está sob RISCO ALTO, todo cuidado é pouco, vá devagar e sempre use capacete'\n elif probability >=0.65 and probability< 0.85 and condition_por == 'CHUVA':\n output_msg = 'Você está sob RISCO ALTO e com a pista molhada redobre o cuidado'\n elif probability >=0.85 and condition_por != 'CHUVA':\n output_msg = 'Você está sob RISCO MUITO ALTO, preste muita atenção e diminua a velocidade'\n elif probability >=0.85 and condition_por == 'CHUVA':\n output_msg = 'Você está sob RISCO MUITO ALTO, mantenha a distância e diminua a velocidade'\n \n with col2: \n st.subheader(output_msg)\n except:\n pass\n\n with st.sidebar:\n \"\"\"\n #### :desktop_computer: [Source code in Github](https://github.com/renatolotto/Projeto-TERA-Data-Science---Acidentes-Motocicletas)\n \"\"\"\n # \"\"\"\n # ##### Source code: [![Renato](https://img.shields.io/github/stars/renatolotto/Projeto-TERA-Data-Science---Acidentes-Motocicletas?style=social)](https://github.com/renatolotto/Projeto-TERA-Data-Science---Acidentes-Motocicletas)\n # \"\"\"\napp()\n\n", "id": "4104906", "language": "Python", "matching_score": 0, "max_stars_count": 2, "path": "07_app.py" }, { "content": "import streamlit as st\n\nst.title(\"Teste Docker using Heroku\")\nst.write('This app was deployed using Streamlit, Docker and Heroku Container Registry')\n", "id": "1519599", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "app.py" } ]
0
himanshurewar
[ { "content": "#!/usr/bin/env python3\n\nimport os\nimport posixpath\nfrom urllib.parse import quote\n\nreadme = open('README.md', 'w', encoding=\"utf-8\")\n\n# Copy template to README\nwith open('README_nolist.md', 'r') as file:\n for line in file:\n readme.write(line)\n\n# Write title\nreadme.write('\\n### This repository currently contains \"Hello World\" programs in the following languages:\\n')\n\n# List the available languages\nfor directory in sorted(os.listdir('.')):\n if not (directory == '.' or directory == '..' or directory[0] == '.' or os.path.isfile(directory)):\n for filename in sorted(os.listdir(directory), key=lambda s: s.lower()):\n if os.path.isfile(os.path.join(directory, filename)):\n language = os.path.splitext(filename)[0].replace('-', ' ').replace('_', ' ').title()\n readme.write(f'* [{language}]({posixpath.join(quote(directory), quote(filename))})\\n')\n\nreadme.close()\n", "id": "8772138", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "list_langs.py" } ]
0
obieda01
[ { "content": "import matplotlib \nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport sklearn\n\n#load data\noecd_bli=pd.read_csv(\"C:\\Deep Learning Course\\handson-ml-master\\github\\datasets\\lifesat\\oecd_bli_2015.csv\",thousands=',')\ngdp_per_capita=pd.read_csv(\"C:\\Deep Learning Course\\handson-ml-master\\github\\datasets\\lifesat\\gdp_per_capita.csv\"\n ,thousands=',',delimiter='\\t',encoding='latin1',na_values='n/a')\n\n\n#prepare the data \ncountry_stats=prepare_country_stats(oecd_bli,gdp_per_capita)\nx=np.c_[country_stats[\"GPD per Capita\"]]\ny=np.c_[country_stats[\"Life satisfaction\"]]\n\n#visualize the data \ncountry_stats.plot(kind='scatter',x=\"GPD per Capita\",y=\"Life satisfaction\")\nplt.show()\n\n#Select a linear model\nmodel=sklearn.linear_model.LinearRegression()\n\n#Train the model\nmodel.fit(x,y)\n\n\n#Make a prediction for Cyprus\nX_new=[[22587]]\nprint(model.predict(X_new))", "id": "1586852", "language": "Python", "matching_score": 0, "max_stars_count": 3, "path": "Ahmad work/1.GDP.py" } ]
0
joeribekker
[ { "content": "# \n# Generates a JSON file with Corona statistics.\n#\n# Example::\n#\n# {\n# \"charts\": {\"cases\": \"Cases\", ...},\n# \"countries\": {\"netherlands\": \"The Netherlands\", ...},\n# \"datasets\": [{\n# \"country\": \"netherlands\",\n# \"data\": [{\n# \"chart\": \"cases\",\n# \"dates\": [\"Feb 15\", ...],\n# \"values\": [0, 1, 2, ...]\n# }]\n# }]\n# }\n#\nimport json\nimport re\n\nimport requests\n\nBASE_URL = r\"https://www.worldometers.info/coronavirus/country/{country}/\"\nCOUNTRIES = {\n # key : Country slug used in the BASE_URL\n # value : Pretty country name\n \"netherlands\": \"The Netherlands\",\n \"italy\": \"Italy\",\n \"germany\": \"Germany\",\n \"belgium\": \"Belgium\",\n \"france\": \"France\",\n}\nCHARTS = {\n # key : Chart ID on the website\n # value : Pretty chart name\n \"coronavirus-deaths-linear\": \"Deaths\",\n \"coronavirus-cases-linear\": \"Cases\",\n}\n\n\ndef parse_data(document, chart_id):\n match = re.search(\n chart_id + r\"'([^;]*)\\}\\);\", doc.content.decode(\"utf8\"), re.DOTALL\n )\n if not match:\n raise Exception(f\"Cannot find graph for cases.\")\n snippet = match.group(0)\n\n match = re.search(r\"categories: \\[([^\\]]*)\\]\", snippet)\n if not match:\n raise Exception(f\"Cannot find dates in graph for cases.\")\n dates = [el.strip('\"') for el in match.group(1).split(\",\")]\n\n match = re.search(r\"data: \\[([^\\]]*)\\]\", snippet)\n if not match:\n raise Exception(f\"Cannot find data in graph for cases.\")\n values = list(map(int, match.group(1).split(\",\")))\n\n if len(values) != len(dates):\n raise Exception(f\"Inconsistent data found.\")\n\n return {\"chart\": chart_id, \"dates\": dates, \"values\": values}\n\n\nif __name__ == \"__main__\":\n\n print(\"Downloading...\")\n\n # General JSON-layout\n result = {\n \"charts\": CHARTS,\n \"countries\": COUNTRIES,\n \"datasets\": [],\n }\n\n for country_slug, country_name in COUNTRIES.items():\n print(f\"{country_name}... \", end=\"\")\n\n chart_data = []\n\n try:\n url = BASE_URL.format(country=country_slug)\n\n doc = requests.get(url)\n if doc.status_code != 200:\n raise Exception(f\"Could not download from URL: {url}\")\n\n for chart_id, chart_name in CHARTS.items():\n chart_data.append(parse_data(doc, chart_id))\n except Exception as e:\n print(f\"Error: {e}\")\n continue\n\n result[\"datasets\"].append({\"country\": country_slug, \"data\": chart_data})\n print(\"OK\")\n\n with open(f\"data/stats.json\", \"w\", encoding=\"utf-8\") as f:\n f.write(json.dumps(result))\n\n print(\"Done.\")\n", "id": "12535917", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "download.py" } ]
0
troydhanson
[ { "content": "from distutils.core import setup, Extension\n\nmodule1 = Extension('kvspool',\n sources = ['kvspool.c'], \n include_dirs=['../include'],\n library_dirs = ['../src'], \n libraries = ['kvspool'])\n\nsetup (name = 'kvspool',\n version = '1.0',\n description = 'Python interface to kvspool',\n ext_modules = [module1])\n", "id": "11437178", "language": "Python", "matching_score": 0, "max_stars_count": 16, "path": "bindings/kvpy/build.py" }, { "content": "#!/usr/bin/python\n\n# so we don't have to install kvpy.so (or we could \n# have set PYTHONPATH to include its dir beforehand)\nimport sys\nsys.path.append(\"../build/lib.linux-i686-2.6\")\n\nimport kvspool\nkv = kvspool.Kvspool(\"spool\")\nd = {\"key\":\"value\",\"key2\":\"value2\"}\nkv.write(d)\n", "id": "4243129", "language": "Python", "matching_score": 3.314923048019409, "max_stars_count": 16, "path": "bindings/kvpy/tests/write.py" }, { "content": "#!/usr/bin/python\n\n# so we don't have to install kvpy.so (or we could \n# have set PYTHONPATH to include its dir beforehand)\nimport sys\nsys.path.append(\"../build/lib.linux-i686-2.6\")\n\nfrom datetime import datetime;\n\nimport kvspool\nkv = kvspool.Kvspool(\"/tmp/spool\");\n\nd = {\"key\":\"value\",\"key2\":\"value2\"}\n\n# write test\nt1 = datetime.now()\nfor i in range(100000):\n kv.write(d);\nt2 = datetime.now()\nt3 = t2 - t1\nprint \"write:\", int(100 / (t3.seconds + (t3.microseconds/1000000.0))), \"kfps\"\n\n# read test\nt1 = datetime.now()\nfor i in range(100000):\n d = kv.read()\nt2 = datetime.now()\nt3 = t2 - t1\nprint \"read: \", int(100 / (t3.seconds + (t3.microseconds/1000000.0))), \"kfps\"\n\n", "id": "1611936", "language": "Python", "matching_score": 3.432877540588379, "max_stars_count": 16, "path": "bindings/kvpy/tests/speed.py" }, { "content": "#!/usr/bin/python\n\n# so we don't have to install kvpy.so (or we could \n# have set PYTHONPATH to include its dir beforehand)\nimport sys\nsys.path.append(\"../build/lib.linux-i686-2.6\")\n\nimport kvspool\nkv = kvspool.Kvspool(\"spool\")\nd = kv.read()\nfor key in d.keys():\n print \"key: \" + key + \" value: \" + d[key]\n", "id": "9617076", "language": "Python", "matching_score": 3.432877540588379, "max_stars_count": 16, "path": "bindings/kvpy/tests/read.py" }, { "content": "#!/usr/bin/python\n\n# so we don't have to install kvpy.so (or we could \n# have set PYTHONPATH to include its dir beforehand)\nimport sys\nsys.path.append(\"../build/lib.linux-i686-2.6\")\n\nimport kvspool\nkv = kvspool.Kvspool(\"spool\")\nkv.blocking = 0\nwhile True:\n d = kv.read()\n if (d == None):\n break\n for key in d.keys():\n print \"key: \" + key + \" value: \" + d[key]\n", "id": "8393474", "language": "Python", "matching_score": 3.432877540588379, "max_stars_count": 16, "path": "bindings/kvpy/tests/read_all.py" } ]
3.432878
FFeng6
[ { "content": "# This file is licensed under the terms of the MIT license. See the file\r\n# \"LICENSE\" in the project root for more information.\r\n#\r\n# This module was \"original\" developed by <NAME> at the assistant chair for\r\n# Sustainable Architecture and Building Technologies (Suat) at the Institute of\r\n# Technology in Architecture, ETH Zuerich. See http://suat.arch.ethz.ch for\r\n# more information.\r\n\r\n##### modification#####\r\n# This module was then modified by <NAME>, who is Ph.D. student at Texas A&M. \r\n\r\n\r\n'''\r\nesoreader.py\r\n\r\nA python module for reading \\*.eso files generated by EnergyPlus\r\n\r\nThe eso files generated by EnergyPlus contains a data dictionary, which\r\ndescribes the values reported by EnergyPlus. The list of values reported\r\ndepends on the simulation input file, specifically the Output:Variable\r\nobjects. EnergyPlus can output the same variable at different\r\nfrequencies and for different \"keys\", which are for instance surfaces or\r\nequipment names.\r\n\r\nFollowing the data dictionary is a list of output variable values for\r\neach of the configured variable coordinates.\r\n\r\nThe output of the esoreader module is therefore a data dictionary object\r\nthat contains a mapping of variable \"coordinates\" (grouping of reporting\r\nfrequency, key and variable name) to the index used by EnergyPlus and a\r\ndata object, which essentially just maps that index to the timeseries\r\ndata.\r\n\r\nExample\r\n=======\r\n\r\nNew interface:\r\n\r\n import esoreader\r\n PATH_TO_ESO = r'/Path/To/EnergyPlus/Output/eplusout.eso'\r\n eso = esoreader.read_from_path(PATH_TO_ESO)\r\n df = eso.to_frame('total heat loss energy') # pandas.DataFrame\r\n\r\n\r\nOld interface: (still works)\r\n::\r\n\r\n import esoreader\r\n\r\n PATH_TO_ESO = r'/Path/To/EnergyPlus/Output/eplusout.eso'\r\n dd, data = esoreader.read(PATH_TO_ESO)\r\n frequency, key, variable = dd.find_variable(\r\n 'Zone Ventilation Total Heat Loss Energy')[0]\r\n idx = dd.index[frequency, key, variable]\r\n time_series = data[idx]\r\n'''\r\n\r\n\r\ndef read(eso_file_path):\r\n \"\"\"Read in an .eso file and return the data dictionary and a dictionary\r\n representing the data.\r\n NOTE: this function is here for backward compatibilty reasons. Use\r\n read_from_path() instead to obtain an EsoFile object.\r\n \"\"\"\r\n eso = read_from_path(eso_file_path)\r\n return eso.dd, eso.data\r\n\r\n\r\ndef read_from_path(eso_file_path):\r\n \"\"\"\r\n read in a .eso file and return an EsoFile object that can be used\r\n to read in pandas DataFrame and Series objects.\r\n \"\"\"\r\n with open(eso_file_path, 'r') as eso_file:\r\n eso = EsoFile(eso_file)\r\n return eso\r\n\r\n\r\nclass DataDictionary(object):\r\n def __init__(self, version=None, timestamp=None):\r\n '''\r\n variables = dict of ids, int => [reporting_frequency,\r\n key, variable, unit]\r\n\r\n index = dict {(key, variable, reporting_frequency) => id)}\r\n '''\r\n self.version = version\r\n self.timestamp = timestamp\r\n self.year = 2019 # 2022 is just a default value\r\n self.variables = {}\r\n self.index = {}\r\n\r\n def build_index(self):\r\n \"\"\"builds a reverse index for finding ids.\r\n \"\"\"\r\n for id, value in self.variables.items():\r\n reporting_frequency, key, variable, unit = value\r\n self.index[reporting_frequency, key, variable] = id\r\n\r\n def find_variable(self, search):\r\n \"\"\"returns the coordinates (timestep, key, variable_name) in the\r\n data dictionary that can be used to find an index. 
The search is case\r\n insensitive.\"\"\"\r\n return [(timestep, key, variable_name)\r\n for timestep, key, variable_name in self.index.keys()\r\n if search.lower() in variable_name.lower()]\r\n\r\n\r\nclass EsoFile(object):\r\n\r\n def __init__(self, eso_file):\r\n self.eso_file = eso_file\r\n self.dd = self._read_data_dictionary()\r\n self.dd.build_index()\r\n self.data = self._read_data()\r\n \r\n\r\n def find_variable(self, search, key=None, frequency='TimeStep'):\r\n \"\"\"returns the coordinates (timestep, key, variable_name) in the\r\n data dictionary that can be used to find an index. The search is case\r\n insensitive and need only be specified partially.\"\"\"\r\n variables = self.dd.find_variable(search)\r\n if frequency:\r\n variables = [v for v in variables\r\n if v[0].lower() == frequency.lower()]\r\n if key:\r\n variables = [v for v in variables\r\n if v[1].lower() == key.lower()]\r\n return variables\r\n\r\n def to_frame(self, search, key=None, frequency= None, index=None, use_key_for_columns=False):\r\n \"\"\"\r\n creates a pandas DataFrame objects with a column for every variable\r\n that matches the search pattern and key. An None key matches all keys.\r\n NOTE: The frequency *has* to be the same for all variables selected.\r\n (uses find_variable to select the variables)\r\n\r\n :key: default value is \"None\"\r\n : frequency: default value is \"*\", which means all matched variables will be returned\r\n \"\"\"\r\n from pandas import DataFrame\r\n variables = self.find_variable(search, key=key, frequency=frequency)\r\n\r\n data = {}\r\n data['datetime'] = self.data[self.dd.index[variables[0]]]['dtime']\r\n if use_key_for_columns:\r\n for v in variables:\r\n data[v[1]] = self.data[self.dd.index[v]]['data'] \r\n else:# use variable name as column name\r\n for v in variables:\r\n data[v[2]] = self.data[self.dd.index[v]]['data']\r\n data['dayType'] = self.data[self.dd.index[variables[0]]]['dayType'] \r\n\r\n df = DataFrame(data)\r\n \r\n if index is not None:\r\n df.index = index\r\n return df\r\n\r\n def _read_reporting_frequency(self, line):\r\n reporting_frequency = None\r\n if '! ' in line:\r\n line = line.split('! ')[0] #'! ',\r\n if ' !' 
in line:\r\n line, reporting_frequency = line.split(' !') #' !'\r\n # RunPeriod contains more stuff (\" [Value,Min,Month,Day,Hour,\r\n # Minute, Max,Month,Day,Hour,Minute]\")split it off\r\n reporting_frequency = reporting_frequency.split()[0]\r\n return line, reporting_frequency\r\n\r\n def _read_variable_unit(self, variable):\r\n unit = None\r\n if '[' in variable:\r\n variable, unit = variable.split('[')\r\n unit = unit[:-1] # remove ']' at the end\r\n variable = variable.strip()\r\n return variable, unit\r\n\r\n def _read_data_dictionary(self):\r\n \"\"\"parses the head of the eso_file, returning the data dictionary.\r\n the file object eso_file is advanced to the position needed by\r\n read_data.\r\n \"\"\"\r\n version, timestamp = [s.strip() for s\r\n in self.eso_file.readline().split(',')[-2:]]\r\n dd = DataDictionary(version, timestamp)\r\n line = self.eso_file.readline().strip()\r\n\r\n while line != 'End of Data Dictionary':\r\n line, reporting_frequency = self._read_reporting_frequency(line)\r\n if reporting_frequency:\r\n fields = [f.strip() for f in line.split(',')]\r\n if len(fields) >= 4:\r\n id, nfields, key, variable = fields[:4]\r\n else:\r\n id, nfields, variable = fields[:3]\r\n key = None\r\n variable, unit = self._read_variable_unit(variable)\r\n dd.variables[int(id)] = [reporting_frequency, key,\r\n variable, unit]\r\n else:\r\n # ignore the lines that aren't report variables\r\n pass\r\n line = self.eso_file.readline().strip()\r\n dd.ids = set(dd.variables.keys())\r\n return dd\r\n\r\n def _read_data(self):\r\n import datetime\r\n '''parse the data from the .eso file returning,\r\n NOTE: eso_file should be the same file object that was passed to\r\n read_data_dictionary(eso_file) to obtain dd.'''\r\n\r\n '''\r\n Modified by Fan. 
\r\n '''\r\n data = {} # id => [value]\r\n for id in self.dd.variables.keys():\r\n data[id] = {'dtime':[],\r\n 'data':[],\r\n 'dayType':[]}\r\n \r\n for line in self.eso_file:\r\n if line.startswith('End of Data'):\r\n break\r\n fields = [f.strip() for f in line.split(',')]\r\n id = int(fields[0])\r\n if id == 2: # this is the timestamp for all following outputs\r\n dtime = datetime.datetime(self.dd.year,int(fields[2]),int(fields[3]),int(float(fields[5]))-1,int(float(fields[6])))\r\n dayType = fields[-1]\r\n continue\r\n\r\n if id not in self.dd.ids:\r\n # skip entries that are not output:variables\r\n continue\r\n data[id]['dtime'].append(dtime)\r\n data[id]['data'].append(float(fields[1]))\r\n data[id]['dayType'].append(dayType)\r\n return data\r\n", "id": "1247174", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "esoreader.py" }, { "content": "from distutils.core import setup\nimport os\n\n\nlong_description = 'see the GitHub repository for more information: https://github.com/architecture-building-systems/esoreader' # noqa\nif os.path.exists('README.rst'):\n long_description = open('README.rst').read()\n\n\nsetup(\n name='esoreader',\n py_modules=['esoreader'], # this must be the same as the name above\n version='1.2.3',\n description='A module for parsing EnergyPlus *.eso files',\n long_description=long_description,\n author='<NAME>',\n author_email='<EMAIL>',\n url='https://github.com/architecture-building-systems/esoreader',\n download_url='https://github.com/architecture-building-systems/esoreader/archive/1.2.3.tar.gz', # noqa\n keywords=['simulation', 'parsing', 'energyplus', 'pandas'], # arbitrary keywords # noqa\n classifiers=[],\n)\n", "id": "12476805", "language": "Python", "matching_score": 0, "max_stars_count": 21, "path": "setup.py" } ]
0
ElaineYao
[ { "content": "#!/usr/bin/python\n\n\"\"\" Neural Network.\n\nA 2-Hidden Layers Fully Connected Neural Network (a.k.a Multilayer Perceptron)\nimplementation with TensorFlow. This example is using the MNIST database\nof handwritten digits (http://yann.lecun.com/exdb/mnist/).\n\nLinks:\n [MNIST Dataset](http://yann.lecun.com/exdb/mnist/).\n\nAuthor: <NAME>\nProject: https://github.com/aymericdamien/TensorFlow-Examples/\n\"\"\"\n\nfrom __future__ import print_function\n\n# Import MNIST data\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n\nimport tensorflow as tf\nimport TensorFI as ti\n\nimport math\nimport sys\n\n# Parameters\nlearning_rate = 0.1\nnum_steps = 500\nbatch_size = 128\ndisplay_step = 100\n\n# Network Parameters\nn_hidden_1 = 256 # 1st layer number of neurons\nn_hidden_2 = 256 # 2nd layer number of neurons\nn_hidden_3 = 256 # 3rd layer number of neurons\nn_hidden_4 = 256 \nnum_input = 784 # MNIST data input (img shape: 28*28)\nnum_classes = 10 # MNIST total classes (0-9 digits)\n\n# tf Graph input\nX = tf.placeholder(\"float\", [None, num_input])\nY = tf.placeholder(\"float\", [None, num_classes])\n\n# Store layers weight & bias\nweights = {\n 'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])),\n 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),\n 'h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3])),\n 'h4': tf.Variable(tf.random_normal([n_hidden_3, n_hidden_4])),\n 'out': tf.Variable(tf.random_normal([n_hidden_4, num_classes]))\n}\nbiases = {\n 'b1': tf.Variable(tf.random_normal([n_hidden_1])),\n 'b2': tf.Variable(tf.random_normal([n_hidden_2])),\n 'b3': tf.Variable(tf.random_normal([n_hidden_3])),\n 'b4': tf.Variable(tf.random_normal([n_hidden_4])),\n 'out': tf.Variable(tf.random_normal([num_classes]))\n}\n\n\n# Create model\ndef neural_net(x):\n # Hidden fully connected layer with 256 neurons\n layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])\n # Hidden fully connected layer with 256 neurons\n layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])\n layer_3 = tf.add(tf.matmul(layer_2, weights['h3']), biases['b3'])\n layer_4 = tf.add(tf.matmul(layer_3, weights['h4']), biases['b4'])\n # Output fully connected layer with a neuron for each class\n out_layer = tf.matmul(layer_4, weights['out']) + biases['out']\n return out_layer\n\n# Construct model\nlogits = neural_net(X)\nprediction = tf.nn.softmax(logits)\n\n# Define loss and optimizer\nloss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n logits=logits, labels=Y))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\ntrain_op = optimizer.minimize(loss_op)\n\n# Evaluate model\ncorrect_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n# Initialize the variables (i.e. 
assign their default value)\ninit = tf.global_variables_initializer()\n\n# Start training\nwith tf.Session() as sess:\n\n # Run the initializer\n sess.run(init)\n\n print(\"Training now ...\")\n\n for step in range(1, num_steps+1):\n batch_x, batch_y = mnist.train.next_batch(batch_size)\n \n\t# Run optimization op (backprop)\n sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})\n if step % display_step == 0 or step == 1:\n # Calculate batch loss and accuracy\n loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,\n Y: batch_y})\n print(\"Step \" + str(step) + \", Minibatch Loss= \" + \\\n \"{:.4f}\".format(loss) + \", Training Accuracy= \" + \\\n \"{:.3f}\".format(acc))\n\n print(\"Training finished! Testing now ...\")\n\n print(\"Accuracy (with no injections):\", \\\n accuracy.eval({X: mnist.test.images[:256], Y: mnist.test.labels[:256]}))\n\n # Add the fault injection code here to instrument the graph\n fi = ti.TensorFI(sess, name = \"Neural Network 4\", logLevel = 50, disableInjections = False)\n\n print(\"Accuracy (with injections):\", \\\n accuracy.eval({X: mnist.test.images[:256], Y: mnist.test.labels[:256]}))", "id": "3798905", "language": "Python", "matching_score": 3.9750609397888184, "max_stars_count": 35, "path": "testSuite/NOT_INCL_YET_injections_nn_mnist.py" }, { "content": "import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport numpy as np\nimport time\n\nimport TensorFI as ti\n\nmnist_data = input_data.read_data_sets(\"MNIST_data\", one_hot=True)\n\ndef weights_init(shape):\n return tf.Variable(tf.truncated_normal(shape, stddev=0.05))\n\ndef bias_init(shape, bias_init=0.05):\n return tf.Variable(tf.constant(bias_init, shape=shape))\n\ndef fully_connected_layer(input, input_shape, output_shape, activation=tf.nn.relu):\n weights = weights_init([input_shape, output_shape])\n bias = bias_init([output_shape])\n layer = tf.add(tf.matmul(input, weights), bias) #x*w + b\n \n if activation != None:\n return activation(layer)\n else:\n return layer\n\ndef highway_fc_layer(input, hidden_layer_size, carry_b = -2.0, activation=tf.nn.relu):\n #Step 1. Define weights and biases for the activation gate\n weights_normal = weights_init([hidden_layer_size, hidden_layer_size])\n bias_normal = bias_init([hidden_layer_size])\n \n #Step 2. Define weights and biases for the transform gate\n weights_transform = weights_init([hidden_layer_size, hidden_layer_size])\n bias_transform = bias_init(shape=[hidden_layer_size], bias_init=carry_b)\n \n #Step 3. calculate activation gate\n H = activation(tf.matmul(input, weights_normal) + bias_normal, name=\"Input_gate\")\n #Step 4. calculate transform game\n T = tf.nn.sigmoid(tf.matmul(input, weights_transform) +bias_transform, name=\"T_gate\")\n #Step 5. calculate carry get (1 - T)\n C = tf.subtract(1.0, T, name='C_gate')\n # y = (H * T) + (x * C)\n #Final step 6. 
campute the output from the highway fully connected layer\n y = tf.add(tf.multiply(H, T), tf.multiply(input, C), name='output_highway')\n return y\n\n#defining hyperparams\ninput_shape = 784 #28x28x1 <- Number of pixels of MNIST image\n\nhidden_size = 50 # This is number of neurons used at EVERY hidden highway layer, you can test with this number\n #but becuase we have highway (deep) network this number doesn't have to be very large\n\noutput_size = 10 # number of neurons at the output layer, 10 because we have 10 classes\n\nnumber_of_layers = 18 # this is another hyperparam to care about in highway networks, play with it \n\ncary_bias = -20.0 # This is cary bias used at transform gate inside highway layer\n\nepochs = 40 # How many times are we going to run through whole dataset\n\nbatch_size = 64 # How many data samples to feed to a network at onces\n\nlearning_rate = 0.01\n\n#Defining inputs to tensorflow graph, one is for images - inputs, and another one is for classes - targets\ninputs = tf.placeholder(tf.float32, shape=[None, input_shape], name='Input')\ntargets = tf.placeholder(tf.float32, shape=[None, output_size], name='output')\n\n#Defining HIGHWAY NETWORK\nprev_layer = None\noutput = None\nfor layer in range(number_of_layers):\n \n if layer == 0:\n #This for input layer\n prev_layer = fully_connected_layer(inputs, input_shape, hidden_size)\n elif layer == number_of_layers-1:\n #This if for output layer\n output = fully_connected_layer(prev_layer, hidden_size, output_size, activation=None)\n else:\n # for any layer between input and output layer\n prev_layer = highway_fc_layer(prev_layer, hidden_size, carry_b=cary_bias)\n\n#Defining error/cost/loss function and optimizier\ncost = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=targets)) #this is standard cross entropy loss\noptimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n\n#This is used only for testing\ny_pred = tf.nn.softmax(output)\ny_pred_scores = tf.argmax(y_pred, 1)\ny_true = tf.argmax(targets, 1)\n\n#Getting accuracy\ncorrect_prediction = tf.equal(y_pred_scores, y_true)\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\nsaver = tf.train.Saver()\n\n# if you make some mistake or change the structure of your network, good practice is to reset default graph.\n# tf.reset_default_graph()\n\nsession = tf.Session()\n\nsession.run(tf.global_variables_initializer())\n\ndef optimize():\n \n for i in range(epochs):\n epoch_cost = []\n epoch_time = time.time()\n for ii in range(mnist_data.train.num_examples//batch_size):\n batch = mnist_data.train.next_batch(batch_size)\n imgs = batch[0]\n labs = batch[1]\n \n c, _ = session.run([cost, optimizer], feed_dict={inputs:imgs, targets:labs})\n\n epoch_cost.append(c)\n print(\"Epoch: {}/{}\".format(i+1, epochs), \" | Current loss: {}\".format(np.mean(epoch_cost)),\n \" | Epoch time: {:.2f}s\".format(time.time() - epoch_time))\n print(\"test accuracy %g\" % session.run(accuracy ,feed_dict={ inputs: mnist_data.test.images, targets: mnist_data.test.labels }))\n saver.save(session, './fcn')\n\ndef test_model():\n saver.restore(session, tf.train.latest_checkpoint('.'))\n return session.run(accuracy, feed_dict={inputs:mnist_data.test.images[:256], \n targets:mnist_data.test.labels[:256]})\n\noptimize()\n\nprint (\"Accuracy is: \", test_model())\n\nfi = ti.TensorFI(session, logLevel = 100, name = \"fcn\", disableInjections=False)\n\nprint (\"Accuracy is: \", test_model())", "id": "8427382", "language": "Python", 
"matching_score": 4.202256202697754, "max_stars_count": 35, "path": "testSuite/NOT_INCL_YET_injections_highwayfcn_mnist.py" }, { "content": "#!/usr/bin/python\n'''\nLeNet CNN for MNIST\nProject: https://github.com/sujaybabruwad/LeNet-in-Tensorflow\n'''\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport numpy as np\nimport random\nfrom sklearn.utils import shuffle\nimport tensorflow as tf\nimport TensorFI as ti\nimport os\n\nmnist = input_data.read_data_sets(\"MNIST_data/\", reshape=False)\nX_train, y_train = mnist.train.images, mnist.train.labels\nX_validation, y_validation = mnist.validation.images, mnist.validation.labels\nX_test, y_test = mnist.test.images[:256], mnist.test.labels[:256]\n\nassert(len(X_train) == len(y_train))\nassert(len(X_validation) == len(y_validation))\nassert(len(X_test) == len(y_test))\n\nprint(\"Image Shape: {}\".format(X_train[0].shape))\nprint(\"Training Set: {} samples\".format(len(X_train)))\nprint(\"Validation Set: {} samples\".format(len(X_validation)))\nprint(\"Test Set: {} samples\".format(len(X_test)))\n\n# Pad images with 0s\nX_train = np.pad(X_train, ((0,0),(2,2),(2,2),(0,0)), 'constant')\nX_validation = np.pad(X_validation, ((0,0),(2,2),(2,2),(0,0)), 'constant')\nX_test = np.pad(X_test, ((0,0),(2,2),(2,2),(0,0)), 'constant')\n \nprint(\"Updated Image Shape: {}\".format(X_train[0].shape))\n\nindex = random.randint(0, len(X_train))\nimage = X_train[index].squeeze()\n\nprint(y_train[index])\n\nX_train, y_train = shuffle(X_train, y_train)\n\nEPOCHS = 20\nBATCH_SIZE = 128\n\ndef LeNet(x): \n # Hyperparameters\n mu = 0\n sigma = 0.1\n layer_depth = {\n 'layer_1' : 6,\n 'layer_2' : 16,\n 'layer_3' : 120,\n 'layer_f1' : 84\n }\n\n\t# Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.\n conv1_w = tf.Variable(tf.truncated_normal(shape = [5,5,1,6],mean = mu, stddev = sigma))\n conv1_b = tf.Variable(tf.zeros(6))\n conv1 = tf.nn.conv2d(x,conv1_w, strides = [1,1,1,1], padding = 'VALID') + conv1_b \n # Activation.\n conv1 = tf.nn.relu(conv1)\n\n # Pooling. Input = 28x28x6. Output = 14x14x6.\n pool_1 = tf.nn.max_pool(conv1,ksize = [1,2,2,1], strides = [1,2,2,1], padding = 'VALID')\n \n # Layer 2: Convolutional. Output = 10x10x16.\n conv2_w = tf.Variable(tf.truncated_normal(shape = [5,5,6,16], mean = mu, stddev = sigma))\n conv2_b = tf.Variable(tf.zeros(16))\n conv2 = tf.nn.conv2d(pool_1, conv2_w, strides = [1,1,1,1], padding = 'VALID') + conv2_b\n # Activation.\n conv2 = tf.nn.relu(conv2)\n\n # Pooling. Input = 10x10x16. Output = 5x5x16.\n pool_2 = tf.nn.max_pool(conv2, ksize = [1,2,2,1], strides = [1,2,2,1], padding = 'VALID') \n\n pool_2_shape = pool_2.get_shape().as_list()\n # Flatten. Input = 5x5x16. Output = 400.\n fc1 = tf.reshape(pool_2, [-1, pool_2_shape[1]*pool_2_shape[2]*pool_2_shape[3]])\n # fc1 = tf.contrib.layers.flatten(pool_2)\n\n # Layer 3: Fully Connected. Input = 400. Output = 120.\n fc1_w = tf.Variable(tf.truncated_normal(shape = (400,120), mean = mu, stddev = sigma))\n fc1_b = tf.Variable(tf.zeros(120))\n fc1 = tf.matmul(fc1,fc1_w) + fc1_b\n \n # Activation.\n fc1 = tf.nn.relu(fc1)\n\n # Layer 4: Fully Connected. Input = 120. Output = 84.\n fc2_w = tf.Variable(tf.truncated_normal(shape = (120,84), mean = mu, stddev = sigma))\n fc2_b = tf.Variable(tf.zeros(84))\n fc2 = tf.matmul(fc1,fc2_w) + fc2_b\n # Activation.\n fc2 = tf.nn.relu(fc2)\n\n # Layer 5: Fully Connected. Input = 84. 
Output = 10.\n fc3_w = tf.Variable(tf.truncated_normal(shape = (84,10), mean = mu , stddev = sigma))\n fc3_b = tf.Variable(tf.zeros(10))\n logits = tf.matmul(fc2, fc3_w) + fc3_b\n return logits\n\nx = tf.placeholder(tf.float32, (None, 32, 32, 1))\ny = tf.placeholder(tf.int32, (None))\none_hot_y = tf.one_hot(y, 10)\n\nrate = 0.001\n\nlogits = LeNet(x)\ncross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits = logits, labels = one_hot_y)\nloss_operation = tf.reduce_mean(cross_entropy)\noptimizer = tf.train.AdamOptimizer(learning_rate = rate)\ntraining_operation = optimizer.minimize(loss_operation)\n\ncorrect_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))\naccuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\nsaver = tf.train.Saver()\n\ndef evaluate(X_data, y_data):\n num_examples = len(X_data)\n total_accuracy = 0\n sess = tf.get_default_session()\n for offset in range(0, num_examples, BATCH_SIZE):\n batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]\n accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y})\n total_accuracy += (accuracy * len(batch_x))\n return total_accuracy / num_examples\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n num_examples = len(X_train)\n print(\"Training...\")\n print()\n for i in range(EPOCHS):\n X_train, y_train = shuffle(X_train, y_train)\n for offset in range(0, num_examples, BATCH_SIZE):\n end = offset + BATCH_SIZE\n batch_x, batch_y = X_train[offset:end], y_train[offset:end]\n sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})\n validation_accuracy = evaluate(X_validation, y_validation)\n print(\"EPOCH {} ...\".format(i+1))\n print(\"Validation Accuracy = {:.3f}\".format(validation_accuracy))\n saver.save(sess, './lenet')\n print(\"Model saved\")\n\nwith tf.Session() as sess:\n saver.restore(sess, tf.train.latest_checkpoint('.'))\n\n test_accuracy = evaluate(X_test, y_test)\n print(\"Accuracy (with no injections): {:.3f}\".format(test_accuracy))\n\n # Add the fault injection code here to instrument the graph\n fi = ti.TensorFI(sess, logLevel = 10, name = \"lenet\", disableInjections=False)\n test_accuracy = evaluate(X_test, y_test)\n print(\"Accuracy (with injections): {:.3f}\".format(test_accuracy))", "id": "9737104", "language": "Python", "matching_score": 3.9642562866210938, "max_stars_count": 35, "path": "testSuite/NOT_INCL_YET_injections_lenet_mnist.py" }, { "content": "#!/usr/bin/python\n# MNIST dataset recognition using Keras - example taken from the Keras tutorial\n# https://blog.keras.io/keras-as-a-simplified-interface-to-tensorflow-tutorial.html#calling-keras-layers-on-tensorflow-tensors\n\nfrom __future__ import print_function\nimport sys\nimport imp\n\nimport tensorflow as tf\nimport TensorFI as ti\n\n# Remove deprecated warnings from TensorFlow\ntf.logging.set_verbosity(tf.logging.FATAL)\nsess = tf.Session()\n\nfrom keras import backend as K\nK.set_session(sess)\n\n# this placeholder will contain our input digits, as flat vectors\nimg = tf.placeholder(tf.float32, shape=(None, 784))\n\nfrom keras.layers import Dense\n\n# Keras layers can be called on TensorFlow tensors:\nx = Dense(128, activation='relu')(img) # fully-connected layer with 128 units and ReLU activation\nx = Dense(128, activation='relu')(x)\npreds = Dense(10, activation='softmax')(x) # output layer with 10 units and a softmax activation\n\n# Place-holder for the labels and loss function\nlabels = tf.placeholder(tf.float32, 
shape=(None, 10))\n\nfrom keras.objectives import categorical_crossentropy\nloss = tf.reduce_mean(categorical_crossentropy(labels, preds))\n\n# Model training with MNIST data\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist_data = input_data.read_data_sets('MNIST_data', one_hot=True)\n\ntrain_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss)\n\n# Initialize all variables\ninit_op = tf.global_variables_initializer()\nsess.run(init_op)\n\n# Run training loop\nwith sess.as_default():\n for i in range(100):\n batch = mnist_data.train.next_batch(50)\n train_step.run(feed_dict={img: batch[0],\n labels: batch[1]})\n\n# Run the model and print the accuracy\nfrom keras.metrics import categorical_accuracy as accuracy\n\nacc_value = accuracy(labels, preds)\nwith sess.as_default():\n print( \"Accuracy = \", acc_value.eval(feed_dict={img: mnist_data.test.images,\n labels: mnist_data.test.labels}) )\nprint(\"Done running model\");\n\n# Instrument the graph with TensorFI\nfi = ti.TensorFI(sess, logLevel = 100)\nfi.turnOnInjections();\nwith sess.as_default():\n print( \"Accuracy = \", acc_value.eval(feed_dict={img: mnist_data.test.images,\n labels: mnist_data.test.labels}) )\nprint(\"Done running instrumented model\");\n", "id": "8382492", "language": "Python", "matching_score": 0.23113685846328735, "max_stars_count": 35, "path": "Tests/keras-mnist.py" }, { "content": "#!/usr/bin/python\n# PrintGraph library for iterating over and pretty-printing a TensorFlow graph\n\ndef getTensor(tensor):\n\t\"Returns a string representation of a tensor\"\n\tresult = [\"{\"]\n\tresult.append( tensor.name )\n\tresult.append( tensor.dtype.name )\n\tif tensor.shape:\n\t\tshapeList = tensor.shape.as_list()\n\t\tshapeStr = \"\"\n\t\tfor shape in shapeList:\n\t\t\tshapeStr = shapeStr + str(shape)\n \t\tresult.append(shapeStr)\n\tresult.append(\"}\")\n\treturn \" \".join(result)\n\n\ndef getOperation(op):\n\t\"Returns a specific operation as a string\"\n\topAttr = [\"{\"]\n\topAttr.append(\"type: \" + op.type)\n\topAttr.append(\"name: \" + op.name) \n\topAttr.append(\"inputs { \")\n\tfor input in op.inputs:\n\t\ttensorStr = getTensor(input)\n\t\topAttr.append( tensorStr )\n\topAttr.append(\"}\")\n\topAttr.append(\"control_inputs { \")\n\tfor control_input in op.control_inputs:\n\t\ttensorStr = getOperation(control_input)\n\t\topAttr.append( tensorStr )\n\topAttr.append(\"}\")\n\topAttr.append(\"outputs: { \")\n\tfor output in op.outputs:\n\t\ttensorStr = getTensor(output) \n\t\topAttr.append( tensorStr )\n\t# opAttr.append( str(op.run) )\n\topAttr.append(\"}\")\n\tsep = \"\\t\"\n\treturn sep.join(opAttr)\n\n\n\ndef getOperations(g):\n\t\"Return the operations in a graph as a string\"\n\tops = g.get_operations()\n\tif len(ops)==0:\n\t\treturn \"{ }\\n\"\n\tstr = \"{\\n\"\n\tfor op in ops: \n\t\topStr = getOperation(op)\n\t\tstr = str + \"\\t\" + opStr + \"\\n\"\n\tstr = str + \"}\\n\";\n\treturn str\n\ndef getGraph(s):\n\t\"Returns the operations of the graph in the session\"\n\tg = s.graph\n\tresult = [ ]\n\tif g is not None:\n\t\tresult.append( \"Version : \" + str(g.version) )\n\t\tresult.append( \"Name scope : \" + g.get_name_scope() )\n\t\tresult.append( \"Graph: \" + getOperations(g) )\n\treturn \"\\n\".join(result)\n\ndef printGraph(s):\n\t\"Print the graph corresponding to session s\"\n\tprint( getGraph(s) )\t\n", "id": "11713701", "language": "Python", "matching_score": 1.8578499555587769, "max_stars_count": 35, "path": "TensorFI/printGraph.py" }, { "content": "# Simple statistics gathering 
for fault injection experiments\n\nfrom enum import Enum\nimport sys\n\nclass Stats(Enum):\n\tInjected = \"Injected\"\n\tIncorrect = \"Incorrect\"\n\tDiff = \"Diff\" \n# End of Stats Enum \n\n# Initial values of the statistics gathered\ninitVal = { Stats.Injected: 0,\n\t Stats.Incorrect: 0,\n\t Stats.Diff: 0.0\n}\n\n# NOTE: This is the base class that can be overridden for more elaborate functionality\nclass FIStat:\n\t\"Base class for statistics gathering\"\n\n\tdef __init__(self, name = \"\", fileName=None):\n\t\t\"Setup statistics collection with a default file name\"\n\t\tself.stats = { }\n\t\tself.name = name\n\t\tself.outFile = sys.stdout\t\n\t\tif fileName:\n\t\t\t# FIXME: Open a file and dump stats to it later\n\t\t\ttry:\n\t\t\t\tself.outFile = open(fileName,\"w\")\n\t\t\texcept IOError:\n\t\t\t\tprint \"Error opening statistics file\", fileName\n\n\tdef init(self):\n\t\t\"Initialize the statistics\"\n\t\tfor stat in Stats:\n\t\t\tstatName = stat.value\n\t\t\tself.stats[ statName ] = initVal[ stat ]\n\n\tdef update(self, stat, value = 1):\n\t\t\"Increment the statistic by the value\"\n\t\tself.stats[ stat.value ] += value\n\n\tdef getStats(self):\n\t\t\"Return the stats dictionary as a string\"\t\n\t\tresStr = self.name + \" { \"\n\t\tfor (key, value) in self.stats.items():\n\t\t\tresStr += \"\\n\\t\" + str(key) + \" : \" + str(value)\n\t\tresStr += \"\\n}\"\n\t\treturn resStr\n\n\tdef writeToFile(self):\n\t\t\"Write the statistics to a file\"\n\t\tself.outFile.write( \"-------------------\\n\")\n\t\tself.outFile.write( self.getStats() + \"\\n\" )\n\t\tself.outFile.write( \"-------------------\\n\")\n\n\n\tdef __del__(self):\n\t\t\"Destructor: make sure the output file is closed\"\n\t\tif self.outFile!=sys.stdout:\n\t\t\tself.writeToFile()\n\t\t\tself.outFile.close()\n\t\n# Done with FIStat\n\ndefaultStats = FIStat(\"Default\")\n\ndef getDefaultStats():\n\t\"Return the default stats as a string\"\n\treturn defaultStats.getStats()\n\ndef collateStats(StatList, name = \"Overall\", fileName = None):\n\t\"Takes a bunch of different Statistics and collates them together\"\n\tresultStat = FIStat(name, fileName)\n\tresultStat.init()\n\t# We asssume the StatList has only FIStat objects here\n\tfor stat in StatList:\n\t\tfor statField in Stats:\n\t\t\tstatName = statField.value\n\t\t\tvalue = stat.stats[ statName ]\n\t\t\tresultStat.update( statField, value ) \n\t\t# End inner for\n\t# End outer for\n\treturn resultStat\n", "id": "4462443", "language": "Python", "matching_score": 2.5045759677886963, "max_stars_count": 35, "path": "TensorFI/fiStats.py" }, { "content": "# Library of fault injection functions called at runtime for common operations in TensorFlow\n# NOTE: These are called by the corresponding functions inserted in the TensorFlow graph at RUNTIME\n\nimport tensorflow as tf\nimport numpy as np\nimport logging \nfrom fiConfig import * \nfrom fiLog import *\nfrom threading import current_thread\n\n# FIXME: Add this to the list of dependencies for this module\nfrom sklearn.neighbors import KNeighborsClassifier\t\nfrom sklearn.utils.extmath import softmax\n\n# global variable to determine fine grained levels of logging\n# WARNING: Setting these to True may generate a lot of log data\n\nlogReturn = True\t# log return values of functions\t\nlogArgs = True\t\t# log arguments of operators\nlogInjection = True\t# log fault injection and checking\n\n# This is the initialization function for the config file \n# and is called from TensorFI.py's constructor\n\n# NOTE: This has to be in this 
module or else fiConf won't be accessible\ndef initFIConfig(fiParams):\n\t\"Initialize the global variable fiConf with the params\"\n\tglobal fiConf\n\tglobal count\n\t# instance of the current op (e.g., 3 ADD op means 3 instances of ADD op)\n\tglobal visitedOp\n\t# random instance of the op to be injected \n\tglobal randInstanceMap\n\t# order of the current op (e.g., the sequence of the current op in all of the op in the dataflow graph)\n\tglobal totalVistedOp\n\t# which op to be injected in the whole run\n\tglobal injectedOp\n\n\tfiConf = FIConfig(fiParams)\n\tlogging.debug(\"Initialized config file : \" + str(fiConf))\n\t\n\t# Setup the random seed for the fault injector if one is specified\n\tif fiConf.faultSeed: np.random.seed( fiConf.faultSeed )\t \n\n\n\t# Initialize the count of the selected operations to 0 (for skipCount)\n\tcount = 0\n\tvisitedOp = {}\n\trandInstanceMap = {}\n\ttotalVistedOp = 0\n\tinjectedOp = 0\n\treturn fiConf\n\n# End of fiConfing\n\ndef getFIConfig():\n\t\"Return the fiConfig that was initialized\"\n\tglobal fiConf\n\treturn fiConf\n# End of getFIConfig\n\n# These functions have to do with the faultLog and are called from TensorFI.py\n\nfaultLogs = { }\t\t# Global map of Threads to their fault logs\n\ndef initFILog(name):\n\t\"Initialize the fault injection log - optionally specify a thread number\"\n\t\n\tglobal faultLogs\n\tglobal logName\n\n\tlogName = name\n\tfaultLog = FILog(logName)\n\n\t# Add the fault log to the log for the current thread\n\tcurrent = current_thread()\n\tfaultLogs[ current ] = faultLog\n\n\t# logging.debug(\"Initialized faultLog for thread \" + str(current) + \" as \" + logName)\n\n# End of initFILog\n\ndef getCurrentFaultLog():\n\t\"Return the fault log for the current thread (if it exists), add it otherwise\"\n\t# Precondition: faultLogs != None\n\n\tglobal faultLogs\n\tglobal logName\n\t\n\tcurrent = current_thread()\n\tfaultLog = None\n\t\n\t# If we cannot find the faultLog for the current thread, add it to the faultLogs\n\t# FIXME: This doesn't work because TensorFlow uses its own threading infrastructure\n\t# and ThreadIDs are not the same during log creation time and log access time\n\t# So we always end up writing to the first entry of the faultLogs dictionary\n\tif not faultLogs.has_key(current):\n\t\t# logging.debug(\"Cannot find fault log for \" + str(current) )\n\t\tfaultLog = FILog(logName + \"-\" + current.name)\n\t\tfaultLogs[ current ] = faultLog\n\t\t# faultLog = faultLogs.values()[0]\n\telse:\n\t\t# Otherwise, return the fault log for the current thread\n\t\tfaultLog = faultLogs[current]\n\n\t# logging.debug(\"Returning fault log \" + str(faultLog) + \" for thread \" + str(current) )\n\treturn faultLog\n\n# End of getCurrentFaultLog\n\ndef logRun(runCount):\n\t\"Update the run count in the log file\"\n\t\n\tglobal count\n\n\t# Reset the count on a new run\n\tcount = 0\n\tfaultLog = getCurrentFaultLog()\t# Get the fault log for the current thread\n\n\t# Log the runCount and start a new section of the logFile\n\tfaultLog.updateRunCount( runCount ) \n\tfaultLog.dashedLine()\n\n# End of logRun\n\n# These are the basic fault injection functions that're called at runtime\n# NOTE: We need to first call initFIConfig before these are called \n\ndef perturb(val):\n\t\"Inject a single fault in res - fault type depends on config param\"\n\t# Precoditions: injectScalar != None && injectTensor != None\n\t\n\tfaultLog = getCurrentFaultLog()\t# Get the fault log for the current thread\n\n\tisScalar = np.isscalar(val)\n\tvType = 
val.dtype\n\n\tif logInjection:\n\t\tlogging.debug(\"\\tPerturbing \" + str(val) + \" of type: \" + str(vType) + \" isScalar: \" + str(isScalar) )\n\n\t\n\t# Check if the object is a scalar or a tensor, and call the corresponding injection function\n\tif isScalar: \n\t\tres = fiConf.injectScalar( vType, val.copy()) \n\telse: \n\t\tres = fiConf.injectTensor( vType, val.copy()) \n\n\t# Enter an entry in the fault log that we injected a fault here\n\tfaultLog.updateOriginal( val )\n\tfaultLog.updateInjected( res )\n\n\treturn res\n\n# End of perturb\n\ndef condPerturb(op, res):\n\t\"Calls the perturb function if and only if the op Operation is included for injection\"\n\t\n\t# Pre-condition: injectMap != None && skipCount != None \n\tglobal count\t# Keeps track of how many times the selected operation(s) are executed\n\tglobal visitedOp\n\n\tfaultLog = getCurrentFaultLog()\t# Get the fault log for the current thread\n\t\n\tif logInjection: \n\t\tlogging.debug(\"\\tChecking if operation \" + str(op) + \" is chosen for injection\")\n\t\n\t# Check if the operation is chosen for injection and if so, inject a fault\t\n \n\tif fiConf.isSelected(op): \n\t\tcount = count + 1\t# If it's selected, then update the execution count\n\n\t\tif logInjection: logging.debug(\"\\tOperation \" + str(op) + \" is chosen for injection\")\n\t\t\n\t\t# Enter the op and count in the faultLog - as we won't have access to it later\n\t\t# NOTE: This is not actually written to the logFIle till faultLog.commit is called\n\t\t#\t\tso we won't write to the log if a fault is not injected into it\n\t\tfaultLog.updateOp( op )\n\t\tfaultLog.updateCount( count )\n\t\t\n\t\t# If the operation exceeds the number of times it is to be skipped (default=0)\n\t\tif (count > fiConf.skipCount):\t\n\n \t\t\t\"(1) inject faults based on the error rate\"\n\t\t\tif(fiConf.injectMode == \"errorRate\" ):\n\t\t\t\t# Retreive the probability of perturbing this instruction\n\t\t\t\t# and generate a random number in the interval [0, 1]\n\t\t\t\t# and only perturb it only if the random no. 
<= the probability \n \t\t\t\t\n\t\t\t\tprob = fiConf.getProbability(op)\n\t\t\t\trn = np.random.random()\t\t# random.random returns a number in [0, 1] \n\t\t\t\tif (rn <= prob): \n\t\t\t\t\tres = perturb(res) # Perturb is called to inject the fault \n\t\t\t\t\tfaultLog.commit() # Write the log entry to the fault log \t \n\t\t\t\n\t\t\t\"(2) inject faults based on the dynamic instance of op, i.e., inject one instance for each op\"\n \t\t\tif(fiConf.injectMode == \"dynamicInstance\"):\n\t\t\t\t# Retreive the total instances of this instruction\n\t\t\t\t# each operation will be injected once only\n\t\t\t\t# and generate a random number to select a random instance of the operation\n\t\t\t\t# and only perturb it only if the current instance has been selected \n\t\t\t\tinstance = fiConf.getInstance(op) \n\t\t\t\t\n\t\t\t\t# You can manually specify the instance here rather than using the random instances\n\t\t\t\t# So that you can inject fault into a target operator\n\t\t\t\t# E.g., randInstanceMap[op] = instance of op to be injected\n\t\t\t\tif (not randInstanceMap.has_key(op)): \n\t\t\t\t\t# random instance of the selected op to be injected\n\t\t\t\t\trandInstanceMap[op] = np.random.randint(low=1, high=instance+1)\t\n\t\t\t\t\n\t\t\t\t# first instance of the op\n\t\t\t\tif(not visitedOp.has_key(op)):\tvisitedOp[op] = 1\t\n\t\t\t\t# not the first instance of op\n\t\t\t\telse:\t\t\t\t\t\t\tvisitedOp[op] += 1\t\n\n\t\t\t\t# determine if the current instance is selected for injection \n\t\t\t\tif(visitedOp[op] == randInstanceMap[op]): \n\t\t\t\t\tres = perturb(res) \n\t\t\t\t\tfaultLog.updateInjectedInstance(randInstanceMap[op], instance)\n\t\t\t\t\tfaultLog.commit()\n\n\t\t\t\t# current run has finished, re-initialize the visit table for the next run \n\t\t\t\t# used when you need to do injection on the same op in the next run\n\t\t\t\tif(visitedOp[op] == instance):\n\t\t\t\t\tvisitedOp[op] = 0 \n\n\t\t\t\"(3) inject one fault per run\"\n\t\t\tif(fiConf.injectMode == \"oneFaultPerRun\"):\n\t\t\t\t# refer the global variable for memorizing the order of the current op\n\t\t\t\tglobal totalVistedOp\n\t\t\t\tglobal injectedOp\n\t\t\t\t# get the amount of total op\n\t\t\t\ttotalInstance = fiConf.totalInstance\n\t\t\t\ttotalVistedOp += 1\n\t\t\t\t# select one random op to be injected in the whole run\n\t\t\t\tif(injectedOp == 0):\n\t\t\t\t\tinjectedOp = np.random.randint(low=1, high=totalInstance+1) \n\t\t\t\t# inject fault at the output of the operation\n\t\t\t\tif(totalVistedOp == injectedOp):\n\t\t\t\t\tres = perturb(res)\n\t\t\t\t\tfaultLog.updateInjectedInstance(injectedOp, totalInstance)\n\t\t\t\t\tfaultLog.commit()\n\t\t\t\t# current run has finished, re-initialize the visit table for the next run (optional)\n\t\t\t\tif(totalVistedOp == totalInstance):\n\t\t\t\t\ttotalVistedOp = 0\n\t\t\t\t\tinjectedOp = 0\n\n\t\t# Done with if count\n\n\t# Done with if isSelected\n\treturn res\n\n# End of condPerturb\n\n# This is a specialized function to cast into values of different types\t\ndef castType(type):\n\t\"Returns the appropriate injection function based on the type\"\n\t\n\t# Create specialized functions for each type\n\t# FIXME: Only 4 types are supported now. 
Support more types later.\n\tdef castFloat32(value):\n\t\tlogging.debug(\"Casting to \" + str(type))\n\t\treturn np.float32(value) \n\tdef castInt32(value):\n\t\tlogging.debug(\"Casting to \" + str(type))\n\t\treturn np.int32(value) \n\tdef castInt64(value):\n\t\tlogging.debug(\"Casting to \" + str(type))\n\t\treturn np.int64(value)\n\tdef castFloat64(value):\n\t\tlogging.debug(\"Casting to \" + str(type))\n\t\treturn np.float64(value)\n\t\n\t# Check the type parameter and return the appropriate function\n\tif (type==np.float32):\n\t\treturn castFloat32\n\telif (type==np.int32):\n\t\treturn castInt32\n\telif (type==np.int64):\n\t\treturn castInt64\n\telif (type==np.float64):\n\t\treturn castFloat64\n\telse:\n\t\traise TypeError(\"Unknown type \" + type)\n\treturn None\n# End of castType\n\n# Debugging function to log the values of the arguments\n# if and only if logArgs is set to True\ndef getArgs(*args):\n\t\"Return a string of the args if logArgs is True; Empty String otherwise\"\n\tres = \" \"\n\tif logArgs:\n\t\tres +=\"( \"\n\t\tfor arg in args:\n\t\t\tres = res + \" , \" + str(arg)\n\t\tres += \" )\"\n\treturn res\n\n# Start the implementation of the injectFault functions for each op type\n\n# This is a special case for the Cast function which needs to remember the type\n# We use closures to remember the type and cast it appropriately at \"runtime\"\ndef createInjectFaultCast(type):\n\t\"Returns a Function to call injectFault on cast nodes\"\n\t\n\tcastInto = castType(type) \t# get the appropriate casting function for the type\n\n\tdef injectFaultCast(a, b = None):\n\t\t\"Inject a fault into a Cast instruction\"\n\t\tlogging.debug(\"Calling Operator Cast \" + getArgs(a, b))\n\t\t# If we're given 2 parameters, treat it as the default case\n\t\tif b != None:\n\t\t\tres = np.cast(a, b)\n\t\telse:\n\t\t\t# Call the function for this type with 'a'\n\t\t\tres = castInto(a)\n\t\tres = condPerturb(Ops.CAST, res)\n\n\t\tif logReturn: logging.debug(\"\\tReturning \" + str(res) )\n\t\treturn res\n\n\t# Return the injectFaultCast function\n\treturn injectFaultCast\n\n\ndef injectFaultNoop():\n\t\"Inject a fault in the Noop operaton - does nothing\"\n\tlogging.debug(\"Calling Operator Noop\") \n\t# No need to call Perturb as there's nothing to return\n\treturn\n\ndef injectFaultAssign(a, b):\n\t\"Inject a fault in the assignement operation\"\n\tlogging.debug(\"Calling Operator Assigment \" + getArgs(a, b))\n\tres = b\t\t# FIXME: Check semantics of assignment operator\n\tres = condPerturb(Ops.ASSIGN, res)\n\tif logReturn: logging.debug(\"\\tReturning from Assignment \" + str(res) )\n\treturn res\t\n\ndef injectFaultIdentity(a):\n\t\"Inject a fault in the identitiy operation\"\t\n\tlogging.debug(\"Calling Operator Identity \" + getArgs(a))\n\tres = a\n\tres = condPerturb(Ops.IDENTITY, res)\n\tif logReturn: logging.debug(\"\\tReturning from Identity \" + str(res) )\n\treturn res\t\n\ndef injectFaultAdd(a, b):\n\t\"Function to call injectFault on Add nodes\"\n\tlogging.debug(\"Calling Operator Add \" + getArgs(a, b))\n\tresOp = tf.add(a, b)\n\twith tf.Session() as sess:\n\t\tres = resOp.eval()\n\tres = condPerturb(Ops.ADD, res)\n\tif logReturn: logging.debug(\"\\tReturning from Add \" + str(res) )\n\treturn res\t\n\ndef injectFaultSub(a, b):\n\t\"Function to call injectFault on Sub nodes\"\n\tlogging.debug(\"Calling Operator Sub \" + getArgs(a, b))\n\tres = a - b\n\tres = condPerturb(Ops.SUB, res)\n\tif logReturn: logging.debug(\"\\tReturning from Sub \" + str(res) )\n\treturn res\t\n\ndef 
injectFaultMul(a, b):\n\t\"Function to call injectFault on Mul nodes\"\n\tlogging.debug(\"Calling Operator Mul \" + getArgs(a, b))\n\tres = a * b\n\tres = condPerturb(Ops.MUL,res)\n\tif logReturn: logging.debug(\"\\tReturning from Mul \" + str(res) )\n\treturn res\n\ndef injectFaultSquare(a):\n\t\"Function to call injectFault on Square nodes\"\n\tlogging.debug(\"Calling Operator Square \" + getArgs(a))\n\tres = a * a\n\tres = condPerturb(Ops.SQUARE,res)\n\tif logReturn: logging.debug(\"\\tReturning from Square \" + str(res) )\n\treturn res\n\ndef injectFaultShape(a):\n\t\"Function to call injectFault on Shape nodes\"\n\tlogging.debug(\"Calling Operator Shape \" + getArgs(a))\n\t# If it's a tensor, call shape on it directly\n\t# Otherwise, use numpy to get its shape\n\tif isinstance(a, tf.Tensor):\n\t\tres = a.shape()\n\telse:\n\t\t# res = tf.convert_to_tensor( np.shape(a) , dtype = np.int32 )\n\t\tres = np.int32( np.shape(a) )\n\t# res should be either a scalar or tensor here\n\tres = condPerturb(Ops.SHAPE,res)\n\tif logReturn: logging.debug(\"\\tReturning from Shape \" + str(res) )\n\treturn res\n\ndef injectFaultSize(a):\n\t\"Function to call injectFault on Size nodes\"\n\tlogging.debug(\"Calling Operator Size \" + getArgs(a))\n\tres = a.size()\n\tres = condPerturb(Ops.SIZE, res)\n\tif logReturn: logging.debug(\"\\tReturning from Size \" + str(res) )\n\treturn res\n\ndef injectFaultFill(a, b):\n\t\"Function to call injectFault on Shape nodes\"\n\tlogging.debug(\"Calling Operator Fill \" + getArgs(a, b))\n\tres = np.full(a, b)\n\tres = condPerturb(Ops.FILL, res)\n\tif logReturn: logging.debug(\"\\tReturning from Fill\" + str(res) )\n\treturn res\n\ndef injectFaultFloorMod(a, b):\n\t\"Function to call injectFault on FloorMod nodes\"\n\tlogging.debug(\"Calling Operator FloorMod \" + getArgs(a, b)) \n\t# FIXME: Need to check if mod is the equivalent of floorMod in NumPy\n\tres = np.mod(a, b)\n\tres = condPerturb(Ops.FLOORMOD, res)\n\tif logReturn: logging.debug(\"\\tReturning from FloorMod \" + str(res) )\n\treturn res\n\ndef injectFaultRange(start, stop, step, dtype = None):\n\t\"Function to call injectFault on Range nodes\"\n\tlogging.debug(\"Calling Operator Range \" + getArgs(start, stop, step))\n\tres = np.int32(np.arange(start, stop, step, dtype))\n\tres = condPerturb(Ops.RANGE, res)\n\tif logReturn: logging.debug(\"\\tReturning from Range \" + str(res) )\n\treturn res\t\n\ndef injectFaultRank(a):\n\t\"Function to call injectFault on Rank nodes\"\n\tlogging.debug(\"Calling Operator Rank \" + getArgs(a))\n\tres = np.int32( np.ndim(a) )\n\tres = condPerturb(Ops.RANK, res)\n\tif logReturn: logging.debug(\"\\tReturning from Rank \" + str(res) )\n\treturn res\t\n\ndef injectFaultSum(a, b):\n\t\"Function to call injectFault on Sum nodes\"\n\tlogging.debug(\"Calling Operator Sum \" + getArgs(a, b))\n\t# Check if b is an integer scalar array\n\t# and if so, pass it to np.sum\n\t# Otherwise, ignore it (FIXME: is this the correct behavior ?)\n\tif np.isscalar(b):\n\t\tres = np.sum(a, b)\n\telse:\n\t\tres = np.sum(a)\n\tres = condPerturb(Ops.SUM, res)\n\tif logReturn: logging.debug(\"\\tReturning from Sum \" + str(res) )\n\treturn res\n\ndef injectFaultReshape(a, b):\n\t\"Function to call injectFault on Reshape\"\n\tlogging.debug(\"Calling Operator Reshape \" + getArgs(a, b))\n\tres = np.reshape(a, b)\n\tres = condPerturb(Ops.RESHAPE, res) \n\tif logReturn: logging.debug(\"\\tReturning from Reshape \" + str(res) )\n\treturn res\n\ndef injectFaultOneHot(a, b, c, d):\n\t\"Function to call 
injectFault on OneHot\"\n\tlogging.debug(\"Calling Operator One Hot \" + getArgs(a, b, c, d))\n\t# TF adds two default arguments, so we need to pass them as well\n\tresOp = tf.one_hot(a, b, c, d)\n\twith tf.Session() as sess:\n\t\tres = resOp.eval()\n\tres = condPerturb(Ops.ONE_HOT, res)\n\tif logReturn: logging.debug(\"\\tReturning from One Hot \" + str(res) )\n\treturn res\n\ndef injectFaultMatMul(a, b):\n\t\"Function to call injectFault on matrix multiplication\"\n\tlogging.debug(\"Calling Operator MatMul \" + getArgs(a, b))\n\n\tmatmul = tf.matmul(a,b)\n\twith tf.Session() as sess:\n\t\tres = matmul.eval()\n#\tres = np.matmul(a, b)\n\tres = condPerturb(Ops.MATMUL, res)\n\tif logReturn: logging.debug(\"\\tReturning from MatMul \" + str(res) )\n\treturn res\n\ndef injectFaultArgMax(a, b):\n\t\"Function to call injectFault on ArgMax\"\n\tlogging.debug(\"Calling Operator ArgMax \" + getArgs(a, b))\n\tresOp = tf.argmax(a, b)\n\twith tf.Session() as sess:\n\t\tres = resOp.eval()\n\tres = condPerturb(Ops.ARGMAX, res)\n\tif logReturn: logging.debug(\"\\tReturning from ArgMax \" + str(res) )\n\treturn res\n\ndef injectFaultArgMin(a, b):\n\t\"Function to call injectFault on ArgMin\"\n\tlogging.debug(\"Calling Operator ArgMin \" + getArgs(a, b))\n\tres = np.argmin(a, b)\n\tres = condPerturb(Ops.ARGMIN, res)\n\tif logReturn: logging.debug(\"\\tReturning from ArgMin \" + str(res) )\n\treturn res\n\ndef injectFaultEqual(a, b):\n\t\"Function to call injectFault on equal\"\n\tlogging.debug(\"Calling Operator Equal \" + getArgs(a, b)) \n\tres = np.equal(a, b)\n\tres = condPerturb(Ops.EQUAL, res)\n\tif logReturn: logging.debug(\"\\tReturning from Equal \" + str(res) )\n\treturn res\n\ndef injectFaultNotEqual(a, b):\n\t\"Function to call injectFault on not equal\"\n\tlogging.debug(\"Calling Operator Not Equal \" + getArgs(a, b))\n\tres = np.not_equal(a, b)\n\tres = condPerturb(Ops.NOT_EQUAL, res)\n\tif logReturn: logging.debug(\"\\tReturning from Not Equal \" + str(res) )\n\treturn res\n\ndef injectFaultLessEqual(a, b):\n\t\"Function to call injectFault on less equal\"\n\tlogging.debug(\"Calling Operator Less Equal \" + getArgs(a, b))\n\tres = np.less_equal(a, b)\n\tres = condPerturb(Ops.LESS_EQUAL, res)\n\tif logReturn: logging.debug(\"\\tReturning from Less Equal \" + str(res) )\n\treturn res\n\ndef injectFaultGreaterEqual(a, b):\n\t\"Function to call injectFault on greater equal\"\n\tlogging.debug(\"Calling Operator Greater Equal \" + getArgs(a, b))\n\tres = np.greater_equal(a, b)\n\tres = condPerturb(Ops.GREATER_EQUAL, res)\n\tif logReturn: logging.debug(\"\\tReturning from Greater Equal \" + str(res) )\n\treturn res\n\ndef injectFaultMean(a, b):\n\t\"Function to call injectFault on mean\"\n\tlogging.debug(\"Calling Operator mean \" + getArgs(a, b))\n\t# FIXME: This only works if we call np.mean on b[0]. 
Need to figure out why.\n\tres = np.mean(a, b[0])\n\tres = condPerturb(Ops.MEAN, res)\n\tif logReturn: logging.debug(\"\\tReturning from Mean \" + str(res) )\n\treturn res\n\ndef injectFaultCountNonZero(a):\n\t\"Function to call injectFault on countNonZero\"\n\tlogging.debug(\"Calling Operator CountNonZero \" + getArgs(a)) \n\tres = np.count_nonzero(a)\n\tres = condPerturb(Ops.COUNT_NONZERO, res)\n\tif logReturn: logging.debug(\"\\tReturning on CountNonZero \" + str(res) )\n\treturn res\n\ndef injectFaultConv2D(a, b, strides, padding):\n\t\"Function to call injectFault on Conv2D\"\n\tlogging.debug(\"Calling Operator conv2D \" + getArgs(a, b)) \n\tconv = tf.nn.conv2d(a , b, strides=strides.tolist(), padding=padding)\n\twith tf.Session() as sess:\n\t\tres = conv.eval()\n\tres = condPerturb(Ops.CONV2D, res)\n\tif logReturn: logging.debug(\"\\tReturning from Conv2D \" + str(res) )\n\treturn res\n\ndef injectFaultRelu(a):\n\t\"Function to call injectFault on RelU\"\n\tlogging.debug(\"Calling Operator RelU \" + getArgs(a))\n\trelu = tf.nn.relu(a)\n\twith tf.Session() as sess:\n\t\tres = relu.eval()\n\tres = condPerturb(Ops.RELU, res)\n\tif logReturn: logging.debug(\"\\tReturning from RelU \" + str(res) )\n\treturn res\n\ndef injectFaultMaxPool(a, ksize, strides, padding): \n\t\"Function to call injectFault on MaxPool\" \n\tmaxpool = tf.nn.max_pool(a, ksize=ksize.tolist(), strides=strides.tolist(), padding=padding)\n\twith tf.Session() as sess:\n\t\tres = maxpool.eval()\t\n\tres = condPerturb(Ops.MAXPOOL, res)\n\tif logReturn: logging.debug(\"\\tReturningfrom MaxPool \" + str(res) )\n\treturn res\n\ndef injectFaultUnpack(a):\n\t\"Function to call injectFault on unpack\"\n\tlogging.debug(\"Calling Operator Unpack \" + getArgs(a))\n\t# This operation is deprecated in TF 1.0 and above\n\tres = np.array_split(a, a.shape[1]) \n\t# FIXME: Can't inject faults into unpack as it's not a tensor or scalar\n\t# res = condPerturb(Ops.UNPACK, res)\n\tif logReturn: logging.debug(\"\\tReturning from Unpack \" + str(res) )\n\treturn res\n\ndef injectFaultUnstack(a):\n\t\"Function to call injectFault on unstack\"\n\t# This is the same as Unpack in newer versions of TF\n\tlogging.debug(\"Calling Operator Unstack \" + getArgs(a, b, c))\n\tresOp = tf.unstack(a, b, c)\n\twith tf.Session() as sess:\n\t\tres = resOp.eval()\n\tif logReturn: logging.debug(\"\\tReturning from Unstack \" + str(res) )\n\treturn res\n\ndef injectFaultStridedSlice(a, b, c, d):\n\t\"Function to call injectFault on StridedSlice\"\n\tlogging.debug(\"Calling Operator StridedSlice \" + getArgs(a, b, c, d))\n\t# FIXME: Implement this functionality\n\tresOp = tf.strided_slice(a, b, c, d)\n\twith tf.Session() as sess:\n\t\tres = resOp.eval()\n\tres = condPerturb(Ops.STRIDEDSLICE, res)\n\tif logReturn: logging.debug(\"\\tReturning from StridedSlice \" + str(res) )\n\treturn res\n\t\t\ndef injectFaultExpandDims(a, b):\n\t\"Function to call injectFault on ExpandDims\"\n\tlogging.debug(\"Calling Operator ExpandDims \" + getArgs(a, b))\n\tres = np.expand_dims(a, b)\n\tres = condPerturb(Ops.EXPANDDIMS, res)\n\tif logReturn: logging.debug(\"\\tReturning from ExpandDims \" + str(res) )\n\treturn res\n\ndef injectFaultPack(a, b):\n\t\"Function to call injectFault on Pack\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator Pack\" + getArgs(a, b))\n\t# res = np.stack(a, b)\n\t# FIXME: This throws an exception, so we dummied it out\n\tres = a\n\tres = condPerturb(Ops.PACK, res)\n\tif logReturn: logging.debug(\"\\tReturning \" + str(res) 
)\n\treturn res\n\ndef injectFaultConcatV2(a, b, c):\n\t\"Function to call injectFault on ConcatV2\"\n\tlogging.debug(\"Calling Operator ConcatV2\" + getArgs(a, b, c))\n\tres = np.concatenate((a, b), c)\n\tres = condPerturb(Ops.PACK, res)\n\tif logReturn: logging.debug(\"\\tReturning from Concat \" + str(res) )\n\treturn res\n\ndef injectFaultSoftmax(a):\n\t\"Function to call injectFault on Softmax\"\n\tlogging.debug(\"Calling Operator Softmax \" + getArgs(a))\n\tresOp = tf.nn.softmax(a)\n\twith tf.Session() as sess:\n\t\tres = resOp.eval() \n\tres = condPerturb(Ops.SOFTMAX, res)\n\tif logReturn: logging.debug(\"\\tReturning from Softmax \" + str(res) )\n\treturn res\n\ndef injectFaultMaximum(a, b):\n\t\"Function to call injectFault on Maximum\"\n\tlogging.debug(\"Calling Operator Maximum \" + getArgs(a, b)) \n \tres = np.maximum(a, b)\n\tres = condPerturb(Ops.MAXIMUM, res)\n\tif logReturn: logging.debug(\"\\tReturning from Maximum \" + str(res) )\n\treturn res\n\ndef injectFaultMinimum(a, b):\n\t\"Function to call injectFault on Maximum\"\n\tlogging.debug(\"Calling Operator Minimum \" + getArgs(a, b)) \n \tres = np.minimum(a, b)\n\tres = condPerturb(Ops.MINIMUM, res)\n\tif logReturn: logging.debug(\"\\tReturning from Minimum \" + str(res) )\n\treturn res\n\ndef injectFaultSwitch(a, b):\n\t\"Function to call injectFault on Switch\"\n\tlogging.debug(\"Calling Operator Switch \" + getArgs(a, b))\n\t# FIXME: Actually implement the Switch operation\n\t# \tOnly there's no TensorFlow documentation for it !!!\n\t# res = np.select(a, b)\n\tres = a, a\n\t# res = condPerturb(Ops.SWITCH, res)\n\tif logReturn: logging.debug(\"\\tReturning from Switch \" + str(res) )\n\treturn res\n\ndef injectFaultGreater(a, b):\n\t\"Function to call injectFault on Greater\"\n\tlogging.debug(\"Calling Operator Greater \" + getArgs(a, b))\n \tres = np.greater(a, b)\n\tres = condPerturb(Ops.GREATER, res)\n\tif logReturn: logging.debug(\"\\tReturning from Greater \" + str(res) )\n\treturn res\n\ndef injectFaultNeg(a):\n\t\"Function to call injectFault on negative\"\n\tlogging.debug(\"Calling Operator Neg \" + getArgs(a))\n \tres = np.negative(a)\n\tres = condPerturb(Ops.NEGATIVE, res)\n\tif logReturn: logging.debug(\"\\tReturning from Neg \" + str(res) )\n\treturn res\n\ndef injectFaultPow(a, b):\n\t\"Function to call injectFault on pow\"\n\tlogging.debug(\"Calling Operator Pow \" + getArgs(a, b))\n \tres = np.power(a, b)\n\tres = condPerturb(Ops.POWER, res)\n\tif logReturn: logging.debug(\"\\tReturning from Pow \" + str(res) )\n\treturn res\n\ndef injectFaultAbs(a):\n\t\"Function to call injectFault on absolute\"\n\tlogging.debug(\"Calling Operator Abs \" + getArgs(a))\n \tres = np.absolute(a)\n\tres = condPerturb(Ops.ABSOLUTE, res)\n\tif logReturn: logging.debug(\"\\tReturning from Abs \" + str(res) )\n\treturn res\n\ndef injectFaultRsqrt(a):\n\t\"Function to call injectFault on Rsqrt\"\n\tlogging.debug(\"Calling Operator Rsqrt \" + getArgs(a))\n \tres = np.reciprocal( np.sqrt(a) )\n\tres = condPerturb(Ops.RSQRT, res)\n\tif logReturn: logging.debug(\"\\tReturning from Rsqrt \" + str(res) )\n\treturn res\n\ndef injectFaultNN(a, b, c):\n\t\"Function to call injectFault on Nearest Neighbors\"\n\t# FIXME: According to the TF docs, this operation doesn't exist !\n\t#\tNot sure what the third parameter is supposed to be.\n\tlogging.debug(\"Calling Operator Nearest Neighbors \" + getArgs(a, b, c))\n\tres = KNeighborsClassifier(a)\n\tif logReturn: logging.debug(\"\\tReturning from Nearest Neighbors \" + str(res) )\n\treturn 
res\n\ndef injectFaultLog(a):\n\t\"Function to call injectFault on Log\"\n\tlogging.debug(\"Calling Operator Log \" + getArgs(a))\n \tres = np.reciprocal( np.log(a) )\n\tres = condPerturb(Ops.LOG, res)\n\tif logReturn: logging.debug(\"\\tReturning from Log \" + str(res) )\n\treturn res\n\ndef injectFaultRealDiv(a, b):\n\t\"Function to call injectFault on RealDiv\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator Log \" + getArgs(a, b))\n \tres = np.divide( a, b )\n\tres = condPerturb(Ops.REALDIV, res)\n\tif logReturn: logging.debug(\"\\tReturning from RealDiv \" + str(res) )\n\treturn res\n\ndef injectFaultBiasAdd(a, b):\n\t\"Function to call injectFault on BiasAdd\"\n\tlogging.debug(\"Calling Operator BiasAdd \" + getArgs(a, b))\n\tres = a + b \n\tres = condPerturb(Ops.BIASADD, res)\n\tif logReturn: logging.debug(\"\\tReturning from BiasAdd \" + str(res) )\n\treturn res\n\ndef injectFaultSigmoid(a):\n\t\"Function to call injectFault on Sigmoid\"\n\tlogging.debug(\"Calling Operator Sigmoid \" + getArgs(a))\n\tres = np.reciprocal( 1 + np.exp(-a) ) \n\tres = condPerturb(Ops.SIGMOID, res)\n\tif logReturn: logging.debug(\"\\tReturning from Sigmoid \" + str(res) )\n\treturn res\n\ndef injectFaultTanh(a):\n\t\"Function to call injectFault on Tanh\"\n\tlogging.debug(\"Calling Operator Tanh \" + getArgs(a))\n\tres = np.tanh( a ) \n\tres = condPerturb(Ops.TANH, res)\n\tif logReturn: logging.debug(\"\\tReturning from Tanh \" + str(res) )\n\treturn res\n\ndef injectFaultLRN(a, bias, alpha, beta):\n\t\"Function to call injectFault on LRN\"\n\tlogging.debug(\"Calling Operator LRN\" + getArgs(a, bias, alpha, beta)) \n\t# FIXME: How to derive the depth_radius from LRN\n\t# Currently we manually use the value from the main program.\n\n\t# depth_radius = 2\n\tresOp = tf.nn.lrn( a , 2, bias=bias, alpha=alpha, beta=beta)\n\twith tf.Session() as sess:\n\t\tres = resOp.eval() \n\tres = condPerturb(Ops.LRN, res)\n\tif logReturn: logging.debug(\"\\tReturning from LRN \" + str(res) )\n\treturn res\n\ndef injectFaultELU(a):\n\t\"Function to call injectFault on ELU\"\n\tlogging.debug(\"Calling Operator ELU \" + getArgs(a))\n\n\trelu = tf.nn.elu(a)\n\twith tf.Session() as sess:\n\t\tres = relu.eval()\n\tres = condPerturb(Ops.ELU, res)\n\tif logReturn: logging.debug(\"\\tReturning from ELU \" + str(res) )\n\treturn res\n\ndef injectFaultRandomUniform(a):\n\t\"Function to call injectFault on Random Uniform\"\n\tlogging.debug(\"Calling Operator RandomUniform\" + getArgs(a))\n\tru = tf.random.uniform(a)\n\twith tf.Session() as sess:\n\t\tres = ru.eval()\n\tres = condPerturb(Ops.RANDOM_UNIFORM, res)\n\tif logReturn: logging.debug(\"\\tReturning from Random Uniform \" + str(res) )\n\treturn res\n\n# End of implemented operators\n\n\n##### None of the functions below have been implemented yet as they're not used #####\n#### If you implement any of them, please move them above the line ####\n##### Otherwise, they will all raise NotImplementedError(OpName) ####3\n \ndef injectFaultDynamicStitch(inputs):\n\t\"Function to call injectFault on Dynamic stitch\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator Dynamic stitch \") \n\traise NotImplementedError(\"DynamicStitch\")\t\n\ndef injectFaultFloorDiv(inputs):\n\t\"Function to call injectFault on FloorDiv\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator FloorDiv \") \n\traise NotImplementedError(\"FloorDiv\")\t\n\ndef injectFaultTile(inputs):\n\t\"Function to call injectFault on Tile\"\n\t# 
FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator Tile\")\n\traise NotImplementedError(\"Tile\")\t\n\ndef injectFaultConcatOffset(inputs):\n\t\"Function to call injectFault on ConcatOffset\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator ConcatOffset\")\n\traise NotImplementedError(\"ConcatOffset\")\t\n\ndef injectFaultSplit(inputs):\n\t\"Function to call injectFault on Split\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator Split\")\n\traise NotImplementedError(\"Split\")\t\n\ndef injectFaultSoftmaxCEWL(inputs):\n\t\"Function to call injectFault on Softmax CEWL\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator SoftmaxCEWL\")\n\traise NotImplementedError(\"SoftmaCEWL\")\t\n\ndef injectFaultSlice(inputs):\n\t\"Function to call injectFault on Slice\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator Slice\")\n\traise NotImplementedError(\"Slice\")\t\n\ndef injectFaultBroadcastGA(inputs):\n\t\"Function to call injectFault on Broadcast gradient args\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator BroadcastGA\")\n\traise NotImplementedError(\"BroadcastGA\")\t\n\ndef injectFaultTruncatedNormal(a):\n\t\"Function to call injectFault on TruncatedNormal\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator TruncatedNormal\") # + str(a))\n\traise NotImplementedError(\"TruncatedNormal\")\n\ndef injectFaultRandomUniformInt(a):\n\t\"Function to call injectFault on Random Uniform Int\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator RandomUniformInt\")\n\traise NotImplementedError(\"RandomUniformInt\")\n\ndef injectFaultRandomStandardNormal(a):\n\t\"Function to call injectFault on Random Standard Normal\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator RandomStandardNormal\")\n\traise NotImplementedError(\"RandomStandardNormal\")\n\ndef injectFaultRefSwitch(a):\n\t\"Function to call injectFault on RefSwitch\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator RefSwitch\")\n\traise NotImplementedError(\"RefSwitch\")\n\ndef injectFaultProd(a):\n\t\"Function to call injectFault on Prod\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator Prod\")\n\traise NotImplementedError(\"Prod\")\n\ndef injectFaultUnique(a):\n\t\"Function to call injectFault on Unique\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator Unique\")\n\traise NotImplementedError(\"Unique\")\n\ndef injectFaultReciprocal(a):\n\t\"Function to call injectFault on Reciprocal\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator Reciprocal\")\n\traise NotImplementedError(\"Reciprocal\")\n\ndef injectFaultScatterAdd(a):\n\t\"Function to call injectFault on ScatterAdd\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator ScatterAdd\")\n\traise NotImplementedError(\"ScatterAdd\")\n\ndef injectFaultReluGrad(a):\n\t\"Function to call injectFault on ReluGrad\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator ReluGrad\")\n\traise NotImplementedError(\"ReluGrad\")\n\ndef injectFaultMaxPoolGrad(a):\n\t\"Function to call injectFault on MaxPoolGrad\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator MaxPoolGrad\")\n\traise NotImplementedError(\"MaxPoolGrad\")\n\ndef injectFaultTanhGrad(a):\n\t\"Function to call injectFault on 
TanhGrad\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator TanhGrad\")\n\traise NotImplementedError(\"TanhGrad\")\n\ndef injectFaultSigmoidGrad(a):\n\t\"Function to call injectFault on SigmoidGrad\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator SigmoidGrad\")\n\traise NotImplementedError(\"SigmoidGrad\")\n\ndef injectFaultBiasAddGrad(a):\n\t\"Function to call injectFault on BiasAddGrad\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator BiasAddGrad\")\n\traise NotImplementedError(\"BiasAddGrad\")\n\ndef injectFaultShapeN(inputs):\n\t\"Function to call injectFault on ShapeN\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator ShapeN\")\n\traise NotImplementedError(\"ShapeN\")\n\ndef injectFaultAddN(inputs):\n\t\"Function to call injectFault on AddN\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator AddN\")\n\traise NotImplementedError(\"AddN\")\n\ndef injectFaultConv2DBackprop(inputs):\n\t\"Function to call injectFault on Conv2DBackprop\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator Conv2DBackProp\")\n\traise NotImplementedError(\"Conv2DBackProp\")\n\ndef injectFaultApplyAdam(inputs):\n\t\"Function to call injectFault on ApplyAdam\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator ApplyAdam\")\n\traise NotImplementedError(\"ApplyAdam\")\n\t\ndef injectFaultSelect(inputs):\n\t\"Function to call injectFault on Select\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator Select\")\n\traise NotImplementedError(\"Select\")\n\ndef injectFaultMerge(inputs):\n\t\"Function to call injectFault on Merge\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator Merge\")\n\traise NotImplementedError(\"Merge\")\n\ndef injectFaultTranspose(inputs):\n\t\"Function to call injectFault on Transpose\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator Transpose\")\n\traise NotImplementedError(\"Transpose\")\n\ndef injectFaultGather(inputs):\n\t\"Function to call injectFault on Gather\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator Gather\")\n\traise NotImplementedError(\"Gather\")\n\ndef injectFaultUnsortedSegmentSum(inputs):\n\t\"Function to call injectFault on UnsortedSegmentSum\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator UnsortedSegmentSum\")\n\traise NotImplementedError(\"UnsortedSegmentSum\")\n\ndef injectFaultInvertPermutation(inputs):\n\t\"Function to call injectFault on InvertPermutation\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator InvertPermutation\")\n\traise NotImplementedError(\"InvertPermutation\")\n\t\ndef injectFaultApplyGradientDescent(inputs):\n\t\"Function to call injectFault on applying gradient descent\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator ApplyGradientDescent\")\n\traise NotImplementedError(\"ApplyGradientDescent\")\n\ndef injectFaultZerosLike(inputs):\n\t\"Function to call injectFault on ZerosLike\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator ZerosLike\")\n\traise NotImplementedError(\"ZerosLike\")\n\t\ndef 
injectFaultPreventGradient(inputs):\n\t\"Function to call injectFault on PreventGradient\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator PreventGradient\")\n\traise NotImplementedError(\"PreventGradient\")\n\t\ndef injectFaultSSSmcEWL(inputs):\n\t\"Function to call injectFault on SoftSparseMax..\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator SoftSparseMax\")\n\traise NotImplementedError(\"SoftSparseMax\")\n\t\ndef injectFaultAll(a):\n\t\"Function to call injectFault on All operation\"\n\t# FIXME: Implement this functionality\n\t# Not clear what this does - TF doc is silent about this\n\tlogging.debug(\"Calling Operator All\")\n\traise NotImplementedError(\"All\")\n\t\ndef injectFaultAssert(a):\n\t\"Function to call injectFault on Assert operation\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator Assert\")\n\traise NotImplementedError(\"Assert\")\n\t\ndef injectFaultLess(a):\n\t\"Function to call injectFault on Less operation\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator Less\")\n\traise NotImplementedError(\"Less\")\n\ndef injectFaultFSRHOP(a):\n\t\"Function to call Inject fault on FertileResource Op\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator FSRHOP\")\n\traise NotImplementedError(\"FSRHOP\")\n\ndef injectFaultL2Loss(a):\n\t\"Function to call Inject fault on L2Loss operation\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator L2Loss\")\n\traise NotImplementedError(\"L2Loss\")\n\ndef injectFaultApplyMomentum(a):\n\t\"Function to call Inject fault on ApplyMomentum operation\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator ApplyMomentum\")\n\traise NotImplementedError(\"ApplyMomentum\")\n\ndef injectFaultAssignAdd(a):\n\t\"Function to call Inject fault on AssignAdd operation\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator AssignAdd\")\n\traise NotImplementedError(\"AssignAdd\")\n\ndef injectFaultFloor(a):\n\t\"Function to call injectFault on Floor\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator Floor\")\n\traise NotImplementedError(\"Floor\")\n\ndef injectFaultSqueeze(a):\n\t\"Function to call injectFault on Squeeze\"\n\t# FIXME: Implement this functionality\n\tlogging.debug(\"Calling Operator Squeeze\")\n\traise NotImplementedError(\"Squeeze\")\n\n##### End of unimplemented functions ###################\n\t\n# This is the generic \"Catch-all\" function - it should be last\n# It takes a variable number of arguments in the inputs array\ndef injectFaultGeneric(*inputs):\n\t\"Generic Function to call fault injection on each input and zero it out\"\n\toutputs = []\n\tlogging.debug(\"Calling generic fiFunc on \" + str(inputs))\n\t# Perturb the input and add it to the outpus\n\t# FIXME: Should we NOT actually do the operation as well ??\n\t# For now, we don't do any injection at all at this function\n\n\tfor input in inputs:\n\t\toutputs.append( input )\n\tif logReturn: logging.debug(\"\\tReturning \" + str(outputs))\n\treturn outputs\n\n\n\n\n# End of injectFault operations\n\n# The functions in this table are the ones defined above\n# FIXME: These are fairly repetitive, so perhaps generate them automatically\n#\tAlso, maybe these should be sorted alphabetically - this is getting quite big\nopTable = { \n\t\t\t\"NoOp\" : injectFaultNoop,\t# First operation \n\t\t\t\"Add\": injectFaultAdd,\n\t\t\t\"Sub\": 
injectFaultSub,\n\t\t\t\"Mul\": injectFaultMul,\n\t\t\t\"Square\" : injectFaultSquare,\n\t\t\t\"Assign\" : injectFaultAssign,\t\n\t\t\t\"Identity\": injectFaultIdentity,\n\t\t\t\"Range\": injectFaultRange,\n\t\t\t\"Rank\": injectFaultRank,\n\t\t\t\"Sum\" : injectFaultSum,\n\t\t\t\"Shape\": injectFaultShape,\n\t\t\t\"Fill\": injectFaultFill,\n\t\t\t\"Size\": injectFaultSize,\n\t\t\t\"FloorMod\" : injectFaultFloorMod,\n\t\t\t\"DynamicStitch\" : injectFaultDynamicStitch,\n\t\t\t\"Maximum\" : injectFaultMaximum,\n\t\t\t\"Max\" : injectFaultMaximum,\t# FIXME: Not sure if Max is a synonymn of Maximum or a new operation\n\t\t\t\"Minimum\" : injectFaultMinimum,\n\t\t\t\"Min\" : injectFaultMinimum,\t# FIXME: Not sure if Min is a synonymn of Minimum or a new operation\n\t\t\t\"FloorDiv\" : injectFaultFloorDiv,\n\t\t\t\"Reshape\" : injectFaultReshape,\n\t\t\t\"OneHot\": injectFaultOneHot,\n\t\t\t\"Tile\" : injectFaultTile,\n\t\t\t\"ConcatV2\" : injectFaultConcatV2,\n\t\t\t\"ConcatOffset\" : injectFaultConcatOffset,\n\t\t\t\"BiasAdd\" : injectFaultBiasAdd,\n\t\t\t\"Split\" : injectFaultSplit,\n\t\t\t\"Sigmoid\" : injectFaultSigmoid,\n\t\t\t\"Tanh\" : injectFaultTanh,\n\t\t\t\"Softmax\" : injectFaultSoftmax,\n\t\t\t\"SoftmaxCrossEntropyWithLogits\" : injectFaultSoftmaxCEWL,\n\t\t\t\"Pack\" : injectFaultPack,\n\t\t\t\"Slice\" : injectFaultSlice,\n\t\t\t\"StridedSlice\" : injectFaultStridedSlice,\n\t\t\t\"BroadcastGradientArgs\" : injectFaultBroadcastGA,\n\t\t\t\"Neg\" : injectFaultNeg,\n\t\t\t\"Pow\" : injectFaultPow,\n\t\t\t\"Abs\" : injectFaultAbs,\n\t\t\t\"Unpack\": injectFaultUnpack,\n\t\t\t\"Unstack\": injectFaultUnstack,\n\t\t\t\"MatMul\" : injectFaultMatMul,\n\t\t\t\"ArgMax\" : injectFaultArgMax,\n\t\t\t\"ArgMin\" : injectFaultArgMin,\n\t\t\t\"Equal\" : injectFaultEqual,\n\t\t\t\"NotEqual\" : injectFaultNotEqual,\n\t\t\t\"LessEqual\" : injectFaultLessEqual,\n\t\t\t\"GreaterEqual\" : injectFaultGreaterEqual,\n\t\t\t\"TruncatedNormal\" : injectFaultTruncatedNormal,\n\t\t\t\"Conv2D\" : injectFaultConv2D,\n\t\t\t\"Relu\" : injectFaultRelu, \n\t\t\t\"MaxPool\" : injectFaultMaxPool, \n\t\t\t\"RandomUniform\" : injectFaultRandomUniform,\n\t\t\t\"RandomUniformInt\" : injectFaultRandomUniformInt,\n\t\t\t\"RandomStandardNormal\" : injectFaultRandomStandardNormal,\n\t\t\t\"Floor\" : injectFaultFloor,\n\t\t\t\"Rsqrt\" : injectFaultRsqrt,\n\t\t\t\"Log\" : injectFaultLog,\n\t\t\t\"RefSwitch\" : injectFaultRefSwitch,\n\t\t\t\"NearestNeighbors\" : injectFaultNN, \n\t\t\t\"Prod\" : injectFaultProd,\n\t\t\t\"Squeeze\" : injectFaultSqueeze,\n\t\t\t\"Unique\" : injectFaultUnique,\n\t\t\t\"Reciprocal\" : injectFaultReciprocal,\n\t\t\t\"ScatterAdd\" : injectFaultScatterAdd,\n\t\t\t\"ReluGrad\" : injectFaultReluGrad,\n\t\t\t\"MaxPoolGrad\" : injectFaultMaxPoolGrad,\n\t\t\t\"TanhGrad\" : injectFaultTanhGrad,\n\t\t\t\"SigmoidGrad\" : injectFaultSigmoidGrad,\n\t\t\t\"BiasAddGrad\" : injectFaultBiasAddGrad,\n\t\t\t\"ShapeN\" : injectFaultShapeN,\n\t\t\t\"AddN\" : injectFaultAddN,\n\t\t\t\"Conv2DBackpropInput\" : injectFaultConv2DBackprop,\n\t\t\t\"Conv2DBackpropFilter\" : injectFaultConv2DBackprop,\n\t\t\t\"ApplyAdam\" : injectFaultApplyAdam,\n\t\t\t\"Select\" : injectFaultSelect,\n\t\t\t\"Switch\" : injectFaultSwitch,\n\t\t\t\"Merge\" : injectFaultMerge,\n\t\t\t\"Transpose\" : injectFaultTranspose,\n\t\t\t\"Gather\" : injectFaultGather,\n\t\t\t\"UnsortedSegmentSum\" : injectFaultUnsortedSegmentSum,\n\t\t\t\"InvertPermutation\" : injectFaultInvertPermutation,\n\t\t\t# Casts are treated differently, so don't add them to 
this table ! See createInjectFaultCast\n\t\t\t# \"Cast\" : injectFaultCast,\t\t\n\t\t\t\"Mean\" : injectFaultMean,\n\t\t\t\"Count_nonzero\" : injectFaultCountNonZero,\n\t\t\t\"RealDiv\" : injectFaultRealDiv,\n\t\t\t\"Greater\" : injectFaultGreater,\n\t\t\t\"ApplyGradientDescent\" : injectFaultApplyGradientDescent,\n\t\t\t\"ZerosLike\" : injectFaultZerosLike,\n\t\t\t\"PreventGradient\" : injectFaultPreventGradient,\n\t\t\t\"ExpandDims\" : injectFaultExpandDims,\n\t\t\t\"SparseSoftmaxCrossEntropyWithLogits\" : injectFaultSSSmcEWL,\n\t\t\t\"All\" : injectFaultAll,\n\t\t\t\"Assert\" : injectFaultAssert,\n\t\t\t\"Less\" : injectFaultLess,\n\t\t\t\"FertileStatsResourceHandleOp\" : injectFaultFSRHOP,\n\t\t\t\"L2Loss\" : injectFaultL2Loss,\n\t\t\t\"ApplyMomentum\" : injectFaultApplyMomentum,\n\t\t\t\"AssignAdd\" : injectFaultAssignAdd,\n\t\t\t\"LRN\" : injectFaultLRN,\n\t\t\t\"Elu\" : injectFaultELU,\n\t\t\t\"Unknown\": injectFaultGeneric\t\t# Last operation\n\t\t\t# \"Unknown\": None\t\t\t# For debugging purposes\n\t\t}\t\n\n", "id": "5469728", "language": "Python", "matching_score": 3.72737717628479, "max_stars_count": 0, "path": "TensorFI/injectFault.py" }, { "content": "'''\nAlexNet implementation example using TensorFlow library.\nThis example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)\nAlexNet Paper (http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf)\nAuthor: <NAME>\nProject: https://github.com/aymericdamien/TensorFlow-Examples/\n'''\n\n# suppress warnings\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimport yaml\nyaml.warnings({'YAMLLoadWarning': False})\n\nimport tensorflow as tf\nimport TensorFI as ti\nimport sys\n\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n\n# set logging folder\nfrom globals import TESTSUITE_DIR\nlogDir = TESTSUITE_DIR + \"/faultLogs/\"\nconfFile = TESTSUITE_DIR + \"/confFiles/injections_config.yaml\"\n\n# Parameters\nlearning_rate = 0.001\ntraining_iters = 10000\nbatch_size = 64\ndisplay_step = 20\n\n# Network Parameters\nn_input = 784 # MNIST data input (img shape: 28*28)\nn_classes = 10 # MNIST total classes (0-9 digits)\ndropout = 0.8 # Dropout, probability to keep units\n\n# tf Graph input\nx = tf.placeholder(tf.float32, [None, n_input])\ny = tf.placeholder(tf.float32, [None, n_classes])\nkeep_prob = tf.placeholder(tf.float32) # dropout (keep probability)\n\n# Create AlexNet model\ndef conv2d(name, l_input, w, b):\n return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(l_input, w, strides=[1, 1, 1, 1], padding='SAME'),b), name=name)\n\ndef max_pool(name, l_input, k):\n return tf.nn.max_pool(l_input, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME', name=name)\n\ndef norm(name, l_input, lsize=4):\n return tf.nn.lrn(l_input, lsize, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name=name)\n\ndef alex_net(_X, _weights, _biases, _dropout):\n # Reshape input picture\n _X = tf.reshape(_X, shape=[-1, 28, 28, 1])\n\n # Convolution Layer\n conv1 = conv2d('conv1', _X, _weights['wc1'], _biases['bc1'])\n # Max Pooling (down-sampling)\n pool1 = max_pool('pool1', conv1, k=2)\n # Apply Normalization\n norm1 = norm('norm1', pool1, lsize=4)\n # Apply Dropout\n norm1 = tf.nn.dropout(norm1, _dropout)\n\n # Convolution Layer\n conv2 = conv2d('conv2', norm1, _weights['wc2'], _biases['bc2'])\n # Max Pooling (down-sampling)\n pool2 = max_pool('pool2', conv2, k=2)\n # Apply Normalization\n norm2 = norm('norm2', pool2, lsize=4)\n # Apply Dropout\n norm2 = 
tf.nn.dropout(norm2, _dropout)\n\n # Convolution Layer\n conv3 = conv2d('conv3', norm2, _weights['wc3'], _biases['bc3'])\n # Max Pooling (down-sampling)\n pool3 = max_pool('pool3', conv3, k=2)\n # Apply Normalization\n norm3 = norm('norm3', pool3, lsize=4)\n # Apply Dropout\n norm3 = tf.nn.dropout(norm3, _dropout)\n\n # Fully connected layer\n dense1 = tf.reshape(norm3, [-1, _weights['wd1'].get_shape().as_list()[0]]) # Reshape conv3 output to fit dense layer input\n dense1 = tf.nn.relu(tf.matmul(dense1, _weights['wd1']) + _biases['bd1'], name='fc1') # Relu activation\n\n dense2 = tf.nn.relu(tf.matmul(dense1, _weights['wd2']) + _biases['bd2'], name='fc2') # Relu activation\n\n # Output, class prediction\n out = tf.matmul(dense2, _weights['out']) + _biases['out']\n return out\n\ndef run_test(suppress_out=False):\n\n if suppress_out:\n sys.stdout = open(os.devnull, \"w\")\n sys.stderr = open(os.devnull, \"w\")\n\n # Import MINST data\n from tensorflow.examples.tutorials.mnist import input_data\n mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n \n # Store layers weight & bias\n weights = {\n 'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64])),\n 'wc2': tf.Variable(tf.random_normal([3, 3, 64, 128])),\n 'wc3': tf.Variable(tf.random_normal([3, 3, 128, 256])),\n 'wd1': tf.Variable(tf.random_normal([4*4*256, 1024])),\n 'wd2': tf.Variable(tf.random_normal([1024, 1024])),\n 'out': tf.Variable(tf.random_normal([1024, 10]))\n }\n biases = {\n 'bc1': tf.Variable(tf.random_normal([64])),\n 'bc2': tf.Variable(tf.random_normal([128])),\n 'bc3': tf.Variable(tf.random_normal([256])),\n 'bd1': tf.Variable(tf.random_normal([1024])),\n 'bd2': tf.Variable(tf.random_normal([1024])),\n 'out': tf.Variable(tf.random_normal([n_classes]))\n }\n\n # Construct model\n pred = alex_net(x, weights, biases, keep_prob)\n\n # Define loss and optimizer\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n\n # Evaluate model\n correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n # Initializing the variables\n init = tf.initialize_all_variables()\n\n passed_bool = None\n\n # Launch the graph\n with tf.Session() as sess:\n sess.run(init)\n step = 1\n # Keep training until reach max iterations\n while step * batch_size < training_iters:\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n # Fit training using batch data\n sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout})\n if step % display_step == 0:\n # Calculate batch accuracy\n acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})\n # Calculate batch loss\n loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})\n print \"Iter \" + str(step*batch_size) + \", Minibatch Loss= \" + \"{:.6f}\".format(loss) + \", Training Accuracy= \" + \"{:.5f}\".format(acc)\n step += 1\n print \"Optimization Finished!\"\n # Calculate accuracy for 256 mnist test images\n acc1 = sess.run(accuracy, feed_dict={x: mnist.test.images[:256], y: mnist.test.labels[:256], keep_prob: 1.})\n print \"Testing Accuracy:\", acc1\n fi = ti.TensorFI(sess, name = \"lenet\", disableInjections=False, logDir=logDir)\n acc2 = sess.run(accuracy, feed_dict={x: mnist.test.images[:256], y: mnist.test.labels[:256], keep_prob: 1.})\n print \"Testing Accuracy:\", acc2\n if acc1 == acc2:\n passed_bool = True\n else:\n passed_bool = False\n \n if 
suppress_out:\n sys.stdout = sys.__stdout__\n sys.stderr = sys.__stderr__\n\n return passed_bool\n\nif __name__ == \"__main__\":\n passed_bool = run_test()\n\n print \"\\n\\nTEST RESULTS\"\n if passed_bool:\n print \"Test passed\"\n else:\n print \"Test failed\"\n", "id": "12586683", "language": "Python", "matching_score": 7.341583251953125, "max_stars_count": 35, "path": "testSuite/injections_alexnet_mnist.py" }, { "content": "#!/usr/bin/python\n\n'''\nA logistic regression learning algorithm example using TensorFlow library.\nThis example is using the MNIST database of handwritten digits\n(http://yann.lecun.com/exdb/mnist/)\n\nAuthor: <NAME>\nProject: https://github.com/aymericdamien/TensorFlow-Examples/\n'''\n\n# set logging folder\nfrom globals import TESTSUITE_DIR\nlogDir = TESTSUITE_DIR + \"/faultLogs/\"\nconfFile = TESTSUITE_DIR + \"/confFiles/***_config.yaml\" #specify the config file\n# ***the call to TensorFI to instrument the model must include these directories, e.g., fi = ti.TensorFI(..., configFileName= confFile, logDir=logDir)\n\n# suppress warnings\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimport yaml\nyaml.warnings({'YAMLLoadWarning': False})\n\nimport tensorflow as tf\nimport TensorFI as ti\nimport sys\n\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n\ndef run_test(suppress_out=False):\n\n if suppress_out:\n sys.stdout = open(os.devnull, \"w\")\n sys.stderr = open(os.devnull, \"w\")\n\n # Import MNIST data\n from tensorflow.examples.tutorials.mnist import input_data\n mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n\n # Parameters\n learning_rate = 0.01\n training_epochs = 25\n batch_size = 100\n display_step = 1\n\n # tf Graph Input\n x = tf.placeholder(tf.float32, [None, 784]) # mnist data image of shape 28*28=784\n y = tf.placeholder(tf.float32, [None, 10]) # 0-9 digits recognition => 10 classes\n\n # Set model weights\n W = tf.Variable(tf.zeros([784, 10]))\n b = tf.Variable(tf.zeros([10]))\n\n # Construct model\n pred = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax\n\n # Minimize error using cross entropy\n cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))\n # Gradient Descent\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n\n # Initialize the variables (i.e. 
assign their default value)\n init = tf.global_variables_initializer()\n\n # Start training\n with tf.Session() as sess:\n\n # Run the initializer\n sess.run(init)\n\n # Training cycle\n for epoch in range(training_epochs):\n avg_cost = 0.\n total_batch = int(mnist.train.num_examples/batch_size)\n # Loop over all batches\n for i in range(total_batch):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n # Run optimization op (backprop) and cost op (to get loss value)\n _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs,\n y: batch_ys})\n # Compute average loss\n avg_cost += c / total_batch\n # Display logs per epoch step\n if (epoch+1) % display_step == 0:\n print \"Epoch: \" + str(epoch+1) + \", cost = \" + \"{:.9f}\".format(avg_cost)\n\n print \"Optimization Finished!\"\n\n # Test model\n correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n \n # Calculate accuracy (before fault injections)\n acc_before = accuracy.eval({x: mnist.test.images, y: mnist.test.labels})\n print \"Accuracy (before instrumentation): \" + str(acc_before)\n \n # Instrument the graph for fault injection \n fi = ti.TensorFI(sess, name = \"logistReg\", logLevel = 30, disableInjections = True, logDir=logDir)\n \n # Calculate accuracy (with no fault injections)\n acc_nofi = accuracy.eval({x: mnist.test.images, y: mnist.test.labels})\n print \"Accuracy (with no injections):\" + str(acc_nofi)\n \n # Make the log files in TensorBoard\t\n logs_path = \"./logs\"\n logWriter = tf.summary.FileWriter( logs_path, sess.graph )\n\n # Calculate accuracy (with fault injections)\n fi.turnOnInjections()\n acc_fi = accuracy.eval({x: mnist.test.images, y: mnist.test.labels})\n print \"Accuracy (with injections):\" + str(acc_fi)\n\n if acc_nofi == acc_fi:\n passed_bool = True\n else:\n passed_bool = False\n\n if suppress_out:\n sys.stdout = sys.__stdout__\n sys.stderr = sys.__stderr__\n \n return passed_bool # set this based on the test requirements (True if the test passed, False otherwise)\n\nif __name__ == \"__main__\":\n passed_bool = run_test()\n\n print \"\\n\\nTEST RESULTS\"\n if passed_bool:\n print \"Test passed\"\n else:\n print \"Test failed\"\n", "id": "719473", "language": "Python", "matching_score": 7.247441291809082, "max_stars_count": 35, "path": "testSuite/injections_logistic_regression.py" }, { "content": "#!/usr/bin/python\n'''\nA nearest neighbor learning algorithm example using TensorFlow library.\nThis example is using the MNIST database of handwritten digits\n(http://yann.lecun.com/exdb/mnist/)\n\nAuthor: <NAME>\nProject: https://github.com/aymericdamien/TensorFlow-Examples/\n'''\n\n# NOTE: please define how this test \"passes\" (at the moment, passed_bool is always True)\n# once this is done, include in runAll.py\n\nimport numpy as np\n\n# set logging folder\nfrom globals import TESTSUITE_DIR\nlogDir = TESTSUITE_DIR + \"/faultLogs/\"\nconfFile = TESTSUITE_DIR + \"/confFiles/***_config.yaml\" #specify the config file\n# ***the call to TensorFI to instrument the model must include these directories, e.g., fi = ti.TensorFI(..., configFileName= confFile, logDir=logDir)\n\n# suppress warnings\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimport yaml\nyaml.warnings({'YAMLLoadWarning': False})\n\nimport tensorflow as tf\nimport TensorFI as ti\nimport sys\n\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n\ndef run_test(suppress_out=False):\n\n if suppress_out:\n sys.stdout = open(os.devnull, \"w\")\n 
sys.stderr = open(os.devnull, \"w\")\n \n # Import MNIST data\n from tensorflow.examples.tutorials.mnist import input_data\n mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n\n # In this example, we limit mnist data\n Xtr, Ytr = mnist.train.next_batch(5000) #5000 for training (nn candidates)\n Xte, Yte = mnist.test.next_batch(200) #200 for testing\n\n # tf Graph Input\n xtr = tf.placeholder(\"float\", [None, 784])\n xte = tf.placeholder(\"float\", [784])\n\n # Nearest Neighbor calculation using L1 Distance\n # Calculate L1 Distance\n distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))), reduction_indices=1)\n # Prediction: Get min distance index (Nearest neighbor)\n pred = tf.arg_min(distance, 0)\n\n accuracy = 0.\n\n # Initialize the variables (i.e. assign their default value)\n init = tf.global_variables_initializer()\n\n # Start training\n with tf.Session() as sess:\n\n # Run the initializer\n sess.run(init)\n\n # Add the fault injection code here to instrument the graph\n # We start injecting the fault right away here unlike earlier\n fi = ti.TensorFI(sess, name = \"NearestNeighbor\", logLevel = 50, logDir=logDir)\n \n # loop over test data\n for i in range(len(Xte)):\n # Get nearest neighbor\n nn_index = sess.run(pred, feed_dict={xtr: Xtr, xte: Xte[i, :]})\n # Get nearest neighbor class label and compare it to its true label\n print \"Test \" + str(i) + \", Prediction: \" + str(np.argmax(Ytr[nn_index])) + \", True Class: \" + str(np.argmax(Yte[i]))\n # Calculate accuracy\n if np.argmax(Ytr[nn_index]) == np.argmax(Yte[i]):\n accuracy += 1./len(Xte)\n print \"Accuracy:\" + str(accuracy)\n\n # Make the log files in TensorBoard\t\n logs_path = \"./logs\"\n logWriter = tf.summary.FileWriter( logs_path, sess.graph )\n\n passed_bool = True\n\n if suppress_out:\n sys.stdout = sys.__stdout__\n sys.stderr = sys.__stderr__\n \n return passed_bool # set this based on the test requirements (True if the test passed, False otherwise)\n\nif __name__ == \"__main__\":\n passed_bool = run_test()\n\n print \"\\n\\nTEST RESULTS\"\n if passed_bool:\n print \"Test passed\"\n else:\n print \"Test failed\"\n", "id": "9006869", "language": "Python", "matching_score": 4.401123046875, "max_stars_count": 35, "path": "testSuite/SEE_NOTE_injections_nearest_neighbor.py" }, { "content": "#!/usr/bin/python\n\n# Example 4 from TensorFlow tutorial \n\n# set logging folder\nfrom globals import TESTSUITE_DIR\nlogDir = TESTSUITE_DIR + \"/faultLogs/\"\nconfFile = TESTSUITE_DIR + \"/confFiles/injections_config.yaml\" #specify the config file\n# ***the call to TensorFI to instrument the model must include these directories, e.g., fi = ti.TensorFI(..., configFileName= confFile, logDir=logDir)\n\n# suppress warnings\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimport yaml\nyaml.warnings({'YAMLLoadWarning': False})\n\nimport tensorflow as tf\nimport TensorFI as ti\nimport sys\n\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n\ndef run_test(suppress_out=False):\n\n\tif suppress_out:\n\t\tsys.stdout = open(os.devnull, \"w\")\n\t\tsys.stderr = open(os.devnull, \"w\")\n\n\t# Create 2 variables for keeping track of weights\n\tW = tf.Variable([.3], dtype=tf.float32)\n\tb = tf.Variable([-.3], dtype=tf.float32)\n\n\t# Create a placeholder for inputs, and a linear model\n\tx = tf.placeholder(tf.float32)\n\ty = tf.placeholder(tf.float32)\n\tlinear_model = W*x + b\n\n\t# Calculate the error as the sum of square of the dviations from the linear model \n\tsquared_deltas = tf.square( 
linear_model - y )\n\terror = tf.reduce_sum(squared_deltas)\n\n\t# Initialize a gradient descent optimizer to minimize errors\n\toptimizer = tf.train.GradientDescentOptimizer(0.01)\n\ttrain = optimizer.minimize(error)\n\n\t# Training data for x and y\n\tx_train = [1, 2, 3, 4]\n\ty_train = [0, -1, -2, -3]\n\n\t# Create a session, initialize variables\n\ts = tf.Session()\n\tinit = tf.global_variables_initializer()\n\ts.run(init)\n\n\t# Run the initial model\n\tcurr_W, curr_b, curr_error = s.run([W, b, error], {x: x_train, y: y_train})\n\tprint \"After initialization\\tW: \" + str(curr_W) + \" b: \" + str(curr_b) + \" error: \" + str(curr_error)\n\n\t# Iterate to train the model\n\tsteps = 1000\n\tfor i in range(steps):\n\t\ts.run( train, {x: x_train, y:y_train} )\n\n\tcurr_W, curr_b, curr_error = s.run([W, b, error], {x: x_train, y: y_train})\n\tprint \"No injections\\tW: \" + str(curr_W) + \" b: \" + str(curr_b) + \" error: \" + str(curr_error)\n\n\t# Instrument the session\n\tfi = ti.TensorFI(s, logDir=logDir)\n\n\t# Create a log for visualizing in TensorBoard (during training)\n\tlogs_path = \"./logs\"\n\tlogWriter = tf.summary.FileWriter( logs_path, s.graph )\n\n\t# Turn off the injections during the first run\n\tfi.turnOffInjections()\n\n\t# Run the trained model without fault injections\n\tcurr_W, curr_b, curr_error = s.run([W, b, error], {x: x_train, y: y_train})\n\tcurr_W_A = curr_W\n\tcurr_b_A = curr_b\n\tcurr_error_A = curr_error\n\tprint \"Before injections\\tW: \" + str(curr_W) + \" b: \" + str(curr_b) + \" error: \" + str(curr_error)\n\n\t# Turn on the injections during running\n\tfi.turnOnInjections()\n\n\t# Run the trained model with the fault injected functions from the cached run\n\tcurr_W, curr_b, curr_error = s.run(useCached = True)\n\tcurr_W_B = curr_W\n\tcurr_b_B = curr_b\n\tcurr_error_B = curr_error\n\tprint \"After injections\\tW: \" + str(curr_W) + \" b: \" + str(curr_b) + \" error: \" + str(curr_error)\n\t\n\tif suppress_out:\n\t\tsys.stdout = sys.__stdout__\n\t\tsys.stderr = sys.__stderr__\n\t\n\tif curr_W_A == curr_W_B and curr_b_A == curr_b_B:\n\t\tpassed_bool = True\n\telse:\n\t\tpassed_bool = False\n\n\treturn passed_bool\n\n\nif __name__ == \"__main__\":\n passed_bool = run_test()\n\n print \"\\n\\nTEST RESULTS\"\n if passed_bool:\n print \"Test passed\"\n else:\n print \"Test failed\"\n", "id": "12501586", "language": "Python", "matching_score": 3.8907458782196045, "max_stars_count": 35, "path": "testSuite/injections_gradient.py" }, { "content": "#!/usr/bin/python\n\n# set logging folder\nfrom globals import TESTSUITE_DIR\nlogDir = TESTSUITE_DIR + \"/faultLogs/\"\nconfFile = TESTSUITE_DIR + \"/confFiles/***_config.yaml\" #specify the config file\n# ***the call to TensorFI to instrument the model must include these directories, e.g., fi = ti.TensorFI(..., configFileName= confFile, logDir=logDir)\n\n# suppress warnings\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimport yaml\nyaml.warnings({'YAMLLoadWarning': False})\n\nimport tensorflow as tf\nimport TensorFI as ti\nimport sys\n\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n\ndef run_test(suppress_out=False):\n\n if suppress_out:\n sys.stdout = open(os.devnull, \"w\")\n sys.stderr = open(os.devnull, \"w\")\n\n # Create 2 variables for keeping track of weights\n W = tf.Variable([.3], dtype=tf.float32)\n b = tf.Variable([-.3], dtype=tf.float32)\n\n # Create a placeholder for inputs, and a linear model\n x = tf.placeholder(tf.float32)\n y = tf.placeholder(tf.float32)\n linear_model = W*x 
+ b\n squared_deltas = tf.square( linear_model - y )\n loss = tf.reduce_sum(squared_deltas)\n\n init = tf.global_variables_initializer()\n\n # Create a session, initialize variables and run the linear model\n s = tf.Session()\n init_nofi = s.run(init)\n print \"Initial : \" + str(init_nofi)\n model_nofi = s.run(linear_model, { x: [1, 2, 3, 4] })\n print \"Linear Model : \" + str(model_nofi)\n loss_nofi = s.run(loss, { x: [1, 2, 3, 4], y: [0, -1, -2, -3] })\n print \"Loss Function : \" + str(loss_nofi)\n\n # Instrument the session\n fi = ti.TensorFI(s, logDir=logDir)\n\n # Create a log for visualizng in TensorBoard\n logs_path = \"./logs\"\n logWriter = tf.summary.FileWriter( logs_path, s.graph )\n\n # initialize variables and run the linear model\n init_fi = s.run(init)\n print \"Initial : \" + str(init_fi)\n model_fi = s.run(linear_model, { x: [1, 2, 3, 4] })\n print \"Linear Model : \" + str(model_fi)\n loss_fi = s.run(loss, { x: [1, 2, 3, 4], y: [0, -1, -2, -3] })\n print \"Loss Function : \" + str(loss_fi)\n\n if loss_nofi == loss_fi:\n passed_bool = True\n else:\n passed_bool = False\n\n if suppress_out:\n sys.stdout = sys.__stdout__\n sys.stderr = sys.__stderr__\n\n return passed_bool # set this based on the test requirements (True if the test passed, False otherwise)\n\nif __name__ == \"__main__\":\n passed_bool = run_test()\n\n print \"\\n\\nTEST RESULTS\"\n if passed_bool:\n print \"Test passed\"\n else:\n print \"Test failed\"\n", "id": "7555626", "language": "Python", "matching_score": 5.317303657531738, "max_stars_count": 35, "path": "testSuite/injections_loss.py" }, { "content": "#!/usr/bin/python\n\n# Example 4 from TensorFlow tutorial \n\nfrom __future__ import print_function\nimport sys\nimport tensorflow as tf\nsys.path.append(\"/data/gpli/tensorfi/TensorFI/TensorFI/\")\nimport TensorFI as ti\n\n# Create 2 variables for keeping track of weights\nW = tf.Variable([.3], dtype=tf.float32)\nb = tf.Variable([-.3], dtype=tf.float32)\n\n# Create a placeholder for inputs, and a linear model\nx = tf.placeholder(tf.float32)\ny = tf.placeholder(tf.float32)\nlinear_model = W*x + b\nsquared_deltas = tf.square( linear_model - y )\nloss = tf.reduce_sum(squared_deltas)\n\ninit = tf.global_variables_initializer()\n\n# Create a session, initialize variables and run the linear model\ns = tf.Session()\nprint(\"Initial : \", s.run(init))\nprint(\"Linear Model : \", s.run(linear_model, { x: [1, 2, 3, 4] }))\nprint(\"Loss Function : \", s.run(loss, { x: [1, 2, 3, 4], y: [0, -1, -2, -3] }))\n\n# Instrument the session\nfi = ti.TensorFI(s)\n\n# Create a log for visualizng in TensorBoard\nlogs_path = \"./logs\"\nlogWriter = tf.summary.FileWriter( logs_path, s.graph )\n\n# Create a session, initialize variables and run the linear model\nprint(\"Initial : \", s.run(init))\nprint(\"Linear Model : \", s.run(linear_model, { x: [1, 2, 3, 4] }))\nprint(\"Loss Function : \", s.run(loss, { x: [1, 2, 3, 4], y: [0, -1, -2, -3] }))\n", "id": "12237958", "language": "Python", "matching_score": 2.053499698638916, "max_stars_count": 35, "path": "Tests/loss.py" }, { "content": "#!/usr/bin/python\n\n# Example2 from TensorFlow tutorial \n\nfrom __future__ import print_function\nimport tensorflow as tf\nimport TensorFI as ti\n\na = tf.placeholder(tf.float32, name=\"a\")\nb = tf.placeholder(tf.float32, name=\"b\")\nadder = tf.add(a, b, name=\"adder\") # Use this syntax for name\naddTriple = 3 * adder\n\nsess = tf.Session()\n\n# Run the session with scalars and tensors\nprint( sess.run( addTriple, { a:3, b:4.5 } ) 
)\nprint( sess.run( addTriple, { a:[3,1], b:[4,5] } ) )\n\n# Instrument the session\nfi = ti.TensorFI(sess)\n\n# Run the above session commands with fault injections\nprint( sess.run( addTriple, { a:3, b:4.5 } ) )\nprint( sess.run( addTriple, { a:[3,1], b:[4,5] } ) )\n\n# Create a log for visualizing in TensorBoard\nlogs_path = \"./logs\"\nlogWriter = tf.summary.FileWriter( logs_path, sess.graph )\n\n", "id": "3758042", "language": "Python", "matching_score": 1.4500125646591187, "max_stars_count": 35, "path": "Tests/placeholder.py" }, { "content": "#/usr/bin/python\n\n# Example1 from TensorFlow tutorial \n\nfrom __future__ import print_function\nimport sys\n\nimport tensorflow as tf\nimport TensorFI as ti\n\nnode1 = tf.constant(3, dtype=tf.float64)\nnode2 = tf.constant(4, dtype=tf.float64)\n\nprint(\"Node1 = \", node1)\nprint(\"Node 2 = \", node2)\nnode3 = tf.add(node1, node2, name = \"add1\")\nprint(\"Node3 = \", node3)\n\ns = tf.Session()\n\n# Run it first\nres1 = s.run([ node3 ])\nprint(\"res1 = \", res1)\n\n# Instrument the FI session \nfi = ti.TensorFI(s, logLevel = 100)\n\n# Create a log for visualizing in TensorBoard\nlogs_path = \"./logs\"\nlogWriter = tf.summary.FileWriter( logs_path, s.graph )\n\n# Run it again with fault injection enabled\nres2 = s.run([ node3 ])\nprint(\"res2 = \", res2)\n\n", "id": "6700059", "language": "Python", "matching_score": 3.413322687149048, "max_stars_count": 35, "path": "Tests/constant.py" }, { "content": "#/usr/bin/python\n\n# Example 0 - Dummy NoOp operation\n\nfrom __future__ import print_function\nimport sys\n\nimport tensorflow as tf\nimport TensorFI as ti\n\nnode = tf.no_op()\n\nprint(\"Node = \", node)\n\ns = tf.Session()\n\n# Run it first\nres1 = s.run([ node ])\nprint(\"res1 = \", res1)\n\n# Instrument the FI session \nfi = ti.TensorFI(s, logLevel = 0)\n\n# Create a log for visualizing in TensorBoard\nlogs_path = \"./logs\"\nlogWriter = tf.summary.FileWriter( logs_path, s.graph )\n\n# Run it again with fault injection enabled\nres2 = s.run([ node ])\nprint(\"res2 = \", res2)\n\n", "id": "3037579", "language": "Python", "matching_score": 3.058645486831665, "max_stars_count": 35, "path": "Tests/noop.py" } ]
3.809062
objektwerks
[ { "content": "\"\"\"\nBoltzmann Machine test on movie data.\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn.parallel\nimport torch.utils.data\n\nmovies = pd.read_csv('./../data/movies/movies.dat', sep='::', header=None, engine='python', encoding='latin-1')\nusers = pd.read_csv('./../data/movies/users.dat', sep='::', header=None, engine='python', encoding='latin-1')\nratings = pd.read_csv('./../data/movies/ratings.dat', sep='::', header=None, engine='python', encoding='latin-1')\n\ntraining_set = pd.read_csv('./../data/movies/u1.base', delimiter='\\t')\ntraining_set = np.array(training_set, dtype='int')\ntest_set = pd.read_csv('./../data/movies/u1.test', delimiter='\\t')\ntest_set = np.array(test_set, dtype='int')\n\nnb_users = int(max(max(training_set[:, 0]), max(test_set[:, 0])))\nnb_movies = int(max(max(training_set[:, 1]), max(test_set[:, 1])))\n\n\ndef convert(data):\n new_data = []\n for id_users in range(1, nb_users + 1):\n id_movies = data[:, 1][data[:, 0] == id_users]\n id_ratings = data[:, 2][data[:, 0] == id_users]\n ratings = np.zeros(nb_movies)\n ratings[id_movies - 1] = id_ratings\n new_data.append(list(ratings))\n return new_data\n\n\ntraining_set = convert(training_set)\ntest_set = convert(test_set)\n\ntraining_set = torch.FloatTensor(training_set)\ntest_set = torch.FloatTensor(test_set)\n\ntraining_set[training_set == 0] = -1\ntraining_set[training_set == 1] = 0\ntraining_set[training_set == 2] = 0\ntraining_set[training_set >= 3] = 1\ntest_set[test_set == 0] = -1\ntest_set[test_set == 1] = 0\ntest_set[test_set == 2] = 0\ntest_set[test_set >= 3] = 1\n\n\nclass RBM():\n def __init__(self, nv, nh):\n self.W = torch.randn(nh, nv)\n self.a = torch.randn(1, nh)\n self.b = torch.randn(1, nv)\n\n def sample_h(self, x):\n wx = torch.mm(x, self.W.t())\n activation = wx + self.a.expand_as(wx)\n p_h_given_v = torch.sigmoid(activation)\n return p_h_given_v, torch.bernoulli(p_h_given_v)\n\n def sample_v(self, y):\n wy = torch.mm(y, self.W)\n activation = wy + self.b.expand_as(wy)\n p_v_given_h = torch.sigmoid(activation)\n return p_v_given_h, torch.bernoulli(p_v_given_h)\n\n def train(self, v0, vk, ph0, phk):\n self.W += torch.mm(v0.t(), ph0) - torch.mm(vk.t(), phk)\n self.b += torch.sum((v0 - vk), 0)\n self.a += torch.sum((ph0 - phk), 0)\n\n\nnv = len(training_set[0])\nnh = 100\nbatch_size = 100\nrbm = RBM(nv, nh)\n\n# Train RBM\nnb_epoch = 10\nfor epoch in range(1, nb_epoch + 1):\n train_loss = 0\n s = 0.\n for id_user in range(0, nb_users - batch_size, batch_size):\n vk = training_set[id_user:id_user + batch_size]\n v0 = training_set[id_user:id_user + batch_size]\n ph0, _ = rbm.sample_h(v0)\n for k in range(10):\n _, hk = rbm.sample_h(vk)\n _, vk = rbm.sample_v(hk)\n vk[v0 < 0] = v0[v0 < 0]\n phk, _ = rbm.sample_h(vk)\n rbm.train(v0, vk, ph0, phk)\n train_loss += torch.mean(torch.abs(v0[v0 >= 0] - vk[v0 >= 0]))\n s += 1.\n print('epoch: ' + str(epoch) + ' loss: ' + str(train_loss / s))\n\n# Test RBM\ntest_loss = 0\ns = 0.\nfor id_user in range(nb_users):\n v = training_set[id_user:id_user + 1]\n vt = test_set[id_user:id_user + 1]\n if len(vt[vt >= 0]) > 0:\n _, h = rbm.sample_h(v)\n _, v = rbm.sample_v(h)\n test_loss += torch.mean(torch.abs(vt[vt >= 0] - v[vt >= 0]))\n s += 1.\nprint('test loss: ' + str(test_loss / s))\n", "id": "11401067", "language": "Python", "matching_score": 3.938074827194214, "max_stars_count": 0, "path": "dl/boltzman.machines.py" }, { "content": "\"\"\"\nAutoEncoder test on movie data.\n\"\"\"\nimport numpy as np\nimport 
pandas as pd\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.utils.data\nfrom torch.autograd import Variable\n\nmovies = pd.read_csv('./../data/movies/movies.dat', sep='::', header=None, engine='python', encoding='latin-1')\nusers = pd.read_csv('./../data/movies/users.dat', sep='::', header=None, engine='python', encoding='latin-1')\nratings = pd.read_csv('./../data/movies/ratings.dat', sep='::', header=None, engine='python', encoding='latin-1')\n\ntraining_set = pd.read_csv('./../data/movies/u1.base', delimiter='\\t')\ntraining_set = np.array(training_set, dtype='int')\ntest_set = pd.read_csv('./../data/movies/u1.test', delimiter='\\t')\ntest_set = np.array(test_set, dtype='int')\n\nnb_users = int(max(max(training_set[:, 0]), max(test_set[:, 0])))\nnb_movies = int(max(max(training_set[:, 1]), max(test_set[:, 1])))\n\n\ndef convert(data):\n new_data = []\n for id_users in range(1, nb_users + 1):\n id_movies = data[:, 1][data[:, 0] == id_users]\n id_ratings = data[:, 2][data[:, 0] == id_users]\n ratings = np.zeros(nb_movies)\n ratings[id_movies - 1] = id_ratings\n new_data.append(list(ratings))\n return new_data\n\n\ntraining_set = convert(training_set)\ntest_set = convert(test_set)\n\n\nclass SAE(nn.Module):\n def __init__(self, ):\n super(SAE, self).__init__()\n self.fc1 = nn.Linear(nb_movies, 20)\n self.fc2 = nn.Linear(20, 10)\n self.fc3 = nn.Linear(10, 20)\n self.fc4 = nn.Linear(20, nb_movies)\n self.activation = nn.Sigmoid()\n\n def forward(self, x):\n x = self.activation(self.fc1(x))\n x = self.activation(self.fc2(x))\n x = self.activation(self.fc3(x))\n x = self.fc4(x)\n return x\n\n\nsae = SAE()\ncriterion = nn.MSELoss()\noptimizer = optim.RMSprop(sae.parameters(), lr=0.01, weight_decay=0.5)\n\n# Train SAE\nnb_epoch = 10\nfor epoch in range(1, nb_epoch + 1):\n train_loss = 0\n s = 0.\n for id_user in range(nb_users):\n source = Variable(torch.FloatTensor(training_set[id_user])).unsqueeze(0)\n target = source.clone()\n if torch.sum(target.data > 0) > 0:\n sink = sae(source)\n target.require_grad = False\n sink[target == 0] = 0\n loss = criterion(sink, target)\n mean_corrector = nb_movies / float(torch.sum(target.data > 0) + 1e-10)\n loss.backward()\n train_loss += np.sqrt(loss.data[0] * mean_corrector)\n s += 1.\n optimizer.step()\n print('epoch: ' + str(epoch) + ' loss: ' + str(train_loss / s))\n\n# Test SAE\ntest_loss = 0\ns = 0.\nfor id_user in range(nb_users):\n source = Variable(torch.FloatTensor(training_set[id_user])).unsqueeze(0)\n target = Variable(torch.FloatTensor(training_set[id_user]))\n if torch.sum(target.data > 0) > 0:\n sink = sae(source)\n target.require_grad = False\n sink[target == 0] = 0\n loss = criterion(sink, target)\n mean_corrector = nb_movies / float(torch.sum(target.data > 0) + 1e-10)\n test_loss += np.sqrt(loss.data[0] * mean_corrector)\n s += 1.\nprint('test loss: ' + str(test_loss / s))\n", "id": "5152989", "language": "Python", "matching_score": 1.5528254508972168, "max_stars_count": 0, "path": "dl/auto.encoders.py" }, { "content": "\"\"\"\nLogistic Regression test on purchases data.\n\"\"\"\nimport pandas as pd\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import Imputer\nfrom sklearn.preprocessing import OneHotEncoder, LabelEncoder\nfrom sklearn.preprocessing import StandardScaler\n\ndf = pd.read_csv('./../../data/purchases.csv')\nX = df.iloc[:, :-1].values\ny = df.iloc[:, 3].values\nprint(\"X: \", X)\nprint(\"y: \", y)\n\nage_salary_imputer = 
Imputer(missing_values='NaN', strategy='mean', axis=0)\nage_salary_imputer = age_salary_imputer.fit(X[:, 1:3])\nX[:, 1:3] = age_salary_imputer.transform(X[:, 1:3])\nprint(\"X age-salary nan-to-mean imputer:\\n\", X)\n\ncountry_encoder = LabelEncoder()\nX[:, 0] = country_encoder.fit_transform(X[:, 0])\nprint(\"X country label encoder:\\n\", X)\n\ncountry_hot_encoder = OneHotEncoder(categorical_features=[0])\nX = country_hot_encoder.fit_transform(X).toarray()\nprint(\"X country hot encoder:\\n\", X)\n\npurchased_encoder = LabelEncoder()\ny = purchased_encoder.fit_transform(y)\nprint(\"y purchased label encoder: \", y)\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)\nprint(\"X/y train-test split!\")\n\nsc_X = StandardScaler()\nX_train = sc_X.fit_transform(X_train)\nX_test = sc_X.transform(X_test)\n\nprint(\"X shape / y shape: \", X.shape, y.shape)\nprint(\"X train / X test shape: \", X_train.shape, X_test.shape)\nprint(\"y train / y test shape: \", y_train.shape, y_test.shape)\n\nmodel = LogisticRegression()\nmodel.fit(X_train, y_train)\nprint(\"y predicted: \", model.predict(X_test))\n", "id": "5223537", "language": "Python", "matching_score": 4.5678391456604, "max_stars_count": 0, "path": "ml/regression/logistic.regression.py" }, { "content": "\"\"\"\nLinear Regression test on startups data.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport statsmodels.formula.api as sm\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\n\ndf = pd.read_csv('./../../data/startups.csv')\nX = df.iloc[:, :-1].values\ny = df.iloc[:, 4].values\n\nstate_encoder = LabelEncoder()\nX[:, 3] = state_encoder.fit_transform(X[:, 3])\n\nstate_hot_encoder = OneHotEncoder(categorical_features=[3])\nX = state_hot_encoder.fit_transform(X).toarray()\n\n# Remove [dummy vars - 1] to avoid dummy var trap.\n# Done automatically by sklearn linear regression.\nX = X[:, 1:]\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)\nprint(\"X shape / y shape: \", X.shape, y.shape)\nprint(\"X train / X test shape: \", X_train.shape, X_test.shape)\nprint(\"y train / y test shape: \", y_train.shape, y_test.shape)\n\nmodel = LinearRegression()\nmodel.fit(X_train, y_train)\ny_predicted = model.predict(X_test)\n\nplt.scatter(y_test, y_predicted, color='red')\nplt.title('Predicted Profit vs Profit')\nplt.xlabel('Profit')\nplt.ylabel('Predicited Profit')\nplt.show()\n\n# Prepend x0 to X.\nX = np.append(arr=np.ones((50, 1)).astype(int), values=X, axis=1)\n\n# Backward elimination by independent variable of P < 0.05.\nmodel_OLS = sm.OLS(endog=y, exog=X[:, [0, 1, 2, 3, 4, 5]]).fit()\nprint(\"OLS Summary [0, 1, 2, 3, 4, 5]\\n\", model_OLS.summary())\n\n# Removed Dummy variable.\nmodel_OLS = sm.OLS(endog=y, exog=X[:, [0, 1, 3, 4, 5]]).fit()\nprint(\"OLS Summary [0, 1, 3, 4, 5]\\n\", model_OLS.summary())\n\n# Removed State variable.\nmodel_OLS = sm.OLS(endog=y, exog=X[:, [0, 3, 4, 5]]).fit()\nprint(\"OLS Summary [0, 3, 4, 5]\\n\", model_OLS.summary())\n\n# Removed Admin variable.\n# MarketingSpend and R&DSpend best predict Profit.\nmodel_OLS = sm.OLS(endog=y, exog=X[:, [0, 3, 5]]).fit()\nprint(\"OLS Summary [0, 3, 5]\\n\", model_OLS.summary())\n", "id": "4861367", "language": "Python", "matching_score": 1.725860834121704, "max_stars_count": 0, "path": "ml/regression/linear.regression.x.py" }, { "content": 
"\"\"\"\nMultiple models run against iris data.\n\"\"\"\nfrom sklearn import metrics\nfrom sklearn.datasets import load_iris\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\n\nX, y = load_iris(return_X_y=True)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=3)\nprint(\"X shape / y shape: \", X.shape, y.shape)\nprint(\"X train / X test shape: \", X_train.shape, X_test.shape)\nprint(\"y train / y test shape: \", y_train.shape, y_test.shape)\n\ndtc = DecisionTreeClassifier()\ndtc.fit(X_train, y_train)\ndtc_predicted = dtc.predict(X_test)\nprint(\"Decision Tree Classifier accuracy score:\", metrics.accuracy_score(y_test, dtc_predicted))\n\nknn = KNeighborsClassifier(n_neighbors=16)\nknn.fit(X_train, y_train)\nknn_predicted = knn.predict(X_test)\nprint(\"K Neighbors Classifier accuracy score:\", metrics.accuracy_score(y_test, knn_predicted))\n\nlr = LogisticRegression()\nlr.fit(X_train, y_train)\nlr_predicted = lr.predict(X_test)\nprint(\"Logistic Regression accuracy score: \", metrics.accuracy_score(y_test, lr_predicted))\n\ngnb = GaussianNB()\ngnb.fit(X_train, y_train)\ngnb_predicted = gnb.predict(X_test)\nprint(\"Gaussian Naive Bayes accuracy score: \", metrics.accuracy_score(y_test, gnb_predicted))\n\nsvm = SVC()\nsvm.fit(X_train, y_train)\nsvm_predicted = svm.predict(X_test)\nprint(\"Support Vector Machine accuracy score: \", metrics.accuracy_score(y_test, svm_predicted))\n", "id": "3593262", "language": "Python", "matching_score": 4.863769054412842, "max_stars_count": 0, "path": "ml/iris/iris.all.py" }, { "content": "\"\"\"\nDecision Tree Classifier test on iris data.\n\"\"\"\nfrom sklearn import metrics\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\n\nX, y = load_iris(return_X_y=True)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=3)\nprint(\"X shape / y shape: \", X.shape, y.shape)\nprint(\"X train / X test shape: \", X_train.shape, X_test.shape)\nprint(\"y train / y test shape: \", y_train.shape, y_test.shape)\n\nmodel = DecisionTreeClassifier()\nmodel.fit(X_train, y_train)\ny_predicted = model.predict(X_test)\nprint(\"Accuracy score: \", metrics.accuracy_score(y_test, y_predicted))\n", "id": "8781546", "language": "Python", "matching_score": 2.6435630321502686, "max_stars_count": 0, "path": "ml/iris/decision.tree.classifier.py" }, { "content": "\"\"\"\nKNeighbors Classifier test on iris data.\n\"\"\"\nimport matplotlib.pyplot as plt\nfrom sklearn import metrics\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split, cross_val_score\nfrom sklearn.neighbors import KNeighborsClassifier\n\nX, y = load_iris(return_X_y=True)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=3)\nprint(\"X shape / y shape: \", X.shape, y.shape)\nprint(\"X train / X test shape: \", X_train.shape, X_test.shape)\nprint(\"y train / y test shape: \", y_train.shape, y_test.shape)\n\nk_range = list(range(1, 31))\nk_scores = []\nfor k in k_range:\n knn = KNeighborsClassifier(n_neighbors=k)\n knn.fit(X_train, y_train)\n y_predicted = knn.predict(X_test)\n k_scores.append(metrics.accuracy_score(y_test, y_predicted))\n\ncv_range = list(range(1, 
31))\ncv_scores = []\nfor cv in cv_range:\n knn = KNeighborsClassifier(n_neighbors=cv)\n cv_scores.append(cross_val_score(knn, X, y, cv=10, scoring='accuracy').mean())\n\nmodel = KNeighborsClassifier(n_neighbors=16)\nmodel.fit(X_train, y_train)\ny_predicted = model.predict(X_test)\nprint(\"Highest [knn=16] accuracy score: \", metrics.accuracy_score(y_test, y_predicted))\nprint(\"Cross-validation mean accuracy score: \",\n cross_val_score(model, X, y, cv=10, scoring='accuracy').mean())\n\nplt.plot(k_range, k_scores)\nplt.xlabel('Value of K for KNN')\nplt.ylabel('Testing Accuracy')\nplt.show()\n\nplt.plot(cv_range, cv_scores)\nplt.xlabel('Value of K for KNN')\nplt.ylabel('Cross-Validation Accuracy')\nplt.show()\n", "id": "7384143", "language": "Python", "matching_score": 4.612954139709473, "max_stars_count": 0, "path": "ml/iris/k.nearest.neighbor.py" }, { "content": "\"\"\"\nLogistic Regression test on iris data.\n\"\"\"\nfrom sklearn import metrics\nfrom sklearn.datasets import load_iris\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split, cross_val_score\n\nX, y = load_iris(return_X_y=True)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=3)\nprint(\"X shape / y shape: \", X.shape, y.shape)\nprint(\"X train / X test shape: \", X_train.shape, X_test.shape)\nprint(\"y train / y test shape: \", y_train.shape, y_test.shape)\n\nmodel = LogisticRegression()\nmodel.fit(X_train, y_train)\ny_predicted = model.predict(X_test)\nprint(\"Accuracy score: \", metrics.accuracy_score(y_test, y_predicted))\nprint(\"Cross-validation mean accuracy score: \",\n cross_val_score(model, X, y, cv=10, scoring='accuracy').mean())\n", "id": "2221255", "language": "Python", "matching_score": 3.1131110191345215, "max_stars_count": 0, "path": "ml/iris/logistic.regression.py" }, { "content": "\"\"\"\nLinear Regression using pandas and advertising data.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sb\nfrom sklearn import metrics\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split, cross_val_score\n\ndf = pd.read_csv('./../../data/ads.csv', index_col=0)\nprint(\"Data shape: \", df.shape)\nprint(\"Data:\\n\", df.head(n=3))\n\nX = df[['TV', 'Radio']]\nprint(\"X type: \", type(X))\nprint(\"X shape: \", X.shape)\nprint(\"X:\\n\", X.head(n=3))\n\ny = df['Sales']\nprint(\"y type: \", type(y))\nprint(\"y shape: \", y.shape)\nprint(\"y:\\n\", y.head(n=3))\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)\nprint(\"X train / X test shape: \", X_train.shape, X_test.shape)\nprint(\"y train / y test shape: \", y_train.shape, y_test.shape)\n\nmodel = LinearRegression()\nmodel.fit(X_train, y_train)\ny_predicted = model.predict(X_test)\nprint(\"Root Mean Squared Error (RMSE): \", np.sqrt(metrics.mean_squared_error(y_test, y_predicted)))\nprint(\"Cross-validation mean RMSE: \",\n np.sqrt(-cross_val_score(model, X, y, cv=10, scoring='neg_mean_squared_error')).mean())\n\nsb.pairplot(df, x_vars=['TV', 'Radio'], y_vars='Sales', size=7, aspect=0.7, kind='reg')\nplt.show()\n", "id": "3707268", "language": "Python", "matching_score": 3.1935393810272217, "max_stars_count": 0, "path": "ml/regression/linear.regression.py" }, { "content": "\"\"\"\nLinear Regression test on salaries data.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.linear_model import LinearRegression\nfrom 
sklearn.model_selection import train_test_split\n\ndf = pd.read_csv('./../../data/salaries.csv')\nX = df.iloc[:, :-1].values\ny = df.iloc[:, 1].values\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)\nprint(\"X shape / y shape: \", X.shape, y.shape)\nprint(\"X train / X test shape: \", X_train.shape, X_test.shape)\nprint(\"y train / y test shape: \", y_train.shape, y_test.shape)\n\nmodel = LinearRegression()\nmodel.fit(X_train, y_train)\n\nplt.scatter(X_train, y_train, color='red')\nplt.plot(X_train, model.predict(X_train), color='blue')\nplt.title('Training')\nplt.xlabel('Experience')\nplt.ylabel('Salary')\nplt.show()\n\nplt.scatter(X_test, y_test, color='red')\nplt.plot(X_train, model.predict(X_train), color='blue')\nplt.title('Test')\nplt.xlabel('Experience')\nplt.ylabel('Salary')\nplt.show()\n", "id": "2713398", "language": "Python", "matching_score": 0.9921008944511414, "max_stars_count": 0, "path": "ml/regression/linear.regression.y.py" }, { "content": "\"\"\"\nDecision Tree Regression test on roles.salaries data.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn.tree import DecisionTreeRegressor\n\ndf = pd.read_csv('./../../data/roles.salaries.csv')\nX = df.iloc[:, 1:2].values\ny = df.iloc[:, 2].values\nprint(\"X shape / y shape: \", X.shape, y.shape)\nprint(\"X:\\n\", X)\nprint(\"y:\\n\", y)\n\nmodel = DecisionTreeRegressor(random_state=0)\nmodel.fit(X, y)\nprint(\"Decision Tree Regression predict @ 6.5 role-level salary: \", model.predict(6.5))\n\n# Role-Level 1 - 10, step 0.1\nX_grid = np.arange(min(X), max(X), 0.1)\nX_grid = X_grid.reshape((len(X_grid), 1))\n\nplt.scatter(X, y, color='red')\nplt.plot(X_grid, model.predict(X_grid), color='blue')\nplt.title('Decision Tree Regression')\nplt.xlabel('Role-Level')\nplt.ylabel('Salary')\nplt.show()\n", "id": "3065553", "language": "Python", "matching_score": 3.8641412258148193, "max_stars_count": 0, "path": "ml/regression/decision.tree.regressor.py" }, { "content": "\"\"\"\nRandom Forest Regression test on roles.salaries data.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestRegressor\n\ndf = pd.read_csv('./../../data/roles.salaries.csv')\nX = df.iloc[:, 1:2].values\ny = df.iloc[:, 2].values\nprint(\"X shape / y shape: \", X.shape, y.shape)\nprint(\"X:\\n\", X)\nprint(\"y:\\n\", y)\n\nmodel = RandomForestRegressor(n_estimators=300, random_state=0)\nmodel.fit(X, y)\nprint(\"Random Forest Regression predict @ 6.5 role-level salary: \", model.predict(6.5))\n\n# Role-Level 1 - 10, step 0.01\nX_grid = np.arange(min(X), max(X), 0.01)\nX_grid = X_grid.reshape((len(X_grid), 1))\n\nplt.scatter(X, y, color='red')\nplt.plot(X_grid, model.predict(X_grid), color='blue')\nplt.title('Random Forest Regression')\nplt.xlabel('Role-Level')\nplt.ylabel('Salary')\nplt.show()\n", "id": "1341475", "language": "Python", "matching_score": 2.8406975269317627, "max_stars_count": 0, "path": "ml/regression/random.forrest.regressor.py" }, { "content": "\"\"\"\nSupport Vector Regression test on roles.salaries data.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import SVR\n\ndf = pd.read_csv('./../../data/roles.salaries.csv')\nX = df.iloc[:, 1:2].values\ny = df.iloc[:, 2].values\nprint(\"X shape / y shape: \", X.shape, y.shape)\nprint(\"X:\\n\", X)\nprint(\"y:\\n\", y)\n\nsc_X = StandardScaler()\nsc_y = 
StandardScaler()\nX = sc_X.fit_transform(X)\ny = y.reshape(-1, 1)\ny = sc_y.fit_transform(y)\nprint(\"Feature scaled X:\\n\", X)\nprint(\"Feature scaled y:\\n\", y)\n\nmodel = SVR(kernel='rbf')\nmodel.fit(X, y)\n\ny_predicted = sc_y.inverse_transform(model.predict(sc_X.transform(np.array([[6.5]]))))\nprint(\"Support Vector Regression predict @ 6.5 role-level salary: \", y_predicted)\n\nplt.scatter(X, y, color='red')\nplt.plot(X, model.predict(X), color='blue')\nplt.title('Support Vector Regression')\nplt.xlabel('Role-Level')\nplt.ylabel('Salary')\nplt.show()\n\n# Role-Level 1 - 10, step 0.1\nX_grid = np.arange(min(X), max(X), 0.1)\nX_grid = X_grid.reshape((len(X_grid), 1))\n\nplt.scatter(X, y, color='red')\nplt.plot(X_grid, model.predict(X_grid), color='blue')\nplt.title('Smooth Support Vector Regression')\nplt.xlabel('Role-Level')\nplt.ylabel('Salary')\nplt.show()\n", "id": "11061967", "language": "Python", "matching_score": 4.273321151733398, "max_stars_count": 0, "path": "ml/regression/support.vector.regression.py" }, { "content": "\"\"\"\nPolynominal Regression test on roles.salaries data.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import PolynomialFeatures\n\ndf = pd.read_csv('./../../data/roles.salaries.csv')\nX = df.iloc[:, 1:2].values\ny = df.iloc[:, 2].values\nprint(\"X shape / y shape: \", X.shape, y.shape)\nprint(\"X:\\n\", X)\nprint(\"y:\\n\", y)\n\nmodel = LinearRegression()\nmodel.fit(X, y)\n\nfeatures = PolynomialFeatures(degree=4)\nX_poly = features.fit_transform(X)\nfeatures.fit(X_poly, y)\n\nploy_model = LinearRegression()\nploy_model.fit(X_poly, y)\n\nplt.scatter(X, y, color='red')\nplt.plot(X, model.predict(X), color='blue')\nplt.title('Linear Regression')\nplt.xlabel('Role-Level')\nplt.ylabel('Salary')\nplt.show()\n\nplt.scatter(X, y, color='red')\nplt.plot(X, ploy_model.predict(X_poly), color='blue')\nplt.title('Polynomial Regression')\nplt.xlabel('Role-Level')\nplt.ylabel('Salary')\nplt.show()\n\n# Role-Level 1 - 10, step 0.1\nX_grid = np.arange(min(X), max(X), 0.1)\nX_grid = X_grid.reshape((len(X_grid), 1))\nX_smooth_poly = features.fit_transform(X_grid)\n\nplt.scatter(X, y, color='red')\nplt.plot(X_grid, ploy_model.predict(X_smooth_poly), color='blue')\nplt.title('Smooth Polynomial Regression')\nplt.xlabel('Role-Level')\nplt.ylabel('Salary')\nplt.show()\n\n# Polynominal Regression is more accurate!\nprint(\"Linear Regression predict @ 6.5 role-level salary: \", model.predict(6.5))\nprint(\"Polynominal Regression predict @ 6.5 role-level salary: \", ploy_model.predict(features.fit_transform(6.5)))\n", "id": "9997685", "language": "Python", "matching_score": 0.5103799104690552, "max_stars_count": 0, "path": "ml/regression/polynominal.regression.py" }, { "content": "\"\"\"\nK-Means clustering test on mall customer data.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.cluster import KMeans\n\ndf = pd.read_csv('./../../data/mall.customers.csv')\nX = df.iloc[:, [3, 4]].values\nprint(\"X shape: \", X.shape)\n\nwcss = []\nfor i in range(1, 11):\n kmeans = KMeans(n_clusters=i, init='k-means++', random_state=42)\n kmeans.fit(X)\n wcss.append(kmeans.inertia_)\nplt.plot(range(1, 11), wcss)\nplt.title('The Elbow Method')\nplt.xlabel('Number of clusters')\nplt.ylabel('WCSS')\nplt.show()\n\nkmeans = KMeans(n_clusters=5, init='k-means++', random_state=42)\ny_kmeans = kmeans.fit_predict(X)\n\nplt.scatter(X[y_kmeans == 0, 0], X[y_kmeans == 
0, 1], s=100, c='red', label='Cluster 1')\nplt.scatter(X[y_kmeans == 1, 0], X[y_kmeans == 1, 1], s=100, c='blue', label='Cluster 2')\nplt.scatter(X[y_kmeans == 2, 0], X[y_kmeans == 2, 1], s=100, c='green', label='Cluster 3')\nplt.scatter(X[y_kmeans == 3, 0], X[y_kmeans == 3, 1], s=100, c='cyan', label='Cluster 4')\nplt.scatter(X[y_kmeans == 4, 0], X[y_kmeans == 4, 1], s=100, c='magenta', label='Cluster 5')\nplt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s=300, c='yellow', label='Centroids')\nplt.title('Clusters of Customers')\nplt.xlabel('Annual Income')\nplt.ylabel('Spending Score (1-100)')\nplt.legend()\nplt.show()\n", "id": "4039478", "language": "Python", "matching_score": 3.5255422592163086, "max_stars_count": 0, "path": "ml/clustering/k.means.clustering.py" }, { "content": "\"\"\"\nHierarchical clustering test on mall customer data.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport scipy.cluster.hierarchy as sch\nfrom sklearn.cluster import AgglomerativeClustering\n\ndf = pd.read_csv('./../../data/mall.customers.csv')\nX = df.iloc[:, [3, 4]].values\nprint(\"X shape: \", X.shape)\n\ndendrogram = sch.dendrogram(sch.linkage(X, method='ward'))\nplt.title('Dendrogram')\nplt.xlabel('Customers')\nplt.ylabel('Euclidean Distances')\nplt.show()\n\nhc = AgglomerativeClustering(n_clusters=5, affinity='euclidean', linkage='ward')\ny_hc = hc.fit_predict(X)\n\nplt.scatter(X[y_hc == 0, 0], X[y_hc == 0, 1], s=100, c='red', label='Cluster 1')\nplt.scatter(X[y_hc == 1, 0], X[y_hc == 1, 1], s=100, c='blue', label='Cluster 2')\nplt.scatter(X[y_hc == 2, 0], X[y_hc == 2, 1], s=100, c='green', label='Cluster 3')\nplt.scatter(X[y_hc == 3, 0], X[y_hc == 3, 1], s=100, c='cyan', label='Cluster 4')\nplt.scatter(X[y_hc == 4, 0], X[y_hc == 4, 1], s=100, c='magenta', label='Cluster 5')\nplt.title('Clusters of Customers')\nplt.xlabel('Annual Income')\nplt.ylabel('Spending Score (1-100)')\nplt.legend()\nplt.show()\n", "id": "9567644", "language": "Python", "matching_score": 0.4221644699573517, "max_stars_count": 0, "path": "ml/clustering/hierarchical.clustering.py" }, { "content": "\"\"\"\nThompson Sampling test on ads click-thru-rate data.\n\"\"\"\nimport random\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndf = pd.read_csv('./../../data/ads.ctr.csv')\n\nN = 10000\nd = 10\nads_selected = []\nnumbers_of_rewards_1 = [0] * d\nnumbers_of_rewards_0 = [0] * d\ntotal_reward = 0\nfor n in range(0, N):\n ad = 0\n max_random = 0\n for i in range(0, d):\n random_beta = random.betavariate(numbers_of_rewards_1[i] + 1, numbers_of_rewards_0[i] + 1)\n if random_beta > max_random:\n max_random = random_beta\n ad = i\n ads_selected.append(ad)\n reward = df.values[n, ad]\n if reward == 1:\n numbers_of_rewards_1[ad] = numbers_of_rewards_1[ad] + 1\n else:\n numbers_of_rewards_0[ad] = numbers_of_rewards_0[ad] + 1\n total_reward = total_reward + reward\n\nplt.hist(ads_selected)\nplt.title('Histogram of Ad Selections')\nplt.xlabel('Ads')\nplt.ylabel('Number of times each Ad was Selected')\nplt.show()\n", "id": "5355546", "language": "Python", "matching_score": 3.3708064556121826, "max_stars_count": 0, "path": "ml/learning/thompson.sampling.py" }, { "content": "\"\"\"\nUpper Confidence Bound test on ads click-thru-rate data.\n\"\"\"\nimport math\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndf = pd.read_csv('./../../data/ads.ctr.csv')\n\nN = 10000\nd = 10\nads_selected = []\nnumbers_of_selections = [0] * d\nsums_of_rewards = [0] * d\ntotal_reward = 0\nfor n in range(0, 
N):\n    ad = 0\n    max_upper_bound = 0\n    for i in range(0, d):\n        if (numbers_of_selections[i] > 0):\n            average_reward = sums_of_rewards[i] / numbers_of_selections[i]\n            delta_i = math.sqrt(3 / 2 * math.log(n + 1) / numbers_of_selections[i])\n            upper_bound = average_reward + delta_i\n        else:\n            upper_bound = 1e400\n        if upper_bound > max_upper_bound:\n            max_upper_bound = upper_bound\n            ad = i\n    ads_selected.append(ad)\n    numbers_of_selections[ad] = numbers_of_selections[ad] + 1\n    reward = df.values[n, ad]\n    sums_of_rewards[ad] = sums_of_rewards[ad] + reward\n    total_reward = total_reward + reward\n\nplt.hist(ads_selected)\nplt.title('Histogram of Ad Selections')\nplt.xlabel('Ads')\nplt.ylabel('Number of times each Ad was Selected')\nplt.show()\n", "id": "4785185", "language": "Python", "matching_score": 3.360480546951294, "max_stars_count": 0, "path": "ml/learning/ucb.py" }, { "content": "\"\"\"\nRandom Selection test on ads click-thru-rate data.\n\"\"\"\nimport random\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndf = pd.read_csv('./../../data/ads.ctr.csv')\n\nN = 10000\nd = 10\nads_selected = []\ntotal_reward = 0\nfor n in range(0, N):\n    ad = random.randrange(d)\n    ads_selected.append(ad)\n    reward = df.values[n, ad]\n    total_reward = total_reward + reward\n\nplt.hist(ads_selected)\nplt.title('Histogram of Ad Selections')\nplt.xlabel('Ads')\nplt.ylabel('Number of times each Ad was Selected')\nplt.show()\n", "id": "10401492", "language": "Python", "matching_score": 0.1874673068523407, "max_stars_count": 0, "path": "ml/learning/random.selection.py" }, { "content": "\"\"\"\nPandas features.\n\"\"\"\nimport pandas as pd\n\nads = pd.read_csv('./../data/ads.csv', index_col=0).dropna()\nprint(\"Ads shape: \", ads.shape)\nprint(\"Ads data:\\n\", ads.head(n=3))\nprint(\"Ad sales >= 20.0:\\n\", ads[ads.Sales >= 20.0].sort_values('Sales', ascending=False).head(n=3))\nprint(\"Ad average sales:\\n\", ads.mean(axis='index'))\n\norders = pd.read_table('./../data/orders.tsv', index_col=0).dropna()\norders.drop(['choice_description'], axis=1, inplace=True)\nprint(\"Orders shape: \", orders.shape)\nprint(\"Orders data:\\n\", orders.sort_values('item_price', ascending=False).head(n=3))\nprint(\"Orders average total:\\n\", orders.mean(axis='index'))\n\njob_cols = ['id', 'age', 'gender', 'job', 'zip']\njobs = pd.read_table('./../data/jobs.psv', sep='|', header=None, names=job_cols, index_col=0).dropna()\nprint(\"Jobs shape: \", jobs.shape)\nprint(\"Jobs data:\\n\", jobs.sort_values('job').head(n=3))\nprint(\"Job by age >= 30 and gender = M:\\n\", jobs[(jobs.age >= 30) & (jobs.gender == 'M')]\n      .sort_values('age', ascending=False).head(n=3))\nprint(\"Geeks:\\n\", jobs[jobs.job.isin(['programmer', 'technician'])].head(n=3))\nprint(\"Jobs average age:\\n\", jobs.mean(axis='index'))\nprint(\"Jobs gender/job crosstab:\\n\", pd.crosstab(jobs.gender, jobs.job))\n\nufos = pd.read_csv('./../data/ufos.csv').dropna()\nufos['Location'] = ufos.City + ', ' + ufos.State\nufos.rename(columns={'Time': 'Date_Time'}, inplace=True)\nufos.drop(['Color', 'State'], axis=1, inplace=True)\nprint(\"UFOs shape: \", ufos.shape)\nprint(\"UFOs data:\\n\", ufos.sort_values('Location').head(n=3))\n\nmovies = pd.read_csv('./../data/imdb.csv').dropna()\nprint(\"Movies shape: \", movies.shape)\nprint(\"Movie ratings:\\n\", movies.sort_values('stars', ascending=False).head(n=3))\nprint(\"Movie genres:\\n\", movies.genre.describe())\n\ndrinks = pd.read_csv('./../data/drinks.csv').dropna()\nprint(\"Drinks shape: \", drinks.shape)\nprint(\"Drinks 
litres:\\n\", drinks.sort_values('litres', ascending=False).head(n=3))\nprint(\"Drinks describe:\\n\", drinks.describe())\nprint(\"Beer by continent:\\n\", drinks.groupby('continent').beer.agg(['mean', 'max']))\n", "id": "4625443", "language": "Python", "matching_score": 0.40377524495124817, "max_stars_count": 0, "path": "pd/pandas.data.py" }, { "content": "\"\"\"\nApriori test on shopping data.\n\"\"\"\nimport pandas as pd\nfrom apriorilib import apriori\n\ndf = pd.read_csv('./../../data/shopping.csv', header=None)\nbaskets = []\nfor i in range(0, 7501):\n baskets.append([str(df.values[i, j]) for j in range(0, 20)])\nrelations = apriori(baskets, min_support=0.003, min_confidence=0.2, min_lift=3, min_length=2)\nj = 0\nfor relation in relations:\n print(relation)\n j += 1\n if j == 10:\n break\n", "id": "10644993", "language": "Python", "matching_score": 0.015580356121063232, "max_stars_count": 0, "path": "ml/learning/apriori.py" }, { "content": "\"\"\"\nKernel Principal Component Analysis (PCA) test, with bidirectional elimination on social network ads data.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom matplotlib.colors import ListedColormap\nfrom sklearn.decomposition import KernelPCA\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\n\ndf = pd.read_csv('./../../data/social.network.ads.csv')\nX = df.iloc[:, [2, 3]].values\ny = df.iloc[:, 4].values\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)\nprint(\"X shape / y shape: \", X.shape, y.shape)\nprint(\"X train / X test shape: \", X_train.shape, X_test.shape)\nprint(\"y train / y test shape: \", y_train.shape, y_test.shape)\n\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\nmodel = KernelPCA(n_components=2, kernel='rbf')\nX_train = model.fit_transform(X_train)\nX_test = model.transform(X_test)\n\nclassifier = LogisticRegression(random_state=0)\nclassifier.fit(X_train, y_train)\ny_pred = classifier.predict(X_test)\ncm = confusion_matrix(y_test, y_pred)\nprint(\"Confusion matrix:\\n\", cm)\n\nX_set, y_set = X_train, y_train\nX1, X2 = np.meshgrid(np.arange(start=X_set[:, 0].min() - 1, stop=X_set[:, 0].max() + 1, step=0.01),\n np.arange(start=X_set[:, 1].min() - 1, stop=X_set[:, 1].max() + 1, step=0.01))\nplt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),\n alpha=0.75, cmap=ListedColormap(('red', 'green')))\nplt.xlim(X1.min(), X1.max())\nplt.ylim(X2.min(), X2.max())\nfor i, j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n c=ListedColormap(('red', 'green'))(i), label=j)\nplt.title('Logistic Regression (Training set)')\nplt.xlabel('Age')\nplt.ylabel('Estimated Salary')\nplt.legend()\nplt.show()\n\nX_set, y_set = X_test, y_test\nX1, X2 = np.meshgrid(np.arange(start=X_set[:, 0].min() - 1, stop=X_set[:, 0].max() + 1, step=0.01),\n np.arange(start=X_set[:, 1].min() - 1, stop=X_set[:, 1].max() + 1, step=0.01))\nplt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),\n alpha=0.75, cmap=ListedColormap(('red', 'green')))\nplt.xlim(X1.min(), X1.max())\nplt.ylim(X2.min(), X2.max())\nfor i, j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n c=ListedColormap(('red', 'green'))(i), 
label=j)\nplt.title('Logistic Regression (Test set)')\nplt.xlabel('Age')\nplt.ylabel('Estimated Salary')\nplt.legend()\nplt.show()\n", "id": "6432765", "language": "Python", "matching_score": 3.215334892272949, "max_stars_count": 0, "path": "ml/reduction/kernal.pca.py" }, { "content": "\"\"\"\nNatural Language Processing test on restaurant review data.\n\"\"\"\nimport re\n\nimport nltk\nimport pandas as pd\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.naive_bayes import GaussianNB\n\ndf = pd.read_csv('./../../data/restaurant.reviews.tsv', delimiter='\\t', quoting=3)\n\nnltk.download('stopwords')\ncorpus = []\nfor line in range(0, 1000):\n review = re.sub('[^a-zA-Z]', ' ', df['Review'][line])\n review = review.lower()\n review = review.split()\n ps = PorterStemmer()\n review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))]\n review = ' '.join(review)\n corpus.append(review)\n\nmodel = CountVectorizer(max_features=1500)\nX = model.fit_transform(corpus).toarray()\ny = df.iloc[:, 1].values\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=0)\nprint(\"X shape / y shape: \", X.shape, y.shape)\nprint(\"X train / X test shape: \", X_train.shape, X_test.shape)\nprint(\"y train / y test shape: \", y_train.shape, y_test.shape)\n\nclassifier = GaussianNB()\nclassifier.fit(X_train, y_train)\ny_pred = classifier.predict(X_test)\ncm = confusion_matrix(y_test, y_pred)\nprint(\"Confusion matrix: \", cm)\n", "id": "10925698", "language": "Python", "matching_score": 3.2293341159820557, "max_stars_count": 0, "path": "ml/learning/nlp.py" }, { "content": "\"\"\"\nXGBoost test on churn modeling data.\n\nINSTALL:\n\n1. brew install gcc\n2. git clone --recursive https://github.com/dmlc/xgboost\n3. cd xgboost\n4. ./build.sh\n5. cd python-package\n6. 
python3 setup.py install\n\"\"\"\nimport pandas as pd\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom xgboost import XGBClassifier\n\ndf = pd.read_csv('./../../data/churn.modeling.csv')\nX = df.iloc[:, 3:13].values\ny = df.iloc[:, 13].values\n\nlabelencoder_X_1 = LabelEncoder()\nX[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])\nlabelencoder_X_2 = LabelEncoder()\nX[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])\nonehotencoder = OneHotEncoder(categorical_features=[1])\nX = onehotencoder.fit_transform(X).toarray()\nX = X[:, 1:]\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)\nprint(\"X shape / y shape: \", X.shape, y.shape)\nprint(\"X train / X test shape: \", X_train.shape, X_test.shape)\nprint(\"y train / y test shape: \", y_train.shape, y_test.shape)\n\nclassifier = XGBClassifier()\nclassifier.fit(X_train, y_train)\ny_pred = classifier.predict(X_test)\ncm = confusion_matrix(y_test, y_pred)\nprint(\"Confusion matrix:\\n\", cm)\n\naccuracies = cross_val_score(estimator=classifier, X=X_train, y=y_train, cv=10)\nprint(\"Accuracy mean: \", accuracies.mean())\nprint(\"Accuracy standard deviation: \", accuracies.std())\n", "id": "6477521", "language": "Python", "matching_score": 4.529550075531006, "max_stars_count": 0, "path": "ml/classification/xgboost.py" }, { "content": "\"\"\"\nArtificial Neural Network test on churn modeling data.\nWARNING: For Python 3.6 install Tensorflow 1.5+ Note the version number in the url!\nINSTALL:\n1. pip3 install theano\n2. pip3 install --upgrade https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.5.0rc1-py3-none-any.whl\n3. 
pip3 install keras\n\"\"\"\nimport pandas as pd\nfrom keras.layers import Dense\nfrom keras.models import Sequential\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom sklearn.preprocessing import StandardScaler\n\ndf = pd.read_csv('./../data/churn.modeling.csv')\nX = df.iloc[:, 3:13].values\ny = df.iloc[:, 13].values\n\nlabelencoder_X_1 = LabelEncoder()\nX[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])\nlabelencoder_X_2 = LabelEncoder()\nX[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])\nonehotencoder = OneHotEncoder(categorical_features=[1])\nX = onehotencoder.fit_transform(X).toarray()\nX = X[:, 1:]\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)\nprint(\"X shape / y shape: \", X.shape, y.shape)\nprint(\"X train / X test shape: \", X_train.shape, X_test.shape)\nprint(\"y train / y test shape: \", y_train.shape, y_test.shape)\n\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\nclassifier = Sequential()\n\n# Input Layer\nclassifier.add(Dense(activation=\"relu\", input_dim=11, units=6, kernel_initializer=\"uniform\"))\n# Hidden Layer\nclassifier.add(Dense(activation=\"relu\", units=6, kernel_initializer=\"uniform\"))\n# Output Layer\nclassifier.add(Dense(activation=\"sigmoid\", units=1, kernel_initializer=\"uniform\"))\n\nclassifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\nclassifier.fit(X_train, y_train, batch_size=10, epochs=10)\n\ny_pred = classifier.predict(X_test)\ny_pred = (y_pred > 0.5)\ncm = confusion_matrix(y_test, y_pred)\nprint(\"Confusion matrix:\\n\", cm)\n\n\"\"\"\nWARNING: The KerasClassifier hangs! And the GridSearchCV requires hours to run, still resulting\nin only 86% accuracy.\n\"\"\"\n", "id": "9431994", "language": "Python", "matching_score": 4.074506759643555, "max_stars_count": 0, "path": "dl/artificial.neural.network.py" }, { "content": "\"\"\"\nConvolutional Neural Network test on cats and dogs data.\n\nWARNING: The cats and dogs dataset is TOO LARGE to push to Github. Download at\nhttps://www.superdatascience.com/machine-learning/ , Part 8. 
Deep Learning,\nConvolutional Neural Networks.zip Note the directory structure for this test:\npython.ml\n dataset\n test\n training\nWARNING: For Python 3.6 install Tensorflow 1.5+ Note the version number in the url!\nINSTALL: pip3 install --upgrade https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.5.0rc1-py3-none-any.whl\n\nThe test takes about 12+ minutes:\nUsing TensorFlow backend.\nFound 8000 images belonging to 2 classes.\nFound 2000 images belonging to 2 classes.\nEpoch 1/10\n2018-01-21 15:27:56.977934: I tensorflow/core/platform/cpu_feature_guard.cc:137] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.2 AVX\n - 79s - loss: 0.6603 - acc: 0.5970 - val_loss: 0.5914 - val_acc: 0.7005\nEpoch 2/10\n - 77s - loss: 0.5902 - acc: 0.6815 - val_loss: 0.5421 - val_acc: 0.7360\nEpoch 3/10\n - 80s - loss: 0.5582 - acc: 0.7111 - val_loss: 0.5501 - val_acc: 0.7245\nEpoch 4/10\n - 71s - loss: 0.5317 - acc: 0.7314 - val_loss: 0.5167 - val_acc: 0.7435\nEpoch 5/10\n - 77s - loss: 0.5008 - acc: 0.7576 - val_loss: 0.4741 - val_acc: 0.7755\nEpoch 6/10\n - 82s - loss: 0.4731 - acc: 0.7761 - val_loss: 0.4753 - val_acc: 0.7725\nEpoch 7/10\n - 79s - loss: 0.4588 - acc: 0.7801 - val_loss: 0.4472 - val_acc: 0.7925\nEpoch 8/10\n - 88s - loss: 0.4568 - acc: 0.7792 - val_loss: 0.5146 - val_acc: 0.7540\nEpoch 9/10\n - 84s - loss: 0.4324 - acc: 0.7961 - val_loss: 0.4570 - val_acc: 0.7890\nEpoch 10/10\n - 82s - loss: 0.4215 - acc: 0.8066 - val_loss: 0.4434 - val_acc: 0.8045\nobjektwerks:dl objektwerks$\n\nIncrease epochs for greater accuracy, requiring more processing time.\n\"\"\"\nfrom keras.layers import Conv2D\nfrom keras.layers import Dense\nfrom keras.layers import Flatten\nfrom keras.layers import MaxPooling2D\nfrom keras.models import Sequential\nfrom keras.preprocessing.image import ImageDataGenerator\n\ntrain_datagen = ImageDataGenerator(rescale=1. / 255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True)\ntraining_set = train_datagen.flow_from_directory('./../dataset/training',\n target_size=(64, 64),\n batch_size=32,\n class_mode='binary')\n\ntest_datagen = ImageDataGenerator(rescale=1. 
/ 255)\ntest_set = test_datagen.flow_from_directory('./../dataset/test',\n target_size=(64, 64),\n batch_size=32,\n class_mode='binary')\n\nclassifier = Sequential()\nclassifier.add(Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation=\"relu\"))\nclassifier.add(MaxPooling2D(pool_size=(2, 2)))\nclassifier.add(Conv2D(32, (3, 3), activation=\"relu\"))\nclassifier.add(MaxPooling2D(pool_size=(2, 2)))\nclassifier.add(Flatten())\nclassifier.add(Dense(activation=\"relu\", units=128))\nclassifier.add(Dense(activation=\"sigmoid\", units=1))\nclassifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\nclassifier.fit_generator(training_set,\n steps_per_epoch=250,\n epochs=10,\n verbose=2,\n validation_data=test_set)\n", "id": "8796516", "language": "Python", "matching_score": 2.657655954360962, "max_stars_count": 0, "path": "dl/convolution.neural.network.py" }, { "content": "\"\"\"\nRecurrent Neural Network test on stock price data, using long-short-term memory (LSTM) networks.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\nfrom keras.layers import LSTM\nfrom keras.models import Sequential\nfrom sklearn.preprocessing import MinMaxScaler\n\ndf_train = pd.read_csv('./../data/stock.price.train.csv')\ntraining_set = df_train.iloc[:, 1:2].values\n\nsc = MinMaxScaler(feature_range=(0, 1))\ntraining_set_scaled = sc.fit_transform(training_set)\n\nX_train = []\ny_train = []\nfor i in range(60, 1258):\n X_train.append(training_set_scaled[i - 60:i, 0])\n y_train.append(training_set_scaled[i, 0])\nX_train, y_train = np.array(X_train), np.array(y_train)\nX_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))\n\nregressor = Sequential()\n\nregressor.add(LSTM(units=50, return_sequences=True, input_shape=(X_train.shape[1], 1)))\nregressor.add(Dropout(0.2))\n\nregressor.add(LSTM(units=50, return_sequences=True))\nregressor.add(Dropout(0.2))\n\nregressor.add(LSTM(units=50, return_sequences=True))\nregressor.add(Dropout(0.2))\n\nregressor.add(LSTM(units=50))\nregressor.add(Dropout(0.2))\n\nregressor.add(Dense(units=1))\n\nregressor.compile(optimizer='adam', loss='mean_squared_error')\nregressor.fit(X_train, y_train, epochs=10, batch_size=32)\n\ndf_test = pd.read_csv('./../data/stock.price.test.csv')\nreal_stock_price = df_test.iloc[:, 1:2].values\n\ndf_train_test_merged = pd.concat((df_train['Open'], df_test['Open']), axis=0)\ninputs = df_train_test_merged[len(df_train_test_merged) - len(df_test) - 60:].values\ninputs = inputs.reshape(-1, 1)\ninputs = sc.transform(inputs)\nX_test = []\nfor i in range(60, 80):\n X_test.append(inputs[i - 60:i, 0])\nX_test = np.array(X_test)\nX_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))\npredicted_stock_price = regressor.predict(X_test)\npredicted_stock_price = sc.inverse_transform(predicted_stock_price)\n\nplt.plot(real_stock_price, color='red', label='Real Stock Price')\nplt.plot(predicted_stock_price, color='blue', label='Predicted Stock Price')\nplt.title('Stock Price Prediction')\nplt.xlabel('Time')\nplt.ylabel('Stock Price')\nplt.legend()\nplt.show()\n", "id": "8574933", "language": "Python", "matching_score": 3.598214626312256, "max_stars_count": 0, "path": "dl/recurrent.neural.network.py" }, { "content": "\"\"\"\nSelf Organizing Map test on credit card app data.\n\"\"\"\nimport numpy as np\nimport pandas as pd\nfrom pylab import bone, pcolor, colorbar, plot, show\nfrom sklearn.preprocessing import 
MinMaxScaler\nfrom somlib import MiniSom\n\ndf = pd.read_csv('./../data/credit.card.apps.csv')\nX = df.iloc[:, :-1].values\ny = df.iloc[:, -1].values\nprint(\"X shape / y shape: \", X.shape, y.shape)\n\nsc = MinMaxScaler(feature_range=(0, 1))\nX = sc.fit_transform(X)\n\nsom = MiniSom(x=10, y=10, input_len=15, sigma=1.0, learning_rate=0.5)\nsom.random_weights_init(X)\nsom.train_random(data=X, num_iteration=100)\n\nbone()\npcolor(som.distance_map().T)\ncolorbar()\nmarkers = ['o', 's']\ncolors = ['r', 'g']\nfor i, x in enumerate(X):\n w = som.winner(x)\n plot(w[0] + 0.5,\n w[1] + 0.5,\n markers[y[i]],\n markeredgecolor=colors[y[i]],\n markerfacecolor='None',\n markersize=10,\n markeredgewidth=2)\nshow()\n\nmappings = som.win_map(X)\nfrauds = np.concatenate((mappings[(8, 1)], mappings[(6, 8)]), axis=0)\nfrauds = sc.inverse_transform(frauds)\nprint(\"Frauds:\\n\")\nprint(frauds)\n", "id": "2679676", "language": "Python", "matching_score": 0.4234010577201843, "max_stars_count": 0, "path": "dl/self.organizing.map.py" }, { "content": "\"\"\"\nWord count app.\n\"\"\"\nfrom operator import add\nfrom pyspark.sql import SparkSession\n\nsparkSession = SparkSession.builder.master(\"local[*]\").appName(\"wordcount\").getOrCreate()\ntext = sparkSession.read.text(\"LICENSE\").rdd.cache()\n\nlines = text.map(lambda r: r[0])\ncounts = lines.flatMap(lambda l: l.split(' ')).map(lambda w: (w, 1)).reduceByKey(add)\nwords = counts.collect()\nfor (word, count) in words:\n print(\"%s: %i\" % (word, count))\n\nsparkSession.stop()\n", "id": "3694493", "language": "Python", "matching_score": 0.6152708530426025, "max_stars_count": 0, "path": "spark/wordcount.py" } ]
3.193539
melonattacker
[ { "content": "import os\nfrom flask import Flask, request, jsonify, send_file\nfrom flask_cors import CORS, cross_origin\nfrom werkzeug.utils import secure_filename\nimport json\nfrom process import *\n\nALLOWED_EXTENSIONS = {'csv'}\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = './uploads/'\nCORS(\n app,\n supports_credentials=True\n)\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\[email protected]('/', methods=['GET', 'POST'])\ndef upload_and_send_file():\n if request.method == 'POST':\n # post requestにfile partがあるかチェック\n if 'file' not in request.files:\n return \"No file part\"\n file = request.files['file']\n # ファイル名のチェック\n if file.filename == '':\n return \"No selected file\"\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n filepath = process(app.config['UPLOAD_FOLDER'], filename)\n return send_file(\n filepath, as_attachment=True\n )\n\n\[email protected](\"/employee\", methods=[\"GET\", \"POST\"])\ndef employee():\n if request.method == 'POST':\n return register_employee()\n else:\n return get_employees()\n \ndef register_employee():\n data = request.json\n print(data[\"params\"][\"c_a\"])\n best_response = calc_equ(data[\"params\"])\n insert_employee(data[\"name\"], best_response)\n return \"ok\"\n\ndef get_employees():\n employees = query_employees()\n print(employees)\n return json.dumps(employees)\n\nif __name__ == \"__main__\":\n app.run(debug=True, host='0.0.0.0', port=8080)", "id": "10409022", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "backend/app.py" }, { "content": "import pandas as pd\nimport numpy as np\nfrom sklearn.linear_model import LogisticRegression\n\ndef calc_best_response(row, params):\n c_r = params[\"c_r\"]\n c_rw = params[\"c_rw\"]\n c_w = params[\"c_w\"]\n c_ww = params[\"c_ww\"]\n c_a = row[\"c_a\"]\n v_r = row[\"v_r\"]\n v_w = row[\"v_w\"]\n p_i = row[\"insider_prob\"]\n\n actions = []\n actions.append((p_i*(-c_r+v_r)+(1-p_i)*(-c_r-c_rw), \"Revoke\")) # Revoke\n actions.append((p_i*(-c_w+v_w)+(1-p_i)*(-c_w-c_ww), \"Warn\")) # Warn\n actions.append((p_i*(-c_a), \"Keep\")) # Keep\n return max(actions)[1]\n\ndef process(path, filename):\n # ロジスティック回帰学習済みのパラメータを設定\n lr = LogisticRegression()\n lr.coef_= np.array([[-0.00811399, -0.0147414, 0.01349019, 0.03293019, 0.00601434, -1.8426328, -0.79240534, -0.10645485, 0.17172625, 1.80135312, 0.79577801, 0.12998358, -0.17681121, -0.19468721, -0.05885516, 0.07327601, -0.12799194, 0.11971723, -0.02095433, 0.11640385, 0.1111035]])\n lr.intercept_ = np.array([2.08857843])\n lr.classes_ = np.array([0, 1])\n\n test_df = pd.read_csv(path+filename, index_col=0)\n X_test = test_df[['O', 'C', 'E', 'A', 'N', 'dev_con_0to6', 'dev_con_6to12', 'dev_con_12to18', 'dev_con_18to24', 'dev_dis_0to6', 'dev_dis_6to12', 'dev_dis_12to18', 'dev_dis_18to24', 'pc_on_0to6', 'pc_on_6to12', 'pc_on_12to18', 'pc_on_18to24', 'pc_off_0to6', 'pc_off_6to12', 'pc_off_12to18', 'pc_off_18to24']]\n Y_prob = lr.predict_proba(X_test) # 推論\n Y_prob = np.delete(Y_prob, [1], 1)\n test_df['insider_prob'] = np.ravel(Y_prob)\n test_df['insider_prob'] = test_df['insider_prob'].round(5)\n\n test_df['c_a'] = test_df['authority'].apply(lambda x : 10.0 if x == \"Strong\" else (7.0 if x == \"Normal\" else 5.0))\n test_df['v_r'] = test_df['authority'].apply(lambda x : 10.0 if x == \"Strong\" else (7.0 if x == \"Normal\" else 5.0))\n test_df['v_w'] = 
test_df['v_r'].apply(lambda x : x-0.2*x)\n\n    params = {'c_r': 2.5, 'c_rw': 8.0, 'c_w': 1.5, 'c_ww': 7.0}\n\n    # Compute the best response\n    test_df[\"best_response\"] = test_df.apply(calc_best_response, params=params, axis=1)\n\n    # Drop unneeded columns\n    test_df = test_df.drop(['c_a', 'v_r', 'v_w', 'O', 'C', 'E', 'A', 'N', 'dev_con_0to6', 'dev_con_6to12', 'dev_con_12to18', 'dev_con_18to24', 'dev_dis_0to6', 'dev_dis_6to12', 'dev_dis_12to18', 'dev_dis_18to24', 'pc_on_0to6', 'pc_on_6to12', 'pc_on_12to18', 'pc_on_18to24', 'pc_off_0to6', 'pc_off_6to12', 'pc_off_12to18', 'pc_off_18to24'], axis=1)\n\n    # Save the file\n    filepath = path+\"result_\"+filename\n    test_df.to_csv(filepath)\n\n    return filepath", "id": "6478927", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "backend/process.py" } ]
0
ziacko
[ { "content": "# Partially stolen from https://bitbucket.org/mblum/libgp/src/2537ea7329ef/.ycm_extra_conf.py\nimport os\nimport ycm_core\n\n# These are the compilation flags that will be used in case there's no\n# compilation database set (by default, one is not set).\n# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.\nflags = [\n '-Wall',\n '-Wextra',\n '-Werror',\n # '-Wc++98-compat',\n '-Wno-long-long',\n '-Wno-variadic-macros',\n '-fexceptions',\n # THIS IS IMPORTANT! Without a \"-std=<something>\" flag, clang won't know which\n # language to use when compiling headers. So it will guess. Badly. So C++\n # headers will be compiled as C headers. You don't want that so ALWAYS specify\n # a \"-std=<something>\".\n # For a C project, you would set this to something like 'c99' instead of\n # 'c++11'.\n '-std=c++11',\n # ...and the same thing goes for the magic -x option which specifies the\n # language that the files to be compiled are written in. This is mostly\n # relevant for c++ headers.\n # For a C project, you would set this to 'c' instead of 'c++'.\n '-x', 'c++',\n # This path will only work on OS X, but extra paths that don't exist are not\n # harmful\n '-isystem', '/System/Library/Frameworks/Python.framework/Headers',\n '-isystem', '/usr/local/include',\n '-isystem', '/usr/local/include/eigen3',\n '-I', 'include', '-I./dependencies', \n '-I./dependencies/glm/glm/', \n '-I./dependencies/stb', \n '-I./dependencies/imgui', \n '-I./dependencies/assimp/include/assimp/', \n '-I./dependencies/tinyextender/Include/', \n '-I./dependencies/tinyshaders/Include', \n '-I./dependencies/tinywindow/Include/', \n '-I./dependencies/tinyclock/Include/', \n '-I./include/' \n]\n\n# Set this to the absolute path to the folder (NOT the file!) containing the\n# compile_commands.json file to use that instead of 'flags'. See here for\n# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html\n#\n# Most projects will NOT need to set this to anything; you can just change the\n# 'flags' list of compilation flags. 
Notice that YCM itself uses that approach.\ncompilation_database_folder = ''\n\nif compilation_database_folder:\n database = ycm_core.CompilationDatabase( compilation_database_folder )\nelse:\n database = None\n\n\ndef DirectoryOfThisScript():\n return os.path.dirname( os.path.abspath( __file__ ) )\n\n\ndef MakeRelativePathsInFlagsAbsolute( flags, working_directory ):\n if not working_directory:\n return list( flags )\n new_flags = []\n make_next_absolute = False\n path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]\n for flag in flags:\n new_flag = flag\n\n if make_next_absolute:\n make_next_absolute = False\n if not flag.startswith( '/' ):\n new_flag = os.path.join( working_directory, flag )\n\n for path_flag in path_flags:\n if flag == path_flag:\n make_next_absolute = True\n break\n\n if flag.startswith( path_flag ):\n path = flag[ len( path_flag ): ]\n new_flag = path_flag + os.path.join( working_directory, path )\n break\n\n if new_flag:\n new_flags.append( new_flag )\n return new_flags\n\n\ndef FlagsForFile( filename ):\n if database:\n # Bear in mind that compilation_info.compiler_flags_ does NOT return a\n # python list, but a \"list-like\" StringVec object\n compilation_info = database.GetCompilationInfoForFile( filename )\n final_flags = MakeRelativePathsInFlagsAbsolute(\n compilation_info.compiler_flags_,\n compilation_info.compiler_working_dir_ )\n else:\n relative_to = DirectoryOfThisScript()\n final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )\n\n return {\n 'flags': final_flags,\n 'do_cache': True\n }\n", "id": "4694995", "language": "Python", "matching_score": 0, "max_stars_count": 28, "path": ".ycm_extra_conf.py" }, { "content": "#-*- coding: UTF-8 -*-\n\nfrom ctypes import POINTER, c_void_p, c_int, c_uint, c_char, c_float, Structure, c_char_p, c_double, c_ubyte, c_size_t, c_uint32\n\n\nclass Vector2D(Structure):\n \"\"\"\n See 'aiVector2D.h' for details.\n \"\"\" \n\n\n _fields_ = [\n (\"x\", c_float),(\"y\", c_float),\n ]\n\nclass Matrix3x3(Structure):\n \"\"\"\n See 'aiMatrix3x3.h' for details.\n \"\"\" \n\n\n _fields_ = [\n (\"a1\", c_float),(\"a2\", c_float),(\"a3\", c_float),\n (\"b1\", c_float),(\"b2\", c_float),(\"b3\", c_float),\n (\"c1\", c_float),(\"c2\", c_float),(\"c3\", c_float),\n ]\n\nclass Texel(Structure):\n \"\"\"\n See 'aiTexture.h' for details.\n \"\"\" \n\n _fields_ = [\n (\"b\", c_ubyte),(\"g\", c_ubyte),(\"r\", c_ubyte),(\"a\", c_ubyte),\n ]\n\nclass Color4D(Structure):\n \"\"\"\n See 'aiColor4D.h' for details.\n \"\"\" \n\n\n _fields_ = [\n # Red, green, blue and alpha color values\n (\"r\", c_float),(\"g\", c_float),(\"b\", c_float),(\"a\", c_float),\n ]\n\nclass Plane(Structure):\n \"\"\"\n See 'aiTypes.h' for details.\n \"\"\" \n\n _fields_ = [\n # Plane equation\n (\"a\", c_float),(\"b\", c_float),(\"c\", c_float),(\"d\", c_float),\n ]\n\nclass Color3D(Structure):\n \"\"\"\n See 'aiTypes.h' for details.\n \"\"\" \n\n _fields_ = [\n # Red, green and blue color values\n (\"r\", c_float),(\"g\", c_float),(\"b\", c_float),\n ]\n\nclass String(Structure):\n \"\"\"\n See 'aiTypes.h' for details.\n \"\"\" \n\n MAXLEN = 1024\n\n _fields_ = [\n # Binary length of the string excluding the terminal 0. This is NOT the\n # logical length of strings containing UTF-8 multibyte sequences! It's\n # the number of bytes from the beginning of the string to its end.\n (\"length\", c_size_t),\n \n # String buffer. 
Size limit is MAXLEN\n (\"data\", c_char*MAXLEN),\n ]\n\nclass MaterialPropertyString(Structure):\n \"\"\"\n See 'aiTypes.h' for details.\n \n The size of length is truncated to 4 bytes on 64-bit platforms when used as a\n material property (see MaterialSystem.cpp aiMaterial::AddProperty() for details).\n \"\"\"\n\n MAXLEN = 1024\n\n _fields_ = [\n # Binary length of the string excluding the terminal 0. This is NOT the\n # logical length of strings containing UTF-8 multibyte sequences! It's\n # the number of bytes from the beginning of the string to its end.\n (\"length\", c_uint32),\n \n # String buffer. Size limit is MAXLEN\n (\"data\", c_char*MAXLEN),\n ]\n\nclass MemoryInfo(Structure):\n \"\"\"\n See 'aiTypes.h' for details.\n \"\"\" \n\n _fields_ = [\n # Storage allocated for texture data\n (\"textures\", c_uint),\n \n # Storage allocated for material data\n (\"materials\", c_uint),\n \n # Storage allocated for mesh data\n (\"meshes\", c_uint),\n \n # Storage allocated for node data\n (\"nodes\", c_uint),\n \n # Storage allocated for animation data\n (\"animations\", c_uint),\n \n # Storage allocated for camera data\n (\"cameras\", c_uint),\n \n # Storage allocated for light data\n (\"lights\", c_uint),\n \n # Total storage allocated for the full import.\n (\"total\", c_uint),\n ]\n\nclass Quaternion(Structure):\n \"\"\"\n See 'aiQuaternion.h' for details.\n \"\"\" \n\n\n _fields_ = [\n # w,x,y,z components of the quaternion\n (\"w\", c_float),(\"x\", c_float),(\"y\", c_float),(\"z\", c_float),\n ]\n\nclass Face(Structure):\n \"\"\"\n See 'aiMesh.h' for details.\n \"\"\" \n\n _fields_ = [\n # Number of indices defining this face.\n # The maximum value for this member is\n #AI_MAX_FACE_INDICES.\n (\"mNumIndices\", c_uint),\n \n # Pointer to the indices array. Size of the array is given in numIndices.\n (\"mIndices\", POINTER(c_uint)),\n ]\n\nclass VertexWeight(Structure):\n \"\"\"\n See 'aiMesh.h' for details.\n \"\"\" \n\n _fields_ = [\n # Index of the vertex which is influenced by the bone.\n (\"mVertexId\", c_uint),\n \n # The strength of the influence in the range (0...1).\n # The influence from all bones at one vertex amounts to 1.\n (\"mWeight\", c_float),\n ]\n\nclass Matrix4x4(Structure):\n \"\"\"\n See 'aiMatrix4x4.h' for details.\n \"\"\" \n\n\n _fields_ = [\n (\"a1\", c_float),(\"a2\", c_float),(\"a3\", c_float),(\"a4\", c_float),\n (\"b1\", c_float),(\"b2\", c_float),(\"b3\", c_float),(\"b4\", c_float),\n (\"c1\", c_float),(\"c2\", c_float),(\"c3\", c_float),(\"c4\", c_float),\n (\"d1\", c_float),(\"d2\", c_float),(\"d3\", c_float),(\"d4\", c_float),\n ]\n\nclass Vector3D(Structure):\n \"\"\"\n See 'aiVector3D.h' for details.\n \"\"\" \n\n\n _fields_ = [\n (\"x\", c_float),(\"y\", c_float),(\"z\", c_float),\n ]\n\nclass MeshKey(Structure):\n \"\"\"\n See 'aiAnim.h' for details.\n \"\"\" \n\n _fields_ = [\n # The time of this key\n (\"mTime\", c_double),\n \n # Index into the aiMesh::mAnimMeshes array of the\n # mesh corresponding to the\n #aiMeshAnim hosting this\n # key frame. 
The referenced anim mesh is evaluated\n # according to the rules defined in the docs for\n #aiAnimMesh.\n (\"mValue\", c_uint),\n ]\n\nclass MetadataEntry(Structure):\n \"\"\"\n See 'metadata.h' for details\n \"\"\"\n AI_BOOL = 0\n AI_INT32 = 1\n AI_UINT64 = 2\n AI_FLOAT = 3\n AI_DOUBLE = 4\n AI_AISTRING = 5\n AI_AIVECTOR3D = 6\n AI_META_MAX = 7\n _fields_ = [\n # The type field uniquely identifies the underlying type of the data field\n (\"mType\", c_uint),\n (\"mData\", c_void_p),\n ]\n\nclass Metadata(Structure):\n \"\"\"\n See 'metadata.h' for details\n \"\"\"\n _fields_ = [\n # Length of the mKeys and mValues arrays, respectively\n (\"mNumProperties\", c_uint),\n\n # Arrays of keys, may not be NULL. Entries in this array may not be NULL\n # as well.\n (\"mKeys\", POINTER(String)),\n\n # Arrays of values, may not be NULL. Entries in this array may be NULL\n # if the corresponding property key has no assigned value.\n (\"mValues\", POINTER(MetadataEntry)),\n ]\n\nclass Node(Structure):\n \"\"\"\n See 'aiScene.h' for details.\n \"\"\" \n\n\nNode._fields_ = [\n # The name of the node.\n # The name might be empty (length of zero) but all nodes which\n # need to be accessed afterwards by bones or anims are usually named.\n # Multiple nodes may have the same name, but nodes which are accessed\n # by bones (see\n #aiBone and\n #aiMesh::mBones) *must* be unique.\n # Cameras and lights are assigned to a specific node name - if there\n # are multiple nodes with this name, they're assigned to each of them.\n # <br>\n # There are no limitations regarding the characters contained in\n # this text. You should be able to handle stuff like whitespace, tabs,\n # linefeeds, quotation marks, ampersands, ... .\n (\"mName\", String),\n \n # The transformation relative to the node's parent.\n (\"mTransformation\", Matrix4x4),\n \n # Parent node. NULL if this node is the root node.\n (\"mParent\", POINTER(Node)),\n \n # The number of child nodes of this node.\n (\"mNumChildren\", c_uint),\n \n # The child nodes of this node. NULL if mNumChildren is 0.\n (\"mChildren\", POINTER(POINTER(Node))),\n \n # The number of meshes of this node.\n (\"mNumMeshes\", c_uint),\n \n # The meshes of this node. Each entry is an index into the mesh\n (\"mMeshes\", POINTER(c_uint)),\n\n # Metadata associated with this node or NULL if there is no metadata.\n # Whether any metadata is generated depends on the source file format.\n (\"mMetadata\", POINTER(Metadata)),\n ]\n\nclass Light(Structure):\n \"\"\"\n See 'aiLight.h' for details.\n \"\"\" \n\n\n _fields_ = [\n # The name of the light source.\n # There must be a node in the scenegraph with the same name.\n # This node specifies the position of the light in the scene\n # hierarchy and can be animated.\n (\"mName\", String),\n \n # The type of the light source.\n # aiLightSource_UNDEFINED is not a valid value for this member.\n (\"mType\", c_uint),\n \n # Position of the light source in space. Relative to the\n # transformation of the node corresponding to the light.\n # The position is undefined for directional lights.\n (\"mPosition\", Vector3D),\n \n # Direction of the light source in space. Relative to the\n # transformation of the node corresponding to the light.\n # The direction is undefined for point lights. 
The vector\n # may be normalized, but it needn't.\n (\"mDirection\", Vector3D),\n \n # Constant light attenuation factor.\n # The intensity of the light source at a given distance 'd' from\n # the light's position is\n # @code\n # Atten = 1/( att0 + att1\n # d + att2\n # d*d)\n # @endcode\n # This member corresponds to the att0 variable in the equation.\n # Naturally undefined for directional lights.\n (\"mAttenuationConstant\", c_float),\n \n # Linear light attenuation factor.\n # The intensity of the light source at a given distance 'd' from\n # the light's position is\n # @code\n # Atten = 1/( att0 + att1\n # d + att2\n # d*d)\n # @endcode\n # This member corresponds to the att1 variable in the equation.\n # Naturally undefined for directional lights.\n (\"mAttenuationLinear\", c_float),\n \n # Quadratic light attenuation factor.\n # The intensity of the light source at a given distance 'd' from\n # the light's position is\n # @code\n # Atten = 1/( att0 + att1\n # d + att2\n # d*d)\n # @endcode\n # This member corresponds to the att2 variable in the equation.\n # Naturally undefined for directional lights.\n (\"mAttenuationQuadratic\", c_float),\n \n # Diffuse color of the light source\n # The diffuse light color is multiplied with the diffuse\n # material color to obtain the final color that contributes\n # to the diffuse shading term.\n (\"mColorDiffuse\", Color3D),\n \n # Specular color of the light source\n # The specular light color is multiplied with the specular\n # material color to obtain the final color that contributes\n # to the specular shading term.\n (\"mColorSpecular\", Color3D),\n \n # Ambient color of the light source\n # The ambient light color is multiplied with the ambient\n # material color to obtain the final color that contributes\n # to the ambient shading term. Most renderers will ignore\n # this value it, is just a remaining of the fixed-function pipeline\n # that is still supported by quite many file formats.\n (\"mColorAmbient\", Color3D),\n \n # Inner angle of a spot light's light cone.\n # The spot light has maximum influence on objects inside this\n # angle. The angle is given in radians. It is 2PI for point\n # lights and undefined for directional lights.\n (\"mAngleInnerCone\", c_float),\n \n # Outer angle of a spot light's light cone.\n # The spot light does not affect objects outside this angle.\n # The angle is given in radians. It is 2PI for point lights and\n # undefined for directional lights. The outer angle must be\n # greater than or equal to the inner angle.\n # It is assumed that the application uses a smooth\n # interpolation between the inner and the outer cone of the\n # spot light.\n (\"mAngleOuterCone\", c_float),\n ]\n\nclass Texture(Structure):\n \"\"\"\n See 'aiTexture.h' for details.\n \"\"\" \n\n\n _fields_ = [\n # Width of the texture, in pixels\n # If mHeight is zero the texture is compressed in a format\n # like JPEG. In this case mWidth specifies the size of the\n # memory area pcData is pointing to, in bytes.\n (\"mWidth\", c_uint),\n \n # Height of the texture, in pixels\n # If this value is zero, pcData points to an compressed texture\n # in any format (e.g. JPEG).\n (\"mHeight\", c_uint),\n \n # A hint from the loader to make it easier for applications\n # to determine the type of embedded compressed textures.\n # If mHeight != 0 this member is undefined. 
Otherwise it\n # is set set to '\\\\0\\\\0\\\\0\\\\0' if the loader has no additional\n # information about the texture file format used OR the\n # file extension of the format without a trailing dot. If there\n # are multiple file extensions for a format, the shortest\n # extension is chosen (JPEG maps to 'jpg', not to 'jpeg').\n # E.g. 'dds\\\\0', 'pcx\\\\0', 'jpg\\\\0'. All characters are lower-case.\n # The fourth character will always be '\\\\0'.\n (\"achFormatHint\", c_char*4),\n \n # Data of the texture.\n # Points to an array of mWidth\n # mHeight aiTexel's.\n # The format of the texture data is always ARGB8888 to\n # make the implementation for user of the library as easy\n # as possible. If mHeight = 0 this is a pointer to a memory\n # buffer of size mWidth containing the compressed texture\n # data. Good luck, have fun!\n (\"pcData\", POINTER(Texel)),\n ]\n\nclass Ray(Structure):\n \"\"\"\n See 'aiTypes.h' for details.\n \"\"\" \n\n _fields_ = [\n # Position and direction of the ray\n (\"pos\", Vector3D),(\"dir\", Vector3D),\n ]\n\nclass UVTransform(Structure):\n \"\"\"\n See 'aiMaterial.h' for details.\n \"\"\" \n\n _fields_ = [\n # Translation on the u and v axes.\n # The default value is (0|0).\n (\"mTranslation\", Vector2D),\n \n # Scaling on the u and v axes.\n # The default value is (1|1).\n (\"mScaling\", Vector2D),\n \n # Rotation - in counter-clockwise direction.\n # The rotation angle is specified in radians. The\n # rotation center is 0.5f|0.5f. The default value\n # 0.f.\n (\"mRotation\", c_float),\n ]\n\nclass MaterialProperty(Structure):\n \"\"\"\n See 'aiMaterial.h' for details.\n \"\"\" \n\n _fields_ = [\n # Specifies the name of the property (key)\n # Keys are generally case insensitive.\n (\"mKey\", String),\n \n # Textures: Specifies their exact usage semantic.\n # For non-texture properties, this member is always 0\n # (or, better-said,\n #aiTextureType_NONE).\n (\"mSemantic\", c_uint),\n \n # Textures: Specifies the index of the texture.\n # For non-texture properties, this member is always 0.\n (\"mIndex\", c_uint),\n \n # Size of the buffer mData is pointing to, in bytes.\n # This value may not be 0.\n (\"mDataLength\", c_uint),\n \n # Type information for the property.\n # Defines the data layout inside the data buffer. 
This is used\n # by the library internally to perform debug checks and to\n # utilize proper type conversions.\n # (It's probably a hacky solution, but it works.)\n (\"mType\", c_uint),\n \n # Binary buffer to hold the property's value.\n # The size of the buffer is always mDataLength.\n (\"mData\", POINTER(c_char)),\n ]\n\nclass Material(Structure):\n \"\"\"\n See 'aiMaterial.h' for details.\n \"\"\" \n\n _fields_ = [\n # List of all material properties loaded.\n (\"mProperties\", POINTER(POINTER(MaterialProperty))),\n \n # Number of properties in the data base\n (\"mNumProperties\", c_uint),\n \n # Storage allocated\n (\"mNumAllocated\", c_uint),\n ]\n\nclass Bone(Structure):\n \"\"\"\n See 'aiMesh.h' for details.\n \"\"\" \n\n _fields_ = [\n # The name of the bone.\n (\"mName\", String),\n \n # The number of vertices affected by this bone\n # The maximum value for this member is\n #AI_MAX_BONE_WEIGHTS.\n (\"mNumWeights\", c_uint),\n \n # The vertices affected by this bone\n (\"mWeights\", POINTER(VertexWeight)),\n \n # Matrix that transforms from mesh space to bone space in bind pose\n (\"mOffsetMatrix\", Matrix4x4),\n ]\n\nclass Mesh(Structure):\n \"\"\"\n See 'aiMesh.h' for details.\n \"\"\" \n\n AI_MAX_FACE_INDICES = 0x7fff\n AI_MAX_BONE_WEIGHTS = 0x7fffffff\n AI_MAX_VERTICES = 0x7fffffff\n AI_MAX_FACES = 0x7fffffff\n AI_MAX_NUMBER_OF_COLOR_SETS = 0x8\n AI_MAX_NUMBER_OF_TEXTURECOORDS = 0x8\n\n _fields_ = [\n # Bitwise combination of the members of the\n #aiPrimitiveType enum.\n # This specifies which types of primitives are present in the mesh.\n # The \"SortByPrimitiveType\"-Step can be used to make sure the\n # output meshes consist of one primitive type each.\n (\"mPrimitiveTypes\", c_uint),\n \n # The number of vertices in this mesh.\n # This is also the size of all of the per-vertex data arrays.\n # The maximum value for this member is\n #AI_MAX_VERTICES.\n (\"mNumVertices\", c_uint),\n \n # The number of primitives (triangles, polygons, lines) in this mesh.\n # This is also the size of the mFaces array.\n # The maximum value for this member is\n #AI_MAX_FACES.\n (\"mNumFaces\", c_uint),\n \n # Vertex positions.\n # This array is always present in a mesh. The array is\n # mNumVertices in size.\n (\"mVertices\", POINTER(Vector3D)),\n \n # Vertex normals.\n # The array contains normalized vectors, NULL if not present.\n # The array is mNumVertices in size. Normals are undefined for\n # point and line primitives. A mesh consisting of points and\n # lines only may not have normal vectors. Meshes with mixed\n # primitive types (i.e. lines and triangles) may have normals,\n # but the normals for vertices that are only referenced by\n # point or line primitives are undefined and set to QNaN (WARN:\n # qNaN compares to inequal to *everything*, even to qNaN itself.\n # Using code like this to check whether a field is qnan is:\n # @code\n #define IS_QNAN(f) (f != f)\n # @endcode\n # still dangerous because even 1.f == 1.f could evaluate to false! (\n # remember the subtleties of IEEE754 artithmetics). Use stuff like\n # @c fpclassify instead.\n # @note Normal vectors computed by Assimp are always unit-length.\n # However, this needn't apply for normals that have been taken\n # directly from the model file.\n (\"mNormals\", POINTER(Vector3D)),\n \n # Vertex tangents.\n # The tangent of a vertex points in the direction of the positive\n # X texture axis. The array contains normalized vectors, NULL if\n # not present. The array is mNumVertices in size. 
A mesh consisting\n # of points and lines only may not have normal vectors. Meshes with\n # mixed primitive types (i.e. lines and triangles) may have\n # normals, but the normals for vertices that are only referenced by\n # point or line primitives are undefined and set to qNaN. See\n # the\n #mNormals member for a detailed discussion of qNaNs.\n # @note If the mesh contains tangents, it automatically also\n # contains bitangents (the bitangent is just the cross product of\n # tangent and normal vectors).\n (\"mTangents\", POINTER(Vector3D)),\n \n # Vertex bitangents.\n # The bitangent of a vertex points in the direction of the positive\n # Y texture axis. The array contains normalized vectors, NULL if not\n # present. The array is mNumVertices in size.\n # @note If the mesh contains tangents, it automatically also contains\n # bitangents.\n (\"mBitangents\", POINTER(Vector3D)),\n \n # Vertex color sets.\n # A mesh may contain 0 to\n #AI_MAX_NUMBER_OF_COLOR_SETS vertex\n # colors per vertex. NULL if not present. Each array is\n # mNumVertices in size if present.\n (\"mColors\", POINTER(Color4D)*AI_MAX_NUMBER_OF_COLOR_SETS),\n \n # Vertex texture coords, also known as UV channels.\n # A mesh may contain 0 to AI_MAX_NUMBER_OF_TEXTURECOORDS per\n # vertex. NULL if not present. The array is mNumVertices in size.\n (\"mTextureCoords\", POINTER(Vector3D)*AI_MAX_NUMBER_OF_TEXTURECOORDS),\n \n # Specifies the number of components for a given UV channel.\n # Up to three channels are supported (UVW, for accessing volume\n # or cube maps). If the value is 2 for a given channel n, the\n # component p.z of mTextureCoords[n][p] is set to 0.0f.\n # If the value is 1 for a given channel, p.y is set to 0.0f, too.\n # @note 4D coords are not supported\n (\"mNumUVComponents\", c_uint*AI_MAX_NUMBER_OF_TEXTURECOORDS),\n \n # The faces the mesh is constructed from.\n # Each face refers to a number of vertices by their indices.\n # This array is always present in a mesh, its size is given\n # in mNumFaces. If the\n #AI_SCENE_FLAGS_NON_VERBOSE_FORMAT\n # is NOT set each face references an unique set of vertices.\n (\"mFaces\", POINTER(Face)),\n \n # The number of bones this mesh contains.\n # Can be 0, in which case the mBones array is NULL.\n (\"mNumBones\", c_uint),\n \n # The bones of this mesh.\n # A bone consists of a name by which it can be found in the\n # frame hierarchy and a set of vertex weights.\n (\"mBones\", POINTER(POINTER(Bone))),\n \n # The material used by this mesh.\n # A mesh does use only a single material. If an imported model uses\n # multiple materials, the import splits up the mesh. Use this value\n # as index into the scene's material list.\n (\"mMaterialIndex\", c_uint),\n \n # Name of the mesh. Meshes can be named, but this is not a\n # requirement and leaving this field empty is totally fine.\n # There are mainly three uses for mesh names:\n # - some formats name nodes and meshes independently.\n # - importers tend to split meshes up to meet the\n # one-material-per-mesh requirement. Assigning\n # the same (dummy) name to each of the result meshes\n # aids the caller at recovering the original mesh\n # partitioning.\n # - Vertex animations refer to meshes by their names.\n (\"mName\", String),\n \n # NOT CURRENTLY IN USE. The number of attachment meshes\n (\"mNumAnimMeshes\", c_uint),\n \n # NOT CURRENTLY IN USE. 
Attachment meshes for this mesh, for vertex-based animation.\n # Attachment meshes carry replacement data for some of the\n # mesh'es vertex components (usually positions, normals).\n ]\n\nclass Camera(Structure):\n \"\"\"\n See 'aiCamera.h' for details.\n \"\"\" \n\n\n _fields_ = [\n # The name of the camera.\n # There must be a node in the scenegraph with the same name.\n # This node specifies the position of the camera in the scene\n # hierarchy and can be animated.\n (\"mName\", String),\n \n # Position of the camera relative to the coordinate space\n # defined by the corresponding node.\n # The default value is 0|0|0.\n (\"mPosition\", Vector3D),\n \n # 'Up' - vector of the camera coordinate system relative to\n # the coordinate space defined by the corresponding node.\n # The 'right' vector of the camera coordinate system is\n # the cross product of the up and lookAt vectors.\n # The default value is 0|1|0. The vector\n # may be normalized, but it needn't.\n (\"mUp\", Vector3D),\n \n # 'LookAt' - vector of the camera coordinate system relative to\n # the coordinate space defined by the corresponding node.\n # This is the viewing direction of the user.\n # The default value is 0|0|1. The vector\n # may be normalized, but it needn't.\n (\"mLookAt\", Vector3D),\n \n # Half horizontal field of view angle, in radians.\n # The field of view angle is the angle between the center\n # line of the screen and the left or right border.\n # The default value is 1/4PI.\n (\"mHorizontalFOV\", c_float),\n \n # Distance of the near clipping plane from the camera.\n # The value may not be 0.f (for arithmetic reasons to prevent\n # a division through zero). The default value is 0.1f.\n (\"mClipPlaneNear\", c_float),\n \n # Distance of the far clipping plane from the camera.\n # The far clipping plane must, of course, be further away than the\n # near clipping plane. The default value is 1000.f. The ratio\n # between the near and the far plane should not be too\n # large (between 1000-10000 should be ok) to avoid floating-point\n # inaccuracies which could lead to z-fighting.\n (\"mClipPlaneFar\", c_float),\n \n # Screen aspect ratio.\n # This is the ration between the width and the height of the\n # screen. Typical values are 4/3, 1/2 or 1/1. This value is\n # 0 if the aspect ratio is not defined in the source file.\n # 0 is also the default value.\n (\"mAspect\", c_float),\n ]\n\nclass VectorKey(Structure):\n \"\"\"\n See 'aiAnim.h' for details.\n \"\"\" \n\n _fields_ = [\n # The time of this key\n (\"mTime\", c_double),\n \n # The value of this key\n (\"mValue\", Vector3D),\n ]\n\nclass QuatKey(Structure):\n \"\"\"\n See 'aiAnim.h' for details.\n \"\"\" \n\n _fields_ = [\n # The time of this key\n (\"mTime\", c_double),\n \n # The value of this key\n (\"mValue\", Quaternion),\n ]\n\nclass NodeAnim(Structure):\n \"\"\"\n See 'aiAnim.h' for details.\n \"\"\" \n\n _fields_ = [\n # The name of the node affected by this animation. The node\n # must exist and it must be unique.\n (\"mNodeName\", String),\n \n # The number of position keys\n (\"mNumPositionKeys\", c_uint),\n \n # The position keys of this animation channel. Positions are\n # specified as 3D vector. The array is mNumPositionKeys in size.\n # If there are position keys, there will also be at least one\n # scaling and one rotation key.\n (\"mPositionKeys\", POINTER(VectorKey)),\n \n # The number of rotation keys\n (\"mNumRotationKeys\", c_uint),\n \n # The rotation keys of this animation channel. 
Rotations are\n # given as quaternions, which are 4D vectors. The array is\n # mNumRotationKeys in size.\n # If there are rotation keys, there will also be at least one\n # scaling and one position key.\n (\"mRotationKeys\", POINTER(QuatKey)),\n \n # The number of scaling keys\n (\"mNumScalingKeys\", c_uint),\n \n # The scaling keys of this animation channel. Scalings are\n # specified as 3D vector. The array is mNumScalingKeys in size.\n # If there are scaling keys, there will also be at least one\n # position and one rotation key.\n (\"mScalingKeys\", POINTER(VectorKey)),\n \n # Defines how the animation behaves before the first\n # key is encountered.\n # The default value is aiAnimBehaviour_DEFAULT (the original\n # transformation matrix of the affected node is used).\n (\"mPreState\", c_uint),\n \n # Defines how the animation behaves after the last\n # key was processed.\n # The default value is aiAnimBehaviour_DEFAULT (the original\n # transformation matrix of the affected node is taken).\n (\"mPostState\", c_uint),\n ]\n\nclass Animation(Structure):\n \"\"\"\n See 'aiAnim.h' for details.\n \"\"\" \n\n _fields_ = [\n # The name of the animation. If the modeling package this data was\n # exported from does support only a single animation channel, this\n # name is usually empty (length is zero).\n (\"mName\", String),\n \n # Duration of the animation in ticks.\n (\"mDuration\", c_double),\n \n # Ticks per second. 0 if not specified in the imported file\n (\"mTicksPerSecond\", c_double),\n \n # The number of bone animation channels. Each channel affects\n # a single node.\n (\"mNumChannels\", c_uint),\n \n # The node animation channels. Each channel affects a single node.\n # The array is mNumChannels in size.\n (\"mChannels\", POINTER(POINTER(NodeAnim))),\n \n # The number of mesh animation channels. Each channel affects\n # a single mesh and defines vertex-based animation.\n (\"mNumMeshChannels\", c_uint),\n \n # The mesh animation channels. Each channel affects a single mesh.\n # The array is mNumMeshChannels in size.\n ]\n\nclass Scene(Structure):\n \"\"\"\n See 'aiScene.h' for details.\n \"\"\" \n\n AI_SCENE_FLAGS_INCOMPLETE = 0x1\n AI_SCENE_FLAGS_VALIDATED = 0x2\n AI_SCENE_FLAGS_VALIDATION_WARNING = \t0x4\n AI_SCENE_FLAGS_NON_VERBOSE_FORMAT = \t0x8\n AI_SCENE_FLAGS_TERRAIN = 0x10\n\n _fields_ = [\n # Any combination of the AI_SCENE_FLAGS_XXX flags. By default\n # this value is 0, no flags are set. Most applications will\n # want to reject all scenes with the AI_SCENE_FLAGS_INCOMPLETE\n # bit set.\n (\"mFlags\", c_uint),\n \n # The root node of the hierarchy.\n # There will always be at least the root node if the import\n # was successful (and no special flags have been set).\n # Presence of further nodes depends on the format and content\n # of the imported file.\n (\"mRootNode\", POINTER(Node)),\n \n # The number of meshes in the scene.\n (\"mNumMeshes\", c_uint),\n \n # The array of meshes.\n # Use the indices given in the aiNode structure to access\n # this array. The array is mNumMeshes in size. If the\n # AI_SCENE_FLAGS_INCOMPLETE flag is not set there will always\n # be at least ONE material.\n (\"mMeshes\", POINTER(POINTER(Mesh))),\n \n # The number of materials in the scene.\n (\"mNumMaterials\", c_uint),\n \n # The array of materials.\n # Use the index given in each aiMesh structure to access this\n # array. The array is mNumMaterials in size. 
If the\n # AI_SCENE_FLAGS_INCOMPLETE flag is not set there will always\n # be at least ONE material.\n (\"mMaterials\", POINTER(POINTER(Material))),\n \n # The number of animations in the scene.\n (\"mNumAnimations\", c_uint),\n \n # The array of animations.\n # All animations imported from the given file are listed here.\n # The array is mNumAnimations in size.\n (\"mAnimations\", POINTER(POINTER(Animation))),\n \n # The number of textures embedded into the file\n (\"mNumTextures\", c_uint),\n \n # The array of embedded textures.\n # Not many file formats embed their textures into the file.\n # An example is Quake's MDL format (which is also used by\n # some GameStudio versions)\n (\"mTextures\", POINTER(POINTER(Texture))),\n \n # The number of light sources in the scene. Light sources\n # are fully optional, in most cases this attribute will be 0\n (\"mNumLights\", c_uint),\n \n # The array of light sources.\n # All light sources imported from the given file are\n # listed here. The array is mNumLights in size.\n (\"mLights\", POINTER(POINTER(Light))),\n \n # The number of cameras in the scene. Cameras\n # are fully optional, in most cases this attribute will be 0\n (\"mNumCameras\", c_uint),\n \n # The array of cameras.\n # All cameras imported from the given file are listed here.\n # The array is mNumCameras in size. The first camera in the\n # array (if existing) is the default camera view into\n # the scene.\n (\"mCameras\", POINTER(POINTER(Camera))),\n\n # This data contains global metadata which belongs to the scene like\n # unit-conversions, versions, vendors or other model-specific data. This\n # can be used to store format-specific metadata as well.\n (\"mMetadata\", POINTER(Metadata)),\n ]\n\nassimp_structs_as_tuple = (Matrix4x4,\n Matrix3x3,\n Vector2D,\n Vector3D,\n Color3D,\n Color4D,\n Quaternion,\n Plane,\n Texel)\n", "id": "8617667", "language": "Python", "matching_score": 0, "max_stars_count": 28, "path": "dependencies/assimp/port/PyAssimp/pyassimp/structs.py" } ]
0
mohitm123
[ { "content": "from django.contrib import admin\nfrom .models import Project\n\n\[email protected](Project)\nclass ProjectAdmin(admin.ModelAdmin):\n class Meta:\n model = Project\n fields = '__all__'\n", "id": "12139674", "language": "Python", "matching_score": 2.51285457611084, "max_stars_count": 11, "path": "src/project/admin.py" }, { "content": "from django.contrib import admin\nfrom .models import StudentProfile, MentorProfile\nfrom project.models import StudentProposal\n\n\[email protected](StudentProfile)\nclass StudentProfileAdmin(admin.ModelAdmin):\n class Meta:\n model = StudentProfile\n fields = '__all__'\n\n\[email protected](MentorProfile)\nclass MentorProfileAdmin(admin.ModelAdmin):\n class Meta:\n model = MentorProfile\n fields = '__all__'\n\n\[email protected](StudentProposal)\nclass StudentProposalAdmin(admin.ModelAdmin):\n class Meta:\n model = StudentProposal\n fields = '__all__'\n", "id": "9369569", "language": "Python", "matching_score": 1.8048509359359741, "max_stars_count": 11, "path": "src/account/admin.py" }, { "content": "from django.test import TestCase\nfrom django.shortcuts import reverse\nfrom rest_framework import status\nfrom django.contrib.auth.models import User\n\nfrom account.models import StudentProfile, MentorProfile\nfrom project.models import Project\n\n\nclass ProjectViewTest(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.student = User.objects.create_user(username='student', email='<EMAIL>', password='password')\n cls.mentor = User.objects.create_user(username='mentor', email='<EMAIL>', password='password')\n cls.student_profile = StudentProfile.objects.create(user=cls.student, phone='9999999999',\n github='https://github.com',\n year=StudentProfile.YEAR_CHOICES[0][0],\n gender=StudentProfile.GENDER_CHOICES[0][0],\n branch=StudentProfile.BRANCH_CHOICES[0][0])\n cls.mentor_profile = MentorProfile.objects.create(user=cls.mentor, phone='9999999999',\n github='https://github.com',\n gender=StudentProfile.GENDER_CHOICES[0][0])\n\n def test_status_OK(self):\n response = self.client.get(reverse('api:project:projects-list'))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.content.decode('utf-8'), '[]')\n\n def test_create(self):\n data = {\n 'name': 'Project 2',\n 'github_link': 'https://github.com',\n 'short_description': 'Short description',\n 'description': 'Description',\n 'technologies': 'Python,Django,C++',\n 'mentors': [self.mentor_profile.id]\n }\n response = self.client.post(reverse('api:project:projects-list'), data=data)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n self.client.login(username=self.mentor.username, password='password')\n response = self.client.post(reverse('api:project:projects-list'), data=data)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n self.mentor_profile.is_approved = True\n self.mentor_profile.save()\n response = self.client.post(reverse('api:project:projects-list'), data=data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertTrue(Project.objects.filter(name='Project 2', mentors='{}'.format(self.mentor_profile.id)).exists())\n", "id": "8943287", "language": "Python", "matching_score": 2.8236875534057617, "max_stars_count": 11, "path": "src/project/api/tests.py" }, { "content": "from rest_framework.permissions import BasePermission\nfrom account.models import MentorProfile, StudentProfile\n\n\nclass IsApprovedMentor(BasePermission):\n \"\"\"\n Allows access only to 
instances of MentorProfile class.\n \"\"\"\n\n def has_permission(self, request, view):\n return MentorProfile.objects.filter(user=request.user.id, is_approved=True).exists()\n\n\nclass IsStudent(BasePermission):\n \"\"\"\n Allows access only to instances of StudentProfile class.\n \"\"\"\n\n def has_permission(self, request, view):\n return StudentProfile.objects.filter(user=request.user.id).exists()\n", "id": "1179138", "language": "Python", "matching_score": 1.2880746126174927, "max_stars_count": 11, "path": "src/account/api/permissions.py" }, { "content": "from rest_framework.routers import DefaultRouter\nfrom account.api.views import StudentProfileViewSet, MentorProfileViewSet, UserViewSet\nfrom .views import AuthenticationCheckAPIView\nfrom django.urls import path\n\napp_name = 'account'\n\nrouter = DefaultRouter()\nrouter.register(r'student-profile', StudentProfileViewSet, base_name='student-profile')\nrouter.register(r'mentor-profile', MentorProfileViewSet, base_name='mentor-profile')\nrouter.register(r'user', UserViewSet, base_name='user')\n\nurlpatterns = router.urls + [path('auth-check/', AuthenticationCheckAPIView.as_view(), name='auth-check')]\n", "id": "3723287", "language": "Python", "matching_score": 2.8441638946533203, "max_stars_count": 11, "path": "src/account/api/urls.py" }, { "content": "from rest_framework.routers import DefaultRouter\nfrom project.api.views import ProjectViewSet, StudentProposalViewSet\n\napp_name = 'project'\n\nrouter = DefaultRouter()\nrouter.register(r'projects', ProjectViewSet, base_name='projects')\nrouter.register(r'student-proposal', StudentProposalViewSet, base_name='student-proposal')\nurlpatterns = router.urls\n", "id": "1465373", "language": "Python", "matching_score": 1.5800645351409912, "max_stars_count": 11, "path": "src/project/api/urls.py" }, { "content": "from rest_framework.viewsets import ModelViewSet\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom django.shortcuts import get_object_or_404\nfrom account.api.permissions import IsApprovedMentor, IsStudent\nfrom project.api.serializers import ProjectSerializer, StudentProposalSerializer, StudentProposalApproveSerializer\nfrom project.models import StudentProposal, Project\n\n\nclass ProjectViewSet(ModelViewSet):\n serializer_class = ProjectSerializer\n queryset = Project.objects.all()\n permission_classes = [IsApprovedMentor]\n\n def get_permissions(self):\n \"\"\"\n Instantiates and returns the list of permissions that this view requires.\n \"\"\"\n if self.action in ['retrieve', 'list', 'proposals']:\n self.permission_classes = [AllowAny]\n return super().get_permissions()\n\n @action(methods=['get'], detail=True)\n def all_proposals(self, request, pk=None):\n proposals = get_object_or_404(Project, pk=pk).studentproposal_set.all()\n serializer = StudentProposalSerializer(proposals, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n\nclass StudentProposalViewSet(ModelViewSet):\n serializer_class = StudentProposalSerializer\n queryset = StudentProposal.objects.all()\n\n def get_permissions(self):\n if self.action in ['retrieve', 'list']:\n self.permission_classes = (AllowAny,)\n return super().get_permissions()\n\n def get_serializer_class(self):\n return StudentProposalApproveSerializer if self.action == 'approve' else super().get_serializer_class()\n\n @action(methods=['put'], detail=True)\n def approve(self, request, *args, 
**kwargs):\n return self.update(request, args, kwargs)\n", "id": "2521453", "language": "Python", "matching_score": 2.7848405838012695, "max_stars_count": 0, "path": "src/project/api/views.py" }, { "content": "from rest_framework import serializers\nfrom project.models import StudentProposal, Project\n\n\nclass ProjectSerializer(serializers.ModelSerializer):\n class Meta:\n model = Project\n fields = '__all__'\n\n\nclass StudentProposalSerializer(serializers.ModelSerializer):\n is_accepted = serializers.BooleanField(read_only=True)\n\n class Meta:\n model = StudentProposal\n fields = '__all__'\n\n\nclass StudentProposalApproveSerializer(serializers.ModelSerializer):\n class Meta:\n model = StudentProposal\n fields = ('id', 'is_accepted')\n", "id": "9226229", "language": "Python", "matching_score": 0.09879253804683685, "max_stars_count": 11, "path": "src/project/api/serializers.py" }, { "content": "from django.urls import path, include\n\napp_name = 'api'\n\nurlpatterns = [\n path('account/', include('account.api.urls')),\n path('project/', include('project.api.urls'))\n]\n", "id": "6110525", "language": "Python", "matching_score": 0.74988853931427, "max_stars_count": 11, "path": "src/api/urls.py" }, { "content": "from django.contrib import admin\nfrom django.urls import path, include, re_path\nfrom django.conf import settings\nfrom django.contrib.auth.views import LogoutView\nfrom .views import VueView\n\nurlpatterns = [\n path('', include('social_django.urls', namespace='social')),\n path('logout/', LogoutView.as_view(next_page='/sign-in'), name='logout'),\n path('admin/', admin.site.urls),\n path('api/', include('api.urls'))\n]\n\nif settings.DEBUG:\n from rest_framework.permissions import AllowAny\n from drf_yasg.views import get_schema_view\n from drf_yasg import openapi\n\n schema_view = get_schema_view(\n openapi.Info(\n title=\"WoC API\",\n default_version='v1',\n description=\"Documentation for WoC API\",\n contact=openapi.Contact(email=\"<EMAIL>\"),\n license=openapi.License(name=\"MIT License\"),\n ),\n validators=['flex', 'ssv'],\n public=True,\n permission_classes=(AllowAny,),\n )\n\n urlpatterns += [\n re_path(r'^api/docs/swagger(?P<format>\\.json|\\.yaml)$', schema_view.without_ui(cache_timeout=0),\n name='schema-json'),\n re_path(r'^api/docs/swagger/$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),\n re_path(r'^api/docs/redoc/$', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),\n ]\n\nurlpatterns += [\n re_path(r'.*', VueView.as_view(), name='vue-js') # Catch all URL to send all urls to VueJS\n]\n", "id": "9036724", "language": "Python", "matching_score": 2.1548516750335693, "max_stars_count": 11, "path": "src/woc/urls.py" }, { "content": "from django.views.generic import TemplateView\n\n\nclass VueView(TemplateView):\n template_name = 'index.html'\n", "id": "5347940", "language": "Python", "matching_score": 1.2137646675109863, "max_stars_count": 11, "path": "src/woc/views.py" } ]
1.804851
gkshri
[ { "content": "#Using python3\ndef gcd(a, b):\n if a == 0 or b == 0:\n return a + b\n return gcd(b, a % b)\n\nif __name__=='__main__':\n print (gcd(10,100))\n print (gcd(9,145))\n", "id": "2933754", "language": "Python", "matching_score": 0, "max_stars_count": 147, "path": "Maths/Euclidean Algorithm/Python/GCD.py" }, { "content": "\"\"\"\nThis is a simple Linear search algorithm for python.\nThis is the exact implementation of the pseudocode written in README.\n\"\"\"\ndef linear_search(item_list,item):\n index=0\n while index < len(item_list):\n if item_list[index] == item:\n return index\n index+=1\n return None\n\n#Simple demonstration of the function\nif __name__=='__main__':\n print (linear_search([1,3,5,7],3))\n print (linear_search([1,2,5,7,97],0))\n", "id": "9620364", "language": "Python", "matching_score": 0, "max_stars_count": 147, "path": "Searching/Linear Search/Python/LinearSearch.py" } ]
0
asermax
[ { "content": "from django.apps import AppConfig\n\n\nclass OperativeConfig(AppConfig):\n name = 'operative'\n", "id": "4115036", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "api/src/operative/apps.py" }, { "content": "import os\nimport shlex\nimport subprocess\nimport sys\n\nfrom time import sleep\n\nfrom .compat import is_win32, stdout\nfrom .constants import DEFAULT_PLAYER_ARGUMENTS\nfrom .utils import ignored\n\nif is_win32:\n import msvcrt\n\n\nclass Output(object):\n def __init__(self):\n self.opened = False\n\n def open(self):\n self._open()\n self.opened = True\n\n def close(self):\n if self.opened:\n self._close()\n\n self.opened = False\n\n def write(self, data):\n if not self.opened:\n raise IOError(\"Output is not opened\")\n\n return self._write(data)\n\n def _open(self):\n pass\n\n def _close(self):\n pass\n\n def _write(self, data):\n pass\n\n\nclass FileOutput(Output):\n def __init__(self, filename=None, fd=None):\n self.filename = filename\n self.fd = fd\n\n def _open(self):\n if self.filename:\n self.fd = open(self.filename, \"wb\")\n\n if is_win32:\n msvcrt.setmode(self.fd.fileno(), os.O_BINARY)\n\n def _close(self):\n if self.fd is not stdout:\n self.fd.close()\n\n def _write(self, data):\n self.fd.write(data)\n\n\nclass PlayerOutput(Output):\n def __init__(self, cmd, args=DEFAULT_PLAYER_ARGUMENTS,\n filename=None, quiet=True, kill=True,\n call=False, http=False, namedpipe=None):\n self.cmd = cmd\n self.args = args\n self.kill = kill\n self.call = call\n self.quiet = quiet\n\n self.filename = filename\n self.namedpipe = namedpipe\n self.http = http\n\n if self.namedpipe or self.filename or self.http:\n self.stdin = sys.stdin\n else:\n self.stdin = subprocess.PIPE\n\n if self.quiet:\n self.stdout = open(os.devnull, \"w\")\n self.stderr = open(os.devnull, \"w\")\n else:\n self.stdout = sys.stdout\n self.stderr = sys.stderr\n\n @property\n def running(self):\n sleep(0.5)\n self.player.poll()\n return self.player.returncode is None\n\n def _create_arguments(self):\n if self.namedpipe:\n filename = self.namedpipe.path\n elif self.filename:\n filename = self.filename\n elif self.http:\n filename = self.http.url\n else:\n filename = \"-\"\n\n # shlex removes un-escaped backslashes\n cmd = self.cmd.replace(\"\\\\\", \"\\\\\\\\\")\n args = self.args.format(filename=filename)\n args = args.replace(\"\\\\\", \"\\\\\\\\\")\n\n return shlex.split(cmd) + shlex.split(args)\n\n def _open(self):\n try:\n if self.call and self.filename:\n self._open_call()\n else:\n self._open_subprocess()\n finally:\n if self.quiet:\n # Output streams no longer needed in parent process\n self.stdout.close()\n self.stderr.close()\n\n def _open_call(self):\n subprocess.call(self._create_arguments(),\n stdout=self.stdout,\n stderr=self.stderr)\n\n def _open_subprocess(self):\n # Force bufsize=0 on all Python versions to avoid writing the\n # unflushed buffer when closing a broken input pipe\n self.player = subprocess.Popen(self._create_arguments(),\n stdin=self.stdin, bufsize=0,\n stdout=self.stdout,\n stderr=self.stderr)\n\n # Wait 0.5 seconds to see if program exited prematurely\n if not self.running:\n raise OSError(\"Process exited prematurely\")\n\n if self.namedpipe:\n self.namedpipe.open(\"wb\")\n elif self.http:\n self.http.open()\n\n def _close(self):\n # Close input to the player first to signal the end of the\n # stream and allow the player to terminate of its own accord\n if self.namedpipe:\n self.namedpipe.close()\n elif self.http:\n self.http.close()\n elif not 
self.filename:\n self.player.stdin.close()\n\n if self.kill:\n with ignored(Exception):\n self.player.kill()\n self.player.wait()\n\n def _write(self, data):\n if self.namedpipe:\n self.namedpipe.write(data)\n elif self.http:\n self.http.write(data)\n else:\n self.player.stdin.write(data)\n\n__all__ = [\"PlayerOutput\", \"FileOutput\"]\n", "id": "5797376", "language": "Python", "matching_score": 0.6584028601646423, "max_stars_count": 0, "path": "src/livestreamer_cli/output.py" }, { "content": "import django_filters as filters\n\nfrom operative import models as operative_models\nfrom . import models\n\n\nclass OrderFilterSet(filters.FilterSet):\n organization = filters.ModelMultipleChoiceFilter(\n field_name='organization__slug',\n to_field_name='slug',\n queryset=models.Organization.objects.all(),\n )\n\n class Meta:\n model = models.Order\n fields = ('organization', 'operative')\n\n def __init__(self, data, *args, **kwargs):\n if 'operative' not in data:\n # by default filter by the current operative\n data = data.copy()\n data['operative'] = operative_models.Operative.objects.get_current().id\n\n super().__init__(data, *args, **kwargs)\n", "id": "1073384", "language": "Python", "matching_score": 1.835252285003662, "max_stars_count": 0, "path": "api/src/consumer/filters.py" }, { "content": "from rest_framework import viewsets\n\nfrom msa import permissions\nfrom operative import models as operative_models\nfrom . import models, serializers, filters\n\n\nclass OrganizationViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = models.Organization.objects.all()\n serializer_class = serializers.OrganizationSerializer\n\n\nclass OrderViewSet(viewsets.ModelViewSet):\n queryset = models.Order.objects.all()\n serializer_class = serializers.OrderSerializer\n filterset_class = filters.OrderFilterSet\n permission_classes = (permissions.IsAuthenticatedOrCreateOnly,)\n\n def perform_create(self, serializer):\n # use the current operative when creating a new order\n current_operative = operative_models.Operative.objects.get_current()\n serializer.save(operative=current_operative)\n", "id": "5592836", "language": "Python", "matching_score": 3.744492769241333, "max_stars_count": 0, "path": "api/src/consumer/views.py" }, { "content": "from rest_framework import viewsets\n\nfrom . import models\nfrom . 
import serializers\n\n\nclass ProductViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = models.Product.objects.only_enabled()\n serializer_class = serializers.ProductSerializer\n ordering_fields = ('producer', 'category')\n\n\nclass ProducerViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = models.Producer.objects.all()\n serializer_class = serializers.ProducerSerializer\n", "id": "7206134", "language": "Python", "matching_score": 1.9233723878860474, "max_stars_count": 0, "path": "api/src/producer/views.py" }, { "content": "from rest_framework import routers\n\nimport user.views\nimport producer.views\nimport consumer.views\n\nROUTER = routers.DefaultRouter()\nROUTER.register('products', producer.views.ProductViewSet)\nROUTER.register('producers', producer.views.ProducerViewSet)\nROUTER.register('organizations', consumer.views.OrganizationViewSet)\nROUTER.register('orders', consumer.views.OrderViewSet)\nROUTER.register('sessions', user.views.SessionViewSet, base_name='session')\n", "id": "11507480", "language": "Python", "matching_score": 0.9375147223472595, "max_stars_count": 0, "path": "api/src/msa/router.py" }, { "content": "from rest_framework import permissions\n\n\nclass IsAuthenticatedOrCreateOnly(permissions.BasePermission):\n \"\"\" Allows anyone to create instances but only logged in users can view and edit them. \"\"\"\n\n def has_permission(self, request, view):\n return (\n request.method == 'POST' or\n request.user and\n request.user.is_authenticated\n )\n", "id": "5767624", "language": "Python", "matching_score": 0.5305300354957581, "max_stars_count": 0, "path": "api/src/msa/permissions.py" }, { "content": "from .settings_base import * # noqa\n\nDEBUG = False\n\nALLOWED_HOSTS = [\n 'msalimentaria.com.ar',\n]\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\nSESSION_COOKIE_SECURE = True\nCSRF_COOKIE_SECURE = True\n", "id": "1641769", "language": "Python", "matching_score": 0.1005549281835556, "max_stars_count": 0, "path": "api/src/msa/settings_prod.py" }, { "content": "from livestreamer.exceptions import NoStreamsError\nfrom livestreamer.plugin import Plugin\nfrom livestreamer.stream import HTTPStream, HLSStream\nfrom livestreamer.utils import urlget, verifyjson, parse_json, parse_qsd\n\nimport re\n\nclass Youtube(Plugin):\n @classmethod\n def can_handle_url(self, url):\n return \"youtube.com\" in url\n\n @classmethod\n def stream_weight(cls, stream):\n match = re.match(\"(\\w+)_3d\", stream)\n if match:\n weight, group = Plugin.stream_weight(match.group(1))\n weight -= 1\n group = \"youtube_3d\"\n else:\n weight, group = Plugin.stream_weight(stream)\n\n return weight, group\n\n def _find_config(self, data):\n match = re.search(\"'PLAYER_CONFIG': (.+)\\n.+}\\);\", data)\n if match:\n return match.group(1)\n\n match = re.search(\"yt.playerConfig = (.+)\\;\\n\", data)\n if match:\n return match.group(1)\n\n match = re.search(\"ytplayer.config = (.+);</script>\", data)\n if match:\n return match.group(1)\n\n match = re.search(\"data-swf-config=\\\"(.+)\\\"\", data)\n if match:\n config = match.group(1)\n config = config.replace(\"&amp;quot;\", \"\\\"\")\n\n return config\n\n def _get_stream_info(self, url):\n res = urlget(url)\n config = self._find_config(res.text)\n\n if config:\n return parse_json(config, \"config JSON\")\n\n def _parse_stream_map(self, streammap):\n streams = []\n\n for stream_qs in streammap.split(\",\"):\n stream = parse_qsd(stream_qs)\n streams.append(stream)\n\n return streams\n\n def _parse_format_map(self, formatsmap):\n formats = 
{}\n\n if len(formatsmap) == 0:\n return formats\n\n for format in formatsmap.split(\",\"):\n s = format.split(\"/\")\n (w, h) = s[1].split(\"x\")\n formats[s[0]] = h + \"p\"\n\n return formats\n\n def _get_streams(self):\n info = self._get_stream_info(self.url)\n\n if not info:\n raise NoStreamsError(self.url)\n\n args = verifyjson(info, \"args\")\n\n streams = {}\n\n uestreammap = verifyjson(args, \"url_encoded_fmt_stream_map\")\n fmtlist = verifyjson(args, \"fmt_list\")\n\n streammap = self._parse_stream_map(uestreammap)\n formatmap = self._parse_format_map(fmtlist)\n\n for streaminfo in streammap:\n if \"s\" in streaminfo and self._decrypt_signature(streaminfo[\"s\"]):\n streaminfo[\"sig\"] = self._decrypt_signature(streaminfo[\"s\"])\n\n if not (\"url\" in streaminfo and \"sig\" in streaminfo):\n continue\n\n stream = HTTPStream(self.session, streaminfo[\"url\"],\n params=dict(signature=streaminfo[\"sig\"]))\n\n if streaminfo[\"itag\"] in formatmap:\n quality = formatmap[streaminfo[\"itag\"]]\n else:\n quality = streaminfo[\"quality\"]\n\n if streaminfo.get(\"stereo3d\") == \"1\":\n quality += \"_3d\"\n\n streams[quality] = stream\n\n if \"hlsvp\" in args:\n url = args[\"hlsvp\"]\n\n try:\n hlsstreams = HLSStream.parse_variant_playlist(self.session, url,\n namekey=\"pixels\")\n streams.update(hlsstreams)\n except IOError as err:\n self.logger.warning(\"Failed to get variant playlist: {0}\", err)\n\n if not streams and args.get(\"live_playback\", \"0\") == \"0\":\n self.logger.warning(\"VOD support may not be 100% complete. Try youtube-dl instead.\")\n\n return streams\n\n def _decrypt_signature(self, s):\n \"\"\" \n Turn the encrypted s field into a working signature\n https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/youtube.py\n \"\"\"\n\n if len(s) == 92:\n return s[25] + s[3:25] + s[0] + s[26:42] + s[79] + s[43:79] + s[91] + s[80:83]\n elif len(s) == 90:\n return s[25] + s[3:25] + s[2] + s[26:40] + s[77] + s[41:77] + s[89] + s[78:81]\n elif len(s) == 88:\n return s[48] + s[81:67:-1] + s[82] + s[66:62:-1] + s[85] + s[61:48:-1] + s[67] + s[47:12:-1] + s[3] + s[11:3:-1] + s[2] + s[12]\n elif len(s) == 87:\n return s[4:23] + s[86] + s[24:85]\n elif len(s) == 86:\n return s[83:85] + s[26] + s[79:46:-1] + s[85] + s[45:36:-1] + s[30] + s[35:30:-1] + s[46] + s[29:26:-1] + s[82] + s[25:1:-1]\n elif len(s) == 85:\n return s[2:8] + s[0] + s[9:21] + s[65] + s[22:65] + s[84] + s[66:82] + s[21]\n elif len(s) == 84:\n return s[83:36:-1] + s[2] + s[35:26:-1] + s[3] + s[25:3:-1] + s[26]\n elif len(s) == 83:\n return s[6] + s[3:6] + s[33] + s[7:24] + s[0] + s[25:33] + s[53] + s[34:53] + s[24] + s[54:]\n elif len(s) == 82:\n return s[36] + s[79:67:-1] + s[81] + s[66:40:-1] + s[33] + s[39:36:-1] + s[40] + s[35] + s[0] + s[67] + s[32:0:-1] + s[34]\n elif len(s) == 81:\n return s[56] + s[79:56:-1] + s[41] + s[55:41:-1] + s[80] + s[40:34:-1] + s[0] + s[33:29:-1] + s[34] + s[28:9:-1] + s[29] + s[8:0:-1] + s[9]\n elif len(s) == 79:\n return s[54] + s[77:54:-1] + s[39] + s[53:39:-1] + s[78] + s[38:34:-1] + s[0] + s[33:29:-1] + s[34] + s[28:9:-1] + s[29] + s[8:0:-1] + s[9]\n else:\n self.logger.warning(\"Unable to decrypt signature, key length {0} not supported; retrying might work\", len(s))\n return None\n\n__plugin__ = Youtube\n", "id": "3987177", "language": "Python", "matching_score": 3.218087673187256, "max_stars_count": 1, "path": "src/livestreamer/plugins/youtube.py" }, { "content": "from livestreamer.stream import RTMPStream, HTTPStream\nfrom livestreamer.plugin import Plugin\nfrom 
livestreamer.exceptions import NoStreamsError\nfrom livestreamer.utils import urlget, verifyjson, res_json\n\nimport re\n\nLIVE_API = \"http://www.hitbox.tv/api/media/live/{0}?showHidden=true\"\nPLAYER_API = \"http://www.hitbox.tv/api/player/config/{0}/{1}?embed=false&showHidden=true\"\n\n# you can get this from http://www.hitbox.tv/js/hitbox-combined.js\n# but a 1.4MB download seems a bit much ;)\n# lets hope it doesn't change much...\nSWF_BASE = \"http://edge.vie.hitbox.tv/static/player/flowplayer/\"\n\n\nclass Hitbox(Plugin):\n @classmethod\n def can_handle_url(self, url):\n return \"hitbox.tv\" in url\n\n def _get_quality(self, label):\n match = re.search(r\".*?(\\d+p)\", label)\n if match:\n return match.group(1)\n return \"live\"\n\n def _get_streams(self):\n self.logger.debug(\"Fetching stream info\")\n media_is_live = 0\n\n match = re.search(r\".*hitbox.tv/([^/]*)/?(\\d+)?\", self.url)\n if not match:\n raise NoStreamsError(self.url)\n\n stream_name, media_id = match.groups()\n\n if stream_name != \"video\":\n res = urlget(LIVE_API.format(stream_name))\n json = res_json(res)\n livestream = verifyjson(json, \"livestream\")\n media_id = verifyjson(livestream[0], \"media_id\")\n media_is_live = int(verifyjson(livestream[0], \"media_is_live\"))\n if not media_is_live:\n raise NoStreamsError(self.url)\n\n media_type = \"live\" if media_is_live else \"video\"\n res = urlget(PLAYER_API.format(media_type, media_id))\n json = res_json(res)\n clip = verifyjson(json, \"clip\")\n live = verifyjson(clip, \"live\")\n bitrates = verifyjson(clip, \"bitrates\")\n\n streams = {}\n if live:\n for bitrate in bitrates:\n connection_provider = verifyjson(clip, \"connectionProvider\")\n plugins = verifyjson(json, \"plugins\")\n provider_plugin = verifyjson(plugins, connection_provider)\n swf = verifyjson(provider_plugin, \"url\")\n rtmp = verifyjson(provider_plugin, \"netConnectionUrl\")\n quality = self._get_quality(verifyjson(bitrate, \"label\"))\n url = verifyjson(bitrate, \"url\")\n\n streams[quality] = RTMPStream(self.session, {\n \"rtmp\": rtmp,\n \"pageUrl\": self.url,\n \"playpath\": url,\n \"swfVfy\": SWF_BASE + swf,\n \"live\": True\n })\n else:\n for bitrate in bitrates:\n base_url = verifyjson(clip, \"baseUrl\")\n url = verifyjson(bitrate, \"url\")\n quality = self._get_quality(verifyjson(bitrate, \"label\"))\n streams[quality] = HTTPStream(self.session,\n base_url + \"/\" + url)\n\n return streams\n\n__plugin__ = Hitbox\n", "id": "7333531", "language": "Python", "matching_score": 3.1068074703216553, "max_stars_count": 0, "path": "src/livestreamer/plugins/hitbox.py" }, { "content": "import re\n\nfrom collections import defaultdict\nfrom functools import partial\nfrom io import BytesIO, IOBase\nfrom random import randint\nfrom time import sleep\nfrom threading import Thread\n\nfrom livestreamer.buffers import RingBuffer\nfrom livestreamer.compat import urlparse, urljoin\nfrom livestreamer.exceptions import StreamError, PluginError, NoStreamsError\nfrom livestreamer.plugin import Plugin\nfrom livestreamer.stream import RTMPStream, HLSStream, HTTPStream, Stream\nfrom livestreamer.utils import urlget\n\nfrom livestreamer.packages.flashmedia import AMFPacket, AMFError\nfrom livestreamer.packages.flashmedia.tag import Header\n\ntry:\n import librtmp\n HAS_LIBRTMP = True\nexcept ImportError:\n HAS_LIBRTMP = False\n\n\nCDN_KEYS = [\"cdnStreamUrl\", \"cdnStreamName\"]\nPROVIDER_KEYS = [\"streams\", \"name\", \"url\"]\n\nAMF_URL = \"http://cgw.ustream.tv/Viewer/getStream/1/{0}.amf\"\nHLS_PLAYLIST_URL = 
\"http://iphone-streaming.ustream.tv/uhls/{0}/streams/live/iphone/playlist.m3u8\"\nRECORDED_URL = \"http://tcdn.ustream.tv/video/{0}\"\nRECORDED_URL_PATTERN = r\"^(http(s)?://)?(www\\.)?ustream.tv/recorded/(?P<video_id>\\d+)\"\nRTMP_URL = \"rtmp://channel.live.ums.ustream.tv:80/ustream\"\nSWF_URL = \"http://static-cdn1.ustream.tv/swf/live/viewer.rsl:505.swf\"\n\n\ndef valid_cdn(item):\n name, cdn = item\n return all(cdn.get(key) for key in CDN_KEYS)\n\n\ndef valid_provider(info):\n return all(info.get(key) for key in PROVIDER_KEYS)\n\n\ndef validate_module_info(result):\n if (result and isinstance(result, list) and result[0].get(\"stream\")):\n return result[0]\n\n\ndef create_ums_connection(app, media_id, page_url, exception=PluginError):\n params = dict(application=app, media=str(media_id))\n conn = librtmp.RTMP(RTMP_URL, connect_data=params,\n swfurl=SWF_URL, pageurl=page_url)\n\n try:\n conn.connect()\n except librtmp.RTMPError:\n raise exception(\"Failed to connect to RTMP server\")\n\n return conn\n\n\nclass UHSStreamFiller(Thread):\n def __init__(self, stream, conn, provider, stream_index):\n Thread.__init__(self)\n self.daemon = True\n self.running = False\n\n self.conn = conn\n self.provider = provider\n self.stream_index = stream_index\n self.stream = stream\n\n self.chunk_ranges = {}\n self.chunk_id = None\n self.chunk_id_max = None\n\n self.filename_format = \"\"\n self.header_written = False\n\n def download_chunk(self, chunk_id):\n self.stream.logger.debug(\"[{0}] Downloading chunk\".format(chunk_id))\n url = self.format_chunk_url(chunk_id)\n\n attempts = 3\n while attempts and self.running:\n try:\n res = urlget(url, stream=True, exception=IOError, timeout=10)\n break\n except IOError as err:\n self.stream.logger.error(\"[{0}] Failed to open chunk: {1}\".format(\n chunk_id, err))\n attempts -= 1\n else:\n return\n\n while self.running:\n try:\n data = res.raw.read(8192)\n except IOError as err:\n self.stream.logger.error(\"[{0}] Failed to read chunk {1}\".format(\n chunk_id, err))\n break\n\n if not data:\n break\n\n if not self.header_written:\n flv_header = Header(has_video=True, has_audio=True)\n self.stream.buffer.write(flv_header.serialize())\n self.header_written = True\n\n self.stream.buffer.write(data)\n\n def process_module_info(self):\n try:\n result = self.conn.process_packets(invoked_method=\"moduleInfo\",\n timeout=30)\n except (IOError, librtmp.RTMPError) as err:\n self.stream.logger.error(\"Failed to get module info: {0}\".format(err))\n return\n\n result = validate_module_info(result)\n if not result:\n return\n\n providers = result.get(\"stream\")\n if providers == \"offline\":\n self.stream.logger.debug(\"Stream went offline\")\n self.stop()\n elif not isinstance(providers, list):\n return\n\n for provider in providers:\n if provider.get(\"name\") == self.stream.stream.provider:\n break\n else:\n return\n\n try:\n stream = provider.get(\"streams\")[self.stream_index]\n except IndexError:\n self.stream.logger.debug(\"Stream index not in result\")\n return\n\n filename_format = stream.get(\"streamName\").replace(\"%\", \"%s\")\n filename_format = urljoin(provider.get(\"url\"), filename_format)\n\n self.filename_format = filename_format\n self.update_chunk_info(stream)\n\n def update_chunk_info(self, result):\n chunk_range = result.get(\"chunkRange\")\n\n if not chunk_range:\n return\n\n self.chunk_id_max = int(result.get(\"chunkId\"))\n self.chunk_ranges.update(map(partial(map, int),\n chunk_range.items()))\n\n if self.chunk_id is None:\n self.chunk_id = 
self.chunk_id_max\n\n def format_chunk_url(self, chunk_id):\n chunk_hash = \"\"\n for chunk_start in sorted(self.chunk_ranges):\n if chunk_id >= chunk_start:\n chunk_hash = self.chunk_ranges[chunk_start]\n\n return self.filename_format % (chunk_id, chunk_hash)\n\n def run(self):\n self.stream.logger.debug(\"Starting buffer filler thread\")\n\n while self.running:\n self.check_connection()\n self.process_module_info()\n\n if self.chunk_id is None:\n continue\n\n while self.chunk_id <= self.chunk_id_max:\n self.download_chunk(self.chunk_id)\n self.chunk_id += 1\n\n self.stop()\n self.stream.logger.debug(\"Buffer filler thread completed\")\n\n def check_connection(self):\n if not self.conn.connected:\n self.stream.logger.error(\"Disconnected, attempting to reconnect\")\n\n try:\n self.conn = create_ums_connection(\"channel\",\n self.stream.stream.channel_id,\n self.stream.stream.page_url)\n except PluginError as err:\n self.stream.logger.error(\"Failed to reconnect: {0}\", err)\n self.stop()\n\n def start(self):\n self.running = True\n\n return Thread.start(self)\n\n def stop(self):\n self.running = False\n self.conn.close()\n self.stream.buffer.close()\n\n\nclass UHSStreamIO(IOBase):\n def __init__(self, session, stream, timeout=30):\n self.session = session\n self.stream = stream\n self.timeout = timeout\n\n self.logger = session.logger.new_module(\"stream.uhs\")\n self.buffer = None\n\n def open(self):\n self.buffer = RingBuffer(self.session.get_option(\"ringbuffer-size\"))\n\n conn = create_ums_connection(\"channel\",\n self.stream.channel_id,\n self.stream.page_url,\n exception=StreamError)\n\n self.filler = UHSStreamFiller(self, conn, self.stream.provider,\n self.stream.stream_index)\n self.filler.start()\n\n def read(self, size=-1):\n if not self.buffer:\n return b\"\"\n\n return self.buffer.read(size, block=self.filler.is_alive(),\n timeout=self.timeout)\n\n def close(self):\n self.filler.stop()\n\n if self.filler.is_alive():\n self.filler.join()\n\n\nclass UHSStream(Stream):\n __shortname__ = \"uhs\"\n\n def __init__(self, session, channel_id, page_url, provider,\n stream_index):\n Stream.__init__(self, session)\n\n self.channel_id = channel_id\n self.page_url = page_url\n self.provider = provider\n self.stream_index = stream_index\n\n def __repr__(self):\n return (\"<UHSStream({0!r}, {1!r}, \"\n \"{2!r}, {3!r})>\").format(self.channel_id, self.page_url,\n self.provider, self.stream_index)\n\n def __json__(self):\n return dict(channel_id=self.channel_id,\n page_url=self.page_url,\n provider=self.provider,\n stream_index=self.stream_index,\n **Stream.__json__(self))\n\n def open(self):\n fd = UHSStreamIO(self.session, self)\n fd.open()\n\n return fd\n\n\nclass UStreamTV(Plugin):\n @classmethod\n def can_handle_url(cls, url):\n return \"ustream.tv\" in url\n\n @classmethod\n def stream_weight(cls, stream):\n match = re.match(\"mobile_(\\w+)\", stream)\n if match:\n weight, group = Plugin.stream_weight(match.group(1))\n weight -= 1\n group = \"mobile_ustream\"\n elif stream == \"recorded\":\n weight, group = 720, \"ustream\"\n else:\n weight, group = Plugin.stream_weight(stream)\n\n return weight, group\n\n def _get_channel_id(self, url):\n match = re.search(\"ustream.tv/embed/(\\d+)\", url)\n if match:\n return int(match.group(1))\n\n match = re.search(\"\\\"cid\\\":(\\d+)\", urlget(url).text)\n if match:\n return int(match.group(1))\n\n def _get_hls_streams(self, wait_for_transcode=False):\n # HLS streams are created on demand, so we may have to wait\n # for a transcode to be 
started.\n attempts = wait_for_transcode and 10 or 1\n playlist_url = HLS_PLAYLIST_URL.format(self.channel_id)\n streams = {}\n while attempts and not streams:\n try:\n streams = HLSStream.parse_variant_playlist(self.session,\n playlist_url,\n nameprefix=\"mobile_\")\n except IOError:\n # Channel is probably offline\n break\n\n attempts -= 1\n sleep(3)\n\n return streams\n\n def _create_rtmp_stream(self, cdn, stream_name):\n parsed = urlparse(cdn)\n options = dict(rtmp=cdn, app=parsed.path[1:],\n playpath=stream_name, pageUrl=self.url,\n swfUrl=SWF_URL, live=True)\n\n return RTMPStream(self.session, options)\n\n def _get_module_info(self, app, media_id):\n self.logger.debug(\"Waiting for moduleInfo invoke\")\n conn = create_ums_connection(app, media_id, self.url)\n\n attempts = 3\n while conn.connected and attempts:\n try:\n result = conn.process_packets(invoked_method=\"moduleInfo\",\n timeout=30)\n except (IOError, librtmp.RTMPError) as err:\n raise PluginError(\"Failed to get stream info: {0}\".format(err))\n\n result = validate_module_info(result)\n if result:\n break\n else:\n attempts -= 1\n\n conn.close()\n\n return result\n\n def _get_streams_from_rtmp(self):\n module_info = self._get_module_info(\"channel\", self.channel_id)\n if not module_info:\n raise NoStreamsError(self.url)\n\n providers = module_info.get(\"stream\")\n if providers == \"offline\":\n raise NoStreamsError(self.url)\n elif not isinstance(providers, list):\n raise PluginError(\"Invalid stream info: {0}\".format(providers))\n\n streams = {}\n for provider in filter(valid_provider, providers):\n provider_url = provider.get(\"url\")\n provider_name = provider.get(\"name\")\n provider_streams = provider.get(\"streams\")\n\n for stream_index, stream_info in enumerate(provider_streams):\n stream = None\n stream_height = int(stream_info.get(\"height\", 0))\n stream_name = (stream_info.get(\"description\") or\n (stream_height > 0 and \"{0}p\".format(stream_height)) or\n \"live\")\n\n if stream_name in streams:\n provider_name_clean = provider_name.replace(\"uhs_\", \"\")\n stream_name += \"_alt_{0}\".format(provider_name_clean)\n\n if provider_name.startswith(\"uhs_\"):\n stream = UHSStream(self.session, self.channel_id,\n self.url, provider_name,\n stream_index=stream_index)\n elif (provider_url.startswith(\"rtmp\") and\n RTMPStream.is_usable(self.session)):\n playpath = stream_info.get(\"streamName\")\n stream = self._create_rtmp_stream(provider_url,\n playpath)\n\n if stream:\n streams[stream_name] = stream\n\n return streams\n\n def _get_streams_from_amf(self):\n if not RTMPStream.is_usable(self.session):\n raise NoStreamsError(self.url)\n\n res = urlget(AMF_URL.format(self.channel_id))\n\n try:\n packet = AMFPacket.deserialize(BytesIO(res.content))\n except (IOError, AMFError) as err:\n raise PluginError(\"Failed to parse AMF packet: {0}\".format(err))\n\n for message in packet.messages:\n if message.target_uri == \"/1/onResult\":\n result = message.value\n break\n else:\n raise PluginError(\"No result found in AMF packet\")\n\n streams = {}\n stream_name = result.get(\"streamName\")\n if stream_name:\n cdn = result.get(\"cdnUrl\") or result.get(\"fmsUrl\")\n if cdn:\n stream = self._create_rtmp_stream(cdn, stream_name)\n\n if \"videoCodec\" in result and result[\"videoCodec\"][\"height\"] > 0:\n stream_name = \"{0}p\".format(int(result[\"videoCodec\"][\"height\"]))\n else:\n stream_name = \"live\"\n\n streams[stream_name] = stream\n else:\n self.logger.warning(\"Missing cdnUrl and fmsUrl from result\")\n\n 
stream_versions = result.get(\"streamVersions\")\n if stream_versions:\n for version, info in stream_versions.items():\n stream_version_cdn = info.get(\"streamVersionCdn\", {})\n\n for name, cdn in filter(valid_cdn, stream_version_cdn.items()):\n stream = self._create_rtmp_stream(cdn[\"cdnStreamUrl\"],\n cdn[\"cdnStreamName\"])\n stream_name = \"live_alt_{0}\".format(name)\n streams[stream_name] = stream\n\n return streams\n\n def _get_live_streams(self):\n self.channel_id = self._get_channel_id(self.url)\n\n if not self.channel_id:\n raise NoStreamsError(self.url)\n\n streams = defaultdict(list)\n\n if not RTMPStream.is_usable(self.session):\n self.logger.warning(\"rtmpdump is not usable. \"\n \"Not all streams may be available.\")\n\n if HAS_LIBRTMP:\n desktop_streams = self._get_streams_from_rtmp\n else:\n self.logger.warning(\"python-librtmp is not installed. \"\n \"Not all streams may be available.\")\n desktop_streams = self._get_streams_from_amf\n\n try:\n for name, stream in desktop_streams().items():\n streams[name].append(stream)\n except PluginError as err:\n self.logger.error(\"Unable to fetch desktop streams: {0}\", err)\n except NoStreamsError:\n pass\n\n try:\n mobile_streams = self._get_hls_streams(wait_for_transcode=not streams)\n for name, stream in mobile_streams.items():\n streams[name].append(stream)\n except PluginError as err:\n self.logger.error(\"Unable to fetch mobile streams: {0}\", err)\n except NoStreamsError:\n pass\n\n return streams\n\n def _get_recorded_streams(self, video_id):\n streams = {}\n\n if HAS_LIBRTMP:\n module_info = self._get_module_info(\"recorded\", video_id)\n if not module_info:\n raise NoStreamsError(self.url)\n\n providers = module_info.get(\"stream\")\n if not isinstance(providers, list):\n raise PluginError(\"Invalid stream info: {0}\".format(providers))\n\n for provider in providers:\n for stream_info in provider.get(\"streams\"):\n bitrate = int(stream_info.get(\"bitrate\", 0))\n stream_name = (bitrate > 0 and \"{0}k\".format(bitrate) or\n \"recorded\")\n\n if stream_name in streams:\n stream_name += \"_alt\"\n\n stream = HTTPStream(self.session,\n stream_info.get(\"streamName\"))\n streams[stream_name] = stream\n\n else:\n self.logger.warning(\"The proper API could not be used without \"\n \"python-librtmp installed. 
Stream URL may be \"\n \"incorrect.\")\n\n url = RECORDED_URL.format(video_id)\n random_hash = \"{0:02x}{1:02x}\".format(randint(0, 255),\n randint(0, 255))\n params = dict(hash=random_hash)\n stream = HTTPStream(self.session, url, params=params)\n streams[\"recorded\"] = stream\n\n return streams\n\n def _get_streams(self):\n recorded = re.match(RECORDED_URL_PATTERN, self.url)\n if recorded:\n return self._get_recorded_streams(recorded.group(\"video_id\"))\n else:\n return self._get_live_streams()\n\n__plugin__ = UStreamTV\n", "id": "171443", "language": "Python", "matching_score": 4.685859203338623, "max_stars_count": 0, "path": "src/livestreamer/plugins/ustreamtv.py" }, { "content": "from __future__ import division\n\nimport base64\nimport hmac\nimport re\nimport requests\nimport os.path\n\nfrom binascii import unhexlify\nfrom hashlib import sha256\nfrom io import BytesIO, IOBase\nfrom math import ceil\nfrom threading import Thread, Timer\nfrom time import time\n\nfrom .stream import Stream\nfrom ..buffers import RingBuffer\nfrom ..cache import Cache\nfrom ..compat import urljoin, urlparse, bytes, queue, range, is_py33\nfrom ..compat import parse_qsl\nfrom ..exceptions import StreamError\nfrom ..utils import absolute_url, urlget, res_xml\nfrom ..utils import swfdecompress\n\nfrom ..packages.flashmedia import F4V, F4VError, FLVError\nfrom ..packages.flashmedia.box import Box\nfrom ..packages.flashmedia.tag import (AudioData, AACAudioData, VideoData,\n AVCVideoData, VideoCommandFrame,\n ScriptData, Header, Tag,\n TAG_TYPE_SCRIPT, TAG_TYPE_AUDIO,\n TAG_TYPE_VIDEO)\n\n# Akamai HD player verification key\n# Use unhexlify() rather than bytes.fromhex() for compatibility with before\n# Python 3. However, in Python 3.2 (not 3.3+), unhexlify only accepts a byte\n# string.\nAKAMAIHD_PV_KEY = unhexlify(\n b\"<KEY>\")\n\nAAC_SEQUENCE_HEADER = 0x00\nAVC_SEQUENCE_HEADER = 0x00\nAVC_SEQUENCE_END = 0x02\n\n# Some streams hosted by Akamai seems to require a hdcore parameter\n# to function properly.\nHDCORE_VERSION = \"3.1.0\"\n\nclass HDSStreamFiller(Thread):\n def __init__(self, stream):\n Thread.__init__(self)\n\n self.daemon = True\n self.error = None\n self.running = False\n self.stream = stream\n self.queue = queue.Queue(maxsize=5)\n\n self.avc_header_written = False\n self.aac_header_written = False\n\n self.timestamps = {\n TAG_TYPE_AUDIO: None,\n TAG_TYPE_VIDEO: None,\n TAG_TYPE_SCRIPT: None\n }\n\n self.create_tag_buffer(8182 * 8)\n\n def create_tag_buffer(self, size):\n if is_py33:\n self.tag_buffer = memoryview(bytearray(size))\n else:\n self.tag_buffer = bytearray(size)\n\n def download_fragment(self, segment, fragment):\n url = self.stream.fragment_url(segment, fragment)\n\n self.stream.logger.debug(\"[Fragment {0}-{1}] Opening URL: {2}\",\n segment, fragment, url)\n\n retries = 3\n res = None\n\n while retries > 0 and self.running:\n try:\n res = urlget(url, stream=True, exception=IOError,\n session=self.stream.rsession, timeout=10)\n break\n except IOError as err:\n self.stream.logger.error(\"[Fragment {0}-{1}] Failed to open: {2}\",\n segment, fragment, str(err))\n\n retries -= 1\n\n if not res:\n return\n\n size = int(res.headers.get(\"content-length\", \"0\"))\n size = size * self.stream.buffer_fragments\n\n if size > self.stream.buffer.buffer_size:\n self.stream.buffer.resize(size)\n\n return self.convert_fragment(segment, fragment, res.raw)\n\n def convert_fragment(self, segment, fragment, fd):\n mdat = None\n\n try:\n f4v = F4V(fd, raw_payload=True)\n\n # Fast forward to mdat box\n 
for box in f4v:\n if box.type == \"mdat\":\n mdat = box.payload.data\n break\n\n except F4VError as err:\n self.stream.logger.error(\"[Fragment {0}-{1}] Failed to deserialize: {2}\",\n segment, fragment, str(err))\n return\n\n if not mdat:\n self.stream.logger.error(\"[Fragment {0}-{1}] No mdat box found\",\n segment, fragment)\n return\n\n self.stream.logger.debug((\"[Fragment {0}-{1}] Extracting FLV tags from\"\n \" MDAT box\"), segment, fragment)\n\n mdat_size = len(mdat)\n\n if mdat_size > len(self.tag_buffer):\n self.create_tag_buffer(mdat_size)\n\n self.mdat_offset = 0\n self.tag_offset = 0\n\n while self.running and self.mdat_offset < mdat_size:\n try:\n self.extract_flv_tag(mdat)\n except (FLVError, IOError) as err:\n self.stream.logger.error((\"Failed to extract FLV tag from MDAT\"\n \" box: {0}\").format(str(err)))\n break\n\n self.stream.buffer.write(self.tag_buffer[:self.tag_offset])\n\n return True\n\n def extract_flv_tag(self, mdat):\n tag, self.mdat_offset = Tag.deserialize_from(mdat, self.mdat_offset)\n\n if tag.filter:\n self.stop()\n self.error = IOError(\"Tag has filter flag set, probably encrypted\")\n raise self.error\n\n if isinstance(tag.data, AudioData):\n if isinstance(tag.data.data, AACAudioData):\n if tag.data.data.type == AAC_SEQUENCE_HEADER:\n if self.aac_header_written:\n return\n\n self.aac_header_written = True\n else:\n if not self.aac_header_written:\n self.stream.logger.debug(\"Skipping AAC data before header\")\n return\n\n if isinstance(tag.data, VideoData):\n if isinstance(tag.data.data, AVCVideoData):\n if tag.data.data.type == AVC_SEQUENCE_HEADER:\n if self.avc_header_written:\n return\n\n self.avc_header_written = True\n else:\n if not self.avc_header_written:\n self.stream.logger.debug(\"Skipping AVC data before header\")\n return\n\n elif isinstance(tag.data.data, VideoCommandFrame):\n self.stream.logger.debug(\"Skipping video command frame\")\n return\n\n\n if tag.type in self.timestamps:\n if self.timestamps[tag.type] is None:\n self.timestamps[tag.type] = tag.timestamp\n else:\n tag.timestamp = max(0, tag.timestamp - self.timestamps[tag.type])\n\n self.tag_offset = tag.serialize_into(self.tag_buffer, self.tag_offset)\n\n def run(self):\n self.stream.logger.debug(\"Starting buffer filler thread\")\n\n while self.running:\n try:\n segment, fragment, fragment_duration = self.queue.get(True, 5)\n except queue.Empty:\n continue\n\n # Make sure timestamps don't get out of sync when\n # a fragment is missing or failed to download.\n if not self.download_fragment(segment, fragment):\n for key, value in self.timestamps.items():\n if value is not None:\n self.timestamps[key] += fragment_duration\n else:\n self.timestamps[key] = fragment_duration\n\n if fragment == self.stream.end_fragment:\n break\n\n self.stop()\n self.stream.logger.debug(\"Buffer filler thread completed\")\n\n def start(self):\n self.running = True\n\n return Thread.start(self)\n\n def stop(self):\n self.running = False\n self.stream.buffer.close()\n\n if self.stream.bootstrap_timer:\n self.stream.bootstrap_timer.cancel()\n\n\nclass HDSStreamIO(IOBase):\n FragmentURL = \"{url}{identifier}{quality}Seg{segment}-Frag{fragment}\"\n\n def __init__(self, session, baseurl, url, bootstrap, metadata=None,\n timeout=60, rsession=None):\n\n self.buffer = None\n self.buffer_time = session.options.get(\"hds-live-edge\")\n self.buffer_fragments = int(session.options.get(\"hds-fragment-buffer\"))\n self.baseurl = baseurl\n self.bootstrap = bootstrap\n self.logger = 
session.logger.new_module(\"stream.hds\")\n self.metadata = metadata\n self.session = session\n self.timeout = timeout\n self.url = url\n\n if rsession:\n self.rsession = rsession\n else:\n self.rsession = requests.session()\n\n def open(self):\n self.current_segment = -1\n self.current_fragment = -1\n self.first_fragment = 1\n self.last_fragment = -1\n self.end_fragment = None\n\n self.bootstrap_timer = None\n self.bootstrap_minimal_reload_time = 2.0\n self.bootstrap_reload_time = self.bootstrap_minimal_reload_time\n self.bootstrap_reload_timestamp = 0\n self.invalid_fragments = set()\n\n self.buffer = RingBuffer()\n self.header_written = False\n\n self.filler = HDSStreamFiller(self)\n self.filler.start()\n\n try:\n self.update_bootstrap(silent=False, fillqueue=True)\n except StreamError:\n self.close()\n raise\n\n return self\n\n def close(self):\n self.filler.stop()\n\n if self.filler.is_alive():\n self.filler.join()\n\n def read(self, size=-1):\n if not self.buffer:\n return b\"\"\n\n if self.filler.error:\n raise self.filler.error\n\n return self.buffer.read(size, block=self.filler.is_alive(),\n timeout=self.timeout)\n\n def fragment_url(self, segment, fragment):\n url = absolute_url(self.baseurl, self.url)\n\n return self.FragmentURL.format(url=url, identifier=\"\",\n quality=\"\", segment=segment,\n fragment=fragment)\n\n\n def update_bootstrap(self, silent=True, fillqueue=False):\n if not self.filler.running:\n return\n\n if self.end_fragment and self.current_fragment > self.end_fragment:\n return\n\n # Wait until buffer has room before requesting a new bootstrap\n self.buffer.wait_free()\n\n elapsed = time() - self.bootstrap_reload_timestamp\n if elapsed > self.bootstrap_reload_time:\n try:\n self._update_bootstrap()\n except IOError as err:\n self.bootstrap_reload_time = self.bootstrap_minimal_reload_time\n\n if silent:\n self.logger.error(\"Failed to update bootstrap: {0}\",\n str(err))\n else:\n raise StreamError(str(err))\n\n if not self.header_written:\n flvheader = Header(has_video=True, has_audio=True)\n self.buffer.write(flvheader.serialize())\n\n if self.metadata:\n # Remove duration from metadata when it's a livestream\n # since it will just confuse players anyway.\n if self.live and \"duration\" in self.metadata.value:\n del self.metadata.value[\"duration\"]\n\n tag = Tag(TAG_TYPE_SCRIPT, timestamp=0, data=self.metadata)\n self.buffer.write(tag.serialize())\n\n self.header_written = True\n\n if self.bootstrap_changed:\n self._queue_fragments(fillqueue)\n\n if self.bootstrap_timer:\n self.bootstrap_timer.cancel()\n\n self.bootstrap_timer = Timer(1, self.update_bootstrap)\n self.bootstrap_timer.daemon = True\n self.bootstrap_timer.start()\n\n def _update_bootstrap(self):\n self.logger.debug(\"Updating bootstrap\")\n\n if isinstance(self.bootstrap, Box):\n bootstrap = self.bootstrap\n else:\n bootstrap = self._fetch_bootstrap(self.bootstrap)\n\n self.live = bootstrap.payload.live\n self.profile = bootstrap.payload.profile\n self.timestamp = bootstrap.payload.current_media_time\n self.identifier = bootstrap.payload.movie_identifier\n self.time_scale = bootstrap.payload.time_scale\n self.segmentruntable = bootstrap.payload.segment_run_table_entries[0]\n self.fragmentruntable = bootstrap.payload.fragment_run_table_entries[0]\n\n self.first_fragment, last_fragment = self._fragment_count()\n fragment_duration = self._fragment_duration(last_fragment)\n\n if last_fragment != self.last_fragment:\n self.bootstrap_changed = True\n self.last_fragment = last_fragment\n else:\n 
self.bootstrap_changed = False\n\n if self.current_fragment < 0:\n if self.live:\n current_fragment = last_fragment\n\n # Less likely to hit edge if we don't start with last fragment,\n # default buffer is 10 sec.\n fragment_buffer = int(ceil(self.buffer_time / fragment_duration))\n current_fragment = max(self.first_fragment, current_fragment - (fragment_buffer - 1))\n\n self.logger.debug(\"Live edge buffer {0} sec is {1} fragments\",\n self.buffer_time, fragment_buffer)\n else:\n current_fragment = self.first_fragment\n\n self.current_fragment = current_fragment\n\n self.logger.debug(\"Current timestamp: {0}\", self.timestamp / self.time_scale)\n self.logger.debug(\"Current segment: {0}\", self.current_segment)\n self.logger.debug(\"Current fragment: {0}\", self.current_fragment)\n self.logger.debug(\"First fragment: {0}\", self.first_fragment)\n self.logger.debug(\"Last fragment: {0}\", self.last_fragment)\n self.logger.debug(\"End fragment: {0}\", self.end_fragment)\n\n self.bootstrap_reload_timestamp = time()\n self.bootstrap_reload_time = fragment_duration\n\n if self.live and not self.bootstrap_changed:\n self.logger.debug(\"Bootstrap not changed, shortening timer\")\n self.bootstrap_reload_time /= 2\n\n if self.bootstrap_reload_time < self.bootstrap_minimal_reload_time:\n self.bootstrap_reload_time = self.bootstrap_minimal_reload_time\n\n def _queue_fragments(self, fillqueue=False):\n for i, fragment in enumerate(range(self.current_fragment, self.last_fragment + 1)):\n if not self.filler.running or (fillqueue and i == self.filler.queue.maxsize):\n break\n\n if fragment in self.invalid_fragments:\n continue\n\n self.current_fragment = fragment + 1\n self.current_segment = self._segment_from_fragment(fragment)\n fragment_duration = int(self._fragment_duration(fragment) * 1000)\n entry = (self.current_segment, fragment, fragment_duration)\n\n self.logger.debug(\"[Fragment {0}-{1}] Adding to queue\",\n entry[0], entry[1])\n\n while self.filler.running:\n try:\n self.filler.queue.put(entry, True, 5)\n break\n except queue.Full:\n continue\n\n self.bootstrap_changed = self.current_fragment != self.last_fragment\n\n def _fetch_bootstrap(self, url):\n res = urlget(url, session=self.rsession, exception=IOError)\n return Box.deserialize(BytesIO(res.content))\n\n def _segment_from_fragment(self, fragment):\n table = self.segmentruntable.payload.segment_run_entry_table\n\n for segment, start, end in self._iterate_segments(table):\n if fragment >= start and fragment <= end:\n break\n else:\n segment = 1\n\n return segment\n\n def _iterate_segments(self, table):\n # If the first segment in the table starts at the beginning we can go from there,\n # otherwise we start from the end and use the total fragment count to figure\n # out where the last segment ends.\n\n if table[0].first_segment == 1:\n prev_frag = self.first_fragment - 1\n\n for segmentrun in table:\n start = prev_frag + 1\n end = prev_frag + segmentrun.fragments_per_segment\n\n yield segmentrun.first_segment, start, end\n\n prev_frag = end\n else:\n prev_frag = self.last_fragment + 1\n\n for segmentrun in reversed(table):\n start = prev_frag - segmentrun.fragments_per_segment\n end = prev_frag - 1\n\n yield segmentrun.first_segment, start, end\n\n prev_frag = start\n\n def _debug_fragment_table(self):\n fragmentruntable = self.fragmentruntable.payload.fragment_run_entry_table\n\n for i, fragmentrun in enumerate(fragmentruntable):\n print(fragmentrun.first_fragment, fragmentrun.first_fragment_timestamp,\n fragmentrun.fragment_duration, 
fragmentrun.discontinuity_indicator)\n\n def _fragment_count(self):\n table = self.fragmentruntable.payload.fragment_run_entry_table\n first_fragment, end_fragment = None, None\n\n for i, fragmentrun in enumerate(table):\n if fragmentrun.discontinuity_indicator is not None:\n if fragmentrun.discontinuity_indicator == 0:\n break\n elif fragmentrun.discontinuity_indicator > 0:\n continue\n\n if first_fragment is None:\n first_fragment = fragmentrun.first_fragment\n\n end_fragment = fragmentrun.first_fragment\n fragment_duration = fragmentrun.first_fragment_timestamp + fragmentrun.fragment_duration\n\n if self.timestamp > fragment_duration:\n offset = (self.timestamp - fragment_duration) / fragmentrun.fragment_duration\n end_fragment += int(offset)\n\n if first_fragment is None:\n first_fragment = 1\n\n if end_fragment is None:\n end_fragment = 1\n\n return first_fragment, end_fragment\n\n def _fragment_duration(self, fragment):\n fragment_duration = 0\n table = self.fragmentruntable.payload.fragment_run_entry_table\n time_scale = self.fragmentruntable.payload.time_scale\n\n for i, fragmentrun in enumerate(table):\n if fragmentrun.discontinuity_indicator is not None:\n self.invalid_fragments.add(fragmentrun.first_fragment)\n\n # Check for the last fragment of the stream\n if fragmentrun.discontinuity_indicator == 0:\n if i > 0:\n prev = table[i-1]\n self.end_fragment = prev.first_fragment\n\n break\n elif fragmentrun.discontinuity_indicator > 0:\n continue\n\n if fragment >= fragmentrun.first_fragment:\n fragment_duration = fragmentrun.fragment_duration / time_scale\n\n return fragment_duration\n\n\nclass HDSStream(Stream):\n \"\"\"\n Implements the Adobe HTTP Dynamic Streaming protocol\n\n *Attributes:*\n\n - :attr:`baseurl` Base URL\n - :attr:`url` Base path of the stream, joined with the base URL when fetching fragments\n - :attr:`bootstrap` Either a URL pointing to the bootstrap or a bootstrap :class:`Box` object\n used for initial information about the stream\n - :attr:`metadata` Either `None` or a :class:`ScriptData` object that contains metadata about\n the stream, such as height, width and bitrate\n \"\"\"\n\n __shortname__ = \"hds\"\n\n def __init__(self, session, baseurl, url, bootstrap, metadata=None,\n timeout=60, rsession=None):\n Stream.__init__(self, session)\n\n self.baseurl = baseurl\n self.url = url\n self.bootstrap = bootstrap\n self.metadata = metadata\n self.timeout = timeout\n self.rsession = rsession\n\n def __repr__(self):\n return (\"<HDSStream({0!r}, {1!r}, {2!r},\"\n \" metadata={3!r}, timeout={4!r})>\").format(self.baseurl,\n self.url,\n self.bootstrap,\n self.metadata,\n self.timeout)\n\n def __json__(self):\n if isinstance(self.bootstrap, Box):\n bootstrap = base64.b64encode(self.bootstrap.serialize())\n else:\n bootstrap = self.bootstrap\n\n if isinstance(self.metadata, ScriptData):\n metadata = self.metadata.__dict__\n else:\n metadata = self.metadata\n\n return dict(type=HDSStream.shortname(), baseurl=self.baseurl,\n url=self.url, bootstrap=bootstrap, metadata=metadata)\n\n def open(self):\n fd = HDSStreamIO(self.session, self.baseurl, self.url, self.bootstrap,\n self.metadata, self.timeout, self.rsession)\n\n return fd.open()\n\n @classmethod\n def parse_manifest(cls, session, url, timeout=60, rsession=None,\n pvswf=None):\n \"\"\"Parses a HDS manifest and returns it's substreams.\n\n :param url: The URL to the manifest.\n :param timeout: How long to wait for data to be returned from\n from the stream before raising an error.\n :param rsession: requests 
session used for the streams.\n :param pvswf: URL of player SWF for Akamai HD player verification.\n \"\"\"\n\n if not rsession:\n rsession = requests.session()\n\n if \"akamaihd\" in url:\n rsession.params[\"hdcore\"] = HDCORE_VERSION\n\n res = urlget(url, exception=IOError, session=rsession)\n manifest = res_xml(res, \"manifest XML\", ignore_ns=True,\n exception=IOError)\n\n parsed = urlparse(url)\n baseurl = manifest.findtext(\"baseURL\")\n baseheight = manifest.findtext(\"height\")\n bootstraps = {}\n streams = {}\n\n\n if not baseurl:\n baseurl = urljoin(url, os.path.dirname(parsed.path)) + \"/\"\n\n for bootstrap in manifest.findall(\"bootstrapInfo\"):\n name = bootstrap.attrib.get(\"id\") or \"_global\"\n url = bootstrap.attrib.get(\"url\")\n\n if url:\n box = absolute_url(baseurl, url)\n else:\n data = base64.b64decode(bytes(bootstrap.text, \"utf8\"))\n box = Box.deserialize(BytesIO(data))\n\n bootstraps[name] = box\n\n pvtoken = manifest.findtext(\"pv-2.0\")\n if pvtoken:\n if not pvswf:\n raise IOError(\"This manifest requires the 'pvswf' parameter \"\n \"to verify the SWF\")\n\n params = cls._pv_params(pvswf, pvtoken)\n rsession.params.update(params)\n\n for media in manifest.findall(\"media\"):\n url = media.attrib.get(\"url\")\n bootstrapid = media.attrib.get(\"bootstrapInfoId\", \"_global\")\n href = media.attrib.get(\"href\")\n\n if url and bootstrapid:\n bootstrap = bootstraps.get(bootstrapid)\n\n if not bootstrap:\n continue\n\n bitrate = media.attrib.get(\"bitrate\")\n streamid = media.attrib.get(\"streamId\")\n height = media.attrib.get(\"height\")\n\n if height:\n quality = height + \"p\"\n elif bitrate:\n quality = bitrate + \"k\"\n elif streamid:\n quality = streamid\n elif baseheight:\n quality = baseheight + \"p\"\n else:\n quality = \"live\"\n\n metadata = media.findtext(\"metadata\")\n\n if metadata:\n metadata = base64.b64decode(bytes(metadata, \"utf8\"))\n metadata = ScriptData.deserialize(BytesIO(metadata))\n else:\n metadata = None\n\n stream = HDSStream(session, baseurl, url, bootstrap,\n metadata=metadata, timeout=timeout,\n rsession=rsession)\n streams[quality] = stream\n\n elif href:\n url = absolute_url(baseurl, href)\n child_streams = cls.parse_manifest(session, url,\n timeout=timeout,\n rsession=rsession)\n\n for name, stream in child_streams.items():\n # Override stream name if bitrate is available in parent\n # manifest but not the child one.\n bitrate = media.attrib.get(\"bitrate\")\n\n if bitrate and not re.match(\"^(\\d+)k$\", name):\n name = bitrate + \"k\"\n\n streams[name] = stream\n\n return streams\n\n @classmethod\n def _pv_params(cls, pvswf, pv):\n \"\"\"Returns any parameters needed for Akamai HD player verification.\n\n Algorithm originally documented by KSV, source:\n http://stream-recorder.com/forum/showpost.php?p=43761&postcount=13\n \"\"\"\n\n (data, hdntl) = pv.split(\";\")\n cache = Cache(filename=\"stream.json\")\n key = \"akamaihd-player:\" + pvswf\n cached = cache.get(key)\n\n headers = dict()\n if cached:\n headers[\"If-Modified-Since\"] = cached[\"modified\"]\n swf = urlget(pvswf, headers=headers)\n\n if cached and swf.status_code == 304: # Server says not modified\n hash = cached[\"hash\"]\n else:\n # Calculate SHA-256 hash of the uncompressed SWF file, base-64\n # encoded\n hash = sha256()\n hash.update(swfdecompress(swf.content))\n hash = base64.b64encode(hash.digest()).decode(\"ascii\")\n modified = swf.headers.get(\"Last-Modified\", \"\")\n\n # Only save in cache if a valid date is given\n if len(modified) < 40:\n 
cache.set(key, dict(hash=hash, modified=modified))\n\n msg = \"st=0~exp=9999999999~acl=*~data={0}!{1}\".format(data, hash)\n auth = hmac.new(AKAMAIHD_PV_KEY, msg.encode(\"ascii\"), sha256)\n pvtoken = \"{0}~hmac={1}\".format(msg, auth.hexdigest())\n\n # The \"hdntl\" parameter can be accepted as a cookie or passed in the\n # query string, but the \"pvtoken\" parameter can only be in the query\n # string\n params = [(\"pvtoken\", pvtoken)]\n params.extend(parse_qsl(hdntl, keep_blank_values=True))\n\n return params\n\n", "id": "9715086", "language": "Python", "matching_score": 2.4668238162994385, "max_stars_count": 0, "path": "src/livestreamer/stream/hds.py" }, { "content": "import re\n\nfrom io import BytesIO\n\nfrom livestreamer.compat import is_py2, range\nfrom livestreamer.plugin import Plugin\nfrom livestreamer.stream import RTMPStream\nfrom livestreamer.utils import urlget\n\nfrom livestreamer.packages.flashmedia.types import U8, U32LE\nfrom livestreamer.plugin.api.support_plugin import common_swf as swfparser\n\n\n# This token extraction and decryption code has been ported from\n# secureToken.d which was created by DEAD_MAN_WALKING of WiZiWiG forums.\nclass Decryptor(object):\n def __init__(self, key):\n key = bytes_list(key)\n data = list(range(256))\n\n b, n = 0, len(key)\n for i in range(256):\n b += (data[i] + key[i%n])\n b &= 0xff\n data[i], data[b] = data[b], data[i]\n\n self.c1 = self.c2 = 0\n self.data = data\n\n def decrypt(self, data):\n data = bytes_list(data)\n\n for i, c in enumerate(data):\n data[i] ^= self.next_byte()\n\n return \"\".join(chr(c) for c in data)\n\n def next_byte(self):\n self.c1 += 1\n self.c2 += self.data[self.c1]\n self.c2 &= 0xff\n self.data[self.c1], self.data[self.c2] = (self.data[self.c2],\n self.data[self.c1])\n\n return self.data[(self.data[self.c1] + self.data[self.c2]) & 0xff]\n\n\ndef bytes_list(val):\n if is_py2:\n return [ord(c) for c in val]\n else:\n return list(val)\n\n\ndef extract_bin(tag):\n tag_bin = tag.data[6:]\n\n if len(tag_bin) > 4 and tag_bin[:3] != b\"CWS\":\n return tag_bin\n\n\ndef extract_bins(swf):\n for tag in swf.tags:\n if tag.type == 87 and len(tag.data) >= 6:\n tag_bin = extract_bin(tag)\n if tag_bin:\n yield tag_bin\n\n\ndef extract_strings(data, keys):\n fd = BytesIO(keys)\n keys = [fd.read(16) for i in range(U8.read(fd))]\n if not keys:\n return\n\n fd = BytesIO(data)\n for i in range(U32LE.read(fd)):\n msg = fd.read(U32LE.read(fd))\n key = keys[i % len(keys)]\n\n return Decryptor(key).decrypt(msg)\n\n\nclass ILive(Plugin):\n @classmethod\n def can_handle_url(self, url):\n return \"ilive.to\" in url\n\n def _extract_token(self, swf):\n res = urlget(swf)\n swf = swfparser.parse_swf(res.content)\n bins = list(extract_bins(swf))\n\n for tag_bin in bins:\n for tag_bin2 in filter(lambda b: b != tag_bin, bins):\n token = extract_strings(tag_bin, tag_bin2)\n if token:\n return token\n\n def _get_streams(self):\n self.logger.debug(\"Fetching stream info\")\n res = urlget(self.url)\n\n match = re.search(\"flashplayer: \\\"(.+.swf)\\\".+streamer: \\\"(.+)\\\".+\"\n \"file: \\\"(.+).flv\\\"\", res.text, re.DOTALL)\n if not match:\n return\n\n params = {\n \"rtmp\": match.group(2),\n \"pageUrl\": self.url,\n \"swfVfy\": match.group(1),\n \"playpath\" : match.group(3),\n \"token\": self._extract_token(match.group(1)),\n \"live\": True\n }\n\n streams = {}\n streams[\"live\"] = RTMPStream(self.session, params)\n\n return streams\n\n\n__plugin__ = ILive\n", "id": "4373374", "language": "Python", "matching_score": 
2.741492509841919, "max_stars_count": 1, "path": "src/livestreamer/plugins/ilive.py" }, { "content": "import re\n\nfrom livestreamer.plugin import Plugin\nfrom livestreamer.stream import HLSStream\nfrom livestreamer.utils import urlget\n\n\nUSER_AGENT = \"Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)\"\nHEADERS = {\"User-Agent\": USER_AGENT}\n\nPLAYLIST_URL = \"http://m.afreeca.com/live/stream/a/hls/broad_no/{0}\"\nCHANNEL_URL = \"http://live.afreeca.com:8079/app/index.cgi\"\nCHANNEL_REGEX = \"http(s)?://(\\w+\\.)?afreeca.com/(?P<username>\\w+)\"\n\n\nclass AfreecaTV(Plugin):\n @classmethod\n def can_handle_url(self, url):\n return re.match(CHANNEL_REGEX, url)\n\n def _find_broadcast(self, username):\n res = urlget(CHANNEL_URL, headers=HEADERS,\n params=dict(szBjId=username))\n\n match = re.search(r\"<img id=\\\"broadImg\\\" src=\\\".+\\/(\\d+)\\.gif\\\"\",\n res.text)\n if match:\n return match.group(1)\n\n def _get_streams(self):\n match = re.match(CHANNEL_REGEX, self.url)\n if not match:\n return\n\n username = match.group(\"username\")\n broadcast = self._find_broadcast(username)\n\n if not broadcast:\n return\n\n return HLSStream.parse_variant_playlist(self.session,\n PLAYLIST_URL.format(broadcast))\n\n__plugin__ = AfreecaTV\n", "id": "6783510", "language": "Python", "matching_score": 0.21535225212574005, "max_stars_count": 1, "path": "src/livestreamer/plugins/afreecatv.py" }, { "content": "from django.contrib import auth\nfrom rest_framework import viewsets, mixins, response, status\n\nfrom . import serializers\n\n\nclass SessionViewSet(viewsets.GenericViewSet,\n mixins.CreateModelMixin):\n def get_serializer_class(self):\n serializer_class = None\n\n if self.action == 'list':\n serializer_class = serializers.UserSerializer\n else:\n serializer_class = serializers.SessionSerializer\n\n return serializer_class\n\n def list(self, request, *args, **kwargs):\n if request.user.is_anonymous:\n return response.Response(status=status.HTTP_404_NOT_FOUND)\n else:\n serializer = self.get_serializer(request.user)\n return response.Response(serializer.data)\n", "id": "11502459", "language": "Python", "matching_score": 2.2889575958251953, "max_stars_count": 0, "path": "api/src/user/views.py" }, { "content": "from django.contrib import auth\nfrom rest_framework import serializers\nfrom social_django import utils as social_utils\n\n\nclass SessionSerializer(serializers.Serializer):\n code = serializers.CharField()\n\n def validate(self, data):\n # validate the user against the social backend\n social_strategy = social_utils.load_strategy()\n backend = social_utils.load_backend(social_strategy, 'google-oauth2', None)\n backend.data = data\n backend.redirect_uri = social_strategy.get_setting(\n 'SOCIAL_AUTH_GOOGLE_OAUTH2_REDIRECT_URI'\n )\n params = backend.auth_complete_params()\n response = backend.request_access_token(\n backend.access_token_url(),\n data=params,\n headers=backend.auth_headers(),\n auth=backend.auth_complete_credentials(),\n method=backend.ACCESS_TOKEN_METHOD\n )\n backend.process_error(response)\n user = backend.do_auth(response['access_token'], response=response)\n\n if not user:\n raise serializers.ValidationError('bad token')\n\n # return the user as the validated data\n return {'user': user}\n\n def create(self, validated_data):\n request = self.context['request']\n user = validated_data['user']\n\n # login the user\n auth.login(request, user)\n\n # return the user\n return user\n\n def to_representation(self, instance):\n # the instance of this serializer 
is actually an user (once it got created)\n return UserSerializer(instance).data\n\n\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = auth.get_user_model()\n fields = ('id', 'email')\n", "id": "9277039", "language": "Python", "matching_score": 2.0376362800598145, "max_stars_count": 0, "path": "api/src/user/serializers.py" }, { "content": "from rest_framework import serializers\n\nfrom . import models\n\n\nclass ProductSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Product\n fields = ('id', 'name', 'price', 'unit', 'min_amount', 'producer')\n\n\nclass ProducerSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Producer\n fields = ('id', 'name', 'slug')\n", "id": "6172897", "language": "Python", "matching_score": 2.6226747035980225, "max_stars_count": 0, "path": "api/src/producer/serializers.py" }, { "content": "from rest_framework import serializers\n\nfrom . import models\n\n\nclass OrganizationSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Organization\n fields = ('id', 'name', 'slug')\n\n\nclass NestedOrderProductSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.OrderProduct\n fields = ('product', 'amount')\n\n\nclass OrderSerializer(serializers.ModelSerializer):\n organization = serializers.SlugRelatedField(\n slug_field='slug',\n queryset=models.Organization.objects.all()\n )\n products = NestedOrderProductSerializer(many=True)\n\n class Meta:\n model = models.Order\n fields = ('id', 'user', 'organization', 'products', 'created', 'paid')\n read_only_fields = ('created',)\n\n def create(self, validated_data):\n products = validated_data.pop('products')\n order = models.Order.objects.create(**validated_data)\n models.OrderProduct.objects.bulk_create([\n models.OrderProduct(order=order, **product) for product in products\n ])\n\n return order\n", "id": "11722393", "language": "Python", "matching_score": 0.09821432083845139, "max_stars_count": 0, "path": "api/src/consumer/serializers.py" }, { "content": "from django.contrib import admin\nfrom . import models\n\n\nclass OperativeAdmin(admin.ModelAdmin):\n fields = ('start_date', 'ordering_limit_date')\n readonly_fields = ('start_date',)\n\n\nadmin.site.register(models.Operative, OperativeAdmin)\n", "id": "7228079", "language": "Python", "matching_score": 1.816821575164795, "max_stars_count": 0, "path": "api/src/operative/admin.py" }, { "content": "from django.contrib import admin\nfrom . import models\n\n\nclass OrganizationAdmin(admin.ModelAdmin):\n model = models.Organization\n prepopulated_fields = {'slug': ('name',)}\n\n\nclass OrderProductInline(admin.TabularInline):\n model = models.OrderProduct\n\n\nclass OrderAdmin(admin.ModelAdmin):\n model = models.Order\n inlines = (OrderProductInline,)\n\n\nadmin.site.register(models.Order, OrderAdmin)\nadmin.site.register(models.Organization, OrganizationAdmin)\n", "id": "12710894", "language": "Python", "matching_score": 2.060035228729248, "max_stars_count": 0, "path": "api/src/consumer/admin.py" }, { "content": "from django.contrib import admin\nfrom django.utils import formats\nfrom . 
import models\n\n\nclass ProducerAdmin(admin.ModelAdmin):\n model = models.Producer\n prepopulated_fields = {'slug': ('name',)}\n\n\nclass CategoryAdmin(admin.ModelAdmin):\n model = models.Category\n\n\nclass ProductAdmin(admin.ModelAdmin):\n model = models.Product\n list_display = ('name', 'unit', 'precio_por_unidad', 'producer', 'min_amount', 'disabled')\n\n def precio_por_unidad(self, obj):\n return f'${formats.number_format(obj.price, force_grouping=True)}'\n\n\nadmin.site.register(models.Producer, ProducerAdmin)\nadmin.site.register(models.Category, CategoryAdmin)\nadmin.site.register(models.Product, ProductAdmin)\n", "id": "4427796", "language": "Python", "matching_score": 1.8117632865905762, "max_stars_count": 0, "path": "api/src/producer/admin.py" }, { "content": "# Generated by Django 2.1.5 on 2019-02-11 02:11\n\nfrom django.db import migrations\n\ndef forward(apps, schema_editor):\n Category = apps.get_model('producer', 'Category')\n Product = apps.get_model('producer', 'Product')\n\n # create placeholder category\n category = Category.objects.create(name='Placeholder')\n\n # set the category on all existing products\n Product.objects.update(category=category)\n\n\ndef backward(apps, schema_editor):\n Product = apps.get_model('producer', 'Product')\n\n # remove the producer from all existing products\n Product.objects.update(category=None)\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('producer', '0008_add_category_model'),\n ]\n\n operations = [\n migrations.RunPython(forward, backward)\n ]\n", "id": "6952962", "language": "Python", "matching_score": 3.7331364154815674, "max_stars_count": 0, "path": "api/src/producer/migrations/0009_add_default_category.py" }, { "content": "# Generated by Django 2.1 on 2018-09-13 02:29\n\nfrom django.db import migrations\n\n\ndef forward(apps, schema_editor):\n Producer = apps.get_model('producer', 'Producer')\n Product = apps.get_model('producer', 'Product')\n\n # create placeholder producer\n producer = Producer.objects.create(name='Placeholder', slug='placeholder')\n\n # set the producer on all existing products\n Product.objects.update(producer=producer)\n\n\ndef backward(apps, schema_editor):\n Product = apps.get_model('producer', 'Product')\n\n # remove the producer from all existing products\n Product.objects.update(producer=None)\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('producer', '0002_auto_20180913_0229'),\n ]\n\n operations = [\n migrations.RunPython(forward, backward)\n ]\n", "id": "8404716", "language": "Python", "matching_score": 1.8490790128707886, "max_stars_count": 0, "path": "api/src/producer/migrations/0003_placeholder_producer.py" }, { "content": "# Generated by Django 2.1.1 on 2018-09-30 01:34\n\nfrom django.db import migrations\nfrom django.utils import timezone\n\n\ndef forward(apps, schema_editor):\n Operative = apps.get_model('operative', 'Operative')\n Order = apps.get_model('consumer', 'Order')\n\n # create placeholder operative\n operative = Operative.objects.create(ordering_limit_date=timezone.now())\n\n # set the operative on all existing orders\n Order.objects.update(operative=operative)\n\n\ndef backward(apps, schema_editor):\n Order = apps.get_model('consumer', 'Order')\n\n # remove the operative from all existing orders\n Order.objects.update(operative=None)\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('consumer', '0006_order_operative'),\n ('operative', '0002_auto_20180930_0116')\n ]\n\n operations = [\n migrations.RunPython(forward, 
backward)\n ]\n", "id": "79403", "language": "Python", "matching_score": 2.972022533416748, "max_stars_count": 0, "path": "api/src/consumer/migrations/0007_order_base_operative_20180930_0134.py" }, { "content": "# Generated by Django 2.1.1 on 2018-10-05 04:44\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('operative', '0002_auto_20180930_0116'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='operative',\n options={'get_latest_by': ('-start_date',), 'verbose_name': 'operativo'},\n ),\n ]\n", "id": "11333490", "language": "Python", "matching_score": 2.581749439239502, "max_stars_count": 0, "path": "api/src/operative/migrations/0003_auto_20181005_0444.py" }, { "content": "# Generated by Django 2.1.1 on 2018-09-30 01:33\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('operative', '0002_auto_20180930_0116'),\n ('consumer', '0005_auto_20180930_0113'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='order',\n name='operative',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='operative.Operative', verbose_name='operativo'),\n ),\n ]\n", "id": "9487200", "language": "Python", "matching_score": 4.365248203277588, "max_stars_count": 0, "path": "api/src/consumer/migrations/0006_order_operative.py" }, { "content": "# Generated by Django 2.1.1 on 2018-09-30 01:39\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('consumer', '0007_order_base_operative_20180930_0134'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='order',\n name='operative',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='operative.Operative', verbose_name='operative'),\n ),\n ]\n", "id": "9716375", "language": "Python", "matching_score": 2.838231325149536, "max_stars_count": 0, "path": "api/src/consumer/migrations/0008_auto_20180930_0139.py" }, { "content": "# Generated by Django 2.1 on 2018-08-26 23:50\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('consumer', '0003_auto_20180823_0231'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Organization',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=200)),\n ('slug', models.SlugField(max_length=200)),\n ],\n ),\n migrations.AddField(\n model_name='order',\n name='organization',\n field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='consumer.Organization'),\n preserve_default=False,\n ),\n ]\n", "id": "6185549", "language": "Python", "matching_score": 4.003812789916992, "max_stars_count": 0, "path": "api/src/consumer/migrations/0004_auto_20180826_2350.py" }, { "content": "# Generated by Django 2.1 on 2018-08-23 02:31\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('consumer', '0002_auto_20180823_0201'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='order',\n name='products',\n ),\n migrations.AlterField(\n model_name='orderproduct',\n name='order',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='products', to='consumer.Order'),\n ),\n ]\n", "id": 
"12571772", "language": "Python", "matching_score": 3.665160655975342, "max_stars_count": 0, "path": "api/src/consumer/migrations/0003_auto_20180823_0231.py" }, { "content": "# Generated by Django 2.1.1 on 2018-09-30 01:13\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('consumer', '0004_auto_20180826_2350'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='order',\n options={'verbose_name': 'orden', 'verbose_name_plural': 'ordenes'},\n ),\n migrations.AlterModelOptions(\n name='orderproduct',\n options={'verbose_name': 'productos de orden'},\n ),\n migrations.AlterModelOptions(\n name='organization',\n options={'verbose_name': 'organización', 'verbose_name_plural': 'organizaciones'},\n ),\n migrations.AlterField(\n model_name='order',\n name='created',\n field=models.DateTimeField(auto_now_add=True, verbose_name='fecha de creación'),\n ),\n migrations.AlterField(\n model_name='order',\n name='organization',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='consumer.Organization', verbose_name='organización'),\n ),\n migrations.AlterField(\n model_name='order',\n name='user',\n field=models.CharField(max_length=200, verbose_name='usuario'),\n ),\n migrations.AlterField(\n model_name='orderproduct',\n name='amount',\n field=models.DecimalField(decimal_places=2, max_digits=5, verbose_name='cantidad'),\n ),\n migrations.AlterField(\n model_name='orderproduct',\n name='order',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='products', to='consumer.Order', verbose_name='orden'),\n ),\n migrations.AlterField(\n model_name='orderproduct',\n name='product',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='producer.Product', verbose_name='producto'),\n ),\n migrations.AlterField(\n model_name='organization',\n name='name',\n field=models.CharField(max_length=200, verbose_name='nombre'),\n ),\n ]\n", "id": "12439036", "language": "Python", "matching_score": 5.82268762588501, "max_stars_count": 0, "path": "api/src/consumer/migrations/0005_auto_20180930_0113.py" }, { "content": "from django.db import models\n\n\nclass Organization(models.Model):\n name = models.CharField(max_length=200, verbose_name='nombre')\n slug = models.SlugField(max_length=200)\n\n class Meta:\n verbose_name = 'organización'\n verbose_name_plural = 'organizaciones'\n\n def __str__(self):\n return self.name\n\n\nclass Order(models.Model):\n user = models.CharField(max_length=200, verbose_name='usuario')\n organization = models.ForeignKey(\n 'Organization',\n on_delete=models.CASCADE,\n verbose_name='organización',\n )\n operative = models.ForeignKey(\n 'operative.Operative',\n on_delete=models.CASCADE,\n verbose_name='operativo',\n )\n created = models.DateTimeField(auto_now_add=True, verbose_name='fecha de creación')\n paid = models.BooleanField(default=False, verbose_name='pagado')\n\n class Meta:\n verbose_name = 'orden'\n verbose_name_plural = 'ordenes'\n\n def __str__(self):\n return 'Orden de {}'.format(self.user)\n\n\nclass OrderProduct(models.Model):\n order = models.ForeignKey(\n 'Order',\n on_delete=models.CASCADE,\n related_name='products',\n verbose_name='orden',\n )\n product = models.ForeignKey(\n 'producer.Product',\n on_delete=models.CASCADE,\n related_name='+',\n verbose_name='producto',\n )\n amount = models.DecimalField(max_digits=5, decimal_places=2, verbose_name='cantidad')\n\n class Meta:\n 
verbose_name = 'producto de orden'\n verbose_name = 'productos de orden'\n", "id": "4501048", "language": "Python", "matching_score": 4.52211856842041, "max_stars_count": 0, "path": "api/src/consumer/models.py" }, { "content": "from django.db import models\n\n\nclass List(models.Model):\n name = models.CharField(max_length=100)\n creation_date = models.DateTimeField(auto_now_add=True)\n\n def __unicode__(self):\n return self.name\n\n\nclass Item(models.Model):\n list = models.ForeignKey('List', related_name='items')\n text = models.TextField()\n done = models.BooleanField(default=False)\n creation_date = models.DateTimeField(auto_now_add=True)\n\n def __unicode__(self):\n return self.text\n", "id": "10687928", "language": "Python", "matching_score": 1.3887690305709839, "max_stars_count": 0, "path": "api-django/todo/models.py" }, { "content": "from django.db import models\nfrom django.utils import formats\n\n\nclass OperativeQueryset(models.QuerySet):\n def get_current(self):\n return self.latest()\n\n\nclass Operative(models.Model):\n start_date = models.DateField(auto_now_add=True, verbose_name='fecha de inicio')\n ordering_limit_date = models.DateField(verbose_name='fecha límite de ordenes')\n\n objects = OperativeQueryset.as_manager()\n\n class Meta:\n get_latest_by = ('start_date',)\n verbose_name = 'operativo'\n\n def __str__(self):\n return f'Operativo {formats.date_format(self.start_date, format=\"YEAR_MONTH_FORMAT\")}'\n", "id": "1342390", "language": "Python", "matching_score": 3.519529342651367, "max_stars_count": 0, "path": "api/src/operative/models.py" }, { "content": "# Generated by Django 2.1.1 on 2018-09-30 01:16\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('operative', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='operative',\n options={'verbose_name': 'operativo'},\n ),\n migrations.AlterField(\n model_name='operative',\n name='ordering_limit_date',\n field=models.DateField(verbose_name='fecha límite de ordenes'),\n ),\n migrations.AlterField(\n model_name='operative',\n name='start_date',\n field=models.DateField(auto_now_add=True, verbose_name='fecha de inicio'),\n ),\n ]\n", "id": "6972660", "language": "Python", "matching_score": 2.973762273788452, "max_stars_count": 0, "path": "api/src/operative/migrations/0002_auto_20180930_0116.py" }, { "content": "# Generated by Django 2.1.1 on 2018-09-30 01:13\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('producer', '0004_auto_20180913_0230'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='producer',\n options={'verbose_name': 'productor', 'verbose_name_plural': 'productores'},\n ),\n migrations.AlterModelOptions(\n name='product',\n options={'ordering': ('id',), 'verbose_name': 'producto'},\n ),\n migrations.AlterField(\n model_name='producer',\n name='name',\n field=models.CharField(max_length=200, verbose_name='nombre'),\n ),\n migrations.AlterField(\n model_name='product',\n name='min_amount',\n field=models.DecimalField(decimal_places=2, max_digits=4, verbose_name='cantidad mínima'),\n ),\n migrations.AlterField(\n model_name='product',\n name='name',\n field=models.CharField(max_length=200, verbose_name='nombre'),\n ),\n migrations.AlterField(\n model_name='product',\n name='price',\n field=models.DecimalField(decimal_places=2, max_digits=7, verbose_name='precio'),\n ),\n migrations.AlterField(\n 
model_name='product',\n name='producer',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='producer.Producer', verbose_name='productor'),\n ),\n migrations.AlterField(\n model_name='product',\n name='unit',\n field=models.CharField(max_length=50, verbose_name='unidad'),\n ),\n ]\n", "id": "6721815", "language": "Python", "matching_score": 4.388657569885254, "max_stars_count": 0, "path": "api/src/producer/migrations/0005_auto_20180930_0113.py" }, { "content": "from django.db import models\n\n\nclass Producer(models.Model):\n name = models.CharField(max_length=200, verbose_name='nombre')\n slug = models.SlugField(max_length=200)\n\n class Meta:\n verbose_name = 'productor'\n verbose_name_plural = 'productores'\n\n def __str__(self):\n return self.name\n\n\nclass Category(models.Model):\n name = models.CharField(max_length=200, verbose_name='nombre')\n\n class Meta:\n verbose_name = 'categoria'\n verbose_name_plural = 'categorias'\n\n def __str__(self):\n return self.name\n\n\nclass ProductQueryset(models.QuerySet):\n def only_enabled(self):\n return self.exclude(disabled=True)\n\n\nclass Product(models.Model):\n name = models.CharField(max_length=200, verbose_name='nombre')\n price = models.DecimalField(max_digits=7, decimal_places=2, verbose_name='precio')\n unit = models.CharField(max_length=50, verbose_name='unidad')\n min_amount = models.DecimalField(\n max_digits=4,\n decimal_places=2,\n verbose_name='cantidad mínima',\n )\n disabled = models.BooleanField(default=False, verbose_name='deshabilitado')\n producer = models.ForeignKey(Producer, models.CASCADE, verbose_name='productor')\n category = models.ForeignKey(Category, models.CASCADE, verbose_name='categoria')\n\n objects = ProductQueryset.as_manager()\n\n class Meta:\n ordering = ('id',)\n verbose_name = 'producto'\n\n def __str__(self):\n return self.name\n", "id": "3314560", "language": "Python", "matching_score": 4.119510173797607, "max_stars_count": 0, "path": "api/src/producer/models.py" }, { "content": "# Generated by Django 2.0.7 on 2018-07-29 17:32\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Product',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=200)),\n ('price', models.DecimalField(decimal_places=2, max_digits=7)),\n ('unit', models.CharField(max_length=50)),\n ('min_amount', models.DecimalField(decimal_places=2, max_digits=4)),\n ],\n options={\n 'ordering': ('id',),\n },\n ),\n ]\n", "id": "11715139", "language": "Python", "matching_score": 3.474663257598877, "max_stars_count": 0, "path": "api/src/producer/migrations/0001_initial.py" }, { "content": "# Generated by Django 2.1 on 2018-08-23 02:01\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('consumer', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='orderproduct',\n name='amount',\n field=models.DecimalField(decimal_places=2, max_digits=5),\n ),\n ]\n", "id": "11351045", "language": "Python", "matching_score": 0.08120565116405487, "max_stars_count": 0, "path": "api/src/consumer/migrations/0002_auto_20180823_0201.py" }, { "content": "import graphene\nfrom graphene_django.types import DjangoObjectType\n\nfrom . 
import models\n\n\nclass ListType(DjangoObjectType):\n class Meta:\n model = models.List\n\n\nclass ItemType(DjangoObjectType):\n class Meta:\n model = models.Item\n\n\nclass Query(object):\n list = graphene.Field(ListType, id=graphene.ID())\n lists = graphene.List(ListType)\n\n def resolve_list(self, info, id):\n return models.List.objects.get(pk=id)\n\n def resolve_lists(self, info, **kwargs):\n return models.List.objects.all()\n\n\nclass CreateList(graphene.Mutation):\n list = graphene.Field(ListType, required=True)\n\n class Arguments:\n name = graphene.String(required=True)\n\n def mutate(self, info, name):\n list = models.List.objects.create(name=name)\n\n return CreateList(list=list)\n\n\nclass DeleteList(graphene.Mutation):\n ok = graphene.Boolean(required=True)\n\n class Arguments:\n id = graphene.ID(required=True)\n\n def mutate(self, info, id):\n delete_result = models.List.objects.filter(id=id).delete()\n\n return DeleteList(ok=delete_result[0] == 1)\n\n\nclass CreateItem(graphene.Mutation):\n item = graphene.Field(ItemType, required=True)\n\n class Arguments:\n list_id = graphene.ID(required=True)\n text = graphene.String(required=True)\n\n def mutate(self, info, list_id, text):\n item = models.Item.objects.create(list_id=list_id, text=text)\n\n return CreateItem(item=item)\n\n\nclass ChangeItemText(graphene.Mutation):\n item = graphene.Field(ItemType, required=True)\n\n class Arguments:\n id = graphene.ID(required=True)\n text = graphene.String(required=True)\n\n def mutate(self, info, id, text):\n item = models.Item.objects.get(id=id)\n item.text = text\n item.save()\n\n return ChangeItemText(item=item)\n\n\nclass ToggleItem(graphene.Mutation):\n item = graphene.Field(ItemType, required=True)\n\n class Arguments:\n id = graphene.ID(required=True)\n done = graphene.Boolean()\n\n def mutate(self, info, id, done=None):\n item = models.Item.objects.get(id=id)\n\n if done is not None:\n item.done = done\n else:\n item.done = not item.done\n\n item.save()\n\n return ToggleItem(item=item)\n\n\nclass DeleteItem(graphene.Mutation):\n ok = graphene.Boolean(required=True)\n\n class Arguments:\n id = graphene.ID(required=True)\n\n def mutate(self, info, id):\n delete_result = models.Item.objects.filter(id=id).delete()\n\n return DeleteItem(ok=delete_result[0] == 1)\n\n\nclass Mutation(object):\n create_list = CreateList.Field()\n delete_list = DeleteList.Field()\n create_item = CreateItem.Field()\n change_item_text = ChangeItemText.Field()\n toggle_item = ToggleItem.Field()\n delete_item = DeleteItem.Field()\n", "id": "5973298", "language": "Python", "matching_score": 1.5626133680343628, "max_stars_count": 0, "path": "api-django/todo/schema.py" }, { "content": "import graphene\nfrom todo import schema as todo_schema\n\n\nclass Query(todo_schema.Query, graphene.ObjectType):\n # This class will inherit from multiple Queries\n # as we begin to add more apps to our project\n pass\n\n\nclass Mutation(todo_schema.Mutation, graphene.ObjectType):\n # This class will inherit from multiple Mutations\n # as we begin to add more apps to our project\n pass\n\n\nschema = graphene.Schema(query=Query, mutation=Mutation)\n", "id": "12720206", "language": "Python", "matching_score": 1.3763922452926636, "max_stars_count": 0, "path": "api-django/api/schema.py" } ]
2.524287
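The last files in the record above (api-django/todo/schema.py and api-django/api/schema.py) wire the todo Query and Mutation classes into a single graphene.Schema. Below is a minimal, hedged sketch of how that schema could be exercised directly from Python rather than through an HTTP view; the settings module name "api.settings" is an assumption about the project layout, and a configured, migrated database is required because the resolvers call the Django ORM. Field and argument names appear camelCased because graphene's auto_camelcase is on by default.

# Sketch only: exercises the graphene schema from api-django/api/schema.py.
# "api.settings" is an assumed settings module name, not taken from the files above.
import os

import django

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "api.settings")  # assumed module name
django.setup()  # load installed apps so the ORM calls inside the resolvers work

from api.schema import schema  # the graphene.Schema built in api-django/api/schema.py

# Create a list through the CreateList mutation; schema.execute() returns an
# ExecutionResult carrying .errors and .data.
created = schema.execute('mutation { createList(name: "groceries") { list { id name } } }')
assert created.errors is None, created.errors
list_id = created.data["createList"]["list"]["id"]

# Add an item to that list (create_item -> createItem, list_id -> listId via auto_camelcase).
added = schema.execute(
    'mutation { createItem(listId: "%s", text: "milk") { item { id text done } } }' % list_id
)
assert added.errors is None, added.errors

# Read everything back through the lists query.
listed = schema.execute("{ lists { id name } }")
print(listed.data)

In the project itself the same schema object would normally be served through graphene_django's GraphQLView; calling schema.execute() directly, as sketched here, is mainly useful for quick checks or tests.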
cttsai1985
[ { "content": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jun 20 14:24:59 2017\r\n\r\n@author: Tsai, Chia-Ta\r\n\"\"\"\r\nfrom math import exp, expm1, log1p, log10, log2, sqrt, ceil, floor\r\nfrom random import choice, sample, uniform\r\nimport time\r\n#pyData stack\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom scipy import sparse\r\n#sklearn preprocessing, model selection\r\nfrom sklearn.model_selection import StratifiedKFold, StratifiedShuffleSplit\r\n#sklearn classifier\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.preprocessing import LabelEncoder, MinMaxScaler\r\nfrom sklearn.metrics import jaccard_similarity_score, accuracy_score\r\n\r\nfrom keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation\r\nfrom keras.layers.merge import concatenate\r\nfrom keras.models import Model\r\nfrom keras.layers.normalization import BatchNormalization\r\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\r\n\r\n\r\n\r\ndef to_time(df, f_time='time'):\r\n \r\n df[f_time] = pd.to_datetime(df[f_time], unit='s')\r\n \r\n #numeric\r\n f_hour = 'inf_hour'\r\n f_wday = 'inf_wday'\r\n f_week = 'inf_week'\r\n f_wdhr = 'inf_wdhr'\r\n \r\n #d, h, m, w = 31, 24, 60, 7\r\n df[f_hour] = df[f_time].dt.hour\r\n df[f_wday] = df[f_time].dt.dayofweek\r\n df[f_week] = df[f_time].dt.week\r\n df[f_wdhr] = df[f_wday] * 24 + df[f_hour]\r\n df[f_wdhr] = df[f_wdhr].apply(str)\r\n \r\n #print(df.describe())\r\n\r\n#string\r\ndef titles_agg(train_data, test_data, hist, stem='tmp'):\r\n \r\n print('{}:\\t{} records'.format(stem, hist.shape[0]), flush=True)\r\n #list and count\r\n tmp = hist.groupby('user_id')['title_id'].agg(' '.join)#.apply(lambda x: x.split())\r\n tmp = tmp.rename('list_ttl_{}'.format(stem)).to_frame()\r\n tmp['user_id'] = tmp.index\r\n tmp = tmp.reset_index(drop=True)\r\n \r\n train_data = train_data.merge(tmp, how='left', on='user_id')\r\n train_data = train_data.fillna('')\r\n train_data['f_cnt_{}'.format(stem)] = train_data['list_ttl_{}'.format(stem)].apply(lambda x: len(x.split()))\r\n \r\n test_data = test_data.merge(tmp, how='left', on='user_id')\r\n test_data = test_data.fillna('')\r\n test_data['f_cnt_{}'.format(stem)] = test_data['list_ttl_{}'.format(stem)].apply(lambda x: len(x.split()))\r\n \r\n del tmp\r\n return train_data, test_data\r\n\r\n#int\r\ndef sum_watch_time(train_data, test_data, hist, stem='tmp'):\r\n \r\n #sum time\r\n tmp = hist.groupby('user_id')['watch_time'].sum()\r\n tmp = tmp.rename('f_time_sum_{}'.format(stem)).to_frame()\r\n tmp['user_id'] = tmp.index\r\n tmp = tmp.reset_index(drop=True)\r\n \r\n #merge\r\n train_data = train_data.merge(tmp, how='left', on='user_id') \r\n test_data = test_data.merge(tmp, how='left', on='user_id')\r\n del tmp\r\n\r\n #var time\r\n tmp = hist.groupby('user_id')['watch_time'].var()\r\n tmp = tmp.rename('f_time_var_{}'.format(stem)).to_frame()\r\n tmp['user_id'] = tmp.index\r\n tmp = tmp.reset_index(drop=True)\r\n \r\n #merge\r\n train_data = train_data.merge(tmp, how='left', on='user_id') \r\n test_data = test_data.merge(tmp, how='left', on='user_id')\r\n del tmp\r\n \r\n train_data = train_data.fillna(0) \r\n test_data = test_data.fillna(0)\r\n\r\n #print(train_data)\r\n return train_data, test_data\r\n\r\n#string\r\ndef trigger_time(train_data, test_data, hist, stem='tmp'):\r\n\r\n tmp = hist.groupby('user_id')['inf_wdhr'].agg(' '.join)#.apply(lambda x: x.split())\r\n tmp = tmp.rename('list_trg_{}'.format(stem)).to_frame()\r\n tmp['user_id'] = tmp.index\r\n tmp = 
tmp.reset_index(drop=True)\r\n\r\n #merge\r\n train_data = train_data.merge(tmp, how='left', on='user_id')\r\n train_data = train_data.fillna('')\r\n train_data['f_cnt_{}'.format(stem)] = train_data['list_trg_{}'.format(stem)].apply(lambda x: len(x.split()))\r\n \r\n test_data = test_data.merge(tmp, how='left', on='user_id')\r\n test_data = test_data.fillna('')\r\n test_data['f_cnt_{}'.format(stem)] = test_data['list_trg_{}'.format(stem)].apply(lambda x: len(x.split()))\r\n \r\n\r\n del tmp\r\n return train_data, test_data\r\n\r\n\r\n#read\r\ninput_folder = './'\r\n####train\r\ntrain_events = pd.read_csv(input_folder + 'events_train.csv', dtype={'user_id': np.str, 'title_id': np.str})\r\ntrain_users = pd.read_csv(input_folder + 'labels_train.csv', dtype={'user_id': np.str, 'title_id': np.str})\r\n####test\r\ntest_events = pd.read_csv(input_folder + 'events_test.csv', dtype={'user_id': np.str, 'title_id': np.str})\r\ntest_users = pd.DataFrame()\r\ntest_users['user_id'] = test_events['user_id'].unique()\r\n\r\n#use top titles from both train and test; \r\nall_events = pd.concat([train_events, test_events]).reset_index(drop=True)\r\nto_time(all_events)\r\n\r\n#clearing labels\r\ntotal = len(train_users)\r\n \r\nmin_hits = 5\r\nsel = train_users['title_id'].value_counts()\r\nprint('Existing {} Labels'.format(len(sel)))\r\nsel = sel.loc[sel >= min_hits].index.tolist()\r\nprint('Reduced to {} Labels, removing minors less freq <= {}'.format(len(sel), min_hits), flush=True)\r\ntrain_users = train_users.loc[(train_users['title_id'].isin(sel))]\r\nratio = len(train_users) / total\r\nprint('Ratio = {:.6f}\\n'.format(ratio), flush=True)\r\n\r\n\r\n#all\r\ns = 'overall'\r\ntrain_users, test_users = titles_agg(train_users, test_users, all_events, stem=s)\r\ntrain_users, test_users = sum_watch_time(train_users, test_users, all_events, stem=s)\r\ntrain_users, test_users = trigger_time(train_users, test_users, all_events, stem=s)\r\n\r\n#rough\r\n#short=>dislike\r\nt = 60 * 5 #watch_time\r\ns = 'in{:04d}s'.format(t)\r\nsel_events = all_events.loc[all_events['watch_time'] <= t]\r\ntrain_users, test_users = titles_agg(train_users, test_users, sel_events, stem=s)\r\n\r\n###########\r\n#lastest-1\r\n#recent intested in\r\nw = 39 #w-th week\r\nt = 60 * 5 #watch_time\r\ns = 'out{:04d}s{}w'.format(t, w)\r\nsel_events = all_events.loc[(all_events['watch_time'] >= t) & (all_events['inf_week'] >= w)]\r\ntrain_users, test_users = titles_agg(train_users, test_users, sel_events, stem=s)\r\ntrain_users, test_users = sum_watch_time(train_users, test_users, sel_events, stem=s)\r\n\r\nprint(train_users.shape)\r\n\r\n#features list\r\nprint('Extracted features:')\r\nf_ttl = [s for s in train_users.columns.tolist() if s.startswith('list_ttl')]\r\nprint('{}: {}'.format(len(f_ttl), f_ttl))\r\nf_trg = [s for s in train_users.columns.tolist() if s.startswith('list_trg')]\r\nprint('{}: {}'.format(len(f_trg), f_trg))\r\nf_num = [s for s in train_users.columns.tolist() if s.startswith('f_')]\r\nprint('{}: {}'.format(len(f_num), f_num))\r\n\r\n#dataset\r\ntarget_lbl = LabelEncoder()\r\ntrain_y = target_lbl.fit_transform(train_users['title_id'].tolist())\r\ny_max = max(train_y) + 1\r\nprint(train_y.shape)\r\n\r\n#numerics\r\nfor f in f_num:\r\n train_users[f] = train_users[f].apply(np.nan_to_num)\r\n test_users[f] = test_users[f].apply(np.nan_to_num)\r\n \r\nscalar = MinMaxScaler(feature_range=(0, 1), copy=True)\r\ntrain_users[f_num] = scalar.fit_transform(train_users[f_num])\r\ntest_users[f_num] = 
scalar.transform(test_users[f_num])\r\n\r\ntrain_X_num = train_users[f_num].as_matrix()\r\ntest_X_num = test_users[f_num].as_matrix()\r\n\r\n\r\ntrain_X = [train_X_num]\r\ntest_X = [test_X_num]\r\n\r\nttl_cnt = len(list(all_events['title_id'].unique()))\r\n#CountVec Merged\r\ncntVec = CountVectorizer(ngram_range=(1, 1), analyzer='word')\r\ncntVec.fit(all_events['title_id'])\r\nfor f in f_ttl:\r\n add = cntVec.transform(train_users[f])\r\n add = np.log1p(add)\r\n train_X.append(add.todense())\r\n print('{} +{}'.format(f, add.shape[1]), flush=True)\r\n \r\n add = cntVec.transform(test_users[f])\r\n add = np.log1p(add)\r\n test_X.append(add.todense())\r\n\r\n#CountVec Merged\r\ncntVec = CountVectorizer(ngram_range=(1, 1), analyzer='word')\r\ncntVec.fit(all_events['inf_wdhr'])\r\nfor f in f_trg:\r\n add = cntVec.transform(train_users[f])\r\n add = np.log1p(add)\r\n train_X.append(add.todense())\r\n print('{} +{}'.format(f, add.shape[1]), flush=True)\r\n \r\n add = cntVec.transform(test_users[f])\r\n add = np.log1p(add)\r\n test_X.append(add.todense())\r\n \r\n wdhr = add.todense().shape[1]\r\n\r\nprint('\\ndims for each feature', flush=True)\r\ninputs_ndim = []\r\nfor x in train_X:\r\n print(x.shape, flush=True)\r\n inputs_ndim.append(x.shape[1])\r\n\r\n#fold for CV\r\nprint('Assigning CV', flush=True)\r\nnr_splits = 5\r\nfold_gen_seed = 62017\r\ntrain_sets, valid_sets = list(), list()\r\nfold_gen = StratifiedKFold(n_splits=nr_splits, shuffle=True, random_state=fold_gen_seed)\r\nfor train_indices, valid_indices in fold_gen.split(train_y, train_y):\r\n train_sets.append(train_indices)\r\n valid_sets.append(valid_indices)\r\n\r\nX_train = []\r\nX_valid = []\r\ny_train = train_y[train_sets[0]]\r\ny_valid = train_y[valid_sets[0]]\r\nfor x in train_X:\r\n X_train.append(x[train_sets[0]])\r\n X_valid.append(x[valid_sets[0]])\r\n\r\n\r\ntmstmp = '{}'.format(time.strftime(\"%Y-%m-%d-%H-%M\"))\r\n\r\n# define the model structure\r\n########################################\r\ninputs_collected = []\r\ndense_collected = []\r\n\r\nnum_dence_input = Input(shape=(inputs_ndim[0],))#, dtype='int32')\r\n\r\n#ordinary dense\r\nnum_dence = Dense(16, activation='relu')(num_dence_input)\r\n\r\ninputs_collected.append(num_dence_input)\r\ndense_collected.append(num_dence)\r\n\r\n#shared dense\r\ndense_ttl = Dense(16, activation='relu')\r\ndense_wdhr = Dense(4, activation='relu')\r\n\r\nfor x in inputs_ndim:\r\n #for titles\r\n if x == ttl_cnt:\r\n ttl_dence_input = Input(shape=(ttl_cnt,))#, dtype='int32')\r\n ttl_dence1 = dense_ttl(ttl_dence_input)\r\n \r\n inputs_collected.append(ttl_dence_input)\r\n dense_collected.append(ttl_dence1)\r\n \r\n #for wdhr\r\n if x == wdhr:\r\n wdhr_dence_input = Input(shape=(wdhr,))#, dtype='int32')\r\n wdhr_dence1 = dense_wdhr(wdhr_dence_input)\r\n \r\n inputs_collected.append(wdhr_dence_input)\r\n dense_collected.append(wdhr_dence1) \r\n \r\nconcat = concatenate(dense_collected, axis=-1)\r\n\r\n#final\r\ndense_bn = BatchNormalization()(concat)\r\ndense_dp1 = Dropout(0.25)(dense_bn)\r\ndense_ds1 = Dense(512, activation='relu')(dense_dp1)\r\ndense_dp2 = Dropout(0.5)(dense_ds1)\r\noutput = Dense(y_max, activation='softmax')(dense_dp2)\r\n\r\nmodel = Model(inputs=inputs_collected, outputs=output)\r\nmodel.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['categorical_accuracy'])\r\n\r\n#print(model.summary(), flush=True)\r\nprint('Training keras', flush=True)\r\n\r\n#callback\r\nearly_stopping = EarlyStopping(monitor='val_loss', patience=5)\r\nbst_model_path = 
tmstmp + '.h5'\r\nmodel_checkpoint = ModelCheckpoint(bst_model_path, save_best_only=True, save_weights_only=True)\r\n\r\n#fit\r\nhist = model.fit(X_train, y_train, validation_data=(X_valid, y_valid), epochs=100, batch_size=128, shuffle=True, callbacks=[early_stopping, model_checkpoint])\r\n # class_weight=class_weight, callbacks=[early_stopping, model_checkpoint])\r\n \r\nmodel.load_weights(bst_model_path)\r\nbst_val_score = min(hist.history['val_loss'])\r\n \r\nval_probs = model.predict(X_valid, batch_size=4096, verbose=1)\r\nval_preds = np.argmax(val_probs, axis=1)\r\njcc = jaccard_similarity_score(y_valid, val_preds)\r\nacc = accuracy_score(y_valid, val_preds)\r\nprint('\\nVal: loss={:.6f}, jcc={:.6f}, acc={:.6f}'.format(bst_val_score, jcc, acc), flush=True)\r\n \r\n# make the submission\r\nprint('\\nPrediction', flush=True)\r\nprobs = model.predict(test_X, batch_size=4096, verbose=1)\r\n \r\n#\r\nprint(\"\\nWriting output...\\n\\n\")\r\nsub = pd.DataFrame()\r\nsub['user_id'] = test_users['user_id']\r\nsub['title_id'] = target_lbl.inverse_transform(np.argmax(probs, axis=1))\r\n#sub['title_id'] = sub['title_id'].apply(lambda x: '{:08d}'.format(x))\r\nprint(sub['title_id'].value_counts())\r\nsub.to_csv(\"preds_keras_{}_s{:.6f}.csv\".format(tmstmp, jcc* ratio), index=False)\r\n\r\n", "id": "7159786", "language": "Python", "matching_score": 6.868613243103027, "max_stars_count": 0, "path": "KKBOXeras.py" }, { "content": "from itertools import combinations\r\nfrom math import exp, expm1, log1p, log10, log2, sqrt, ceil, floor, radians, sin, cos\r\nfrom random import choice, sample, uniform\r\nimport time\r\n#pyData stack\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom scipy import sparse\r\n#sklearn preprocessing, model selection\r\nfrom sklearn.model_selection import StratifiedKFold, StratifiedShuffleSplit\r\n#sklearn classifier\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.preprocessing import LabelEncoder, MinMaxScaler\r\nfrom sklearn.metrics import jaccard_similarity_score, accuracy_score\r\nfrom sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier\r\nfrom sklearn.linear_model import SGDClassifier\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.decomposition import TruncatedSVD, NMF, KernelPCA\r\nimport lightgbm as lgb\r\n\r\nfrom keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation\r\nfrom keras.layers.merge import concatenate\r\nfrom keras.models import Model\r\nfrom keras.layers.normalization import BatchNormalization\r\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\r\nfrom keras.utils.np_utils import to_categorical\r\n\r\n\r\ndef to_time(df, f_time='time'):\r\n \r\n df[f_time] = pd.to_datetime(df[f_time], unit='s')\r\n \r\n #numeric\r\n #f_mday = 'inf_scl_{}_day'.format(f_time)\r\n f_hour = 'inf_hour'\r\n f_wday = 'inf_wday'\r\n f_week = 'inf_week'\r\n f_wdhr = 'inf_wdhr'\r\n #f_week = 'inf_{}_week'.format(f_time)\r\n \r\n #d, h, m, w = 31, 24, 60, 7\r\n #df[f_mday] = df[f_time].dt.day# /d\r\n df[f_hour] = df[f_time].dt.hour# /h\r\n df[f_wday] = df[f_time].dt.dayofweek# /w\r\n df[f_week] = df[f_time].dt.week\r\n df[f_wdhr] = df[f_wday] * 24 + df[f_hour]\r\n df[f_wdhr] = df[f_wdhr].apply(str)\r\n \r\n #print(df.describe())\r\n\r\n#string\r\ndef titles_agg(train_data, test_data, hist, stem='tmp', last_only=False):\r\n \r\n print('{}:\\t{} records'.format(stem, hist.shape[0]), flush=True)\r\n col = 'list_ttl_{}'.format(stem)\r\n #list and count\r\n if last_only:\r\n col = 
'list_ttl_{}_last_only'.format(stem)\r\n tmp = hist.groupby('user_id')['title_id'].agg(' '.join).apply(lambda x: x.split()[-1])\r\n \r\n else: \r\n col = 'list_ttl_{}'.format(stem)\r\n tmp = hist.groupby('user_id')['title_id'].agg(' '.join)#.apply(lambda x: x.split())\r\n\r\n tmp = tmp.rename(col).to_frame()\r\n tmp['user_id'] = tmp.index\r\n tmp = tmp.reset_index(drop=True)\r\n \r\n train_data = train_data.merge(tmp, how='left', on='user_id')\r\n test_data = test_data.merge(tmp, how='left', on='user_id')\r\n\r\n train_data = train_data.fillna('')\r\n test_data = test_data.fillna('')\r\n\r\n if last_only:\r\n del tmp\r\n col = 'f_time_lastest_{}_last_only'.format(stem)\r\n tmp = hist.groupby('user_id')['watch_time'].agg(lambda x: ' '.join(str(x))).apply(lambda x: x.split()[-1])\r\n \r\n tmp = tmp.rename(col).to_frame()\r\n tmp['user_id'] = tmp.index\r\n tmp = tmp.reset_index(drop=True)\r\n\r\n train_data = train_data.merge(tmp, how='left', on='user_id')\r\n test_data = test_data.merge(tmp, how='left', on='user_id')\r\n\r\n else:\r\n train_data['f_cnt_{}'.format(stem)] = train_data[col].apply(lambda x: len(x.split()))\r\n test_data['f_cnt_{}'.format(stem)] = test_data[col].apply(lambda x: len(x.split()))\r\n \r\n del tmp\r\n return train_data, test_data\r\n\r\n#int\r\ndef sum_watch_time(train_data, test_data, hist, stem='tmp'):\r\n \r\n #sum time\r\n tmp = hist.groupby('user_id')['watch_time'].sum()\r\n tmp = tmp.rename('f_time_sum_{}'.format(stem)).to_frame()\r\n tmp['user_id'] = tmp.index\r\n tmp = tmp.reset_index(drop=True)\r\n \r\n #merge\r\n train_data = train_data.merge(tmp, how='left', on='user_id') \r\n test_data = test_data.merge(tmp, how='left', on='user_id')\r\n del tmp\r\n\r\n #var time\r\n tmp = hist.groupby('user_id')['watch_time'].var()\r\n tmp = tmp.rename('f_time_var_{}'.format(stem)).to_frame()\r\n tmp['user_id'] = tmp.index\r\n tmp = tmp.reset_index(drop=True)\r\n \r\n #merge\r\n train_data = train_data.merge(tmp, how='left', on='user_id') \r\n test_data = test_data.merge(tmp, how='left', on='user_id')\r\n del tmp\r\n \r\n #median time\r\n tmp = hist.groupby('user_id')['watch_time'].median()\r\n tmp = tmp.rename('f_time_median_{}'.format(stem)).to_frame()\r\n tmp['user_id'] = tmp.index\r\n tmp = tmp.reset_index(drop=True)\r\n \r\n #merge\r\n train_data = train_data.merge(tmp, how='left', on='user_id') \r\n test_data = test_data.merge(tmp, how='left', on='user_id') \r\n del tmp\r\n \r\n train_data = train_data.fillna(0) \r\n test_data = test_data.fillna(0)\r\n\r\n #print(train_data)\r\n return train_data, test_data\r\n\r\n#string\r\ndef trigger_time(train_data, test_data, hist, stem='tmp'):\r\n\r\n tmp = hist.groupby('user_id')['inf_wdhr'].agg(' '.join)#.apply(lambda x: x.split())\r\n tmp = tmp.rename('list_trg_{}'.format(stem)).to_frame()\r\n tmp['user_id'] = tmp.index\r\n tmp = tmp.reset_index(drop=True)\r\n\r\n #merge\r\n train_data = train_data.merge(tmp, how='left', on='user_id')\r\n train_data = train_data.fillna('')\r\n train_data['f_cnt_{}'.format(stem)] = train_data['list_trg_{}'.format(stem)].apply(lambda x: len(x.split()))\r\n \r\n test_data = test_data.merge(tmp, how='left', on='user_id')\r\n test_data = test_data.fillna('')\r\n test_data['f_cnt_{}'.format(stem)] = test_data['list_trg_{}'.format(stem)].apply(lambda x: len(x.split()))\r\n \r\n del tmp\r\n return train_data, test_data\r\n\r\n\r\n#evaluation\r\ndef display_val_score(y, p, r):\r\n v = np.argmax(p, axis=1)\r\n jcc = jaccard_similarity_score(y, v)\r\n acc = accuracy_score(y, v)\r\n print('\\nVal: 
jcc={:.6f}, acc={:.6f}'.format(jcc, acc), flush=True)\r\n print('Adjusted Val: jcc={:.6f}, acc={:.6f}'.format(jcc * ratio, acc * ratio), flush=True) \r\n return jcc\r\n\r\n#\r\ndef write_csv(test_id, labels, t='t', stem='', score=0):\r\n print(\"\\nWriting output...\\n\")\r\n sub = pd.DataFrame()\r\n sub['user_id'] = test_id\r\n sub['title_id'] = labels\r\n print(sub['title_id'].value_counts())\r\n sub.to_csv(\"preds_{}_{}_s{:.6f}.csv\".format(stem, t, jcc * ratio), index=False)\r\n\r\n\r\n#read\r\ninput_folder = '../input/'\r\n####train\r\ntrain_events = pd.read_csv(input_folder + 'events_train.csv', dtype={'user_id': np.str, 'title_id': np.str})\r\ntrain_users = pd.read_csv(input_folder + 'labels_train.csv', dtype={'user_id': np.str, 'title_id': np.str})\r\n####test\r\ntest_events = pd.read_csv(input_folder + 'events_test.csv', dtype={'user_id': np.str, 'title_id': np.str})\r\ntest_users = pd.DataFrame()\r\ntest_users['user_id'] = test_events['user_id'].unique()\r\n\r\n#use top titles from both train and test; \r\nall_events = pd.concat([train_events, test_events]).reset_index(drop=True)\r\nto_time(all_events)\r\n\r\n#clearing labels\r\ntotal = len(train_users)\r\nsel = train_users['title_id'].value_counts()\r\n#print(sel)\r\n#for i in range(100):\r\n# tmp = sel.loc[sel >= i].index.tolist()\r\n# users = train_users.loc[(train_users['title_id'].isin(tmp))]\r\n# print('{}: {}, {} ({:.6f}, {:.6f})'.format(i, len(tmp), len(users), len(users)/total, i/total), flush=True) \r\n \r\nmin_hits = 7 #min1\r\nsel = train_users['title_id'].value_counts()\r\nprint('Existing {} Labels'.format(len(sel)))\r\nsel = sel.loc[sel >= min_hits].index.tolist()\r\nprint('Reduced to {} Labels, removing minors less freq <= {}'.format(len(sel), min_hits), flush=True)\r\ntrain_users = train_users.loc[(train_users['title_id'].isin(sel))]\r\nratio = len(train_users) / total\r\nprint('Ratio = {:.6f}\\n'.format(ratio), flush=True)\r\n\r\n\r\n#all\r\ns = 'overall'\r\ntrain_users, test_users = titles_agg(train_users, test_users, all_events, stem=s)\r\ntrain_users, test_users = sum_watch_time(train_users, test_users, all_events, stem=s)\r\ntrain_users, test_users = trigger_time(train_users, test_users, all_events, stem=s)\r\n\r\ns = 'lastest'\r\ntrain_users, test_users = titles_agg(train_users, test_users, all_events, stem=s, last_only=True)\r\npostfix_stem = 'list_ttl_{}_last_only'.format(s)\r\n\r\n#print(train_users)\r\n\r\n#short=>dislike\r\nt = 60 * 5 #watch_time\r\ns = 'in{:04d}s'.format(t)\r\nsel_events = all_events.loc[(all_events['watch_time'] <= t)]\r\ntrain_users, test_users = titles_agg(train_users, test_users, sel_events, stem=s)\r\n\r\n#medium\r\nt = 60 * 3 #watch_time\r\nm = 60 * 50\r\ns = 'out{:04d}s{:04d}s'.format(t, m)\r\nsel_events = all_events.loc[(all_events['watch_time'] >= t) & (all_events['watch_time'] <= m)]\r\ntrain_users, test_users = titles_agg(train_users, test_users, sel_events, stem=s)\r\n\r\n#long\r\nt = 60 * 40 #watch_time\r\ns = 'out{:04d}s'.format(t)\r\nsel_events = all_events.loc[(all_events['watch_time'] >= t)]\r\ntrain_users, test_users = titles_agg(train_users, test_users, sel_events, stem=s)\r\n\r\n#lastest-1\r\n#recent intested in\r\nw = 39 - 1 #w-th week\r\nt = 60 * 3 #watch_time\r\ns = 'out{:04d}s{}w'.format(t, w)\r\nsel_events = all_events.loc[(all_events['watch_time'] >= t) & (all_events['inf_week'] >= w)]\r\ntrain_users, test_users = titles_agg(train_users, test_users, sel_events, stem=s)\r\ntrain_users, test_users = sum_watch_time(train_users, test_users, sel_events, 
stem=s)\r\ntrain_users, test_users = trigger_time(train_users, test_users, all_events, stem=s)\r\n\r\nprint(train_users.shape)\r\n\r\n\r\n#features list\r\nf_ttl = [s for s in train_users.columns.tolist() if s.startswith('list_ttl')]\r\nprint('{}: {}'.format(len(f_ttl), f_ttl))\r\nf_trg = [s for s in train_users.columns.tolist() if s.startswith('list_trg')]\r\nprint('{}: {}'.format(len(f_trg), f_trg))\r\nf_num = [s for s in train_users.columns.tolist() if s.startswith('f_')]\r\nprint('{}: {}'.format(len(f_num), f_num))\r\n\r\n#dataset\r\ntarget_lbl = LabelEncoder()\r\ncandidates = train_users['title_id'].tolist() + train_users[postfix_stem].tolist() + test_users[postfix_stem].tolist()\r\ncandidates = target_lbl.fit_transform(candidates)\r\ntrain_y = target_lbl.transform(train_users['title_id'].tolist())\r\n#y_max = max(train_y) + 1\r\ny_max = max(candidates) + 1\r\nprint(train_y.shape)\r\n#positx\r\ntrain_postfix = target_lbl.transform(train_users[postfix_stem].tolist())\r\ntest_postfix = target_lbl.transform(test_users[postfix_stem].tolist())\r\n\r\n#numerics\r\nfor f in f_num:\r\n train_users[f] = train_users[f].apply(np.nan_to_num)\r\n test_users[f] = test_users[f].apply(np.nan_to_num)\r\n #print(train_users[f])\r\n \r\nscalar = MinMaxScaler(feature_range=(0, 1), copy=True)\r\ntrain_users[f_num] = scalar.fit_transform(train_users[f_num])\r\ntest_users[f_num] = scalar.transform(test_users[f_num])\r\n\r\ntrain_X_num = train_users[f_num].as_matrix()\r\ntest_X_num = test_users[f_num].as_matrix()\r\n\r\n\r\ntrain_X = [train_X_num]\r\ntest_X = [test_X_num]\r\n\r\n#CountVec Merged\r\nttl_cnt = len(list(all_events['title_id'].unique()))\r\ncntVec = CountVectorizer(ngram_range=(1, 1), analyzer='word')\r\ncntVec.fit(all_events['title_id'])\r\n#cntVec.fit(candidates)\r\nfor f in f_ttl:\r\n add = cntVec.transform(train_users[f])\r\n add = np.log1p(add)\r\n #train_X = sparse.hstack((train_X, add)).todense()\r\n train_X.append(add.todense())\r\n print('{} +{}'.format(f, add.shape[1]), flush=True)\r\n #del add\r\n #ttl_cnt = add.todense().shape[1]\r\n \r\n add = cntVec.transform(test_users[f])\r\n add = np.log1p(add)\r\n #test_X = sparse.hstack((test_X, add)).todense()\r\n test_X.append(add.todense())\r\n #del add\r\n\r\n#CountVec Merged\r\n#wdhr = len(list(all_events['inf_wdhr'].unique()))\r\ncntVec = CountVectorizer(ngram_range=(1, 1), analyzer='word')\r\ncntVec.fit(all_events['inf_wdhr'])\r\nfor f in f_trg:\r\n add = cntVec.transform(train_users[f])\r\n add = np.log1p(add)\r\n #train_X = sparse.hstack((train_X, add)).todense()\r\n train_X.append(add.todense())\r\n print('{} +{}'.format(f, add.shape[1]), flush=True)\r\n #del add\r\n \r\n add = cntVec.transform(test_users[f])\r\n add = np.log1p(add)\r\n #test_X = sparse.hstack((test_X, add)).todense()\r\n test_X.append(add.todense())\r\n #del add\r\n \r\n wdhr = add.todense().shape[1]\r\n\r\nprint('\\ndims for each feature', flush=True)\r\ninputs_ndim = []\r\nfor x in train_X:\r\n print(x.shape, flush=True)\r\n inputs_ndim.append(x.shape[1])\r\n\r\n#fold for CV\r\nprint('Assigning CV', flush=True)\r\nnr_splits = 7\r\nfold_gen_seed = 62017\r\ntrain_sets, valid_sets = list(), list()\r\nfold_gen = StratifiedKFold(n_splits=nr_splits, shuffle=True, random_state=fold_gen_seed)\r\nfor train_indices, valid_indices in fold_gen.split(train_y, train_y):\r\n train_sets.append(train_indices)\r\n valid_sets.append(valid_indices)\r\n\r\n\r\nX_train = []\r\nX_valid = []\r\ny_train = train_y[train_sets[0]]\r\ny_valid = 
train_y[valid_sets[0]]\r\n#postfix\r\npostfix_valid = train_postfix[valid_sets[0]]\r\n\r\nfor x in train_X:\r\n X_train.append(x[train_sets[0]])\r\n X_valid.append(x[valid_sets[0]])\r\n\r\n\r\ntmstmp = '{}'.format(time.strftime(\"%Y-%m-%d-%H-%M\"))\r\n\r\n# define the model structure\r\n########################################\r\ninputs_collected = []\r\ndense_collected = []\r\n\r\nnum_dence_input = Input(shape=(inputs_ndim[0],))#, dtype='int32')\r\n\r\n#ordinary dense\r\nnum_dence = Dense(32, activation='relu')(num_dence_input)\r\n\r\ninputs_collected.append(num_dence_input)\r\ndense_collected.append(num_dence)\r\n\r\n#shared dense\r\ndense_ttl = Dense(16, activation='relu')#16 * 6\r\ndense_wdhr = Dense(8, activation='relu')\r\n\r\nfor x in inputs_ndim:\r\n #for titles\r\n if x == ttl_cnt:\r\n ttl_dence_input = Input(shape=(ttl_cnt,))#, dtype='int32')\r\n ttl_dence1 = dense_ttl(ttl_dence_input)\r\n \r\n inputs_collected.append(ttl_dence_input)\r\n dense_collected.append(ttl_dence1)\r\n \r\n #for wdhr\r\n if x == wdhr:\r\n wdhr_dence_input = Input(shape=(wdhr,))#, dtype='int32')\r\n wdhr_dence1 = dense_wdhr(wdhr_dence_input)\r\n \r\n inputs_collected.append(wdhr_dence_input)\r\n dense_collected.append(wdhr_dence1) \r\n \r\nconcat = concatenate(dense_collected, axis=-1)\r\n\r\n#final\r\ndense_bn = BatchNormalization()(concat)\r\ndense_dp1 = Dropout(0.25)(dense_bn)\r\ndense_ds1 = Dense(256, activation='relu')(dense_dp1)\r\ndense_dp2 = Dropout(0.5)(dense_ds1)\r\noutput = Dense(y_max, activation='softmax')(dense_dp2)\r\n\r\nmodel = Model(inputs=inputs_collected, outputs=output)\r\nmodel.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['categorical_accuracy'])\r\n\r\n\r\ntrain_keras = True\r\n#train_keras = False\r\n\r\nif train_keras:\r\n print(model.summary(), flush=True)\r\n print('Training keras', flush=True)\r\n\r\n #callback\r\n early_stopping = EarlyStopping(monitor='val_loss', patience=10)\r\n bst_model_path = tmstmp + '.h5'\r\n model_checkpoint = ModelCheckpoint(bst_model_path, save_best_only=True, save_weights_only=True)\r\n\r\n #fit\r\n hist = model.fit(X_train, y_train, validation_data=(X_valid, y_valid), epochs=1000, batch_size=128, shuffle=True, callbacks=[early_stopping, model_checkpoint])\r\n # class_weight=class_weight, callbacks=[early_stopping, model_checkpoint])\r\n \r\n model.load_weights(bst_model_path)\r\n bst_val_score = min(hist.history['val_loss'])\r\n \r\n val_probs = model.predict(X_valid, batch_size=4096, verbose=1)\r\n jcc = display_val_score(y=y_valid, p=val_probs, r=ratio) \r\n \r\n # make the submission\r\n print('\\n\\nPrediction', flush=True)\r\n probs = model.predict(test_X, batch_size=4096, verbose=1)\r\n #\r\n preds = target_lbl.inverse_transform(np.argmax(probs, axis=1))\r\n write_csv(test_id=test_users['user_id'], labels=preds, t=tmstmp, stem='keras', score=jcc * ratio)\r\nelse:\r\n val_probs = np.zeros((X_valid[0].shape[0], y_max))\r\n probs = np.zeros((test_users.shape[0], y_max))\r\n\r\n\r\n#sklearn\r\nX_train = np.nan_to_num(np.array(np.concatenate(X_train, axis=1)))\r\nX_valid = np.nan_to_num(np.array(np.concatenate(X_valid, axis=1)))\r\nprint(X_train.shape, X_valid.shape)\r\ntest_X = np.nan_to_num(np.array(np.concatenate(test_X, axis=1)))\r\n\r\n#rescale\r\nscalar = MinMaxScaler(feature_range=(0, 1), copy=True)\r\nX_train = scalar.fit_transform(X_train)\r\nX_valid = scalar.transform(X_valid)\r\ntest_X = scalar.transform(test_X)\r\n\r\n\r\ntrain_sklearn = True\r\n#train_sklearn = False\r\n\r\nif train_sklearn:\r\n 
print('\\nGBM', flush=True)\r\n params = {}\r\n params['num_threads'] = 4\r\n #params['boost'] = 'gbdt'\r\n params['boost'] = 'dart'\r\n #params['num_class'] = 1\r\n #params['metric'] = 'multi_logloss' \r\n #params['objective'] = 'multiclass'\r\n params['is_unbalance'] = True\r\n params['metric'] = 'binary_logloss'\r\n params['objective'] = 'binary'\r\n params['min_data_in_leaf'] = 2 ** 1 #default 100\r\n \r\n #learning\r\n params['learning_rate'] = 0.11\r\n params['num_leaves'] = 2 ** 5\r\n \r\n if params.get('boost') == 'dart':\r\n params['drop_rate'] = 0.25 #dart, deafault 0.1\r\n params['skip_drop'] = 0.75 #dart, deafault 0.5\r\n params['max_drop'] = 50 #dart, deafault 50\r\n params['uniform_drop'] = False #dart, deafault False\r\n params['xgboost_dart_mode'] = False #dart, deafault False\r\n #params['xgboost_dart_mode'] = True #dart, deafault False\r\n \r\n #params['min_hessian'] = 10.0 #default 10.0\r\n params['feature_fraction'] = 0.5 #default=1.0\r\n params['bagging_fraction'] = 0.7 #default=1.0\r\n params['bagging_freq'] = 3\r\n params['lambda_l1'] = 0.007 #default 0\r\n params['lambda_l2'] = 0.019 #default 0\r\n params['data_random_seed'] = 62017\r\n params['verbose'] = 0 #<0 = Fatel, =0 = Error(Warn), >0 = Info\r\n \r\n #metric\r\n params['metric_freq'] = 5 #deafult 1\r\n\r\n max_bin = 2 ** 13\r\n \r\n num_rounds, min_rounds = 250, 10\r\n # \r\n sk_probs = np.zeros((X_valid.shape[0], y_max))\r\n test_probs = np.zeros((test_users.shape[0], y_max)) \r\n\r\n y_train_sparse = np.zeros((X_train.shape[0], y_max))\r\n for i, j in enumerate(y_train):\r\n y_train_sparse[i, j] = 1\r\n\r\n y_valid_sparse = np.zeros((X_valid.shape[0], y_max))\r\n for i, j in enumerate(y_valid):\r\n y_valid_sparse[i, j] = 1\r\n\r\n i = 0\r\n for c in range(y_max):\r\n if np.sum(y_train_sparse[:, c]) > 0:\r\n print('lightGBM w/ eta={} leaves={}'.format(params['learning_rate'], params['num_leaves']))\r\n dtrain = lgb.Dataset(X_train, label=y_train_sparse[:, c], weight=None, max_bin=max_bin, reference=None, free_raw_data=False)\r\n dvalid = lgb.Dataset(X_valid, label=y_valid_sparse[:, c], reference=X_train, free_raw_data=False)\r\n gbm = lgb.train(params, dtrain, valid_sets=[dtrain, dvalid], valid_names=['tr', 'va'], \r\n num_boost_round=num_rounds, early_stopping_rounds=min_rounds)\r\n \r\n sk_probs[:, c] = gbm.predict(X_valid, num_iteration=gbm.best_iteration)[:]#[:, 1]\r\n test_probs[:, c] = gbm.predict(test_X, num_iteration=gbm.best_iteration)[:]#[:, 1]\r\n i += 1\r\n print('no{:04d}: {:04d}'.format(i, c), flush=True)\r\n \r\n jcc = display_val_score(y=y_valid, p=sk_probs, r=ratio)\r\n \r\n #\r\n preds = target_lbl.inverse_transform(np.argmax(test_probs, axis=1))\r\n write_csv(test_id=test_users['user_id'], labels=preds, t=tmstmp, stem='gbm', score=jcc* ratio)\r\n\r\n w = 0.8\r\n val_probs += sk_probs * w\r\n probs += test_probs * w\r\n\r\nopt_postfix = True\r\n#opt_postfix = False\r\nif opt_postfix:\r\n print('\\nPostFix Labels')\r\n max_iter = 1000\r\n fix, best_fix, best_jcc = 0.001, 0, 0\r\n for k in range(max_iter+1):\r\n #fixing\r\n eval_probs = val_probs.copy()\r\n for i, j in enumerate(postfix_valid):\r\n eval_probs[i, j] += fix * k\r\n \r\n #eval\r\n jcc = jaccard_similarity_score(y_valid, np.argmax(eval_probs, axis=1))\r\n if jcc > best_jcc:\r\n best_jcc = jcc\r\n best_fix = fix * k\r\n print('*current best jcc={:.6f} w/ fix={:.3f}'.format(best_jcc, best_fix), flush=True)\r\n \r\n print('Best jcc={:.6f} w/ fix={:.3f}'.format(best_jcc, best_fix), flush=True)\r\n print('Adjusted best jcc={:.6f} w/ 
fix={:.3f}'.format(best_jcc * ratio, best_fix), flush=True)\r\n jcc = best_jcc * ratio\r\n for i, j in enumerate(test_postfix):\r\n probs[i, j] += best_fix\r\n \r\n #make the submission\r\n print('\\n\\nPrediction', flush=True)\r\n preds = target_lbl.inverse_transform(np.argmax(probs, axis=1))\r\n write_csv(test_id=test_users['user_id'], labels=preds, t=tmstmp, stem='keras_fix', score=jcc * ratio)\r\n\r\n \r\n \r\n \r\n", "id": "5040242", "language": "Python", "matching_score": 4.053896427154541, "max_stars_count": 0, "path": "KKBOXensemble.py" }, { "content": "#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nThis script is for training a ConvNet model, which its structure is defined \r\nat 'models.py' with some parameters of structure and weights' location is \r\nset at 'param.py'. \r\n\r\nThe ConvNet model takes multi-inputs: 1) TWO-channel with capability to perform \r\naugmentations from 'augmentations.py' and 2) meta info such as 'inc_angle'. \r\nFour types of augmentations: 'Flip', 'Rotate', 'Shift', 'Zoom' are available.\r\n\r\n@author: cttsai (<NAME>), @Oct 2017\r\n\"\"\"\r\nimport os\r\n#\r\nimport numpy as np # linear algebra\r\nimport pandas as pd # data processing\r\nimport datetime as dt\r\n#\r\nfrom random import shuffle, uniform, seed\r\n#evaluation\r\nfrom sklearn.model_selection import StratifiedShuffleSplit\r\nfrom sklearn.metrics import log_loss\r\n#\r\nfrom keras.optimizers import Adam, SGD\r\nfrom keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint, TensorBoard\r\n#\r\nimport augmentations as aug\r\nimport utils\r\nimport models\r\nimport params\r\n\r\n###############################################################################\r\n\r\ndef data_generator(data=None, meta_data=None, labels=None, batch_size=16, augment={}, opt_shuffle=True):\r\n \r\n indices = [i for i in range(len(labels))]\r\n \r\n while True:\r\n \r\n if opt_shuffle:\r\n shuffle(indices)\r\n \r\n x_data = np.copy(data)\r\n x_meta_data = np.copy(meta_data)\r\n x_labels = np.copy(labels)\r\n \r\n for start in range(0, len(labels), batch_size):\r\n end = min(start + batch_size, len(labels))\r\n sel_indices = indices[start:end]\r\n \r\n #select data\r\n data_batch = x_data[sel_indices]\r\n xm_batch = x_meta_data[sel_indices]\r\n y_batch = x_labels[sel_indices]\r\n x_batch = []\r\n \r\n for x in data_batch:\r\n \r\n #augment \r\n if augment.get('Rotate', False):\r\n x = aug.Rotate(x, u=0.1, v=np.random.random())\r\n x = aug.Rotate90(x, u=0.1, v=np.random.random())\r\n\r\n if augment.get('Shift', False):\r\n x = aug.Shift(x, u=0.05, v=np.random.random())\r\n\r\n if augment.get('Zoom', False):\r\n x = aug.Zoom(x, u=0.05, v=np.random.random())\r\n\r\n if augment.get('Flip', False):\r\n x = aug.HorizontalFlip(x, u=0.5, v=np.random.random())\r\n x = aug.VerticalFlip(x, u=0.5, v=np.random.random())\r\n\r\n x_batch.append(x)\r\n \r\n x_batch = np.array(x_batch, np.float32)\r\n \r\n yield [x_batch, xm_batch], y_batch\r\n \r\n\r\n###############################################################################\r\nif __name__ == '__main__':\r\n \r\n np.random.seed(1017)\r\n target = 'is_iceberg'\r\n \r\n #Load data\r\n train, train_bands = utils.read_jason(file='train.json', loc='../input/')\r\n test, test_bands = utils.read_jason(file='test.json', loc='../input/')\r\n \r\n #target\r\n train_y = train[target].values\r\n split_indices = train_y.copy()\r\n \r\n #data set\r\n train_X = utils.rescale(train_bands)\r\n train_meta = train['inc_angle'].values\r\n test_X_dup = 
utils.rescale(test_bands)\r\n test_meta = test['inc_angle'].values\r\n\r\n #training keras\r\n #model\r\n nb_filters = params.nb_filters\r\n nb_dense = params.nb_dense\r\n weights_file = params.weights_file\r\n model = models.get_model(img_shape=(75, 75, 2), f=nb_filters, h=nb_dense)\r\n weights_init = params.weights_init\r\n model.save(weights_init)\r\n #training\r\n epochs = params.epochs\r\n batch_size = params.batch_size\r\n print('epochs={}, batch={}'.format(epochs, batch_size), flush=True)\r\n opt_augments = {'Flip': False, 'Rotate': False, 'Shift': False, 'Zoom': False}\r\n opt_augments['Flip'] = True\r\n opt_augments['Rotate'] = True\r\n opt_augments['Shift'] = True\r\n opt_augments['Zoom'] = True \r\n print(opt_augments)\r\n\r\n #train, validataion split\r\n test_ratio = 0.159\r\n nr_runs = 1\r\n split_seed = 25\r\n kf = StratifiedShuffleSplit(n_splits=nr_runs, test_size=test_ratio, train_size=None, random_state=split_seed)\r\n\r\n #training, evaluation, test and make submission\r\n for r, (train_index, valid_index) in enumerate(kf.split(train, split_indices)):\r\n\r\n tmp = dt.datetime.now().strftime(\"%Y-%m-%d-%H-%M\")\r\n \r\n y1, y2 = train_y[train_index], train_y[valid_index]\r\n x1, x2 = train_X[train_index], train_X[valid_index]\r\n xm1, xm2 = train_meta[train_index], train_meta[valid_index]\r\n\r\n print('splitted: {0}, {1}'.format(x1.shape, x2.shape), flush=True)\r\n print('splitted: {0}, {1}'.format(y1.shape, y2.shape), flush=True)\r\n ################################\r\n if r > 0:\r\n model.load_weights(weights_init)\r\n \r\n #optim = SGD(lr=0.005, momentum=0.0, decay=0.002, nesterov=True)\r\n optim = Adam(lr=0.0005, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.002)\r\n \r\n model.compile(optimizer=optim, loss=\"binary_crossentropy\", metrics=[\"accuracy\"])\r\n #call backs\r\n earlystop = EarlyStopping(monitor='val_loss', patience=100, verbose=1, min_delta=1e-4, mode='min')\r\n reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=40, verbose=1, epsilon=1e-4, mode='min')\r\n model_chk = ModelCheckpoint(monitor='val_loss', filepath=weights_file, save_best_only=True, save_weights_only=True, mode='min')\r\n \r\n callbacks = [earlystop, reduce_lr_loss, model_chk, TensorBoard(log_dir='../logs')]\r\n ##########\r\n \r\n model.fit_generator(generator=data_generator(x1, xm1, y1, batch_size=batch_size, augment=opt_augments),\r\n steps_per_epoch= np.ceil(8.0 * float(len(y1)) / float(batch_size)),\r\n epochs=epochs,\r\n verbose=2,\r\n callbacks=callbacks,\r\n validation_data=data_generator(x2, xm2, y2, batch_size=batch_size),\r\n validation_steps=np.ceil(8.0 * float(len(y2)) / float(batch_size)))\r\n\r\n\r\n if os.path.isfile(weights_file):\r\n\r\n model.load_weights(weights_file)\r\n \r\n p = model.predict([x2, xm2], batch_size=batch_size, verbose=1)\r\n print('\\n\\nEvaluate loss in validation data: {}'.format(log_loss(y2, p)), flush=True)\r\n\r\n p = model.predict([x1, xm1], batch_size=batch_size, verbose=1)\r\n print('\\n\\nEvaluate loss in training data: {}'.format(log_loss(y1, p)), flush=True)\r\n \r\n print('\\nPredict...', flush=True)\r\n ids = test['id'].values\r\n\r\n #prediction\r\n pred = model.predict([test_X_dup, test_meta], batch_size=batch_size, verbose=1)\r\n pred = np.squeeze(pred, axis=-1)\r\n \r\n file = 'subm_{}_f{:03d}.csv'.format(tmp, nb_filters)\r\n subm = pd.DataFrame({'id': ids, target: pred})\r\n subm.to_csv('../submit/{}'.format(file), index=False, float_format='%.6f')\r\n\r\n", "id": "1180556", "language": "Python", 
"matching_score": 7.319066047668457, "max_stars_count": 26, "path": "scripts/cnn_train.py" }, { "content": "#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nThis script is for predicting imgs using model, which uses 'param.py' to define\r\nstructure and to locate weights of model\r\n\r\n@author: cttsai (<NAME>), @Oct 2017\r\n\"\"\"\r\n\r\nimport os\r\n#\r\nimport numpy as np # linear algebra\r\nimport pandas as pd # data processing\r\nimport datetime as dt\r\n#\r\nfrom sklearn.metrics import log_loss\r\n#\r\nimport utils\r\nimport models\r\nimport params\r\n \r\n###############################################################################\r\nif __name__ == '__main__':\r\n \r\n np.random.seed(1017)\r\n target = 'is_iceberg'\r\n \r\n #Load data\r\n test, test_bands = utils.read_jason(file='test.json', loc='../input/') \r\n test_X_dup = utils.rescale(test_bands)\r\n test_meta = test['inc_angle'].values \r\n \r\n tmp = dt.datetime.now().strftime(\"%Y-%m-%d-%H-%M\")\r\n file_weights = '../weights/weights_current.hdf5'\r\n \r\n if os.path.isfile(file_weights):\r\n \r\n #define and load model\r\n nb_filters = params.nb_filters\r\n nb_dense = params.nb_dense\r\n weights_file = params.weights_file \r\n model = models.get_model(img_shape=(75, 75, 2), f=nb_filters, h=nb_dense)\r\n model.load_weights(weights_file)\r\n \r\n #\r\n batch_size = params.batch_size_test\r\n \r\n if params.validate_before_test:\r\n \r\n train, train_bands = utils.read_jason(file='train.json', loc='../input/') \r\n train_X = utils.rescale(train_bands)\r\n train_meta = train['inc_angle'].values\r\n train_y = train[target].values\r\n print('\\nPredict training data as validation: {} {}'.format(train_X.shape, train_meta.shape), flush=True)\r\n \r\n p = model.predict([train_X, train_meta], batch_size=batch_size, verbose=1)\r\n print('\\nValid loss on training data: {}'.format(log_loss(train_y, p)), flush=True)\r\n\r\n print('\\nPredict test data: {} {}'.format(test_X_dup.shape, test_meta.shape), flush=True)\r\n ids = test['id'].values\r\n\r\n #prediction\r\n pred = model.predict([test_X_dup, test_meta], batch_size=batch_size, verbose=1)\r\n pred = np.squeeze(pred, axis=-1)\r\n \r\n file = 'subm_{}_f{:03d}.csv'.format(tmp, nb_filters)\r\n print('\\nSave to {}'.format(file))\r\n subm = pd.DataFrame({'id': ids, target: pred})\r\n subm.to_csv('../submit/{}'.format(file), index=False, float_format='%.6f')\r\n", "id": "2933237", "language": "Python", "matching_score": 3.5874433517456055, "max_stars_count": 26, "path": "scripts/cnn_predict.py" }, { "content": "#model\r\nnb_filters = 8\r\nnb_dense = 128\r\n#train\r\nbatch_size = 64\r\nepochs = 3\r\n#weights\r\nweights_init = '../weights/weights_init.hdf5'\r\nweights_file = '../weights/weights_current.hdf5'\r\n#test\r\nbatch_size_test = batch_size\r\nvalidate_before_test = True\r\n", "id": "9372540", "language": "Python", "matching_score": 0.8019939661026001, "max_stars_count": 26, "path": "scripts/params.py" }, { "content": "from typing import Optional, Tuple, Callable\nimport numpy as np\nimport logging\n\nfrom tensorflow.keras.callbacks import Callback\n\n\nclass CustomMetricEarlyStoppingCallback(Callback):\n def __init__(\n self, data: Tuple[np.array], training_data: Optional[Tuple[np.array]] = None,\n score_func: Callable = None, min_delta: float = 0, patience: int = 0, verbose: int = 0, mode: str = 'auto',\n baseline: float = None, restore_best_weights: bool = False):\n\n super().__init__()\n\n self.x_train: Optional[np.arary] = None\n self.y_train: 
Optional[np.arary] = None\n if training_data is not None:\n self.x_train, self.y_train = training_data\n\n self.x_valid, self.y_valid = data\n self.score_func = score_func\n\n self.patience = patience\n self.verbose = verbose\n self.baseline = baseline\n self.min_delta = abs(min_delta)\n self.wait = 0\n self.stopped_epoch = 0\n self.restore_best_weights = restore_best_weights\n self.best_weights = None\n\n if mode not in ['auto', 'min', 'max']:\n logging.warning(f'EarlyStopping mode {mode} is unknown, fallback to auto mode.')\n mode = 'auto'\n\n if mode == 'min':\n self.monitor_op = np.less\n elif mode == 'max':\n self.monitor_op = np.greater\n else:\n self.monitor_op = np.greater\n\n if self.monitor_op == np.greater:\n self.min_delta *= 1\n else:\n self.min_delta *= -1\n\n def on_train_begin(self, logs=None):\n # Allow instances to be re-used\n self.wait = 0\n self.stopped_epoch = 0\n self.best_weights = self.model.get_weights()\n if self.baseline is not None:\n self.best = self.baseline\n else:\n self.best = np.Inf if self.monitor_op == np.less else -np.Inf\n\n def on_epoch_end(self, epoch, logs=None):\n current = self.score_func(self.y_valid, self.model.predict(self.x_valid))\n if self.y_train is not None and self.x_train is not None:\n current_train = self.score_func(self.y_train, self.model.predict(self.x_train))\n diff = current_train - current\n print(\n f'\\nEarlyStopping Metric: {current:.3f}, training: {current_train:.3f}, fitting diff: {diff:.3f}\\n')\n else:\n print(\n f'\\nEarlyStopping Metric: {current:.3f}, best: {self.best:.3f}\\n')\n\n if current is None:\n return\n\n if self.monitor_op(current - self.min_delta, self.best):\n self.best = current\n self.wait = 0\n if self.restore_best_weights:\n self.best_weights = self.model.get_weights()\n else:\n self.wait += 1\n if self.wait >= self.patience:\n self.stopped_epoch = epoch\n self.model.stop_training = True\n if self.restore_best_weights:\n if self.verbose > 0:\n print('Restoring model weights from the end of the best epoch.')\n self.model.set_weights(self.best_weights)\n\n def on_train_end(self, logs=None):\n if self.stopped_epoch > 0 and self.verbose > 0:\n print('Epoch %05d: early stopping' % (self.stopped_epoch + 1))\n\n def get_monitor_value(self, logs):\n logs = logs or {}\n monitor_value = None\n return monitor_value\n", "id": "1241346", "language": "Python", "matching_score": 3.319838285446167, "max_stars_count": 2, "path": "nlp_utils/Callback/CustomMetricEarlyStopping.py" }, { "content": "from .CustomMetricEarlyStopping import CustomMetricEarlyStoppingCallback\n", "id": "8504501", "language": "Python", "matching_score": 0.162636399269104, "max_stars_count": 2, "path": "nlp_utils/Callback/__init__.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n'''\nCreated on Thu Jun 28 2018\n\n@author: cttsai\n'''\n\nfrom xgboost import XGBClassifier\nfrom lightgbm import LGBMClassifier\nfrom catboost import CatBoostClassifier\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom skopt.space import Real, Integer\n\n\ncv_type = 'StratifiedKFold'\nn_jobs = -1\n\n\n#default loc at file_dir_path['data']\nfileFeatureImportance = []\n\nModelConfigs = {\n 'LossGuideXGB': {\n 'model': XGBClassifier,\n\n 'hyperparameters' : {}, # obtained from finetuned\n 'hyperparameter_optimization': {\n #skopt itself\n 'search_settings':{\n 'n_calls' : 50,\n 'n_inits' : 8,\n 'random_state': 42,\n },\n #cv settings\n 'evaluation_settings': {\n 'validation' : 
'StratifiedKFold',\n 'nr_fold' : 5, # CV\n 'nr_splits' : 3, # Splits\n 'train_size' : 0.5, # split size\n 'valid_size' : 0.5, # split size\n 'split_seed' : 538,\n 'eval_metric' : 'roc_auc', # sklearn scorer\n },\n #\n 'initialize': {\n 'objective' : 'binary:logistic',\n 'booster' : 'gbtree',\n# 'n_jobs' : n_jobs,\n 'n_estimators' : 1600,\n 'tree_method' : 'hist',\n 'grow_policy' : 'lossguide',\n 'max_depth' : 7, # deafult=6\n 'base_score' : 0.95,\n 'max_delta_step' : 3, #default=0\n },\n #skopt\n 'search_space': {\n 'scale_pos_weight' : Real(2, 16, 'log-uniform'),\n# 'max_depth' : Integer(8, 15),\n 'learning_rate' : Real(1e-3, 1e-1, 'log-uniform'),\n 'max_leaves' : Integer(11, 47),\n 'min_child_weight' : Integer(2, 64),\n 'gamma' : Real(1e-4, 1e-1, 'log-uniform'), # default=0\n 'subsample' : Real(0.6, 0.9),\n 'colsample_bytree' : Real(0.5, 0.9),\n 'reg_alpha' : Real(1e-5, 1e-2, 'log-uniform'), # default=0\n 'reg_lambda' : Real(1e-2, 1e1, 'log-uniform'), # default=1\n },\n },\n }, # LossGuideXGB\n\n #sample lgbm\n 'LGBM': {\n 'model': LGBMClassifier,\n 'hyperparameter_optimization': {\n #skopt itself\n 'search_settings':{\n 'n_calls' : 40,\n 'n_inits' : 8,\n 'random_state': 42,\n },\n #cv settings\n 'evaluation_settings': {\n 'validation' : cv_type,\n 'nr_fold' : 5, # CV\n 'nr_splits' : 3, # Splits\n 'train_size' : 0.5, # split size\n 'valid_size' : 0.5, # split size\n 'split_seed' : 538,\n 'eval_metric' : 'roc_auc', # sklearn scorer\n },\n #\n 'initialize': {\n 'device' : 'cpu',\n 'objective' : 'binary',\n 'boosting_type' : 'gbdt',\n 'n_jobs' : n_jobs,\n 'max_depth' : 8,\n 'n_estimators' : 2000,\n 'subsample_freq' : 2,\n 'subsample_for_bin' : 200000,\n 'min_data_per_group': 100, #default=100\n 'max_cat_to_onehot' : 4, #default=4\n 'cat_l2' : 10., #default=10\n 'cat_smooth' : 10., #default=10\n 'max_cat_threshold' : 32, #default=32\n 'metric_freq' : 10,\n 'verbosity' : -1,\n 'metric' : 'auc',\n# 'metric' : 'binary_logloss',\n },\n #skopt\n 'search_space': {\n 'num_leaves' : Integer(15, 63),\n 'learning_rate' : Real(1e-3, 1e-1, 'log-uniform'),\n 'scale_pos_weight' : Real(2, 16, 'log-uniform'),\n 'min_split_gain' : Real(1e-4, 1e-1, 'log-uniform'), # defult=0\n 'min_child_weight' : Real(1e-2, 1e2, 'log-uniform'), # defaul=1e-3\n 'min_child_samples' : Integer(10, 80), # defult=20\n 'subsample' : Real(0.6, 0.9),\n 'colsample_bytree' : Real(0.5, 0.9),\n 'reg_alpha' : Real(1e-5, 1e-1, 'log-uniform'), # defult=0\n 'reg_lambda' : Real(1e-4, 1e-0, 'log-uniform'), # defult=0\n 'cat_l2' : Real(1e0, 1e2, 'log-uniform'), #default=10\n 'cat_smooth' : Real(1e0, 1e2, 'log-uniform'), #default=10\n },\n },\n }, # LGBM\n\n 'ScikitRF': {\n 'model': RandomForestClassifier,\n 'hyperparameter_optimization': {\n #skopt itself\n 'search_settings':{\n 'n_calls' : 16,\n 'n_inits' : 8,\n 'random_state': 42,\n },\n #cv settings\n 'evaluation_settings': {\n 'validation' : 'StratifiedKFold',\n 'nr_fold' : 5, # CV\n 'nr_splits' : 3, # Splits\n 'train_size' : 0.5, # split size\n 'valid_size' : 0.5, # split size\n 'split_seed' : 538,\n 'eval_metric' : 'roc_auc', # sklearn scorer\n },\n #\n 'initialize': {\n 'criterion' : 'entropy', #'gini',\n 'oob_score' : True,\n 'n_jobs' : -1,\n 'random_state': 42,\n# 'class_weight': 'balanced'\n },\n #skopt\n 'search_space': {\n 'n_estimators' : Integer(800, 1600),\n 'min_samples_split': Integer(16, 64),\n 'min_samples_leaf' : Integer(2, 15),\n 'max_leaf_nodes' : Integer(63, 511),\n 'max_depth' : Integer(10, 16),\n },\n },\n }, #RF\n\n 'ScikitXT': {\n 'model': ExtraTreesClassifier,\n 
'hyperparameter_optimization': {\n #skopt itself\n 'search_settings':{\n 'n_calls' : 16,\n 'n_inits' : 8,\n 'random_state': 42,\n },\n #cv settings\n 'evaluation_settings': {\n 'validation' : 'StratifiedKFold',\n 'nr_fold' : 5, # CV\n 'nr_splits' : 3, # Splits\n 'train_size' : 0.5, # split size\n 'valid_size' : 0.5, # split size\n 'split_seed' : 538,\n 'eval_metric' : 'roc_auc', # sklearn scorer\n },\n #\n 'initialize': {\n 'criterion' : 'entropy', #'gini',\n 'n_jobs' : -1,\n 'random_state': 42,\n# 'class_weight': 'balanced'\n },\n #skopt\n 'search_space': {\n 'n_estimators' : Integer(1000, 2000),\n 'min_samples_split': Integer(11, 25),\n 'min_samples_leaf' : Integer(2, 10),\n 'max_leaf_nodes' : Integer(255, 765),\n 'max_depth' : Integer(10, 16),\n },\n },\n }, #XT\n\n}\n", "id": "12001909", "language": "Python", "matching_score": 4.94395112991333, "max_stars_count": 0, "path": "configs/SampleModelConfigs.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu July 29 2018\n\n@author: cttsai\n\"\"\"\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier\nfrom sklearn.ensemble import BaggingClassifier, AdaBoostClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom xgboost import XGBClassifier\nfrom lightgbm import LGBMClassifier\n\n\n# parsing result from './data'\nExternalMetaConfigs = {\n 'SampleLGBM': [],\n}\n\n\nStackerConfigs = {\n # meta-stacking from mlxtender and external results\n 'stacker': [\n {'name' : 'LinearStacker',\n 'meta_classifier' : SGDClassifier,\n 'params' : {\n 'loss' : 'modified_huber',\n 'penalty' : 'elasticnet',\n 'l1_ratio' : 0.15,\n 'n_jobs' : -1,\n 'random_state': 42,\n 'max_iter' : 2000,\n },\n 'cv' : 3,\n 'use_features' : False,\n 'stratify' : True,\n 'base_classifiers': [\n {'model' : BaggingClassifier,\n 'params': {\n 'base_estimator': 'HuberLR',\n 'n_estimators': 10,\n 'max_samples': 0.9,\n 'max_features': 0.8,\n 'n_jobs': -1,\n 'random_state': 42,\n }\n },\n {'model': XGBClassifier,\n 'params' : {\n 'objective' : 'binary:logistic',\n 'booster' : 'gblinear',\n 'n_jobs' : -1,\n 'base_score' : 0.95,\n 'scale_pos_weight' : 1.,\n 'learning_rate' : 0.05,\n 'n_estimators' : 100,\n 'reg_alpha' : 0.025,\n 'reg_lambda' : 1.25,\n 'eval_metric' : 'auc'\n },\n },\n ],\n },\n ],\n\n # stacking\n 'feature': [\n {'name': 'LinearStacker',\n 'meta_classifier' : SGDClassifier,\n 'params' : {\n 'loss' : 'modified_huber', #'log' or 'modified_huber'\n 'penalty' : 'elasticnet',\n 'l1_ratio' : 0.15,\n 'n_jobs' : -1,\n 'random_state': 42,\n 'max_iter' : 1000},\n 'cv' : 3,\n 'seed' : 538,\n 'use_features' : False,\n 'stratify' : True,\n #'sources': ['RF', 'XT', 'ScikitRF'],\n 'sources': ['histXGB', 'LGBM'],\n },\n ],\n}\n\n\n# base classifiers to do stacking and re-stacking\nBaseModelConfigs = {\n\n 'GaussianNB': {\n 'model' : GaussianNB,\n 'params': {},\n },\n\n 'HuberLR': {\n 'model': SGDClassifier,\n 'params': {\n 'loss': 'modified_huber',\n 'penalty': 'elasticnet',\n 'l1_ratio': 0.15,\n 'max_iter': 2000,\n 'n_jobs': -1,\n 'random_state': 42,\n },\n },\n\n 'wHuberLR': {\n 'model': SGDClassifier,\n 'params': {\n 'loss': 'modified_huber',\n 'penalty': 'elasticnet',\n 'l1_ratio': 0.15,\n 'max_iter': 2000,\n 'n_jobs': -1,\n 'random_state': 42,\n 'class_weight': 'balanced'\n },\n },\n\n 'LR': {\n 'model': SGDClassifier,\n 'params': {\n 'loss': 'log',\n 'penalty': 'elasticnet',\n 'l1_ratio': 0.15,\n 'max_iter': 2000,\n 
'n_jobs': -1,\n 'random_state': 42,\n },\n },\n\n 'wScikitRF': {\n 'model' : RandomForestClassifier,\n 'params': { # override by task\n 'n_estimators' : 10,\n 'criterion' : 'gini',\n 'max_depth' : 12,\n 'min_samples_split': 32,\n 'min_samples_leaf' : 32,\n 'oob_score' : True,\n 'n_jobs' : -1,\n 'random_state' : 42,\n 'class_weight': 'balanced'\n },\n },\n\n 'kNN13': {\n 'model': KNeighborsClassifier,\n 'params': {\n 'n_neighbors': 13,\n 'weights' : 'distance', #'uniform',\n 'p' : 1,\n 'n_jobs' :-1,\n },\n },\n\n 'LinearGBM': {\n 'model': XGBClassifier,\n 'params': {\n 'objective' : 'binary:logistic',\n 'booster' : 'gblinear',\n 'n_jobs' : -1,\n 'base_score' : 0.95,\n 'scale_pos_weight': 1,\n 'learning_rate' : 0.05,\n 'n_estimators' : 100,\n 'reg_alpha' : 0.002,\n 'reg_lambda' : 1.,\n 'eval_metric' : 'auc',\n },\n },\n\n 'RF': {'model' : RandomForestClassifier,\n 'task': 'ScikitRF', # load results from HPOs\n },\n\n 'XT': {'model' : ExtraTreesClassifier,\n 'task': 'ScikitXT',\n },\n\n 'LGBM': {'model' : LGBMClassifier,\n 'task': 'LGBM', # load results from HPOs\n },\n\n 'histXGB': {\n 'model': XGBClassifier,\n 'task': 'LossGuideXGB'\n },\n\n}\n", "id": "8259695", "language": "Python", "matching_score": 0.4784887135028839, "max_stars_count": 0, "path": "configs/DebugStackerConfigs.py" }, { "content": "import warnings\nfrom typing import List\n\nimport numpy as np\nfrom sklearn import metrics\n\nwarnings.filterwarnings(\"ignore\")\n\n\n# competition metrics\ndef alaska_weighted_auc(\n y_true: np.array, y_valid: np.array, tpr_thresholds: List[float] = [0.0, 0.4, 1.0],\n weights: List[float] = [2, 1]):\n \"\"\"\n https://www.kaggle.com/anokas/weighted-auc-metric-updated\n \"\"\"\n # size of subsets\n areas = np.array(tpr_thresholds[1:]) - np.array(tpr_thresholds[:-1])\n\n # The total area is normalized by the sum of weights such that the final weighted AUC is between 0 and 1.\n normalization = np.dot(areas, weights)\n\n def compute_sub_metrics(tpr_min: float, tpr_max: float, fpr_arr: np.array, tpr_arr: np.array) -> float:\n mask = (tpr_min <= tpr_arr) & (tpr_arr <= tpr_max)\n\n if not mask.any(): # at least one sample\n return 0.\n\n fpr_sel = fpr_arr[mask]\n fpr_sel = np.concatenate([fpr_sel, [fpr_sel[-1], 1.]])\n tpr_sel = np.concatenate([tpr_arr[mask], [tpr_max, tpr_max]])\n return metrics.auc(fpr_sel, tpr_sel - tpr_min) # normalize such that curve starts at y=0\n\n fpr, tpr, thresholds = metrics.roc_curve(y_true, y_valid, pos_label=1)\n sub_metrics = [compute_sub_metrics(\n tpr_min=a, tpr_max=b, fpr_arr=fpr, tpr_arr=tpr) for a, b in zip(tpr_thresholds[:-1], tpr_thresholds[1:])]\n return np.dot(sub_metrics, weights) / normalization\n", "id": "9428938", "language": "Python", "matching_score": 0.5506094694137573, "max_stars_count": 0, "path": "alaska_utils/eval_metrics.py" }, { "content": "import numpy as np\nfrom .BaseAugmentation import MixinAugmentation\n\n\nclass _BaseLabel(MixinAugmentation):\n def __init__(self, min_value: float = .05, max_value: float = .95, random_seed: int = 42, threshold: float = .5):\n super().__init__(random_seed=random_seed, threshold=threshold)\n self.min_value: float = min_value\n self.max_value: float = max_value\n if self.max_value < self.min_value:\n raise ValueError()\n\n def _do_scale(self, x: np.array) -> np.array:\n raise NotImplementedError()\n\n def transform(self, x: np.array) -> np.array:\n if self._active_augmentation:\n return self._do_scale(x)\n\n return x\n\n\nclass ClipLabel(_BaseLabel):\n def __init__(self, min_value: float = .05, max_value: 
float = .95, random_seed: int = 42, threshold: float = .5):\n super().__init__(min_value=min_value, max_value=max_value, random_seed=random_seed, threshold=threshold)\n\n def _do_scale(self, x: np.array) -> np.array:\n return np.clip(x, a_min=self.min_value, a_max=self.max_value)\n\n\nclass LabelSoften(_BaseLabel):\n def __init__(self, min_value: float = .05, max_value: float = .95, random_seed: int = 42, threshold: float = .5):\n super().__init__(min_value=min_value, max_value=max_value, random_seed=random_seed, threshold=threshold)\n self.intercept: float = self.max_value - self.min_value\n\n def _do_scale(self, x: np.array) -> np.array:\n return x * self.intercept + self.min_value\n", "id": "12594427", "language": "Python", "matching_score": 0.718543291091919, "max_stars_count": 2, "path": "nlp_utils/Augmentation/LabelAugmentation.py" }, { "content": "from typing import List\nfrom itertools import compress\nfrom .BaseAugmentation import MixinAugmentation\n\n\nclass _RandomTruncate(MixinAugmentation):\n def __init__(self, min_length: int = 128, max_length: int = 256, random_seed: int = 42, threshold: float = .5):\n super().__init__(random_seed=random_seed, threshold=threshold)\n self.min_length: int = min_length\n self.max_length: int = max_length\n\n def _do_truncate(self, x, length: int):\n raise NotImplementedError\n\n def transform(self, x: List[str]) -> List[str]:\n len_x: int = len(x)\n if len_x <= self.min_length:\n return x\n\n if self._active_augmentation:\n seq = (self.min_length, min(len_x, self.max_length))\n length = self.rng.randint(min(seq), max(seq))\n return self._do_truncate(x, length)\n\n return x\n\n\nclass RandomTruncateHead(_RandomTruncate):\n def __init__(self, min_length: int = 128, max_length: int = 256, random_seed: int = 42, threshold: float = .5):\n super().__init__(min_length=min_length, max_length=max_length, random_seed=random_seed, threshold=threshold)\n\n def _do_truncate(self, x, length: int):\n return x[-length:]\n\n\nclass RandomTruncateTail(_RandomTruncate):\n def __init__(self, min_length: int = 128, max_length: int = 256, random_seed: int = 42, threshold: float = .5):\n super().__init__(min_length=min_length, max_length=max_length, random_seed=random_seed, threshold=threshold)\n\n def _do_truncate(self, x, length: int):\n return x[:length]\n\n\nclass RandomDropWords(MixinAugmentation):\n def __init__(\n self, min_length: int = 1, max_drop: int = 5, drop_rate: float = .1, random_seed: int = 42,\n threshold: float = .5):\n super().__init__(random_seed=random_seed, threshold=threshold)\n self.min_length: int = min_length\n self.max_drop: int = max_drop\n self.drop_rate: float = drop_rate\n\n def transform(self, x: List[str]) -> List[str]:\n len_x: int = len(x)\n if len_x < self.min_length:\n return x\n\n if self._active_augmentation:\n max_drop = min(max(0, len_x - self.min_length), max(int(self.drop_rate * len_x), self.max_drop))\n if max_drop < 1:\n return x\n\n mask = self._get_mask(len_x, max_drop)\n x = list(compress(x, mask))\n\n return x\n\n", "id": "3228575", "language": "Python", "matching_score": 3.042358160018921, "max_stars_count": 2, "path": "nlp_utils/Augmentation/SequenceAugmentation.py" }, { "content": "import numpy as np\nfrom .IAugmentation import ISampleAugmentation\n\n\nclass MixinAugmentation(ISampleAugmentation):\n def __init__(self, random_seed: int = 42, threshold: float = .5):\n self.rng = np.random.RandomState(random_seed)\n self.threshold = threshold\n\n def transform(self, x):\n raise NotImplementedError\n\n def 
_active_augmentation(self) -> bool:\n if self.rng.uniform(low=0.0, high=1.0) <= self.threshold:\n return True\n\n return False\n\n def _get_mask(self, mask_size: int, max_mask: int):\n \"\"\"True: keep words\"\"\"\n _max_mask = self.rng.randint(0, high=max_mask)\n if _max_mask == 0:\n return [True] * mask_size\n\n sequence = [True] * (mask_size - max_mask) + [False] * max_mask\n self.rng.shuffle(sequence)\n return sequence\n", "id": "3117995", "language": "Python", "matching_score": 4.109506130218506, "max_stars_count": 2, "path": "nlp_utils/Augmentation/BaseAugmentation.py" }, { "content": "from typing import List\n\n\nclass ISampleAugmentation:\n def transform(self, x: List[str]) -> List[str]:\n raise NotImplementedError\n", "id": "8758070", "language": "Python", "matching_score": 1.0354039669036865, "max_stars_count": 2, "path": "nlp_utils/Augmentation/IAugmentation.py" }, { "content": "from typing import List, Optional, Dict\n\nfrom .SequenceAugmentation import RandomTruncateHead\nfrom .SequenceAugmentation import RandomTruncateTail\nfrom .SequenceAugmentation import RandomDropWords\n\nfrom .LabelAugmentation import ClipLabel\nfrom .LabelAugmentation import LabelSoften\n\n\ndef _augmentation_factory(augmentation_gen: str, params: Dict):\n augmentations = globals()\n if augmentation_gen not in augmentations.keys():\n raise NotImplementedError()\n\n return augmentations[augmentation_gen](**params)\n\n\nclass AugmentationMaster:\n def __init__(self, func_x_list: List, func_y_list: List):\n self.transform_x = self._initialize_func_list(func_x_list)\n self.transform_y = self._initialize_func_list(func_y_list)\n\n @staticmethod\n def _initialize_func_list(func_list) -> List:\n return [_augmentation_factory(augmentation_gen=func_gen, params=params) for func_gen, params in func_list]\n\n def transform(self, x=None, y=None):\n if x is not None:\n for tf in self.transform_x:\n x = tf.transform(x)\n\n if y is None:\n return x\n\n for tf in self.transform_y:\n y = tf.transform(y)\n\n return y\n", "id": "3504842", "language": "Python", "matching_score": 2.1699602603912354, "max_stars_count": 2, "path": "nlp_utils/Augmentation/AugmentationMaster.py" }, { "content": "from .AugmentationMaster import AugmentationMaster\n", "id": "8773351", "language": "Python", "matching_score": 0, "max_stars_count": 2, "path": "nlp_utils/Augmentation/__init__.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nThis script provide a class to read and save files\nCreated on Sat July 21 2018\n\n@author: cttsai\n\"\"\"\nimport pandas as pd\n\nfrom Utility import CheckFileExist\nfrom LibConfigs import logger, hdf5_compress_option, fast_hdf5_compress_option\n\n\nclass DataFileIO(object):\n \"\"\"\n \"\"\"\n def __init__(self):\n self.data_lastet_load = {}\n\n def getLastestLoaded(self):\n return self.data_lastet_load.copy()\n\n @staticmethod\n def checkFile(filename):\n return CheckFileExist(filename, silent=False)\n\n @staticmethod\n def loadEmpty(configs):\n return {k: pd.DataFrame() for k in configs.keys()}\n\n @staticmethod\n def readHDF(filename, configs={}, opt_load=True):\n with pd.HDFStore(filename, 'r', **hdf5_compress_option) as store:\n logger.info(\"{} contained {} items\".format(filename, len(store.keys())))\n for k in store.keys():\n logger.info(\"{}: {}\".format(k, store[k].shape))\n\n if opt_load and configs: # load and limited by configs\n ret = {k: pd.DataFrame() for k in configs.keys()}\n ret.update({k.strip('/'): store[k] for k in store.keys() if k.strip('/') in 
configs.keys()})\n return ret\n\n if opt_load: # load all saved dataframes\n return {k.strip('/'): store[k] for k in store.keys()}\n\n return {}\n\n def showHDF(self, filename):\n self.checkFile(filename)\n self.readHDF(filename, opt_load=False)\n\n def loadCSV(self, configs={}):\n \"\"\"\n configs = {'name': 'file_path'}\n return load_data = {'name': dataframe}\n \"\"\"\n logger.info(\"Read Data from CSV\")\n load_data = {}\n\n for k, f_path in configs.items():\n if not self.checkFile(f_path):\n continue\n\n load_data[k] = pd.read_csv(f_path)\n logger.info(\"Read in {}: from {}, shape={}\".format(k, f_path, load_data[k].shape))\n\n self.data_lastet_load = load_data.copy()\n return load_data\n\n def loadHDF(self, filename, configs={}, limited_by_configs=True):\n \"\"\"\n \"\"\"\n logger.info(\"Read Data from HDFS\")\n\n if not self.checkFile(filename):\n return self.loadEmpty(configs)\n\n if limited_by_configs:\n logger.info(\"Load selected DataFrame Only\")\n load_data = self.readHDF(filename, configs, opt_load=True)\n else: # full loaded\n load_data = self.readHDF(filename, opt_load=True)\n\n for k, v in load_data.items():\n if isinstance(v, pd.DataFrame):\n logger.info('memory usage on {} is {:.3f} MB'.format(k, v.memory_usage().sum() / 1024. ** 2))\n self.data_lastet_load = load_data#.copy()\n return load_data\n\n def saveHDF(self, filename, data, opt_overwrite=True, opt_fast=False):\n if self.checkFile(filename):\n if not opt_overwrite:\n logger.warning(\"overwrite is not allowed\")\n return False\n\n compress_option = hdf5_compress_option\n if opt_fast:\n logger.info(\"use faster compression option\")\n compress_option = fast_hdf5_compress_option\n with pd.HDFStore(filename, 'w', **compress_option) as store:\n logger.info(\"Save to {}\".format(filename))\n for k, d in data.items():\n store.put(k, d, format='table')\n #store.put(k, d, format='fixed')\n logger.info(\"Save {}: {}\".format(k, d.shape))\n", "id": "12861042", "language": "Python", "matching_score": 1.5778425931930542, "max_stars_count": 0, "path": "lib/DataFileIO.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nThis script provides feature tranform and forked from\nCreated on Thu July 20 2018\n\n@author: cttsai\n\"\"\"\nimport gc; gc.enable()\n\nimport numpy as np\nimport pandas as pd\n\nfrom LibConfigs import logger, file_dir_path\nfrom DataFileIO import DataFileIO\n\n\nclass FeatureImportance(object):\n def __init__(self, default_result_dir=file_dir_path['data']):\n \"\"\"\n preds = {\n 'train_oof' : oof_preds_df,\n 'test_oof' : sub_preds_df,\n 'test_full' : test_preds_full,\n 'feature_importance': feature_importance_df\n }\n \"\"\"\n self.result_dir = default_result_dir\n self.importance_series = pd.Series()\n self.keys = {'importance': 'importance',\n 'feature' : 'feature'}\n\n def _analyzeFeatures(self, df):\n if not df.empty:\n self.importance_series = df.groupby(self.keys['feature']).sum()[self.keys['importance']]\n logger.info('feature distribution\\n{}'.format(\n self.importance_series.describe([0.1, 0.2, 0.25, 0.5, 0.75, 0.8, 0.9])))\n\n def LoadResult(self, result_files):\n if not result_files:\n logger.warning('no result file to rank features')\n return False\n\n elif len(result_files) == 1:\n ret = DataFileIO().loadHDF('{loc}/{filename}'.format(loc=self.result_dir,\n filename=result_files[0]))\n df = ret.get('feature_importance', pd.DataFrame())\n\n else:\n logger.info('concate {} results to rank features'.format(len(result_files)))\n rets = list()\n for f in result_files:\n 
rets.append(DataFileIO().loadHDF('{loc}/{filename}'.format(loc=self.result_dir, filename=f)))\n\n rets = [ret.get('feature_importance', pd.DataFrame()) for ret in rets]\n df = pd.concat(rets, axis=1)\n\n self._analyzeFeatures(df)\n\n def GetBlacklist(self, threshold=10.):\n\n if self.importance_series.empty:\n logger.warning('no feature')\n return list()\n\n logger.info('create blacklist on score <= {}'.format(threshold))\n ret = self.importance_series.loc[self.importance_series <= threshold].index.tolist()\n logger.info('return blacklist of {} from {} features'.format(len(ret), len(self.importance_series)))\n return ret\n\n def CullFeatures(self, x, blacklist=list()):\n\n if not blacklist:\n logger.warning('empty blacklist')\n return x\n\n before = x.shape\n x = x[[f for f in x.columns if f not in blacklist]]\n logger.info('shrink from {} to {} by {}'.format(before, x.shape, len(blacklist)))\n return x\n\n\ndef main():\n obj = FeatureImportance('../data/')\n obj.LoadResult(filename='probs_selected_features_level[0]_LightGBM_features[0706]_score[0.783694]_fold[5]_2018-08-25-09-45.hdf5')\n import pdb; pdb.set_trace()\n\nif __name__ == '__main__':\n main()\n", "id": "3677809", "language": "Python", "matching_score": 3.0956990718841553, "max_stars_count": 0, "path": "lib/FeatureImportance.py" }, { "content": "\"\"\"\nthis is forked from two excellent kernels:\nhttps://www.kaggle.com/jsaguiar/updated-0-792-lb-lightgbm-with-simple-features\nhttps://www.kaggle.com/ogrellier/lighgbm-with-selected-features\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport gc\nimport time\nimport lightgbm as lgb\nfrom sklearn.metrics import roc_auc_score, roc_curve\nfrom sklearn.model_selection import KFold, StratifiedKFold\n\nimport ModulePaths\nfrom DataFileIO import DataFileIO\nfrom Utility import ComposeResultName, InitializeConfigs\next = InitializeConfigs(filename=\"./external/lighgbm-with-selected-features.py\")\n\n# LightGBM GBDT with KFold or Stratified KFold\n# Parameters from Tilii kernel: https://www.kaggle.com/tilii7/olivier-lightgbm-parameters-by-bayesian-opt/code\ndef kfold_lightgbm(df, num_folds, params, stratified=False, debug=False):\n # Divide in training/validation and test data\n train_df = df[df['TARGET'].notnull()]\n test_df = df[df['TARGET'].isnull()]\n print(\"Starting LightGBM. 
Train shape: {}, test shape: {}\".format(train_df.shape, test_df.shape))\n del df; gc.collect()\n\n # Cross validation model\n if stratified:\n folds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=1001)\n submission_file_name = \"submission_with_selected_features_lgbm_stratified.csv\"\n identifier = 'stratified_selected_features'\n else:\n folds = KFold(n_splits= num_folds, shuffle=True, random_state=1001)\n submission_file_name = \"submission_with_selected_features_lgbm.csv\"\n identifier = 'selected_features'\n # Create arrays and dataframes to store results\n\n best_iterations = []\n oof_preds = np.zeros(train_df.shape[0])\n sub_preds = np.zeros(test_df.shape[0])\n feature_importance_df = pd.DataFrame()\n feats = [f for f in train_df.columns if f not in ['TARGET','SK_ID_CURR','SK_ID_BUREAU','SK_ID_PREV','index']]\n\n for n_fold, (train_idx, valid_idx) in enumerate(folds.split(train_df[feats], train_df['TARGET'])):\n dtrain = lgb.Dataset(data=train_df[feats].iloc[train_idx],\n label=train_df['TARGET'].iloc[train_idx],\n free_raw_data=False, silent=True)\n dvalid = lgb.Dataset(data=train_df[feats].iloc[valid_idx],\n label=train_df['TARGET'].iloc[valid_idx],\n free_raw_data=False, silent=True)\n\n clf = lgb.train(\n params=params,\n train_set=dtrain,\n num_boost_round=10000,\n valid_sets=[dtrain, dvalid],\n early_stopping_rounds=500,\n verbose_eval=200\n )\n\n oof_preds[valid_idx] = clf.predict(dvalid.data)\n sub_preds += clf.predict(test_df[feats]) / folds.n_splits\n\n print('best iteration: {}'.format(clf.best_iteration))\n best_iterations.append(clf.best_iteration)\n\n fold_importance_df = pd.DataFrame()\n fold_importance_df[\"feature\"] = feats\n fold_importance_df[\"importance\"] = clf.feature_importance(importance_type='gain')\n fold_importance_df[\"fold\"] = n_fold + 1\n feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)\n print('Fold %2d AUC : %.6f' % (n_fold + 1, roc_auc_score(dvalid.label, oof_preds[valid_idx])))\n del clf, dtrain, dvalid\n gc.collect()\n\n ## INSERTED BEGIN\n oof_preds_df = train_df[['SK_ID_CURR', 'TARGET']].set_index('SK_ID_CURR')\n oof_preds_df['PROBA'] = oof_preds\n sub_preds_df = test_df[['SK_ID_CURR']].set_index('SK_ID_CURR')\n sub_preds_df['PROBA'] = sub_preds\n\n params['n_estimators'] = max(best_iterations)\n clf = lgb.LGBMClassifier(**params)\n clf.fit(train_df[feats], train_df['TARGET'])\n test_preds_full = sub_preds_df.copy()\n test_preds_full['PROBA'] = clf.predict_proba(test_df[feats])[:,1]\n\n file_stem = {\n 'level': 0,\n 'model': 'LightGBM',\n 'feature_num': test_df.shape[1],\n 'score': roc_auc_score(oof_preds_df['TARGET'].values,\n oof_preds_df['PROBA'].values),\n 'fold': num_folds,}\n stem = ComposeResultName(file_stem)\n\n data = {\n 'train': train_df.set_index('SK_ID_CURR'),\n 'test' : test_df.set_index('SK_ID_CURR'),\n }\n DataFileIO().saveHDF('./data/data_{}_{}.hdf5'.format(identifier, stem),\n data,\n opt_overwrite=True,\n opt_fast=False)\n\n preds = {\n 'train_oof' : oof_preds_df,\n 'test_oof' : sub_preds_df,\n 'test_full' : test_preds_full,\n 'feature_importance': feature_importance_df\n }\n DataFileIO().saveHDF('./data/probs_{}_{}.hdf5'.format(identifier, stem),\n preds,\n opt_overwrite=True,\n opt_fast=False)\n ## INSERTED END\n\n print('Full AUC score %.6f' % roc_auc_score(train_df['TARGET'], oof_preds))\n # Write submission file and plot feature importance\n if not debug:\n sub_df = test_df[['SK_ID_CURR']].copy()\n sub_df['TARGET'] = sub_preds\n sub_df[['SK_ID_CURR', 
'TARGET']].to_csv('output/{}'.format(submission_file_name), index= False)\n #display_importances(feature_importance_df)\n return feature_importance_df\n\n\ndef main(debug=False):\n # LightGBM parameters found by Bayesian optimization\n ParamsLGBM = {\n 'objective': 'binary',\n 'boosting_type': 'gbdt',\n #'nthread': 4,\n 'learning_rate': 0.02, # 02,\n 'num_leaves': 20,\n 'colsample_bytree': 0.9497036,\n 'subsample': 0.8715623,\n 'subsample_freq': 1,\n 'max_depth': 8,\n 'reg_alpha': 0.041545473,\n 'reg_lambda': 0.0735294,\n 'min_split_gain': 0.0222415,\n 'min_child_weight': 60, # 39.3259775,\n 'seed': 0,\n 'verbose': -1,\n 'metric': 'auc',\n 'device': 'gpu',\n }\n\n num_rows = 10000 if debug else None\n df = ext.application_train_test(num_rows)\n with ext.timer(\"Process bureau and bureau_balance\"):\n bureau = ext.bureau_and_balance(num_rows)\n print(\"Bureau df shape:\", bureau.shape)\n df = df.join(bureau, how='left', on='SK_ID_CURR')\n del bureau; gc.collect()\n with ext.timer(\"Process previous_applications\"):\n prev = ext.previous_applications(num_rows)\n print(\"Previous applications df shape:\", prev.shape)\n df = df.join(prev, how='left', on='SK_ID_CURR')\n del prev; gc.collect()\n with ext.timer(\"Process POS-CASH balance\"):\n pos = ext.pos_cash(num_rows)\n print(\"Pos-cash balance df shape:\", pos.shape)\n df = df.join(pos, how='left', on='SK_ID_CURR')\n del pos; gc.collect()\n with ext.timer(\"Process installments payments\"):\n ins = ext.installments_payments(num_rows)\n print(\"Installments payments df shape:\", ins.shape)\n df = df.join(ins, how='left', on='SK_ID_CURR')\n del ins; gc.collect()\n with ext.timer(\"Process credit card balance\"):\n cc = ext.credit_card_balance(num_rows)\n print(\"Credit card balance df shape:\", cc.shape)\n df = df.join(cc, how='left', on='SK_ID_CURR')\n del cc; gc.collect()\n with ext.timer(\"Run LightGBM with kfold\"):\n print(df.shape)\n df.drop(ext.features_with_no_imp_at_least_twice, axis=1, inplace=True)\n gc.collect()\n print(df.shape)\n\n feat_importance = kfold_lightgbm(df,\n num_folds=5,\n params=ParamsLGBM,\n stratified=False,\n debug=debug)\n\n feat_importance = kfold_lightgbm(df,\n num_folds=5,\n params=ParamsLGBM,\n stratified=True,\n debug=debug)\n\nif __name__ == \"__main__\":\n submission_file_name = \"submission_with_selected_features.csv\"\n with ext.timer(\"Full model run\"):\n main()", "id": "7947356", "language": "Python", "matching_score": 3.495089292526245, "max_stars_count": 0, "path": "LGBMSelectedFeatures.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 28 2018\n\n@author: cttsai\n\"\"\"\nimport os, sys\nimport pickle\nfrom datetime import datetime as dt\n\nimport numpy as np\nimport pandas as pd\n\nfrom LibConfigs import logger\nfrom LibConfigs import enable_gpu_options, disable_gpu_options\n\n\ndef SwitchDevice(params, enable_gpu=True):\n\n def func(params, opt, silent=False):\n for k, v in params.items():\n a = k in opt.keys()\n b = v in opt.get(k, {}).keys()\n if all([a, b]):\n params.update({k: opt[k].get(v)})\n logger.info('switch {} to {}'.format(k, params[k]))\n return params\n\n switched_params = params.copy()\n if enable_gpu:\n switched_params = func(switched_params, enable_gpu_options)\n else:\n switched_params = func(switched_params, disable_gpu_options)\n\n return switched_params\n\n\ndef IdentifyCategoricalColumn(df):\n return [col for col in df.columns if df[col].dtype == 'object']\n\n\ndef CheckColumnsExist(df, columns):\n cols_exist = [f for f in columns 
if f in df.columns]\n cols_not_exist = [f for f in columns if f not in df.columns]\n return cols_exist, cols_not_exist\n\ndef CheckFileExist(filename, silent=True):\n if not os.path.exists(filename):\n if not silent:\n logger.warning('{} does not exist'.format(filename))\n\n return False\n\n return True\n\n\ndef MkDirSafe(directory):\n if not os.path.exists(directory):\n logger.info('make {}'.format(directory))\n os.makedirs(directory)\n\n\ndef AnyEmptyDataframe(data):\n if not data:\n logger.warning('passing no dataframes')\n return True\n\n if isinstance(data, dict):\n return any([v.empty for k, v in data.items()])\n\n elif isinstance(data, list):\n return any([l.empty for l in data])\n\n return False\n\n\ndef SavePickle(model, filename):\n with open(filename, 'wb') as f:\n pickle.dump(model, f)\n logger.info('save model to {}'.format(filename))\n\n\ndef LoadPickle(filename):\n if not CheckFileExist(filename):\n return None\n\n with open(filename, 'rb') as f:\n logger.info('load model {}'.format(filename))\n return pickle.load(f)\n\n\ndef Cast64To32(df, blacklist=['SK_ID_CURR', 'SK_ID_PREV', 'SK_ID_BUREAU']):\n series_dtypes = df.dtypes\n series_dtypes = series_dtypes.loc[~series_dtypes.index.isin(blacklist)]\n if not series_dtypes.empty:\n logger.info('cast dataframe from 64 to 32')\n to_float32 = series_dtypes.loc[series_dtypes.apply(lambda x: x == np.float64)].index.tolist()\n df[to_float32] = df[to_float32].astype(np.float32)\n logger.info('cast {} columns float32: {}'.format(len(to_float32), to_float32))\n\n to_int32 = series_dtypes.loc[series_dtypes.apply(lambda x: x == np.int64)].index.tolist()\n df[to_int32] = df[to_int32].astype(np.int32)\n logger.info('cast {} columns to int32: {}'.format(len(to_int32), to_int32))\n\n return df\n\n\ndef InitializeConfigs(filename):\n if not filename:\n return None\n\n if not os.path.exists(filename):\n raise ValueError(\"Spec file {spec_file} does not exist\".format(spec_file=filename))\n\n module_name = filename.split(os.sep)[-1].replace('.', '')\n\n import importlib.util\n spec = importlib.util.spec_from_file_location(module_name, filename)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n return module\n\n\ndef ComposeResultName(meta={}):\n \"\"\"\n input\n ----\n meta = {\n 'level': int,\n 'model': str,\n 'feature_num': int,\n 'score': float,\n 'fold': int,\n 'timestamp': datetime.datetime}\n\n return\n ----\n [level]_[model_type]_[feature_num]_[local_score]_[local_cv]_[time]\n\n \"\"\"\n #logger.info('{}'.format(meta))\n template = 'level[{level}]_{model_type}_features[{feature_num:04d}]_score[{score:.6f}]_fold[{fold}]_{timestamp}'\n result_name = template.format(level=int(meta.get('level', 0)),\n model_type=meta.get('model', 'unknown'),\n feature_num=meta.get('feature_num', 0),\n score=meta.get('score', 0.0),\n fold=int(meta.get('fold', 0)),\n timestamp=meta.get('timestamp', dt.now()).strftime('%Y-%m-%d-%H-%M'))\n return result_name\n\n\ndef DecomposeResultName(name):\n \"\"\"\n input\n ----\n [level]_[model_type]_[feature_num]_[local_score]_[local_cv]_[time] or\n 'header' + [level]_[model_type]_[feature_num]_[local_score]_[local_cv]_[time].extinsion\n\n return\n ----\n meta = {\n 'level': int,\n 'model': str,\n 'feature_num': int,\n 'score': float,\n 'fold': int,\n 'timestamp': datetime.datetime}\n \"\"\"\n if (len(name) - name.rfind('.') < 10): #have an extension\n name = name[name.find('level'):name.rfind('.')]\n else:\n name = name[name.find('level'):]\n\n level, model_type, feature_num, score, 
nr_fold, timestamp = name.split('_')\n\n def extract(x):\n return x.split(']')[0].split('[')[1]\n\n return {'level': int(extract(level)),\n 'model': model_type,\n 'feature_num': int(extract(feature_num)),\n 'score': float(extract(score)),\n 'fold': int(extract(nr_fold)),\n 'timestamp': dt.strptime(timestamp, '%Y-%m-%d-%H-%M')}\n\n\n###############################################################################\ndef main(argc, args):\n \"\"\"\n this is a testing module\n \"\"\"\n print('Test Compose Result Name')\n print(ComposeResultName({}))\n\n n = ComposeResultName({'level': 1, 'model': 'xgb', 'score': 1.4, 'fold': 3})\n print(n, DecomposeResultName(n))\n\n n = 'subm_' + n + '.csv'\n print(n, DecomposeResultName(n))\n\n import pdb; pdb.set_trace()\n\n return\n\nif __name__ == '__main__':\n main(len(sys.argv), sys.argv)\n\n", "id": "9518429", "language": "Python", "matching_score": 2.2128453254699707, "max_stars_count": 0, "path": "lib/Utility.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu July 29 2018\n\n@author: cttsai\n\"\"\"\n\nimport sys, os\nimport argparse\nimport pickle\n\nimport ModulePaths\nfrom lib.LibConfigs import logger, file_dir_path\nfrom lib.DataProvider import DataProvider\nfrom lib.ScikitOptimize import ScikitOptimize\nfrom lib.FeatureImportance import FeatureImportance\nfrom lib.AutoStacker import AutoStacker\nfrom lib.Utility import InitializeConfigs, CheckFileExist, SwitchDevice\n\ndef parse_command_line():\n\n default_cache_prefix = 'sample'\n\n params_loc = file_dir_path.get('params', './params')\n configs_loc = file_dir_path.get('configs', './configs')\n default_data_configs_path = '{}/SampleDataConfigs.py'.format(configs_loc)\n default_model_configs_path = '{}/SampleModelConfigs.py'.format(configs_loc)\n default_stacker_configs_path = '{}/SampleStackerConfigs.py'.format(configs_loc)\n default_select_to_hpo = None\n default_feature_score_cutoff = 10.\n\n parser = argparse.ArgumentParser(description='Home Credit Default Risk Modeler',\n add_help=True,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('-a', '--cache-prefix', type=str, default=default_cache_prefix, help='specifiy cache file prefix')\n parser.add_argument('-d', '--configs-data', type=str, default=default_data_configs_path, help='path to data configs')\n parser.add_argument('-m', '--configs-model', type=str, default=default_model_configs_path, help='path to model configs')\n parser.add_argument('-s', '--configs-stacker', type=str, default=default_stacker_configs_path, help='path to stacker configs')\n parser.add_argument('-t', '--select-hpo', type=str, default=default_select_to_hpo, help='hpo on selected models')\n parser.add_argument( '--cutoff-score', type=float, default=default_feature_score_cutoff, help='cutoff to remove unimportant features')\n parser.add_argument('-c', '--cull_features', action='store_true', default=False, help='cull features')\n parser.add_argument('--enable-gpu', action='store_true', default=False, help='compute using gpu')\n parser.add_argument('--refresh-cache', action='store_true', default=False, help='refresh cache by data configs')\n parser.add_argument('--refresh-meta', action='store_true', default=False, help='refresh constructed meta features')\n parser.add_argument('--compute-hpo', action='store_true', default=False, help='hpo')\n parser.add_argument('--compute-stack', action='store_true', default=False, help='stacking')\n parser.add_argument('--debug', action='store_true', default=False, 
help='debug mode using 20000 samples')\n\n args = parser.parse_args()\n\n logger.info('running task with prefix={}'.format(args.cache_prefix))\n\n if args.enable_gpu:\n logger.info('enable GPU computing in hyperparameters')\n\n if args.cull_features:\n logger.info('cull features with scores under {}'.format(args.cutoff_score))\n\n if args.select_hpo:\n args.select_hpo = args.select_hpo.split(',')\n\n if args.debug:\n logger.warning('**Debug Mode**')\n args.configs_model = '{}/DebugModelConfigs.py'.format(configs_loc)\n args.configs_stacker = '{}/DebugStackerConfigs.py'.format(configs_loc)\n\n return args\n\n\ndef compute(args):\n\n # loading configs\n DataConfigs = InitializeConfigs(args.configs_data).DataConfigs\n if args.compute_hpo:\n ModelConfigs = InitializeConfigs(args.configs_model).ModelConfigs\n if args.compute_stack:\n StackerConfigs = InitializeConfigs(args.configs_stacker).StackerConfigs\n BaseModelZoo = InitializeConfigs(args.configs_stacker).BaseModelConfigs\n ExtMetaConfigs = InitializeConfigs(args.configs_stacker).ExternalMetaConfigs\n\n\n dp = DataProvider(IOConfigs=file_dir_path)\n if args.refresh_cache:\n data = dp.LoadData(DataConfigs, source='from_processed', prefix=args.cache_prefix)\n else:\n data = dp.LoadData(DataConfigs, source='from_train_test', prefix=args.cache_prefix)\n\n train_x, train_y, test_x, test_y = data\n\n if args.cull_features: # a bit feature selection\n f_path = InitializeConfigs(args.configs_model).fileFeatureImportance\n featSel = FeatureImportance()\n featSel.LoadResult(f_path)\n blacklist = featSel.GetBlacklist(args.cutoff_score)\n train_x = featSel.CullFeatures(train_x, blacklist)\n test_x = featSel.CullFeatures(test_x, blacklist)\n\n if args.debug:\n train_x = train_x.iloc[:20000]\n train_y = train_y.iloc[:20000]\n logger.warning('debug mode: x={}'.format(train_x.shape))\n args.cache_prefix = 'debug'\n logger.info('P/N ratio:\\n{}'.format(train_y.value_counts(normalize=True)))\n\n if args.compute_hpo:\n logger.info('load hpo configs of {} models'.format(len(ModelConfigs)))\n if args.select_hpo:\n ModelConfigs = {k: v for k, v in ModelConfigs.items() if k in args.select_hpo}\n logger.info('compute hpo for selected {} models'.format(len(ModelConfigs)))\n\n for k, v in ModelConfigs.items():\n try:\n model = v.get(\"model\")\n hpo_range = v.get(\"hyperparameter_optimization\")\n init = hpo_range.get('initialize', {})\n hpo_range.update({'initialize': SwitchDevice(init, enable_gpu=args.enable_gpu)})\n hpo_search = ScikitOptimize(model,\n hpo_range,\n task_name='{}'.format(k),\n data_prefix=args.cache_prefix)\n hpo_search.search(train_x, train_y)\n hpo_search.save_hyperparameters(export=True)\n # TODO: fine tune model\n except Exception:\n logger.info('Errors in optimizing {}'.format(k))\n\n if args.compute_stack:\n stackers = AutoStacker(StackerConfigs, args.enable_gpu,\n data_prefix=args.cache_prefix)\n\n if args.refresh_meta:\n stackers.buildMetaFeatures(BaseModelZoo)\n stackers.fit_transform(train_x, train_y, test_x, seed=42)\n\n else:\n stackers.loadExternalMeta(ExtMetaConfigs)\n stackers.buildMetaClassifiers(BaseModelZoo)\n stackers.fit_predict(train_x, train_y, test_x, seed=538)\n\n return\n\n\ndef main(argc, argv):\n logger.info('reading arguments')\n args = parse_command_line()\n\n logger.info('starting to compute')\n compute(args)\n\n return\n\n\nif __name__ == '__main__':\n main(len(sys.argv), sys.argv)\n", "id": "2823092", "language": "Python", "matching_score": 3.291213274002075, "max_stars_count": 0, "path": 
"ModelPipeline.py" }, { "content": "\nfrom itertools import combinations\nimport pandas as pd\nimport numpy as np\n\nfrom mlxtend.classifier import StackingCVClassifier\nfrom sklearn.model_selection import cross_val_predict\nfrom sklearn.preprocessing import PolynomialFeatures\n\nfrom sklearn.metrics import roc_auc_score\n\nfrom lib.DataFileIO import DataFileIO\nfrom lib.LibConfigs import logger, file_dir_path\nfrom lib.LibConfigs import filename_submit_mlxtend_meta, filename_submit_mlxtend_base\nfrom lib.LibConfigs import filename_hpo_external\nfrom lib.LibConfigs import filename_mlxtend_meta_features\nfrom lib.LibConfigs import filename_mlxtend_meta_features_external, filename_mlxtend_stacker_external\nfrom lib.Utility import ComposeResultName, CheckFileExist, LoadPickle, SwitchDevice\n\n#base-level, 2nd-level (feature with meta, meta), 3rd-level (meta)\nclass AutoStacker(object):\n def __init__(self, configs, enable_gpu=False, data_prefix=None):\n\n self.prefix = data_prefix\n self.input_loc = file_dir_path['data']\n self.output_loc = file_dir_path['output']\n self.params_loc = file_dir_path['params']\n self.meta_stacker_configs = configs['stacker']\n self.meta_feature_configs = configs['feature']\n self.data_io_manager = DataFileIO()\n\n self.enable_gpu = enable_gpu\n\n # mlxtend stacked model\n #self.clf_names = list()\n self.clfs = list()\n self.clfs_info = list()\n\n # meta models from mlxtend stacked model\n self.meta_clfs = list()\n #self.meta_clf_names = list()\n self.meta_clfs_info = list()\n\n # store of meta features, meta=collected, agg=concated\n self.X_meta = list()\n self.test_X_meta = list()\n self.X_agg = pd.DataFrame()\n self.test_X_agg = pd.DataFrame()\n\n @staticmethod\n def _set_submit_filename(level=1, name=None, feature_num=None, score=None, nr_fold=None, seed=None):\n return {\n 'level': level,\n 'model': name,\n 'feature_num': feature_num,\n 'score': score,\n 'fold': nr_fold,\n 'seed': seed,\n }\n\n def saveSubmit(self, file_stem, preds, template):\n stem = ComposeResultName(file_stem)\n filename = template.format(loc=self.output_loc,\n prefix=self.prefix,\n stem=stem)\n logger.info('Save predictions to {}'.format(filename))\n preds.to_csv(filename)\n\n def saveMetaFeatures(self, file_stem, data, stacker_level=False):\n stem = ComposeResultName(file_stem)\n filename = filename_mlxtend_meta_features.format(loc=self.input_loc,\n prefix=self.prefix,\n stem=stem)\n\n logger.info('Save meta features to {}'.format(filename))\n self.data_io_manager.saveHDF(filename,\n data,\n opt_overwrite=True,\n opt_fast=False)\n\n if stacker_level:\n filename = filename_mlxtend_stacker_external.format(loc=self.input_loc,\n prefix=self.prefix)\n else:\n filename = filename_mlxtend_meta_features_external.format(loc=self.input_loc, \n prefix=self.prefix)\n \n logger.info('export meta features to {}'.format(filename))\n self.data_io_manager.saveHDF(filename,\n data,\n opt_overwrite=True,\n opt_fast=False)\n\n def loadExternalMeta(self, configs):\n \"\"\"\n preds = {\n 'train_oof' : oof_preds_df,\n 'test_oof' : sub_preds_df,\n 'test_full' : test_preds_full,\n 'feature_importance': feature_importance_df\n }\n \"\"\"\n\n self.X_meta = list()\n self.test_X_meta = list()\n\n def func(x, by, name):\n return x.groupby(by)['PROBA'].mean().rank(pct=True).rename(name)\n\n for k, v in configs.items():\n Xs, test_Xs = list(), list()\n for f in v:\n ret = self.data_io_manager.loadHDF('{loc}/{filename}'.format(loc=self.input_loc, filename=f))\n\n if not ret:\n continue\n\n 
Xs.append(ret.get('train_oof', pd.DataFrame()))\n test_Xs.append(ret.get('test_oof', pd.DataFrame()))\n\n X = pd.concat(Xs, axis=0)\n test_X = pd.concat(test_Xs, axis=0)\n\n X = func(X.reset_index(), X.index.name, k)\n test_X = func(test_X.reset_index(), test_X.index.name, k)\n\n self.X_meta.append(X)\n self.test_X_meta.append(test_X)\n\n filename = filename_mlxtend_meta_features_external.format(loc=self.input_loc, prefix=self.prefix)\n ret = self.data_io_manager.loadHDF(filename)\n if ret:\n df = ret.get('train_meta', pd.DataFrame()).apply(lambda x: x.rank(pct=True))\n self.X_meta.append(df)\n df = ret.get('test_meta', pd.DataFrame()).apply(lambda x: x.rank(pct=True))\n self.test_X_meta.append(df)\n\n self.X_meta = pd.concat(self.X_meta, axis=1)\n self.test_X_meta = pd.concat(self.test_X_meta, axis=1)\n logger.info('Load Meta {}, {}'.format(self.X_meta .shape, self.test_X_meta.shape))\n return self.X_meta, self.test_X_meta\n\n def buildMetaFeatures(self, model_zoo):\n for clf in self.meta_feature_configs:\n name = clf.get('name', 'foobar')\n use_features_in_secondary = clf.get('use_features', True)\n stratify = clf.get('stratify', True)\n nr_folds = clf.get('cv', 3)\n seed = clf.get('seed', 42)\n\n bases = [model_zoo.get(c) for c in clf['sources']]\n base_classifiers = [self._create_model_object(clf['model'],\n clf.get('params', dict()),\n clf.get('task', None),\n model_zoo) for clf in bases]\n\n logger.info('create meta feature extractor')\n self.clfs.append(StackingCVClassifier(base_classifiers,\n self._create_model_object(clf['meta_classifier'],\n clf.get('params', dict()),\n clf.get('task', None),\n model_zoo),\n use_probas=True,\n cv=nr_folds,\n use_features_in_secondary=use_features_in_secondary,\n stratify=stratify,\n store_train_meta_features=True,\n use_clones=True)\n )\n self.clfs_info.append(self._set_submit_filename(level=1,\n name=name,\n feature_num=None,\n score=None,\n nr_fold=nr_folds,\n seed=seed)\n )\n logger.info('Read in on {} base learners for {}'.format(len(bases), name))\n\n logger.info('Read in {} meta feature extractors'.format(len(self.clfs)))\n\n def buildMetaClassifiers(self, model_zoo):\n for clf in self.meta_stacker_configs:\n name = clf.get('name', 'foobar')\n use_features_in_secondary = clf.get('use_features', True)\n stratify = clf.get('stratify', True)\n nr_folds = clf.get('cv', 3)\n seed = clf.get('seed', 42)\n\n bases = clf['base_classifiers']\n logger.info('Learn on {} base learner'.format(len(bases)))\n base_classifiers = [self._create_model_object(clf['model'],\n clf.get('params', dict()),\n clf.get('task', None),\n model_zoo) for clf in bases]\n self.meta_clfs.append(StackingCVClassifier(base_classifiers,\n self._create_model_object(clf['meta_classifier'],\n clf.get('params', dict()),\n clf.get('task', None),\n model_zoo),\n use_probas=True,\n cv=nr_folds,\n use_features_in_secondary=use_features_in_secondary,\n stratify=stratify,\n store_train_meta_features=True,\n use_clones=True)\n )\n self.meta_clfs_info.append(self._set_submit_filename(level=2,\n name=name,\n feature_num=None,\n score=None,\n nr_fold=nr_folds,\n seed=seed)\n )\n logger.info('Read in on {} base learners for {}'.format(len(bases), name))\n\n logger.info('Read in {} meta stackers'.format(len(self.meta_clfs)))\n\n def fitSingleTask(self, clf, X, y, test_X, info={}, nr_class=2, opt_submit=True):\n clf.fit(X.values, y.values)\n X_new = clf.train_meta_features_\n p = pd.DataFrame({'TARGET': clf.predict_proba(X.values)[:, -1]},\n index=X.index)\n\n test_X_new = 
clf.predict_meta_features(test_X.values)\n test_p = pd.DataFrame({'TARGET': clf.predict_proba(test_X.values)[:, -1]},\n index=test_X.index)\n\n logger.info('X_meta={}, test_X_meta={}'.format(X_new.shape, test_X_new.shape))\n counter = [i for i in range(1, X_new.shape[1], nr_class)]\n bases_auc = [roc_auc_score(y, X_new[:, i]) for i in counter]\n #bases_p = [X_new[:, i] for i in counter]\n #tests_p = [test_X_new[:, i] for i in counter]\n if opt_submit:\n l = info['level'] - 1\n info.update({'feature_num': X.shape[1]})\n for i, s in zip(counter, bases_auc):\n p = pd.DataFrame({'TARGET': test_X_new[:, i]}, index=test_X.index)\n info.update({'level': l, 'score': s})\n self.saveSubmit(info,\n p,\n template=filename_submit_mlxtend_base)\n\n return X_new, test_X_new, p, test_p, bases_auc\n\n def fit_transform(self, X, y, test_X, seed=42):\n\n X = X.apply(lambda x: np.nan_to_num(x))\n test_X = test_X.apply(lambda x: np.nan_to_num(x))\n\n for i, (clf, info) in enumerate(zip(self.clfs, self.clfs_info), 1):\n name = info['model']\n logger.info('fit meta feature source: {}'.format(name))\n np.random.seed(info.get('seed', seed))\n\n X_new, test_X_new, p, test_p, scores = self.fitSingleTask(clf, X, y, test_X, info=info.copy())\n info.update({'feature_num':X_new.shape[1], 'score': max(scores)})\n self.saveSubmit(info,\n test_p,\n template=filename_submit_mlxtend_meta)\n\n columns = ['{}_{}'.format(name, j) for j in range(X_new.shape[1])]\n self.X_meta.append(pd.DataFrame(X_new, index=X.index, columns=columns))\n self.test_X_meta.append(pd.DataFrame(test_X_new, index=test_X.index, columns=columns))\n\n X = pd.concat(self.X_meta, axis=1)\n test_X = pd.concat(self.test_X_meta, axis=1)\n logger.info('transform meta feature for X={}, test_X={}'.format(X.shape, test_X.shape))\n self.saveMetaFeatures(info, {'train_meta': X, 'test_meta' : test_X})\n\n def fit_predict(self, X, y, test_X, seed=42):\n for i, (clf, info) in enumerate(zip(self.meta_clfs, self.meta_clfs_info), 1):\n name = info['model']\n logger.info('fitting meta stackers {}'.format(name))\n np.random.seed(info.get('seed', seed))\n\n X = self._process_meta_features(self.X_meta, gamma=None).reindex(X.index)\n test_X = self._process_meta_features(self.test_X_meta, gamma=None).reindex(test_X.index)\n logger.info('processed for X_meta: {}, {}'.format(X.shape, test_X.shape))\n X_new, test_X_new, p, test_p, scores = self.fitSingleTask(clf, X, y, test_X, info=info.copy())\n info.update({'feature_num':X_new.shape[1], 'score': max(scores)})\n self.saveSubmit(info,\n test_p,\n template=filename_submit_mlxtend_meta)\n\n self.saveMetaFeatures(info, {'train_meta': X, 'test_meta': test_X}, stacker_level=True)\n\n @staticmethod\n def _process_meta_features(X, gamma=None):\n for k in combinations(X.columns, 2):\n X['_X_'.join(k)] = X[list(k)].product(axis=1).apply(lambda x: np.sqrt(x))\n\n #logger.info('x processed: {}'.format(X_agg.shape))\n return X.apply(lambda x: np.nan_to_num(x))\n\n def set_model(self, m, params):\n params = SwitchDevice(params, enable_gpu=self.enable_gpu)\n\n availabe_params = m().get_params()\n if any([k not in availabe_params for k in params.keys()]):\n ret = m(**params)\n else: # need all parameters in get_params() so safe to call set_params()\n ret = m().set_params(**params)\n\n logger.info('set {}'.format(ret))\n return ret\n\n def _create_model_object(self, model, parameters, task, model_zoo):\n # TODO: enable GPU assist\n\n if task in model_zoo.keys():\n parameters = model_zoo[task].get('params', {})\n logger.info('load parameters {} 
from model zoo: {}'.format(task, parameters))\n\n hpo_export = model_zoo[task].get('task', None)\n if hpo_export:\n filename = filename_hpo_external.format(loc=self.params_loc,\n prefix=self.prefix,\n task=hpo_export)\n if CheckFileExist(filename):\n parameters = LoadPickle(filename)\n logger.info('Update {} from {}'.format(hpo_export, filename))\n\n\n if isinstance(parameters.get('base_estimator', None), str):\n n = parameters.get('base_estimator', None)\n if n in model_zoo.keys():\n params = model_zoo[n].get('params', {})\n sub_model = model_zoo[n].get('model', None)\n logger.info('override parameters {} from model zoo: {}'.format(n, params))\n parameters['base_estimator'] = self.set_model(sub_model, params)\n\n return self.set_model(model, parameters)\n\n\n", "id": "9505438", "language": "Python", "matching_score": 3.132265090942383, "max_stars_count": 0, "path": "lib/AutoStacker.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue July 26 2018\n\n@author: cttsai\n\"\"\"\n\nimport os, sys\nimport pickle\nimport numpy as np\nimport pandas as pd\n\nfrom skopt import gp_minimize\nfrom sklearn.model_selection import cross_val_score\n\nfrom LibConfigs import logger, file_dir_path, model_selection_object\nfrom LibConfigs import filename_hpo_intermediate, filename_hpo_result, filename_hpo_external\nfrom Utility import CheckFileExist, ComposeResultName, SwitchDevice\n\nclass ScikitOptimize(object):\n \"\"\"\n \"\"\"\n def __init__(self, model, configs={}, task_name=None, data_prefix=None):\n self.model = model\n self.task_name = task_name\n self.data_prefix = data_prefix\n self.params_dir = file_dir_path.get('params', '../params')\n\n #skopt\n search_settings = configs.get(\"search_settings\", {})\n self.n_calls = search_settings.get(\"n_calls\", 15)\n self.random_state = search_settings.get(\"random_state\", 42)\n self.n_init_points = search_settings.get(\"n_inits\", 10)\n\n if self.n_init_points >= self.n_calls:\n logger.warning('initial points {} is larger than n_calls {}'.format(self.n_init_points,\n self.n_calls))\n\n #validation\n evalute_settings = configs.get(\"evaluation_settings\", {})\n\n self.valid_type = evalute_settings.get(\"validation\", \"KFold\")\n self.nr_fold = evalute_settings.get(\"nr_fold\", 3)\n self.split_seed = evalute_settings.get(\"split_seed\", 42)\n self.metric = evalute_settings.get(\"eval_metric\", \"neg_log_loss\")\n\n #model\n self.init_params = configs.get(\"initialize\", {})\n self.search_space = configs.get(\"search_space\", {})\n self.set_params_safe = self._check_parameters()\n\n self.optimized_params = {}\n self.filename_hpo_iter = ''\n self.filename_hpo_best = ''\n\n #initializing\n self._search_space_initialize()\n\n #\n self.filestem_meta = {\n 'level': 0,\n 'model': self.task_name,\n 'feature_num': 0,\n 'score': 0,\n 'fold': self.nr_fold, }\n\n def _search_space_initialize(self):\n self.eval_params_name = sorted([k for k in self.search_space.keys()])\n self.search_params_list = [self.search_space[k] for k in self.eval_params_name]\n logger.info('search range of skopt:')\n for k, v in self.search_space.items():\n logger.info('search {} in {}'.format(k, v))\n\n def _current_file_stem(self):\n return ComposeResultName(self.filestem_meta)\n #\n def _check_parameters(self):\n m = self.model()\n availabe_params = m.get_params()\n parameters = [k for k in self.init_params.keys()] + [k for k in self.search_space.keys()]\n if any([k not in availabe_params for k in parameters]):\n return False\n else: # need all 
parameters in get_params() so safe to call set_params()\n return True\n\n def get_result_filename(self):\n return self.filename_hpo_best\n\n def get_optimal_parameters(self):\n if not self.optimized_params:\n logger.warning('need to run optimize first')\n\n return self.optimized_params.copy()\n\n def load_hyperparameters(self, filename):\n if not CheckFileExist(filename, silent=False):\n logger.warning('no hpo parameters load from {}'.format(filename))\n return {}\n\n with open(filename, 'rb') as f:\n params = pickle.load(f)\n logger.info('load from {} with params:{}'.format(filename, params))\n return params\n\n @staticmethod\n def _save_pickle(filename, obj):\n with open(filename, 'wb') as f:\n pickle.dump(obj, f)\n\n def save_hyperparameters(self, export=False, show_iter=True, remove_old=True):\n if not self.optimized_params:\n logger.warning('need to run optimize first')\n return False\n\n params = SwitchDevice(self.optimized_params, enable_gpu=False)\n\n if export:\n filename = filename_hpo_external.format(loc=self.params_dir,\n prefix=self.data_prefix,\n task=self.task_name)\n logger.warning('export for external module: {}'.format(filename))\n self._save_pickle(filename, obj=params)\n return filename\n\n if remove_old and CheckFileExist(self.filename_hpo_best, silent=True):\n os.remove(self.filename_hpo_best)\n\n stem = self._current_file_stem()\n if show_iter:\n self.filename_hpo_iter = filename_hpo_intermediate.format(loc=self.params_dir,\n prefix=self.data_prefix,\n iter_num=self.nr_iteration,\n stem=stem)\n self._save_pickle(self.filename_hpo_iter, obj=params)\n\n #write current best anyway\n self.filename_hpo_best = filename_hpo_result.format(loc=self.params_dir,\n prefix=self.data_prefix,\n stem=stem)\n self._save_pickle(self.filename_hpo_best, obj=params)\n #self.load_hyperparameters(filename) # attemp to reload\n return True\n\n def _evaluate(self, eval_params):\n eval_params = dict(zip(self.eval_params_name, eval_params))\n tuning_params = self.init_params.copy()\n tuning_params.update(eval_params)\n\n # reinitialize cv\n cv_obj = self.nr_fold\n if self.valid_type == 'TimeSeriesSplit':\n cv_obj = model_selection_object[self.valid_type](n_splits=self.nr_fold)\n elif 'KFold' in self.valid_type:\n cv_obj = model_selection_object[self.valid_type](n_splits=self.nr_fold,\n shuffle=True,\n random_state=self.split_seed)\n\n if self.set_params_safe:\n try:\n m = self.model().set_params(**tuning_params)\n except:\n logger.warning('fail to use set_params')\n m = self.model(**tuning_params)\n logger.warning('model params={}'.format(m.get_params()))\n else: # unless some parameters cannot pass through set_params()\n m = self.model(**tuning_params)\n\n score = np.mean(cross_val_score(m,\n self.X,\n self.y,\n cv=cv_obj,\n n_jobs=1,\n scoring=self.metric))\n\n self.nr_iteration += 1\n self.best_score = max(self.best_score, score)\n\n # save the current best paramerters here\n if self.best_score == score:\n # update new result\n self.filestem_meta.update({'score': score})\n self.optimized_params = tuning_params.copy()\n if self.nr_iteration >= self.n_init_points:\n self.save_hyperparameters(show_iter=True)\n else:\n self.save_hyperparameters(show_iter=False)\n\n if self.nr_iteration == self.n_init_points: # save after intinializing\n self.save_hyperparameters(show_iter=False)\n\n logger.info('iteration {:04d}/{:04d}, current score: {:04f}, best: {:.4f}, current params: {}, best params: {}'.format(self.nr_iteration,\n self.n_calls,\n score, self.best_score,\n tuning_params, 
self.optimized_params))\n\n return -score # for minimize, most scikit-learn metric are larger the better\n\n def search(self, X, y):\n self.X = X.apply(lambda x: np.nan_to_num(x))\n self.y = y\n\n self.filestem_meta.update({'feature_num': X.shape[1],})\n\n self.nr_iteration = 0\n self.best_score = 0\n logger.info('evaluate {} at {} iteration, {}-fold cv, metric={}'.format(self.task_name,\n self.n_calls,\n self.nr_fold,\n self.metric))\n gp_optimizer = gp_minimize(self._evaluate,\n self.search_params_list,\n n_calls=self.n_calls,\n n_random_starts=self.n_init_points,\n random_state=self.random_state,\n verbose=False)\n\n optimized_params = {k: v for k, v in zip(self.eval_params_name, gp_optimizer.x)} # not using\n logger.info('best cv score: {}, hyperparameters={}'.format(self.best_score, optimized_params))\n return self.optimized_params.copy()\n\n", "id": "2438355", "language": "Python", "matching_score": 3.5207810401916504, "max_stars_count": 0, "path": "lib/ScikitOptimize.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nThis file keep all the configs associated in lib folder.\n\nCreated on Tue July 10 2018\n\n@author: cttsai\n\"\"\"\nfrom sklearn.model_selection import KFold, StratifiedKFold\nfrom sklearn.model_selection import ShuffleSplit, StratifiedShuffleSplit\nfrom sklearn.model_selection import TimeSeriesSplit\n\nimport logging # the default logging format\nformatter = '%(asctime)s %(filename)s:%(lineno)d: %(message)s'\nlogging.basicConfig(format=formatter, level='INFO')\nlogger = logging.getLogger(__name__)\n\n#import pandas as pd\n#pd.set_option('display.height', 2000)\n#pd.set_option('display.max_rows', 2000)\n\nfile_dir_path = {\n 'data' : './data',\n 'params' : './params',\n 'configs' : './configs',\n 'output' : './output',\n}\n\nhdf5_compress_option = {\n 'complevel' : 5,\n 'complib' : 'zlib',\n}\n\nfast_hdf5_compress_option = {\n 'complevel' : 3,\n 'complib' : 'zlib',\n}\n\ndata_provider_refresh_configs = {\n 'from_csv' : {'level': 0, 'filename': None},\n 'from_raw_cache' : {'level': 1, 'filename': 'cache_{header}_raw.hdf5'},\n 'from_processed' : {'level': 2, 'filename': 'cache_{header}_processed.hdf5'},\n 'from_train_test' : {'level': 3, 'filename': 'cache_{header}_train_test.hdf5'},\n}\n\nmodel_selection_object = {\n 'KFold' : KFold,\n 'StratifiedKFold' : StratifiedKFold,\n 'ShuffleSplit' : ShuffleSplit,\n 'StratifiedShuffleSplit' : StratifiedShuffleSplit,\n 'TimeSeriesSplit' : TimeSeriesSplit,\n}\n\n\n# this enable dict() is pairing with the diable one to open and close gpu related paramters\nenable_gpu_options = {\n 'device' : {'cpu': 'gpu'},\n 'tree_method': {\n 'exact': 'gpu_exact',\n 'hist' : 'gpu_hist',\n },\n}\n\ndisable_gpu_options = {k: {vv: vk for vk, vv in v.items()} for k, v in enable_gpu_options.items()}\n\n#Scikit-Opt\nfilename_hpo_intermediate = '{loc}/skopt_{prefix}_{stem}_hyperparameters_iter{iter_num:04d}.pk'\nfilename_hpo_result = '{loc}/skopt_{prefix}_{stem}_hyperparameters.pk'\nfilename_hpo_external = '{loc}/skopt_{prefix}_{task}_hyperparameters.pk'\n\n#stacker\nfilename_submit_mlxtend_meta = '{loc}/subm_{prefix}_mlxtend_{stem}_meta.csv'\nfilename_submit_mlxtend_base = '{loc}/subm_{prefix}_mlxtend_{stem}_base.csv'\nfilename_mlxtend_meta_features = '{loc}/{prefix}_mlxtend_{stem}_meta_features.hdf5'\nfilename_mlxtend_meta_features_external = '{loc}/{prefix}_mlxtend_meta_features.hdf5'\nfilename_mlxtend_stacker_external = '{loc}/{prefix}_mlxtend_meta_stackers.hdf5'\n", "id": "11974947", "language": "Python", "matching_score": 
3.1436333656311035, "max_stars_count": 0, "path": "lib/LibConfigs.py" }, { "content": "import logging\nformatter = '%(asctime)s %(filename)s:%(lineno)d: %(message)s'\nlogging.basicConfig(format=formatter, level='INFO')\nlogger = logging.getLogger(__name__)\n\nimport sys, os\nsys.path.insert(0, './lib')\nsys.path.insert(0, './external')\n\n\nlogger.info('working directory: {}'.format(os.getcwd()))\nfor p in sys.path[:sys.path.index(os.getcwd())]:\n logger.info('expend to {}'.format(p))\n\nfrom lib.Utility import MkDirSafe\nfrom lib.LibConfigs import file_dir_path\n\nfor path in file_dir_path:\n MkDirSafe(path)\n", "id": "6025630", "language": "Python", "matching_score": 0.005824801046401262, "max_stars_count": 0, "path": "ModulePaths.py" }, { "content": "configs = {\n \"func_x_list\": [\n (\"RandomTruncateHead\",\n {\"min_length\": 128, \"max_length\": 256, \"random_seed\": 42, \"threshold\": 0.75}),\n (\"RandomTruncateTail\",\n {\"min_length\": 128, \"max_length\": 256, \"random_seed\": 42, \"threshold\": 0.75}),\n (\"RandomDropWords\",\n {\"min_length\": 64, \"max_drop\": 8, \"drop_rate\": .2, \"random_seed\": 42, \"threshold\": 0.5})\n ],\n\n \"func_y_list\": [\n (\"LabelSoften\", {\"min_value\": 0.1, \"max_value\": 0.9, \"random_seed\": 42, \"threshold\": 0.95})\n ],\n\n \"model\": \"distilroberta-base\",\n \"cv_splitter\": {\n \"splitter_gen\": \"GroupKFold\",\n \"split_index\": \"unique_id_question_body\",\n \"params\": {\n \"n_splits\": 5,\n },\n\n },\n\n \"fit_params\": {\n \"batch_size\": 8,\n \"epochs\": 10,\n \"verbose\": 1,\n # \"callbacks\": None,\n \"shuffle\": True,\n \"steps_per_epoch\": None,\n \"validation_steps\": None,\n \"validation_freq\": 1,\n },\n\n \"special_tokens_dict\": {},\n \"question\": {\n \"column\": \"question_title\",\n \"column_pair\": \"question_body\",\n \"tokenize\": {\n \"add_special_tokens\": True,\n \"max_length\": 384, # 256,\n \"stride\": 0,\n \"truncation_strategy\": \"longest_first\",\n \"return_tensors\": \"tf\",\n \"return_input_lengths\": False,\n \"return_attention_masks\": True,\n \"pad_to_max_length\": True,\n },\n },\n\n \"answer\": {\n \"column\": \"question_title\",\n \"column_pair\": \"answer\",\n \"tokenize\": {\n \"add_special_tokens\": True,\n \"max_length\": 512, # 384,\n \"stride\": 0,\n \"truncation_strategy\": \"longest_first\",\n \"return_tensors\": \"tf\",\n \"return_input_lengths\": False,\n \"return_attention_masks\": True,\n \"pad_to_max_length\": True,\n },\n },\n}\n", "id": "1632058", "language": "Python", "matching_score": 6.791959762573242, "max_stars_count": 2, "path": "configs/distillroberta_augment_configs.py" }, { "content": "configs = {\n \"model\": \"distilroberta-base\",\n\n \"cv_splitter\": {\n \"splitter_gen\": \"GroupKFold\",\n \"split_index\": \"unique_id_question_body\",\n \"params\": {\n \"n_splits\": 5,\n },\n },\n\n \"fit_params\": {\n \"batch_size\": 8,\n \"epochs\": 10,\n \"verbose\": 1,\n # \"callbacks\": None,\n \"shuffle\": True,\n \"steps_per_epoch\": None,\n \"validation_steps\": None,\n \"validation_freq\": 1,\n },\n\n \"special_tokens_dict\": {},\n \"question\": {\n \"column\": \"question_title\",\n \"column_pair\": \"question_body\",\n \"tokenize\": {\n \"add_special_tokens\": True,\n \"max_length\": 384, # 256,\n \"stride\": 0,\n \"truncation_strategy\": \"longest_first\",\n \"return_tensors\": \"tf\",\n \"return_input_lengths\": False,\n \"return_attention_masks\": True,\n \"pad_to_max_length\": True,\n },\n },\n\n \"answer\": {\n \"column\": \"question_title\",\n \"column_pair\": 
\"answer\",\n \"tokenize\": {\n \"add_special_tokens\": True,\n \"max_length\": 512, # 384,\n \"stride\": 0,\n \"truncation_strategy\": \"longest_first\",\n \"return_tensors\": \"tf\",\n \"return_input_lengths\": False,\n \"return_attention_masks\": True,\n \"pad_to_max_length\": True,\n },\n },\n}\n", "id": "6477673", "language": "Python", "matching_score": 3.15443754196167, "max_stars_count": 2, "path": "configs/distillroberta_configs.py" }, { "content": "from typing import Callable, Dict, Optional, List, Tuple\nimport os\nimport sys\nimport argparse\nimport random\n\nfrom scipy.stats import spearmanr\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import GroupKFold, StratifiedKFold, StratifiedShuffleSplit\n\nimport tensorflow as tf\nimport transformers\n\n# Workaround to run on kaggle server\nis_kaggle_server: bool = \"kaggle\" in os.getcwd().split(\"/\") # check if in kaggle server\nEXTERNAL_UTILS_LIB = \"../nlp_utils\"\nINPUT_DIR = \"../input\"\nif not is_kaggle_server:\n sys.path.append(EXTERNAL_UTILS_LIB)\nelse:\n EXTERNAL_UTILS_LIB = \"/kaggle/input/nlp_utils\"\n sys.path.append(EXTERNAL_UTILS_LIB)\n\nfrom nlp_utils import BaselineTransformerTFSolver\nfrom nlp_utils import AugmentedTransformerTFSolver\n\n\ndef seed_everything(seed: int = 42):\n # Python/TF Seeds\n random.seed(seed)\n np.random.seed(seed)\n os.environ[\"TF_DETERMINISTIC_OPS\"] = \"1\"\n os.environ[\"TF_CUDNN_DETERMINISTIC\"] = \"true\"\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n tf.random.set_seed(seed)\n\n\ndef spearmanr_ignore_nan(trues: np.array, preds: np.array):\n return np.nanmean(\n [spearmanr(ta, pa).correlation for ta, pa in\n zip(np.transpose(trues), np.transpose(np.nan_to_num(preds)) + 1e-7)])\n\n\ndef batch_encode_sequence(\n df: pd.DataFrame, tokenizer, column_question: str, column_answer: str,\n column_question_pair: Optional[str] = None, tokenize_config_question: Optional[Dict] = None,\n column_answer_pair: Optional[str] = None, tokenize_config_answer: Optional[Dict] = None,\n is_distilled: bool = False):\n # FIXME: fix padding to max length not working\n encode_sequence = df[column_question]\n if column_question_pair is not None:\n encode_sequence = zip(df[column_question], df[column_question_pair])\n if tokenize_config_question is None:\n tokenize_config_question = dict()\n\n tokenized_question = tokenizer.batch_encode_plus(encode_sequence, **tokenize_config_question)\n q_input_ids = tokenized_question[\"input_ids\"].numpy()\n q_attention_mask = tokenized_question[\"attention_mask\"].numpy()\n q_token_type_ids = tokenized_question[\"token_type_ids\"].numpy()\n\n # fix?\n max_length = tokenize_config_question[\"max_length\"]\n if max_length != q_input_ids.shape[1]:\n appended_length = max_length - q_input_ids.shape[1]\n q_input_ids = np.pad(q_input_ids, ((0, 0), (0, appended_length)), constant_values=tokenizer.unk_token_id)\n\n if max_length != q_attention_mask.shape[1]:\n appended_length = max_length - q_attention_mask.shape[1]\n q_attention_mask = np.pad(q_attention_mask, ((0, 0), (0, appended_length)), constant_values=0)\n\n encode_sequence = df[column_answer]\n if column_answer_pair is not None:\n encode_sequence = zip(df[column_answer], df[column_answer_pair])\n if tokenize_config_answer is None:\n tokenize_config_answer = dict()\n\n tokenized_answer = tokenizer.batch_encode_plus(encode_sequence, **tokenize_config_answer)\n a_input_ids = tokenized_answer[\"input_ids\"].numpy()\n a_attention_mask = tokenized_answer[\"attention_mask\"].numpy()\n a_token_type_ids = 
tokenized_answer[\"token_type_ids\"].numpy()\n\n # fix?\n max_length = tokenize_config_answer[\"max_length\"]\n if max_length != a_input_ids.shape[1]:\n appended_length = max_length - a_input_ids.shape[1]\n a_input_ids = np.pad(a_input_ids, ((0, 0), (0, appended_length)), constant_values=tokenizer.unk_token_id)\n\n if max_length != a_attention_mask.shape[1]:\n appended_length = max_length - a_attention_mask.shape[1]\n a_attention_mask = np.pad(a_attention_mask, ((0, 0), (0, appended_length)), constant_values=0)\n\n # print(q_input_ids.shape, q_attention_mask.shape, a_input_ids.shape, a_attention_mask.shape)\n if is_distilled:\n return q_input_ids, q_attention_mask, a_input_ids, a_attention_mask\n\n return q_input_ids, q_attention_mask, q_token_type_ids, a_input_ids, a_attention_mask, a_token_type_ids\n\n\ndef process_read_dataframe(df: pd.DataFrame):\n bins = [0.25, 0.5, 0.75, 0.9, 0.95, 0.99]\n # group\n df[\"unique_id_question_body\"] = df[\"question_body\"].astype(\"category\").cat.codes\n df[\"unique_id_question_body\"] = df[\"category\"].str.cat(df[\"unique_id_question_body\"].astype(\"str\"), sep=\"_\")\n df[\"host_stem\"] = df[\"host\"].str.split(\".\").apply(lambda x: \".\".join(x[-2:]))\n group_columns = [\"category\", \"host_stem\", \"unique_id_question_body\"]\n df[group_columns] = df[group_columns].astype(\"category\")\n\n # corpus\n columns = [\"question_title\", \"question_body\", \"answer\"]\n for col in columns:\n df[f\"count_{col}\"] = df[col].str.split(\" \").apply(lambda x: len(x)).astype(np.int32)\n\n df[\"count_question_title_body\"] = (df[\"count_question_title\"] + df[\"count_question_body\"]).astype(np.int32)\n df[\"count_question_title_body_answer\"] = (df[\"count_question_title_body\"] + df[\"count_answer\"]).astype(np.int32)\n stats_columns = [f\"count_{col}\" for col in columns] + [\n \"count_question_title_body\", \"count_question_title_body_answer\"]\n\n df_stats = df[stats_columns].describe(bins)\n df_stats_split = df.groupby(\"category\")[stats_columns].apply(lambda x: x.describe(bins)).unstack(0).T\n\n # concat\n # df[\"question_title_body\"] = df[\"question_title\"].str.cat(others=df[\"question_body\"], sep=\" \")\n # columns = columns + [\"question_title_body\"]\n return df[columns], df[group_columns], df_stats, df_stats_split\n\n\ndef read_train_test(data_dir: str, index_name: str, inference_only: bool = False):\n # output_categories\n target_columns = [\n \"question_asker_intent_understanding\", \"question_body_critical\", \"question_conversational\",\n \"question_expect_short_answer\", \"question_fact_seeking\", \"question_has_commonly_accepted_answer\",\n \"question_interestingness_others\", \"question_interestingness_self\", \"question_multi_intent\",\n \"question_not_really_a_question\", \"question_opinion_seeking\", \"question_type_choice\", \"question_type_compare\",\n \"question_type_consequence\", \"question_type_definition\", \"question_type_entity\", \"question_type_instructions\",\n \"question_type_procedure\", \"question_type_reason_explanation\", \"question_type_spelling\",\n \"question_well_written\", \"answer_helpful\", \"answer_level_of_information\", \"answer_plausible\",\n \"answer_relevance\", \"answer_satisfaction\", \"answer_type_instructions\", \"answer_type_procedure\",\n \"answer_type_reason_explanation\", \"answer_well_written\"\n ]\n output_categories_question = list(\n filter(lambda x: x.startswith(\"question_\"), target_columns))\n output_categories_answer = list(filter(lambda x: x.startswith(\"answer_\"), target_columns))\n 
output_categories = output_categories_question + output_categories_answer\n\n df_test = pd.read_csv(os.path.join(data_dir, \"test.csv\")).set_index(index_name)\n test_x, test_groups, test_stats, test_stats_split = process_read_dataframe(df_test)\n print(f\"test shape = {df_test.shape}\\n{test_stats}\\n\")\n\n data = {\n \"test_x\": test_x, \"test_groups\": test_groups, \"output_categories_question\": output_categories_question,\n \"output_categories_answer\": output_categories_answer, \"output_categories\": output_categories\n }\n if inference_only:\n return data\n\n # training\n df_train = pd.read_csv(os.path.join(data_dir, \"train.csv\")).set_index(index_name)\n\n # labels\n df_train[target_columns] = df_train[target_columns].astype(np.float32)\n train_y = df_train[output_categories]\n train_x, train_groups, train_stats, train_stats_split = process_read_dataframe(df_train)\n print(f\"train shape = {df_train.shape}\\n{train_stats}\\n\")\n print(f\"Split by category: \\n{train_stats_split}\\nResorted\\n{train_stats_split.swaplevel().sort_index()}\\n\")\n data.update({\n \"train_x\": train_x, \"train_y\": train_y, \"train_groups\": train_groups, \"train_stats\": train_stats,\n \"train_stats_split\": train_stats_split, \"output_categories_question\": output_categories_question,\n \"output_categories_answer\": output_categories_answer}\n )\n return data\n\n\ndef make_submission(preds: np.array, data_dir: str, index_name: str):\n df_sub = pd.read_csv(os.path.join(data_dir, \"sample_submission.csv\")).set_index(index_name)\n df_sub[df_sub.columns] = preds[df_sub.columns]\n preds.index.name = index_name\n preds.to_csv(\"submission.csv\", index=True)\n return preds\n\n\ndef _cv_splitter_factory(splitter_gen: str, params: Dict):\n if splitter_gen not in [\"GroupKFold\", \"StratifiedKFold\", \"StratifiedShuffleSplit\"]:\n err_msg = f'{splitter_gen} is not supported'\n raise ValueError(err_msg)\n\n return globals()[splitter_gen](**params)\n\n\ndef parse_command_line():\n default_data_dir: str = \"../input/google-quest-challenge/\"\n default_pretrained_w_root_dir = \"../input/hugging_face_pretrained/\"\n default_model_configs_path: str = \"../configs/bert_configs.py\"\n default_model_weight_filename: str = \"tf_model_fine-tuned.h5\"\n\n parser = argparse.ArgumentParser(\n description=\"Google Quest Q&A Bert Learner\", add_help=True,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--data-dir\", type=str, default=default_data_dir, help=\"folder for data\")\n parser.add_argument(\n \"--weights-root-dir\", type=str, default=default_pretrained_w_root_dir,\n help=\"root folder for pretrained weights\")\n parser.add_argument(\"--model-weights-filename\", type=str, default=default_model_weight_filename,\n help=\"fine tuned filename for model weights\")\n parser.add_argument(\"--configs\", type=str, default=default_model_configs_path, help=\"path to model configs\")\n parser.add_argument(\"--inference-only\", action=\"store_true\", default=False, help=\"inference only\")\n parser.add_argument(\n \"--use-class-weights\", action=\"store_true\", default=False, help=\"weighted loss by class weight\")\n parser.add_argument(\n \"--training-augmentation\", action=\"store_true\", default=False, help=\"training with augmentation\")\n args = parser.parse_args()\n return args\n\n\ndef initialize_configs(filename: str):\n if not os.path.exists(filename):\n raise ValueError(\"Spec file {spec_file} does not exist\".format(spec_file=filename))\n\n module_name = 
filename.split(os.sep)[-1].replace('.', '')\n\n import importlib.util\n spec = importlib.util.spec_from_file_location(module_name, filename)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n return module\n\n\ndef main():\n\n args = parse_command_line()\n\n configs = initialize_configs(args.configs).configs\n configs[\"model_weights_filename\"] = args.model_weights_filename\n\n fit_params = configs.get(\"fit_params\", dict())\n pretrained_model_type: str = configs.get(\"model\", \"bert-base-uncased\")\n configs[\"pretrained_model_dir\"] = os.path.join(args.weights_root_dir, pretrained_model_type)\n if pretrained_model_type.find(\"distil\") >= 0:\n configs[\"is_distilled\"] = True\n\n INDEX_NAME = 'qa_id'\n ##\n q_max_length = configs[\"question\"][\"tokenize\"][\"max_length\"]\n a_max_length = configs[\"answer\"][\"tokenize\"][\"max_length\"]\n generated_working_dir = f\"{pretrained_model_type}_q{q_max_length}_a{a_max_length}\"\n\n data = read_train_test(data_dir=args.data_dir, index_name=INDEX_NAME, inference_only=args.inference_only)\n if args.use_class_weights:\n train_w = data['train_y'].sum()\n class_weight = (train_w.median() / train_w).apply(np.sqrt)\n fit_params[\"class_weight\"] = {i: w for i, w in enumerate(class_weight)}\n generated_working_dir = f\"{generated_working_dir}_weighted\"\n print(f\"class weights:\\n{class_weight}\")\n\n # cv setup\n splitter_configs = configs.get(\"cv_splitter\")\n splitter = _cv_splitter_factory(\n splitter_gen=splitter_configs[\"splitter_gen\"], params=splitter_configs[\"params\"])\n\n solver_gen = BaselineTransformerTFSolver\n if args.training_augmentation:\n solver_gen = AugmentedTransformerTFSolver\n generated_working_dir = f\"{generated_working_dir}_augmented\"\n\n WORKING_DIR = os.path.join(\"../input\", generated_working_dir)\n if args.training_augmentation:\n solver = solver_gen(\n fine_tuned_dir=WORKING_DIR, cv_splitter=splitter, score_func=spearmanr_ignore_nan,\n encode_func=batch_encode_sequence, configs=configs,)\n solver.run(data, fit_params=fit_params, inference_only=args.inference_only)\n else:\n solver = solver_gen(\n fine_tuned_dir=WORKING_DIR, cv_splitter=splitter, score_func=spearmanr_ignore_nan,\n encode_func=batch_encode_sequence, configs=configs)\n solver.run(data, fit_params=fit_params, inference_only=args.inference_only)\n\n test_result = solver.test_prediction_\n make_submission(test_result, data_dir=args.data_dir, index_name=INDEX_NAME)\n return\n\n\nif \"__main__\" == __name__:\n print(f\"tensorflow version: {tf.__version__}\")\n print(f\"transformers version: {transformers.__version__}\")\n seed_everything()\n np.set_printoptions(suppress=True)\n main()\n # multi gpu setup;\n # tf.debugging.set_log_device_placement(True)\n # strategy = tf.distribute.MirroredStrategy()\n # with strategy.scope():\n # main()\n", "id": "12524849", "language": "Python", "matching_score": 4.1289567947387695, "max_stars_count": 2, "path": "script/tf_starter.py" }, { "content": "from typing import Optional, Callable, Dict, List, Tuple\nfrom functools import partial\nimport pandas as pd\nimport numpy as np\n\nimport tensorflow as tf\nfrom tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, LearningRateScheduler, TensorBoard\n# from tensorflow.keras.callbacks import ModelCheckpoint\n\nfrom ..Augmentation import AugmentationMaster\nfrom .BaseTransformerTFSolver import BaseTransformerTFSolver\nfrom ..Callback import CustomMetricEarlyStoppingCallback\n\n\ndef learning_rate_scheduler(epoch: int, lr: 
float, max_lr: float = 5e-4, factor: float = .5):\n lr_scheduled = tf.math.minimum(max_lr, lr * tf.math.exp(factor * epoch))\n if epoch > 0:\n print(f\"\\nNext epoch {epoch + 1}: previous learning rate: {lr:.6f} - scheduled to {lr_scheduled: .6f}\")\n return lr_scheduled\n\n\ndef custom_loss(y_true, y_pred):\n bce_loss = tf.keras.losses.BinaryCrossentropy()(y_true, y_pred)\n cosine_loss = tf.keras.losses.CosineSimilarity(axis=1)(y_true, y_pred)\n return bce_loss - cosine_loss\n\n\nclass BaselineTransformerTFSolver(BaseTransformerTFSolver):\n def __init__(\n self, fine_tuned_dir: str, score_func: Callable, encode_func: Callable, configs: Dict,\n cv_splitter: Optional = None, ):\n super().__init__(\n fine_tuned_dir=fine_tuned_dir, score_func=score_func, batch_encode_func=encode_func, configs=configs)\n\n self.cv_splitter = cv_splitter\n self.loss_direction: str = configs.get(\"loss_direction\", 'auto')\n self.split_index = configs.get(\"cv_splitter\", dict()).get(\"split_index\", \"category\")\n self.eval_metric = 'val_loss'\n\n def _model_fit(self, data, train_idx, model, validation_data, fit_params):\n train_x = data.get('train_x', None)\n train_y = data.get('train_y', None)\n\n # TODO: generator and data augmentation\n train_outputs = train_y.iloc[train_idx].values\n train_inputs = self._batch_encode(train_x.iloc[train_idx])\n\n print(\"\\nTraining classification head block only\")\n optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)\n model.compile(\n loss='binary_crossentropy', optimizer=optimizer, metrics=['binary_crossentropy', 'mse', 'mae'])\n fit_params_init = fit_params.copy()\n fit_params_init['epochs'] = 2\n model.fit(train_inputs, train_outputs, **fit_params_init)\n\n print(\"\\nFine tune the whole model w/ early stopping\")\n model.trainable = True\n optimizer = tf.keras.optimizers.Adam(learning_rate=5e-6)\n model.compile(\n loss='binary_crossentropy', optimizer=optimizer, metrics=['binary_crossentropy', 'mse', 'mae'])\n\n # callbacks\n warmup_lr_scheduler = partial(learning_rate_scheduler, max_lr=1e-4, factor=.25)\n lr_schedule = LearningRateScheduler(warmup_lr_scheduler)\n reduce_lr = ReduceLROnPlateau(\n monitor=self.eval_metric, factor=0.5, patience=2, min_lr=1e-6, model=self.loss_direction)\n early_stopping = CustomMetricEarlyStoppingCallback(\n data=validation_data, training_data=(train_inputs, train_outputs), score_func=self.score_func, patience=2,\n verbose=1, mode=\"auto\", restore_best_weights=True)\n tensorboard = TensorBoard(self.fine_tuned_dir_path)\n callbacks = [early_stopping, reduce_lr, lr_schedule, tensorboard] # model_checkpoint: got NotImplementedError\n model.fit(train_inputs, train_outputs, validation_data=validation_data, callbacks=callbacks, **fit_params)\n return self\n\n def _run_model_fine_tune(self, data: Dict, fit_params: Optional[Dict] = None, **kwargs):\n train_x = data.get('train_x', None)\n train_y = data.get('train_y', None)\n train_groups = data.get('train_groups', None)\n\n print(f\"data split by {self.split_index}: \")\n data_split = train_groups[self.split_index]\n # TODO: adding HPO in the future\n # FIXME: for now it only runs single model not cv models\n for fold, (train_idx, valid_idx) in enumerate(self.cv_splitter.split(\n X=data_split, y=data_split, groups=data_split), start=1):\n self.tokenizer, model = self._pipeline_factory(\n load_model_from_fine_tuned=False, output_size=len(self.target_columns))\n\n valid_outputs = train_y.iloc[valid_idx].values\n valid_inputs = self._batch_encode(train_x.iloc[valid_idx])\n\n # training\n 
self._model_fit(\n data, train_idx, model, validation_data=(valid_inputs, valid_outputs), fit_params=fit_params)\n model.save_weights(self.fine_tuned_model_weights_file_path_)\n\n preds = model.predict(valid_inputs)\n self.valid_score = self.score_func(valid_outputs, preds)\n self.preds_valid = pd.DataFrame(preds, index=train_x.iloc[valid_idx].index, columns=self.target_columns)\n self.trues_valid = train_y.iloc[valid_idx]\n print(f'best validation metric score: {self.valid_score:.3f}')\n break # FIXME: for now it only runs single model not cv models\n #\n return self\n\n\nfrom tensorflow.keras.utils import Sequence\n\n\nclass TokenizedSequence(Sequence):\n def __init__(\n self, batch_encode_func: Callable, tokenizer, configs_question: Dict, configs_answer: Dict,\n x_set: pd.DataFrame, y_set: pd.DataFrame, is_distilled: bool = False, func_x_list: Optional[Tuple] = None,\n func_y_list: Optional[Tuple] = None, batch_size: int = 8, random_seed: int = 42, ):\n\n self.rng = np.random.RandomState(random_seed)\n\n self.x: pd.DataFrame = x_set\n self.y: pd.DataFrame = y_set\n self.batch_size: int = batch_size\n\n #\n self.tokenizer = tokenizer\n self._batch_encode_func: Callable = batch_encode_func\n self.configs_question: Dict = configs_question\n self.configs_answer: Dict = configs_answer\n self.is_distilled: bool = is_distilled\n\n #\n self.transformers = AugmentationMaster(func_x_list, func_y_list)\n\n self._gen_sequence()\n\n def __len__(self):\n return int(np.ceil(self.x.shape[0] / self.batch_size))\n\n def __getitem__(self, idx: int):\n pos_start = idx * self.batch_size\n\n batch_y = self.y.iloc[pos_start:pos_start + self.batch_size].copy().apply(\n lambda y: self.transformers.transform(y=y)).values\n\n batch_x = self.x.iloc[pos_start:pos_start + self.batch_size].copy()\n q_col = self.configs_question[\"column\"]\n q_col_pair = self.configs_question.get(\"column_pair\", None)\n a_col = self.configs_answer[\"column\"]\n a_col_pair = self.configs_answer.get(\"column_pair\", None)\n\n for col in [q_col_pair, a_col_pair]:\n if col is None:\n continue\n\n batch_x[col] = batch_x[col].str.split().apply(\n lambda x: \" \".join(self.transformers.transform(x)))\n\n batch_x = self._batch_encode_func(\n batch_x, self.tokenizer, column_question=q_col, column_question_pair=q_col_pair,\n tokenize_config_question=self.configs_question[\"tokenize\"], column_answer=a_col,\n column_answer_pair=a_col_pair, tokenize_config_answer=self.configs_answer[\"tokenize\"],\n is_distilled=self.is_distilled)\n\n return batch_x, batch_y\n\n def _gen_sequence(self):\n sequence = self.y.index.tolist()\n self.rng.shuffle(sequence)\n self.x = self.x.reindex(index=sequence)\n self.y = self.y.reindex(index=sequence)\n return self\n\n def on_epoch_end(self):\n self._gen_sequence()\n return self\n\n\nclass AugmentedTransformerTFSolver(BaselineTransformerTFSolver):\n def __init__(\n self, fine_tuned_dir: str, score_func: Callable, encode_func: Callable, configs: Dict,\n cv_splitter: Optional = None, ):\n super().__init__(\n fine_tuned_dir=fine_tuned_dir, score_func=score_func, encode_func=encode_func, configs=configs,\n cv_splitter=cv_splitter)\n\n # augmentation\n self.func_x_list = configs.get(\"func_x_list\", list())\n self.func_y_list = configs.get(\"func_y_list\", list())\n\n def _model_fit(self, data: Dict, train_idx: List[int], model, validation_data, fit_params):\n train_x = data.get('train_x', None)\n train_y = data.get('train_y', None)\n\n # TODO: generator and data augmentation\n train_outputs = 
train_y.iloc[train_idx].values\n train_inputs = self._batch_encode(train_x.iloc[train_idx])\n\n print(\"\\nTraining classification head block only\")\n optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)\n model.compile(\n loss='binary_crossentropy', optimizer=optimizer, metrics=['binary_crossentropy', 'mse', 'mae'])\n fit_params_init = fit_params.copy()\n fit_params_init['epochs'] = 2\n model.fit(train_inputs, train_outputs, **fit_params_init)\n\n print(\"\\nFine tune the whole model\")\n model.trainable = True\n optimizer = tf.keras.optimizers.Adam(learning_rate=5e-6)\n model.compile(\n loss='binary_crossentropy', optimizer=optimizer, metrics=['binary_crossentropy', 'mse', 'mae'])\n\n # FIXME: data generator for tf training somehow broken, here is a hot fix\n augmented_iter = 10\n batch_size = 512 # len(train_idx)\n generator_train = TokenizedSequence(\n batch_encode_func=self._batch_encode_func, tokenizer=self.tokenizer, configs_question=self.configs_question,\n configs_answer=self.configs_answer, is_distilled=self.is_distilled, x_set=train_x.iloc[train_idx],\n y_set=train_y.iloc[train_idx], func_x_list=self.func_x_list, func_y_list=self.func_y_list,\n batch_size=batch_size, random_seed=42)\n\n fit_params_warmup = fit_params.copy()\n fit_params_warmup['epochs'] = 1\n warmup_lr_scheduler = partial(learning_rate_scheduler, max_lr=2e-5, factor=.1)\n lr_schedule = LearningRateScheduler(warmup_lr_scheduler)\n for i in range(augmented_iter):\n print(f\"iteration {i + 1:03d}: warm up with augmentation\")\n for j in range(len(generator_train)):\n t_x, t_y = generator_train[j]\n model.fit(t_x, t_y, callbacks=[lr_schedule], **fit_params_warmup)\n\n generator_train.on_epoch_end()\n # FIXME: data generator for tf training somehow broken, here is a hot fix # END\n\n print(\"\\nFine tune the whole model w/ early stopping\")\n model.trainable = True\n # callbacks\n warmup_lr_scheduler = partial(learning_rate_scheduler, max_lr=1e-4, factor=.25)\n lr_schedule = LearningRateScheduler(warmup_lr_scheduler)\n reduce_lr = ReduceLROnPlateau(\n monitor=self.eval_metric, factor=0.5, patience=2, min_lr=1e-6, model=self.loss_direction)\n early_stopping = CustomMetricEarlyStoppingCallback(\n data=validation_data, training_data=(train_inputs, train_outputs), score_func=self.score_func, patience=2,\n verbose=1, mode=\"auto\", restore_best_weights=True)\n tensorboard = TensorBoard(self.fine_tuned_dir_path)\n callbacks = [early_stopping, reduce_lr, lr_schedule, tensorboard] # model_checkpoint: got NotImplementedError\n model.fit(train_inputs, train_outputs, validation_data=validation_data, callbacks=callbacks, **fit_params)\n return self\n", "id": "2809313", "language": "Python", "matching_score": 5.686821937561035, "max_stars_count": 2, "path": "nlp_utils/Solver/BaselineTransformerTFSolver.py" }, { "content": "from typing import Callable, Dict, Optional\n\nimport pandas as pd\n\nimport tensorflow.keras.backend as K\nfrom transformers import AutoConfig, AutoTokenizer, TFAutoModel\n\nfrom .BaseTransformerSovler import MixinTransformerSolver\nfrom .TransformerModelFactory import create_model_from_pretrained\n\n\nclass BaseTransformerTFSolver(MixinTransformerSolver):\n def __init__(self, fine_tuned_dir: str, score_func: Callable, batch_encode_func: Callable, configs: Dict, ):\n super().__init__(\n score_func=score_func, fine_tuned_dir=fine_tuned_dir, pretrained_dir=configs[\"pretrained_model_dir\"],\n model_weights_filename=configs[\"model_weights_filename\"])\n\n self._batch_encode_func = batch_encode_func\n\n 
self.configs_question: Dict = configs[\"question\"]\n self.configs_answer: Dict = configs[\"answer\"]\n\n self.is_distilled: bool = configs.get(\"is_distilled\", False)\n\n self.special_tokens_dict = configs.get(\"special_tokens_dict\", dict())\n self.tokenizer: AutoTokenizer = None\n self.model = None\n\n self.max_seq_length_question = self.configs_question[\"tokenize\"][\"max_length\"]\n self.max_seq_length_answer = self.configs_answer[\"tokenize\"][\"max_length\"]\n\n def _pipeline_factory(self, load_model_from_fine_tuned: bool = False, output_size: int = None):\n # FIXME: AutoTokenizer, AutoConfig, TFAutoModel have unexpected issues when loading from self.fine_tuned_dir_path\n load_from_dir_path: str = self.pretrained_dir_path\n\n tokenizer = AutoTokenizer.from_pretrained(load_from_dir_path)\n tokenizer.save_pretrained(self.fine_tuned_dir_path)\n num_added_toks = tokenizer.add_special_tokens(self.special_tokens_dict)\n if len(self.special_tokens_dict) > 0:\n print(f\"adding {num_added_toks} special tokens: {self.special_tokens_dict}\")\n\n if output_size is None:\n raise ValueError(\"need to specify output_size to create the model\")\n\n # init a new model for this\n model_configs = AutoConfig.from_pretrained(load_from_dir_path)\n model_configs.output_hidden_states = False # Set to True to obtain hidden states\n\n print(f\"load pretrained weights for transformer from: {load_from_dir_path}\")\n K.clear_session()\n model_block = TFAutoModel.from_pretrained(load_from_dir_path, config=model_configs)\n # model_block.resize_token_embeddings(len(tokenizer)) # FIXME: transformer not implemented in TF\n model_block.save_pretrained(self.fine_tuned_dir_path)\n model = create_model_from_pretrained(\n model_block, max_seq_length_question=self.max_seq_length_question,\n max_seq_length_answer=self.max_seq_length_answer, output_size=output_size, is_distilled=self.is_distilled)\n\n if load_model_from_fine_tuned:\n print(f\"load fine-tuned weights from: {self.fine_tuned_model_weights_file_path_}\")\n model.load_weights(self.fine_tuned_model_weights_file_path_)\n\n return tokenizer, model\n\n def _batch_encode(self, x: pd.DataFrame):\n inputs = self._batch_encode_func(\n x, self.tokenizer, column_question=self.configs_question[\"column\"],\n column_question_pair=self.configs_question.get(\"column_pair\", None),\n tokenize_config_question=self.configs_question[\"tokenize\"], column_answer=self.configs_answer[\"column\"],\n column_answer_pair=self.configs_answer.get(\"column_pair\", None),\n tokenize_config_answer=self.configs_answer[\"tokenize\"], is_distilled=self.is_distilled)\n return inputs\n\n def _run_inference(self, x: pd.DataFrame):\n if self.model is None or self.tokenizer is None:\n self.tokenizer, self.model = self._pipeline_factory(\n load_model_from_fine_tuned=True, output_size=len(self.target_columns))\n else:\n print(\"inference using currently loaded tokenizer and model\")\n return pd.DataFrame(self.model.predict(self._batch_encode(x)), index=x.index, columns=self.target_columns)\n\n def _run_model_fine_tune(self, data: Dict, fit_params: Optional[Dict] = None, **kwargs):\n raise NotImplementedError()\n", "id": "5578630", "language": "Python", "matching_score": 2.2201266288757324, "max_stars_count": 2, "path": "nlp_utils/Solver/BaseTransformerTFSolver.py" }, { "content": "import os\nimport argparse\nfrom typing import Union\nfrom pathlib import Path\n\nimport transformers\nfrom transformers import AutoConfig, AutoTokenizer, AutoModel, TFAutoModel\n\n\ndef mkdir(dir_path: Union[str, Path]) -> 
bool:\n if os.path.isdir(dir_path):\n print(f\"Skip creating existing directory: {dir_path}\")\n return True\n\n try:\n os.mkdir(dir_path)\n print(f\"Successfully created directory: {dir_path}\")\n\n except OSError:\n print(f\"Creation of directory: {dir_path} failed\")\n return False\n\n return True\n\n\ndef transformers_dowloader(\n pretrained_model_name: str, working_dir: Union[str, Path], is_tf: bool = True) -> bool:\n model_class = AutoModel\n if is_tf:\n model_class = TFAutoModel\n\n print(f\"Download model and tokenizer for: {pretrained_model_name}\")\n transformer_model = model_class.from_pretrained(pretrained_model_name)\n transformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name)\n #\n\n output_dir = working_dir / pretrained_model_name\n try:\n mkdir(dir_path=output_dir)\n transformer_model.save_pretrained(output_dir)\n transformer_tokenizer.save_pretrained(output_dir)\n print(f\"Save model and tokenizer {pretrained_model_name} in directory {output_dir}\")\n\n except OSError:\n print(f\"Save model and tokenizer {pretrained_model_name} in directory {output_dir}: Failed\")\n return False\n\n return True\n\n\ndef main():\n default_output_dir: str = \"../models\"\n default_data_filename: str = \"models_to_download.txt\"\n\n parser = argparse.ArgumentParser(\n description=\"Transformers Pretrained Models Downloader\", add_help=True,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--output-dir\", type=str, default=default_output_dir, help=\"folder to save\")\n parser.add_argument(\n \"--requirement\", \"-r\", type=str, default=default_data_filename, help=\"text file listing models to download\")\n parser.add_argument(\"--model\", \"-m\", type=str, default=None, help=\"name of a single model to download\")\n parser.add_argument(\"--tensorflow\", action=\"store_true\", default=False, help=\"download TensorFlow weights instead of PyTorch\")\n args = parser.parse_args()\n\n pretrained_model_name_list = [args.model]\n if args.model is None:\n with open(args.requirement, 'r') as opened_file:\n pretrained_model_name_list = [s.strip(\"\\n\").strip(\" \") for s in opened_file.readlines()]\n pretrained_model_name_list = list(filter(lambda s: not s.startswith(\"#\"), pretrained_model_name_list))\n pretrained_model_name_list = list(filter(lambda s: len(s) > 0, pretrained_model_name_list))\n print(f\"Plan: download {len(pretrained_model_name_list)} models: {', '.join(pretrained_model_name_list)}\")\n\n # 'bert-base-uncased'\n\n print(f'Transformers version {transformers.__version__}')\n working_dir = Path(args.output_dir)\n mkdir(working_dir)\n\n for model_name in pretrained_model_name_list:\n transformers_dowloader(model_name, working_dir=working_dir, is_tf=args.tensorflow)\n return\n\n\nif \"__main__\" == __name__:\n main()\n", "id": "6864988", "language": "Python", "matching_score": 4.646456718444824, "max_stars_count": 0, "path": "script/download.py" }, { "content": "\"\"\"\nfork THIS excellent downloader\nhttps://www.kaggle.com/maroberti/transformers-model-downloader-pytorch-tf2-0\n\"\"\"\n\nfrom typing import Union\nfrom pathlib import Path\nimport os\n\nimport transformers\nfrom transformers import AutoConfig, AutoModel, AutoTokenizer, TFAutoModel\n\n\ndef transformers_model_dowloader(pretrained_model_name: str, working_dir: Union[str, Path], is_tf: bool = True) -> bool:\n model_class = AutoModel\n if is_tf:\n model_class = TFAutoModel\n\n NEW_DIR = working_dir / pretrained_model_name\n try:\n os.mkdir(NEW_DIR)\n print(f\"Successfully created directory {NEW_DIR}\")\n except OSError:\n print(f\"Creation of directory 
{NEW_DIR} failed\")\n\n print(f\"Download model and tokenizer {pretrained_model_name}\")\n transformer_model = model_class.from_pretrained(pretrained_model_name)\n transformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name)\n try:\n transformer_model.save_pretrained(NEW_DIR)\n transformer_tokenizer.save_pretrained(NEW_DIR)\n print(f\"Save model and tokenizer {pretrained_model_name} in directory {NEW_DIR}\")\n except:\n print(f\"Save model and tokenizer {pretrained_model_name} in directory {NEW_DIR}: Failed\")\n return False\n\n return True\n\n\ndef main():\n pretrained_model_name_list = [\n 'bert-base-uncased',\n 'bert-base-cased',\n 'bert-large-cased',\n\n 'distilbert-base-uncased',\n\n 'albert-xxlarge-v2',\n 'albert-xlarge-v2',\n 'albert-large-v2',\n\n 'roberta-base',\n 'roberta-large',\n 'roberta-large-mnli',\n 'distilroberta-base',\n\n 'distilbert-base-uncased',\n ]\n\n print(f'Transformers version {transformers.__version__}') # Current version: 2.3.0\n WORKING_DIR = Path(\"../input/hugging_face_pretrained\")\n try:\n os.mkdir(WORKING_DIR)\n except:\n pass\n\n for i, pretrained_model_name in enumerate(pretrained_model_name_list, start=1):\n print(i, '/', len(pretrained_model_name_list))\n transformers_model_dowloader(pretrained_model_name, WORKING_DIR, is_tf=True)\n\n return\n\n\nif \"__main__\" == __name__:\n main()\n", "id": "22816", "language": "Python", "matching_score": 0.6827096939086914, "max_stars_count": 2, "path": "script/download_pretrained.py" }, { "content": "import os\nimport random\nimport warnings\nfrom typing import Dict\n\nimport numpy as np\nimport pandas as pd\nimport torch\n\nwarnings.filterwarnings(\"ignore\")\n\n\ndef save_hdf_file(file_path: str, data: Dict[str, pd.DataFrame]):\n with pd.HDFStore(file_path, mode=\"w\") as store:\n print(f\"save data ({len(data.keys())}) to: {file_path}\")\n for k, v in data.items():\n if v is None or not (isinstance(v, pd.DataFrame) or isinstance(v, pd.Series)):\n print(f\"skip save key: {k}\")\n continue\n\n store.put(key=k, value=v)\n print(f\"save stats: {k}, shape={v.shape}\")\n\n return True\n\n\ndef load_hdf_file(file_path: str) -> Dict[str, pd.DataFrame]:\n data = dict()\n with pd.HDFStore(file_path, mode=\"r\") as store:\n print(f\"load data ({len(store.keys())}) from: {file_path}\")\n for k in store.keys():\n df = store.get(k)\n data[k.lstrip('/')] = df\n print(f\"load key: {k}, shape={df.shape}\")\n\n return data\n\n\ndef safe_mkdir(directory: str) -> bool:\n if not os.path.exists(directory):\n os.makedirs(directory)\n print(f\"make dir: {directory}\")\n return True\n\n print(f\"skip making dir: {directory}\")\n return False\n\n\ndef initialize_configs(filename: str):\n if not os.path.exists(filename):\n raise ValueError(\"Spec file {spec_file} does not exist\".format(spec_file=filename))\n\n module_name = filename.split(os.sep)[-1].replace(\".\", \"\")\n\n import importlib.util\n spec = importlib.util.spec_from_file_location(module_name, filename)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n return module\n\n\ndef seed_everything(seed: int = 42):\n random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = True\n return\n", "id": "2053445", "language": "Python", "matching_score": 2.2972443103790283, "max_stars_count": 0, "path": "alaska_utils/utils.py" }, { "content": "from .eval_metrics import 
alaska_weighted_auc\nfrom .utils import load_hdf_file, save_hdf_file, safe_mkdir, initialize_configs, seed_everything\nfrom .data_utils import configure_arguments\nfrom .data_utils import split_train_valid_data, index_train_test_images, parse_image_to_dir_basename\nfrom .data_utils import generate_submission", "id": "11033952", "language": "Python", "matching_score": 1.6436270475387573, "max_stars_count": 0, "path": "alaska_utils/__init__.py" }, { "content": "import os\nimport sys\nimport warnings\nfrom argparse import ArgumentParser\nfrom itertools import combinations\nfrom typing import Dict, Optional, Tuple, List, Callable, Any, Union\n\nimport numpy as np\nimport optuna\nimport pandas as pd\nfrom catboost import CatBoostClassifier\nfrom lightgbm import LGBMClassifier\nfrom optuna.pruners import SuccessiveHalvingPruner\nfrom optuna.samplers import TPESampler\nfrom sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, BaggingClassifier\nfrom sklearn.ensemble import StackingClassifier\nfrom sklearn.model_selection import StratifiedKFold\nfrom xgboost import XGBClassifier\n\nwarnings.filterwarnings(\"ignore\")\n\nEXTERNAL_UTILS_LIB = \"../alaska_utils\"\nsys.path.append(EXTERNAL_UTILS_LIB)\n\nfrom alaska_utils import alaska_weighted_auc\nfrom alaska_utils import safe_mkdir\nfrom alaska_utils import seed_everything\nfrom alaska_utils import initialize_configs\nfrom alaska_utils import split_train_valid_data\nfrom alaska_utils import configure_arguments\nfrom alaska_utils import generate_submission\n\n\ndef do_evaluate(\n args: ArgumentParser, submission: pd.DataFrame, eval_metric_func: Callable, label: str = \"Cover\") -> float:\n image, kind = args.shared_indices\n df = submission.reset_index()[[kind, image, label]]\n df = df.loc[df[kind].isin(args.labels)]\n if df.empty:\n print(f\"Warning: No Ground Truth to evaluate; Return 0\")\n return 0.\n\n return eval_metric_func((df[kind] != label).values, (1. 
- df[label]).values)\n\n\ndef check_and_filter_proba_files(args: ArgumentParser, files_list: List[str]) -> List[str]:\n ret_files = list()\n for i, basename in enumerate(files_list):\n file_path = os.path.join(args.cached_dir, basename)\n if not os.path.exists(file_path):\n print(f\"file_path does not exist: {file_path}\")\n continue\n\n ret_files.append(basename)\n\n return ret_files\n\n\ndef generate_stacking_data_split(\n args: ArgumentParser, configs,\n train_indices: pd.DataFrame, valid_indices: pd.DataFrame) -> Dict[str, pd.DataFrame]:\n image, kind = args.shared_indices\n\n df_meta_info = pd.read_parquet(os.path.join(args.cached_dir, configs[\"image_stats\"]))\n\n ret = list()\n for i, s in enumerate(configs[\"image_proba\"]):\n basename = s.split(\"__\")\n arch_name: str = basename[1]\n metrics: str = basename[2]\n\n file_path: str = os.path.join(args.cached_dir, s)\n df = pd.read_parquet(file_path)\n df.rename(columns={c: \"_\".join([arch_name, metrics, c]) for c in df.columns}, inplace=True)\n print(f\"read {df.shape} from {file_path}\")\n ret.append(df)\n\n df_proba = pd.concat(ret, axis=1)\n df_proba = df_proba.join(df_meta_info)\n columns = df_proba.columns.tolist() + [args.col_image_quality]\n\n data = dict()\n df_train = train_indices.join(df_proba, how='left', on=args.shared_indices).set_index(args.shared_indices)\n data[\"train_x\"] = df_train.reindex(columns=columns)\n data[\"train_y\"] = (df_train[args.col_enum_class] > .1).astype(np.int32)\n data[\"train_groups\"] = df_train[[args.col_image_quality]]\n\n df_valid = valid_indices.join(df_proba, how='left', on=args.shared_indices).set_index(args.shared_indices)\n data[\"valid_x\"] = df_valid.reindex(columns=columns)\n data[\"valid_y\"] = (df_valid[args.col_enum_class] > .1).astype(np.int32)\n data[\"valid_groups\"] = df_valid[[args.col_image_quality]]\n\n df_test = pd.read_parquet(args.file_path_test_images_info)\n df_test = df_test.reset_index().join(df_proba, how='left', on=args.shared_indices)\n data[\"test_x\"] = df_test.set_index(image).reindex(columns=columns)\n print(f\"Using Features {len(columns)}: {columns}\")\n # assert data[\"valid_x\"].columns == data[\"test_x\"].columns\n return data\n\n\ndef get_inference_file_score(\n args: ArgumentParser, data: pd.DataFrame, train_indices: pd.DataFrame, valid_indices: pd.DataFrame,\n eval_metric_func: Callable, label: str = \"Cover\") -> Tuple[pd.DataFrame, float]:\n image, kind = args.shared_indices\n\n df_train = train_indices.join(data, how='left', on=args.shared_indices)\n df_valid = valid_indices.join(data, how='left', on=args.shared_indices)\n\n queue: List[Tuple[str, pd.DataFrame]] = [(\"train_split\", df_train), (\"valid_split\", df_valid), ]\n ret_df: List[pd.DataFrame] = list()\n ret_score: List[float] = list()\n for name, df in queue:\n stats = df.groupby(args.col_image_quality).apply(\n lambda x: eval_metric_func((x[kind] != label).values, (1. - x[label]).values))\n ret_df.append(stats.rename(name))\n ret_score.append(eval_metric_func((df[kind] != label).values, (1. 
- df[label]).values))\n\n df = pd.concat(ret_df, axis=1).T\n df[\"all_quality\"] = ret_score\n return df, ret_score[-1]\n\n\ndef scoring_single_proba_file(\n args: ArgumentParser, file_path: str, eval_metric_func: Callable, train_indices: List, valid_indices: List):\n arch_name: str = file_path.split(\"__\")[1]\n file_path: str = os.path.join(args.cached_dir, file_path)\n\n df_proba = pd.read_parquet(file_path)\n stats, score = get_inference_file_score(\n args, data=df_proba, eval_metric_func=eval_metric_func, train_indices=train_indices,\n valid_indices=valid_indices)\n print(f\"inference file: {file_path}:\\n{stats}\")\n\n df = get_test_sub(args, df_proba)\n df.to_csv(os.path.join(args.output_dir, f\"submission_{arch_name}_{score:.06f}.csv\"))\n return\n\n\ndef get_test_sub(args: ArgumentParser, df_proba: pd.DataFrame) -> pd.DataFrame:\n image, kind = args.shared_indices\n df_test = df_proba.reset_index()\n df_test = df_test.loc[~df_test[kind].isin(args.labels)]\n return generate_submission(args=args, submission=df_test)\n\n\ndef generate_stacked_submission(\n args, stacker, params, eval_metric_func: Callable, data: Dict[str, Union[pd.DataFrame, pd.Series]],\n train_on_validation: bool = True, use_update_model: bool = False):\n if not train_on_validation and not use_update_model:\n stacker = stacker(**params)\n stacker.fit(data[\"train_x\"], data[\"train_y\"])\n elif train_on_validation and not use_update_model:\n stacker = stacker(**params)\n stacker.fit(data[\"valid_x\"], data[\"valid_y\"])\n elif use_update_model:\n base_model = stacker(**params)\n base_model.fit(data[\"train_x\"], data[\"train_y\"])\n hparams = params.copy()\n hparams.update({\"refresh_leaf\": 1, \"updater\": \"refresh\", \"process_type\": \"update\",})\n stacker = stacker(**hparams)\n stacker.fit(data[\"valid_x\"], data[\"valid_y\"], xgb_model=base_model.get_booster())\n\n score = eval_metric_func(data[\"valid_y\"], stacker.predict_proba(data[\"valid_x\"])[:, 1])\n\n file_path = os.path.join(args.output_dir, f\"submission_stacker_metric_{score:.06f}_tr.csv\")\n if train_on_validation and not use_update_model:\n file_path = os.path.join(args.output_dir, f\"submission_stacker_metric_{score:.06f}_val.csv\")\n if use_update_model:\n file_path = os.path.join(args.output_dir, f\"submission_stacker_metric_{score:.06f}_update_val.csv\")\n\n subm = pd.DataFrame({\"Label\": stacker.predict_proba(data[\"test_x\"])[:, 1]}, index=data[\"test_x\"].index.rename(\"Id\"))\n print(f\"\\nSubmission file: {file_path}\\nStats:\\n{subm.describe()}\\nHead:\\n{subm.head()}\")\n subm.to_csv(file_path)\n return stacker, subm\n\n\n# Calib: sklearn.isotonic.IsotonicRegression\n# GPSINIFF\n# Stacking\n\nmodel_gen = {\n \"CatBoostClassifier\": CatBoostClassifier,\n \"LGBMClassifier\": LGBMClassifier,\n \"RandomForestClassifier\": RandomForestClassifier,\n \"ExtraTreesClassifier\": ExtraTreesClassifier,\n \"StackingClassifier\": StackingClassifier,\n \"XGBClassifier\": XGBClassifier,\n}\n\n\nclass OptunaTuner:\n def __init__(\n self, eval_metric_func: Callable, estimator, init_params: Optional[Dict],\n search_space: Dict[str, Dict[str, Any]], n_startup_trials: int = 5, n_trials: int = 10,\n greater_is_better: bool = True):\n self.sampler = TPESampler(\n consider_prior=True, prior_weight=1.0, consider_magic_clip=True, consider_endpoints=True,\n n_startup_trials=n_startup_trials, n_ei_candidates=24, )\n self.pruner = SuccessiveHalvingPruner(reduction_factor=4, min_early_stopping_rate=0)\n direction = \"maximize\" if greater_is_better else 
\"minimize\"\n self.study = optuna.create_study(\n storage=None, sampler=self.sampler, pruner=self.pruner, study_name=\"foobar\", direction=direction,\n load_if_exists=False)\n\n self.eval_metric_func: Callable = eval_metric_func\n self.n_trials: int = n_trials\n self.status: bool = False\n #\n self.estimator = estimator\n self.init_params: Dict[str, Any] = init_params\n self.search_space: Dict[str, Dict[str, Any]] = search_space\n #\n self.data: Optional[Dict[str, pd.DataFrame]] = None\n self.params = self.init_params.copy()\n\n def _get_suggested_params_from_trail(self, trial) -> Dict:\n suggest_params = dict()\n for k, v in self.search_space.items():\n if v['type'] == 'categorical':\n suggest_params[k] = trial.suggest_categorical(k, v['categorical'])\n\n elif v['type'] == 'discrete':\n suggest_params[k] = trial.suggest_discrete_uniform(\n k, low=v['low'], high=v['high'], q=v['step'], )\n\n elif v['type'] == 'int':\n suggest_params[k] = trial.suggest_int(k, low=v['low'], high=v['high'])\n\n elif v['type'] == 'loguniform':\n suggest_params[k] = trial.suggest_loguniform(k, low=v['low'], high=v['high'], )\n\n elif v['type'] == 'uniform':\n suggest_params[k] = trial.suggest_uniform(k, low=v['low'], high=v['high'], )\n\n return suggest_params\n\n def search(self, data: Dict[str, pd.DataFrame]):\n self.data = data\n self.study.optimize(self.objective, n_trials=self.n_trials)\n self.status = True\n trial = self.study.best_trial\n self.params.update(trial.params)\n print(f\"best params: {self.params}\")\n return self\n\n def objective(self, trial) -> float:\n params = self.init_params.copy()\n params.update(self._get_suggested_params_from_trail(trial))\n stacker = self.estimator(**params)\n stacker.fit(self.data[\"train_x\"], self.data[\"train_y\"])\n return self.eval_metric_func(self.data[\"valid_y\"], stacker.predict_proba(self.data[\"valid_x\"])[:, 1])\n\n @property\n def best_params_(self) -> Dict:\n if not self.status:\n raise NotImplementedError()\n\n print(f\"best params: {self.params}\")\n return self.params\n\n\ndef main(args: ArgumentParser):\n seed_everything(args.init_seed)\n args = configure_arguments(args)\n\n configs = initialize_configs(args.configs).configs\n\n configs[\"image_proba\"] = check_and_filter_proba_files(args, configs[\"image_proba\"])\n\n eval_metric_func = alaska_weighted_auc\n train_indices, valid_indices = split_train_valid_data(args=args, splitter=StratifiedKFold(n_splits=5), nr_fold=1)\n if args.proba_single:\n for basename in configs[\"image_proba\"]:\n scoring_single_proba_file(args, basename, eval_metric_func, train_indices, valid_indices)\n return\n\n ret_files = configs[\"image_proba\"]\n ret = [pd.read_parquet(os.path.join(args.cached_dir, basename)) for basename in ret_files]\n\n if args.proba_combinations:\n for i in range(2, len(ret)):\n for j, s in zip(combinations(ret, i), combinations(ret_files, i)):\n df = pd.concat(j, axis=0).groupby(level=args.shared_indices).mean()\n stats, score = get_inference_file_score(\n args, data=df, eval_metric_func=eval_metric_func, train_indices=train_indices,\n valid_indices=valid_indices)\n print(f\"\\ninference file {len(s)}: {s}:\\n{stats}\")\n df = get_test_sub(args, df)\n df.to_csv(os.path.join(args.output_dir, f\"submission_{score:.06f}_ens{len(s)}.csv\"))\n\n return\n\n if args.generate_proba_file:\n ret = [pd.read_parquet(os.path.join(args.cached_dir, basename)) for basename in ret_files]\n df = pd.concat(ret, axis=0).groupby(level=args.shared_indices).mean()\n stats, score = get_inference_file_score(\n args, 
data=df, eval_metric_func=eval_metric_func, train_indices=train_indices, valid_indices=valid_indices)\n print(f\"\\ninference file:\\n{stats}\")\n file_path = f\"proba__{args.proba_filename_stem}__metric_{score:.4f}.parquet\"\n file_path = os.path.join(args.cached_dir, file_path)\n print(f\"generate new proba file and save to: {file_path}\")\n df.to_parquet(file_path)\n return\n\n scikit_parameters_repos = configs[\"scikit_parameters_repos\"]\n data = generate_stacking_data_split(args, configs, train_indices, valid_indices)\n # HPO\n # configs = scikit_parameters_repos[\"LinearXGBClassifier\"]\n # configs = scikit_parameters_repos[\"LGBMClassifier\"]\n if args.model_stacking:\n scikit_model_params = scikit_parameters_repos[args.model]\n estimator = model_gen.get(scikit_model_params[\"estimator_gen\"])\n params = scikit_model_params.get(\"params\")\n print(f\"use model: {args.model}: {estimator}\")\n if args.refresh or not params:\n init_params = scikit_model_params[\"init_params\"]\n search_space = scikit_model_params[\"search_space\"]\n solver = OptunaTuner(\n init_params=init_params, search_space=search_space, estimator=estimator,\n eval_metric_func=eval_metric_func, n_startup_trials=100, n_trials=250, )\n solver.search(data)\n params = solver.best_params_\n\n if args.bagging:\n #\n params_bagging = {\n \"base_estimator\": estimator(**params), \"n_estimators\": 25, \"max_samples\": .95, \"random_state\": None,\n }\n generate_stacked_submission(\n args, stacker=BaggingClassifier, params=params_bagging, eval_metric_func=eval_metric_func, data=data,\n train_on_validation=False)\n return\n\n generate_stacked_submission(\n args, stacker=estimator, params=params, eval_metric_func=eval_metric_func, data=data,\n train_on_validation=False)\n generate_stacked_submission(\n args, stacker=estimator, params=params, eval_metric_func=eval_metric_func, data=data,\n train_on_validation=True)\n\n if args.use_update_model:\n generate_stacked_submission(\n args, stacker=estimator, params=params, eval_metric_func=eval_metric_func, data=data,\n train_on_validation=False, use_update_model=args.use_update_model)\n\n\n return\n\n return\n\n\nif \"__main__\" == __name__:\n #\n default_output_dir: str = \"../input/alaska2-image-steganalysis-output/\"\n default_cached_dir: str = \"../input/alaska2-image-steganalysis-cached-data/\"\n default_meta_dir: str = \"../input/alaska2-image-steganalysis-image-quality/\"\n default_model_dir: str = \"../input/alaska2-image-steganalysis-models/\"\n default_data_dir: str = \"../input/alaska2-image-steganalysis/\"\n #\n default_configs: str = \"../configs/stacking_baseline.py\"\n #\n default_n_jobs: int = 8\n default_init_seed: int = 42\n #\n default_eval_metric_name: str = \"weighted_auc\"\n default_model_name: str = \"LGBMClassifier\"\n\n parser = ArgumentParser()\n parser.add_argument(\"--output-dir\", type=str, default=default_output_dir, help=\"folder for output\")\n parser.add_argument(\"--cached-dir\", type=str, default=default_cached_dir, help=\"folder for cached data\")\n parser.add_argument(\"--meta-dir\", type=str, default=default_meta_dir, help=\"folder for meta data\")\n parser.add_argument(\"--model-dir\", type=str, default=default_model_dir, help=\"folder for models\")\n parser.add_argument(\"--data-dir\", type=str, default=default_data_dir, help=\"folder for data\")\n #\n parser.add_argument(\"--eval-metric\", type=str, default=default_eval_metric_name, help=\"eval metric name\")\n # configs\n parser.add_argument(\"--configs\", type=str, default=default_configs, 
help=\"configs for stacker\")\n #\n parser.add_argument(\n \"--proba-filename-stem\", type=str, default=None, help=\"filename for the generated proba file\")\n parser.add_argument(\n \"--bagging\", action=\"store_true\", default=False, help=\"bagging on models\")\n parser.add_argument(\n \"--generate-proba-file\", action=\"store_true\", default=False, help=\"generate a new proba file from configs\")\n parser.add_argument(\n \"--proba-single\", action=\"store_true\", default=False, help=\"generate submission for each single proba file\")\n parser.add_argument(\n \"--proba-combinations\", action=\"store_true\", default=False,\n help=\"generate submissions for the average proba in every combinations of the proba files\")\n parser.add_argument(\n \"--use-update-model\", action=\"store_true\", default=False,\n help=\"use model having refit option\")\n parser.add_argument(\n \"--model-stacking\", action=\"store_true\", default=False,\n help=\"generate submissions for every combinations of the proba files\")\n parser.add_argument(\n \"--model\", type=str, default=default_model_name, help=\"model for stacking\")\n #\n parser.add_argument(\"--refresh\", action=\"store_true\", default=False, help=\"refresh cached data\")\n parser.add_argument(\"--n-jobs\", type=int, default=default_n_jobs, help=\"num worker\")\n parser.add_argument(\"--init-seed\", type=int, default=default_init_seed, help=\"initialize random seed\")\n # debug\n parser.add_argument(\"--debug\", action=\"store_true\", default=False, help=\"debug\")\n args = parser.parse_args()\n\n # house keeping\n safe_mkdir(args.output_dir)\n safe_mkdir(args.cached_dir)\n safe_mkdir(args.model_dir)\n # start program\n main(args)\n", "id": "5771715", "language": "Python", "matching_score": 5.746922016143799, "max_stars_count": 0, "path": "script/stacker.py" }, { "content": "import os\nimport sys\nimport warnings\nfrom argparse import ArgumentParser\nfrom functools import partial\nfrom glob import glob\nfrom multiprocessing import Pool\nfrom typing import List, Callable, Tuple, Union\n\nimport cv2\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import kurtosis, skew\n\nwarnings.filterwarnings(\"ignore\")\n\nEXTERNAL_UTILS_LIB = \"../alaska_utils\"\nsys.path.append(EXTERNAL_UTILS_LIB)\n\nfrom alaska_utils import safe_mkdir\nfrom alaska_utils import configure_arguments\nfrom alaska_utils import parse_image_to_dir_basename\n\n\ndef process_image(image_file_path: str, functions: Tuple[str, Callable], ) -> Union[pd.Series, pd.DataFrame]:\n image = cv2.imread(image_file_path, cv2.IMREAD_COLOR)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)\n tmp = {name: [func(image[:, :, i].flatten()) for i in range(image.shape[-1])] for name, func in functions}\n for name, func in functions:\n tmp[name].append(func(image.flatten()))\n tmp = pd.DataFrame(tmp).stack().swaplevel()\n return tmp\n\n\ndef main(args: ArgumentParser):\n for dir_path in [args.output_dir, args.cached_dir, args.model_dir]:\n safe_mkdir(dir_path)\n\n args = configure_arguments(args)\n file_path: str = args.file_path_images_stats\n if os.path.exists(file_path) and not args.refresh:\n print(f\"{file_path} exists, skip generates meta info\")\n return False\n\n # process\n list_of_functions: List[Tuple[str, Callable]] = [\n (\"mean\", np.mean), (\"std\", np.std), (\"kurt\", kurtosis), (\"skew\", skew), ]\n # file path\n list_all_images: List[str] = list(glob(os.path.join(args.data_dir, \"*\", \"*.jpg\")))\n if args.debug:\n list_all_images = list_all_images[:100]\n\n with 
Pool(processes=args.n_jobs) as p:\n func = partial(process_image, functions=list_of_functions)\n df = pd.concat(list(p.map(func, list_all_images)), axis=1).astype(np.float32).T\n df.columns = [f\"{i}_{m}\" for m, i in df.columns]\n\n df_train = parse_image_to_dir_basename(args, list_all_images, column=\"file_path\")\n\n # compose return dataframe\n image, kind = args.shared_indices\n df[image] = df_train[image].tolist()\n df[kind] = df_train[kind].tolist()\n df.sort_values(image, inplace=True)\n df.set_index(args.shared_indices, inplace=True)\n\n df.to_parquet(file_path)\n print(f\"Save stats to: {file_path}\\n{df.describe().T}\")\n return\n\n\nif \"__main__\" == __name__:\n #\n default_output_dir: str = \"../input/alaska2-image-steganalysis-output/\"\n default_cached_dir: str = \"../input/alaska2-image-steganalysis-cached-data/\"\n default_meta_dir: str = \"../input/alaska2-image-steganalysis-image-quality/\"\n default_model_dir: str = \"../input/alaska2-image-steganalysis-models/\"\n default_data_dir: str = \"../input/alaska2-image-steganalysis/\"\n #\n default_n_jobs: int = 8\n default_init_seed: int = 42\n\n parser = ArgumentParser()\n parser.add_argument(\"--output-dir\", type=str, default=default_output_dir, help=\"folder for output\")\n parser.add_argument(\"--cached-dir\", type=str, default=default_cached_dir, help=\"folder for cached data\")\n parser.add_argument(\"--meta-dir\", type=str, default=default_meta_dir, help=\"folder for meta data\")\n parser.add_argument(\"--model-dir\", type=str, default=default_model_dir, help=\"folder for models\")\n parser.add_argument(\"--data-dir\", type=str, default=default_data_dir, help=\"folder for data\")\n #\n parser.add_argument(\"--refresh\", action=\"store_true\", default=False, help=\"refresh cached data\")\n parser.add_argument(\"--n-jobs\", type=int, default=default_n_jobs, help=\"num worker\")\n # debug\n parser.add_argument(\"--debug\", action=\"store_true\", default=False, help=\"debug\")\n args = parser.parse_args()\n\n # start program\n main(args)\n", "id": "8614512", "language": "Python", "matching_score": 3.7008228302001953, "max_stars_count": 0, "path": "script/meta_reader.py" }, { "content": "import os\nfrom argparse import ArgumentParser\nfrom glob import glob\nfrom typing import List, Tuple, Optional\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import BaseCrossValidator\n\n\ndef configure_arguments(args: ArgumentParser) -> ArgumentParser:\n args.labels: List[str] = [\"Cover\", \"JMiPOD\", \"JUNIWARD\", \"UERD\"]\n args.shared_indices: List[str] = [\"image\", \"kind\"]\n args.col_enum_class: str = \"label\"\n args.col_image_quality: str = \"quality\"\n\n args.file_path_all_images_info = os.path.join(args.cached_dir, \"all_images_info.parquet\")\n args.file_path_train_images_info = os.path.join(args.cached_dir, \"train_images_info.parquet\")\n args.file_path_test_images_info = os.path.join(args.cached_dir, \"test_images_info.parquet\")\n\n args.file_path_images_stats = os.path.join(args.cached_dir, \"images_stats_info.parquet\")\n\n file_path_image_quality: str = \"image_quality.csv\"\n args.file_path_image_quality = os.path.join(args.meta_dir, file_path_image_quality)\n return args\n\n\ndef parse_image_to_dir_basename(\n args: ArgumentParser, list_all_images: List[str], column: str = \"file_path\") -> pd.DataFrame:\n image, kind = args.shared_indices\n df = pd.DataFrame({column: list_all_images})\n df[image] = df[column].apply(lambda x: os.path.basename(x))\n df[kind] = df[column].apply(lambda x: 
os.path.split(os.path.dirname(x))[-1])\n df.drop(columns=[column], inplace=True)\n return df\n\n\ndef index_train_test_images(args: ArgumentParser):\n image, kind = args.shared_indices\n\n file_paths: List[str] = [\n args.file_path_all_images_info, args.file_path_train_images_info, args.file_path_test_images_info]\n\n if all([os.path.exists(path) for path in file_paths]):\n if not args.refresh_cache:\n return\n\n df_quality: pd.DataFrame = pd.DataFrame()\n file_path_image_quality = args.file_path_image_quality\n if os.path.exists(file_path_image_quality):\n df_quality = pd.read_csv(file_path_image_quality).set_index(args.shared_indices)\n print(f\"read in image quality file: {df_quality.shape}\")\n else:\n print(f\"image quality not exist: {file_path_image_quality}\")\n\n # process\n list_all_images: List[str] = list(glob(os.path.join(args.data_dir, \"*\", \"*.jpg\")))\n df_train = parse_image_to_dir_basename(args, list_all_images, column=\"file_path\")\n df_train[args.col_enum_class] = df_train[kind].map({kind: label for label, kind in enumerate(args.labels)})\n df_train.set_index(args.shared_indices, inplace=True)\n\n if not df_quality.empty:\n df_train = df_train.join(df_quality[args.col_image_quality])\n\n print(f\"Columns: {df_train.columns.tolist()}, N Uniques:\\n{df_train.nunique()}\")\n df_train.sort_values(image, inplace=True)\n df_train.to_parquet(args.file_path_all_images_info)\n df_train.loc[df_train[args.col_enum_class].notnull()].to_parquet(args.file_path_train_images_info)\n df_train.loc[df_train[args.col_enum_class].isnull()].to_parquet(args.file_path_test_images_info)\n return\n\n\ndef split_train_test_data(\n args: ArgumentParser, data: Optional[pd.DataFrame] = None) -> Tuple[pd.DataFrame, pd.DataFrame]:\n image, kind = args.shared_indices\n\n if data is None:\n data = pd.read_parquet(args.file_path_all_images_info)\n\n df = data.reset_index()\n mask = df[kind].isin(args.labels)\n return df.loc[mask], df.loc[~mask]\n\n\ndef split_train_valid_data(\n args: ArgumentParser, splitter: BaseCrossValidator, data: Optional[pd.DataFrame] = None,\n nr_fold: int = 1) -> Tuple[pd.DataFrame, pd.DataFrame]:\n \"\"\"Split Data into Train and Valid\"\"\"\n label = args.col_enum_class\n image, kind = args.shared_indices\n image_quality = args.col_image_quality\n\n if data is None:\n data = pd.read_parquet(args.file_path_train_images_info)\n\n data = data.reset_index()\n\n if args.debug:\n data = data.iloc[:2000]\n\n data[label] = data[label].astype(np.int32)\n df = data.loc[(~data[image].duplicated(keep=\"first\"))]\n for fold, (train_ind, valid_ind) in enumerate(\n splitter.split(X=df[label], y=df[image_quality], groups=df[image]), 1):\n if nr_fold == fold:\n print(f\"using fold {fold:02d} for train valid data split\", end=\"\\r\")\n break\n\n train_df = data.loc[data[image].isin(df[image].iloc[train_ind])]\n valid_df = data.loc[data[image].isin(df[image].iloc[valid_ind])]\n print(f\"using fold {fold:02d} for train valid data split: {train_df.shape}, {valid_df.shape}\")\n return train_df, valid_df\n\n\ndef generate_submission(args: ArgumentParser, submission: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Take Test Predictions for 4 classes to Generate Submission File\"\"\"\n image, kind = args.shared_indices\n df = submission.reset_index()[[image, args.labels[0]]]\n df.columns = [\"Id\", \"Label\"]\n df.set_index(\"Id\", inplace=True)\n df[\"Label\"] = 1. 
- df[\"Label\"]\n print(f\"\\nSubmission Stats:\\n{df.describe()}\\nSubmission Head:\\n{df.head()}\")\n return df\n", "id": "1212663", "language": "Python", "matching_score": 3.0165321826934814, "max_stars_count": 0, "path": "alaska_utils/data_utils.py" }, { "content": "import os\nimport random\nimport time\nimport warnings\nfrom argparse import ArgumentParser\nfrom datetime import datetime\nfrom glob import glob\nfrom typing import List, Optional, Callable, Tuple, Any, Dict, Sequence, Union\n\nimport albumentations as A\nimport cv2\nimport numpy as np\nimport pandas as pd\nimport pytorch_lightning as pl\nimport timm # pretrained model\nimport torch\nimport torch.nn.functional as F\nfrom albumentations.pytorch.transforms import ToTensorV2\n# Class Balance \"on fly\" from @CatalystTeam\nfrom catalyst.data.sampler import BalanceClassSampler\nfrom efficientnet_pytorch import EfficientNet\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint\nfrom sklearn import metrics\nfrom sklearn.model_selection import StratifiedKFold\nfrom torch import nn\nfrom torch import optim\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.utils.data.sampler import SequentialSampler\n\nwarnings.filterwarnings(\"ignore\")\n\n\ndef seed_everything(seed: int = 42):\n random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = True\n return\n\n\n# competition metrics\ndef alaska_weighted_auc(\n y_true: np.array, y_valid: np.array, tpr_thresholds: List[float] = [0.0, 0.4, 1.0],\n weights: List[float] = [2, 1]):\n \"\"\"\n https://www.kaggle.com/anokas/weighted-auc-metric-updated\n \"\"\"\n # size of subsets\n areas = np.array(tpr_thresholds[1:]) - np.array(tpr_thresholds[:-1])\n\n # The total area is normalized by the sum of weights such that the final weighted AUC is between 0 and 1.\n normalization = np.dot(areas, weights)\n\n def compute_submetrics(y_min: float, y_max: float, fpr_arr: np.array, tpr_arr: np.array) -> float:\n mask = (y_min < tpr_arr) & (tpr_arr < y_max)\n\n if not len(fpr[mask]):\n return 0.\n\n x_padding = np.linspace(fpr_arr[mask][-1], 1, 100)\n\n x = np.concatenate([fpr_arr[mask], x_padding])\n y = np.concatenate([tpr_arr[mask], [y_max] * len(x_padding)])\n return metrics.auc(x, y - y_min) # normalize such that curve starts at y=0\n\n fpr, tpr, thresholds = metrics.roc_curve(y_true, y_valid, pos_label=1)\n sub_metrics = [compute_submetrics(\n y_min=a, y_max=b, fpr_arr=fpr, tpr_arr=tpr) for a, b in zip(tpr_thresholds[:-1], tpr_thresholds[1:])]\n competition_metric = (np.array(sub_metrics) * weights).sum() / normalization\n return competition_metric\n\n\n# Metrics\nclass AverageMeter:\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\nclass RocAucMeter:\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.y_true = np.array([0, 1])\n self.y_pred = np.array([0.5, 0.5])\n self.score = 0\n\n def update(self, y_true, y_pred):\n y_true = y_true.cpu().numpy().argmax(axis=1).clip(min=0, max=1).astype(int)\n y_pred = 1 - nn.functional.softmax(y_pred, dim=1).data.cpu().numpy()[:, 0]\n self.y_true = 
np.hstack((self.y_true, y_true))\n self.y_pred = np.hstack((self.y_pred, y_pred))\n self.score = alaska_weighted_auc(self.y_true, self.y_pred)\n\n @property\n def avg(self):\n return self.score\n\n\n# Label Smoothing\nclass LabelSmoothing(nn.Module):\n def __init__(self, smoothing: float = 0.05, enable: bool = True):\n super().__init__()\n self.confidence: float = 1.0 - smoothing\n self.smoothing: float = smoothing\n self.enable: bool = enable\n\n def forward(self, x, target):\n if not self.enable:\n return torch.nn.functional.cross_entropy(x, target)\n\n x = x.float()\n target = target.float()\n log_probs = torch.nn.functional.log_softmax(x, dim=-1)\n nll_loss = (log_probs * target).sum(-1)\n smooth_loss = log_probs.mean(dim=-1)\n return -(self.confidence * nll_loss + self.smoothing * smooth_loss).mean()\n\n\n# Fitter\nclass Fitter:\n\n def __init__(self, model, device, config):\n self.config = config\n self.epoch = 0\n\n self.base_dir = \"./\"\n self.log_path = os.path.join(self.base_dir, \"log.txt\")\n self.best_summary_loss = 10 ** 5\n\n self.model: nn.Module = model\n self.device = device\n\n self.optimizer = None\n self.scheduler = None\n self.criterion: Optional[nn.Module] = None\n\n self.log(f\"Fitter prepared. Device is {self.device}\")\n self._configure_fitter()\n\n def _configure_fitter(self):\n param_optimizer = list(self.model.named_parameters())\n no_decay = [\"bias\", \"LayerNorm.bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\"params\": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], \"weight_decay\": 0.001},\n {\"params\": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], \"weight_decay\": 0.0}\n ]\n\n self.optimizer = torch.optim.AdamW(self.model.parameters(), lr=self.config.lr)\n self.scheduler = self.config.lr_scheduler(self.optimizer, **self.config.scheduler_params)\n self.criterion = self.config.loss.to(self.device)\n return self\n\n def fit(self, train_loader, validation_loader):\n\n for e in range(self.config.n_epochs):\n if self.config.verbose:\n lr = self.optimizer.param_groups[0][\"lr\"]\n timestamp = datetime.utcnow().isoformat()\n self.log(f\"\\n{timestamp}\\nLR: {lr}\")\n\n t = time.time()\n summary_loss, final_scores = self.train_one_epoch(train_loader)\n\n self.log(\n f\"[RESULT]: Train. Epoch: {self.epoch}, summary_loss: {summary_loss.avg:.5f}, final_score: \"\n f\"{final_scores.avg:.5f}, time: {(time.time() - t):.5f}\")\n self.save(os.path.join(self.base_dir, \"last-checkpoint.bin\"))\n\n t = time.time()\n summary_loss, final_scores = self.validation(validation_loader)\n\n self.log(\n f\"[RESULT]: Val. 
Epoch: {self.epoch}, summary_loss: {summary_loss.avg:.5f}, final_score: \"\n f\"{final_scores.avg:.5f}, time: {(time.time() - t):.5f}\")\n if summary_loss.avg < self.best_summary_loss:\n self.best_summary_loss = summary_loss.avg\n self.model.eval()\n self.save(os.path.join(self.base_dir, f\"best-checkpoint-{self.epoch:03d}epoch.bin\"))\n for path in sorted(glob(os.path.join(self.base_dir, \"best-checkpoint-*epoch.bin\")))[:-3]:\n os.remove(path)\n\n if self.config.step_after_validation:\n self.scheduler.step(metrics=summary_loss.avg)\n\n self.epoch += 1\n\n def validation(self, val_loader):\n self.model.eval()\n summary_loss = AverageMeter()\n final_scores = RocAucMeter()\n t = time.time()\n for step, (images, targets) in enumerate(val_loader):\n if step % self.config.verbose_step == 0 and self.config.verbose:\n print(\n f\"Val Step {step}/{len(val_loader)}, summary_loss: {summary_loss.avg:.5f}, final_score: \"\n f\"{final_scores.avg:.5f}, time: {(time.time() - t):.5f}\",\n end=\"\\r\"\n )\n\n with torch.no_grad():\n targets = targets.to(self.device).float()\n images = images.to(self.device).float()\n batch_size = images.shape[0]\n outputs = self.model(images)\n loss = self.criterion(outputs, targets)\n #\n final_scores.update(targets, outputs)\n summary_loss.update(loss.detach().item(), batch_size)\n\n return summary_loss, final_scores\n\n def train_one_epoch(self, train_loader):\n self.model.train()\n summary_loss = AverageMeter()\n final_scores = RocAucMeter()\n t = time.time()\n for step, (images, targets) in enumerate(train_loader):\n if step % self.config.verbose_step == 0 and self.config.verbose:\n print(\n f\"Train Step {step}/{len(train_loader)}, summary_loss: {summary_loss.avg:.5f}, final_score: \"\n f\"{final_scores.avg:.5f}, time: {(time.time() - t):.5f}\",\n end=\"\\r\"\n )\n\n targets = targets.to(self.device).float()\n images = images.to(self.device).float()\n batch_size = images.shape[0]\n self.optimizer.zero_grad()\n outputs = self.model(images)\n loss = self.criterion(outputs, targets)\n loss.backward()\n #\n final_scores.update(targets, outputs)\n summary_loss.update(loss.detach().item(), batch_size)\n\n self.optimizer.step()\n if self.config.step_after_optimizer:\n self.scheduler.step()\n\n return summary_loss, final_scores\n\n def save(self, path):\n self.model.eval()\n torch.save({\n \"model_state_dict\": self.model.state_dict(),\n \"optimizer_state_dict\": self.optimizer.state_dict(),\n \"scheduler_state_dict\": self.scheduler.state_dict(),\n \"best_summary_loss\": self.best_summary_loss,\n \"epoch\": self.epoch,\n }, path)\n return self\n\n def load(self, path, model_weights_only: bool = False):\n checkpoint = torch.load(path)\n self.model.load_state_dict(checkpoint[\"model_state_dict\"])\n self.model.cuda()\n\n self.optimizer = torch.optim.AdamW(self.model.parameters(), lr=self.config.lr)\n self.scheduler = self.config.lr_scheduler(self.optimizer, **self.config.scheduler_params)\n if not model_weights_only:\n self.optimizer.load_state_dict(checkpoint[\"optimizer_state_dict\"])\n self.scheduler.load_state_dict(checkpoint[\"scheduler_state_dict\"])\n self.best_summary_loss = checkpoint[\"best_summary_loss\"]\n self.epoch = checkpoint[\"epoch\"] + 1\n\n return self\n\n def log(self, message):\n if self.config.verbose:\n print(message)\n with open(self.log_path, \"a+\") as logger:\n logger.write(f\"{message}\\n\")\n\n\nclass BaseLightningModule(pl.LightningModule):\n def __init__(\n self,\n model: nn.Module,\n training_records: Optional[List[Dict[str, Any]]] = None, 
training_configs: Optional = None,\n valid_records: Optional[List[Dict[str, Any]]] = None, valid_configs: Optional = None,\n eval_metric_name: str = \"val_metric_score\", eval_metric_func: Optional[Callable] = None, ):\n super().__init__()\n self.model: nn.Module = model\n # configs, records\n self.training_records: Optional[List[Dict[str, Any]]] = training_records\n self.training_configs = training_configs\n self.valid_records: Optional[List[Dict[str, Any]]] = valid_records\n self.valid_configs = valid_configs\n #\n self.restored_checkpoint = None\n #\n self.current_epoch: int = 0\n\n # eval metric\n self.eval_metric_name: str = eval_metric_name\n self.eval_metric_func: Optional[Callable] = eval_metric_func\n self.loss: Optional[nn.Module] = None\n if self.training_configs is not None:\n self.loss = self.training_configs.loss\n\n def training_step(self, batch, batch_idx):\n x, y = batch\n y_hat = self.model(x)\n return {\"loss\": self.loss(y_hat, y), \"y\": y, \"yhat\": y_hat}\n\n def _post_process_outputs_for_metric(self, outputs):\n # metric\n y_true = (torch.cat([x[\"y\"] for x in outputs], dim=0).cpu().numpy()[:, 0] == 0).astype(int)\n y_pred = 1. - F.softmax(torch.cat([x[\"yhat\"] for x in outputs], dim=0)).data.cpu().numpy()[:, 0]\n return self.eval_metric_func(y_true, y_pred)\n\n def training_epoch_end(self, outputs) -> Dict[str, float]:\n tr_loss_mean = torch.stack([x[\"loss\"] for x in outputs]).mean()\n\n tr_metric = self._post_process_outputs_for_metric(outputs)\n metric_name: str = f\"tr_{self.eval_metric_name}\"\n print(f\"loss: {tr_loss_mean:.6f}, {metric_name}: {tr_metric:.6f}\\n\")\n\n return {\"loss\": tr_loss_mean, metric_name: tr_metric}\n\n def validation_step(self, batch, batch_idx) -> Dict[str, Any]:\n x, y = batch\n y_hat = self.model(x)\n return {\"val_loss\": self.loss(y_hat, y), \"y\": y, \"yhat\": y_hat}\n\n def validation_epoch_end(self, outputs) -> Dict[str, float]:\n val_loss_mean = torch.stack([x[\"val_loss\"] for x in outputs]).mean()\n\n val_metric = self._post_process_outputs_for_metric(outputs)\n metric_name: str = f\"val_{self.eval_metric_name}\"\n print(f\"\\nval_loss: {val_loss_mean:.6f}, {metric_name}: {val_metric:.6f}\")\n\n logs = {\"val_loss\": val_loss_mean, metric_name: val_metric}\n return {\"val_loss\": val_loss_mean, metric_name: val_metric, \"log\": logs}\n\n def test_step(self, batch, batch_idx):\n raise NotImplementedError()\n\n def test_epoch_end(self, outputs):\n raise NotImplementedError()\n\n def train_dataloader(self) -> DataLoader:\n train_dataset = DatasetRetriever(\n records=self.training_records, transforms=self.training_configs.transforms)\n train_loader = torch.utils.data.DataLoader(\n train_dataset, sampler=BalanceClassSampler(labels=train_dataset.get_labels(), mode=\"downsampling\"),\n batch_size=self.training_configs.batch_size, pin_memory=True, drop_last=True,\n num_workers=self.training_configs.num_workers,\n )\n return train_loader\n\n def val_dataloader(self) -> DataLoader:\n validation_dataset = DatasetRetriever(\n records=self.valid_records, transforms=self.valid_configs.transforms)\n val_loader = torch.utils.data.DataLoader(\n validation_dataset, batch_size=self.valid_configs.batch_size, pin_memory=True, shuffle=False,\n num_workers=self.valid_configs.num_workers, sampler=SequentialSampler(validation_dataset), )\n return val_loader\n\n def test_dataloader(self) -> Union[DataLoader, List[DataLoader]]:\n raise NotImplementedError()\n\n def forward(self, x):\n return self.model(x)\n\n def configure_optimizers(\n self) -> 
Union[optim.Optimizer, Sequence[optim.Optimizer], Dict, Sequence[Dict], Tuple[List, List], None]:\n optimizer = optim.AdamW(self.model.parameters(), lr=self.training_configs.lr)\n\n scheduler_params = self.training_configs.scheduler_params.copy()\n if \"steps_per_epoch\" in scheduler_params.keys():\n steps_per_epoch = int(len(self.training_records) // self.training_configs.batch_size) + 1\n scheduler_params.update({\"steps_per_epoch\": steps_per_epoch})\n\n scheduler = self.training_configs.lr_scheduler(optimizer, **scheduler_params)\n\n # restore checkpoint\n if self.restored_checkpoint is not None:\n optimizer.load_state_dict(self.restored_checkpoint[\"optimizer_states\"])\n scheduler.load_state_dict(self.restored_checkpoint[\"lr_schedulers\"])\n self.current_epoch = self.restored_checkpoint[\"epoch\"] + 1\n\n return [optimizer], [scheduler]\n\n\n# DataSet\nclass _BaseRetriever(Dataset):\n def __init__(self, records: List[Dict[str, Any]], transforms: Optional[Callable] = None):\n super().__init__()\n self.records: List[Dict[str, Any]] = records\n self.transforms: Optional[Callable] = transforms\n\n def _load_one_image(self, index: int) -> np.array:\n image_info: int = self.records[index]\n image_path: str = image_info[\"file_path\"]\n image_name: str = image_info[\"image\"]\n image_kind: str = image_info[\"kind\"]\n\n if not os.path.exists(image_path):\n raise ValueError(f\"file image does not exist: {image_kind}, {image_name}\")\n\n # full_path: work_dir + kind + image_name\n # full_path, kind, image_name,\n image = cv2.imread(image_path, cv2.IMREAD_COLOR)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)\n if self.transforms:\n sample = {\"image\": image}\n sample = self.transforms(**sample)\n image = sample[\"image\"]\n\n return image\n\n def __len__(self) -> int:\n return len(self.records)\n\n\ndef onehot_target(size: int, target: int):\n vec = torch.zeros(size, dtype=torch.float32)\n vec[target] = 1.\n return vec\n\n\nclass DatasetRetriever(_BaseRetriever):\n def __init__(self, records: List[Dict[str, Any]], transforms: Optional[Callable] = None):\n super().__init__(records=records, transforms=transforms)\n self.labels: List[int] = [record.get(\"label\") for record in self.records]\n self.nr_class: int = len(set(self.labels))\n\n def __getitem__(self, index: int):\n image = self._load_one_image(index=index)\n label = onehot_target(self.nr_class, self.labels[index])\n # TODO: make mixup working\n return image, label\n\n def get_labels(self) -> List[int]:\n return self.labels\n\n\nclass SubmissionRetriever(_BaseRetriever):\n def __init__(self, records: List[Dict[str, Any]], transforms: Optional[Callable] = None):\n super().__init__(records=records, transforms=transforms)\n\n def __getitem__(self, index: int) -> Tuple[str, np.array]:\n image = self._load_one_image(index=index)\n image_info: int = self.records[index]\n image_name: str = image_info[\"image\"]\n image_kind: str = image_info[\"kind\"]\n return image_kind, image_name, image\n\n\n# Main Function Block\n########################################################################################################################\ndef index_train_test_images(args):\n working_dir: str = args.data_dir\n output_dir: str = args.cached_dir\n meta_dir: str = args.meta_dir\n file_path_image_quality: str = \"image_quality.csv\"\n\n args.file_path_all_images_info = os.path.join(output_dir, \"all_images_info.parquet\")\n args.file_path_train_images_info = os.path.join(output_dir, \"train_images_info.parquet\")\n 
args.file_path_test_images_info = os.path.join(output_dir, \"test_images_info.parquet\")\n file_paths: List[str] = [\n args.file_path_all_images_info, args.file_path_train_images_info, args.file_path_test_images_info]\n\n if all([os.path.exists(path) for path in file_paths]):\n if not args.refresh_cache:\n return\n\n file_path_image_quality = os.path.join(meta_dir, file_path_image_quality)\n df_quality: pd.DataFrame = pd.DataFrame()\n if os.path.exists(file_path_image_quality):\n df_quality = pd.read_csv(file_path_image_quality).set_index(args.shared_indices)\n print(f\"read in image quality file: {df_quality.shape}\")\n else:\n print(f\"image quality not exist: {file_path_image_quality}\")\n raise ValueError()\n\n # process\n list_all_images: List[str] = list(glob(os.path.join(working_dir, \"*\", \"*.jpg\")))\n df_train = pd.DataFrame({\"file_path\": list_all_images})\n df_train[\"image\"] = df_train[\"file_path\"].apply(lambda x: os.path.basename(x))\n df_train[\"kind\"] = df_train[\"file_path\"].apply(lambda x: os.path.split(os.path.dirname(x))[-1])\n df_train[\"label\"] = df_train[\"kind\"].map({kind: label for label, kind in enumerate(args.labels)})\n df_train.drop(columns=[\"file_path\"], inplace=True)\n df_train.set_index(args.shared_indices, inplace=True)\n\n if not df_quality.empty:\n df_train = df_train.join(df_quality[\"quality\"])\n\n print(f\"Columns: {df_train.columns.tolist()}, N Uniques:\\n{df_train.nunique()}\")\n df_train.sort_values(\"image\", inplace=True)\n df_train.to_parquet(args.file_path_all_images_info)\n df_train.loc[df_train[\"label\"].notnull()].to_parquet(args.file_path_train_images_info)\n df_train.loc[df_train[\"label\"].isnull()].to_parquet(args.file_path_test_images_info)\n return\n\n\ndef process_images_to_records(args, df: pd.DataFrame) -> List[Dict[str, Any]]:\n df[\"file_path\"] = df.apply(lambda x: os.path.join(args.data_dir, x[\"kind\"], x[\"image\"]), axis=1)\n return df.to_dict(\"record\")\n\n\ndef split_data(args, splitter):\n df_train = pd.read_parquet(args.file_path_train_images_info).reset_index()\n if args.debug:\n df_train = df_train.iloc[:200]\n\n df_train[\"label\"] = df_train[\"label\"].astype(np.int32)\n df = df_train.loc[(~df_train[\"image\"].duplicated(keep=\"first\"))]\n\n for train_index, valid_index in splitter.split(X=df[\"label\"], y=df[\"quality\"], groups=df[\"image\"]):\n break\n\n train_df = df_train.loc[df_train[\"image\"].isin(df[\"image\"].iloc[train_index])]\n valid_df = df_train.loc[df_train[\"image\"].isin(df[\"image\"].iloc[valid_index])]\n return train_df, valid_df\n\n\ndef training_lightning(args, model: nn.Module):\n # load_from_checkpoint: not working\n if args.load_checkpoint and os.path.exists(args.checkpoint_path):\n checkpoint = torch.load(args.checkpoint_path)\n model.load_state_dict(checkpoint[\"state_dict\"])\n\n print(checkpoint[\"lr_schedulers\"])\n # not working if using\n # trainer = Trainer(resume_From_checkpoint=args.checkpoint_path)\n if not args.load_weights_only:\n model.restored_checkpoint = checkpoint\n\n # training\n if not args.inference_only:\n metric_name: str = f\"val_{args.eval_metric}\"\n file_path_checkpoint: str = os.path.join(\n args.model_dir, \"__\".join([\"result\", args.model_arch, \"{epoch:03d}-{val_loss:.4f}\"]))\n checkpoint_callback = ModelCheckpoint(\n filepath=file_path_checkpoint, save_top_k=3, verbose=True, monitor=metric_name, mode=\"max\")\n\n early_stop_callback = EarlyStopping(\n monitor=metric_name, min_delta=0., patience=5, verbose=True, mode=\"max\")\n trainer = 
Trainer(\n gpus=args.gpus, min_epochs=1, max_epochs=1000, default_root_dir=args.model_dir,\n accumulate_grad_batches=model.training_configs.accumulate_grad_batches,\n # distributed_backend=\"dpp\",\n early_stop_callback=early_stop_callback,\n checkpoint_callback=checkpoint_callback\n )\n lr_finder = trainer.lr_find(model) # pd.DataFrame(lr_finder.results\n new_lr = lr_finder.suggestion()\n print(f\"optimal learning rate: {new_lr}\")\n\n trainer.fit(model)\n model.freeze()\n\n return model\n\n\ndef inference_proba(args, configs, dataset: Dataset, model: nn.Module) -> pd.DataFrame:\n data_loader = DataLoader(\n dataset, batch_size=configs.batch_size, shuffle=False, num_workers=configs.num_workers, drop_last=False, )\n\n model.eval()\n outputs = list()\n result = {k: list() for k in args.shared_indices}\n total_num_batch: int = int(len(dataset) / configs.batch_size)\n for step, (image_kinds, image_names, images) in enumerate(data_loader):\n print(\n f\"Test Batch Proba: {step:03d} / {total_num_batch:d}, progress: {100. * step / total_num_batch: .02f} %\",\n end=\"\\r\")\n\n result[\"image\"].extend(image_names)\n result[\"kind\"].extend(image_kinds)\n outputs.append(nn.functional.softmax(model(images.cuda()), dim=1).data.cpu())\n\n y_pred = pd.DataFrame(torch.cat(outputs, dim=0).numpy(), columns=args.labels)\n submission = pd.concat([pd.DataFrame(result), y_pred], axis=1).set_index(args.shared_indices).sort_index()\n\n print(f\"\\nFinish Test Proba: {submission.shape}, Stats:\\n{submission.describe()}\")\n return submission\n\n\ndef do_evaluate(submission: pd.DataFrame, label: str = \"Cover\") -> float:\n df = submission.reset_index()[[\"kind\", \"image\", label]]\n df = df.loc[df[\"kind\"].isin(args.labels)]\n df[\"Label\"] = 1. - df[label]\n return alaska_weighted_auc(df[\"kind\"].isin(args.labels[1:]), df[\"Label\"].values)\n\n\ndef do_inference(args, model: nn.Module):\n test_configs = BaseConfigs.from_file(file_path=args.test_configs)\n df_test = pd.read_parquet(args.file_path_test_images_info).reset_index()\n if args.inference_proba:\n df_test = pd.read_parquet(args.file_path_all_images_info).reset_index()\n\n if args.debug:\n df_test = df_test.iloc[:2000] # sample(n=2000, random_state=42)\n\n test_records = process_images_to_records(args, df=df_test)\n if args.tta:\n collect = list()\n for i, tta in enumerate(test_configs.tta_transforms, 1):\n print(f\"Inference TTA: {i:02d} / {len(test_configs.tta_transforms):02d} rounds\")\n dataset = SubmissionRetriever(records=test_records, transforms=tta, )\n df = inference_proba(args, test_configs, dataset=dataset, model=model)\n collect.append(df)\n\n score = do_evaluate(df)\n print(f\"Inference TTA: {i:02d} / {len(test_configs.tta_transforms):02d} rounds: {score:.04f}\")\n\n df = pd.concat(collect, ).groupby(level=args.shared_indices).mean()\n print(f\"\\nFinish Test Proba: {df.shape}, Stats:\\n{df.describe()}\")\n return df\n\n dataset = SubmissionRetriever(records=test_records, transforms=test_configs.transforms, )\n df = inference_proba(args, test_configs, dataset=dataset, model=model)\n return df\n\n\ndef initialize_configs(filename: str):\n if not os.path.exists(filename):\n raise ValueError(\"Spec file {spec_file} does not exist\".format(spec_file=filename))\n\n module_name = filename.split(os.sep)[-1].replace(\".\", \"\")\n\n import importlib.util\n spec = importlib.util.spec_from_file_location(module_name, filename)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n return module\n\n\nlr_schedulers = 
{\n \"ReduceLROnPlateau\": {\n \"lr_scheduler\": optim.lr_scheduler.ReduceLROnPlateau,\n \"step_after_optimizer\": False, # do scheduler.step after optimizer.step\n \"step_after_validation\": True, # do scheduler.step after validation stage loss\n \"params\": {\n \"mode\": \"min\",\n \"factor\": 0.5,\n \"patience\": 1,\n \"verbose\": True,\n \"threshold\": 0.0001,\n \"threshold_mode\": \"abs\",\n \"cooldown\": 0,\n \"min_lr\": 1e-8,\n \"eps\": 1e-08\n },\n },\n\n \"OneCycleLR\": {\n \"lr_scheduler\": optim.lr_scheduler.OneCycleLR,\n \"step_after_optimizer\": True, # do scheduler.step after optimizer.step\n \"step_after_validation\": False, # do scheduler.step after validation stage loss\n \"params\": {\n \"max_lr\": 0.001,\n \"epochs\": 5,\n \"steps_per_epoch\": 30000, # int(len(train_dataset) / batch_size),\n \"pct_start\": 0.1,\n \"anneal_strategy\": \"cos\",\n \"final_div_factor\": 10 ** 5\n },\n },\n}\n\naugment_methods = {\n \"HorizontalFlip\": A.HorizontalFlip,\n \"VerticalFlip\": A.VerticalFlip,\n \"RandomRotate90\": A.RandomRotate90,\n \"RandomGridShuffle\": A.RandomGridShuffle,\n \"InvertImg\": A.InvertImg,\n \"Resize\": A.Resize,\n \"Normalize\": A.Normalize,\n \"Cutout\": A.Cutout,\n \"CoarseDropout\": A.CoarseDropout,\n \"ToFloat\": A.ToFloat,\n \"ToTensorV2\": ToTensorV2,\n}\n\n\ndef transform_factory(item, possible_methods: Dict[str, Any] = augment_methods):\n obj = possible_methods.get(item[\"transform\"])\n params = item[\"params\"]\n return obj(**params)\n\n\nclass BaseConfigs:\n def __init__(self, file_path: str):\n self.configs: Dict[str, Any] = self._load_configs(file_path)\n\n # -------------------\n self.num_workers: int = self.configs.get(\"num_workers\", 8)\n self.batch_size: int = self.configs.get(\"batch_size\", 16)\n\n self.accumulate_grad_batches: Union[int, Dict[int, int], List[list]] = self.configs.get(\n \"accumulate_grad_batches\", 1)\n\n # display\n # -------------------\n self.verbose: bool = self.configs.get(\"verbose\", True)\n self.verbose_step: int = self.configs.get(\"verbose_step\", 1)\n\n # -------------------\n self.n_epochs: int = self.configs.get(\"n_epochs\", 5)\n self.lr: float = self.configs.get(\"lr\", 0.001)\n\n # --------------------\n self.loss: Optional[nn.Module] = None\n\n # config scheduler\n # --------------------\n if \"lr_scheduler\" in self.configs.keys():\n tmp = lr_schedulers.get(self.configs[\"lr_scheduler\"], None)\n self.step_after_optimizer: bool = tmp.get(\n \"step_after_optimizer\", False) # do scheduler.step after optimizer.step\n self.step_after_validation: bool = tmp.get(\n \"step_after_validation\", False) # do scheduler.step after validation stage loss\n self.lr_scheduler = tmp.get(\"lr_scheduler\", None)\n\n # scheduler params\n self.scheduler_params: Dict = tmp.get(\"params\", dict()).copy()\n laod_scheduler_params = self.configs.get(\"scheduler_params\", dict())\n if laod_scheduler_params:\n self.scheduler_params.update(laod_scheduler_params)\n\n if \"max_lr\" in self.scheduler_params.keys():\n self.scheduler_params[\"max_lr\"] = self.lr\n\n if \"epochs\" in self.scheduler_params.keys():\n self.scheduler_params[\"epochs\"] = self.n_epochs\n\n self.transforms: List = self._load_transforms(self.configs[\"augmentations\"])\n self.tta_transforms: List = [self.transforms]\n if \"test_time_augmentations\" in self.configs.keys():\n self.tta_transforms = [self._load_transforms(tta) for tta in self.configs[\"test_time_augmentations\"]]\n\n @staticmethod\n def _load_transforms(augmentations: List):\n return 
A.Compose([transform_factory(item) for item in augmentations])\n\n @staticmethod\n def _load_configs(file_path: str):\n return initialize_configs(file_path).configs\n\n @classmethod\n def from_file(cls, file_path: str):\n return cls(file_path=file_path)\n\n\n# Configs Block Starts\n########################################################################################################################\nclass TrainReduceOnPlateauConfigs:\n num_workers: int = 8\n batch_size: int = 12 # 16\n\n # -------------------\n verbose: bool = True\n verbose_step: int = 1\n\n # -------------------\n n_epochs: int = 5\n lr: float = 0.001\n\n # --------------------\n loss: nn.Module = LabelSmoothing(smoothing=.05)\n\n # --------------------\n step_after_optimizer: bool = False # do scheduler.step after optimizer.step\n step_after_validation = True # do scheduler.step after validation stage loss\n lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau\n scheduler_params = dict(\n mode=\"min\",\n factor=0.5,\n patience=1,\n verbose=True,\n threshold=0.0001,\n threshold_mode=\"abs\",\n cooldown=0,\n min_lr=1e-8,\n eps=1e-08\n )\n\n # Augmentations\n # --------------------\n transforms = A.Compose([\n A.HorizontalFlip(p=0.5),\n A.VerticalFlip(p=0.5),\n A.RandomRotate90(always_apply=False, p=0.5),\n # A.RandomGridShuffle(grid=(3, 3), always_apply=False, p=0.5),\n A.InvertImg(always_apply=False, p=0.5),\n A.Resize(height=512, width=512, p=1.0),\n A.Normalize(always_apply=True),\n ToTensorV2(p=1.0),\n ], p=1.0)\n\n\nclass TrainOneCycleConfigs:\n num_workers: int = 8\n batch_size: int = 14 # efficientnet-b2\n # batch_size: int = 11 # efficientnet-b3\n # batch_size: int = 8 # efficientnet-b4\n # batch_size: int = 6 # efficientnet-b5\n # batch_size: int = 4 # efficientnet-b6\n\n # -------------------\n verbose: bool = True\n verbose_step: int = 1\n\n # -------------------\n n_epochs: int = 40\n lr: float = 0.001\n\n # --------------------\n loss: nn.Module = LabelSmoothing(smoothing=.05)\n\n # --------------------\n step_after_optimizer: bool = True # do scheduler.step after optimizer.step\n step_after_validation: bool = False\n lr_scheduler = torch.optim.lr_scheduler.OneCycleLR\n scheduler_params = dict(\n max_lr=0.001,\n epochs=n_epochs,\n steps_per_epoch=30000, # int(len(train_dataset) / batch_size),\n pct_start=0.1,\n anneal_strategy=\"cos\",\n final_div_factor=10 ** 5\n )\n\n # Augmentations\n # --------------------\n transforms = A.Compose([\n A.HorizontalFlip(p=0.5),\n A.VerticalFlip(p=0.5),\n A.RandomRotate90(always_apply=False, p=0.5),\n # A.RandomGridShuffle(grid=(3, 3), always_apply=False, p=0.5),\n A.InvertImg(always_apply=False, p=0.5),\n A.Resize(height=512, width=512, p=1.0),\n A.Normalize(always_apply=True),\n ToTensorV2(p=1.0),\n ], p=1.0)\n\n\nclass ValidConfigs:\n num_workers: int = 8\n batch_size: int = 16 # 16\n\n transforms: A.Compose = A.Compose([\n A.Resize(height=512, width=512, p=1.0),\n A.Normalize(always_apply=True),\n ToTensorV2(p=1.0),\n ], p=1.0)\n\n\nclass TestConfigs:\n num_workers = 8\n batch_size = 8 # 16\n\n transforms: A.Compose = A.Compose([\n A.Resize(height=512, width=512, p=1.0),\n A.Normalize(always_apply=True),\n ToTensorV2(p=1.0),\n ], p=1.0)\n\n\n########################################################################################################################\n# Configs Block Ends\n\n\ndef main(args):\n args.labels: List[str] = [\"Cover\", \"JMiPOD\", \"JUNIWARD\", \"UERD\"]\n args.shared_indices: List[str] = [\"image\", \"kind\"]\n\n 
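# NOTE (editorial comment, not part of the original source): label order matters downstream,\n    # since index_train_test_images enumerates args.labels the mapping is effectively\n    # {\"Cover\": 0, \"JMiPOD\": 1, \"JUNIWARD\": 2, \"UERD\": 3}, and the submission / evaluation code\n    # scores 1 - P(Cover) as the stego probability; args.shared_indices is the (image, kind)\n    # index shared by the cached parquet files and the TTA averaging.\n    # A sanity check one could add here (kept commented out to leave the original flow unchanged):\n    # assert args.labels[0] == \"Cover\"\n    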
seed_everything(args.init_seed)\n index_train_test_images(args)\n\n if \"efficientnet\" in args.model_arch:\n model = EfficientNet.from_pretrained(\n args.model_arch, advprop=False, in_channels=3, num_classes=len(args.labels))\n else:\n # \"seresnet34\", resnext50_32x4d\"\n model = timm.create_model(\n args.model_arch, pretrained=True, num_classes=len(args.labels), in_chans=3, drop_rate=.5)\n\n # loading info for training\n training_configs = None\n validation_configs = None\n training_records = list()\n valid_records = list()\n if not args.inference_only:\n # configs\n validation_configs = BaseConfigs.from_file(file_path=args.valid_configs)\n training_configs = BaseConfigs.from_file(file_path=args.train_configs)\n training_configs.loss = LabelSmoothing(smoothing=.05)\n\n # split data\n skf = StratifiedKFold(n_splits=5)\n train_df, valid_df = split_data(args=args, splitter=skf)\n\n #\n training_records = process_images_to_records(args, df=train_df)\n valid_records = process_images_to_records(args, df=valid_df)\n\n # use lightning\n if args.use_lightning:\n model = BaseLightningModule(\n model, training_configs=training_configs, training_records=training_records,\n valid_configs=validation_configs, valid_records=valid_records, eval_metric_name=args.eval_metric,\n eval_metric_func=alaska_weighted_auc)\n\n model = training_lightning(args=args, model=model)\n model.freeze()\n\n # raw\n if args.gpus is not None:\n model = model.cuda()\n device = torch.device(\"cuda:0\")\n\n if not args.inference_only and not args.use_lightning:\n train_dataset = DatasetRetriever(records=training_records, transforms=training_configs.transforms)\n train_loader = DataLoader(\n train_dataset,\n sampler=BalanceClassSampler(labels=train_dataset.get_labels(), mode=\"downsampling\"),\n batch_size=training_configs.batch_size,\n pin_memory=False,\n drop_last=True,\n num_workers=training_configs.num_workers,\n )\n validation_dataset = DatasetRetriever(records=valid_records, transforms=training_configs.transforms)\n val_loader = DataLoader(\n validation_dataset,\n batch_size=validation_configs.batch_size,\n num_workers=validation_configs.num_workers,\n shuffle=False,\n sampler=SequentialSampler(validation_dataset),\n pin_memory=False,\n )\n\n fitter = Fitter(model=model, device=device, config=training_configs)\n if args.load_checkpoint and os.path.exists(args.checkpoint_path):\n fitter.load(args.checkpoint_path, model_weights_only=args.load_weights_only)\n # fitter.load(f\"{fitter.base_dir}/best-checkpoint-024epoch.bin\")\n fitter.fit(train_loader, val_loader)\n\n # Test\n submission = do_inference(args, model=model)\n if args.inference_proba:\n score = do_evaluate(args, submission)\n print(f\"Inference TTA: {score:.04f}\")\n file_path = os.path.join(args.cached_dir, f\"proba__arch_{args.model_arch}__metric_{score:.4f}.parquet\")\n submission.to_parquet(file_path)\n else:\n print(f\"Inference Test:\")\n image, kind = args.shared_indices\n df = submission.reset_index()[[image, args.labels[0]]]\n df.columns = [\"Id\", \"Label\"]\n df.set_index(\"Id\", inplace=True)\n df[\"Label\"] = 1. 
- df[\"Label\"]\n df.to_csv(\"submission.csv\", index=True)\n print(f\"\\nSubmission Stats:\\n{df.describe()}\\nSubmission:\\n{df.head()}\")\n\n return\n\n\ndef safe_mkdir(directory: str) -> bool:\n if not os.path.exists(directory):\n os.makedirs(directory)\n print(f\"make dir: {directory}\")\n return True\n\n print(f\"skip making dir: {directory}\")\n return False\n\n\nif \"__main__\" == __name__:\n #\n default_output_dir: str = \"../input/alaska2-image-steganalysis-output/\"\n default_cached_dir: str = \"../input/alaska2-image-steganalysis-cached-data/\"\n default_meta_dir: str = \"../input/alaska2-image-steganalysis-image-quality/\"\n default_model_dir: str = \"../input/alaska2-image-steganalysis-models/\"\n default_data_dir: str = \"../input/alaska2-image-steganalysis/\"\n #\n default_model_arch: str = \"efficientnet-b2\"\n #\n default_train_configs: str = \"../configs/train_baseline.py\"\n default_valid_configs: str = \"../configs/valid_baseline.py\"\n default_test_configs: str = \"../configs/test_baseline.py\"\n #\n default_n_jobs: int = 8\n default_init_seed: int = 42\n #\n default_eval_metric_name: str = \"weighted_auc\"\n\n parser = ArgumentParser()\n parser.add_argument(\"--output-dir\", type=str, default=default_output_dir, help=\"folder for output\")\n parser.add_argument(\"--cached-dir\", type=str, default=default_cached_dir, help=\"folder for cached data\")\n parser.add_argument(\"--meta-dir\", type=str, default=default_meta_dir, help=\"folder for meta data\")\n parser.add_argument(\"--model-dir\", type=str, default=default_model_dir, help=\"folder for models\")\n parser.add_argument(\"--data-dir\", type=str, default=default_data_dir, help=\"folder for data\")\n #\n parser.add_argument(\"--eval-metric\", type=str, default=default_eval_metric_name, help=\"eval metric name\")\n parser.add_argument(\"--model-arch\", type=str, default=default_model_arch, help=\"model arch\")\n parser.add_argument(\"--checkpoint-path\", type=str, default=None, help=\"model checkpoint\")\n parser.add_argument(\"--load-checkpoint\", action=\"store_true\", default=False, help=\"load checkpoint\")\n parser.add_argument(\"--load-weights-only\", action=\"store_true\", default=False, help=\"load weights only\")\n # configs\n parser.add_argument(\"--train-configs\", type=str, default=default_train_configs, help=\"configs for training\")\n parser.add_argument(\"--valid-configs\", type=str, default=default_valid_configs, help=\"configs for validation\")\n parser.add_argument(\"--test-configs\", type=str, default=default_test_configs, help=\"configs for test\")\n # functional\n parser.add_argument(\"--tta\", action=\"store_true\", default=False, help=\"perform test time augmentation\")\n parser.add_argument(\"--inference-proba\", action=\"store_true\", default=False, help=\"only perform inference proba\")\n parser.add_argument(\"--inference-only\", action=\"store_true\", default=False, help=\"only perform inference\")\n parser.add_argument(\"--use-lightning\", action=\"store_true\", default=False, help=\"using lightning trainer\")\n parser.add_argument(\"--refresh-cache\", action=\"store_true\", default=False, help=\"refresh cached data\")\n parser.add_argument(\"--n-jobs\", type=int, default=default_n_jobs, help=\"num worker\")\n parser.add_argument(\"--init-seed\", type=int, default=default_init_seed, help=\"initialize random seed\")\n #\n parser.add_argument(\"--gpus\", default=None)\n # debug\n parser.add_argument(\"--debug\", action=\"store_true\", default=False, help=\"debug\")\n args = 
parser.parse_args()\n\n # house keeping\n safe_mkdir(args.output_dir)\n safe_mkdir(args.cached_dir)\n safe_mkdir(args.model_dir)\n # start program\n main(args)\n", "id": "4208982", "language": "Python", "matching_score": 6.163522243499756, "max_stars_count": 0, "path": "script/starter.py" }, { "content": "configs = {\n \"num_workers\": 8,\n \"batch_size\": 16, # efficientnet-b2\n\n \"n_epochs\": 25,\n \"lr\": 4e-05,\n\n \"accumulate_grad_batches\": 2,\n \"lr_scheduler\": \"ReduceLROnPlateau\",\n \"scheduler_params\": {},\n\n \"augmentations\": [\n {\"transform\": \"HorizontalFlip\", \"params\": {\"p\": .5}, },\n {\"transform\": \"VerticalFlip\", \"params\": {\"p\": .5}, },\n {\"transform\": \"RandomRotate90\", \"params\": {\"always_apply\": False, \"p\": .5}, },\n {\"transform\": \"InvertImg\", \"params\": {\"p\": .5}, },\n {\"transform\": \"Resize\", \"params\": {\"height\": 512, \"width\": 512, \"always_apply\": True, \"p\": 1.}, },\n {\"transform\": \"ToFloat\", \"params\": {\"max_value\": 255, \"always_apply\": True, \"p\": 1.}, },\n # {\"transform\": \"Normalize\", \"params\": {\"always_apply\": True, \"p\": 1.}, },\n {\"transform\": \"ToTensorV2\", \"params\": {\"always_apply\": True, \"p\": 1.}, },\n ],\n}\n\n", "id": "2897441", "language": "Python", "matching_score": 0.391193687915802, "max_stars_count": 0, "path": "configs/train_seresnext26t_32x4d_finetune_eff.py" }, { "content": "import tensorflow as tf\n\n\ndef _configure_pretrained_model_block(model, max_seq_length: int, is_distilled: bool = False):\n # if config.output_hidden_states = True, obtain hidden states via bert_model(...)[-1]\n embedding_index = 0\n if model.config.output_hidden_states:\n embedding_index = -1\n\n input_ids = tf.keras.layers.Input((max_seq_length,), dtype=tf.int32)\n attention_mask = tf.keras.layers.Input((max_seq_length,), dtype=tf.int32)\n if is_distilled:\n embedding = model(input_ids, attention_mask=attention_mask)[embedding_index]\n return (input_ids, attention_mask), embedding\n\n token_type_ids = tf.keras.layers.Input((max_seq_length,), dtype=tf.int32)\n embedding = model(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)[embedding_index]\n return (input_ids, attention_mask, token_type_ids), embedding\n\n\ndef _res_net_block(\n input_data, filters: int = 64, kernel_size: int = 3, strides: int = 1, dilation_rate: int = 1,\n data_format='channels_first'):\n x = tf.keras.layers.Conv1D(\n filters=filters, kernel_size=kernel_size, strides=strides, dilation_rate=dilation_rate, activation='relu',\n padding='same', data_format=data_format)(input_data)\n x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.Conv1D(\n filters=filters, kernel_size=kernel_size, strides=strides, dilation_rate=dilation_rate, activation=None,\n padding='same', data_format=data_format)(x)\n x = tf.keras.layers.BatchNormalization()(x)\n return x\n\n\ndef wave_net_alike_arch(\n input_data, filters: int = 32, kernel_size: int = 3, strides: int = 1, data_format='channels_first'):\n pool_size: int = 8\n # block d1\n x = _res_net_block(\n input_data, filters=filters, kernel_size=kernel_size, strides=strides, dilation_rate=1, data_format=data_format)\n d1 = _res_net_block(\n x, filters=filters, kernel_size=kernel_size, strides=strides, dilation_rate=1, data_format=data_format)\n d1 = tf.keras.layers.MaxPooling1D(pool_size)(d1)\n\n # block d2\n x = _res_net_block(\n input_data, filters=filters, kernel_size=kernel_size, strides=strides, dilation_rate=2, data_format=data_format)\n d2 = _res_net_block(\n 
x, filters=filters, kernel_size=kernel_size, strides=strides, dilation_rate=2, data_format=data_format)\n d2 = tf.keras.layers.MaxPooling1D(pool_size)(d2)\n\n # block d4\n x = _res_net_block(\n input_data, filters=filters, kernel_size=kernel_size, strides=strides, dilation_rate=4, data_format=data_format)\n d4 = _res_net_block(\n x, filters=filters, kernel_size=kernel_size, strides=strides, dilation_rate=4, data_format=data_format)\n d4 = tf.keras.layers.MaxPooling1D(pool_size)(d4)\n\n # block d8\n x = _res_net_block(\n input_data, filters=filters, kernel_size=kernel_size, strides=strides, dilation_rate=8, data_format=data_format)\n d8 = _res_net_block(\n x, filters=filters, kernel_size=kernel_size, strides=strides, dilation_rate=8, data_format=data_format)\n d8 = tf.keras.layers.MaxPooling1D(pool_size)(d8)\n\n # hidden\n x = tf.keras.layers.Add()([d1, d2])\n x = _res_net_block(\n x, filters=filters, kernel_size=kernel_size, strides=strides, dilation_rate=2, data_format=data_format)\n x = _res_net_block(\n x, filters=filters, kernel_size=kernel_size, strides=strides, dilation_rate=2, data_format=data_format)\n hidden_d1d2 = tf.keras.layers.MaxPooling1D(pool_size)(x)\n\n # hidden\n x = tf.keras.layers.Add()([d4, d8])\n x = _res_net_block(\n x, filters=filters, kernel_size=kernel_size, strides=strides, dilation_rate=2, data_format=data_format)\n x = _res_net_block(\n x, filters=filters, kernel_size=kernel_size, strides=strides, dilation_rate=2, data_format=data_format)\n hidden_d4d8 = tf.keras.layers.MaxPooling1D(pool_size)(x)\n\n # hidden\n x = tf.keras.layers.Add()([hidden_d1d2, hidden_d4d8])\n x = _res_net_block(\n x, filters=filters, kernel_size=kernel_size, strides=strides, dilation_rate=1, data_format=data_format)\n x = _res_net_block(\n x, filters=filters, kernel_size=kernel_size, strides=strides, dilation_rate=1, data_format=data_format)\n\n x = tf.keras.layers.GlobalAveragePooling1D()(x)\n return x\n\n\ndef shallow_wave_net_alike_arch(\n input_data, filters: int = 32, kernel_size: int = 3, strides: int = 1, data_format='channels_first'):\n pool_size: int = 8\n # block d1\n x = _res_net_block(\n input_data, filters=filters, kernel_size=kernel_size, strides=strides, dilation_rate=2, data_format=data_format)\n d1 = _res_net_block(\n x, filters=filters, kernel_size=kernel_size, strides=strides, dilation_rate=1, data_format=data_format)\n d1 = tf.keras.layers.MaxPooling1D(pool_size)(d1)\n\n # block d2\n x = _res_net_block(\n input_data, filters=filters, kernel_size=kernel_size, strides=strides, dilation_rate=8, data_format=data_format)\n d2 = _res_net_block(\n x, filters=filters, kernel_size=kernel_size, strides=strides, dilation_rate=4, data_format=data_format)\n d2 = tf.keras.layers.MaxPooling1D(pool_size)(d2)\n\n # hidden\n x = tf.keras.layers.Add()([d1, d2])\n x = _res_net_block(\n x, filters=filters, kernel_size=kernel_size, strides=strides, dilation_rate=2, data_format=data_format)\n x = _res_net_block(\n x, filters=filters, kernel_size=kernel_size, strides=strides, dilation_rate=1, data_format=data_format)\n\n x = tf.keras.layers.GlobalAveragePooling1D()(x)\n return x\n\n\ndef shallow_res_net_alike_arch(\n input_data, filters: int = 32, kernel_size: int = 3, strides: int = 1, data_format='channels_first'):\n pool_size: int = 8\n\n # block d1\n x = _res_net_block(\n input_data, filters=filters, kernel_size=kernel_size, strides=strides, dilation_rate=2, data_format=data_format)\n d1 = _res_net_block(\n x, filters=filters, kernel_size=kernel_size, strides=strides, dilation_rate=1, 
data_format=data_format)\n d1 = tf.keras.layers.MaxPooling1D(pool_size)(d1)\n\n # block d2\n x = _res_net_block(\n x, filters=filters, kernel_size=kernel_size, strides=strides, dilation_rate=2, data_format=data_format)\n d2 = _res_net_block(\n x, filters=filters, kernel_size=kernel_size, strides=strides, dilation_rate=1, data_format=data_format)\n d2 = tf.keras.layers.MaxPooling1D(pool_size)(d2)\n\n # hidden\n x = tf.keras.layers.Add()([d1, d2])\n x = _res_net_block(\n x, filters=filters, kernel_size=kernel_size, strides=strides, dilation_rate=2, data_format=data_format)\n x = _res_net_block(\n x, filters=filters, kernel_size=kernel_size, strides=strides, dilation_rate=1, data_format=data_format)\n\n x = tf.keras.layers.GlobalAveragePooling1D()(x)\n return x\n\n\ndef dense_block(\n input_data, filters: int = 32, kernel_size: int = 3, strides: int = 1, data_format='channels_first'):\n pool_size: int = 8\n\n x = _res_net_block(\n input_data, filters=filters, kernel_size=kernel_size, strides=strides, dilation_rate=4, data_format=data_format)\n x = _res_net_block(\n x, filters=filters, kernel_size=kernel_size, strides=strides, dilation_rate=2, data_format=data_format)\n x = _res_net_block(\n x, filters=filters, kernel_size=kernel_size, strides=strides, dilation_rate=1, data_format=data_format)\n x = tf.keras.layers.MaxPooling1D(pool_size)(x)\n x = tf.keras.layers.GlobalAveragePooling1D()(x)\n return x\n\n\ndef create_model_from_pretrained(\n model, max_seq_length_question: int, max_seq_length_answer: int, output_size: int = 30,\n is_distilled: bool = False):\n model.trainable = False\n\n q_inputs, q_embed = _configure_pretrained_model_block(model, max_seq_length_question, is_distilled=is_distilled)\n a_inputs, a_embed = _configure_pretrained_model_block(model, max_seq_length_answer, is_distilled=is_distilled)\n\n if is_distilled:\n q_input_ids, q_attention_mask = q_inputs\n a_input_ids, a_attention_mask = a_inputs\n inputs = [q_input_ids, q_attention_mask, a_input_ids, a_attention_mask]\n else:\n q_input_ids, q_attention_mask, q_token_type_ids = q_inputs\n a_input_ids, a_attention_mask, a_token_type_ids = a_inputs\n inputs = [q_input_ids, q_attention_mask, q_token_type_ids, a_input_ids, a_attention_mask, a_token_type_ids]\n\n embed_process = dense_block # make it a bit complex than GlobalAveragePooling1D\n q_embed = embed_process(q_embed)\n a_embed = embed_process(a_embed)\n\n subtracted = tf.keras.layers.Subtract()([q_embed, a_embed])\n x = tf.keras.layers.Concatenate()([q_embed, a_embed, subtracted])\n x = tf.keras.layers.Dense(x.shape[-1], activation='relu')(x)\n x = tf.keras.layers.Dropout(0.2)(x)\n x = tf.keras.layers.Dense(output_size, activation='sigmoid')(x)\n model = tf.keras.models.Model(inputs=inputs, outputs=x)\n # model.summary() # debug purpose\n return model\n", "id": "8155773", "language": "Python", "matching_score": 2.572425603866577, "max_stars_count": 2, "path": "nlp_utils/Solver/TransformerModelFactory.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nThis script is describing a ConvNet model, with its parameters could be set \nat 'param.py'. 
It takes multi-inputs which are TWO-channels and meta information \nsuch as 'inc_angle'.\n\n@author: cttsai (<NAME>), @Oct 2017\n\"\"\"\nfrom keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Input, Flatten\nfrom keras.layers import GlobalMaxPooling2D\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.merge import Concatenate\nfrom keras.models import Model\nfrom keras.optimizers import Adam\n\n\ndef conv_block(x, nf=8, k=3, s=1, nb=2, p_act='elu'):\n \n for i in range(nb):\n x = Conv2D(filters=nf, kernel_size=(k, k), strides=(s, s), \n activation=p_act,\n padding='same', kernel_initializer='he_uniform')(x)\n \n return x\n\ndef dense_block(x, h=32, d=0.5, m=0., p_act='elu'):\n return Dropout(d) (BatchNormalization(momentum=m) (Dense(h, activation=p_act)(x)))\n\n\ndef bn_pooling(x, k=2, s=2, m=0): \n return MaxPooling2D((k, k), strides=(s, s))(BatchNormalization(momentum=m)(x))\n \n\ndef get_model(img_shape=(75, 75, 2), num_classes=1, f=8, h=128):\n\n \"\"\"\n This model structure is inspired and modified from the following kernel\n https://www.kaggle.com/knowledgegrappler/a-keras-prototype-0-21174-on-pl\n img_shape: dimension for input image\n f: filters of first conv blocks and generate filters in the following \n blocks acorrdingly \n h: units in dense hidden layer\n \"\"\" \n \n #model\n bn_model = 0\n p_activation = 'elu'\n \n #\n input_img = Input(shape=img_shape, name='img_inputs')\n input_img_bn = BatchNormalization(momentum=bn_model)(input_img)\n #\n input_meta = Input(shape=[1], name='angle')\n input_meta_bn = BatchNormalization(momentum=bn_model)(input_meta)\n \n #img_1\n #img_1:block_1\n img_1 = conv_block(input_img_bn, nf=f, k=3, s=1, nb=3, p_act=p_activation)\n img_1 = bn_pooling(img_1, k=3, s=3, m=0)\n \n #img_1:block_2\n f*=2\n img_1 = Dropout(0.2)(img_1)\n img_1 = conv_block(img_1, nf=f, k=3, s=1, nb=3, p_act=p_activation)\n img_1 = bn_pooling(img_1, k=3, s=2, m=0)\n \n #img_1:block_3\n f*=2\n img_1 = Dropout(0.2)(img_1)\n img_1 = conv_block(img_1, nf=f, k=3, s=1, nb=3, p_act=p_activation)\n img_1 = bn_pooling(img_1, k=3, s=3, m=0)\n \n #img_1:block_4\n f*=2\n img_1 = Dropout(0.2)(img_1)\n img_1 = conv_block(img_1, nf=f, k=3, s=1, nb=3, p_act=p_activation)\n img_1 = Dropout(0.2)(img_1)\n img_1 = BatchNormalization(momentum=bn_model)(GlobalMaxPooling2D()(img_1))\n \n #img 2\n img_2 = conv_block(input_img_bn, nf=f, k=3, s=1, nb=6, p_act=p_activation)\n img_2 = Dropout(0.2)(img_2)\n img_2 = BatchNormalization(momentum=bn_model)(GlobalMaxPooling2D()(img_2))\n \n #full connect\n concat = (Concatenate()([img_1, img_2, input_meta_bn]))\n x = dense_block(concat, h=h)\n x = dense_block(x, h=h)\n output = Dense(num_classes, activation='sigmoid')(x)\n \n model = Model([input_img, input_meta], output)\n\n model.summary()\n \n return model\n\nif __name__ == '__main__':\n model = get_model()\n\n", "id": "3504310", "language": "Python", "matching_score": 0.8044987916946411, "max_stars_count": 26, "path": "scripts/models.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nThis scripts contains some data processing functions\n\n@author: (<NAME>), @Oct 2017\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\n\ndef rescale(imgs): return imgs / 100. 
+ 0.5\n\ndef read_jason(file='', loc='../input'):\n\n df = pd.read_json('{}/{}'.format(loc, file))\n df['inc_angle'] = df['inc_angle'].replace('na', -1).astype(float)\n #print(df['inc_angle'].value_counts())\n \n band1 = np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in df[\"band_1\"]])\n band2 = np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in df[\"band_2\"]])\n df = df.drop(['band_1', 'band_2'], axis=1)\n\n bands = np.stack((band1, band2), axis=-1)\n del band1, band2\n \n return df, bands\n\n", "id": "8798203", "language": "Python", "matching_score": 0.8408070802688599, "max_stars_count": 26, "path": "scripts/utils.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nThis script contains some image augmentations by calling 'opencv' and \n'keras.preprocessing.image'. Currently, 4 kinds of augmentations: \n'Flip', 'Rotate', 'Shift', 'Zoom' are available.\n\n@author: cttsai (<NAME>), @Oct 2017\n\"\"\"\n\nfrom random import choice\nimport cv2\nimport numpy as np\nimport keras.preprocessing.image as prep\n\n#data augmentations\n############################################################################### \ndef HorizontalFlip(image, u=0.5, v=1.0):\n \n if v < u:\n image = cv2.flip(image, 1)\n \n return image\n\n\ndef VerticalFlip(image, u=0.5, v=1.0):\n \n if v < u:\n image = cv2.flip(image, 0)\n \n return image\n\n\ndef Rotate90(image, u=0.5, v=1.0):\n\n if v < u:\n image = np.rot90(image, k=choice([0, 1, 2, 3]), axes=(0, 1))\n\n return image\n \n\ndef Rotate(image, rotate_rg=45, u=0.5, v=1.0):\n\n if v < u:\n image = prep.random_rotation(image, rg=rotate_rg, \n row_axis=0, col_axis=1, channel_axis=2)\n\n return image\n\n\ndef Shift(image, width_rg=0.1, height_rg=0.1, u=0.5, v=1.0):\n\n if v < u:\n image = prep.random_shift(image, wrg=width_rg, hrg=height_rg, \n row_axis=0, col_axis=1, channel_axis=2)\n\n return image\n\n\ndef Zoom(image, zoom_rg=(0.1, 0.1), u=0.5, v=1.0):\n\n if v < u:\n image = prep.random_zoom(image, zoom_range=zoom_rg,\n row_axis=0, col_axis=1, channel_axis=2)\n\n return image\n", "id": "11906717", "language": "Python", "matching_score": 0.2384658306837082, "max_stars_count": 26, "path": "scripts/augmentations.py" }, { "content": "# Forked from excellent kernel : https://www.kaggle.com/jsaguiar/updated-0-792-lb-lightgbm-with-simple-features\n# From Kaggler : https://www.kaggle.com/jsaguiar\n# Just added a few features so I thought I had to make release it as well...\n\nimport numpy as np\nimport pandas as pd\nimport gc\nimport time\nfrom contextlib import contextmanager\nimport lightgbm as lgb\nfrom sklearn.metrics import roc_auc_score, roc_curve\nfrom sklearn.model_selection import KFold, StratifiedKFold\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\nfeatures_with_no_imp_at_least_twice = [\n 'ACTIVE_CNT_CREDIT_PROLONG_SUM', 'ACTIVE_CREDIT_DAY_OVERDUE_MEAN', 'AMT_REQ_CREDIT_BUREAU_DAY', 'AMT_REQ_CREDIT_BUREAU_HOUR',\n 'AMT_REQ_CREDIT_BUREAU_WEEK', 'BURO_CNT_CREDIT_PROLONG_SUM', 'BURO_CREDIT_ACTIVE_Bad debt_MEAN', 'BURO_CREDIT_ACTIVE_nan_MEAN',\n 'BURO_CREDIT_CURRENCY_currency 1_MEAN', 'BURO_CREDIT_CURRENCY_currency 2_MEAN', 'BURO_CREDIT_CURRENCY_currency 3_MEAN',\n 'BURO_CREDIT_CURRENCY_currency 4_MEAN', 'BURO_CREDIT_CURRENCY_nan_MEAN', 'BURO_CREDIT_DAY_OVERDUE_MAX', 'BURO_CREDIT_DAY_OVERDUE_MEAN',\n 'BURO_CREDIT_TYPE_Cash loan (non-earmarked)_MEAN', 'BURO_CREDIT_TYPE_Interbank credit_MEAN', 
'BURO_CREDIT_TYPE_Loan for business development_MEAN',\n 'BURO_CREDIT_TYPE_Loan for purchase of shares (margin lending)_MEAN', 'BURO_CREDIT_TYPE_Loan for the purchase of equipment_MEAN',\n 'BURO_CREDIT_TYPE_Loan for working capital replenishment_MEAN', 'BURO_CREDIT_TYPE_Mobile operator loan_MEAN',\n 'BURO_CREDIT_TYPE_Real estate loan_MEAN', 'BURO_CREDIT_TYPE_Unknown type of loan_MEAN', 'BURO_CREDIT_TYPE_nan_MEAN',\n 'BURO_MONTHS_BALANCE_MAX_MAX', 'BURO_STATUS_2_MEAN_MEAN', 'BURO_STATUS_3_MEAN_MEAN', 'BURO_STATUS_4_MEAN_MEAN', 'BURO_STATUS_5_MEAN_MEAN',\n 'BURO_STATUS_nan_MEAN_MEAN', 'CC_AMT_DRAWINGS_ATM_CURRENT_MIN', 'CC_AMT_DRAWINGS_CURRENT_MIN', 'CC_AMT_DRAWINGS_OTHER_CURRENT_MAX',\n 'CC_AMT_DRAWINGS_OTHER_CURRENT_MEAN', 'CC_AMT_DRAWINGS_OTHER_CURRENT_MIN', 'CC_AMT_DRAWINGS_OTHER_CURRENT_SUM',\n 'CC_AMT_DRAWINGS_OTHER_CURRENT_VAR', 'CC_AMT_INST_MIN_REGULARITY_MIN', 'CC_AMT_PAYMENT_TOTAL_CURRENT_MIN', 'CC_AMT_PAYMENT_TOTAL_CURRENT_VAR',\n 'CC_AMT_RECIVABLE_SUM', 'CC_AMT_TOTAL_RECEIVABLE_MAX', 'CC_AMT_TOTAL_RECEIVABLE_MIN', 'CC_AMT_TOTAL_RECEIVABLE_SUM', 'CC_AMT_TOTAL_RECEIVABLE_VAR',\n 'CC_CNT_DRAWINGS_ATM_CURRENT_MIN', 'CC_CNT_DRAWINGS_CURRENT_MIN', 'CC_CNT_DRAWINGS_OTHER_CURRENT_MAX', 'CC_CNT_DRAWINGS_OTHER_CURRENT_MEAN',\n 'CC_CNT_DRAWINGS_OTHER_CURRENT_MIN', 'CC_CNT_DRAWINGS_OTHER_CURRENT_SUM', 'CC_CNT_DRAWINGS_OTHER_CURRENT_VAR', 'CC_CNT_DRAWINGS_POS_CURRENT_SUM',\n 'CC_CNT_INSTALMENT_MATURE_CUM_MAX', 'CC_CNT_INSTALMENT_MATURE_CUM_MIN', 'CC_COUNT', 'CC_MONTHS_BALANCE_MAX', 'CC_MONTHS_BALANCE_MEAN',\n 'CC_MONTHS_BALANCE_MIN', 'CC_MONTHS_BALANCE_SUM', 'CC_NAME_CONTRACT_STATUS_Active_MAX', 'CC_NAME_CONTRACT_STATUS_Active_MIN',\n 'CC_NAME_CONTRACT_STATUS_Approved_MAX', 'CC_NAME_CONTRACT_STATUS_Approved_MEAN', 'CC_NAME_CONTRACT_STATUS_Approved_MIN',\n 'CC_NAME_CONTRACT_STATUS_Approved_SUM', 'CC_NAME_CONTRACT_STATUS_Approved_VAR', 'CC_NAME_CONTRACT_STATUS_Completed_MAX',\n 'CC_NAME_CONTRACT_STATUS_Completed_MEAN', 'CC_NAME_CONTRACT_STATUS_Completed_MIN', 'CC_NAME_CONTRACT_STATUS_Completed_SUM', 'CC_NAME_CONTRACT_STATUS_Completed_VAR',\n 'CC_NAME_CONTRACT_STATUS_Demand_MAX', 'CC_NAME_CONTRACT_STATUS_Demand_MEAN', 'CC_NAME_CONTRACT_STATUS_Demand_MIN', 'CC_NAME_CONTRACT_STATUS_Demand_SUM',\n 'CC_NAME_CONTRACT_STATUS_Demand_VAR', 'CC_NAME_CONTRACT_STATUS_Refused_MAX', 'CC_NAME_CONTRACT_STATUS_Refused_MEAN', 'CC_NAME_CONTRACT_STATUS_Refused_MIN',\n 'CC_NAME_CONTRACT_STATUS_Refused_SUM', 'CC_NAME_CONTRACT_STATUS_Refused_VAR', 'CC_NAME_CONTRACT_STATUS_Sent proposal_MAX',\n 'CC_NAME_CONTRACT_STATUS_Sent proposal_MEAN', 'CC_NAME_CONTRACT_STATUS_Sent proposal_MIN', 'CC_NAME_CONTRACT_STATUS_Sent proposal_SUM',\n 'CC_NAME_CONTRACT_STATUS_Sent proposal_VAR', 'CC_NAME_CONTRACT_STATUS_Signed_MAX', 'CC_NAME_CONTRACT_STATUS_Signed_MEAN', 'CC_NAME_CONTRACT_STATUS_Signed_MIN',\n 'CC_NAME_CONTRACT_STATUS_Signed_SUM', 'CC_NAME_CONTRACT_STATUS_Signed_VAR', 'CC_NAME_CONTRACT_STATUS_nan_MAX', 'CC_NAME_CONTRACT_STATUS_nan_MEAN',\n 'CC_NAME_CONTRACT_STATUS_nan_MIN', 'CC_NAME_CONTRACT_STATUS_nan_SUM', 'CC_NAME_CONTRACT_STATUS_nan_VAR', 'CC_SK_DPD_DEF_MAX',\n 'CC_SK_DPD_DEF_MIN', 'CC_SK_DPD_DEF_SUM', 'CC_SK_DPD_DEF_VAR', 'CC_SK_DPD_MAX', 'CC_SK_DPD_MEAN', 'CC_SK_DPD_MIN', 'CC_SK_DPD_SUM',\n 'CC_SK_DPD_VAR', 'CLOSED_AMT_CREDIT_SUM_LIMIT_MEAN', 'CLOSED_AMT_CREDIT_SUM_LIMIT_SUM', 'CLOSED_AMT_CREDIT_SUM_OVERDUE_MEAN',\n 'CLOSED_CNT_CREDIT_PROLONG_SUM', 'CLOSED_CREDIT_DAY_OVERDUE_MAX', 'CLOSED_CREDIT_DAY_OVERDUE_MEAN', 'CLOSED_MONTHS_BALANCE_MAX_MAX',\n 'CNT_CHILDREN', 'ELEVATORS_MEDI', 'ELEVATORS_MODE', 
'EMERGENCYSTATE_MODE_No', 'EMERGENCYSTATE_MODE_Yes', 'ENTRANCES_MODE', 'FLAG_CONT_MOBILE',\n 'FLAG_DOCUMENT_10', 'FLAG_DOCUMENT_11', 'FLAG_DOCUMENT_12', 'FLAG_DOCUMENT_13', 'FLAG_DOCUMENT_14', 'FLAG_DOCUMENT_15', 'FLAG_DOCUMENT_16',\n 'FLAG_DOCUMENT_17', 'FLAG_DOCUMENT_19', 'FLAG_DOCUMENT_2', 'FLAG_DOCUMENT_20', 'FLAG_DOCUMENT_21', 'FLAG_DOCUMENT_4', 'FLAG_DOCUMENT_5',\n 'FLAG_DOCUMENT_6', 'FLAG_DOCUMENT_7', 'FLAG_DOCUMENT_9', 'FLAG_EMAIL', 'FLAG_EMP_PHONE', 'FLAG_MOBIL', 'FLAG_OWN_CAR', 'FLOORSMAX_MODE',\n 'FONDKAPREMONT_MODE_not specified', 'FONDKAPREMONT_MODE_org spec account', 'FONDKAPREMONT_MODE_reg oper account', 'FONDKAPREMONT_MODE_reg oper spec account',\n 'HOUSETYPE_MODE_block of flats', 'HOUSETYPE_MODE_specific housing', 'HOUSETYPE_MODE_terraced house', 'LIVE_REGION_NOT_WORK_REGION',\n 'NAME_CONTRACT_TYPE_Revolving loans', 'NAME_EDUCATION_TYPE_Academic degree', 'NAME_FAMILY_STATUS_Civil marriage', 'NAME_FAMILY_STATUS_Single / not married',\n 'NAME_FAMILY_STATUS_Unknown', 'NAME_FAMILY_STATUS_Widow', 'NAME_HOUSING_TYPE_Co-op apartment', 'NAME_HOUSING_TYPE_With parents',\n 'NAME_INCOME_TYPE_Businessman', 'NAME_INCOME_TYPE_Maternity leave', 'NAME_INCOME_TYPE_Pensioner', 'NAME_INCOME_TYPE_Student',\n 'NAME_INCOME_TYPE_Unemployed', 'NAME_TYPE_SUITE_Children', 'NAME_TYPE_SUITE_Family', 'NAME_TYPE_SUITE_Group of people',\n 'NAME_TYPE_SUITE_Other_A', 'NAME_TYPE_SUITE_Other_B', 'NAME_TYPE_SUITE_Spouse, partner', 'NAME_TYPE_SUITE_Unaccompanied',\n 'NEW_RATIO_BURO_AMT_CREDIT_SUM_DEBT_MEAN', 'NEW_RATIO_BURO_AMT_CREDIT_SUM_LIMIT_SUM', 'NEW_RATIO_BURO_AMT_CREDIT_SUM_OVERDUE_MEAN',\n 'NEW_RATIO_BURO_CNT_CREDIT_PROLONG_SUM', 'NEW_RATIO_BURO_CREDIT_DAY_OVERDUE_MAX', 'NEW_RATIO_BURO_CREDIT_DAY_OVERDUE_MEAN', 'NEW_RATIO_BURO_MONTHS_BALANCE_MAX_MAX',\n 'NEW_RATIO_PREV_AMT_DOWN_PAYMENT_MIN', 'NEW_RATIO_PREV_RATE_DOWN_PAYMENT_MAX', 'OCCUPATION_TYPE_Cleaning staff', 'OCCUPATION_TYPE_Cooking staff',\n 'OCCUPATION_TYPE_HR staff', 'OCCUPATION_TYPE_IT staff', 'OCCUPATION_TYPE_Low-skill Laborers', 'OCCUPATION_TYPE_Managers',\n 'OCCUPATION_TYPE_Private service staff', 'OCCUPATION_TYPE_Realty agents', 'OCCUPATION_TYPE_Sales staff', 'OCCUPATION_TYPE_Secretaries',\n 'OCCUPATION_TYPE_Security staff', 'OCCUPATION_TYPE_Waiters/barmen staff', 'ORGANIZATION_TYPE_Advertising', 'ORGANIZATION_TYPE_Agriculture',\n 'ORGANIZATION_TYPE_Business Entity Type 1', 'ORGANIZATION_TYPE_Business Entity Type 2', 'ORGANIZATION_TYPE_Cleaning', 'ORGANIZATION_TYPE_Culture',\n 'ORGANIZATION_TYPE_Electricity', 'ORGANIZATION_TYPE_Emergency', 'ORGANIZATION_TYPE_Government', 'ORGANIZATION_TYPE_Hotel', 'ORGANIZATION_TYPE_Housing',\n 'ORGANIZATION_TYPE_Industry: type 1', 'ORGANIZATION_TYPE_Industry: type 10', 'ORGANIZATION_TYPE_Industry: type 11', 'ORGANIZATION_TYPE_Industry: type 12',\n 'ORGANIZATION_TYPE_Industry: type 13', 'ORGANIZATION_TYPE_Industry: type 2', 'ORGANIZATION_TYPE_Industry: type 3', 'ORGANIZATION_TYPE_Industry: type 4',\n 'ORGANIZATION_TYPE_Industry: type 5', 'ORGANIZATION_TYPE_Industry: type 6', 'ORGANIZATION_TYPE_Industry: type 7', 'ORGANIZATION_TYPE_Industry: type 8',\n 'ORGANIZATION_TYPE_Insurance', 'ORGANIZATION_TYPE_Legal Services', 'ORGANIZATION_TYPE_Mobile', 'ORGANIZATION_TYPE_Other', 'ORGANIZATION_TYPE_Postal',\n 'ORGANIZATION_TYPE_Realtor', 'ORGANIZATION_TYPE_Religion', 'ORGANIZATION_TYPE_Restaurant', 'ORGANIZATION_TYPE_Security',\n 'ORGANIZATION_TYPE_Security Ministries', 'ORGANIZATION_TYPE_Services', 'ORGANIZATION_TYPE_Telecom', 'ORGANIZATION_TYPE_Trade: type 1',\n 'ORGANIZATION_TYPE_Trade: type 2', 
'ORGANIZATION_TYPE_Trade: type 3', 'ORGANIZATION_TYPE_Trade: type 4', 'ORGANIZATION_TYPE_Trade: type 5',\n 'ORGANIZATION_TYPE_Trade: type 6', 'ORGANIZATION_TYPE_Trade: type 7',\n 'ORGANIZATION_TYPE_Transport: type 1', 'ORGANIZATION_TYPE_Transport: type 2', 'ORGANIZATION_TYPE_Transport: type 4', 'ORGANIZATION_TYPE_University',\n 'ORGANIZATION_TYPE_XNA', 'POS_NAME_CONTRACT_STATUS_Amortized debt_MEAN', 'POS_NAME_CONTRACT_STATUS_Approved_MEAN', 'POS_NAME_CONTRACT_STATUS_Canceled_MEAN',\n 'POS_NAME_CONTRACT_STATUS_Demand_MEAN', 'POS_NAME_CONTRACT_STATUS_XNA_MEAN', 'POS_NAME_CONTRACT_STATUS_nan_MEAN', 'PREV_CHANNEL_TYPE_Car dealer_MEAN',\n 'PREV_CHANNEL_TYPE_nan_MEAN', 'PREV_CODE_REJECT_REASON_CLIENT_MEAN', 'PREV_CODE_REJECT_REASON_SYSTEM_MEAN', 'PREV_CODE_REJECT_REASON_VERIF_MEAN',\n 'PREV_CODE_REJECT_REASON_XNA_MEAN', 'PREV_CODE_REJECT_REASON_nan_MEAN', 'PREV_FLAG_LAST_APPL_PER_CONTRACT_N_MEAN', 'PREV_FLAG_LAST_APPL_PER_CONTRACT_Y_MEAN',\n 'PREV_FLAG_LAST_APPL_PER_CONTRACT_nan_MEAN', 'PREV_NAME_CASH_LOAN_PURPOSE_Building a house or an annex_MEAN', 'PREV_NAME_CASH_LOAN_PURPOSE_Business development_MEAN',\n 'PREV_NAME_CASH_LOAN_PURPOSE_Buying a garage_MEAN', 'PREV_NAME_CASH_LOAN_PURPOSE_Buying a holiday home / land_MEAN', 'PREV_NAME_CASH_LOAN_PURPOSE_Buying a home_MEAN',\n 'PREV_NAME_CASH_LOAN_PURPOSE_Buying a new car_MEAN', 'PREV_NAME_CASH_LOAN_PURPOSE_Buying a used car_MEAN', 'PREV_NAME_CASH_LOAN_PURPOSE_Education_MEAN',\n 'PREV_NAME_CASH_LOAN_PURPOSE_Everyday expenses_MEAN', 'PREV_NAME_CASH_LOAN_PURPOSE_Furniture_MEAN', 'PREV_NAME_CASH_LOAN_PURPOSE_Gasification / water supply_MEAN',\n 'PREV_NAME_CASH_LOAN_PURPOSE_Hobby_MEAN', 'PREV_NAME_CASH_LOAN_PURPOSE_Journey_MEAN', 'PREV_NAME_CASH_LOAN_PURPOSE_Money for a third person_MEAN', 'PREV_NAME_CASH_LOAN_PURPOSE_Other_MEAN',\n 'PREV_NAME_CASH_LOAN_PURPOSE_Payments on other loans_MEAN', 'PREV_NAME_CASH_LOAN_PURPOSE_Purchase of electronic equipment_MEAN', 'PREV_NAME_CASH_LOAN_PURPOSE_Refusal to name the goal_MEAN',\n 'PREV_NAME_CASH_LOAN_PURPOSE_Wedding / gift / holiday_MEAN', 'PREV_NAME_CASH_LOAN_PURPOSE_XAP_MEAN', 'PREV_NAME_CASH_LOAN_PURPOSE_nan_MEAN', 'PREV_NAME_CLIENT_TYPE_XNA_MEAN',\n 'PREV_NAME_CLIENT_TYPE_nan_MEAN', 'PREV_NAME_CONTRACT_STATUS_Unused offer_MEAN', 'PREV_NAME_CONTRACT_STATUS_nan_MEAN', 'PREV_NAME_CONTRACT_TYPE_XNA_MEAN',\n 'PREV_NAME_CONTRACT_TYPE_nan_MEAN', 'PREV_NAME_GOODS_CATEGORY_Additional Service_MEAN', 'PREV_NAME_GOODS_CATEGORY_Animals_MEAN',\n 'PREV_NAME_GOODS_CATEGORY_Auto Accessories_MEAN', 'PREV_NAME_GOODS_CATEGORY_Clothing and Accessories_MEAN', 'PREV_NAME_GOODS_CATEGORY_Construction Materials_MEAN',\n 'PREV_NAME_GOODS_CATEGORY_Direct Sales_MEAN', 'PREV_NAME_GOODS_CATEGORY_Education_MEAN', 'PREV_NAME_GOODS_CATEGORY_Fitness_MEAN',\n 'PREV_NAME_GOODS_CATEGORY_Gardening_MEAN', 'PREV_NAME_GOODS_CATEGORY_Homewares_MEAN', 'PREV_NAME_GOODS_CATEGORY_House Construction_MEAN',\n 'PREV_NAME_GOODS_CATEGORY_Insurance_MEAN', 'PREV_NAME_GOODS_CATEGORY_Jewelry_MEAN', 'PREV_NAME_GOODS_CATEGORY_Medical Supplies_MEAN',\n 'PREV_NAME_GOODS_CATEGORY_Medicine_MEAN', 'PREV_NAME_GOODS_CATEGORY_Office Appliances_MEAN', 'PREV_NAME_GOODS_CATEGORY_Other_MEAN', 'PREV_NAME_GOODS_CATEGORY_Tourism_MEAN',\n 'PREV_NAME_GOODS_CATEGORY_Vehicles_MEAN', 'PREV_NAME_GOODS_CATEGORY_Weapon_MEAN', 'PREV_NAME_GOODS_CATEGORY_XNA_MEAN', 'PREV_NAME_GOODS_CATEGORY_nan_MEAN',\n 'PREV_NAME_PAYMENT_TYPE_Cashless from the account of the employer_MEAN', 'PREV_NAME_PAYMENT_TYPE_Non-cash from your account_MEAN', 'PREV_NAME_PAYMENT_TYPE_nan_MEAN',\n 
'PREV_NAME_PORTFOLIO_Cars_MEAN', 'PREV_NAME_PORTFOLIO_nan_MEAN', 'PREV_NAME_PRODUCT_TYPE_nan_MEAN', 'PREV_NAME_SELLER_INDUSTRY_Construction_MEAN',\n 'PREV_NAME_SELLER_INDUSTRY_Furniture_MEAN', 'PREV_NAME_SELLER_INDUSTRY_Industry_MEAN', 'PREV_NAME_SELLER_INDUSTRY_Jewelry_MEAN', 'PREV_NAME_SELLER_INDUSTRY_MLM partners_MEAN',\n 'PREV_NAME_SELLER_INDUSTRY_Tourism_MEAN', 'PREV_NAME_SELLER_INDUSTRY_nan_MEAN', 'PREV_NAME_TYPE_SUITE_Group of people_MEAN', 'PREV_NAME_YIELD_GROUP_nan_MEAN',\n 'PREV_PRODUCT_COMBINATION_POS industry without interest_MEAN', 'PREV_PRODUCT_COMBINATION_POS mobile without interest_MEAN', 'PREV_PRODUCT_COMBINATION_POS others without interest_MEAN',\n 'PREV_PRODUCT_COMBINATION_nan_MEAN', 'PREV_WEEKDAY_APPR_PROCESS_START_nan_MEAN', 'REFUSED_AMT_DOWN_PAYMENT_MAX', 'REFUSED_AMT_DOWN_PAYMENT_MEAN',\n 'REFUSED_RATE_DOWN_PAYMENT_MIN', 'REG_CITY_NOT_WORK_CITY', 'REG_REGION_NOT_LIVE_REGION', 'REG_REGION_NOT_WORK_REGION',\n 'WALLSMATERIAL_MODE_Block', 'WALLSMATERIAL_MODE_Mixed', 'WALLSMATERIAL_MODE_Monolithic', 'WALLSMATERIAL_MODE_Others', 'WALLSMATERIAL_MODE_Panel',\n 'WALLSMATERIAL_MODE_Wooden', 'WEEKDAY_APPR_PROCESS_START_FRIDAY', 'WEEKDAY_APPR_PROCESS_START_THURSDAY', 'WEEKDAY_APPR_PROCESS_START_TUESDAY'\n]\n\n@contextmanager\ndef timer(title):\n t0 = time.time()\n yield\n print(\"{} - done in {:.0f}s\".format(title, time.time() - t0))\n\n# One-hot encoding for categorical columns with get_dummies\ndef one_hot_encoder(df, nan_as_category = True):\n original_columns = list(df.columns)\n categorical_columns = [col for col in df.columns if df[col].dtype == 'object']\n df = pd.get_dummies(df, columns= categorical_columns, dummy_na= nan_as_category)\n new_columns = [c for c in df.columns if c not in original_columns]\n return df, new_columns\n\n# Preprocess application_train.csv and application_test.csv\ndef application_train_test(num_rows = None, nan_as_category = False):\n # Read data and merge\n df = pd.read_csv('../input/application_train.csv', nrows= num_rows)\n test_df = pd.read_csv('../input/application_test.csv', nrows= num_rows)\n print(\"Train samples: {}, test samples: {}\".format(len(df), len(test_df)))\n df = df.append(test_df).reset_index()\n # Optional: Remove 4 applications with XNA CODE_GENDER (train set)\n df = df[df['CODE_GENDER'] != 'XNA']\n \n docs = [_f for _f in df.columns if 'FLAG_DOC' in _f]\n live = [_f for _f in df.columns if ('FLAG_' in _f) & ('FLAG_DOC' not in _f) & ('_FLAG_' not in _f)]\n \n # NaN values for DAYS_EMPLOYED: 365.243 -> nan\n df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace= True)\n\n inc_by_org = df[['AMT_INCOME_TOTAL', 'ORGANIZATION_TYPE']].groupby('ORGANIZATION_TYPE').median()['AMT_INCOME_TOTAL']\n\n df['NEW_CREDIT_TO_ANNUITY_RATIO'] = df['AMT_CREDIT'] / df['AMT_ANNUITY']\n df['NEW_CREDIT_TO_GOODS_RATIO'] = df['AMT_CREDIT'] / df['AMT_GOODS_PRICE']\n df['NEW_DOC_IND_AVG'] = df[docs].mean(axis=1)\n df['NEW_DOC_IND_STD'] = df[docs].std(axis=1)\n df['NEW_DOC_IND_KURT'] = df[docs].kurtosis(axis=1)\n df['NEW_LIVE_IND_SUM'] = df[live].sum(axis=1)\n df['NEW_LIVE_IND_STD'] = df[live].std(axis=1)\n df['NEW_LIVE_IND_KURT'] = df[live].kurtosis(axis=1)\n df['NEW_INC_PER_CHLD'] = df['AMT_INCOME_TOTAL'] / (1 + df['CNT_CHILDREN'])\n df['NEW_INC_BY_ORG'] = df['ORGANIZATION_TYPE'].map(inc_by_org)\n df['NEW_EMPLOY_TO_BIRTH_RATIO'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH']\n df['NEW_ANNUITY_TO_INCOME_RATIO'] = df['AMT_ANNUITY'] / (1 + df['AMT_INCOME_TOTAL'])\n df['NEW_SOURCES_PROD'] = df['EXT_SOURCE_1'] * df['EXT_SOURCE_2'] * df['EXT_SOURCE_3']\n 
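# NOTE (editorial comment, not part of the original kernel): NEW_SOURCES_PROD is NaN whenever any\n    # of the three EXT_SOURCE scores is missing, whereas the row-wise mean/std built just below\n    # skip NaNs by default (skipna=True) and the std is then back-filled with its own column mean.\n    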
df['NEW_EXT_SOURCES_MEAN'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mean(axis=1)\n df['NEW_SCORES_STD'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].std(axis=1)\n df['NEW_SCORES_STD'] = df['NEW_SCORES_STD'].fillna(df['NEW_SCORES_STD'].mean())\n df['NEW_CAR_TO_BIRTH_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_BIRTH']\n df['NEW_CAR_TO_EMPLOY_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_EMPLOYED']\n df['NEW_PHONE_TO_BIRTH_RATIO'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_BIRTH']\n df['NEW_PHONE_TO_EMPLOY_RATIO'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_EMPLOYED']\n df['NEW_CREDIT_TO_INCOME_RATIO'] = df['AMT_CREDIT'] / df['AMT_INCOME_TOTAL']\n \n # Categorical features with Binary encode (0 or 1; two categories)\n for bin_feature in ['CODE_GENDER', 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY']:\n df[bin_feature], uniques = pd.factorize(df[bin_feature])\n # Categorical features with One-Hot encode\n df, cat_cols = one_hot_encoder(df, nan_as_category)\n \n del test_df\n gc.collect()\n return df\n\n# Preprocess bureau.csv and bureau_balance.csv\ndef bureau_and_balance(num_rows = None, nan_as_category = True):\n bureau = pd.read_csv('../input/bureau.csv', nrows = num_rows)\n bb = pd.read_csv('../input/bureau_balance.csv', nrows = num_rows)\n bb, bb_cat = one_hot_encoder(bb, nan_as_category)\n bureau, bureau_cat = one_hot_encoder(bureau, nan_as_category)\n \n # Bureau balance: Perform aggregations and merge with bureau.csv\n bb_aggregations = {'MONTHS_BALANCE': ['min', 'max', 'size']}\n for col in bb_cat:\n bb_aggregations[col] = ['mean']\n bb_agg = bb.groupby('SK_ID_BUREAU').agg(bb_aggregations)\n bb_agg.columns = pd.Index([e[0] + \"_\" + e[1].upper() for e in bb_agg.columns.tolist()])\n bureau = bureau.join(bb_agg, how='left', on='SK_ID_BUREAU')\n bureau.drop(['SK_ID_BUREAU'], axis=1, inplace= True)\n del bb, bb_agg\n gc.collect()\n \n # Bureau and bureau_balance numeric features\n num_aggregations = {\n 'DAYS_CREDIT': ['min', 'max', 'mean', 'var'],\n 'DAYS_CREDIT_ENDDATE': ['min', 'max', 'mean'],\n 'DAYS_CREDIT_UPDATE': ['mean'],\n 'CREDIT_DAY_OVERDUE': ['max', 'mean'],\n 'AMT_CREDIT_MAX_OVERDUE': ['mean'],\n 'AMT_CREDIT_SUM': ['max', 'mean', 'sum'],\n 'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'],\n 'AMT_CREDIT_SUM_OVERDUE': ['mean'],\n 'AMT_CREDIT_SUM_LIMIT': ['mean', 'sum'],\n 'AMT_ANNUITY': ['max', 'mean'],\n 'CNT_CREDIT_PROLONG': ['sum'],\n 'MONTHS_BALANCE_MIN': ['min'],\n 'MONTHS_BALANCE_MAX': ['max'],\n 'MONTHS_BALANCE_SIZE': ['mean', 'sum']\n }\n # Bureau and bureau_balance categorical features\n cat_aggregations = {}\n for cat in bureau_cat: cat_aggregations[cat] = ['mean']\n for cat in bb_cat: cat_aggregations[cat + \"_MEAN\"] = ['mean']\n \n bureau_agg = bureau.groupby('SK_ID_CURR').agg({**num_aggregations, **cat_aggregations})\n bureau_agg.columns = pd.Index(['BURO_' + e[0] + \"_\" + e[1].upper() for e in bureau_agg.columns.tolist()])\n # Bureau: Active credits - using only numerical aggregations\n active = bureau[bureau['CREDIT_ACTIVE_Active'] == 1]\n active_agg = active.groupby('SK_ID_CURR').agg(num_aggregations)\n cols = active_agg.columns.tolist()\n active_agg.columns = pd.Index(['ACTIVE_' + e[0] + \"_\" + e[1].upper() for e in active_agg.columns.tolist()])\n bureau_agg = bureau_agg.join(active_agg, how='left', on='SK_ID_CURR')\n del active, active_agg\n gc.collect()\n # Bureau: Closed credits - using only numerical aggregations\n closed = bureau[bureau['CREDIT_ACTIVE_Closed'] == 1]\n closed_agg = closed.groupby('SK_ID_CURR').agg(num_aggregations)\n closed_agg.columns = 
pd.Index(['CLOSED_' + e[0] + \"_\" + e[1].upper() for e in closed_agg.columns.tolist()])\n bureau_agg = bureau_agg.join(closed_agg, how='left', on='SK_ID_CURR')\n \n for e in cols:\n bureau_agg['NEW_RATIO_BURO_' + e[0] + \"_\" + e[1].upper()] = bureau_agg['ACTIVE_' + e[0] + \"_\" + e[1].upper()] / bureau_agg['CLOSED_' + e[0] + \"_\" + e[1].upper()]\n \n del closed, closed_agg, bureau\n gc.collect()\n return bureau_agg\n\n# Preprocess previous_applications.csv\ndef previous_applications(num_rows = None, nan_as_category = True):\n prev = pd.read_csv('../input/previous_application.csv', nrows = num_rows)\n prev, cat_cols = one_hot_encoder(prev, nan_as_category= True)\n # Days 365.243 values -> nan\n prev['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace= True)\n prev['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace= True)\n prev['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace= True)\n prev['DAYS_LAST_DUE'].replace(365243, np.nan, inplace= True)\n prev['DAYS_TERMINATION'].replace(365243, np.nan, inplace= True)\n # Add feature: value ask / value received percentage\n prev['APP_CREDIT_PERC'] = prev['AMT_APPLICATION'] / prev['AMT_CREDIT']\n # Previous applications numeric features\n num_aggregations = {\n 'AMT_ANNUITY': ['min', 'max', 'mean'],\n 'AMT_APPLICATION': ['min', 'max', 'mean'],\n 'AMT_CREDIT': ['min', 'max', 'mean'],\n 'APP_CREDIT_PERC': ['min', 'max', 'mean', 'var'],\n 'AMT_DOWN_PAYMENT': ['min', 'max', 'mean'],\n 'AMT_GOODS_PRICE': ['min', 'max', 'mean'],\n 'HOUR_APPR_PROCESS_START': ['min', 'max', 'mean'],\n 'RATE_DOWN_PAYMENT': ['min', 'max', 'mean'],\n 'DAYS_DECISION': ['min', 'max', 'mean'],\n 'CNT_PAYMENT': ['mean', 'sum'],\n }\n # Previous applications categorical features\n cat_aggregations = {}\n for cat in cat_cols:\n cat_aggregations[cat] = ['mean']\n \n prev_agg = prev.groupby('SK_ID_CURR').agg({**num_aggregations, **cat_aggregations})\n prev_agg.columns = pd.Index(['PREV_' + e[0] + \"_\" + e[1].upper() for e in prev_agg.columns.tolist()])\n # Previous Applications: Approved Applications - only numerical features\n approved = prev[prev['NAME_CONTRACT_STATUS_Approved'] == 1]\n approved_agg = approved.groupby('SK_ID_CURR').agg(num_aggregations)\n cols = approved_agg.columns.tolist()\n approved_agg.columns = pd.Index(['APPROVED_' + e[0] + \"_\" + e[1].upper() for e in approved_agg.columns.tolist()])\n prev_agg = prev_agg.join(approved_agg, how='left', on='SK_ID_CURR')\n # Previous Applications: Refused Applications - only numerical features\n refused = prev[prev['NAME_CONTRACT_STATUS_Refused'] == 1]\n refused_agg = refused.groupby('SK_ID_CURR').agg(num_aggregations)\n refused_agg.columns = pd.Index(['REFUSED_' + e[0] + \"_\" + e[1].upper() for e in refused_agg.columns.tolist()])\n prev_agg = prev_agg.join(refused_agg, how='left', on='SK_ID_CURR')\n del refused, refused_agg, approved, approved_agg, prev\n \n for e in cols:\n prev_agg['NEW_RATIO_PREV_' + e[0] + \"_\" + e[1].upper()] = prev_agg['APPROVED_' + e[0] + \"_\" + e[1].upper()] / prev_agg['REFUSED_' + e[0] + \"_\" + e[1].upper()]\n \n gc.collect()\n return prev_agg\n\n# Preprocess POS_CASH_balance.csv\ndef pos_cash(num_rows = None, nan_as_category = True):\n pos = pd.read_csv('../input/POS_CASH_balance.csv', nrows = num_rows)\n pos, cat_cols = one_hot_encoder(pos, nan_as_category= True)\n # Features\n aggregations = {\n 'MONTHS_BALANCE': ['max', 'mean', 'size'],\n 'SK_DPD': ['max', 'mean'],\n 'SK_DPD_DEF': ['max', 'mean']\n }\n for cat in cat_cols:\n aggregations[cat] = ['mean']\n \n pos_agg = 
pos.groupby('SK_ID_CURR').agg(aggregations)\n pos_agg.columns = pd.Index(['POS_' + e[0] + \"_\" + e[1].upper() for e in pos_agg.columns.tolist()])\n # Count pos cash accounts\n pos_agg['POS_COUNT'] = pos.groupby('SK_ID_CURR').size()\n del pos\n gc.collect()\n return pos_agg\n \n# Preprocess installments_payments.csv\ndef installments_payments(num_rows = None, nan_as_category = True):\n ins = pd.read_csv('../input/installments_payments.csv', nrows = num_rows)\n ins, cat_cols = one_hot_encoder(ins, nan_as_category= True)\n # Percentage and difference paid in each installment (amount paid and installment value)\n ins['PAYMENT_PERC'] = ins['AMT_PAYMENT'] / ins['AMT_INSTALMENT']\n ins['PAYMENT_DIFF'] = ins['AMT_INSTALMENT'] - ins['AMT_PAYMENT']\n # Days past due and days before due (no negative values)\n ins['DPD'] = ins['DAYS_ENTRY_PAYMENT'] - ins['DAYS_INSTALMENT']\n ins['DBD'] = ins['DAYS_INSTALMENT'] - ins['DAYS_ENTRY_PAYMENT']\n ins['DPD'] = ins['DPD'].apply(lambda x: x if x > 0 else 0)\n ins['DBD'] = ins['DBD'].apply(lambda x: x if x > 0 else 0)\n # Features: Perform aggregations\n aggregations = {\n 'NUM_INSTALMENT_VERSION': ['nunique'],\n 'DPD': ['max', 'mean', 'sum'],\n 'DBD': ['max', 'mean', 'sum'],\n 'PAYMENT_PERC': ['max', 'mean', 'sum', 'var'],\n 'PAYMENT_DIFF': ['max', 'mean', 'sum', 'var'],\n 'AMT_INSTALMENT': ['max', 'mean', 'sum'],\n 'AMT_PAYMENT': ['min', 'max', 'mean', 'sum'],\n 'DAYS_ENTRY_PAYMENT': ['max', 'mean', 'sum']\n }\n for cat in cat_cols:\n aggregations[cat] = ['mean']\n ins_agg = ins.groupby('SK_ID_CURR').agg(aggregations)\n ins_agg.columns = pd.Index(['INSTAL_' + e[0] + \"_\" + e[1].upper() for e in ins_agg.columns.tolist()])\n # Count installments accounts\n ins_agg['INSTAL_COUNT'] = ins.groupby('SK_ID_CURR').size()\n del ins\n gc.collect()\n return ins_agg\n\n# Preprocess credit_card_balance.csv\ndef credit_card_balance(num_rows = None, nan_as_category = True):\n cc = pd.read_csv('../input/credit_card_balance.csv', nrows = num_rows)\n cc, cat_cols = one_hot_encoder(cc, nan_as_category= True)\n # General aggregations\n cc.drop(['SK_ID_PREV'], axis= 1, inplace = True)\n cc_agg = cc.groupby('SK_ID_CURR').agg(['min', 'max', 'mean', 'sum', 'var'])\n cc_agg.columns = pd.Index(['CC_' + e[0] + \"_\" + e[1].upper() for e in cc_agg.columns.tolist()])\n # Count credit card lines\n cc_agg['CC_COUNT'] = cc.groupby('SK_ID_CURR').size()\n del cc\n gc.collect()\n return cc_agg\n\n# LightGBM GBDT with KFold or Stratified KFold\n# Parameters from Tilii kernel: https://www.kaggle.com/tilii7/olivier-lightgbm-parameters-by-bayesian-opt/code\ndef kfold_lightgbm(df, num_folds, stratified = False, debug= False):\n # Divide in training/validation and test data\n train_df = df[df['TARGET'].notnull()]\n test_df = df[df['TARGET'].isnull()]\n print(\"Starting LightGBM. 
Train shape: {}, test shape: {}\".format(train_df.shape, test_df.shape))\n    del df\n    gc.collect()\n    # Cross validation model\n    if stratified:\n        folds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=1001)\n    else:\n        folds = KFold(n_splits= num_folds, shuffle=True, random_state=1001)\n    # Create arrays and dataframes to store results\n    oof_preds = np.zeros(train_df.shape[0])\n    sub_preds = np.zeros(test_df.shape[0])\n    feature_importance_df = pd.DataFrame()\n    feats = [f for f in train_df.columns if f not in ['TARGET','SK_ID_CURR','SK_ID_BUREAU','SK_ID_PREV','index']]\n    \n    for n_fold, (train_idx, valid_idx) in enumerate(folds.split(train_df[feats], train_df['TARGET'])):\n        dtrain = lgb.Dataset(data=train_df[feats].iloc[train_idx], \n                             label=train_df['TARGET'].iloc[train_idx], \n                             free_raw_data=False, silent=True)\n        dvalid = lgb.Dataset(data=train_df[feats].iloc[valid_idx], \n                             label=train_df['TARGET'].iloc[valid_idx], \n                             free_raw_data=False, silent=True)\n\n        # LightGBM parameters found by Bayesian optimization\n        params = {\n            'objective': 'binary',\n            'boosting_type': 'gbdt',\n            'nthread': 4,\n            'learning_rate': 0.02, # 02,\n            'num_leaves': 20,\n            'colsample_bytree': 0.9497036,\n            'subsample': 0.8715623,\n            'subsample_freq': 1,\n            'max_depth': 8,\n            'reg_alpha': 0.041545473,\n            'reg_lambda': 0.0735294,\n            'min_split_gain': 0.0222415,\n            'min_child_weight': 60, # 39.3259775,\n            'seed': 0,\n            'verbose': -1,\n            'metric': 'auc',\n        }\n        \n        clf = lgb.train(\n            params=params,\n            train_set=dtrain,\n            num_boost_round=10000,\n            valid_sets=[dtrain, dvalid],\n            early_stopping_rounds=200,\n            verbose_eval=False\n        )\n\n        oof_preds[valid_idx] = clf.predict(dvalid.data)\n        sub_preds += clf.predict(test_df[feats]) / folds.n_splits\n\n        fold_importance_df = pd.DataFrame()\n        fold_importance_df[\"feature\"] = feats\n        fold_importance_df[\"importance\"] = clf.feature_importance(importance_type='gain')\n        fold_importance_df[\"fold\"] = n_fold + 1\n        feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)\n        print('Fold %2d AUC : %.6f' % (n_fold + 1, roc_auc_score(dvalid.label, oof_preds[valid_idx])))\n        del clf, dtrain, dvalid\n        gc.collect()\n\n    print('Full AUC score %.6f' % roc_auc_score(train_df['TARGET'], oof_preds))\n    # Write submission file and plot feature importance\n    if not debug:\n        sub_df = test_df[['SK_ID_CURR']].copy()\n        sub_df['TARGET'] = sub_preds\n        sub_df[['SK_ID_CURR', 'TARGET']].to_csv(submission_file_name, index= False)\n    display_importances(feature_importance_df)\n    return feature_importance_df\n\n# Display/plot feature importance\ndef display_importances(feature_importance_df_):\n    cols = feature_importance_df_[[\"feature\", \"importance\"]].groupby(\"feature\").mean().sort_values(by=\"importance\", ascending=False)[:40].index\n    best_features = feature_importance_df_.loc[feature_importance_df_.feature.isin(cols)]\n    plt.figure(figsize=(8, 10))\n    sns.barplot(x=\"importance\", y=\"feature\", data=best_features.sort_values(by=\"importance\", ascending=False))\n    plt.title('LightGBM Features (avg over folds)')\n    plt.tight_layout()\n    plt.savefig('lgbm_importances01.png')\n\n\ndef main(debug = False):\n    num_rows = 10000 if debug else None\n    df = application_train_test(num_rows)\n    with timer(\"Process bureau and bureau_balance\"):\n        bureau = bureau_and_balance(num_rows)\n        print(\"Bureau df shape:\", bureau.shape)\n        df = df.join(bureau, how='left', on='SK_ID_CURR')\n        del bureau\n        gc.collect()\n    with timer(\"Process previous_applications\"):\n        prev = previous_applications(num_rows)\n        print(\"Previous 
applications df shape:\", prev.shape)\n df = df.join(prev, how='left', on='SK_ID_CURR')\n del prev\n gc.collect()\n with timer(\"Process POS-CASH balance\"):\n pos = pos_cash(num_rows)\n print(\"Pos-cash balance df shape:\", pos.shape)\n df = df.join(pos, how='left', on='SK_ID_CURR')\n del pos\n gc.collect()\n with timer(\"Process installments payments\"):\n ins = installments_payments(num_rows)\n print(\"Installments payments df shape:\", ins.shape)\n df = df.join(ins, how='left', on='SK_ID_CURR')\n del ins\n gc.collect()\n with timer(\"Process credit card balance\"):\n cc = credit_card_balance(num_rows)\n print(\"Credit card balance df shape:\", cc.shape)\n df = df.join(cc, how='left', on='SK_ID_CURR')\n del cc\n gc.collect()\n with timer(\"Run LightGBM with kfold\"):\n print(df.shape)\n df.drop(features_with_no_imp_at_least_twice, axis=1, inplace=True)\n gc.collect()\n print(df.shape)\n feat_importance = kfold_lightgbm(df, num_folds= 5, stratified= False, debug= debug)\n\nif __name__ == \"__main__\":\n submission_file_name = \"submission_with selected_features.csv\"\n with timer(\"Full model run\"):\n main()", "id": "12093778", "language": "Python", "matching_score": 9.162160873413086, "max_stars_count": 0, "path": "external/lighgbm-with-selected-features.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nThis file keep\n\nCreated on Tue July 10 2018\n\nThis config is original portinr features from and slightly modified\nhttps://www.kaggle.com/jsaguiar/updated-0-792-lb-lightgbm-with-simple-features\nhttps://www.kaggle.com/dromosys/fork-of-fork-lightgbm-with-simple-features-cee847\n\n@author: cttsai\n\"\"\"\n\nimport numpy as np\n\nfrom sklearn.decomposition import NMF, TruncatedSVD, IncrementalPCA, LatentDirichletAllocation\nfrom sklearn.manifold import Isomap, SpectralEmbedding\nfrom sklearn.random_projection import GaussianRandomProjection, SparseRandomProjection\n\nDataConfigs = {\n # input file\n 'input' : {\n 'application_train' : {'name': 'application_train.csv', 'index': 'SK_ID_CURR', },\n 'application_test' : {'name': 'application_test.csv', 'index': 'SK_ID_CURR',},\n 'previous_application' : {'name': 'previous_application.csv', 'index': 'SK_ID_PREV',},\n 'credit_card_balance' : {'name': 'credit_card_balance.csv', 'index': 'SK_ID_PREV',},\n 'pos_cash_balance' : {'name': 'POS_CASH_balance.csv', 'index': 'SK_ID_PREV',},\n 'installments_payments': {'name': 'installments_payments.csv', 'index': 'SK_ID_PREV',},\n 'bureau' : {'name': 'bureau.csv', 'index': 'SK_ID_BUREAU',},\n 'bureau_balance' : {'name': 'bureau_balance.csv', 'index': 'SK_ID_BUREAU',},\n },\n\n # application train and test\n 'application': {\n 'filter_rows': {'CODE_GENDER': ['XNA'], }, # dict to feed in pandas directly\n 'factorize_columns': ['CODE_GENDER', 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY'],\n 'onehot_encoding': True,\n 'onehot_columns': [], # [] mean auto detect\n 'nan_as_category': True,\n 'replace_rows': {\n 'DAYS_BIRTH': {365243: np.nan, },\n 'DAYS_EMPLOYED': {365243: np.nan, },\n 'DAYS_ID_PUBLISH': {365243: np.nan, },\n 'DAYS_REGISTRATION': {365243: np.nan, },\n },\n 'interaction_columns': [\n {'name': 'REGION_RATING_CLIENT_RATIO', 'mode': 'divide', 'a': 'REGION_RATING_CLIENT_W_CITY', 'b': 'REGION_RATING_CLIENT',},\n {'name': 'REGION_RATING_CLIENT_MULTI', 'mode': 'multiply', 'a': 'REGION_RATING_CLIENT_W_CITY', 'b': 'REGION_RATING_CLIENT',},\n {'name': 'DEPENDENT_FAM_MEM_RATIO', 'mode': 'divide', 'a': 'CNT_CHILDREN', 'b': 'CNT_FAM_MEMBERS',},\n {'name': 'ADULT_FAM_MEMBERS', 'mode': 
'subtract', 'a': 'CNT_CHILDREN', 'b': 'CNT_FAM_MEMBERS',},\n {'name': 'INCOME_PER_PERSON', 'mode': 'divide', 'a': 'AMT_INCOME_TOTAL', 'b': 'CNT_FAM_MEMBERS',},\n {'name': 'INCOME_PER_CHILD', 'mode': 'divide', 'a': 'AMT_INCOME_TOTAL', 'b': 'CNT_CHILDREN',},\n # amount\n {'name': 'CREDIT_TO_ANNUITY_RATIO', 'mode': 'divide', 'a': 'AMT_CREDIT', 'b': 'AMT_ANNUITY',},\n {'name': 'CREDIT_TO_GOODS_RATIO', 'mode': 'divide', 'a': 'AMT_CREDIT', 'b': 'AMT_GOODS_PRICE',},\n {'name': 'CREDIT_TO_INCOME_RATIO', 'mode': 'divide_nonzero', 'a': 'AMT_CREDIT', 'b': 'AMT_INCOME_TOTAL',},\n {'name': 'ANNUITY_TO_INCOME_RATIO', 'mode': 'divide_nonzero', 'a': 'AMT_ANNUITY', 'b' :'AMT_INCOME_TOTAL',},\n {'name': 'CREDIT_MULTI_INCOME', 'mode': 'multiply', 'a': 'AMT_CREDIT', 'b': 'AMT_INCOME_TOTAL',},\n # days\n# {'name': 'EMPLOY_TO_BIRTH_RATIO', 'mode': 'divide', 'a': 'DAYS_EMPLOYED', 'b': 'DAYS_BIRTH',},\n# {'name': 'REGIST_TO_BIRTH_RATIO', 'mode': 'divide', 'a': 'DAYS_REGISTRATION','b': 'DAYS_BIRTH',},\n# {'name': 'ID_PUBLISH_TO_BIRTH_RATIO', 'mode': 'divide', 'a': 'DAYS_ID_PUBLISH', 'b': 'DAYS_BIRTH',},\n\n {'name': 'CAR_TO_BIRTH_RATIO', 'mode': 'divide', 'a': 'OWN_CAR_AGE', 'b': 'DAYS_BIRTH',},\n {'name': 'CAR_TO_EMPLOY_RATIO', 'mode': 'divide', 'a': 'OWN_CAR_AGE', 'b': 'DAYS_EMPLOYED',},\n {'name': 'PHONE_TO_BIRTH_RATIO', 'mode': 'divide', 'a': 'DAYS_LAST_PHONE_CHANGE', 'b': 'DAYS_BIRTH',},\n {'name': 'PHONE_TO_EMPLOY_RATIO', 'mode': 'divide', 'a': 'DAYS_LAST_PHONE_CHANGE', 'b': 'DAYS_EMPLOYED',},\n ],\n 'deep_interactions': [\n {'header' : 'DOC_IND',\n 'transform': ['kurtosis', 'sum', 'mean', 'std'],\n 'columns' : [\n 'FLAG_DOCUMENT_2', 'FLAG_DOCUMENT_3', 'FLAG_DOCUMENT_4', 'FLAG_DOCUMENT_5',\n 'FLAG_DOCUMENT_6', 'FLAG_DOCUMENT_7', 'FLAG_DOCUMENT_8', 'FLAG_DOCUMENT_9', 'FLAG_DOCUMENT_10',\n 'FLAG_DOCUMENT_11', 'FLAG_DOCUMENT_12', 'FLAG_DOCUMENT_13', 'FLAG_DOCUMENT_14', 'FLAG_DOCUMENT_15',\n 'FLAG_DOCUMENT_16', 'FLAG_DOCUMENT_17', 'FLAG_DOCUMENT_18', 'FLAG_DOCUMENT_19', 'FLAG_DOCUMENT_20', 'FLAG_DOCUMENT_21',],\n },\n {'header' : 'EXT_SOURCES_SYNTHESIZE',\n 'transform': ['product', 'mean', 'sum', 'sum_squared', 'std'],\n 'columns' : ['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3'],\n },\n {'header' : 'CONTACT_IND',\n 'transform': ['kurtosis', 'sum', 'std'],\n 'columns' : [\n 'FLAG_CONT_MOBILE', 'FLAG_MOBIL',\n 'FLAG_PHONE', 'FLAG_WORK_PHONE', 'FLAG_EMP_PHONE', 'FLAG_EMAIL',\n 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY',],\n },\n {'header' : 'LIVE_IND',\n 'transform': ['kurtosis', 'sum', 'mean', 'std'],\n 'columns' : [\n 'REG_REGION_NOT_LIVE_REGION', 'REG_REGION_NOT_WORK_REGION', 'LIVE_REGION_NOT_WORK_REGION',\n 'REG_CITY_NOT_LIVE_CITY', 'REG_CITY_NOT_WORK_CITY', 'LIVE_CITY_NOT_WORK_CITY',],\n },\n\n ],\n 'decomposition': [\n #APPLICATTION and APPLICANT\n {'columns': [\n 'CODE_GENDER',\n 'FLAG_CONT_MOBILE', 'FLAG_MOBIL',\n 'FLAG_PHONE', 'FLAG_WORK_PHONE', 'FLAG_EMP_PHONE', 'FLAG_EMAIL',\n 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY',\n 'REG_REGION_NOT_LIVE_REGION', 'REG_REGION_NOT_WORK_REGION', 'LIVE_REGION_NOT_WORK_REGION',\n 'REG_CITY_NOT_LIVE_CITY', 'REG_CITY_NOT_WORK_CITY', 'LIVE_CITY_NOT_WORK_CITY',\n 'FLAG_DOCUMENT_2', 'FLAG_DOCUMENT_3', 'FLAG_DOCUMENT_4', 'FLAG_DOCUMENT_5',\n 'FLAG_DOCUMENT_6', 'FLAG_DOCUMENT_7', 'FLAG_DOCUMENT_8', 'FLAG_DOCUMENT_9', 'FLAG_DOCUMENT_10',\n 'FLAG_DOCUMENT_11', 'FLAG_DOCUMENT_12', 'FLAG_DOCUMENT_13', 'FLAG_DOCUMENT_14', 'FLAG_DOCUMENT_15',\n 'FLAG_DOCUMENT_16', 'FLAG_DOCUMENT_17', 'FLAG_DOCUMENT_18', 'FLAG_DOCUMENT_19', 'FLAG_DOCUMENT_20', 'FLAG_DOCUMENT_21',],\n 'stems': [\n 
'NAME_FAMILY_STATUS_', 'NAME_HOUSING_TYPE_',\n 'NAME_CONTRACT_TYPE_', 'NAME_INCOME_TYPE_',\n 'OCCUPATION_TYPE_', 'ORGANIZATION_TYPE_', 'NAME_EDUCATION_TYPE_'],\n 'methods': {'APPLICANT_SVD': {'object': TruncatedSVD,\n 'params': {'n_components': 4,\n 'algorithm': 'randomized',\n 'n_iter': 10,\n 'random_state': 42},},\n 'APPLICANT_LDA': {'object': LatentDirichletAllocation,\n 'params': {'n_components': 8,\n 'n_jobs':-1,\n 'random_state': 42},},\n },\n },\n ]\n },\n\n 'previous_application': {\n 'filter_rows': {},\n 'factorize_columns': [],\n 'onehot_encoding': True,\n 'onehot_columns': [],\n 'nan_as_category': True,\n 'replace_rows': {\n 'DAYS_FIRST_DRAWING' : {365243: np.nan, },\n 'DAYS_FIRST_DUE' : {365243: np.nan, },\n 'DAYS_LAST_DUE_1ST_VERSION': {365243: np.nan, },\n 'DAYS_LAST_DUE' : {365243: np.nan, },\n 'DAYS_TERMINATION' : {365243: np.nan, }},\n 'interaction_columns': [\n {'name': 'APP_CREDIT_RATIO', 'mode': 'divide', 'a': 'AMT_APPLICATION', 'b':'AMT_CREDIT',},\n {'name': 'APP_CREDIT_DIFF', 'mode': 'subtract', 'a': 'AMT_APPLICATION', 'b':'AMT_CREDIT',},\n {'name': 'EQUITY_INIT_RATIO', 'mode': 'divide', 'a': 'AMT_APPLICATION', 'b':'AMT_CREDIT',},\n {'name': 'EQUITY_DIFF', 'mode': 'subtract', 'a': 'AMT_APPLICATION', 'b':'AMT_CREDIT',},\n ],\n 'aggregations': [ # list of aggregation task\n {'header' : \"PREV\",\n 'data' : 'previous_application',\n 'groupby': ['SK_ID_CURR'], #\n 'index' : 'SK_ID_CURR',\n 'cat' : ['mean'], #\n 'num' : { #\n 'AMT_ANNUITY': ['min', 'max', 'mean'],\n 'AMT_APPLICATION': ['min', 'max', 'mean'],\n 'AMT_CREDIT': ['min', 'max', 'mean'],\n 'APP_CREDIT_RATIO': ['min', 'max', 'mean', 'var'],\n 'APP_CREDIT_DIFF': ['min', 'max', 'mean', 'var'],\n 'AMT_DOWN_PAYMENT': ['min', 'max', 'mean'],\n 'AMT_GOODS_PRICE': ['min', 'max', 'mean'],\n 'HOUR_APPR_PROCESS_START': ['min', 'max', 'mean'],\n 'RATE_DOWN_PAYMENT': ['min', 'max', 'mean'],\n 'DAYS_DECISION': ['min', 'max', 'mean', 'var'],\n 'CNT_PAYMENT': ['mean', 'sum'],\n },\n },\n {'header' : \"PREV_APPROVED\",\n 'data' : 'previous_application',\n 'groupby': ['SK_ID_CURR'], #\n 'subset' : {'column_name': 'NAME_CONTRACT_STATUS_Approved',\n 'conditions' : [1],},\n 'index' : 'SK_ID_CURR',\n 'cat' : [],\n 'num' : {\n 'AMT_ANNUITY': ['min', 'max', 'mean'],\n 'AMT_APPLICATION': ['min', 'max', 'mean'],\n 'AMT_CREDIT': ['min', 'max', 'mean'],\n 'APP_CREDIT_RATIO': ['min', 'max', 'mean', 'var'],\n 'AMT_DOWN_PAYMENT': ['min', 'max', 'mean'],\n 'AMT_GOODS_PRICE': ['min', 'max', 'mean'],\n 'HOUR_APPR_PROCESS_START': ['min', 'max', 'mean'],\n 'RATE_DOWN_PAYMENT': ['min', 'max', 'mean'],\n 'DAYS_DECISION': ['min', 'max', 'mean'],\n 'CNT_PAYMENT': ['mean', 'sum'],\n },\n },\n {'header': \"PREV_REFUSED\",\n 'data' : 'previous_application',\n 'groupby': ['SK_ID_CURR'],\n 'subset': {'column_name': 'NAME_CONTRACT_STATUS_Refused',\n 'conditions' : [1],},\n 'index' : 'SK_ID_CURR',\n 'cat' : [],\n 'num' : {\n 'AMT_ANNUITY': ['min', 'max', 'mean'],\n 'AMT_APPLICATION': ['min', 'max', 'mean'],\n 'AMT_CREDIT': ['min', 'max', 'mean'],\n 'APP_CREDIT_RATIO': ['min', 'max', 'mean', 'var'],\n 'AMT_DOWN_PAYMENT': ['min', 'max', 'mean'],\n 'AMT_GOODS_PRICE': ['min', 'max', 'mean'],\n 'HOUR_APPR_PROCESS_START': ['min', 'max', 'mean'],\n 'RATE_DOWN_PAYMENT': ['min', 'max', 'mean'],\n 'DAYS_DECISION': ['min', 'max', 'mean'],\n 'CNT_PAYMENT': ['mean', 'sum'],\n },\n },\n ],\n },\n\n 'bureau':{\n 'filter_rows': {},\n 'factorize_columns': [],\n 'onehot_encoding': True,\n 'onehot_columns': [],\n 'nan_as_category': True,\n 'replace_rows': {},\n 
'interaction_columns': [],\n 'aggregations': [\n {'header': \"BB\",\n 'groupby': ['SK_ID_BUREAU'],\n 'index' : 'SK_ID_BUREAU',\n 'data' : 'bureau_balance',\n 'count' : True,\n 'cat' : ['mean'], # cat cols by autometically identify\n 'num' : {\n 'MONTHS_BALANCE': ['min', 'max', 'size'],\n },\n },\n {'header': \"BUREAU\",\n 'groupby': ['SK_ID_CURR'],\n 'index' : 'SK_ID_CURR',#'SK_ID_BUREAU',\n 'data' : 'bureau',\n 'cat' : ['mean'], # cat cols by autometically identify\n 'num' : {\n 'DAYS_CREDIT': ['min', 'max', 'mean', 'var'],\n 'DAYS_CREDIT_ENDDATE': ['min', 'max', 'mean'],\n 'DAYS_CREDIT_UPDATE': ['mean'],\n 'CREDIT_DAY_OVERDUE': ['max', 'mean'],\n 'AMT_CREDIT_MAX_OVERDUE': ['mean'],\n 'AMT_CREDIT_SUM': ['max', 'mean', 'sum'],\n 'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'],\n 'AMT_CREDIT_SUM_OVERDUE': ['mean'],\n 'AMT_CREDIT_SUM_LIMIT': ['mean', 'sum'],\n 'AMT_ANNUITY': ['max', 'mean'],\n 'CNT_CREDIT_PROLONG': ['sum'],\n 'BB_MONTHS_BALANCE_MIN': ['min'],\n 'BB_MONTHS_BALANCE_MAX': ['max'],\n 'BB_MONTHS_BALANCE_SIZE': ['mean', 'sum'],\n 'BB_STATUS_C_MEAN': ['mean', 'sum'],\n 'BB_STATUS_X_MEAN': ['mean', 'sum'],\n 'BB_STATUS_0_MEAN': ['mean', 'sum'],\n 'BB_STATUS_1_MEAN': ['mean', 'sum'],\n 'BB_STATUS_2_MEAN': ['mean', 'sum'],\n 'BB_STATUS_3_MEAN': ['mean', 'sum'],\n 'BB_STATUS_4_MEAN': ['mean', 'sum'],\n 'BB_STATUS_5_MEAN': ['mean', 'sum'],\n },\n },\n {'header' : \"BUREAU_ACTIVED\",\n 'data' : 'bureau',\n 'groupby': ['SK_ID_CURR'],\n 'subset' : {'column_name': 'CREDIT_ACTIVE_Active',\n 'conditions' : [1],},\n 'index' : 'SK_ID_CURR',\n 'count' : True,\n 'cat' : [],\n 'num' : {\n 'DAYS_CREDIT': ['min', 'max', 'mean', 'var'],\n 'DAYS_CREDIT_ENDDATE': ['min', 'max', 'mean'],\n 'DAYS_CREDIT_UPDATE': ['mean'],\n 'CREDIT_DAY_OVERDUE': ['max', 'mean'],\n 'AMT_CREDIT_MAX_OVERDUE': ['mean'],\n 'AMT_CREDIT_SUM': ['max', 'mean', 'sum'],\n 'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'],\n 'AMT_CREDIT_SUM_OVERDUE': ['mean'],\n 'AMT_CREDIT_SUM_LIMIT': ['mean', 'sum'],\n 'AMT_ANNUITY': ['max', 'mean'],\n 'CNT_CREDIT_PROLONG': ['sum'],\n 'BB_MONTHS_BALANCE_MIN': ['min'],\n 'BB_MONTHS_BALANCE_MAX': ['max'],\n 'BB_MONTHS_BALANCE_SIZE': ['mean', 'sum'],\n },\n },\n {'header' : \"BUREAU_CLOSED\",\n 'data' : 'bureau',\n 'groupby': ['SK_ID_CURR'],\n 'subset' : {'column_name': 'CREDIT_ACTIVE_Closed',\n 'conditions' : [1],},\n 'index' : 'SK_ID_CURR',\n 'count' : True,\n 'cat' : [],\n 'num' : {\n 'DAYS_CREDIT': ['min', 'max', 'mean', 'var'],\n 'DAYS_CREDIT_ENDDATE': ['min', 'max', 'mean'],\n 'DAYS_CREDIT_UPDATE': ['mean'],\n 'CREDIT_DAY_OVERDUE': ['max', 'mean'],\n 'AMT_CREDIT_MAX_OVERDUE': ['mean'],\n 'AMT_CREDIT_SUM': ['max', 'mean', 'sum'],\n 'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'],\n 'AMT_CREDIT_SUM_OVERDUE': ['mean'],\n 'AMT_CREDIT_SUM_LIMIT': ['mean', 'sum'],\n 'AMT_ANNUITY': ['max', 'mean'],\n 'CNT_CREDIT_PROLONG': ['sum'],\n 'BB_MONTHS_BALANCE_MIN': ['min'],\n 'BB_MONTHS_BALANCE_MAX': ['max'],\n 'BB_MONTHS_BALANCE_SIZE': ['mean', 'sum'],\n },\n },\n {'header' : \"BUREAU_CREDIT_TYPE\",\n 'data' : 'bureau',\n 'groupby': ['SK_ID_CURR'],\n 'subset' : {'column_name': 'BUREAU_CREDIT_TYPE_Consumer credit',\n 'conditions' : [1],},\n 'index' : 'SK_ID_CURR',\n 'count' : True,\n 'cat' : [],\n 'num' : {\n 'DAYS_CREDIT': ['min', 'max', 'mean', 'var'],\n 'DAYS_CREDIT_ENDDATE': ['min', 'max', 'mean'],\n 'DAYS_CREDIT_UPDATE': ['mean'],\n 'CREDIT_DAY_OVERDUE': ['max', 'mean'],\n 'AMT_CREDIT_MAX_OVERDUE': ['mean'],\n 'AMT_CREDIT_SUM': ['max', 'mean', 'sum'],\n 'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'],\n 
'AMT_CREDIT_SUM_OVERDUE': ['mean'],\n 'AMT_CREDIT_SUM_LIMIT': ['mean', 'sum'],\n 'AMT_ANNUITY': ['max', 'mean'],\n 'CNT_CREDIT_PROLONG': ['sum'],\n 'BB_MONTHS_BALANCE_MIN': ['min'],\n 'BB_MONTHS_BALANCE_MAX': ['max'],\n 'BB_MONTHS_BALANCE_SIZE': ['mean', 'sum'],\n },\n },\n ],\n },\n\n 'installments_payments':{\n 'filter_rows': {},\n 'factorize_columns': [],\n 'onehot_encoding': True,\n 'onehot_columns': [],\n 'nan_as_category': True,\n 'replace_rows': {},\n 'interaction_columns': [\n {'name': 'PAYMENT_RATIO','mode': 'divide', 'a': 'AMT_PAYMENT', 'b':'AMT_INSTALMENT',},\n {'name': 'PAYMENT_DIFF', 'mode': 'subtract', 'a': 'AMT_INSTALMENT', 'b':'AMT_PAYMENT',},\n {'name': 'DPD', 'mode': 'subtract_positive', 'a': 'DAYS_ENTRY_PAYMENT', 'b':'DAYS_INSTALMENT',},\n {'name': 'DBD', 'mode': 'subtract_positive', 'a': 'DAYS_INSTALMENT', 'b':'DAYS_ENTRY_PAYMENT',},\n ],\n 'aggregations':[\n {'header' : \"INSTALL\",\n 'groupby': ['SK_ID_CURR'],\n 'data' : 'installments_payments',\n 'index' : 'SK_ID_CURR',\n 'count' : True,\n# 'cat' : ['mean'], # cat cols by autometically identify\n 'num' : {\n 'NUM_INSTALMENT_VERSION': ['nunique'],\n 'DPD': ['max', 'mean', 'sum'],\n 'DBD': ['max', 'mean', 'sum'],\n 'PAYMENT_RATIO': ['max', 'mean', 'sum', 'var'],\n 'PAYMENT_DIFF': ['max', 'mean', 'sum', 'var'],\n 'AMT_INSTALMENT': ['max', 'mean', 'sum'],\n 'AMT_PAYMENT': ['min', 'max', 'mean', 'sum'],\n 'DAYS_ENTRY_PAYMENT': ['max', 'mean', 'sum'],\n },\n },\n ],\n },\n\n 'pos_cash': {\n 'filter_rows': {},\n 'factorize_columns': [],\n 'onehot_encoding': True,\n 'onehot_columns': [],\n 'nan_as_category': True,\n 'replace_rows': {},\n 'interaction_columns': [\n {'name': 'APP_CREDIT_RATIO', 'mode': 'divide', 'a': 'AMT_APPLICATION', 'b':'AMT_CREDIT',},\n ],\n 'aggregations': [\n {'header': \"POS_CASH\",\n 'groupby': ['SK_ID_CURR'],\n 'data' : 'pos_cash_balance',\n 'index' : 'SK_ID_CURR',\n 'count' : True,\n 'cat' : ['mean'], # cat cols by autometically identify\n 'num' : {\n 'MONTHS_BALANCE': ['max', 'mean', 'size'],\n 'SK_DPD' : ['max', 'mean', 'var'],\n 'SK_DPD_DEF' : ['max', 'mean', 'var'],\n },\n },\n ],\n 'conditional_aggregations': [], # omit aggregations\n },\n\n 'credit_card_balance': {\n 'filter_rows': {},\n 'factorize_columns': [],\n 'onehot_encoding': True,\n 'onehot_columns': [],\n 'nan_as_category': True,\n 'replace_rows': {},\n 'interaction_columns': [\n {'name': 'APP_CREDIT_RATIO', 'mode': 'divide', 'a': 'AMT_APPLICATION', 'b':'AMT_CREDIT',},\n ],\n 'aggregations': [ # general aggregation\n {'header' : \"CC\",\n 'groupby': ['SK_ID_CURR'],\n 'data' : 'credit_card_balance',\n 'index' : 'SK_ID_CURR',\n 'count' : True,\n 'cat' : ['mean', 'sum'],\n 'num' : {\n 'MONTHS_BALANCE' : ['min', 'max', 'mean', 'sum', 'var'],\n 'AMT_BALANCE' : ['min', 'max', 'mean', 'sum', 'var'],\n 'AMT_CREDIT_LIMIT_ACTUAL' : ['min', 'max', 'mean', 'sum', 'var'],\n 'AMT_DRAWINGS_ATM_CURRENT' : ['min', 'max', 'mean', 'sum', 'var'],\n 'AMT_DRAWINGS_CURRENT' : ['min', 'max', 'mean', 'sum', 'var'],\n 'AMT_DRAWINGS_OTHER_CURRENT': ['min', 'max', 'mean', 'sum', 'var'],\n 'AMT_DRAWINGS_POS_CURRENT' : ['min', 'max', 'mean', 'sum', 'var'],\n 'AMT_INST_MIN_REGULARITY' : ['min', 'max', 'mean', 'sum', 'var'],\n 'AMT_PAYMENT_CURRENT' : ['min', 'max', 'mean', 'sum', 'var'],\n 'AMT_PAYMENT_TOTAL_CURRENT' : ['min', 'max', 'mean', 'sum', 'var'],\n 'AMT_RECEIVABLE_PRINCIPAL' : ['min', 'max', 'mean', 'sum', 'var'],\n 'AMT_RECIVABLE' : ['min', 'max', 'mean', 'sum', 'var'],\n 'AMT_TOTAL_RECEIVABLE' : ['min', 'max', 'mean', 'sum', 'var'],\n 
'CNT_DRAWINGS_ATM_CURRENT' : ['min', 'max', 'mean', 'sum', 'var'],\n 'CNT_DRAWINGS_CURRENT' : ['min', 'max', 'mean', 'sum', 'var'],\n 'CNT_DRAWINGS_OTHER_CURRENT': ['min', 'max', 'mean', 'sum', 'var'],\n 'CNT_DRAWINGS_POS_CURRENT' : ['min', 'max', 'mean', 'sum', 'var'],\n 'CNT_INSTALMENT_MATURE_CUM' : ['min', 'max', 'mean', 'sum', 'var'],\n 'SK_DPD' : ['min', 'max', 'mean', 'sum', 'var'],\n 'SK_DPD_DEF' : ['min', 'max', 'mean', 'sum', 'var'],\n }\n },\n ],\n },\n}\n", "id": "8296197", "language": "Python", "matching_score": 3.70770001411438, "max_stars_count": 0, "path": "configs/SampleDataConfigs.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nThis code provides data and converts to base features and forked from\nhttps://www.kaggle.com/jsaguiar/updated-0-792-lb-lightgbm-with-simple-features\nsome data processing copied from the Dromosys kernel:\nhttps://www.kaggle.com/dromosys/fork-of-fork-lightgbm-with-simple-features-cee847\nCreated on Thu Jun 28 2018\n\n@author: cttsai\n\"\"\"\nimport gc; gc.enable()\n\nimport numpy as np\nimport pandas as pd\n\nfrom Utility import InitializeConfigs\nfrom Utility import IdentifyCategoricalColumn, AnyEmptyDataframe, Cast64To32\nfrom LibConfigs import logger, data_provider_refresh_configs\n\nfrom FeatureTransformer import process_one_hot_encode, process_factorize\nfrom FeatureTransformer import process_interaction, process_deep_interactions\nfrom FeatureTransformer import process_aggregate\nfrom FeatureTransformer import process_decomposition\nfrom FeatureTransformer import process_replace, process_drop_rows\n\nfrom DataFileIO import DataFileIO\n\n\nclass DataProvider(object):\n \"\"\"\n **data:\n transformed:\n main: train-test pair\n \"\"\"\n def __init__(self, IOConfigs={}):\n\n self.input_path = IOConfigs.get('input', '../../input')\n self.cache_path = IOConfigs.get('data', '../data')\n\n self.data_io_manager = DataFileIO()\n self.provider_configs = data_provider_refresh_configs\n\n self.target_column = 'TARGET'\n self.data_index = {}\n self.data_raw = {}\n self.data_processed = {}\n self.xy_train_test = {}\n\n self.cols_categorical = {}\n self.cols_one_hot = {}\n\n def _aggregate_pipeline(self, df, cat_cols, configs):\n ret = list()\n\n for c in configs.get('aggregations', []):\n\n groupby_cols = c.get('groupby', [])\n if not groupby_cols:\n logger.info(\"No columns to Aggregate on {}\".format(groupby_cols))\n continue\n\n configs_subset = c.get('subset', {})\n if configs_subset:\n cond_k = configs_subset.get('column_name', 'foobar')\n cond_i = configs_subset.get('conditions', [])\n\n if cond_k in df.columns and cond_i:\n sub_df = df.loc[df[cond_k].isin(cond_i)]\n logger.info(\"Condictional Aggregate on {}, {}, shape={}\".format(cond_k, groupby_cols, sub_df.shape))\n ret.append(process_aggregate(sub_df, process_configs=c, groupby_cols=groupby_cols, cat_cols=[]))\n else:\n logger.info(\"Specific Aggregate on {}\".format(groupby_cols))\n ret.append(process_aggregate(df, process_configs=c, groupby_cols=groupby_cols, cat_cols=cat_cols))\n\n ret = [r for r in ret if not r.empty]\n inds = sorted(list(set([r.index.name for r in ret])))\n ret = {ind: pd.concat([r for r in ret if r.index.name == ind], axis=1, join='inner') for ind in inds}\n\n for k, v in ret.items():\n logger.info(\"Result Aggregate on {}: {}\".format(k, v.shape))\n\n return ret\n\n @staticmethod\n def _split_configs(c, name):\n ret = dict()\n for k, v in c.items():\n if 'aggregations' in k:\n ret[k] = [f for f in v if f.get('data', None) == name]\n 
logger.info('split configs: {}'.format(c))\n return ret\n\n # Preprocess application_train.csv and application_test.csv\n def _application_train_test(self, configs):\n nan_as_category = configs.get('nan_as_category', False)\n\n # Read data and merge\n major_index = self.data_index['application_train']\n df = self.data_raw['application_train']\n test_df = self.data_raw['application_test']\n logger.info(\"Train samples: {}, test samples: {}\".format(df.shape, test_df.shape))\n df = df.append(test_df, sort=False, ignore_index=True)\n\n df = process_drop_rows(df, process_configs=configs['filter_rows'])\n df = process_factorize(df, process_configs=configs['factorize_columns'])\n\n if configs.get('onehot_encoding', False):\n df, cat_cols, new_cols = process_one_hot_encode(df, configs['onehot_columns'], nan_as_category)\n self.cols_one_hot.update({'application': new_cols})\n else:\n cat_cols = IdentifyCategoricalColumn(df)\n\n df = process_replace(df, process_configs=configs['replace_rows'])\n df, interact_cols = process_interaction(df, process_configs=configs['interaction_columns'])\n\n if configs.get('deep_interactions', []):\n deep_interactions = configs.get('deep_interactions', [])\n for c in deep_interactions:\n df = process_deep_interactions(df, c)\n\n logger.info('prepare decompostion, application={}'.format(df.shape))\n df_ext = [process_decomposition(df, c) for c in configs['decomposition']]\n df = pd.concat([df] + df_ext, axis=1, join='inner')\n logger.info('finished decompositions, application={}'.format(df.shape))\n df = Cast64To32(df)\n\n # seperate train test\n # Divide in training/validation and test data\n train_df = df.loc[df[self.target_column].notnull()].reset_index().set_index(major_index)\n test_df = df.loc[df[self.target_column].isnull()].reset_index().set_index(major_index)\n logger.info(\"Split into train samples: {}, test samples: {}\".format(train_df.shape, test_df.shape))\n del df; gc.collect()\n\n return train_df, test_df\n\n # Preprocess bureau.csv and bureau_balance.csv\n def _bureau_and_balance(self, configs):\n current_index = self.data_index['bureau']\n major_index = self.data_index['application_train']\n nan_as_category = configs.get('nan_as_category', False)\n\n # Read data and merge\n df = self.data_raw['bureau']\n bb = self.data_raw['bureau_balance']\n logger.info(\"Bureau: {}, Bureau Balance: {}\".format(df.shape, bb.shape))\n\n if configs.get('onehot_encoding', False):\n df, cat_cols, new_cols = process_one_hot_encode(df, configs['onehot_columns'], nan_as_category)\n bb, cat_cols_bb, new_cols_bb = process_one_hot_encode(bb, configs['onehot_columns'], nan_as_category)\n self.cols_one_hot.update({'bureau': new_cols + new_cols_bb})\n\n agg_configs = self._split_configs(configs.copy(), 'bureau_balance')\n bb_agg = self._aggregate_pipeline(bb, cat_cols_bb, agg_configs)[current_index]\n df = df.set_index(current_index).join(bb_agg, how='left')\n bureau_cat_cols = cat_cols + [c for c in bb_agg if any([True if cc in c else False for cc in cat_cols_bb])]\n #condictional aggregation\n # Bureau: Active credits - using only numerical aggregations\n # Bureau: Closed credits - using only numerical aggregations\n agg_configs = self._split_configs(configs.copy(), 'bureau')\n bureau_agg = self._aggregate_pipeline(df, bureau_cat_cols, agg_configs)[major_index]\n return Cast64To32(bureau_agg)\n\n # Preprocess previous_applications.csv\n def _previous_application(self, configs):\n current_index = self.data_index['previous_application']\n major_index = 
self.data_index['application_train']\n nan_as_category = configs.get('nan_as_category', False)\n\n df = self.data_raw['previous_application']\n logger.info(\"Previous application: {}\".format(df.shape))\n\n if configs.get('onehot_encoding', False):\n df, cat_cols, new_cols = process_one_hot_encode(df, configs['onehot_columns'], nan_as_category)\n self.cols_one_hot.update({'previous_application': new_cols})\n else:\n cat_cols = IdentifyCategoricalColumn(df)\n\n df = process_replace(df, process_configs=configs['replace_rows'])\n df, interact_cols = process_interaction(df, process_configs=configs['interaction_columns'])\n # Previous applications categorical features\n # Previous Applications: Approved Applications - only numerical features\n # Previous Applications: Refused Applications - only numerical features\n prev_agg = self._aggregate_pipeline(df, cat_cols, configs)[major_index]\n\n return Cast64To32(prev_agg)\n\n # Preprocess POS_CASH_balance.csv\n def _pos_cash_balance(self, configs):\n current_index = self.data_index['pos_cash_balance']\n major_index = self.data_index['application_train']\n nan_as_category = configs.get('nan_as_category', False)\n\n df = self.data_raw['pos_cash_balance']\n logger.info(\"pos_cash: {}\".format(df.shape))\n\n if configs.get('onehot_encoding', False):\n df, cat_cols, new_cols = process_one_hot_encode(df, configs['onehot_columns'], nan_as_category)\n self.cols_one_hot.update({'pos_cash': new_cols})\n else:\n cat_cols = IdentifyCategoricalColumn(df)\n\n pos_cash_agg = self._aggregate_pipeline(df, cat_cols, configs)[major_index]\n return Cast64To32(pos_cash_agg)\n\n # Preprocess installments_payments.csv\n def _installments_payments(self, configs):\n current_index = self.data_index['installments_payments']\n major_index = self.data_index['application_train']\n nan_as_category = configs.get('nan_as_category', False)\n\n df = self.data_raw['installments_payments']\n logger.info(\"installments_payments: {}\".format(df.shape))\n\n cat_cols = []\n if configs.get('onehot_encoding', False):\n df, cat_cols, new_cols = process_one_hot_encode(df, cat_cols, nan_as_category)\n self.cols_one_hot.update({'installments_payments': new_cols})\n else:\n cat_cols = IdentifyCategoricalColumn(df)\n\n df, interact_cols = process_interaction(df, process_configs=configs['interaction_columns'])\n installments_agg = self._aggregate_pipeline(df, cat_cols, configs)[major_index]\n return Cast64To32(installments_agg)\n\n # Preprocess credit_card_balance.csv\n def _credit_card_balance(self, configs):\n current_index = self.data_index['credit_card_balance']\n major_index = self.data_index['application_train']\n nan_as_category = configs.get('nan_as_category', False)\n\n df = self.data_raw['credit_card_balance']\n logger.info(\"credit_card_balance: {}\".format(df.shape))\n\n cat_cols = []\n if configs.get('onehot_encoding', False):\n df, cat_cols, new_cols = process_one_hot_encode(df, cat_cols, nan_as_category)\n self.cols_one_hot.update({'credit_card_balance' : new_cols})\n# else:\n# cat_cols = IdentifyCategoricalColumn(df)\n\n credit_card_agg = self._aggregate_pipeline(df, cat_cols, configs)[major_index]\n return Cast64To32(credit_card_agg)\n\n # Data Input/Output Begin\n def ReadDataCSV(self, configs):\n \"\"\"\n configs={'application_train' : {'name' : 'application_train.csv', 'index': 'SK_ID_CURR',},\n \"\"\"\n data_dict = {k: '{}/{}'.format(self.input_path, data.get('name', None)) for k, data in configs.items()}\n self.data_raw = self.data_io_manager.loadCSV(data_dict)\n 
self.data_index = {k: data.get('index', None) for k, data in configs.items()}\n return self.data_raw, self.data_index\n\n def ReadRawHDF(self, configs, filename, limited_by_configs=False):\n \"\"\"\n configs={'application_train' : {'name' : 'application_train.csv', 'index': 'SK_ID_CURR',},\n \"\"\"\n data_dict = {k: None for k, data in configs.items()}\n self.data_raw = self.data_io_manager.loadHDF(filename,\n data_dict,\n limited_by_configs=limited_by_configs)\n\n self.data_raw = {k: Cast64To32(v) for k, v in self.data_raw.items()}\n self.data_index = {k: data.get('index', None) for k, data in configs.items()}\n return self.data_raw, self.data_index\n\n def ReadProcessedHDF(self, configs, filename):\n \"\"\"\n configs={'application_train' : {'name' : 'application_train.csv', 'index': 'SK_ID_CURR',},\n \"\"\"\n self.data_processed = self.data_io_manager.loadHDF(filename, {}, limited_by_configs=False)\n self.data_index = {k: data.get('index', None) for k, data in configs.items()}\n return self.data_processed, self.data_index\n\n def ReadTrainTestHDF(self, configs, filename):\n \"\"\"\n configs={'application_train' : {'name' : 'application_train.csv', 'index': 'SK_ID_CURR',},\n \"\"\"\n self.xy_train_test = self.data_io_manager.loadHDF(filename, configs, limited_by_configs=False)\n return self.xy_train_test\n\n def SaveFileHDF(self, filename, data, opt_overwrite=True):\n self.data_io_manager.saveHDF(filename, data, opt_overwrite)\n # Data Input/Output --End--\n\n @staticmethod\n def ReturnTrainTest(configs):\n df_names = ['train_x', 'train_y', 'test_x', 'test_y']\n configs.update({k: pd.DataFrame() for k, v in configs.items() if k not in df_names})\n for df_name in [k for k, v in configs.items() if v.empty]:\n logger.warning(\"no key as {}\".format(df_name))\n # return train_x, train_y, test_x, test_y\n return configs['train_x'], configs['train_y'], configs['test_x'], configs['test_y']\n\n def CreateTrainTestData(self, configs):\n \"\"\"\n concat all dataframes to create train and test dataframe\n configs={'application_train' : df},\n \"\"\"\n train = configs.get('application_train', pd.DataFrame())\n test = configs.get('application_test', pd.DataFrame())\n\n if train.empty or test.empty:\n logger.error('no train and test dataframe')\n\n excluded = ['application_train', 'application_test']\n for k, v in configs.items():\n if k not in excluded:\n train = train.join(v, how='left')\n test = test.join(v, how='left')\n logger.info(\"to_join={}, {}: train={}, test{}\".format(k, v.shape, train.shape, test.shape))\n gc.collect()\n\n # sorted for further\n cols = sorted(train.columns.tolist())\n train = train[cols]\n test = test[cols]\n\n #all process complete\n cols = sorted([f for f in train.columns if f != self.target_column and f in test.columns])\n self.xy_train_test = {\n 'train_x': train[cols],\n 'train_y': train[self.target_column],\n 'test_x': test[cols],\n 'test_y': test[self.target_column]}\n\n del train, test; gc.collect()\n return self.ReturnTrainTest(self.xy_train_test)\n\n def LoadData(self, data_configs, source='from_csv', prefix='sample'):\n \"\"\"\n \"\"\"\n #initialize, reading in configs for data provider itself\n configs_table = pd.DataFrame(self.provider_configs).T\n configs_table['level'] = configs_table['level'].astype(int)\n configs_table.set_index('level', inplace=True)\n configs_table['filename'] = configs_table['filename'].apply(lambda x: x.format(header=prefix) if isinstance(x, str) else None)\n\n provider_configs = self.provider_configs.get(source, 'from_csv').copy() #\n 
refresh_level = provider_configs.get('level')\n\n # load data at its refresh level\n filename = '{}/{}'.format(self.cache_path, configs_table.loc[refresh_level, 'filename'])\n if refresh_level == 3:\n logger.info(\"Load Train and Test from Cache\")\n self.ReadTrainTestHDF(data_configs['input'], filename)\n if not AnyEmptyDataframe(self.xy_train_test):\n return self.ReturnTrainTest(self.xy_train_test)\n else:\n refresh_level = 2\n logger.warning('No train_test cache to load. Try to refresh at level {}'.format(refresh_level))\n filename = '{}/{}'.format(self.cache_path, configs_table.loc[refresh_level, 'filename'])\n\n if refresh_level == 2:\n logger.info(\"Recreate Train and Test\")\n self.ReadProcessedHDF(data_configs['input'], filename)\n if AnyEmptyDataframe(self.data_processed):\n refresh_level = 1\n logger.warning('no processed cache to load from disk. Attempt to refresh at level {}'.format(refresh_level))\n filename = '{}/{}'.format(self.cache_path, configs_table.loc[refresh_level, 'filename'])\n\n if refresh_level == 1:\n logger.info(\"Process DataFrames from HDF Cashe\")\n self.ReadRawHDF(data_configs['input'], filename, limited_by_configs=True)\n if AnyEmptyDataframe(self.data_raw):\n refresh_level = 0\n logger.warning('No raw cache to load. Try to refresh at level {}'.format(refresh_level))\n\n if refresh_level == 0:\n logger.info(\"Process DataFrames from CSV\")\n self.ReadDataCSV(data_configs['input'])\n filename = '{}/{}'.format(self.cache_path, configs_table.loc[1, 'filename'])\n self.SaveFileHDF(filename, self.data_raw, opt_overwrite=True)\n\n # process data\n if refresh_level <= 1:\n logger.info(\"Process DataFrames\")\n train_test = self._application_train_test(data_configs['application'])\n self.data_processed = {'application_train': train_test[0],\n 'application_test' : train_test[1],}\n\n self.data_processed.update({\n 'bureau' : self._bureau_and_balance(data_configs['bureau']),\n 'previous_application' : self._previous_application(data_configs['previous_application']),\n 'pos_cash' : self._pos_cash_balance(data_configs['pos_cash']),\n 'credit_card_balance' : self._credit_card_balance(data_configs['credit_card_balance']),\n 'installments_payments': self._installments_payments(data_configs['installments_payments']),\n })\n\n # save processed\n filename = '{}/{}'.format(self.cache_path, configs_table.loc[2, 'filename'])\n self.SaveFileHDF(filename, self.data_processed, opt_overwrite=True)\n\n # create train and test\n if refresh_level <= 2:\n self.CreateTrainTestData(self.data_processed)\n filename = '{}/{}'.format(self.cache_path, configs_table.loc[3, 'filename'])\n self.SaveFileHDF(filename, self.xy_train_test, opt_overwrite=True)\n\n return self.ReturnTrainTest(self.xy_train_test)\n\n\ndef main(argc, argv):\n\n DataConfigs = InitializeConfigs('../configs/SampleDataConfigs.py').DataConfigs\n\n dp = DataProvider()\n #dp.ReadRawHDF(DataConfigs, filename='../data/cache_sample_raw.hdf5', limited_by_configs=False)\n #import pdb; pdb.set_trace()\n\n #dp.LoadData(DataConfigs, source='from_csv', prefix='sample')\n d = dp.LoadData(DataConfigs, source='from_raw_cache', prefix='sample')\n\n\n #d = dp.LoadData(DataConfigs, source='from_processed', prefix='sample')\n #d = dp.LoadData(DataConfigs, source='from_train_test', prefix='sample')\n\n import pdb; pdb.set_trace()\n\n train_x, train_y = d[0], d[1]\n\n logger.info('P/N ratio:\\n{}'.format(train_y.value_counts(normalize=True).sort_index()))\n\n #ModelConfigs = 
InitializeConfigs('../configs/SampleModelConfigs.py').ModelConfigs.get('LossGuideXGB')\n #ModelConfigs = InitializeConfigs('../configs/SampleModelConfigs.py').ModelConfigs.get('DepthWiseXGB')\n #ModelConfigs = InitializeConfigs('../configs/SampleModelConfigs.py').ModelConfigs.get('LinearXGB')\n #ModelConfigs = InitializeConfigs('../configs/SampleModelConfigs.py').ModelConfigs.get('LGBM')\n #ModelConfigs = InitializeConfigs('../configs/SampleModelConfigs.py').ModelConfigs.get('DartLGBM')\n #ModelConfigs = InitializeConfigs('../configs/SampleModelConfigs.py').ModelConfigs.get('BayesianCatBoost')\n #ModelConfigs = InitializeConfigs('../configs/SampleModelConfigs.py').ModelConfigs.get('BernoulliCatBoost')\n\n #import pdb; pdb.set_trace()\n #from DataModeler import DataModeler\n #eval = DataModeler(ModelConfigs)\n #eval.setupValidation(train_x.iloc[:5000], train_y.iloc[:5000])\n #eval.trainModels(train_x.iloc[:5000], train_y.iloc[:5000])\n\n #HPOConfigs = ModelConfigs.get(\"hyperparameter_optimization\")\n #from ScikitOptimize import ScikitOptimize\n #from xgboost import XGBClassifier\n #from lightgbm import LGBMClassifier\n #from catboost import CatBoostClassifier\n #hyperparameter_optimize = ScikitOptimize(XGBClassifier, HPOConfigs, task_name='LossGuideXGB')\n #hyperparameter_optimize = ScikitOptimize(XGBClassifier, HPOConfigs, task_name='DepthWiseXGB')\n #hyperparameter_optimize = ScikitOptimize(LGBMClassifier, HPOConfigs, task_name='DartLGBM')\n #hyperparameter_optimize = ScikitOptimize(CatBoostClassifier, HPOConfigs, task_name='LGBM')\n #hyperparameter_optimize.search(train_x.iloc[:10000], train_y.iloc[:10000])\n\n #hyperparameter_optimize.search(train_x, train_y)\n\nif __name__ == '__main__':\n import sys\n main(len(sys.argv), sys.argv)\n\n", "id": "11244596", "language": "Python", "matching_score": 4.0084943771362305, "max_stars_count": 0, "path": "lib/DataProvider.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nThis script provides feature tranform and forked from\nhttps://www.kaggle.com/jsaguiar/updated-0-792-lb-lightgbm-with-simple-features\nCreated on Thu July 20 2018\n\n@author: cttsai\n\"\"\"\nimport gc; gc.enable()\nimport itertools\n\nimport numpy as np\nimport pandas as pd\n\nfrom LibConfigs import logger\nfrom Utility import IdentifyCategoricalColumn, CheckColumnsExist\n\n\ndef process_one_hot_encode(df, categorical_columns=[], nan_as_category=True):\n \"\"\"\n ------\n return df, new_columns, columns_to_convert\n \"\"\"\n logger.info(\"Process OneHot Encoding\")\n original_columns = df.columns.tolist()\n\n if not categorical_columns:\n categorical_columns = IdentifyCategoricalColumn(df)\n categorical_columns, _ = CheckColumnsExist(df, categorical_columns)\n\n logger.info(\"identify {} categorical columns: {}\".format(len(categorical_columns), categorical_columns))\n df = pd.get_dummies(df, columns=categorical_columns, dummy_na=nan_as_category)\n\n new_columns = [c for c in df.columns if c not in original_columns]\n logger.info(\"one-hot encoded to {} columns:\".format(len(new_columns)))\n df[new_columns] = df[new_columns].astype(np.int8)\n ret = {cat: sorted([col for col in new_columns if cat in col]) for cat in categorical_columns}\n for k, v in ret.items():\n logger.info(\"onehot {} to {} columns: {}\".format(k, len(v), v))\n\n return df, new_columns, categorical_columns\n\n\ndef process_interaction(df, process_configs):\n \"\"\"\n process configs is a dictionary as\n a dictionary with {'new_feature_name': {'mode': 'add', 'a': 'col_name', 
'b':'col_name',}, }\n ------\n\n \"\"\"\n logger.info(\"Process Interactions\")\n\n possible_arithmetics = ['add', 'sum_squared',\n 'subtract', 'subtract_positive',\n 'multiply',\n 'divide', 'divide_nonzero']\n\n new_columns = []\n for v in process_configs:\n k = v['name']\n logger.info(\"process {}\".format(k))\n\n # check arithmetic\n arithmetic = v.get('mode', None)\n if arithmetic not in possible_arithmetics:\n logger.warning(\"no arithmetic on {}\".format(k))\n continue\n\n #check feature columns\n ckeck_cols = [vv for kk, vv in v.items() if kk not in ['name', 'mode']]\n cols_exist, cols_not_exist = CheckColumnsExist(df, ckeck_cols)\n if cols_not_exist:\n logger.warning(\"missing {} columns: {}\".format(len(cols_not_exist), cols_not_exist))\n continue\n\n # process\n if 'add' == arithmetic:\n df[k] = df[v['a']] + df[v['b']]\n elif 'subtract' == arithmetic:\n df[k] = df[v['a']] - df[v['b']]\n elif 'subtract_positive' == arithmetic:\n df[k] = (df[v['a']] - df[v['b']]).apply(lambda x: x if x > 0 else 0)\n elif 'multiply' == arithmetic:\n df[k] = df[v['a']] * df[v['b']]\n elif 'divide' == arithmetic:\n df[k] = df[v['a']] / df[v['b']]\n elif 'divide_nonzero' == arithmetic:\n df[k] = df[v['a']] / (df[v['b']] + 1.)\n elif 'sum_squared' == arithmetic:\n df[k] = df[[v['a'], v['b']]].pow(2).sum(axis=1)# np.square(df[v['a']]) + np.square(df[v['b']])\n\n new_columns.append(k)\n\n return df, new_columns\n\n\ndef process_deep_interactions(df, process_configs):\n \"\"\"\n {'header' : 'EXT_SOURCES_SYNTHESIZE',\n 'transform': ['product', 'mean', 'sum', 'sum_squared', 'std'],\n 'columns' : ['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3'],\n }\n \"\"\"\n applicable_methods = ['kurtosis', 'sum', 'sum_squared', 'product', 'mean', 'std']\n\n header = process_configs.get('header', 'NEW')\n cols = process_configs.get('columns', [])\n cols_na = [f for f in cols if f not in df.columns]\n cols = [f for f in cols if f in df.columns]\n methods = process_configs.get('transform', [])\n methods = [m for m in methods if m in applicable_methods]\n\n for m in methods:\n logger.info('transform deep interactions ({}): {}'.format(m, cols))\n if cols_na:\n logger.warning('transform deep interactions ({}), features not found: {}'.format(\n m, cols_na))\n\n name = '{}_{}'.format(header, m.upper())\n if m == 'kurtosis':\n df[name] = df[cols].kurtosis(axis=1)\n elif m == 'mean':\n df[name] = df[cols].mean(axis=1)\n elif m == 'sum':\n df[name] = df[cols].sum(axis=1)\n elif m == 'sum_squared':\n df[name] = df[cols].pow(2).sum(axis=1)\n elif m == 'product':\n df[name] = df[cols].fillna(df[cols].mean()).product(axis=1)\n elif m == 'std':\n df[name] = df[cols].std(axis=1)\n df[name] = df[name].fillna(df[name].mean())\n\n return df\n\n\ndef process_replace(df, process_configs):\n \"\"\"\n {'DAYS_EMPLOYED': {365243: np.nan, }, }\n \"\"\"\n logger.info(\"Process Fill NA\")\n columns = sorted(list(process_configs.keys()))\n cols_exist, cols_not_exist = CheckColumnsExist(df, columns)\n\n configs = {k: v for k, v in process_configs.items() if k in cols_exist}\n df.replace(configs, inplace=True)\n\n for k, v in configs.items():\n logger.info(\"impute {} using {}\".format(k, v))\n if cols_not_exist:\n logger.warning(\"missing {} columns: {}\".format(len(cols_not_exist), cols_not_exist))\n\n return df\n\n\ndef process_drop_rows(df, process_configs):\n \"\"\"\n {'CODE_GENDER': ['XNA'], }\n \"\"\"\n logger.info(\"Process Drop Rows\")\n columns = sorted(list(process_configs.keys()))\n cols_exist, cols_not_exist = CheckColumnsExist(df, 
columns)\n\n configs = {k: v for k, v in process_configs.items() if k in cols_exist}\n inds = df[cols_exist].isin(configs)\n inds_sel = inds.any(axis=1)\n\n for f, series in inds.iteritems():\n logger.info(\"remove {} rows by in {} if any {}\".format(f, series.sum(), process_configs[f]))\n\n logger.info(\"overall remove {} from {} rows\".format(inds_sel.astype(int).sum(), inds_sel.shape[0]))\n if cols_not_exist:\n logger.warning(\"missing {} columns: {}\".format(len(cols_not_exist), cols_not_exist))\n\n return df.loc[~inds_sel]\n\n\ndef process_factorize(df, process_configs):\n \"\"\"\n input a list of features to factorize (label encoding)\n \"\"\"\n logger.info(\"Process Factorize\")\n cols_exist, cols_not_exist = CheckColumnsExist(df, sorted(process_configs))\n\n for bin_feature in cols_exist:\n df[bin_feature], uniques = pd.factorize(df[bin_feature], sort=False)\n logger.info(\"factorize {} in {}: {}\".format(len(uniques), bin_feature, uniques))\n\n for k in cols_not_exist:\n logger.warning(\"missing {}\".format(k))\n\n return df\n\n\ndef process_aggregate(df, process_configs, groupby_cols, cat_cols=[]):\n \"\"\"\n pass each groupby_cols one by one: aggregate and condictional aggregate, general aggregate\n \"\"\"\n logger.info(\"Process Aggregate\")\n groupby_cols = [f for f in groupby_cols if f in df.columns]\n# if groupby_cols not in df.columns:\n if not groupby_cols:\n logger.warning(\"aggregate column {} not exist\".format(groupby_cols))\n return pd.DataFrame({groupby_cols:[]}).set_index(groupby_cols)\n\n logger.info(\"aggregate on {}\".format(groupby_cols))\n header = process_configs.get('header', 'foobar')\n\n aggregations = {}\n # aggregate and condictional aggregate\n num_cols = process_configs.get('num', {})\n cat_agg = process_configs.get('cat', [])\n if num_cols or cat_agg:\n aggregations = {k:list(v) for k, v in num_cols.items() if k in df.columns and v}\n aggregations.update({k:list(cat_agg) for k in cat_cols if k in df.columns and cat_agg})\n for k, v in aggregations.items(): # dict\n logger.info(\"aggregate {} ({}) with {}\".format(k, df[k].dtype, v))\n\n # assigned in configs but not in dataframe\n missing = sorted(list(set(num_cols.keys()).union(set(cat_cols)).difference(set(aggregations.keys()))))\n for k in missing: # dict\n if k in num_cols.keys():\n logger.info(\"missing {} in num\".format(k))\n elif k in cat_cols:\n logger.info(\"missing {} in cat\".format(k))\n\n # processing\n if aggregations:\n df_agg = df.groupby(groupby_cols).agg({**aggregations})\n df_agg.columns = pd.Index(['{}_{}_{}'.format(header, e[0], e[1].upper()) for e in df_agg.columns.tolist()])\n else:\n logger.info(\"no aggragation on {} and {}\".format(header, groupby_cols))\n df_agg = pd.DataFrame({groupby_cols:[]}).set_index(groupby_cols)\n\n if process_configs.get('count', False):\n logger.info(\"aggregate count on {} at {}\".format(groupby_cols, header))\n df_agg['{}_COUNT_{}'.format(header, '_'.join(groupby_cols))] = df.groupby(groupby_cols).size()\n\n return df_agg\n\n\ndef process_decomposition(df, process_configs):\n \"\"\"\n {'columns': ['FLAG_CONT_MOBILE', 'FLAG_PHONE'],\n 'stems' : ['CODE_GENDER_'],\n 'methods' : {'APPLICANT_SVD': {'object': TruncatedSVD,\n 'params': {'n_components': 8,\n 'algorithm': 'randomized',\n 'n_iter': 10,\n 'random_state': 42},},\n },\n }\n \"\"\"\n use_cols, cols_not_exist = CheckColumnsExist(df, process_configs.get('columns', []))\n stems = process_configs.get('stems', [])\n if stems:\n dict_stem = {s:[f for f in df.columns if s in f] for s in stems}\n 
cols_stem = list(itertools.chain.from_iterable(dict_stem.values()))\n        if cols_stem:\n            use_cols.extend(cols_stem)\n            for k, v in dict_stem.items():\n                logger.info('find {} stem \"{}\": {}'.format(len(v), k, v))\n\n    use_cols = sorted(use_cols)\n    logger.info('decompose on {} features: {}'.format(len(use_cols), use_cols))\n    df_sub = df[use_cols].apply(lambda x: np.nan_to_num(x))\n\n    def func(k, v, sub):\n        tf = v.get('object', None)\n        params = v.get('params', {})\n        if not tf:\n            return pd.DataFrame()\n        logger.info('decompose {} on {} features'.format(k, sub.shape[1]))\n        d = tf().set_params(**params).fit_transform(sub)\n        return pd.DataFrame(d, columns=['{}_{}'.format(k, i) for i in range(1, d.shape[1]+1)])\n\n    ret = [func(k, v, df_sub) for k, v in process_configs.get('methods', {}).items()]\n    ret = pd.concat(ret, axis=1, join='inner')\n    ret.index = df.index\n    return ret\n", "id": "1242212", "language": "Python", "matching_score": 0.8198881149291992, "max_stars_count": 0, "path": "lib/FeatureTransformer.py" }, { "content": "import os\nfrom typing import Callable, Optional, List, Dict\nimport pandas as pd\nfrom scipy.stats import ks_2samp\n\nfrom .ISolver import ISolver\n\n\ndef _ckeck_dir_path_exist(working_path: str):\n    if os.path.exists(working_path) and not os.path.isfile(working_path):\n        return True\n\n    return False\n\n\ndef _mkdir_safe(working_path: str) -> bool:\n    if not _ckeck_dir_path_exist(working_path):\n        os.makedirs(working_path)\n        return True\n\n    return False\n\n\nclass MixinTransformerSolver(ISolver):\n    def __init__(\n            self, score_func: Callable, fine_tuned_dir: str, pretrained_dir: str, model_weights_filename: str,\n            model_stats_filename: str = \"model_stats.hdf5\"):\n\n        self.score_func: Callable = score_func\n\n        self.model_weights_filename: str = model_weights_filename # consider move to configs\n        self.model_stats_filename: str = model_stats_filename\n\n        _mkdir_safe(fine_tuned_dir)\n        self.fine_tuned_dir_path: str = fine_tuned_dir\n        print(f\"working dir: {self.fine_tuned_dir_path}\")\n        self.pretrained_dir_path: str = pretrained_dir\n        if not _ckeck_dir_path_exist(pretrained_dir):\n            err_msg = f\"pretrained dir path does not exist: {pretrained_dir}\"\n            raise ValueError(err_msg)\n\n        self.target_columns: Optional[List[str]] = None\n\n        # results\n        self.preds_test: Optional[pd.DataFrame] = None\n        self.preds_valid: Optional[pd.DataFrame] = None\n        self.trues_valid: Optional[pd.DataFrame] = None\n        self.valid_score: Optional[float] = None\n\n        self.is_executed: bool = False\n\n    def _analyze_score_dist(self, data: Dict):\n        train_groups = data.get(\"train_groups\", None)\n\n        # validation-test overall diff\n        ks_result = self.preds_test.apply(lambda x: ks_2samp(x.values, self.preds_valid[x.name].values), axis=0)\n        ks_stats, p_value = list(zip(*(ks_result.tolist())))\n        stats_diff = pd.concat([\n            self.preds_test.mean().rename(\"test_mean\"), self.preds_valid.mean().rename(\"valid_mean\"),\n            (self.preds_test.mean() - self.preds_valid.mean()).rename(\"mean_diff\"),\n            self.preds_test.std().rename(\"test_std\"), self.preds_valid.std().rename(\"valid_std\"),\n            pd.Series(ks_stats, index=self.preds_test.columns).rename(\"ks_stats\"),\n            pd.Series(p_value, index=self.preds_test.columns).rename(\"p_value\"), ], axis=1).sort_values(\"mean_diff\")\n        print(f\"valid-test difference:\\n{stats_diff.round(6)}\\n\")\n\n        # validation performance\n        valid_breakdown_metrics = pd.concat([\n            (self.trues_valid - self.preds_valid).mean(axis=0).rename(\"bias\"),\n            (self.trues_valid - 
self.preds_valid).abs().mean(axis=0).rename(\"mae\"),\n ((self.trues_valid - self.preds_valid) / self.trues_valid.mean()).abs().mean(axis=0).rename(\"mape\"),\n self.trues_valid.apply(\n lambda x: x.corr(self.preds_valid[x.name], method=\"pearson\"), axis=0).rename(\"pearson\"),\n self.trues_valid.apply(\n lambda x: x.corr(self.preds_valid[x.name], method=\"spearman\"), axis=0).rename(\"spearman\"),\n ], axis=1).sort_values(\"spearman\", ascending=True)\n print(f\"validation breakdown metrics:\\n{valid_breakdown_metrics.round(6)}\\n\")\n\n valid_overall_metrics = valid_breakdown_metrics.describe()\n print(f\"validation overall metrics:\\n{valid_overall_metrics.round(6)}\\n\")\n\n #\n output_categories_question = data.get(\"output_categories_question\", None)\n output_categories_answer = data.get(\"output_categories_answer\", None)\n if output_categories_question is not None and output_categories_answer is not None:\n y_valid_q = self.trues_valid[output_categories_question]\n p_valid_q = self.preds_valid[output_categories_question]\n valid_score_question = self.score_func(y_valid_q.values, p_valid_q.values)\n\n y_valid_a = self.trues_valid[output_categories_answer]\n p_valid_a = self.preds_valid[output_categories_answer]\n valid_score_answer = self.score_func(y_valid_a.values, p_valid_a.values)\n print(f\"valid score on question: {valid_score_question:.3f}, answer: {valid_score_answer:.3f}\\n\")\n\n # analysis by groups\n groupby_obj = train_groups.reindex(index=self.trues_valid.index).groupby(\"category\")\n group_valid_score = groupby_obj.apply(lambda x: self.score_func(\n self.trues_valid.reindex(index=x.index).values, self.preds_valid.reindex(\n index=x.index).values)).to_frame(\"score\")\n print(f\"group valid score: \\n{group_valid_score}\\n\")\n group_valid_score.index = group_valid_score.index.tolist() # categorical index casting to normal str\n\n stats_dict = {\n 'test_preds': self.preds_test,\n 'valid_preds': self.preds_valid,\n 'valid_trues': self.trues_valid,\n \"valid_test_stats_diff\": stats_diff,\n \"valid_breakdown_metrics\": valid_breakdown_metrics,\n \"valid_overall_metrics\": valid_overall_metrics,\n \"valid_group_score\": group_valid_score,\n }\n\n return stats_dict\n\n @property\n def fine_tuned_model_weights_file_path_(self) -> str:\n return os.path.join(self.fine_tuned_dir_path, self.model_weights_filename)\n\n @property\n def fine_tuned_model_stats_file_path_(self) -> str:\n return os.path.join(self.fine_tuned_dir_path, self.model_stats_filename)\n\n def run(self, data: Dict, fit_params: Optional[Dict] = None, inference_only: bool = False, **kwargs):\n test_x = data.get(\"test_x\", None)\n self.target_columns = data[\"output_categories\"]\n\n if inference_only:\n self.is_executed = True\n self.preds_test = self._run_inference(test_x)\n print(f\"test dist:\\n{self.preds_test.describe().T}\")\n return self\n\n self._run_model_fine_tune(data=data, fit_params=fit_params, **kwargs)\n self.preds_test = self._run_inference(test_x)\n print(f\"test dist:\\n{self.preds_test.describe().T}\")\n self.is_executed = True\n\n results = self._analyze_score_dist(data)\n with pd.HDFStore(self.fine_tuned_model_stats_file_path_, mode=\"w\") as store:\n for k, v in results.items():\n store.put(key=k, value=v)\n print(f\"save stats: {k}, shape={v.shape}\")\n\n return self\n\n def analyze(self):\n import pdb;\n pdb.set_trace()\n return self\n\n @property\n def test_prediction_(self):\n if not self.is_executed:\n raise ValueError(\"need to run solver before get results\")\n\n return 
self.preds_test\n\n @property\n def valid_trues_(self):\n if not self.is_executed:\n raise ValueError(\"need to run solver before get results\")\n\n if self.trues_valid is None:\n print(\"no model validation in this run\")\n\n return self.trues_valid\n\n @property\n def valid_prediction_(self):\n if not self.is_executed:\n raise ValueError(\"need to run solver before get results\")\n\n if self.preds_valid is None:\n print(\"no model validation in this run\")\n\n return self.preds_valid\n\n @property\n def valid_score_(self) -> float:\n if not self.is_executed:\n raise ValueError(\"need to run solver before get results\")\n\n if self.valid_score is None:\n print(\"no model validation while run\")\n\n return self.valid_score\n\n def _run_inference(self, test_x):\n raise NotImplementedError()\n\n def _run_model_fine_tune(self, data: Dict, fit_params: Dict, **kwargs):\n raise NotImplementedError()\n\n", "id": "4432920", "language": "Python", "matching_score": 5.307820796966553, "max_stars_count": 2, "path": "nlp_utils/Solver/BaseTransformerSovler.py" }, { "content": "from typing import Dict, Optional\n\n\nclass ISolver:\n @property\n def fine_tuned_model_weights_file_path_(self) -> str:\n raise NotImplementedError()\n\n def run(self, data: Dict, fit_params: Optional[Dict] = None, inference_only: bool = False, **kwargs):\n raise NotImplementedError()\n\n def analyze(self):\n raise NotImplementedError()\n\n @property\n def test_prediction_(self):\n raise NotImplementedError()\n\n @property\n def valid_trues_(self):\n raise NotImplementedError()\n\n @property\n def valid_prediction_(self):\n raise NotImplementedError()\n\n @property\n def valid_score_(self) -> float:\n raise NotImplementedError()\n", "id": "6355714", "language": "Python", "matching_score": 0.2630709707736969, "max_stars_count": 2, "path": "nlp_utils/Solver/ISolver.py" }, { "content": "from typing import Callable, Dict, List, Tuple\nfrom functools import partial\nimport numpy as np\nimport pandas as pd\n\nfrom scipy.stats import spearmanr\n\n\ndef spearmanr_corr(y_true: np.array, y_pred: np.array):\n return spearmanr(y_true, y_pred).correlation\n\n\nclass IRounder:\n def fit(self, y_ref: pd.DataFrame, y_pred: pd.DataFrame):\n raise NotImplementedError()\n\n def fit_transform(self, y_ref: pd.DataFrame, y_pred: pd.DataFrame) -> pd.DataFrame:\n raise NotImplementedError()\n\n def transform(self, y_pred: pd.DataFrame) -> pd.DataFrame:\n raise NotImplementedError()\n\n\nclass _OptimalRounder(IRounder):\n def __init__(self, ref: pd.DataFrame, iter: int = 1000, seed: int = 42):\n self.ref: pd.DataFrame = ref\n self.coef_: Dict[str, List[float]] = dict()\n self.value_: Dict[str, List[float]] = dict()\n self.iter: int = iter\n self.rng = np.random.RandomState(seed)\n\n def _evaluate(self, coef: np.array, y_true: pd.Series, y_pred: pd.Series, mapped_values: List[float]) -> float:\n raise NotImplementedError\n\n def _fit_one_column(self, ref: pd.Series, y_true: pd.Series, y_pred: pd.Series) -> Tuple[List[float], List[float]]:\n initial_coef = np.linspace(0, 1, num=ref.nunique())\n mapped_value = sorted(ref.unique())\n loss_partial = partial(self._evaluate, y_true=y_true, y_pred=y_pred, mapped_value=mapped_value)\n\n score = loss_partial(initial_coef)\n best_score = score\n best_solution = initial_coef\n len_x = len(initial_coef)\n for i in range(self.iter):\n solution = sorted(self.rng.rand(len_x))\n score = loss_partial(solution)\n if score is not None and score < best_score:\n best_score = score\n best_solution = solution\n\n return 
best_solution, mapped_value\n\n def _transform_one_column(self, y_pred: pd.Series, coef: List[float], mapped_value: List[float]) -> List[float]:\n len_map = len(mapped_value) - 1\n return list(map(lambda ind: mapped_value[min(ind, len_map)], np.digitize(np.nan_to_num(y_pred), bins=coef)))\n\n def fit(self, y_ref: pd.DataFrame, y_pred: pd.DataFrame):\n self.fit_transform(y_true=y_ref, y_pred=y_pred)\n return self\n\n def fit_transform(self, y_true: pd.DataFrame, y_pred: pd.DataFrame) -> pd.DataFrame:\n for col in y_true.columns:\n print(f'fitting: {col}')\n self.coef_[col], self.value_[col] = self._fit_one_column(self.ref[col], y_true[col], y_pred[col])\n\n return self.transform(y_pred)\n\n def transform(self, y_pred: pd.DataFrame) -> pd.DataFrame:\n return y_pred.apply(\n lambda x: self._transform_one_column(x, coef=self.coef_[x.name], mapped_value=self.value_[x.name]))\n\n\nclass OptimalRounder(_OptimalRounder):\n def __init__(self, ref: pd.DataFrame, loss: Callable = spearmanr_corr, direction: str = 'auto'):\n super().__init__(ref=ref)\n self.loss: Callable = loss\n self.direction: str = direction # support ['max', 'min', 'auto']\n if self.direction == 'auto':\n self.direction = 'max'\n\n def _evaluate(self, coef: np.array, y_true: pd.Series, y_pred: pd.Series, mapped_value: List[float]) -> float:\n y_pred_hat = self._transform_one_column(y_pred, coef=coef, mapped_value=mapped_value)\n score = self.loss(y_true.values, y_pred_hat)\n if self.direction == 'max':\n return score * -1.\n\n return score\n", "id": "11505538", "language": "Python", "matching_score": 2.441807985305786, "max_stars_count": 2, "path": "nlp_utils/Utils/OptimalRounder.py" }, { "content": "from .OptimalRounder import OptimalRounder\n", "id": "11258569", "language": "Python", "matching_score": 1.0018491744995117, "max_stars_count": 2, "path": "nlp_utils/Utils/__init__.py" }, { "content": "from .Solver import BaselineTransformerTFSolver\nfrom .Solver import AugmentedTransformerTFSolver\n\nfrom Utils import OptimalRounder\n", "id": "8018636", "language": "Python", "matching_score": 3.0980091094970703, "max_stars_count": 2, "path": "nlp_utils/__init__.py" }, { "content": "from .BaselineTransformerTFSolver import BaselineTransformerTFSolver\nfrom .BaselineTransformerTFSolver import AugmentedTransformerTFSolver\n", "id": "4167359", "language": "Python", "matching_score": 2.922666072845459, "max_stars_count": 2, "path": "nlp_utils/Solver/__init__.py" } ]
3.042358
sirbana
[ { "content": "class BasePage(object):\n def __init__(self, driver):\n self.driver = driver\n\n\ndef click_non_interactible_element(driver, element):\n \"\"\" Selenium doesn't support clicking non-interactible elements (eg: span).\n So we need to run a javascript, that simulates the click. \"\"\"\n driver.execute_script(\"arguments[0].click();\", element)", "id": "1881676", "language": "Python", "matching_score": 0.2263818383216858, "max_stars_count": 0, "path": "testcase/pages/base_page.py" }, { "content": "from testcase.locators.search_page_locators import SearchPageLocators\nfrom testcase.pages.base_page import BasePage\n\n\nclass SearchResultPage(BasePage):\n def __init__(self, driver):\n super().__init__(driver)\n self.locators = SearchPageLocators(self.driver)\n\n def first_result_title(self):\n element = self.locators.first_result_product()\n return element.text\n\n def invalid_search_message(self):\n element = self.locators.invalid_search_message()\n return element.text", "id": "4060159", "language": "Python", "matching_score": 0.47751134634017944, "max_stars_count": 0, "path": "testcase/pages/search_result_page.py" }, { "content": "from selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.wait import WebDriverWait\n\nfrom testcase.locators.base_page_locators import BasePageLocators\n\n\nclass MainPageLocators(BasePageLocators):\n def sign_in_button(self):\n return self.driver.find_element_by_css_selector(\".header_user_info>a.login\")\n\n def search_field_input(self):\n return self.driver.find_element_by_id('search_query_top')\n\n def search_button(self):\n return self.driver.find_element_by_css_selector('#searchbox>button.button-search')\n\n def category_dresses_link(self):\n return self.driver.find_element_by_css_selector('ul.sf-menu>li>a[title=\"Dresses\"]')\n\n def contact_us_button(self):\n return self.driver.find_element_by_css_selector('#contact-link>a')\n\n def shopping_cart_button(self):\n return self.driver.find_element_by_css_selector('.shopping_cart>a')\n\n def nth_product(self, n):\n return self.driver.find_element_by_css_selector(\n \".tab-content>ul:first-child>li:nth-child(\" + str(n) + \")>div>div>div.product-image-container>a:first-child\"\n )\n\n def add_to_cart_nth_product_button(self, n):\n return self.driver.find_element_by_css_selector(\n \"ul#homefeatured>li:nth-child(\" + str(n) + \")>div.product-container>div>.button-container>a.ajax_add_to_cart_button\"\n )\n\n def title_of_nth_product(self, n):\n return self.driver.find_element_by_css_selector(\n \"ul#homefeatured>li:nth-child(\" + str(n) + \")>div.product-container>div.right-block>h5\"\n )\n\n def second_product_add_to_cart_button(self):\n return self.driver.find_element_by_css_selector\n\n def continue_to_checkout_button(self):\n return WebDriverWait(self.driver, 5).until(\n EC.visibility_of_element_located((By.CSS_SELECTOR, '.layer_cart_cart>.button-container>a'))\n )\n\n def button_check_out(self):\n return WebDriverWait(self.driver, 5).until(\n EC.visibility_of_element_located((By.ID, 'button_order_cart'))\n )\n\n def shopping_cart_button(self):\n return self.driver.find_element_by_css_selector('.shopping_cart>a')\n\n def continue_shopping_button(self):\n return WebDriverWait(self.driver, 5).until(\n EC.visibility_of_element_located((By.CSS_SELECTOR, '.continue'))\n )", "id": "12466889", "language": "Python", "matching_score": 4.986266613006592, "max_stars_count": 0, "path": 
"testcase/locators/main_page_locators.py" }, { "content": "from selenium import webdriver\n\nfrom testcase.locators.main_page_locators import MainPageLocators\nfrom testcase.pages.base_page import BasePage, click_non_interactible_element\n\n\nclass MainPage(BasePage):\n def __init__(self, driver):\n super().__init__(driver)\n self.locators = MainPageLocators(self.driver)\n\n def is_title_matches(self):\n return \"My Store\" in self.driver.title\n\n def click_sing_in_button(self):\n element = self.locators.sign_in_button()\n element.click()\n\n def send_keys_in_search(self, product):\n element = self.locators.search_field_input()\n element.send_keys(product)\n\n def click_search_button(self):\n element = self.locators.search_button()\n element.click()\n\n def click_dresses_link(self):\n element = self.locators.category_dresses_link()\n element.click()\n\n def click_contact_us_button(self):\n element = self.locators.contact_us_button()\n element.click()\n\n def click_cart_button(self):\n element = self.locators.shopping_cart_button()\n element.click()\n\n def hover_nth_product(self, n):\n element = self.locators.nth_product(n)\n action = webdriver.ActionChains(self.driver)\n action.move_to_element(element)\n action.perform()\n\n def click_add_to_cart_nth_product_button(self, n):\n element = self.locators.add_to_cart_nth_product_button(n)\n element.click()\n\n def click_continue_to_checkout_button(self):\n element = self.locators.continue_to_checkout_button()\n element.click()\n\n def click_button_check_out(self):\n element = self.locators.button_check_out()\n element.click()\n\n def hover_shopping_cart_button(self):\n element = self.locators.shopping_cart_button()\n action = webdriver.ActionChains(self.driver)\n action.move_to_element(element)\n action.perform()\n\n def click_continue_shopping_button(self):\n element = self.locators.continue_shopping_button()\n click_non_interactible_element(self.driver, element)\n\n def title_of_nth_product(self, n):\n element = self.locators.title_of_nth_product(n)\n return element.text\n", "id": "9336676", "language": "Python", "matching_score": 3.2393417358398438, "max_stars_count": 0, "path": "testcase/pages/main_page.py" }, { "content": "import unittest\nimport time\nfrom selenium import webdriver\n\nimport testcase.pages.cart_page\nimport testcase.pages.category_products_page\nimport testcase.pages.contact_us_page\nimport testcase.pages.forgot_your_pass_page\nimport testcase.pages.main_page\nimport testcase.pages.my_account_page\nimport testcase.pages.registration_page\nimport testcase.pages.search_result_page\nimport testcase.pages.sign_in_page\n\n\nclass AutomationPracticeComTest(unittest.TestCase):\n def setUp(self):\n self.driver = webdriver.Chrome(r\"C:\\chromedriver.exe\")\n self.driver.get(\"http://automationpractice.com/index.php\")\n\n def tearDown(self):\n self.driver.close()\n\n def test_title(self):\n main_page = testcase.pages.main_page.MainPage(self.driver)\n assert main_page.is_title_matches()\n\n def test_registration_invalid(self):\n main_page = testcase.pages.main_page.MainPage(self.driver)\n main_page.click_sing_in_button()\n\n sign_in_page = testcase.pages.sign_in_page.SignInPage(self.driver)\n sign_in_page.input_email_in_register(\"anan\")\n\n sign_in_page.click_register_button()\n message = sign_in_page.register_error_message()\n assert message == 'Invalid email address.'\n\n def test_registration_valid(self):\n main_page = testcase.pages.main_page.MainPage(self.driver)\n main_page.click_sing_in_button()\n\n sign_in_page = 
testcase.pages.sign_in_page.SignInPage(self.driver)\n sign_in_page.input_email_in_register('<EMAIL>')\n\n sign_in_page.click_register_button()\n\n register_page = testcase.pages.registration_page.RegistrationPage(self.driver)\n assert register_page.page_heading().lower() == 'your personal information'\n\n def test_valid_search(self):\n main_page = testcase.pages.main_page.MainPage(self.driver)\n main_page.send_keys_in_search('t-shirt')\n main_page.click_search_button()\n\n search_result_page = testcase.pages.search_result_page.SearchResultPage(self.driver)\n assert 't-shirt' in search_result_page.first_result_title().lower()\n\n def test_invalid_search(self):\n main_page = testcase.pages.main_page.MainPage(self.driver)\n main_page.send_keys_in_search(\"cucu\")\n main_page.click_search_button()\n\n search_result_page = testcase.pages.search_result_page.SearchResultPage(self.driver)\n assert search_result_page.invalid_search_message() == 'No results were found for your search \"cucu\"'\n\n def test_same_category_products(self):\n main_page = testcase.pages.main_page.MainPage(self.driver)\n main_page.click_dresses_link()\n\n category_page = testcase.pages.category_products_page.CategoryProductsPage(self.driver)\n for product in category_page.products_list():\n assert 'dress' in product.lower()\n\n def test_valid_registration(self):\n main_page = testcase.pages.main_page.MainPage(self.driver)\n main_page.click_sing_in_button()\n\n sign_in_page = testcase.pages.sign_in_page.SignInPage(self.driver)\n sign_in_page.input_email_in_register(\"<EMAIL>\")\n sign_in_page.click_register_button()\n\n register_page = testcase.pages.registration_page.RegistrationPage(self.driver)\n assert register_page.page_heading().lower() == 'your personal information'\n\n register_page.gender_selection()\n register_page.firstname_input('Iulia')\n\n register_page.lastname_input('buhuhi')\n register_page.pass_input('<PASSWORD>')\n\n register_page.select_days_dropdown('1')\n register_page.select_months_dropdown('3')\n register_page.select_years_dropdown('2014')\n\n assert register_page.input_your_address_firstname() == 'Iulia'\n assert register_page.input_your_address_lastname() == 'buhuhi'\n\n register_page.input_your_address_company('polus')\n register_page.input_your_address_address('strada Colinei, bl34, ap3')\n register_page.input_your_address_city('Baia')\n\n register_page.select_state_dropdown('Oregon')\n register_page.input_your_address_postcode('50040')\n register_page.select_country_dropdown('United States')\n register_page.input_your_address_phone('567 899 9999')\n register_page.input_your_address_phone_mobile('989 989 0909')\n register_page.input_your_address_alias('Home')\n\n register_page.click_submit_button()\n my_account_page = testcase.pages.my_account_page.MyAccountPage(self.driver)\n assert my_account_page.registration_message() == 'Welcome to your account. Here you can manage all of your personal information and orders.'\n\n def test_valid_login(self):\n main_page = testcase.pages.main_page.MainPage(self.driver)\n main_page.click_sing_in_button()\n\n sign_in_page = testcase.pages.sign_in_page.SignInPage(self.driver)\n sign_in_page.input_email_in_signin('<EMAIL>')\n sign_in_page.input_pass_in_signin('<PASSWORD>')\n sign_in_page.click_login_button()\n my_account_page = testcase.pages.my_account_page.MyAccountPage(self.driver)\n assert my_account_page.registration_message() == 'Welcome to your account. 
Here you can manage all of your personal information and orders.'\n\n def test_sign_in_without_email_and_pass(self):\n main_page = testcase.pages.main_page.MainPage(self.driver)\n main_page.click_sing_in_button()\n sign_in_page = testcase.pages.sign_in_page.SignInPage(self.driver)\n sign_in_page.input_email_in_signin('')\n sign_in_page.input_pass_in_signin('')\n sign_in_page.click_login_button()\n assert sign_in_page.alert_email_missing_message() == 'An email address required.'\n\n def test_sign_in_without_pass(self):\n main_page = testcase.pages.main_page.MainPage(self.driver)\n main_page.click_sing_in_button()\n sign_in_page = testcase.pages.sign_in_page.SignInPage(self.driver)\n sign_in_page.input_email_in_signin('<EMAIL>')\n sign_in_page.input_pass_in_signin('')\n sign_in_page.click_login_button()\n assert sign_in_page.alert_email_missing_message() == 'Password is required.'\n\n def test_forgot_your_pass_valid(self):\n main_page = testcase.pages.main_page.MainPage(self.driver)\n main_page.click_sing_in_button()\n sign_in_page = testcase.pages.sign_in_page.SignInPage(self.driver)\n sign_in_page.forgot_password_link()\n forgot_your_pass = testcase.pages.forgot_your_pass_page.ForgotYourPassPage(self.driver)\n assert forgot_your_pass.forgot_your_pass_message() == 'Please enter the email address you used to register. We will then send you a new password.'\n\n # def test_sign_in_invalid_email_address(self):\n def test_contact_us_valid(self):\n main_page = testcase.pages.main_page.MainPage(self.driver)\n main_page.click_contact_us_button()\n contact_us_page = testcase.pages.contact_us_page.ContactUsPage(self.driver)\n contact_us_page.subject_heading_dropdown('Customer service')\n contact_us_page.email_address_input('<EMAIL>')\n contact_us_page.order_reference_input('reference 01')\n contact_us_page.message_input_contact_us('Buna ziua, \\nvreau sa ma fac o cerere de retur\\n hop si asa.')\n contact_us_page.click_send_button()\n assert contact_us_page.alert_message_succes() == 'Your message has been successfully sent to our team.'\n time.sleep(5)\n\n def test_contact_us_no_credentials(self):\n main_page = testcase.pages.main_page.MainPage(self.driver)\n main_page.click_contact_us_button()\n contact_us_page = testcase.pages.contact_us_page.ContactUsPage(self.driver)\n\n contact_us_page.click_send_button()\n assert contact_us_page.alert_message_missing() == 'Invalid email address.'\n\n def test_contact_us_without_message(self):\n main_page = testcase.pages.main_page.MainPage(self.driver)\n main_page.click_contact_us_button()\n contact_us_page = testcase.pages.contact_us_page.ContactUsPage(self.driver)\n contact_us_page.email_address_input('<EMAIL>')\n contact_us_page.click_send_button()\n assert contact_us_page.alert_message_missing() == 'The message cannot be blank.'\n\n def test_contact_us_only_with_email_and_message(self):\n main_page = testcase.pages.main_page.MainPage(self.driver)\n main_page.click_contact_us_button()\n contact_us_page = testcase.pages.contact_us_page.ContactUsPage(self.driver)\n contact_us_page.email_address_input('<EMAIL>')\n contact_us_page.message_input_contact_us('Buna ziua, \\nvreau sa ma fac o cerere de retur\\n hop si asa.')\n contact_us_page.click_send_button()\n assert contact_us_page.alert_message_missing() == 'Please select a subject from the list provided.'\n\n def test_contact_us_with_email_subject_and_message(self):\n main_page = testcase.pages.main_page.MainPage(self.driver)\n main_page.click_contact_us_button()\n contact_us_page = 
testcase.pages.contact_us_page.ContactUsPage(self.driver)\n contact_us_page.subject_heading_dropdown('Customer service')\n contact_us_page.email_address_input('<EMAIL>')\n contact_us_page.message_input_contact_us('Buna ziua, \\nvreau sa ma fac o cerere de retur\\n hop si asa.')\n contact_us_page.click_send_button()\n assert contact_us_page.alert_message_succes() == 'Your message has been successfully sent to our team.'\n\n def test_contact_us_very_long_message(self):\n main_page = testcase.pages.main_page.MainPage(self.driver)\n main_page.click_contact_us_button()\n contact_us_page = testcase.pages.contact_us_page.ContactUsPage(self.driver)\n contact_us_page.subject_heading_dropdown('Customer service')\n contact_us_page.email_address_input('<EMAIL>')\n contact_us_page.message_input_contact_us('Buna ziua, \\nvreau sa ma fac o cerere de retur\\n hop si asa.' * 1000)\n contact_us_page.click_send_button()\n assert contact_us_page.alert_message_succes() == 'Your message has been successfully sent to our team.'\n\n def test_add_to_cart(self):\n main_page = testcase.pages.main_page.MainPage(self.driver)\n # main_page.click_cart_button()\n\n main_page.hover_nth_product(1)\n main_page.click_add_to_cart_nth_product_button(1)\n main_page.click_continue_to_checkout_button()\n cart_page = testcase.pages.cart_page.CartPage(self.driver)\n\n assert '1 ' in cart_page.heading_counter_number_of_items()\n\n def test_add_multiple_products_to_cart(self):\n main_page = testcase.pages.main_page.MainPage(self.driver)\n\n for i in range(1, 6):\n main_page.hover_nth_product(i)\n main_page.click_add_to_cart_nth_product_button(i)\n main_page.click_continue_shopping_button()\n main_page.hover_shopping_cart_button()\n main_page.click_button_check_out()\n\n cart_page = testcase.pages.cart_page.CartPage(self.driver)\n\n assert '5' in cart_page.heading_counter_number_of_items()\n\n def test_same_product_in_cart_as_added(self):\n main_page = testcase.pages.main_page.MainPage(self.driver)\n product_names = []\n for i in range(1, 5):\n main_page.hover_nth_product(i)\n main_page.click_add_to_cart_nth_product_button(i)\n main_page.click_continue_shopping_button()\n product_names.append(main_page.title_of_nth_product(i))\n\n main_page.hover_shopping_cart_button()\n main_page.click_button_check_out()\n\n cart_page = testcase.pages.cart_page.CartPage(self.driver)\n cart_product_names = cart_page.cart_product_names()\n assert cart_product_names == product_names\n\n time.sleep(5)\n\n def test_empty_cart(self):\n main_page = testcase.pages.main_page.MainPage(self.driver)\n\n for i in range(1, 2):\n main_page.hover_nth_product(i)\n main_page.click_add_to_cart_nth_product_button(i)\n main_page.click_continue_shopping_button()\n main_page.hover_shopping_cart_button()\n main_page.click_button_check_out()\n\n cart_page = testcase.pages.cart_page.CartPage(self.driver)\n for i in range(1, 2):\n cart_page.click_of_nth_recycle_bin(1)\n\n assert 'o' in cart_page.heading_counter_number_of_items()\n assert cart_page.message_no_product_in_cart() == 'Your shopping cart is empty.'\n\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "11989990", "language": "Python", "matching_score": 3.8653151988983154, "max_stars_count": 0, "path": "testcase/main.py" }, { "content": "from selenium.webdriver.support.select import Select\n\nfrom testcase.locators.registration_page_locators import RegistrationPageLocators\nfrom testcase.pages.base_page import BasePage\n\n\nclass RegistrationPage(BasePage):\n def __init__(self, driver):\n 
super().__init__(driver)\n self.locators = RegistrationPageLocators(self.driver)\n\n def page_heading(self):\n element = self.locators.registration_heading()\n return element.text\n\n def firstname_input(self, firstname):\n elements = self.locators.first_name_input()\n elements.send_keys(firstname)\n\n def gender_selection(self):\n elements = self.locators.gender_selector()\n elements.click()\n\n def lastname_input(self, lastname):\n elements = self.locators.lastname_locator()\n elements.send_keys(lastname)\n\n def pass_input(self, password):\n elements = self.locators.pass_input()\n elements.send_keys(password)\n\n def select_days_dropdown(self, day):\n select_element = self.locators.dropdown_date_days()\n element = Select(select_element)\n element.select_by_value(day)\n\n def select_months_dropdown(self, month):\n select_element = self.locators.dropdown_date_months()\n element = Select(select_element)\n element.select_by_value(month)\n\n def select_years_dropdown(self, year):\n select_element = self.locators.dropdown_date_years()\n element = Select(select_element)\n element.select_by_value(year)\n\n def input_your_address_firstname(self):\n element = self.locators.your_address_firstname_input()\n element.click()\n return element.get_attribute(\"value\")\n\n def input_your_address_lastname(self):\n elements = self.locators.your_address_lastname_input()\n elements.click()\n return elements.get_attribute('value')\n\n def input_your_address_company(self, company):\n elements = self.locators.your_address_company()\n elements.send_keys(company)\n\n def input_your_address_address(self, address):\n elements = self.locators.your_address_address()\n elements.send_keys(address)\n\n def input_your_address_city(self, city):\n elements = self.locators.your_address_city()\n elements.send_keys(city)\n\n def select_state_dropdown(self, state):\n state_element = self.locators.your_address_state()\n element = Select(state_element)\n element.select_by_visible_text(state)\n\n def input_your_address_postcode(self, postcode):\n element = self.locators.your_address_postcode()\n element.send_keys(postcode)\n\n def select_country_dropdown(self, country):\n state_element = self.locators.your_address_country()\n element = Select(state_element)\n element.select_by_visible_text(country)\n\n def input_your_address_phone(self, phone):\n element = self.locators.your_address_phone()\n element.send_keys(phone)\n\n def input_your_address_phone_mobile(self, mobile):\n element = self.locators.your_address_phone_mobile(mobile)\n element.send_keys(mobile)\n\n def input_your_address_alias(self, alias):\n element = self.locators.your_address_alias()\n element.clear()\n element.send_keys(alias)\n\n def click_submit_button(self):\n element = self.locators.registration_button()\n element.click()", "id": "491659", "language": "Python", "matching_score": 4.791927337646484, "max_stars_count": 0, "path": "testcase/pages/registration_page.py" }, { "content": "from selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.wait import WebDriverWait\n\nfrom testcase.locators.base_page_locators import BasePageLocators\n\n\nclass RegistrationPageLocators(BasePageLocators):\n def registration_heading(self):\n return WebDriverWait(self.driver, 5).until(\n EC.visibility_of_element_located((\n By.CSS_SELECTOR,\n '#account-creation_form>.account_creation:first-child>h3.page-subheading'\n ))\n )\n\n def first_name_input(self):\n return 
self.driver.find_element_by_css_selector('#customer_firstname')\n\n def gender_selector(self):\n return self.driver.find_element_by_css_selector('#id_gender2')\n\n def lastname_locator(self):\n return self.driver.find_element_by_css_selector('#customer_lastname')\n\n def pass_input(self):\n return self.driver.find_element_by_id('passwd')\n\n def dropdown_date_days(self):\n return self.driver.find_element_by_id('days')\n\n def dropdown_date_months(self):\n return self.driver.find_element_by_id('months')\n\n def dropdown_date_years(self):\n return self.driver.find_element_by_id('years')\n\n def your_address_firstname_input(self):\n return self.driver.find_element_by_id('firstname')\n\n def your_address_lastname_input(self):\n return self.driver.find_element_by_id('lastname')\n\n def your_address_company(self):\n return self.driver.find_element_by_id('company')\n\n def your_address_address(self):\n return self.driver.find_element_by_id('address1')\n\n def your_address_city(self):\n return self.driver.find_element_by_id('city')\n\n def your_address_state(self):\n return self.driver.find_element_by_id('id_state')\n\n def your_address_postcode(self):\n return self.driver.find_element_by_id('postcode')\n\n def your_address_country(self):\n return self.driver.find_element_by_id('id_country')\n\n def your_address_phone(self):\n return self.driver.find_element_by_id('phone')\n\n def your_address_phone_mobile(self, mobile):\n return self.driver.find_element_by_id('phone_mobile')\n\n def your_address_alias(self):\n return self.driver.find_element_by_id('alias')\n\n def registration_button(self):\n return self.driver.find_element_by_id('submitAccount')", "id": "8336670", "language": "Python", "matching_score": 4.021590709686279, "max_stars_count": 0, "path": "testcase/locators/registration_page_locators.py" }, { "content": "from selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.wait import WebDriverWait\n\nfrom testcase.locators.base_page_locators import BasePageLocators\n\n\nclass SignInPageLocators(BasePageLocators):\n def email_register_input(self):\n return self.driver.find_element_by_id(\"email_create\")\n\n def register_button(self):\n return self.driver.find_element_by_id(\"SubmitCreate\")\n\n def register_error_message(self):\n return WebDriverWait(self.driver, 5).until(\n EC.visibility_of_element_located((By.ID, \"create_account_error\"))\n )\n\n def creation_form(self):\n return WebDriverWait(self.driver, 5).until(\n EC.visibility_of_element_located((By.ID, \"account-creation_form\"))\n )\n\n def sign_in_email_input(self):\n return self.driver.find_element_by_id('email')\n\n def sign_in_pass_input(self):\n return self.driver.find_element_by_id('passwd')\n\n def sign_in_button(self):\n return self.driver.find_element_by_id('SubmitLogin')\n\n def alert_message(self):\n return self.driver.find_element_by_css_selector('.alert-danger>ol>li')\n\n def forgot_your_pass(self):\n return self.driver.find_element_by_css_selector('.lost_password>a')", "id": "1089590", "language": "Python", "matching_score": 3.585747718811035, "max_stars_count": 0, "path": "testcase/locators/sign_in_locators.py" }, { "content": "from testcase.locators.sign_in_locators import SignInPageLocators\nfrom testcase.pages.base_page import BasePage\n\n\nclass SignInPage(BasePage):\n def __init__(self, driver):\n super().__init__(driver)\n self.locators = SignInPageLocators(self.driver)\n\n def input_email_in_register(self, email):\n element = 
self.locators.email_register_input()\n element.send_keys(email)\n\n def click_register_button(self):\n element = self.locators.register_button()\n element.click()\n\n def register_error_message(self):\n element = self.locators.register_error_message()\n return element.text\n\n def input_email_in_signin(self, email):\n element = self.locators.sign_in_email_input()\n element.send_keys(email)\n\n def input_pass_in_signin(self, password):\n element = self.locators.sign_in_pass_input()\n element.send_keys(password)\n\n def click_login_button(self):\n element = self.locators.sign_in_button()\n element.click()\n\n def alert_email_missing_message(self):\n element = self.locators.alert_message()\n return element.text\n\n def forgot_password_link(self):\n element = self.locators.forgot_your_pass()\n element.click()", "id": "3187384", "language": "Python", "matching_score": 1.914301872253418, "max_stars_count": 0, "path": "testcase/pages/sign_in_page.py" }, { "content": "from selenium.webdriver.support.select import Select\n\nfrom testcase.locators.contact_us_page_locators import ContactUsPageLocators\nfrom testcase.pages.base_page import BasePage\n\n\nclass ContactUsPage(BasePage):\n def __init__(self, driver):\n super().__init__(driver)\n self.locators = ContactUsPageLocators(self.driver)\n\n def subject_heading_dropdown(self, subject):\n select_element = self.locators.subject_heading_dropdown()\n element = Select(select_element)\n element.select_by_visible_text(subject)\n\n def email_address_input(self, email):\n element = self.locators.email_address_input()\n element.send_keys(email)\n\n def order_reference_input(self, reference):\n element = self.locators.order_reference_input()\n element.send_keys(reference)\n\n def message_input_contact_us(self, message):\n element = self.locators.message_input_contactus()\n element.send_keys(message)\n\n def click_file_Upload_button(self):\n element = self.locators.file_upload_button()\n element.click()\n\n def click_send_button(self):\n element = self.locators.send_button()\n element.click()\n\n def alert_message_succes(self):\n element = self.locators.alert_success_message()\n return element.text\n\n def alert_message_missing(self):\n element = self.locators.alert_message_no_email()\n return element.text\n", "id": "5605578", "language": "Python", "matching_score": 4.243138313293457, "max_stars_count": 0, "path": "testcase/pages/contact_us_page.py" }, { "content": "from selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.wait import WebDriverWait\n\nfrom testcase.locators.base_page_locators import BasePageLocators\n\n\nclass ContactUsPageLocators(BasePageLocators):\n def subject_heading_dropdown(self):\n return self.driver.find_element_by_id('id_contact')\n\n def email_address_input(self):\n return self.driver.find_element_by_id('email')\n\n def order_reference_input(self):\n return self.driver.find_element_by_id('id_order')\n\n def message_input_contactus(self):\n return self.driver.find_element_by_id('message')\n\n def file_upload_button(self):\n return self.driver.find_element_by_id('fileUpload')\n\n def send_button(self):\n return self.driver.find_element_by_id('submitMessage')\n\n def alert_success_message(self):\n return self.driver.find_element_by_css_selector('.center_column>.alert-success')\n\n def alert_message_no_email(self):\n return WebDriverWait(self.driver, 5).until(\n EC.visibility_of_element_located((By.CSS_SELECTOR, '.alert-danger>ol>li'))\n )", "id": 
"10187154", "language": "Python", "matching_score": 4.233066082000732, "max_stars_count": 0, "path": "testcase/locators/contact_us_page_locators.py" }, { "content": "from selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.wait import WebDriverWait\n\nfrom testcase.locators.base_page_locators import BasePageLocators\n\n\nclass SearchPageLocators(BasePageLocators):\n def first_result_product(self):\n return WebDriverWait(self.driver, 5).until(\n EC.visibility_of_element_located((\n By.CSS_SELECTOR,\n \".right-block>h5:first-child>a\"\n ))\n )\n\n def invalid_search_message(self):\n return self.driver.find_element_by_css_selector('.alert-warning')", "id": "11349627", "language": "Python", "matching_score": 3.9623262882232666, "max_stars_count": 0, "path": "testcase/locators/search_page_locators.py" }, { "content": "from selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.wait import WebDriverWait\n\n\nclass BasePageLocators(object):\n def __init__(self, driver):\n self.driver = driver\n\n\ndef wait_for_element_to_be_visible(driver, by, selector):\n return WebDriverWait(driver, 5).until(EC.visibility_of_element_located((by, selector)))", "id": "12040895", "language": "Python", "matching_score": 2.4498963356018066, "max_stars_count": 0, "path": "testcase/locators/base_page_locators.py" }, { "content": "from selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.wait import WebDriverWait\n\nfrom testcase.locators.base_page_locators import BasePageLocators\n\n\nclass CartPageLocators(BasePageLocators):\n def heading_counter_number_of_items(self):\n return self.driver.find_element_by_css_selector('.heading-counter')\n\n def titles_of_items_in_cart(self):\n return self.driver.find_elements_by_css_selector('.cart_description>.product-name>a')\n\n def nth_recycle_bin(self, n):\n\n return self.driver.find_element_by_css_selector(\n \"tbody>.cart_item:nth-child(\" + str(n) + \")>.cart_delete>div>a\"\n )\n\n def alert_no_product_in_cart(self):\n return WebDriverWait(self.driver, 5).until(\n EC.visibility_of_element_located((By.CSS_SELECTOR, 'p.alert-warning'))\n )", "id": "2658745", "language": "Python", "matching_score": 3.6097939014434814, "max_stars_count": 0, "path": "testcase/locators/cart_page_locators.py" }, { "content": "from testcase.locators.cart_page_locators import CartPageLocators\nfrom testcase.pages.base_page import BasePage\n\n\nclass CartPage(BasePage):\n def __init__(self, driver):\n super().__init__(driver)\n self.locators = CartPageLocators(self.driver)\n\n def heading_counter_number_of_items(self):\n element = self.locators.heading_counter_number_of_items()\n return element.text\n\n def cart_product_names(self):\n elements = self.locators.titles_of_items_in_cart()\n list_of_titles = []\n for elem in elements:\n list_of_titles.append(elem.text)\n return list_of_titles\n\n def click_of_nth_recycle_bin(self, n):\n element = self.locators.nth_recycle_bin(n)\n element.click()\n\n def message_no_product_in_cart(self):\n element = self.locators.alert_no_product_in_cart()\n return element.text", "id": "6877841", "language": "Python", "matching_score": 1.4184192419052124, "max_stars_count": 0, "path": "testcase/pages/cart_page.py" }, { "content": "from testcase.locators.category_page_locators import CategoryPageLocators\nfrom testcase.pages.base_page import BasePage\n\n\nclass 
CategoryProductsPage(BasePage):\n def __init__(self, driver):\n super().__init__(driver)\n self.locators = CategoryPageLocators(self.driver)\n\n def products_list(self):\n elements = self.locators.products()\n products_list = []\n for elem in elements:\n products_list.append(elem.text)\n\n return products_list\n", "id": "11046978", "language": "Python", "matching_score": 1.433323860168457, "max_stars_count": 0, "path": "testcase/pages/category_products_page.py" }, { "content": "from testcase.locators.base_page_locators import BasePageLocators\n\n\nclass CategoryPageLocators(BasePageLocators):\n def products(self):\n return self.driver.find_elements_by_css_selector('.product-container>div>h5>a.product-name')", "id": "4169636", "language": "Python", "matching_score": 1.352332592010498, "max_stars_count": 0, "path": "testcase/locators/category_page_locators.py" }, { "content": "from testcase.locators.base_page_locators import BasePageLocators\n\n\nclass ForgotYourPassLocators(BasePageLocators):\n def forgot_your_pass_message(self):\n return self.driver.find_element_by_css_selector('.box>p')", "id": "7347787", "language": "Python", "matching_score": 3.364150285720825, "max_stars_count": 0, "path": "testcase/locators/forgot_your_pass_locators.py" }, { "content": "from testcase.locators.forgot_your_pass_locators import ForgotYourPassLocators\nfrom testcase.pages.base_page import BasePage\n\n\nclass ForgotYourPassPage(BasePage):\n def __init__(self, driver):\n super().__init__(driver)\n self.locators = ForgotYourPassLocators(self.driver)\n\n def forgot_your_pass_message(self):\n element = self.locators.forgot_your_pass_message()\n return element.text", "id": "1122197", "language": "Python", "matching_score": 0.3354683816432953, "max_stars_count": 0, "path": "testcase/pages/forgot_your_pass_page.py" }, { "content": "from testcase.locators.base_page_locators import BasePageLocators\n\n\nclass MyAccountPageLocators(BasePageLocators):\n def registration_message(self):\n return self.driver.find_element_by_css_selector('#center_column>.info-account')", "id": "1314936", "language": "Python", "matching_score": 2.708345413208008, "max_stars_count": 0, "path": "testcase/locators/my_account_page_locators.py" }, { "content": "from testcase.locators.my_account_page_locators import MyAccountPageLocators\nfrom testcase.pages.base_page import BasePage\n\n\nclass MyAccountPage(BasePage):\n def __init__(self, driver):\n super().__init__(driver)\n self.locators = MyAccountPageLocators(self.driver)\n\n def registration_message(self):\n element = self.locators.registration_message()\n return element.text", "id": "12042545", "language": "Python", "matching_score": 1.6633574962615967, "max_stars_count": 0, "path": "testcase/pages/my_account_page.py" } ]
3.239342
LilyRose2798
[ { "content": "#!/usr/bin/env python3\nfrom pyjavaproperties import Properties\nfrom typing import Literal, Union\nfrom dataclasses import dataclass\nfrom zipfile import Path as ZipPath\nfrom pathlib import Path\nfrom argparse import ArgumentParser, Namespace\nfrom sys import exit\n\n@dataclass\nclass Args(Namespace):\n language_code: str\n output_path: Union[Path, None, Literal[False]]\n input_path: Path\n\ndef path_arg(path: str) -> Path:\n return Path(path)\n\ndef get_shaderpacks_path(input_path: Path) -> Path:\n if input_path.name == \"shaderpacks\":\n return input_path\n elif input_path.name == \".minecraft\":\n return Path(input_path, \"shaderpacks\")\n else:\n return Path(input_path, \".minecraft/shaderpacks\")\n\ndef get_properties(path: Path) -> Properties:\n properties = Properties()\n properties.load(path.open(\"r\"))\n return properties\n\n@dataclass\nclass ScreenProperty:\n screen_names: set\n options: set\n\ndef parse_screen_property(screen_property: str) -> ScreenProperty:\n parts = screen_property.split()\n screen_names = set()\n options = set()\n for part in parts:\n if part[0] == \"[\" and part[-1] == \"]\":\n screen_names.add(part[1:-1])\n elif not (part[0] == \"<\" and part[-1] == \">\"):\n options.add(part)\n return ScreenProperty(screen_names, options)\n\ndef get_shader_options_readable(shader_lang: str, shader_options_path: Path) -> str:\n shader_base_path = shader_options_path.with_suffix(\"\")\n\n if shader_base_path.is_dir():\n path_type = Path\n elif shader_base_path.is_file() and shader_base_path.suffix == \".zip\":\n path_type = ZipPath\n else:\n raise ValueError(f\"No shader for config [{shader_base_path.name}]\")\n \n try:\n shader_options_properties = get_properties(shader_options_path)\n except:\n raise ValueError(f\"No valid shader options file for shader [{shader_base_path.name}]\")\n\n try:\n shader_lang_properties = get_properties(path_type(shader_base_path, f\"shaders/lang/{shader_lang}.lang\"))\n except:\n shader_langs = ', '.join(lang_path.name.rstrip(\".lang\") for lang_path in path_type(shader_base_path, \"shaders/lang/\").iterdir() if lang_path.name.endswith(\".lang\"))\n raise ValueError(f\"No valid language file for [{shader_lang}] in shader [{shader_base_path.name}]\\nAvailable languages: {shader_langs}\")\n\n try:\n shader_properties = get_properties(path_type(shader_base_path, \"shaders/shaders.properties\"))\n except:\n raise ValueError(f\"No valid properties file for shader [{shader_base_path.name}]\")\n \n screen_properties: dict[str, ScreenProperty] = {prop.lstrip(\"screen.\"): parse_screen_property(val) for prop, val in shader_properties.getPropertyDict().items() if prop.startswith(\"screen.\")}\n screen_screen_names: dict[str, str] = {screen_name: prop for prop, val in screen_properties.items() for screen_name in val.screen_names}\n option_screen_names: dict[str, str] = {option: prop for prop, val in screen_properties.items() for option in val.options}\n\n def get_screen_path(screen_name: str) -> str:\n lang_screen_name: str = shader_lang_properties.getProperty(f\"screen.{screen_name}\")\n parent_screen_name = screen_screen_names.get(screen_name)\n return lang_screen_name if parent_screen_name is None else f\"{get_screen_path(parent_screen_name)} -> {lang_screen_name}\"\n\n return \"\".join(f\"{shader_lang_properties.getProperty(f'option.{prop}')} ({get_screen_path(option_screen_names[prop])}): {val}\\n\" for prop, val in shader_options_properties.getPropertyDict().items())\n\ndef main() -> None:\n parser = 
ArgumentParser(description=\"Parse Iris shader options files and output in a human readable format\")\n parser.add_argument(\"-l\", \"--lang\", metavar=\"language_code\", dest=\"language_code\", default=\"en_US\", help=\"The language code to use\")\n parser.add_argument(\"-o\", \"--output\", metavar=\"output_path\", dest=\"output_path\", nargs=\"?\", const=None, default=False, type=path_arg, help=\"The path to the output file or directory\")\n parser.add_argument(\"input_path\", type=path_arg, help=\"The path to the Iris shader options file or Minecraft installation directory\")\n args: Args = parser.parse_args()\n if args.input_path.is_dir():\n for shader_options_path in get_shaderpacks_path(args.input_path).glob(\"*.txt\"):\n try:\n output = get_shader_options_readable(args.language_code, shader_options_path)\n if args.output_path is False:\n print(f\"{shader_options_path.with_suffix('').name}\\n{output}\")\n elif args.output_path is None:\n shader_options_path.with_stem(f\"{shader_options_path.stem}_readable\").write_text(output)\n else:\n args.output_path.mkdir(parents=True, exist_ok=True)\n Path(args.output_path, f\"{shader_options_path.stem}_readable.txt\").write_text(output)\n except ValueError as err:\n print(err)\n elif args.input_path.is_file():\n try:\n output = get_shader_options_readable(args.language_code, args.input_path)\n if args.output_path is False:\n print(output)\n elif args.output_path is None:\n args.input_path.with_stem(f\"{args.input_path.stem}_readable\").write_text(output)\n else:\n args.output_path.write_text(output)\n except ValueError as err:\n print(err)\n exit(1)\n else:\n print(f\"Invalid input path [{args.input_path}]\")\n exit(1)\n\nif __name__ == \"__main__\":\n main()\n", "id": "6501480", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "iris_shader_options.py" } ]
0
opsbox-ecosystem
[ { "content": "#!/usr/bin/env python\n# coding=utf-8\n\nfrom ansible.module_utils.basic import *\n\n\ndef main():\n fields = {\n \"access_key\": {\"required\": True, \"type\": \"str\"},\n \"secret_key\": {\"required\": True, \"type\": \"str\" },\n \"object\": {\"required\": True, \"type\": \"str\"},\n \"options\": {\"default\": \"\", \"type\": \"str\" },\n \"endpoint\": {\"required\": True, \"type\": \"str\" },\n \"dest\": {\"required\": True, \"type\": \"str\" },\n \"executable\": {\"required\": False, \"default\": \"/usr/bin/ossutil\", \"type\": \"str\"}\n }\n\n module = AnsibleModule(argument_spec=fields)\n params = module.params\n ak = params[\"access_key\"]\n sk = params[\"secret_key\"]\n object_path = params[\"object\"]\n options = params[\"options\"]\n endpoint = params[\"endpoint\"]\n dest = params[\"dest\"]\n ossutil = params[\"executable\"]\n\n cmd = [ossutil, '-i', ak, '-k', sk, '-e', endpoint, 'cp', object_path, dest, options]\n rc, out, err = module.run_command(cmd)\n\n if rc:\n module.fail_json(msg=err, stdout=out)\n else:\n module.exit_json(changed=True, stdout=out, stderr=err)\n\n\nif __name__ == '__main__':\n main()\n", "id": "9577005", "language": "Python", "matching_score": 0, "max_stars_count": 6, "path": "steps/ansible/tool/roles/osspkg/library/ossutil_cp.py" } ]
0
Fonta1n3
[ { "content": "\"\"\"Tests for shas incl wrong types passed\"\"\"\nimport sys\nimport unittest\nfrom wallycore import *\n\nb2h = hex_from_bytes\nh2b = hex_to_bytes\n\nclass SHA_tests(unittest.TestCase):\n\n def test_sha256(self):\n self.assertEqual(b2h(sha256(\"This is a test message to hash\".encode())), \"726ca2c10e9d8b76e5b79f2961c3069a09fdd0a3b9bf8650e091e39b3c6c35be\")\n\n self.assertEqual(b2h(sha256(h2b(\"3e8379862d658e168c71f083bc05169b3b58ca3212e11c838b08629c5ca48a42\"))), \"2f7d292595788655c5288b6e1dc698440d9c12559e3bc1e3cc38005a4add132f\")\n\n\n def test_sha256d(self):\n self.assertEqual(b2h(sha256d(\"This is a test message to hash\".encode())), \"29e04e90a1075caaa06573ea701913148d99fb0b7d6928e33f1aabe6032761a0\")\n\n self.assertEqual(b2h(sha256d(h2b(\"3e8379862d658e168c71f083bc05169b3b58ca3212e11c838b08629c5ca48a42\"))), \"26e30f19dc2b29d8c220766fd5835d8256c87c32804d19b8307e21d6685c9d3e\")\n\n\n def test_sha512(self):\n self.assertEqual(b2h(sha512(\"This is a test message to hash\".encode())), \"2ed34644ddfcf76ca4de13e4632aa61376fbce813fecc5a043a479daaab17b2f8c3f376468d4637cb2e7c9e2b99ad08b8cb56fe6e724e476826f2aa210872c32\")\n\n self.assertEqual(b2h(sha512(h2b(\"3e8379862d658e168c71f083bc05169b3b58ca3212e11c838b08629c5ca48a42\"))), \"d51342efcb114c11045c12f7fede6f9a5fdb11051032bd520a99d79023423f4ac3ab706ce5fa88c0aac46bbbf15bde720cf49eae5be0def3b39e6d3abb29a67b\")\n\n\n def _test_wrong_types_py2(self):\n # Python2 implicitly converts/decodes\n self.assertEquals(b2h(sha256('not bytes')), \"b6cb5f25b258630497a18528fb8f73a64034e94e1ead857a8151e3f30a9835ae\")\n\n self.assertEquals(b2h(sha256d('not bytes')), \"878eb992aeb736646ecf2c76f562c5d411a487d62ac172d098a83afb023d1b53\")\n\n self.assertEquals(b2h(sha512('not bytes')), \"981e82b6ccc079c455cd3fd37b9e04f52f084ffb268a07c47b0447910e2d6280ccbaa5be3f8f062e3e284c98f52039bbddee150a06183ff8d9cb243ef35e3f57\")\n\n\n def _test_wrong_types_py3(self):\n # Python3 raises TypeError\n for shaX in [sha256, sha256d, sha512]:\n with self.assertRaises(TypeError):\n shaX('not bytes')\n\n\n def test_wrong_types(self):\n if sys.version_info.major < 3:\n # Python2 implicitly converts/decodes\n self._test_wrong_types_py2()\n else:\n # Python3 raises TypeError\n self._test_wrong_types_py3()\n\n\n def test_pass_none(self):\n self.assertEquals(b2h(sha256(None)), \"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\")\n self.assertEquals(b2h(sha256d(None)), \"5df6e0e2761359d30a8275058e299fcc0381534545f55cf43e41983f5d4c9456\")\n self.assertEquals(b2h(sha512(None)), \"cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e\")\n\n\nif __name__ == '__main__':\n unittest.main()\n\n", "id": "5542393", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "bc-libwally-swift/CLibWally/libwally-core/src/swig_python/contrib/sha.py" }, { "content": "from flask import Flask, request as flaskRequest, jsonify, render_template\nimport requests\nimport json\nimport time\nfrom datetime import datetime, timedelta\nimport sqlite3\nimport sys\nimport ccxt\nimport os\nfrom threading import Thread\nfrom pathlib import Path \n\n#Config Settings\nallowedFields = [\"keepWeeks\", \"exchanges\", \"currencies\", \"interval\"]\nconfigPath = Path(\"/home/standup/.spotbit/spotbit.config\").expanduser()\n#Default values; these will be overwritten when the config file is read\nexchanges = []\ncurrencies = []\ninterval = 1 #time to wait between GET requests to servers, to avoid 
ratelimits\nkeepWeeks = 3 # add this to the config file\nexchange_limit = 2 #when there are more exchanges than this multithreading is ideal\nperformance_mode = False\n#Database\np = Path(\"/home/standup/.spotbit/sb.db\").expanduser()\ndb = sqlite3.connect(p)\nprint(\"db opened in {}\".format(p))\napp = Flask(__name__)\n\n# split up the number of exchanges per chunk based on how many cpu cores are available\n# cpuOffset: the number of cores you want to try and utilize. \ndef optimize_chunks(cpuOffset):\n return int(len(exchanges) / (os.cpu_count()-cpuOffset))\n\n# Create a dict that contains ccxt objects for every supported exchange. \n# The API will query a subset of these exchanges based on what the user has specified\n# Unsupported exchanges: bitvaro phemex vaultoro\n# Future Plans:\n# Hard coding supported exchanges is a bad practice. CCXT autogenerates code for each exchange and therefore at least in theory may frequently support new exchanges.\n# Need to find a way to automatically create a list of exchange objects. \n# btctradeim doesn't want to work on raspberry pi\ndef init_supported_exchanges():\n objects = { \"binance\":ccxt.binance(), \"bitfinex\":ccxt.bitfinex(), \"bitflyer\":ccxt.bitflyer(), \"bitstamp\":ccxt.bitstamp(), \"bittrex\":ccxt.bittrex(), \"coinbase\":ccxt.coinbase(), \"kraken\":ccxt.kraken(), \"poloniex\":ccxt.poloniex()}\n return objects\n\n# Check if a given exchange is in the list of supported exchanges.\n# Currently, the list of supported exchanges is all those supported by ccxt aside from a small handful that did not seem to work properly. May be bug in ccxt or just a typo in their code / docs\ndef is_supported(exchange):\n try:\n obj = ex_objs[exchange]\n if obj != None:\n return True\n else:\n return False\n except Exception as e:\n print(f\"caught an error: {e}\")\n return False\n\n# We create a list of all exchanges to do error checking on user input\nex_objs = init_supported_exchanges()\nnum_exchanges = len(ex_objs)\nprint(f\"created list of {num_exchanges}\")\n\n# TODO: create an html page to render here\[email protected]('/status')\ndef status():\n return \"server is running\"\n\n# configure the settings of Spotbit while the server is still running\n# send a GET request to this route to view current settings\n# send a POST request to this route with settings fields stored in JSON to update settings\n# TODO: make the updates persistant by also writing them to file.\[email protected]('/configure', methods=['GET', 'POST'])\ndef configure():\n # seems like this needs to be done in order to reference global vars inside of the flask server thread\n global keepWeeks\n global currencies\n global exchanges\n global interval\n if flaskRequest.method == 'POST':\n #return the config settings TODO: error check so that the user doesn't have to submit everything at once. Also implement a form here.\n keepWeeks = flaskRequest.json(\"keepWeeks\")\n exchanges = flaskRequest.json(\"exchanges\")\n currencies = flaskRequest.json(\"currencies\")\n interval = flaskRequest.json(\"interval\")\n return {'updated settings?':'yes', 'keepWeeks':keepWeeks, 'currencies':currencies, 'exchanges':exchanges, 'interval':interval}\n else:\n return {'updated settings?':'no', 'keepWeeks':keepWeeks, 'currencies':currencies, 'exchanges':exchanges, 'interval':interval}\n \n\n# Get the latest price entry in the database.\n# Currency: the three letter base currency desired. Must be a currency you are already collecting data for\n# Exchange: the exchange to query data for from the local database. 
Must be an exchange you are already caching data for (for now)\[email protected]('/now/<currency>/<exchange>')\ndef now(currency, exchange):\n db_n = sqlite3.connect(p)\n if exchange in exchanges:\n #if the exchange is already in the config file\n ticker = \"BTC-{}\".format(currency.upper())\n #statement = \"SELECT * FROM {} WHERE pair = '{}' AND timestamp = (SELECT MAX(timestamp) FROM {});\".format(exchange, ticker, exchange)\n statement = \"SELECT * FROM {} WHERE pair = '{}' ORDER BY timestamp DESC LIMIT 1;\".format(exchange, ticker)\n cursor = db_n.execute(statement)\n res = cursor.fetchone()\n if res != None:\n db_n.close()\n return {'id':res[0], 'timestamp':res[1], 'datetime':res[2], 'currency_pair':res[3], 'open':res[4], 'high':res[5], 'low':res[6], 'close':res[7], 'vol':res[8]} \n else:\n db_n.close()\n return {'id': res}\n else:\n #make a direct request\n res = request_single(exchange, currency)\n db_n.close()\n if res != None:\n return res\n else:\n return {'id': res}\n\n# Get data from local storage inside of a certain range.\n# Parameters: \n# Currency: the fiat base currency to fetch data for. Should be a three letter currency code in lowercase.\n# Exchange: the exchange to get data from.\n# date_start and date_end: date_start is the oldest time value in the range desired. It can be provided as a millisecond timestamp or as a datetime formatted as \"YYYY-MM-DDTHH:mm:SS\".\[email protected]('/hist/<currency>/<exchange>/<date_start>/<date_end>', methods=['GET'])\ndef hist(currency, exchange, date_start, date_end):\n db_n = sqlite3.connect(p)\n #check what format of dates we have\n if (str(date_start)).isdigit():\n date_s = int(date_start) \n date_e = int(date_end) \n else:\n #error checking for malformed dates\n try:\n date_s = (datetime.fromisoformat(date_start.replace(\"T\", \" \"))).timestamp()*1000\n date_e = (datetime.fromisoformat(date_end.replace(\"T\", \" \"))).timestamp()*1000\n except Exception:\n return \"malformed dates. Use YYYY-MM-DDTHH:mm:SS or millisecond timestamps. 
Provide both dates in the same format\"\n statement = \"SELECT * FROM {} WHERE timestamp > {} AND timestamp < {};\".format(exchange, date_s, date_e)\n cursor = db_n.execute(statement)\n res = cursor.fetchall()\n db_n.close()\n return {'columns': ['id', 'timestamp', 'datetime', 'currency_pair', 'open', 'high', 'low', 'close', 'close', 'vol'], 'data':res}\n\n\n# Make a single request, without having to loop through all exchanges and currency pairs.\n# This is intended for when the user requests an exchange in /now that is not present in the database.\n# It will probably not be used for /hist because of the length of time getting arbitrary amounts of historical data can be\ndef request_single(exchange, currency):\n if not is_supported(exchange):\n return \"{} is not supported by CCXT\".format(exchange)\n obj = ex_objs[exchange]\n ticker = \"BTC/{}\".format(currency.upper())\n if obj.has['fetchOHLCV']:\n result = None\n if exchange == \"bitfinex\": #other exchanges requiring special conditions: bitstamp, bitmart\n params = {'limit':100, 'start':(round((datetime.now()-timedelta(hours=1)).timestamp()*1000)), 'end':round(datetime.now().timestamp()*1000)}\n try:\n result = ex_objs[exchange].fetch_ohlcv(symbol=ticker, timeframe='1m', since=None, params=params)\n except Exception as e:\n print(f\"got an error requesting to {exchange}: {e}\")\n else:\n try:\n result = obj.fetch_ohlcv(ticker, timeframe='1m')\n except Exception as e:\n print(f\"got an error requesting to {exchange}: {e}\")\n else:\n try:\n result = obj.fetch_ticker(ticker)\n except Exception as e:\n print(\"got ratelimited on {}\".format(e))\n return {'data': result[-1]}\n\n\n# Make an HTTP GET request to exchanges via the ccxt API\n# TODO: add error checking for if an exchange supports ohlc data. If not, default to regular price data. (done)\n# Loop through all chosen exchanges, check if they are supported, loop through all chosen currencies, for each make request to ohlc endpoint if supported, else price ticker. Write data to local storage.\n# Bitfinex special rule: bitfinex returns candles from the beginning of time, not the most recent. This is a behavior of the API itself and has nothing to do with this code or ccxt. 
Therefore we must specify the timeframe desired in the optional params field of the function call with a dictionary of available options.\ndef request(exchanges,interval,db_n):\n global currencies\n for e in exchanges:\n for curr in currencies:\n ticker = \"BTC/{}\".format(curr)\n success = True\n if ex_objs[e].has['fetchOHLCV']:\n candle = None\n if e == \"bitfinex\":\n params = {'limit':100, 'start':(round((datetime.now()-timedelta(hours=1)).timestamp()*1000)), 'end':round(datetime.now().timestamp()*1000)}\n try:\n candle = ex_objs[e].fetch_ohlcv(symbol=ticker, timeframe='1m', since=None, params=params)\n except Exception as err: #figure out this error type\n #the point so far is to gracefully handle the error, but waiting for the next cycle should be good enough\n print(f\"error fetching candle (bitfinex): {err}\")\n success = False\n else:\n try:\n candle = ex_objs[e].fetch_ohlcv(ticker, '1m') #'ticker' was listed as 'symbol' before | interval should be determined in the config file \n except Exception as err:\n print(f\"error fetching candle: {err}\")\n success = False\n if success:\n for line in candle:\n ts = datetime.fromtimestamp(line[0]/1e3) #check here if we have a ms timestamp or not\n for l in line:\n if l == None:\n l = 0\n #this is another error check condition for when null values slip into the data.\n statement = \"INSERT INTO {} (timestamp, datetime, pair, open, high, low, close, volume) VALUES ({}, '{}', '{}', {}, {}, {}, {}, {});\".format(e, line[0], ts, ticker.replace(\"/\", \"-\"), line[1], line[2], line[3], line[4], line[5])\n try:\n db_n.execute(statement)\n db_n.commit()\n except sqlite3.OperationalError as op:\n nulls = []\n c = 0\n # identify where the null value is \n for l in line:\n if l == None:\n nulls.append(c)\n c += 1\n print(f\"exchange: {e} currency: {curr}\\nsql statement: {statement}\\nerror: {op}(moving on)\")\n candle_len = len(candle)\n print(f\"inserted into {e} {curr} {candle_len} times\")\n else:\n try:\n price = ex_objs[e].fetch_ticker(ticker)\n except Exception as err:\n print(f\"error fetching ticker: {err}\")\n success = False\n if success:\n ts = None\n if str(price['timestamp'])[-3:] == \"000\":\n ts = datetime.fromtimestamp(price['timestamp']/1e3)\n else:\n ts = datetime.fromtimestamp(price['timestamp'])\n statement = \"INSERT INTO {} (timestamp, datetime, pair, open, high, low, close, volume) VALUES ({}, '{}', '{}', {}, {}, {}, {}, {});\".format(e, price['timestamp'], ts, ticker.replace(\"/\", \"-\"), 0.0, 0.0, 0.0, price['last'], 0.0)\n db_n.execute(statement)\n db_n.commit()\n print(f\"inserted into {e} {curr}\")\n time.sleep(interval)\n\n# Thread method. Makes requests every interval seconds. 
\n# Adding this method here to make request more versatile while maintaining the same behavior\ndef request_periodically(exchanges, interval):\n db_n = sqlite3.connect(p)\n while True:\n request(exchanges,interval,db_n)\n\n# Split the list of exchanges into chunks up to size chunk_size.\n# Create a thread for each chunk and start it, then add the thread to a list.\n# Return a list of tuples that contain the list of whats in each chunk and a list of the actual thread objects.\ndef request_fast(exchanges,interval, chunk_size):\n count = 0\n chunks = []\n threads = []\n current_chunk = []\n # split up the list of exchanges\n for e in exchanges:\n if count < chunk_size:\n current_chunk.append(e)\n count += 1\n else:\n count = 0\n chunks.append(current_chunk)\n current_chunk = []\n # Start a thread for each chunk\n for chunk in chunks:\n print(f\"creating thread for chunk {chunk}\")\n cThread = Thread(target=request_periodically, args=(chunk,interval))\n cThread.start()\n threads.append(cThread)\n return (chunks, threads)\n\n# Read the values stored in the config file and store them in memory.\n# Run during install and at every run of the server.\n# Returns void\ndef read_config():\n global exchanges\n global interval\n global performance_mode\n with open(configPath, \"r\") as f:\n lines = f.readlines()\n #read each line in the file\n for line in lines:\n #split the current line\n setting_line = line.split(\"=\")\n #if there are invalid lines in the file ignore them\n if \"#\" in setting_line[0]:\n pass #ignore comments\n elif setting_line[0] not in allowedFields and \"#\" not in setting_line[0]:\n print(f\"invalid config setting {setting_line[0]}\")\n elif setting_line[0] == \"keepWeeks\":\n try:\n keepWeeks = int(setting_line[1])\n except Exception as e:\n print(f\"could not read keepWeeks field. Using default setting of {keepWeeks} weeks. Error: {e}\")\n elif setting_line[0] == \"exchanges\":\n exs = setting_line[1].split(\" \")\n for e in exs:\n e = e.replace(\"\\n\", \"\")\n if e == \"all\":\n exchanges = list(ex_objs.keys())\n break\n if e not in exchanges and is_supported(e) == True:\n exchanges.append(e)\n else:\n print(f\"{e} is not supported by ccxt!\")\n elif setting_line[0] == \"currencies\":\n currs = setting_line[1].split(\" \")\n for c in currs:\n #need to make sure currency codes are all caps and have newlines dropped off\n c_formatted = (c.replace(\"\\n\", \"\")).upper()\n if c_formatted not in currencies:\n if \"\\n\" in c:\n currencies.append(c_formatted)\n else:\n currencies.append(c_formatted)\n elif setting_line[0] == \"interval\":\n interval = int(setting_line[1])\n else:\n return\n #print statement for debugging\n len_exchanges = len(exchanges)\n if len_exchanges > exchange_limit:\n print(f\"{len_exchanges} exchanges detected. Using performance mode (multithreading)\")\n performance_mode = True\n\n print(f\" Settings read:\\n keepWeeks: {keepWeeks}\\n exchanges: {exchanges}\\n currencies: {currencies}\\n interval: {interval}\")\n\n# This method is called at the first run.\n# It sets up the required tables inside of a local sqlite3 database. There is one table for each exchange.\n# Tables are only created if they do not already exist. 
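Every table shares the fixed schema (id INTEGER PRIMARY KEY AUTOINCREMENT, timestamp INTEGER, datetime TEXT, pair TEXT, open REAL, high REAL, low REAL, close REAL, volume REAL). 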
Install will attempt to create tables for every listed exchange at once when called.\ndef install():\n read_config()\n #create the sqlite db\n len_exchanges = len(exchanges)\n print(f\"creating tables for {len_exchanges} exchanges if they do not exist already.\")\n for exchange in exchanges:\n sql = f\"CREATE TABLE IF NOT EXISTS {exchange} (id INTEGER PRIMARY KEY AUTOINCREMENT, timestamp INTEGER, datetime TEXT, pair TEXT, open REAL, high REAL, low REAL, close REAL, volume REAL)\"\n print(f\"created table for {exchange}\")\n db.execute(sql)\n db.commit()\n db.close()\n\n# Remove every entry older than now-keepWeeks from all tables in the database\n# if there is nothing to prune then nothing will be pruned.\ndef prune(keepWeeks):\n # prune checks will run continuously and check every 60k seconds right now.\n db_n = sqlite3.connect(p)\n while True:\n for exchange in exchanges:\n #count = ((db.execute(\"SELECT Count(*) FROM {}\".format(exchange))).fetchone())[0]\n cutoff = (datetime.now()-timedelta(weeks=keepWeeks)).timestamp()*1000\n statement = \"DELETE FROM {} WHERE timestamp < {};\".format(exchange, cutoff)\n db_n.execute(statement)\n db_n.commit()\n time.sleep(60000)\n \n\nif __name__ == \"__main__\":\n install() #install will call read_config\n chunk_size = optimize_chunks(cpuOffset=0)\n threadResults = None\n # spin up many threads if there is a lot of exchanges present in the config file\n if performance_mode:\n # request_fast will create and start the threads automatically\n threadResults = request_fast(exchanges, interval, chunk_size) \n else:\n print(\"performance mode is OFF\")\n prices_thread = Thread(target=request_periodically, args=(exchanges,interval))\n prices_thread.start()\n pruning_thread = Thread(target=prune, args=[keepWeeks])\n pruning_thread.start()\n app.run()\n db.close()\n\n\n", "id": "10479125", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "server.py" } ]
0
merryHunter
[ { "content": "import cv2\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\nimport params\nimport matplotlib.pyplot as plt\nimport PIL\nimport os\nimport eval_segm as metric\n\ninput_size = params.input_size\nbatch_size = params.batch_size\norig_width = params.orig_width\norig_height = params.orig_height\nthreshold = params.threshold\nmodel = params.model_factory()\n\ndf_test = None #pd.read_csv('input/sample_submission.csv')\nids_test = None #df_test['img'].map(lambda s: s.split('.')[0])\n\nOUTPUT_DIR = 'output/'\nN_CLASSES = 2\n\ndef get_test_mask_paths(filename):\n path_set = []\n with open(filename, \"r\") as f:\n lines = f.readlines()\n for l in lines:\n if l != '\\n':\n l = l.split(' ')\n path_set.append((l[0],l[1].strip()))\n im = cv2.imread(l[1],cv2.IMREAD_GRAYSCALE )\n\n return path_set\n\nnames = get_test_mask_paths(\"train.txt\")\n# for id in ids_test:\n# names.append('{}.jpg'.format(id))\n\n\n# https://www.kaggle.com/stainsby/fast-tested-rle\ndef run_length_encode(mask):\n '''\n img: numpy array, 1 - mask, 0 - background\n Returns run length as string formated\n '''\n inds = mask.flatten()\n runs = np.where(inds[1:] != inds[:-1])[0] + 2\n runs[1::2] = runs[1::2] - runs[:-1:2]\n rle = ' '.join([str(r) for r in runs])\n return rle\n\n\nrles = []\n\nmodel.load_weights(filepath='weights/best_weights.hdf5')\n\n\ndef read_mask_image(mask_img):\n mask_img[mask_img == False] = 0\n mask_img[mask_img == True] = 1\n return mask_img\n\nprint('Predicting on {} samples with batch_size = {}...'.format(len(names), batch_size))\n\n\ndef visualize(img_orig, mask_pred, mask_orig):\n mask_pred = get_transparent_prediction(img_orig, mask_pred)\n plt.subplot(131)\n plt.imshow(img_orig)\n plt.subplot(132)\n plt.imshow(mask_orig)\n plt.subplot(133)\n plt.imshow(mask_pred)\n plt.show()\n\n\ndef get_transparent_prediction(img_orig, mask_pred, alpha=0.5, orig=True):\n output = img_orig.copy()\n if orig:\n image = mask_pred\n else:\n image = np.zeros((orig_height, orig_width, 3), dtype=\"uint8\")\n image[np.where((mask_pred != [0, 0, 0]).all(axis=2))] = [255, 0, 0]\n\n overlay = image.copy()\n cv2.addWeighted(overlay, alpha, output, 1 - alpha, 0, output)\n output = cv2.cvtColor(output, cv2.COLOR_BGR2RGB)\n return output\n\npixel_accuracy = []\n\n\ndef get_pred_orig_grayscale(pred, orig):\n image_pred = np.zeros((orig_height, orig_width), dtype=\"uint8\")\n image_pred[np.where((pred != [0, 0, 0]).all(axis=2))] = np.ones((1, 1), dtype=\"uint8\")\n\n # select indexes where green in intensive so it's a mask\n mask = orig[:,:,1] > 230\n image_orig = np.zeros((orig_height, orig_width, 3), dtype=\"uint8\")\n image_orig[mask] = np.ones((1, 1, 1), dtype=\"uint8\")\n\n # get back to 2-d array\n image_orig = image_orig[:,:,0]\n\n return image_pred, image_orig\n\n\nfor start in tqdm(range(0, len(names), batch_size)):\n x_batch = []\n end = min(start + batch_size, len(names))\n ids_test_batch = names[start:end]\n ns = []\n for name in ids_test_batch:\n ns.append((name[0],name[1]))\n img = cv2.imread(name[0])\n img = cv2.resize(img, (input_size, input_size))\n x_batch.append(img)\n x_batch = np.array(x_batch, np.float32) / 255\n preds = model.predict_on_batch(x_batch)\n preds = np.squeeze(preds, axis=3)\n plt.figure(figsize=(20, 20))\n i = 0\n\n for pred in preds:\n print ns[i][0]\n img_name = (ns[i][0].split('/')[-1]).split('.')[0] + '/'\n cur_dir = OUTPUT_DIR + img_name\n os.mkdir(OUTPUT_DIR + img_name)\n prob = cv2.resize(pred, (orig_width, orig_height))\n orig_img = cv2.imread(ns[i][0])\n 
cv2.imwrite('temp.jpg', prob)\n p = cv2.imread('temp.jpg')\n mask_orig = cv2.imread(ns[i][1])\n\n # visualize(orig_img, 255 * prob, mask_orig)\n\n # get and write transparent mask for predicted image\n output_pred = get_transparent_prediction(orig_img, p,alpha=0.5,orig=False)\n cv2.imwrite(cur_dir + str(i) + '_' + ns[i][1].split('/')[-1], output_pred)\n\n # get and write transparent mask for ground truth\n output = get_transparent_prediction(orig_img, mask_orig,alpha=0.5,orig=True)\n cv2.imwrite(cur_dir + 'orig_' + ns[i][1].split('/')[-1], output)\n pred, orig = get_pred_orig_grayscale(p, mask_orig)\n cv2.imwrite(cur_dir + 'mask_pred_' + ns[i][1].split('/')[-1],255 * pred)\n cv2.imwrite(cur_dir + 'mask_orig_' + ns[i][1].split('/')[-1],255 * orig)\n iou = metric.pixel_accuracy(pred, orig)\n if iou != -1:\n pixel_accuracy.append(iou)\n print \"MEAN accuracy: {0}\".format(iou)\n\n # for original submission\n mask = prob > threshold\n rle = run_length_encode(mask)\n rles.append(rle)\n i += 1\n\nprint \"Average pixel accuracy: {0}\".format(np.sum(pixel_accuracy) / len(pixel_accuracy))\nprint \"number of defected images: {0}\".format(len(pixel_accuracy))\n\nprint(\"Generating submission file...\") \ndf = pd.DataFrame({'img': names, 'rle_mask': rles})\ndf.to_csv('submit/submission.csv.gz', index=False, compression='gzip')\n", "id": "675364", "language": "Python", "matching_score": 4.624868392944336, "max_stars_count": 2, "path": "eval.py" }, { "content": "import cv2\nimport numpy as np\nimport os\nimport eval_segm as metric\nimport matplotlib.pyplot as plt\n\n\ndef visualize(img_orig, mask_pred, mask_orig):\n mask_pred = get_transparent_prediction(img_orig, mask_pred)\n plt.subplot(131)\n plt.imshow(img_orig)\n plt.subplot(132)\n plt.imshow(mask_orig)\n plt.subplot(133)\n plt.imshow(mask_pred)\n plt.show()\n\n\ndef get_transparent_prediction(img_orig, mask_pred, alpha=0.5, orig=True):\n output = img_orig.copy()\n if orig:\n image = mask_pred\n else:\n image = np.zeros((orig_height, orig_width, 3), dtype=\"uint8\")\n image[np.where((mask_pred != [0, 0, 0]).all(axis=2))] = [255, 0, 0]\n\n overlay = image.copy()\n cv2.addWeighted(overlay, alpha, output, 1 - alpha, 0, output)\n output = cv2.cvtColor(output, cv2.COLOR_BGR2RGB)\n return output\n\n\ndef get_pred_orig_grayscale(pred, orig):\n image_pred = np.zeros((orig_height, orig_width), dtype=\"uint8\")\n image_pred[np.where((pred != [0, 0, 0]).all(axis=2))] = np.ones((1, 1), dtype=\"uint8\")\n\n # select indexes where green in intensive so it's a mask\n mask = orig[:, :, 1] > 230\n image_orig = np.zeros((orig_height, orig_width, 3), dtype=\"uint8\")\n image_orig[mask] = np.ones((1, 1, 1), dtype=\"uint8\")\n\n # get back to 2-d array\n image_orig = image_orig[:, :, 0]\n\n return image_pred, image_orig\n\n\ndef get_iou(prediction, ns, i, write_pred=True):\n print ns[i][0]\n img_name = (ns[i][0].split('/')[-1]).split('.')[0] + '/'\n cur_dir = OUTPUT_DIR + img_name\n os.mkdir(OUTPUT_DIR + img_name)\n prob = cv2.resize(prediction, (orig_width, orig_height))\n orig_img = cv2.imread(ns[i][0])\n cv2.imwrite('temp.jpg', prob)\n p = cv2.imread('temp.jpg')\n mask_orig = cv2.imread(ns[i][1])\n # visualize(orig_img, 255 * prob, mask_orig)\n pred, orig = get_pred_orig_grayscale(p, mask_orig)\n\n if write_pred:\n # get and write transparent mask for predicted image\n output_pred = get_transparent_prediction(orig_img, p, alpha=0.5, orig=False)\n cv2.imwrite(cur_dir + str(i) + '_' + ns[i][1].split('/')[-1], output_pred)\n\n # get and write transparent mask for 
ground truth\n output = get_transparent_prediction(orig_img, mask_orig, alpha=0.5, orig=True)\n cv2.imwrite(cur_dir + 'orig_' + ns[i][1].split('/')[-1], output)\n cv2.imwrite(cur_dir + 'mask_pred_' + ns[i][1].split('/')[-1], 255 * pred)\n cv2.imwrite(cur_dir + 'mask_orig_' + ns[i][1].split('/')[-1], 255 * orig)\n\n return metric.pixel_accuracy(pred, orig)\n", "id": "12440827", "language": "Python", "matching_score": 1.3681020736694336, "max_stars_count": 2, "path": "util.py" }, { "content": "from model.u_net import get_unet_128, get_unet_256, get_unet_512, get_unet_1024\n\ninput_size = 128\n\nmax_epochs = 100\nbatch_size = 2\n\n# orig_width = 1918\n# orig_height = 1280\norig_width = 735\norig_height = 500\n\nthreshold = 0.5\n\nmodel_factory = get_unet_128\n", "id": "3682689", "language": "Python", "matching_score": 0.4031384289264679, "max_stars_count": 2, "path": "params.py" }, { "content": "import os\nimport cv2\n\nwith open(\"train.txt\",\"w\") as out:\n base = \"/home/ivan/projects/internship_addfor/nets/KittiSeg/data_tooth/\"\n for f in os.listdir(\"data_tooth/\"):\n if \"_mask\" not in f:\n out.write(base + f + \" \" + base + f.split('.')[0] + '_mask.jpg')\n out.write('\\n')\n", "id": "6674250", "language": "Python", "matching_score": 0.1167958527803421, "max_stars_count": 2, "path": "make_image_sets.py" } ]
0.88562
yagoocarvalho
[ { "content": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 7 14:48:57 2018\n\n@author: BRC\n\"\"\"\n\nimport os\nimport DBHelperFunctions as aux\n\n\n\n###################################################################################\n######################## DB INITIALIZATION FUNCTIONS ##############################\n###################################################################################\n\n\n#Le o CSV e cria o arquivo do BD de Heap\ndef CreateHeapBD(csvFilePath):\n #Lê do CSV e preenche os registros com enchimento para criar o tamanho fixo\n valuesToLoad = aux.PadRegistries(aux.ReadFromFile(csvFilePath))\n \n #apaga o conteúdo existente no momento(se houver)\n if os.path.exists(aux.HeapPath):\n os.remove(aux.HeapPath)\n \n #make HEAD File\n aux.MakeHEAD(aux.HeapHeadPath, \"Heap\", 0)\n #preenche os valores direto no arquivo\n #file = open(aux.HeapPath, \"w+\")\n #file.write(aux.MakeHEADString(\"HEAP\"))\n #file.close()\n \n registryCounter = 0\n #inserimos valor a valor com a função de inserção da Heap\n for row in valuesToLoad:\n HeapInsertSingleRecord(row)\n registryCounter +=1\n \n aux.UpdateHEADFile(aux.HeapHeadPath, \"HEAP\", registryCounter)\n \n\n\n\n\n\n\n###################################################################################\n##################################### HEAP ########################################\n###################################################################################\n\n###################################################################################\n############################ HEAP - SELECT FUNCTIONS ##############################\n###################################################################################\n\n#colName = Desired column of the query (SEE LISTS ABOVE FOR COL NAMES)\n#value = desired value\n#SQL Format: Select * from HeapTable WHERE colName = value\n#singleRecordSelection = Retorna o PRIMEIRO registro onde 'colName' = à value se True\ndef HeapSelectRecord(colName, value, singleRecordSelection = False, valueIsArray = False, secondColName = \"\", secondValue = \"\"):\n numberOfBlocksUsed = 0 #conta o número de vezes que \"acessamos a memória do disco\"\n registryFound = False\n endOfFile = False\n \n values = \"\"\n if valueIsArray:\n for val in value:\n values+= val + \", \"\n values = values[:len(values)-2]#tira ultima ', '\n \n if colName not in aux.colHeadersList:\n print(\"Error: Column name not found in relation.\")\n return\n columnIndex = aux.colHeadersList.index(colName) #pega o indice referente àquela coluna\n secondValuePresent = False\n\n secondColumnIndex = -1\n if secondColName != \"\" and secondValue != \"\":\n if secondColName not in aux.colHeadersList:\n print(\"Error: Second column name not found in relation\")\n return\n secondColumnIndex = aux.colHeadersList.index(secondColName)\n secondValuePresent = True\n\n print(\"\\nRunning query: \")\n if singleRecordSelection:\n if valueIsArray:\n print(\"\\nSELECT * FROM TB_HEAP WHERE \" + colName + \" in (\" + values + \") LIMIT 1;\\n\\n\")\n else:\n if secondValuePresent:\n print(\"\\nSELECT * FROM TB_HEAP WHERE \" + colName + \" = \" + value + \" AND \" + secondColName + \"=\" + secondValue + \" LIMIT 1;\\n\\n\")\n else:\n print(\"\\nSELECT * FROM TB_HEAP WHERE \" + colName + \" = \" + value + \" LIMIT 1;\\n\\n\")\n else:\n if valueIsArray:\n print(\"\\nSELECT * FROM TB_HEAP WHERE \" + colName + \" in (\" + values + \");\\n\\n\")\n else:\n if secondValuePresent:\n print(\"\\nSELECT * FROM TB_HEAP WHERE \" + colName + \" = \" + value 
+ \" AND \" + secondColName + \"=\" + secondValue + \";\\n\\n\")\n else:\n print(\"\\nSELECT * FROM TB_HEAP WHERE \" + colName + \" = \" + value + \";\\n\\n\")\n\n currentRegistry= 0#busca linear, sempre começamos do primeiro\n results = []\n while not (registryFound or endOfFile):\n currentBlock = aux.FetchBlock(aux.HeapPath, currentRegistry)#pega 5 registros a partir do registro atual\n if currentBlock == []:\n endOfFile = True\n break\n \n #mais um bloco varrido\n numberOfBlocksUsed +=1\n \n for i in range(len(currentBlock)):\n if (not valueIsArray and ((not secondValuePresent and currentBlock[i][columnIndex] == value) or (secondValuePresent and currentBlock[i][columnIndex]==value and currentBlock[i][secondColumnIndex]==secondValue) ) ) or (valueIsArray and currentBlock[i][columnIndex] in value):\n print(\"Result found in registry \" + str(currentRegistry+i) + \"!\")\n results += [currentBlock[i]]\n if singleRecordSelection:\n registryFound = True\n break\n #se não é EOF e não encontrou registro, repete operação com outro bloco\n currentRegistry +=aux.blockSize\n \n if results == []:\n if valueIsArray:\n print(\"Não foi encontrado registro com \"+colName+ \" in (\" + values +\")\")\n else:\n print(\"Não foi encontrado registro com valor \" +colName+ \" = \" + value)\n \n else:\n print(\"Results found: \\n\")\n for result in results:\n print(result)\n print(\"\\n\")\n \n print(\"End of search.\")\n print(\"Number of blocks fetched: \" + str(numberOfBlocksUsed))\n\n\n\n\n\n\n\n#DONE\n###################################################################################\n############################ HEAP - INSERT FUNCTIONS ##############################\n###################################################################################\n\n#insere um valor novo na Heap(ou seja, no final dela)\ndef HeapInsertSingleRecord(listOfValues):\n if len(listOfValues) != len(aux.maxColSizesList):\n print(\"Erro: lista de valores recebidos não tem a mesma quantidade de campos da relação\")\n return\n with open(aux.HeapPath, 'a') as file:\n #insere o CPF com seu proprio padding\n file.write(aux.FillCPF(listOfValues[0]))\n #assumindo que estão na ordem correta já\n for i in range(1, len(listOfValues)):\n file.write(aux.PadString(listOfValues[i], aux.maxColSizesList[i]))\n #por fim pulamos uma linha para o próximo registro\n file.write(\"\\n\")\n aux.UpdateHEADFile(aux.HeapHeadPath, \"Heap\", aux.GetNumRegistries(aux.HeapHeadPath, aux.heapHeadSize)+1)\n\n\ndef HeapMassInsertCSV(csvFilePath):\n #Lê do CSV e preenche os registros com enchimento para criar o tamanho fixo\n valuesToLoad = aux.PadRegistries(aux.ReadFromFile(csvFilePath))\n \n registryCounter = aux.GetNumRegistries(aux.HeapHeadPath, aux.heapHeadSize)\n #inserimos valor a valor com a função de inserção da Heap\n for row in valuesToLoad:\n HeapInsertSingleRecord(row)\n registryCounter +=1\n \n aux.UpdateHEADFile(aux.HeapHeadPath, \"HEAP\", registryCounter)\n\n\n###################################################################################\n############################ HEAP - DELETE FUNCTIONS ##############################\n###################################################################################\n\n#colName = Desired column of the query (SEE LISTS ABOVE FOR COL NAMES)\n#value = desired value\n#SQL Format: Select * from HeapTable WHERE colName = value\n#singleRecordDeletion = Retorna o PRIMEIRO registro onde 'colName' = à value se True\ndef HeapDeleteRecord(colName, value, singleRecordDeletion = False, valueIsArray = False, 
secondColName = \"\", secondValue = \"\"):\n numberOfBlocksUsed = 0 #conta o número de vezes que \"acessamos a memória do disco\"\n registryFound = False\n endOfFile = False\n \n indexesToDelete = []\n \n values = \"\"\n if valueIsArray:\n for val in value:\n values+= val + \", \"\n values = values[:len(values)-2]#tira ultima ', '\n \n if colName not in aux.colHeadersList:\n print(\"Error: Column name not found in relation.\")\n return\n columnIndex = aux.colHeadersList.index(colName) #pega o indice referente àquela coluna\n\n secondValuePresent = False\n\n\n secondColumnIndex = -1\n if secondColName != \"\" and secondValue != \"\":\n if secondColName not in aux.colHeadersList:\n print(\"Error: Second column name not found in relation\")\n return\n secondColumnIndex = aux.colHeadersList.index(secondColName)\n secondValuePresent = True\n\n print(\"\\nRunning query: \")\n if singleRecordDeletion:\n if valueIsArray:\n print(\"\\nDELETE FROM TB_HEAP WHERE \" + colName + \" in (\" + values + \") LIMIT 1;\\n\\n\")\n else:\n if secondValuePresent:\n print(\"\\nDELETE FROM TB_HEAP WHERE \" + colName + \" = \" + value + \" AND \" + secondColName + \"=\" + secondValue + \" LIMIT 1;\\n\\n\")\n else:\n print(\"\\nDELETE FROM TB_HEAP WHERE \" + colName + \" = \" + value + \" LIMIT 1;\\n\\n\")\n else:\n if valueIsArray:\n print(\"\\nDELETE FROM TB_HEAP WHERE \" + colName + \" in (\" + values + \");\\n\\n\")\n else:\n if secondValuePresent:\n print(\"\\nDELETE FROM TB_HEAP WHERE \" + colName + \" = \" + value + \" AND \" + secondColName + \"=\" + secondValue + \";\\n\\n\")\n else:\n print(\"\\nDELETE FROM TB_HEAP WHERE \" + colName + \" = \" + value + \";\\n\\n\")\n\n currentRegistry= 0#busca linear, sempre começamos do primeiro\n results = [] #retornar os deletados\n while not (registryFound or endOfFile):\n currentBlock = aux.FetchBlock(aux.HeapPath, currentRegistry)#pega 5 registros a partir do registro atual\n if currentBlock == []:\n endOfFile = True\n break\n \n #mais um bloco varrido\n numberOfBlocksUsed +=1\n \n for i in range(len(currentBlock)):\n if (not valueIsArray and ((not secondValuePresent and currentBlock[i][columnIndex] == value) or (secondValuePresent and currentBlock[i][columnIndex]==value and currentBlock[i][secondColumnIndex]==secondValue) ) ) or (valueIsArray and currentBlock[i][columnIndex] in value):\n print(\"Result found in registry \" + str(currentRegistry+i) + \"!\")\n results += [currentBlock[i]]\n #salvar index para deletar posteriormente\n indexesToDelete+=[currentRegistry+i]\n\n if singleRecordDeletion:\n aux.DeleteLineFromFile(currentRegistry+i, aux.HeapPath)\n registryFound = True\n break\n #se não é EOF e não encontrou registro, repete operação com outro bloco\n currentRegistry +=aux.blockSize\n \n if results == []:\n if valueIsArray:\n print(\"Não foi encontrado registro com \"+colName+ \" in (\" + values +\")\")\n else:\n print(\"Não foi encontrado registro com valor \" +colName+ \" = \" + value)\n \n else:\n print(indexesToDelete)\n \n for reg in reversed(indexesToDelete):\n aux.DeleteLineFromFile(reg, aux.HeapPath)\n print(\"\\n\\nRegistries deleted: \\n\")\n for result in results:\n print(result)\n print(\"\\n\")\n \n print(\"End of query.\")\n print(\"Number of blocks fetched: \" + str(numberOfBlocksUsed))\n\n #updateHEAD with new number of registries if there were deletions\n if results != []:\n aux.UpdateHEADFile(aux.HeapHeadPath, \"Heap\", aux.GetNumRegistries(aux.HeapHeadPath, aux.heapHeadSize)-len(results))\n 
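\n\n# Editor's illustrative usage sketch for the heap primitives above (hedged: these calls are not part of the\n# original script; aux.RJPath / aux.MGPath come from DBHelperFunctions and NM_EMAIL is the column name used in\n# the commented-out examples at the bottom of this file, while the e-mail value is purely hypothetical):\n#   CreateHeapBD(aux.RJPath)                                  # bulk load a CSV and write the HEAD file\n#   HeapMassInsertCSV(aux.MGPath)                             # append the records of another CSV\n#   HeapSelectRecord(\"NM_EMAIL\", \"foo@bar.com\", singleRecordSelection=True)  # linear scan, stops at first hit\n#   HeapDeleteRecord(\"NM_EMAIL\", \"foo@bar.com\")               # full scan, removes every matching registry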
\n\n\n\n\n\n###################################################################################\n################################### ORDERED #######################################\n###################################################################################\nfrom dateutil import parser\nimport math\n\n# Campo nao chave para ordenacao\nnumColToOrder = 0\nisOrderedByPrimaryKey = True\n\n\n#Le o CSV e cria o arquivo do BD Ordenado\ndef CreateOrderedBD(csvFilePath, isOrderedByPrimaryKey):\n #Lê do CSV e preenche os registros com enchimento para criar o tamanho fixo\n valuesToLoad = aux.PadRegistries(aux.ReadFromFile(csvFilePath))\n valuesToLoad = sortList(valuesToLoad, isOrderedByPrimaryKey)\n #apaga o conteúdo existente no momento(se houver)\n if os.path.exists(aux.OrderedPath):\n os.remove(aux.OrderedPath)\n \n #preenche os valores direto no arquivo\n file = open(aux.OrderedPath, \"w+\")\n file.write(aux.MakeHEADString(\"Ordered\", len(valuesToLoad)))\n for row in valuesToLoad:\n for cols in row:\n file.write(cols)\n \n file.close()\n\n# Funcao para auxiliar na ordenacao\ndef sortComparison(elem):\n # Se o campo for de data, converte-lo para este formato\n if(numColToOrder == 7):\n return parser.parse(elem[numColToOrder])\n #Outros campos\n else:\n return elem[numColToOrder]\n\n# Ordena um array de registros\ndef sortList(values, isPrimaryKey):\n if (isPrimaryKey is False):\n return sorted(values, key=sortComparison)\n else:\n return sorted(values)\n\n\n###################################################################################\n########################## ORDERED - SELECT FUNCTIONS #############################\n###################################################################################\n\n# TODO consertar busca em data\n# Retorna o bloco se o achar, ou -1, caso contrario; e o numero de blocos utilizados\ndef binarySearch(columnIndex, value, maxNumBlocks, singleRecordSelection = False):\n #conta o número de vezes que \"acessamos a memória do disco\"\n numberOfBlocksUsed = []\n \n # blocos encontrados na query \n foundedBlocks=[]\n \n # blocos acessados durante a query\n accessedBlocks=[]\n \n # intervalo de procura dos blocos\n l = 0\n r = maxNumBlocks\n while l <= r: \n # Pega o numero do bloco do meio\n mid = math.ceil(l + (r - l)/2)\n \n # 0-based\n # Busca o registro\n blockRegistries = aux.FetchBlock(aux.OrderedPath, (mid-1)*5)\n \n getNearBlocks(mid, foundedBlocks,columnIndex, value, accessedBlocks,\n maxNumBlocks, numberOfBlocksUsed, singleRecordSelection)\n# \n # Check if x is present at mid \n if (foundedBlocks): \n return foundedBlocks, len(numberOfBlocksUsed)\n else:\n # Se o valor é maior que o ultimo elemento, ignora a metade esquerda \n if(value > blockRegistries[-1][columnIndex]):\n l = mid + 1\n # Se o valor é menor, ignorar metade direita\n else:\n r = mid - 1\n \n # Retorna -1 se não achar\n return -1, len(numberOfBlocksUsed)\n\n\ndef getNearBlocks(numberBlock, foundedBlocks,columnIndex, value, accessedBlocks,\n maxNumBlocks, numberOfBlocksUsed, singleRecordSelection = False):\n \n if(numberBlock not in accessedBlocks):\n accessedBlocks.append(numberBlock)\n numberOfBlocksUsed.append('1')\n \n # array de indices de blocos encontrados\n indexesFoundedBlocks = []\n \n # recupera o bloco de registros\n blockRegistries = aux.FetchBlock(aux.OrderedPath, (numberBlock-1)*5)\n \n # Varre cada registro do bloco procurando o valor\n for idx, block in enumerate(blockRegistries):\n if value in block[columnIndex]:\n if(singleRecordSelection):\n 
foundedBlocks.append(block)\n return\n indexesFoundedBlocks.append(idx)\n foundedBlocks.append(block)\n \n # se contem o indice 0 é possivel que tenha mais registros no bloco anterior\n if( (0 in indexesFoundedBlocks) and numberBlock>1):\n getNearBlocks(numberBlock-1, foundedBlocks,\n columnIndex, value, accessedBlocks,maxNumBlocks, numberOfBlocksUsed)\n \n # se contem o indice 4 é possivel que tenha mais registros no proximo bloco\n if((4 in indexesFoundedBlocks) and numberBlock<maxNumBlocks):\n getNearBlocks(numberBlock+1, foundedBlocks, \n columnIndex, value, accessedBlocks, maxNumBlocks, numberOfBlocksUsed)\n\n \n\n\n#colName = Desired column of the query (SEE LISTS ABOVE FOR COL NAMES)\n#value = desired value\n#SQL Format: Select * from OrderedTable WHERE colName = value\n#Retorna o PRIMEIRO registro onde o colName tenha o valor igual à value\ndef OrderedSelectSingleRecord(colName, value):\n numberOfBlocksUsed = 0 #conta o número de vezes que \"acessamos a memória do disco\"\n \n if colName not in aux.colHeadersList:\n print(\"Error: Column name not found in relation.\")\n return\n columnIndex = aux.colHeadersList.index(colName) #pega o indice referente àquela coluna\n\n print(\"Running query: \")\n print(\"SELECT * FROM TB_ORDERED WHERE \" + colName + \" = \" + value + \";\")\n\n # Obtem o numero de blocos do BD\n numBlocks = math.ceil(aux.GetNumRegistries(aux.OrderedPath, 0)/aux.blockSize)\n \n # Verifica se o campo procurado eh equivalente ao campo pelo qual o banco foi ordenado\n # Caso seja, utilizar busca binaria\n if(columnIndex == numColToOrder): \n blockFounded,numberOfBlocksUsed = binarySearch(numColToOrder, value,\n numBlocks, True)\n # Senao realizar select linear\n else:\n blockFounded = None #fazer select normal\n \n if(blockFounded):\n print(\"Registro encontrado: \")\n print(blockFounded)\n else:\n print(\"Registro não encontrado\")\n\n print(\"Fim da busca.\")\n print(\"Número de blocos varridos: \" + str(numberOfBlocksUsed))\n\n###################################################################################\n########################## ORDERED - INSERT FUNCTIONS #############################\n###################################################################################\n\n###################################################################################\n########################## ORDERED - DELETE FUNCTIONS #############################\n###################################################################################\n\n\n###################################################################################\n##################################### HASH ########################################\n###################################################################################\n\n#Le o CSV e cria o arquivo do BD de Hash\ndef CreateHashBD(csvFilePath):\n\n #Reads the csv file and create the records to be inserted, with fixed length\n valuesToLoad = aux.PadRegistries(aux.ReadFromFile(csvFilePath))\n \n # Delete previous database\n if os.path.exists(aux.HashPath):\n os.remove(aux.HashPath)\n \n # Create empty file to reserve disk space\n with open(aux.HashPath, 'wb') as hashFile:\n hashFile.seek((aux.bucketSize * aux.numberOfBuckets * aux.blockSize * (aux.registrySize -1)) - 1)\n hashFile.write(b'\\0')\n \n # Create HEAD to File\n aux.MakeHEAD(aux.HashHeadPath, \"Hash\", 0)\n \n registryCounter = 0\n #inserimos valor a valor com a função de inserção do Hash\n for row in valuesToLoad:\n registry = Registry(row, False)\n HashInsertRecord(registry)\n registryCounter 
+=1\n\n\ndef MassHashInsert(csvFilePath):\n #Reads the csv file and create the records to be inserted, with fixed length\n valuesToLoad = aux.PadRegistries(aux.ReadFromFile(csvFilePath))\n\n registryCounter = 0\n #inserimos valor a valor com a função de inserção do Hash\n for row in valuesToLoad:\n registry = Registry(row, False)\n HashInsertRecord(registry)\n registryCounter +=1\n\ndef CalculateHashKey(key):\n return int(key)\n\ndef CalculateHashAddress(hashKey):\n return hashKey % aux.numberOfBuckets\n \ndef FetchBlockBytes(hashFile, startOffset):\n hashFile.seek(startOffset)\n return hashFile.read((aux.blockSize * (aux.registrySize -1)))\n\nclass Bucket:\n def __init__(self, hashFile, startOffset):\n self.blocksList = []\n for i in range(startOffset, startOffset + aux.bucketSize * aux.blockSize * (aux.registrySize -1) - 1, aux.blockSize * (aux.registrySize -1)):\n self.blocksList += [Block(FetchBlockBytes(hashFile, i))]\n self.firstBlockWithEmptyRecordIndex = self.__FirstBlockWithEmptyRecordIndex()\n\n def __FirstBlockWithEmptyRecordIndex(self):\n for i in range(len(self.blocksList)):\n if (self.blocksList[i].firstEmptyRecordIndex != -1):\n return i\n \n return -1\n\nclass Block:\n\n def __init__(self, registriesBytes):\n self.registriesList = []\n #iterate over registries bytes\n for b in range(0, len(registriesBytes), (aux.registrySize -1)):\n self.registriesList += [Registry(registriesBytes[b : b + (aux.registrySize -1)], True)]\n\n self.firstEmptyRecordIndex = self.__FirstEmptyRecordIndex()\n\n def SizeInBytes(self):\n sizeInBytes = 0\n for registry in self.registriesList:\n sizeInBytes += registry.sizeInBytes\n\n return sizeInBytes\n\n def __FirstEmptyRecordIndex(self):\n for i in range(len(self.registriesList)):\n try:\n if (self.registriesList[i].docNumber.index('\\x00') >= 0):\n return i\n except:\n pass\n return -1\n\n def __str__(self):\n str_block = \"\"\n for registry in self.registriesList:\n str_block += str(registry)\n \n return str_block\n\nclass Registry:\n\n def __init__(self, listOfValues, dataInBytes):\n if (not dataInBytes):\n self.docNumber = listOfValues[0]\n self.state = listOfValues[1]\n self.jobType = listOfValues[2]\n self.candidateNumber = listOfValues[3]\n self.candidateName = listOfValues[4]\n self.candidateEmail = listOfValues[5]\n self.partyNumber = listOfValues[6]\n self.birthDate = listOfValues[7]\n self.gender = listOfValues[8]\n self.instructionLevel = listOfValues[9]\n self.maritalStatus = listOfValues[10]\n self.colorRace = listOfValues[11]\n self.ocupation = listOfValues[12]\n else:\n listOfValues = listOfValues.decode(\"utf-8\")\n self.docNumber = listOfValues[0:11]\n self.state = listOfValues[11:13]\n self.jobType = listOfValues[13:15]\n self.candidateNumber = listOfValues[15:20]\n self.candidateName = listOfValues[20:90]\n self.candidateEmail = listOfValues[90:133]\n self.partyNumber = listOfValues[133:135]\n self.birthDate = listOfValues[135:145]\n self.gender = listOfValues[145:146]\n self.instructionLevel = listOfValues[146:147]\n self.maritalStatus = listOfValues[147:148]\n self.colorRace = listOfValues[148:150]\n self.ocupation = listOfValues[150:153]\n\n self.sizeInBytes = len(str(self))\n \n def __str__(self):\n return self.docNumber + self.state + self.jobType + self.candidateNumber + self.candidateName + self.candidateEmail + self.partyNumber + self.birthDate + self.gender + self.instructionLevel + self.maritalStatus + self.colorRace + self.ocupation\n\n def Clear(self):\n self.docNumber = '\\x00' * 11\n self.state = '\\x00' * 2\n 
self.jobType = '\\x00' * 2\n self.candidateNumber = '\\x00' * 5\n self.candidateName = '\\x00' * 70\n self.candidateEmail = '\\x00' * 43\n self.partyNumber = '\\x00' * 2\n self.birthDate = '\\x00' * 10\n self.gender = '\\x00' * 1\n self.instructionLevel = '\\x00' * 1\n self.maritalStatus = '\\x00' * 1\n self.colorRace = '\\x00' * 2\n self.ocupation = '\\x00' * 3\n\n self.sizeInBytes = len(str(self))\n\n\n###################################################################################\n############################ HASH - SELECT FUNCTIONS ##############################\n###################################################################################\n\ndef HashSelectRecord(searchKeys, goodSearchKeys):\n registryList = []\n for searchKey in searchKeys:\n freeBlockIndex = -1\n blocksVisitedCount = 0\n #calculate hash key and address\n hashKey = CalculateHashKey(searchKey)\n hashAddress = CalculateHashAddress(hashKey)\n\n # Init the start offset\n startingOffset = hashAddress * aux.bucketSize * aux.blockSize * (aux.registrySize - 1)\n\n # Place the record the first block with enough space starting from the file\n with open(aux.HashPath, 'r+b') as hashFile:\n while freeBlockIndex == -1:\n # Load the bucket\n currentBucket = Bucket(hashFile, startingOffset)\n freeBlockIndex = currentBucket.firstBlockWithEmptyRecordIndex\n foundRegistry = False\n # Search for the key in the registries in the bucket\n for block in currentBucket.blocksList:\n blocksVisitedCount += 1\n for registry in block.registriesList:\n if (registry.docNumber == searchKey):\n registryList += [registry]\n foundRegistry = True\n print(\"Blocks visited for key {}: {}\".format(searchKey, blocksVisitedCount))\n if (freeBlockIndex == -1):\n freeBlockIndex = 0\n break\n \n if (foundRegistry):\n break\n\n if (not foundRegistry):\n # if registry was not found and the bucket is full, it may have occured overflow, so we search in the next bucket\n if (freeBlockIndex == -1):\n startingOffset += aux.bucketSize * aux.blockSize * (aux.registrySize - 1)\n pass\n # else, print an error and continue\n else:\n print(\"Record {} not found\".format(searchKey))\n print(\"Blocks visited for key {}: {}\".format(searchKey, blocksVisitedCount))\n pass\n\n return registryList\n\n#colName = Desired column of the query (SEE LISTS ABOVE FOR COL NAMES)\n#value = desired value\n#SQL Format: Select * from HeapTable WHERE colName = value\n#singleRecordSelection = Retorna o PRIMEIRO registro onde 'colName' = à value se True\ndef HashLinearSelectRecord(colName, value, customRegistrySize, singleRecordSelection = False, valueIsArray = False, secondColName = \"\", secondValue = \"\"):\n numberOfBlocksUsed = 0 #conta o número de vezes que \"acessamos a memória do disco\"\n registryFound = False\n endOfFile = False\n \n values = \"\"\n if valueIsArray:\n for val in value:\n values+= val + \", \"\n values = values[:len(values)-2]#tira ultima ', '\n \n if colName not in aux.colHeadersList:\n print(\"Error: Column name not found in relation.\")\n return\n columnIndex = aux.colHeadersList.index(colName) #pega o indice referente àquela coluna\n secondValuePresent = False\n\n secondColumnIndex = -1\n if secondColName != \"\" and secondValue != \"\":\n if secondColName not in aux.colHeadersList:\n print(\"Error: Second column name not found in relation\")\n return\n secondColumnIndex = aux.colHeadersList.index(secondColName)\n secondValuePresent = True\n\n print(\"\\nRunning query: \")\n if singleRecordSelection:\n if valueIsArray:\n print(\"\\nSELECT * FROM TB_HEAP 
WHERE \" + colName + \" in (\" + values + \") LIMIT 1;\\n\\n\")\n else:\n if secondValuePresent:\n print(\"\\nSELECT * FROM TB_HEAP WHERE \" + colName + \" = \" + value + \" AND \" + secondColName + \"=\" + secondValue + \" LIMIT 1;\\n\\n\")\n else:\n print(\"\\nSELECT * FROM TB_HEAP WHERE \" + colName + \" = \" + value + \" LIMIT 1;\\n\\n\")\n else:\n if valueIsArray:\n print(\"\\nSELECT * FROM TB_HEAP WHERE \" + colName + \" in (\" + values + \");\\n\\n\")\n else:\n if secondValuePresent:\n print(\"\\nSELECT * FROM TB_HEAP WHERE \" + colName + \" = \" + value + \" AND \" + secondColName + \"=\" + secondValue + \";\\n\\n\")\n else:\n print(\"\\nSELECT * FROM TB_HEAP WHERE \" + colName + \" = \" + value + \";\\n\\n\")\n\n currentRegistry= 0#busca linear, sempre começamos do primeiro\n results = []\n while not (registryFound or endOfFile):\n currentBlock = aux.FetchBlock2(aux.HashPath, currentRegistry, customRegistrySize)#pega 5 registros a partir do registro atual\n if currentBlock == []:\n endOfFile = True\n break\n \n #mais um bloco varrido\n numberOfBlocksUsed +=1\n \n for i in range(len(currentBlock)):\n if (not valueIsArray and ((not secondValuePresent and currentBlock[i][columnIndex] == value) or (secondValuePresent and currentBlock[i][columnIndex]==value and currentBlock[i][secondColumnIndex]==secondValue) ) ) or (valueIsArray and currentBlock[i][columnIndex] in value):\n print(\"Result found in registry \" + str(currentRegistry+i) + \"!\")\n results += [currentBlock[i]]\n if singleRecordSelection:\n registryFound = True\n break\n #se não é EOF e não encontrou registro, repete operação com outro bloco\n currentRegistry +=aux.blockSize\n \n if results == []:\n if valueIsArray:\n print(\"Não foi encontrado registro com \"+colName+ \" in (\" + values +\")\")\n else:\n print(\"Não foi encontrado registro com valor \" +colName+ \" = \" + value)\n \n else:\n print(\"Results found: \\n\")\n for result in results:\n print(result)\n print(\"\\n\")\n \n print(\"End of search.\")\n print(\"Number of blocks fetched: \" + str(numberOfBlocksUsed))\n return results\n\n###################################################################################\n############################ HASH - INSERT FUNCTIONS ##############################\n###################################################################################\n\ndef HashInsertRecord(registry):\n freeBlockIndex = -1\n freeSpaceIndex = -1\n\n #calculate hash key and address\n hashKey = CalculateHashKey(registry.docNumber)\n hashAddress = CalculateHashAddress(hashKey)\n\n # Init the start offset\n startingOffset = hashAddress * aux.bucketSize * aux.blockSize * (aux.registrySize - 1)\n\n # Place the record the first block with enough space starting from the file\n with open(aux.HashPath, 'r+b') as hashFile:\n while freeBlockIndex == -1:\n # Load the bucket\n currentBucket = Bucket(hashFile, startingOffset)\n freeBlockIndex = currentBucket.firstBlockWithEmptyRecordIndex\n # Check if there is a collision\n if (freeBlockIndex == -1):\n #If the collision happened a lot and the bucket is full, load the next bucket\n startingOffset += aux.bucketSize * aux.blockSize * (aux.registrySize - 1)\n pass\n else:\n # Select block\n currentBlock = currentBucket.blocksList[freeBlockIndex]\n\n # Set registry to rigth block\n freeSpaceIndex = currentBlock.firstEmptyRecordIndex\n currentBlock.registriesList[freeSpaceIndex] = registry\n \n # Re-write block to the file\n hashFile.seek(startingOffset + (freeBlockIndex * aux.blockSize * (aux.registrySize - 1)))\n 
hashFile.write(str(currentBlock).encode(\"utf-8\"))\n \n###################################################################################\n############################ HASH - DELETE FUNCTIONS ##############################\n###################################################################################\n\ndef HashDeleteRecord(searchKeys, goodSearchKeys):\n for searchKey in searchKeys:\n freeBlockIndex = -1\n blocksVisitedCount = 0\n #calculate hash key and address\n hashKey = CalculateHashKey(searchKey)\n hashAddress = CalculateHashAddress(hashKey)\n\n # Init the start offset\n startingOffset = hashAddress * aux.bucketSize * aux.blockSize * (aux.registrySize - 1)\n\n # Place the record the first block with enough space starting from the file\n with open(aux.HashPath, 'r+b') as hashFile:\n while freeBlockIndex == -1:\n # Load the bucket\n currentBucket = Bucket(hashFile, startingOffset)\n freeBlockIndex = currentBucket.firstBlockWithEmptyRecordIndex\n foundRegistry = False\n # Search for the key in the registries in the bucket\n for i in range(len(currentBucket.blocksList)):\n block = currentBucket.blocksList[i]\n blocksVisitedCount += 1\n for registry in block.registriesList:\n if (registry.docNumber == searchKey):\n registry.Clear()\n foundRegistry = True\n # Re-write block to the file\n hashFile.seek(startingOffset + (i * aux.blockSize * (aux.registrySize - 1)))\n hashFile.write(str(block).encode(\"utf-8\"))\n print(\"Blocks visited for key {}: {}\".format(searchKey, blocksVisitedCount))\n if (freeBlockIndex == -1):\n freeBlockIndex = 0\n break\n \n if (foundRegistry):\n break\n\n if (not foundRegistry):\n # if registry was not found and the bucket is full, it may have occured overflow, so we search in the next bucket\n if (freeBlockIndex == -1):\n startingOffset += aux.bucketSize * aux.blockSize * (aux.registrySize - 1)\n pass\n # else, print an error and continue\n else:\n print(\"Record {} not found\".format(searchKey))\n print(\"Blocks visited for key {}: {}\".format(searchKey, blocksVisitedCount))\n pass\n\n###################################################################################\n################################### MAIN ##########################################\n###################################################################################\n\nCreateHashBD(aux.RJPath)\nMassHashInsert(aux.MGPath)\nMassHashInsert(aux.SPPath)\n\n\n\n\n#CreateOrderedBD(aux.RJPath, False)\n\n#print('-----')\n\n#blocks = binarySearch(1)\n\n#query = \"86551337791\"\n#p = [block if \"09650185712\" in block[numColToOrder] else None for block in blocks ]\n#q = next((block for block in blocks if query in block[numColToOrder]), None)\n\n#print(q)\n\n#0, len(arr)-1,\n#result = binarySearch(numColToOrder, query, 4)\n#print (result)\n\n#OrderedSelectSingleRecord('NM_EMAIL', query)\n#OrderedSelectSingleRecord(aux.colHeadersList[numColToOrder], query)\n\n", "id": "5738682", "language": "Python", "matching_score": 10.008277893066406, "max_stars_count": 0, "path": "FileStructures.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 12 21:10:36 2018\n\n@author: eduardo\n\"\"\"\n\nimport os\nimport fileinput\nimport DBHelperFunctions as aux\n\n\n###################################################################################\n################################# AUX FUNCTIONS ###################################\n###################################################################################\n\n#colName = Desired column of the query (SEE LISTS ABOVE 
FOR COL NAMES)\n#value = desired value\n#SQL Format: Select * from Table WHERE colName = value\n#singleRecordSelection = Retorna o PRIMEIRO registro onde 'colName' = à value se True\ndef LinearSelectRecord(colName, value, singleRecordSelection = False, valueIsArray = False, secondColName = \"\", secondValue = \"\"):\n numberOfBlocksUsed = 0 #conta o número de vezes que \"acessamos a memória do disco\"\n registryFound = False\n endOfFile = False\n \n values = \"\"\n if valueIsArray:\n for val in value:\n values+= val + \", \"\n values = values[:len(values)-2]#tira ultima ', '\n \n if colName not in aux.colHeadersList:\n print(\"Error: Column name not found in relation.\")\n return\n columnIndex = aux.colHeadersList.index(colName) #pega o indice referente àquela coluna\n\n secondValuePresent = False\n\n secondColumnIndex = -1\n if secondColName != \"\" and secondValue != \"\":\n if secondColName not in aux.colHeadersList:\n print(\"Error: Second column name not found in relation\")\n return\n secondColumnIndex = aux.colHeadersList.index(secondColName)\n secondValuePresent = True\n\n currentRegistry= 0#busca linear, sempre começamos do primeiro\n results = []\n while not (registryFound or endOfFile):\n currentBlock = aux.FetchBlock(aux.OrderedPath, currentRegistry)#pega 5 registros a partir do registro atual\n if currentBlock == []:\n endOfFile = True\n break\n \n #mais um bloco varrido\n numberOfBlocksUsed +=1\n \n for i in range(len(currentBlock)):\n if (not valueIsArray and ((not secondValuePresent and currentBlock[i][columnIndex] == value) or (secondValuePresent and currentBlock[i][columnIndex]==value and currentBlock[i][secondColumnIndex]==secondValue) ) ) or (valueIsArray and currentBlock[i][columnIndex] in value):\n print(\"Result found in registry \" + str(currentRegistry+i) + \"!\")\n results += [currentBlock[i]]\n if singleRecordSelection:\n registryFound = True\n break\n #se não é EOF e não encontrou registro, repete operação com outro bloco\n currentRegistry +=aux.blockSize\n \n if results == []:\n return -1, numberOfBlocksUsed \n else:\n return results, numberOfBlocksUsed\n\n\n\n#colName = Desired column of the query (SEE LISTS ABOVE FOR COL NAMES)\n#value = desired value\n#SQL Format: Select * from Table WHERE colName = value\n#singleRecordDeletion = Retorna o PRIMEIRO registro onde 'colName' = à value se True\ndef DeleteRecord(colName, value, singleRecordDeletion = False, valueIsArray = False, secondColName = \"\", secondValue = \"\"):\n numberOfBlocksUsed = 0 #conta o número de vezes que \"acessamos a memória do disco\"\n registryFound = False\n endOfFile = False\n \n indexesToDelete = []\n \n values = \"\"\n if valueIsArray:\n for val in value:\n values+= val + \", \"\n values = values[:len(values)-2]#tira ultima ', '\n \n if colName not in aux.colHeadersList:\n print(\"Error: Column name not found in relation.\")\n return\n columnIndex = aux.colHeadersList.index(colName) #pega o indice referente àquela coluna\n\n secondValuePresent = False\n\n\n secondColumnIndex = -1\n if secondColName != \"\" and secondValue != \"\":\n if secondColName not in aux.colHeadersList:\n print(\"Error: Second column name not found in relation\")\n return\n secondColumnIndex = aux.colHeadersList.index(secondColName)\n secondValuePresent = True\n\n currentRegistry= 0#busca linear, sempre começamos do primeiro\n results = [] #retornar os deletados\n while not (registryFound or endOfFile):\n currentBlock = aux.FetchBlock(aux.OrderedPath, currentRegistry)#pega 5 registros a partir do registro atual\n 
if currentBlock == []:\n endOfFile = True\n break\n \n #mais um bloco varrido\n numberOfBlocksUsed +=1\n \n for i in range(len(currentBlock)):\n if (not valueIsArray and ((not secondValuePresent and currentBlock[i][columnIndex] == value) or (secondValuePresent and currentBlock[i][columnIndex]==value and currentBlock[i][secondColumnIndex]==secondValue) ) ) or (valueIsArray and currentBlock[i][columnIndex] in value):\n print(\"Result found in registry \" + str(currentRegistry+i) + \"!\")\n results += [currentBlock[i]]\n #salvar index para deletar posteriormente\n indexesToDelete+=[currentRegistry+i]\n\n if singleRecordDeletion:\n aux.DeleteLineFromFile(currentRegistry+i, aux.OrderedPath)\n registryFound = True\n break\n #se não é EOF e não encontrou registro, repete operação com outro bloco\n currentRegistry +=aux.blockSize\n \n if results == []:\n return -1, numberOfBlocksUsed\n \n else:\n for reg in reversed(indexesToDelete):\n aux.DeleteLineFromFile(reg, aux.OrderedPath)\n \n print(\"End of query.\")\n print(\"Number of blocks fetched: \" + str(numberOfBlocksUsed))\n\n #updateHEAD with new number of registries if there were deletions\n if results != []:\n aux.UpdateHEADFile(aux.OrderedHeadPath, \"Ordered\", aux.GetNumRegistries(aux.OrderedHeadPath, aux.heapHeadSize-1)-len(results))\n \n return results, numberOfBlocksUsed\n\n###################################################################################\n################################### ORDERED #######################################\n###################################################################################\nfrom dateutil import parser\nimport math\n\n# Campo nao chave para ordenacao\nnumColToOrder = 5\n\n\n\n#Le o CSV e cria o arquivo do BD Ordenado\ndef CreateOrderedBD(csvFilePath):\n #Lê do CSV e preenche os registros com enchimento para criar o tamanho fixo\n valuesToLoad = aux.PadRegistries(aux.ReadFromFile(csvFilePath))\n valuesToLoad = sortList(valuesToLoad)\n #apaga o conteúdo existente no momento(se houver)\n if os.path.exists(aux.OrderedPath):\n os.remove(aux.OrderedPath)\n \n \n # Cria Head do bd\n aux.MakeHEAD(aux.OrderedHeadPath, \"Ordered\", len(valuesToLoad))\n\n #preenche os valores direto no arquivo\n file = open(aux.OrderedPath, \"w+\")\n for row in valuesToLoad:\n #insere o CPF com seu proprio padding\n file.write(aux.FillCPF(row[0]))\n #assumindo que estão na ordem correta já\n for i in range(1, len(row)):\n file.write(aux.PadString(row[i], aux.maxColSizesList[i]))\n #por fim pulamos uma linha para o próximo registro\n file.write(\"\\n\")\n\n \n file.close()\n\n\n# Funcao para auxiliar na ordenacao\ndef sortComparison(elem):\n # Se o campo for de data, converte-lo para este formato\n if(numColToOrder == 7):\n return parser.parse(elem[numColToOrder])\n #Outros campos\n else:\n return elem[numColToOrder]\n\n# Ordena um array de registros\ndef sortList(values):\n return sorted(values, key=sortComparison)\n\n\n###################################################################################\n########################## ORDERED - SELECT FUNCTIONS #############################\n###################################################################################\n\n\n######################### OrderedSelectSingleRecord ##############################\n# TODO consertar busca em data\n# Retorna o bloco se o achar, ou -1, caso contrario; e o numero de blocos utilizados\ndef binarySearch(columnIndex, value, maxNumBlocks, singleRecordSelection = False):\n #conta o número de vezes que \"acessamos a memória do 
disco\"\n numberOfBlocksUsed = []\n \n # blocos encontrados na query \n foundedBlocks=[]\n \n # blocos acessados durante a query\n accessedBlocks=[]\n \n # intervalo de procura dos blocos\n l = 0\n r = maxNumBlocks\n while l <= r: \n # Pega o numero do bloco do meio\n mid = math.ceil(l + (r - l)/2); \n \n # 0-based\n # Busca o registro\n blockRegistries = aux.FetchBlock(aux.OrderedPath, (mid-1)*5)\n \n getNearBlocks(mid, foundedBlocks,columnIndex, value, accessedBlocks,\n maxNumBlocks, numberOfBlocksUsed, singleRecordSelection)\n# \n # Check if x is present at mid \n if (foundedBlocks): \n return foundedBlocks, len(numberOfBlocksUsed)\n else:\n # Se o valor é maior que o ultimo elemento, ignora a metade esquerda \n if(value > blockRegistries[-1][columnIndex]):\n l = mid + 1\n # Se o valor é menor, ignorar metade direita\n else:\n r = mid - 1\n \n # Retorna -1 se não achar\n return -1, len(numberOfBlocksUsed)\n\n\ndef getNearBlocks(numberBlock, foundedBlocks,columnIndex, value, accessedBlocks,\n maxNumBlocks, numberOfBlocksUsed, singleRecordSelection = False):\n \n if(numberBlock not in accessedBlocks):\n accessedBlocks.append(numberBlock)\n numberOfBlocksUsed.append('1')\n \n # array de indices de blocos encontrados\n indexesFoundedBlocks = []\n \n # recupera o bloco de registros\n blockRegistries = aux.FetchBlock(aux.OrderedPath, (numberBlock-1)*5)\n \n for idx, block in enumerate(blockRegistries):\n if value in block[columnIndex]:\n if(singleRecordSelection):\n foundedBlocks.append(block)\n return\n indexesFoundedBlocks.append(idx)\n foundedBlocks.append(block)\n \n # se contem o indice 0 é possivel que tenha mais registros no bloco anterior\n if( (0 in indexesFoundedBlocks) and numberBlock>1):\n getNearBlocks(numberBlock-1, foundedBlocks,\n columnIndex, value, accessedBlocks,maxNumBlocks, numberOfBlocksUsed)\n \n # se contem o indice 4 é possivel que tenha mais registros no proximo bloco\n if((4 in indexesFoundedBlocks) and numberBlock<maxNumBlocks):\n getNearBlocks(numberBlock+1, foundedBlocks, \n columnIndex, value, accessedBlocks, maxNumBlocks, numberOfBlocksUsed)\n\n \n\n\n#colName = Desired column of the query (SEE LISTS ABOVE FOR COL NAMES)\n#value = desired value\n#SQL Format: Select * from OrderedTable WHERE colName = value\n#Retorna o PRIMEIRO registro onde o colName tenha o valor igual à value\ndef OrderedSelectSingleRecord(colName, value, singleRecordSelection = True):\n numberOfBlocksUsed = 0 #conta o número de vezes que \"acessamos a memória do disco\"\n \n if colName not in aux.colHeadersList:\n print(\"Error: Column name not found in relation.\")\n return\n columnIndex = aux.colHeadersList.index(colName) #pega o indice referente àquela coluna\n\n print(\"Running query: \")\n print(\"SELECT * FROM TB_ORDERED WHERE \" + colName + \" = \" + value + \" limit 1;\")\n\n # Obtem o numero de blocos do BD\n numBlocks = math.ceil(aux.GetNumRegistries(aux.OrderedHeadPath, aux.heapHeadSize-1)/aux.blockSize)\n \n # Verifica se o campo procurado eh equivalente ao campo pelo qual o banco foi ordenado\n # Caso seja, utilizar busca binaria\n if(columnIndex == numColToOrder): \n blockFounded,numberOfBlocksUsed = binarySearch(columnIndex, value,\n numBlocks, singleRecordSelection)\n # Senao realizar select linear\n else:\n blockFounded,numberOfBlocksUsed = LinearSelectRecord(colName, value, singleRecordSelection) #fazer select normal\n\n if(blockFounded):\n print(\"Registro encontrado: \")\n print(blockFounded)\n else:\n print(\"Registro não encontrado\")\n\n print(\"Fim da busca.\")\n 
print(\"Número de blocos varridos: \" + str(numberOfBlocksUsed))\n \n\n####################### SELECT - WHERE CAMPO IN (conjunto de valores) #############\n\n#colName = Desired column of the query (SEE LISTS ABOVE FOR COL NAMES)\n#values = multiple values\n#SQL Format: Select * from OrderedTable WHERE colName in [value1,value2...]\n#Retorna lita de registros em que colName tenha um dos valores da lista values\ndef OrderedSelectWithMultipleValues(colName, values):\n numberOfBlocksUsed = 0 #conta o número de vezes que \"acessamos a memória do disco\"\n totalBlocksFounded = []\n totalNumberOfBlocksUsed = 0 \n if colName not in aux.colHeadersList:\n print(\"Error: Column name not found in relation.\")\n return\n columnIndex = aux.colHeadersList.index(colName) #pega o indice referente àquela coluna\n\n print(\"Running query: \")\n print(\"SELECT * FROM TB_ORDERED WHERE \" + colName + \" in \" + str(values) + \";\")\n\n # Obtem o numero de blocos do BD\n numBlocks = math.ceil(aux.GetNumRegistries(aux.OrderedHeadPath, aux.heapHeadSize-1)/aux.blockSize)\n\n # Verifica se o campo procurado eh equivalente ao campo pelo qual o banco foi ordenado\n # Caso seja, utilizar busca binaria\n if(columnIndex == numColToOrder):\n for value in values:\n blocksFounded,numberOfBlocksUsed = binarySearch(columnIndex, value,\n numBlocks, singleRecordSelection = False )\n totalBlocksFounded.append(blocksFounded)\n totalNumberOfBlocksUsed += numberOfBlocksUsed\n # Senao realizar select linear\n else:\n blocksFounded, numberOfBlocksUsed = LinearSelectRecord(aux.OrderedPath,colName, values, valueIsArray = True) #fazer select normal\n totalBlocksFounded.append(blocksFounded)\n totalNumberOfBlocksUsed += numberOfBlocksUsed\n\n if(len(totalBlocksFounded)):\n print(\"Registro(s) encontrado(s): \")\n print(totalBlocksFounded)\n else:\n print(\"Registro não encontrado\")\n\n print(\"Fim da busca.\")\n print(\"Número de blocos varridos: \" + str(totalNumberOfBlocksUsed))\n\n\n####################### SELECT…WHERE colName1=value1 AND colName2=value2 #############\n\n\n# TODO consertar busca em data\n# Retorna o bloco se o achar, ou -1, caso contrario; e o numero de blocos utilizados\ndef binarySearchWithTwoFields(columnIndex, value, maxNumBlocks, \n secondColIndex = \"\", secondValue = \"\"):\n #conta o número de vezes que \"acessamos a memória do disco\"\n numberOfBlocksUsed = []\n \n # blocos encontrados na query \n foundedBlocks=[]\n \n # blocos acessados durante a query\n accessedBlocks=[]\n \n # intervalo de procura dos blocos\n l = 0\n r = maxNumBlocks\n while l <= r: \n # Pega o numero do bloco do meio\n mid = math.ceil(l + (r - l)/2); \n \n # 0-based\n # Busca o registro\n blockRegistries = aux.FetchBlock(aux.OrderedPath, (mid-1)*5)\n \n getNearBlocksWithTwoFields(mid, foundedBlocks,columnIndex, value, accessedBlocks,\n maxNumBlocks, numberOfBlocksUsed,\n secondColIndex,secondValue)\n# \n # Check if x is present at mid \n if (foundedBlocks): \n return foundedBlocks, len(numberOfBlocksUsed)\n else:\n # Se o valor é maior que o ultimo elemento, ignora a metade esquerda \n if(value > blockRegistries[-1][columnIndex]):\n l = mid + 1\n # Se o valor é menor, ignorar metade direita\n else:\n r = mid - 1\n \n # Retorna -1 se não achar\n return -1, len(numberOfBlocksUsed)\n\n\ndef getNearBlocksWithTwoFields(numberBlock, foundedBlocks,columnIndex, value, accessedBlocks,\n maxNumBlocks, numberOfBlocksUsed,secondColIndex,\n secondValue):\n \n if(numberBlock not in accessedBlocks):\n accessedBlocks.append(numberBlock)\n 
numberOfBlocksUsed.append('1')\n \n # array de indices de blocos encontrados\n indexesFoundedBlocks = []\n \n # recupera o bloco de registros\n blockRegistries = aux.FetchBlock(aux.OrderedPath, (numberBlock-1)*5)\n \n for idx, block in enumerate(blockRegistries):\n if value in block[columnIndex]:\n if(secondValue in block[secondColIndex]):\n indexesFoundedBlocks.append(idx)\n foundedBlocks.append(block)\n \n # se contem o indice 0 é possivel que tenha mais registros no bloco anterior\n if( (0 in indexesFoundedBlocks) and numberBlock>1):\n getNearBlocksWithTwoFields(numberBlock-1, foundedBlocks,\n columnIndex, value, accessedBlocks,maxNumBlocks, \n numberOfBlocksUsed, secondColIndex, secondValue)\n \n # se contem o indice 4 é possivel que tenha mais registros no proximo bloco\n if((4 in indexesFoundedBlocks) and numberBlock<maxNumBlocks):\n getNearBlocksWithTwoFields(numberBlock+1, foundedBlocks, \n columnIndex, value, accessedBlocks, maxNumBlocks, \n numberOfBlocksUsed, secondColIndex, secondValue)\n\n \n\n\n#colName = Desired column of the query (SEE LISTS ABOVE FOR COL NAMES)\n#values = multiple values\n#SQL Format: Select * from OrderedTable WHERE colName = value1 AND \n# secondColName = value2;\n#Retorna lita de registros em que possuam os colNames e values correspondentes\ndef OrderedSelectWithTwoFields(colName, value, secondColName = \"\", secondValue = \"\"):\n numberOfBlocksUsed = 0 #conta o número de vezes que \"acessamos a memória do disco\"\n \n if colName not in aux.colHeadersList:\n print(\"Error: Column name not found in relation.\")\n return\n columnIndex1 = aux.colHeadersList.index(colName) #pega o indice referente àquela coluna\n\n if secondColName not in aux.colHeadersList:\n print(\"Error: Column name not found in relation.\")\n return\n columnIndex2 = aux.colHeadersList.index(secondColName) #pega o indice referente àquela coluna\n\n print(\"Running query: \")\n print(\"SELECT * FROM TB_ORDERED WHERE \" + colName + \" = \" + str(value) + \n \" AND \" + str(secondColName) +\" = \" + str(secondValue) + \";\")\n\n # Obtem o numero de blocos do BD\n numBlocks = math.ceil(aux.GetNumRegistries(aux.OrderedHeadPath, aux.heapHeadSize-1)/aux.blockSize)\n\n # Verifica se o campo procurado eh equivalente ao campo pelo qual o banco foi ordenado\n # Caso seja, utilizar busca binaria\n if(columnIndex1 == numColToOrder):\n blocksFounded,numberOfBlocksUsed = binarySearchWithTwoFields(columnIndex1, value,\n numBlocks, columnIndex2, secondValue)\n elif(columnIndex2 == numColToOrder):\n blocksFounded,numberOfBlocksUsed = binarySearchWithTwoFields(columnIndex2, secondValue,\n numBlocks, columnIndex1, value)\n \n # Senao realizar select linear\n else:\n blocksFounded, numberOfBlocksUsed = LinearSelectRecord(\n colName, value, False, False, secondColName, secondValue) #fazer select normal\n\n if(blocksFounded and blocksFounded != -1):\n print(\"Registro(s) encontrado(s): \")\n print(blocksFounded)\n else:\n print(\"Registro não encontrado\")\n\n print(\"Fim da busca.\")\n print(\"Número de blocos varridos: \" + str(numberOfBlocksUsed))\n\n\n\n###################################################################################\n################### SELECT - WHERE CAMPO Between v_inicial and v_final ############\n###################################################################################\n\n# Retorna o bloco se o achar, ou -1, caso contrario; e o numero de blocos utilizados\ndef binarySearchBetween(columnIndex, firstValue, maxNumBlocks, secondValue = \"\"):\n #conta o número de vezes que 
\"acessamos a memória do disco\"\n numberOfBlocksUsed = []\n \n # blocos encontrados na query \n foundedBlocks=[]\n \n # blocos acessados durante a query\n accessedBlocks=[]\n \n # intervalo de procura dos blocos\n l = 0\n r = maxNumBlocks\n while l <= r: \n # Pega o numero do bloco do meio\n mid = math.ceil(l + (r - l)/2); \n \n # 0-based\n # Busca o registro\n blockRegistries = aux.FetchBlock(aux.OrderedPath, (mid-1)*5)\n \n getNearBlocksBetween(mid, foundedBlocks,columnIndex, firstValue, accessedBlocks,\n maxNumBlocks, numberOfBlocksUsed,\n secondValue)\n# \n # Check if x is present at mid \n if (foundedBlocks): \n return foundedBlocks, len(numberOfBlocksUsed)\n else:\n # Se o valor menor é maior que o ultimo elemento, ignora a metade esquerda \n if(firstValue > blockRegistries[-1][columnIndex]):\n l = mid + 1\n # Se o valor maior é menor que o ultimo elemento, ignorar metade direita\n elif (secondValue < blockRegistries[0][columnIndex]):\n r = mid - 1\n else:\n break\n \n # Retorna -1 se não achar\n return -1, len(numberOfBlocksUsed)\n\n\ndef getNearBlocksBetween(numberBlock, foundedBlocks,columnIndex, value, accessedBlocks,\n maxNumBlocks, numberOfBlocksUsed,secondValue):\n \n if(numberBlock not in accessedBlocks):\n accessedBlocks.append(numberBlock)\n numberOfBlocksUsed.append('1')\n \n # array de indices de blocos encontrados\n indexesFoundedBlocks = []\n \n # recupera o bloco de registros\n blockRegistries = aux.FetchBlock(aux.OrderedPath, (numberBlock-1)*5)\n \n for idx, block in enumerate(blockRegistries):\n if block[columnIndex] >= value :\n if(block[columnIndex]) <= secondValue:\n indexesFoundedBlocks.append(idx)\n foundedBlocks.append(block)\n \n # se contem o indice 0 é possivel que tenha mais registros no bloco anterior\n if( (0 in indexesFoundedBlocks) and numberBlock>1):\n getNearBlocksBetween(numberBlock-1, foundedBlocks,\n columnIndex, value, accessedBlocks,maxNumBlocks, numberOfBlocksUsed,\n secondValue)\n \n # se contem o indice 4 é possivel que tenha mais registros no proximo bloco\n if((4 in indexesFoundedBlocks) and numberBlock<maxNumBlocks):\n getNearBlocksBetween(numberBlock+1, foundedBlocks, \n columnIndex, value, accessedBlocks, maxNumBlocks, numberOfBlocksUsed,\n secondValue)\n\n \n\n\n#colName = Desired column of the query (SEE LISTS ABOVE FOR COL NAMES)\n#values = multiple values\n#SQL Format: Select * from OrderedTable WHERE colName in [value1,value2...]\n#Retorna lita de registros em que colName tenha um dos valores da lista values\ndef OrderedSelectBetweenTwoValues(colName, firstValue, secondValue):\n numberOfBlocksUsed = 0 #conta o número de vezes que \"acessamos a memória do disco\"\n \n if colName not in aux.colHeadersList:\n print(\"Error: Column name not found in relation.\")\n return\n columnIndex1 = aux.colHeadersList.index(colName) #pega o indice referente àquela coluna\n\n\n print(\"Running query: \")\n print(\"SELECT * FROM TB_ORDERED WHERE \" + colName + \" BETWEEN \" + str(firstValue) + \n \" AND \" + str(secondValue) + \";\")\n\n # Obtem o numero de blocos do BD\n numBlocks = math.ceil(aux.GetNumRegistries(aux.OrderedHeadPath, aux.heapHeadSize-1)/aux.blockSize)\n\n # Verifica se o campo procurado eh equivalente ao campo pelo qual o banco foi ordenado\n # Caso seja, utilizar busca binaria\n if(columnIndex1 == numColToOrder):\n blocksFounded,numberOfBlocksUsed = binarySearchBetween(columnIndex1, firstValue,\n numBlocks, secondValue)\n \n # Senao realizar select linear\n else:\n #FALTANDO implementação na heap\n blocksFounded = None\n\n 
if(blocksFounded and blocksFounded != -1):\n print(\"Registro(s) encontrado(s): \")\n print(blocksFounded)\n else:\n print(\"Registro não encontrado\")\n\n print(\"Fim da busca.\")\n print(\"Número de blocos varridos: \" + str(numberOfBlocksUsed))\n\n\n###################################################################################\n########################## ORDERED - INSERT FUNCTIONS #############################\n###################################################################################\n\n\n\n\n###################################################################################\n########################## ORDERED - DELETE FUNCTIONS #############################\n###################################################################################\n# Method to insert a given record into the file. The record will be inserted at the position/line specified(0-based)\ndef InsertLineIntoFile(record, location, filepath):\n # Open the file\n for line in fileinput.input(filepath, inplace=1):\n # Check line number\n linenum = fileinput.lineno()\n # If we are in our desired location, append the new record to the current one. Else, just remove the line-ending character\n if linenum == location:\n line = line + record\n else:\n line = line.rstrip()\n # write line in the output file\n print(line)\n\n\n# Method to delete a record from the file. (0-based)\ndef DeleteLineFromFile(registry, filepath):\n # Open the file\n for line in fileinput.FileInput(filepath, inplace=1):\n # Check line number\n registryLine = [aux.CleanRegistry(line)]\n # If we are in our desired location, append the new record to the current one. Else, just remove the line-ending character\n if registry == registryLine:\n continue\n else:\n line = line.rstrip()\n # write line in the output file\n print(line)\n \n\n#singleRecordDeletion = Retorna o PRIMEIRO registro onde 'colName' = à value se True\ndef OrderdDeleteSingleRecord(colName, value, singleRecordSelection = True):\n if colName not in aux.colHeadersList:\n print(\"Error: Column name not found in relation.\")\n return\n columnIndex = aux.colHeadersList.index(colName) #pega o indice referente àquela coluna\n\n print(\"Running query: \")\n print(\"Delete * FROM TB_ORDERED WHERE \" + colName + \" = \" + str(value) + \" limit 1;\")\n\n # Obtem o numero de blocos do BD\n numBlocks = math.ceil(aux.GetNumRegistries(aux.OrderedHeadPath, aux.heapHeadSize-1)/aux.blockSize)\n\n # Verifica se o campo procurado eh equivalente ao campo pelo qual o banco foi ordenado\n # Caso seja, utilizar busca binaria\n if(columnIndex == numColToOrder): \n blockFounded,numberOfBlocksUsed = binarySearch(columnIndex, value,\n numBlocks, singleRecordSelection)\n # Senao realizar select linear\n else:\n blockFounded,numberOfBlocksUsed = DeleteRecord(colName, value, singleRecordSelection) #fazer select normal\n\n if(blockFounded):\n print(\"Registro encontrado: \")\n print(blockFounded)\n DeleteLineFromFile(blockFounded, aux.OrderedPath)\n print(\"Registro Deletado\")\n \n else:\n print(\"Registro não encontrado\")\n \n print(\"Número de blocos varridos: \" + str(numberOfBlocksUsed))\n\n\n\n###################################################################################\n################################### MAIN ##########################################\n###################################################################################\n\n\nCreateOrderedBD(aux.RJPath)\n#numColToOrder = 5\n\n#print('-----')\n\n#blocks = binarySearch(1)\n\n#query = [\"86551337791\",\"01093643765\"]\n#query = 
[\"RJ\",\"SP\"]\nquery = \"<EMAIL>\"\n#p = [block if \"09650185712\" in block[numColToOrder] else None for block in blocks ]\n#q = next((block for block in blocks if query in block[numColToOrder]), None)\n\n#print(q)\n\n#0, len(arr)-1,\n#result = binarySearch(numColToOrder, query, 4)\n#print (result)\n\nOrderedSelectSingleRecord('NM_EMAIL', query)\n#OrderedSelectWithTwoFields(aux.colHeadersList[5], \"<EMAIL>\", \n# aux.colHeadersList[0], \"07833694762\")\n#OrderedSelectWithTwoFields(aux.colHeadersList[5], \"<EMAIL>\", \n# aux.colHeadersList[4], \"VINICIUS DE FREITAS DOS SANTOS\")\n\n#OrderedSelectBetweenTwoValues(aux.colHeadersList[numColToOrder],\"01093643765\",\"07833694762\" )\n#OrderedSelectBetweenTwoValues(aux.colHeadersList[numColToOrder],\"<EMAIL>\",\n# \"<EMAIL>\" )\n\n#OrderdDeleteSingleRecord(aux.colHeadersList[5], \"<EMAIL>\")\n\n#print(\"----\")\n#print(aux.FetchBlock(aux.OrderedPath,0 ))\n#DeleteLineFromFile(2, OrderedPath)\n", "id": "6442947", "language": "Python", "matching_score": 6.417768955230713, "max_stars_count": 0, "path": "Ordered.py" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 12 19:03:55 2018\n\n@author: BRC\n\"\"\"\n\nimport os\nimport csv\nimport time\nimport datetime\nimport fileinput\n#import unidecode\n\n\n\n\n###################################################################################\n############################ PATH AND DATASET VARIABLES ###########################\n###################################################################################\n\n\n#Caminho dos arquivos de candidatos\nCandidatesFilePath = \"data/consulta_cand_2018_\"\nSPPath = CandidatesFilePath + \"SP.csv\"\nRJPath = CandidatesFilePath + \"RJ.csv\"\nMGPath = CandidatesFilePath + \"MG.csv\"\nRSPath = CandidatesFilePath + \"RS.csv\"\nDFPath = CandidatesFilePath + \"DF.csv\"\nPRPath = CandidatesFilePath + \"PR.csv\"\n\n\n#caminho dos arquivos \nBDFilePath = \"BD/\"\nHeapPath = BDFilePath + \"HeapBD.txt\"\nHeapHeadPath = BDFilePath + \"HeapHEAD.txt\"\nOrderedPath = BDFilePath + \"OrderedBD.txt\"\nOrderedHeadPath = BDFilePath + \"OrderedHEAD.txt\"\nHashPath = BDFilePath + \"HashBD.txt\"\nHashHeadPath = BDFilePath+ \"HashHEAD.txt\"\n\n#caracter usado como enchimento de valores nao-cheios de um registro\npaddingCharacter = \"#\"\n#tamanho de um registro(medido em caracteres)\nregistrySize = 153+1 #153 chars + escape key\n\n\n#Tamanho de um bloco de memoria (medido em registros)\nblockSize = 5\n\n#Tamanho do head do heap(em linhas)\nheapHeadSize = 5\n\n#Tamanho do head da lista ordenada(em linhas)\norderedHeadSize = 5\n\n#Tamanho do head do hash(em linhas)\nhashHeadSize = 5\n\n#Tamanho do bucket do hash (em blocos)\nbucketSize = 10\n\n#Quantidade máxima de buckets\nnumberOfBuckets = 220\n\n#Tamanhos maximos de cada atributo(for reference mostly)\ndicColunaTamanhoMax = {\n\t\"K\": 2,\n\t\"N\": 2,\n\t\"Q\": 5,\n\t\"R\": 70,\n\t\"U\": 11, #CPF, PK\n\t\"V\": 43,\n\t\"AB\": 2,\n\t\"AM\": 10,\n\t\"AP\": 1,\n\t\"AR\": 1,\n\t\"AT\": 1,\n\t\"AV\": 2, #vem com um 0 antes, aparentemente\n\t\"AX\": 3\n}\n\ndicColHeaderType = {\n \"CPF\": \"INTEGER(11)\",\n \"SG_UF\": \"VARCHAR(2)\",\n \"CD_CARGO\": \"INTEGER(2)\",\n 'NR_CANDIDATO': \"INTEGER(5)\", \n 'NM_CANDIDATO': \"VARCHAR(70)\", \n 'NM_EMAIL': \"VARCHAR(43)\",\n 'NR_PARTIDO': \"INTEGER(2)\", \n 'DT_NASCIMENTO': \"DATE\", \n 'CD_GENERO': \"INTEGER(1)\", \n 'CD_GRAU_INSTRUCAO': \"INTEGER(1)\", \n 'CD_ESTADO_CIVIL': \"INTEGER(1)\", \n 'CD_COR_RACA': \"INTEGER(2)\",\n 'CD_OCUPACAO': \"VARCHAR(3)\"\n}\n\n\n\n#Baseado no 
dic acima(CPF JOGADO PARA A PRIMEIRA POSICAO)\nmaxColSizesList = [11,2,2,5,70,43,2,10,1,1,1,2,3]\n\n#Baseado no dic acima(e na ordem da lista acima, com CPF no início)\ncolHeadersList = [\"CPF\", \"SG_UF\", \"CD_CARGO\", 'NR_CANDIDATO', 'NM_CANDIDATO', 'NM_EMAIL', 'NR_PARTIDO', 'DT_NASCIMENTO', 'CD_GENERO', 'CD_GRAU_INSTRUCAO', 'CD_ESTADO_CIVIL', 'CD_COR_RACA', 'CD_OCUPACAO']\n\n#Baseado nos indices acima\nrelevantColsList = [10, 13, 16, 17, 20, 21, 27, 38, 41, 43, 45, 47, 49]\n\n\n#retorna se e uma coluna relevante dentro do Excel(baseado nas colunas escolhidas acima)\ndef IsRelevantRow(rowNumber):\n #\n return rowNumber in relevantColsList\n\n#calcula o tamanho do registro novamente, caso necessario\ndef CalculateRegistrySize():\n sum = 0\n for key, value in dicColunaTamanhoMax:\n sum+=value\n return sum\n\n\n#Preenche com 0's a esquerda CPFs que nao possuem seu tamanho totalmente preenchido\ndef FillCPF(cpf):\n return cpf.zfill(maxColSizesList[0])#tamanho de CPF e fixo\n\n#Completa a string com caracter escolhido para padding(p/ manter tamanho fixo)\ndef PadString(stringToPad, totalSizeOfField):\n tmp = stringToPad\n for i in range (totalSizeOfField - len(stringToPad)):\n tmp+=paddingCharacter\n return tmp \n\n\n#Le os primeiros N registros e retorna os mesmos já preparados para inserção\ndef ReadFirstRegistriesFromCSV(CSVFilePath, numberOfRegistriesToRead):\n #Lê do CSV e preenche os registros com enchimento para criar o tamanho fixo\n valuesToLoad = PadRegistries(ReadFromFile(CSVFilePath))\n \n for i in range(numberOfRegistriesToRead):\n string = \"\"\n for j in range(len(maxColSizesList)):\n string+=valuesToLoad[i][j]\n valuesToLoad[i]=string\n return valuesToLoad[:numberOfRegistriesToRead]\n\n #Le os primeiros N registros e retorna os mesmos já preparados para inserção\ndef ReadFirstRegistriesFromCSVList(CSVFilePath, numberOfRegistriesToRead):\n #Lê do CSV e preenche os registros com enchimento para criar o tamanho fixo\n valuesToLoad = PadRegistries(ReadFromFile(CSVFilePath))\n return valuesToLoad[:numberOfRegistriesToRead]\n\n#Lê o arquivo desejado e retorna uma lista com todos os registros relevantes do mesmo\n#lista retornada sera usada para construir nossos proprios arquivos\ndef ReadFromFile(csvFilePath):\n lineCount = 0\n registros = []\n with open(csvFilePath, 'r', encoding=\"ISO-8859-1\") as file:\n rows = csv.reader(file, delimiter = \";\")\n for row in rows:\n if lineCount == 0 :#headers\n lineCount+=1\n else:\n finalRow = []\n \n for i in range(len(row)):\n if IsRelevantRow(i):\n #Se for a coluna do CPF, coloca o mesmo no inicio da lista\n if i == relevantColsList[4]:\n finalRow.insert(0, FillCPF(row[i]))\n else:\n finalRow += [(row[i])]\n print(finalRow)\n if finalRow[0] == \"\":\n return registros#chegou numa linha vazia, fim do arquivo\n registros +=[finalRow]\n lineCount+=1\n #if lineCount == 5000: return registros #limita tamanho p/ testes\n return registros\n\n#pega uma lista de registros(matriz bidimensional) e para cada elemento, preenche os espaços faltantes\ndef PadRegistries(listOfRegistries):\n for i in range(len(listOfRegistries)):\n for j in range(len(listOfRegistries[i])):\n listOfRegistries[i][j] = PadString(listOfRegistries[i][j], maxColSizesList[j])\n return listOfRegistries\n\n#Retira o padding dos campos de um registro, e retorna o registro em formato de lista\n#registryString = registro a ser limpo, em formato de string\ndef CleanRegistry(registryString):\n newRegistry = []\n offset = 0\n for i in range(len(maxColSizesList)):\n 
#print(registryString[offset:offset+maxColSizesList[i]])\n newRegistry += [registryString[offset:offset+maxColSizesList[i]].replace(paddingCharacter, \"\").replace(\"\\n\", \"\")]\n \n offset+=maxColSizesList[i]\n return newRegistry\n\n# Method to insert a given record into the file. The record will be inserted at the position/line specified(0-based)\ndef InsertLineIntoFile(record, location, filepath):\n # Open the file\n for line in fileinput.input(filepath, inplace=1):\n # Check line number\n linenum = fileinput.lineno()\n # If we are in our desired location, append the new record to the current one. Else, just remove the line-ending character\n if linenum == location:\n line = line + record\n else:\n line = line.rstrip()\n # write line in the output file\n print(line)\n\n\n# Method to delete a record from the file. (0-based)\ndef DeleteLineFromFile(location, filepath):\n # Open the file\n for line in fileinput.input(filepath, inplace=1):\n # Check line number\n linenum = fileinput.lineno()\n # If we are in our desired location, append the new record to the current one. Else, just remove the line-ending character\n if linenum == location+1:\n continue\n else:\n line = line.rstrip()\n # write line in the output file\n print(line)\n\n\ndef MakeHEADString(headType, numRegistries):\n string = \"File structure: \" + headType + \"\\n\"\n string += \"Creation: \" + datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S') + \"\\n\"\n string += \"Last modification: \" + datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S') + \"\\n\"\n string += \"Schema: \"\n for key, value in dicColHeaderType.items():\n string += key + \"-\" + value + \"|\"\n string += \"\\nNumber of registries: \" + str(numRegistries) + \"\\n\"\n \n return string\n\n\ndef MakeHEAD(headPath, headType, numRegistries):\n if os.path.exists(headPath):\n os.remove(headPath)\n file = open(headPath, 'a')\n string = \"File structure: \" + headType + \"\\n\"\n string += \"Creation: \" + datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S') + \"\\n\"\n string += \"Last modification: \" + datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S') + \"\\n\"\n string += \"Schema: \"\n for key, value in dicColHeaderType.items():\n string += key + \"-\" + value + \"|\"\n string += \"\\nNumber of registries: \" + str(numRegistries) + \"\\n\"\n file.write(string)\n #return string\n\n\n#Updates de HEAD File with new timestamp and current number of Registries\ndef UpdateHEADFile(headPath, headType, numRegistries):\n if os.path.exists(headPath):\n file = open(headPath, 'r')\n \n headContent = file.readlines()\n #print(headContent)\n headContent\n file.close()\n os.remove(headPath)\n \n #recria ela com as alteracoes\n file = open(headPath, 'a')\n file.write(headContent[0])\n file.write(headContent[1])\n file.write(\"Last modification: \" + datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S') + \"\\n\")\n file.write(headContent[3])\n file.write(\"Number of registries: \" + str(numRegistries) + \"\\n\")\n else:\n #Doesn't exist, create it\n MakeHEAD(headPath, headType, numRegistries)\n\n\n#Gets number of registries from HEAD file\ndef GetNumRegistries(DBHeadFilePath, headSize):\n #posição de início de leitura dos dados\n #cursorBegin = startingR\n with open(DBHeadFilePath, 'r') as file:\n for i in range(headSize-1):\n file.readline()\n return (int(file.readline().split(\"Number of registries: \")[1]))\n\n\n#StartingRegistry = index do registro inicial a 
ser buscado (0-based)\ndef FetchBlock(DBFilePath, startingRegistry):\n #posicao de inicio de leitura dos dados\n #TODO\n #cursorBegin = startingR\n block = []\n with open(DBFilePath, 'r') as file:\n #Pula o HEAD(UPDATE: HEAD is in another cast....file)\n #for i in range(heapHeadSize):\n # file.readline()#HEAD possui tamanho variável, então pulamos a linha inteira\n #Em termos de BD, seria o análogo à buscar o separador de registros, nesse caso, '\\n'\n \n #Em seguida, move o ponteiro do arquivo para a posição correta(offset)\n for i in range(registrySize*startingRegistry):\n c = file.read(1) #vamos de 1 em 1 char para não jogar tudo de uma vez na memória\n \n #Após isso, faz um seek no número de blocos até preencher o bloco(ou acabar o arquivo)\n \n for i in range(blockSize):\n registry = \"\"\n for j in range(registrySize):\n c = file.read(1)\n #print(c)\n if c == \"\": \n #print(\"FIM DO ARQUIVO\")\n return block\n registry+=c\n #print(\"Current registry: \"+registry)\n block += [CleanRegistry(registry)]\n return block\n\n\n#StartingRegistry = index do registro inicial a ser buscado (0-based)\ndef FetchBlock2(DBFilePath, startingRegistry, registryCustomSize):\n #posicao de inicio de leitura dos dados\n #TODO\n #cursorBegin = startingR\n block = []\n with open(DBFilePath, 'r') as file:\n #Pula o HEAD(UPDATE: HEAD is in another cast....file)\n #for i in range(heapHeadSize):\n # file.readline()#HEAD possui tamanho variável, então pulamos a linha inteira\n #Em termos de BD, seria o análogo à buscar o separador de registros, nesse caso, '\\n'\n \n #Em seguida, move o ponteiro do arquivo para a posição correta(offset)\n for i in range(registryCustomSize*startingRegistry):\n c = file.read(1) #vamos de 1 em 1 char para não jogar tudo de uma vez na memória\n \n #Após isso, faz um seek no número de blocos até preencher o bloco(ou acabar o arquivo)\n \n for i in range(blockSize):\n registry = \"\"\n for j in range(registryCustomSize):\n c = file.read(1)\n #print(c)\n if c == \"\": \n #print(\"FIM DO ARQUIVO\")\n return block\n registry+=c\n #print(\"Current registry: \"+registry)\n block += [CleanRegistry(registry)]\n return block\n", "id": "4371349", "language": "Python", "matching_score": 1.9613854885101318, "max_stars_count": 0, "path": "DBHelperFunctions.py" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 3 23:18:55 2018\n\n@author: BConfessor\n\"\"\"\n\nfrom BTrees.OOBTree import OOBTree\nimport datetime as dt\nimport TableGen as tg\nimport math as m\n\n\n\n\n#Tamanho de um bloco de memoria (medido em registros)\nblockSize = 5\n\n\n\n\n\n\n\n#Realiza Join com loops aninhados\n#OrderedPK: Define se a tabela de PK (tabela da esquerda) será ordenada ou não(por default, ordenada)\n#OrderedFK: Define se a tabela de FK (tabela da direita) será ordenada ou não(por default, desordenada)\ndef JoinNestedLoop(orderedPK = True, orderedFK = False):\n start = dt.datetime.now()#Para medirmos tempo de execução //(necessário?)\n #matches = 0\n \n blocksFetched = 0 #Contador de blocos, diz quantos blocos de X registros foram \"acessados\" da memória\n \n \n #Para a tabela da esquerda, acessaremos TODOS os registros ordenadamente, então já podemos saber quantos blocos de memória acessaremos\n #Simplesmente dividimos seu número de registros pelo tamanho de um bloco\n blocksFetched += m.ceil(float(tg.numberOfRegistries)/blockSize)#Ceiling pois pode sobrar espaço em algum bloco\n \n print(\"Current blocks fetched in left table: \" + str(blocksFetched))\n #Arquivos que usaremos no JOIN\n PKFile 
= \"\"\n FKFile = \"\"\n if(orderedPK):\n PKFile = tg.PKOrderedFileName\n else:\n PKFile = tg.PKUnorderedFileName\n \n if(orderedFK):\n FKFile = tg.FKOrderedFileName\n else:\n FKFile = tg.FKUnorderedFileName\n \n #Abrimos ambas as tabelas \n with open(FKFile) as rightTable:\n with open(PKFile) as leftTable:\n leftRegistryIndex = 1\n #Para cada registro na tabela PK\n for leftRegistries in leftTable:\n lRegistry = leftRegistries.split(tg.fieldSeparator)\n if (leftRegistryIndex%100==0):\n print(\"Looking for match for left record \" + str(leftRegistryIndex))\n \n #conta os registros acessados na tabela direita nessa iteração\n rightTableRegistriesCounter = 0\n #Faremos a iteração pela tabela da direita(FK), fixando a linha da tabela da esquerda(PK)\n for rightRegistries in rightTable:\n rightTableRegistriesCounter+=1\n #print(\"Checking right record \"+str(rightTableRegistriesCounter))\n rRegistry = rightRegistries.split(tg.fieldSeparator)\n if lRegistry[0] == rRegistry[1]: #Encontramos um match entre os registros das tabelas\n #Adiciona os blocos coletados da memória durante essa iteração por parte da tabela da direita\n blocksFetched += m.ceil(float(rightTableRegistriesCounter)/blockSize)\n print(\"Found match on right registry \" + str(rightTableRegistriesCounter))\n #joined = l1 + l2\n rightTable.seek(0) #retornamos o ponteiro da tabela da direita para o seu início para uma nova iteração\n break\n leftRegistryIndex+=1\n end = dt.datetime.now() \n \n #print(\"Total joined registers: \" + str(matches))\n print(\"Tempo de execução: \" + str((end-start).total_seconds()) + \"s\")\n print(\"Número total de blocos acessados: \" + str(blocksFetched))\n\n\n\n\ndef MergeJoin(orderedPK = True, orderedFK = True):\n start = dt.datetime.now()\n blocksFetched = 0 #Contador de blocos, diz quantos blocos de X registros foram \"acessados\" da memória\n leftRegistriesCount = 1\n rightRegistriesCount = 1\n matches = 0\n \n #Arquivos que usaremos no JOIN\n PKFile = \"\"\n FKFile = \"\"\n if(orderedPK):\n PKFile = tg.PKOrderedFileName\n else:\n PKFile = tg.PKUnorderedFileName\n \n if(orderedFK):\n FKFile = tg.FKOrderedFileName\n else:\n FKFile = tg.FKUnorderedFileName\n with open(FKFile) as rightTable:\n with open(PKFile) as leftTable:\n leftRecord = leftTable.readline().split(tg.fieldSeparator)\n rightRecord = rightTable.readline().split(tg.fieldSeparator)\n while True:\n if leftRecord[0] == rightRecord[1]:\n matches += 1\n joined = leftRecord + rightRecord\n if int(leftRecord[0]) < int(rightRecord[1]):\n ln = leftTable.readline()\n if ln == \"\":\n break\n leftRegistriesCount += 1\n leftRecord = ln.split(tg.fieldSeparator)\n else: \n ln = rightTable.readline()\n if ln == \"\":\n break\n rightRegistriesCount += 1\n rightRecord = ln.split(tg.fieldSeparator)\n end = dt.datetime.now() \n\n print (\"Número de blocos acessados: \" + str(m.ceil(leftRegistriesCount/blockSize) + m.ceil(leftRegistriesCount/blockSize)))\n print (\"Total joined registers: \" + str(matches))\n print (\"Tempo de execução: \" + str((end-start).total_seconds()) + \"s\")\n\n\n#Tanto PK quanto FK são consideradas falsas por default;\ndef HashJoin(orderedPK = False, orderedFK = False):\n lineSize = tg.registrySize + 2 #Tamanho de uma linha no arquivo linha, inclui o pulo de linha(funciona com 2 chars em Windows)\n start = dt.datetime.now()\n blocksFetched = 0 #blocos de memória vistos\n hashTable = {} #Tabela de hash a ser usada\n\n print(\"Line size: \" + str(lineSize))\n #Arquivos que usaremos no JOIN\n PKFile = \"\"\n FKFile = \"\"\n 
if(orderedPK):\n PKFile = tg.PKOrderedFileName\n else:\n PKFile = tg.PKUnorderedFileName\n \n if(orderedFK):\n FKFile = tg.FKOrderedFileName\n else:\n FKFile = tg.FKUnorderedFileName\n \n #Para a tabela da esquerda, acessaremos TODOS os registros ordenadamente, então já podemos saber quantos blocos de memória acessaremos\n #Simplesmente dividimos seu número de registros pelo tamanho de um bloco\n blocksFetched += m.ceil(float(tg.numberOfRegistries)/blockSize)#Ceiling pois pode sobrar espaço em algum bloco\n \n \n \n lineCounter = 0 #anota as linhas de cada registro\n with open (FKFile) as hashTableBuilderFile:\n for registries in hashTableBuilderFile:\n # \"primary_key|foreign_key|data\"\n registry = registries.split(tg.fieldSeparator)\n hashTable[registry[1]] = lineCounter #para cada registro, criamos um dicionário associando sua chave primária à sua linha\n lineCounter += 1 \n \n \n matches = 0\n #abrimos as duas tabelas\n with open(FKFile) as rightTable:\n with open(PKFile) as leftTable:\n #Para cada linha na tabela PK, pegamos sua chave e usamos para encontrar a posição da respectiva linha na tabela FK, a partir da tabela hash\n for line in leftTable:\n #print(\"\\n\\nLeft line: \" + line) \n leftRecord = line.split(tg.fieldSeparator)\n position = hashTable[leftRecord[0]] * lineSize #vai para a posição absoluta do início do respectivo registro em FK\n #print(\"Position to seek: \" + str(position))\n rightTable.seek(position, 0)#vai para a posição absoluta do início do respectivo registro em FK\n rightLine = rightTable.read(tg.registrySize)\n #print(\"Right line: \" + rightLine)\n rightRecord = rightLine.split(tg.fieldSeparator)#na teoria, após o cálculo da posição absoluta, ...\n blocksFetched+=1 #... o sistema deve buscar o registro em memória, logo isso ocasiona um bloco a mais coletado de memória(ainda que com um só registro)\n rightTable.seek(0, 0)\n if leftRecord[0] == rightRecord[1]:\n matches += 1\n joined = leftRecord + rightRecord\n \n else:\n print(\"ERRO FATAL. 
SEMPRE DEVERIA HAVER UM MATCH.\")\n return \n end = dt.datetime.now() \n \n print(\"Registros com match: \" + str(matches))\n print(\"Número total de blocos acessados: \" + str(blocksFetched))\n print(\"Tempo de execução: \" + str((end-start).total_seconds()) + \"s\")\n\n\n\ndef BTreeJoin(orderedPK = False, orderedFK = False):\n \n #Arquivos que usaremos no JOIN\n PKFile = \"\"\n FKFile = \"\"\n if(orderedPK):\n PKFile = tg.PKOrderedFileName\n else:\n PKFile = tg.PKUnorderedFileName\n \n if(orderedFK):\n FKFile = tg.FKOrderedFileName\n else:\n FKFile = tg.FKUnorderedFileName\n \n #Carregando o indice da BTree\n btree = OOBTree() \n\n lineCounter = 0\n with open (FKFile) as f:\n for line in f:\n l = line.split(tg.fieldSeparator)\n btree.update({l[1]:lineCounter})\n lineCounter += 1\n\n # checking the size\n print (\"Btree size: \" + str(len(btree)))\n\n lineSize = tg.registrySize + 2\n start = dt.datetime.now()\n matches = 0\n with open(FKFile) as right:\n with open(PKFile) as left:\n for line in left:\n leftRegistry = line.split(tg.fieldSeparator)\n position = btree[leftRegistry[0]] * lineSize\n right.seek(position)\n rightRegistry = right.read(tg.registrySize).split(tg.fieldSeparator)\n if leftRegistry[0] == rightRegistry[1]:\n matches += 1\n joined = leftRegistry + rightRegistry\n else:\n print (\"This should never happen!\")\n end = dt.datetime.now() \n \n print (\"Total joined registers: \" + str(matches))\n print (\"Time taken: \" + str((end-start).total_seconds()) + \"s\")\n\n\ndef main():\n #JoinNestedLoop()\n #MergeJoin()\n #HashJoin()\n BTreeJoin()\n \nmain()", "id": "3889921", "language": "Python", "matching_score": 4.313781261444092, "max_stars_count": 0, "path": "Lista3/Joins.py" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 2 16:18:13 2018\n\n@author: BConfessor\n\"\"\"\n\n#Script de geração de arquivos, cria os seguintes:\n#tabelas PK (ordenada e desordenada) (só possuem chave primária e um atributo de enchimento)\n#tabelas FK (ordenada e desordenada) (possui chave primária, chave estrangeira (= PK das tabelas PK) e atributo de enchimento)\n#registros possuem tamanho fixo e relações são 1:1\n\nimport string\nfrom random import choice\nfrom random import shuffle\n\noutputTableName = \"Tabela\" #nome dos arquivos a serem gerados\noutputTableExtension = \"txt\" #extensão dos arquivos\nfieldSeparator = \";\" #delimitador de atributos\nregistrySize = 1024 #tamanho de cada linha(record) em bytes/chars\nnumberOfRegistries = 10000 #numero de linhas(records) em cada tabela\n\n\n\nPKOrderedFileName = outputTableName + \"OrdenadaPK.\" + outputTableExtension\nPKUnorderedFileName = outputTableName + \"DesordenadaPK.\" + outputTableExtension\nFKOrderedFileName = outputTableName + \"OrdenadaFK.\" + outputTableExtension\nFKUnorderedFileName = outputTableName + \"DesordenadaFK.\" + outputTableExtension\n\n\n\nprint(\"Gerando chaves...\")\n\n#gera chaves primárias para as tabelas PK\nPKTableKeyList = list(range(numberOfRegistries)) #converte para lista pois range em Python3 é só iterador\n\n#gera chaves primárias para as tabelas FK\nFKTableKeyList = list(range(numberOfRegistries,2*numberOfRegistries)) #intervalo distinto para diferenciar das chaves PK\n\n\n\n\nprint(\"Gerando relações...\")\n\n#cria as relações entre registros das tabelas PK e FK\nPK_FK_RelationshipDic = {} #cria um dicionario de relações entre chaves primárias das tabelas PK e chaves primárias das tabelas FK\ntempList = FKTableKeyList\nfor k1 in PKTableKeyList: #para cada chave primária de PK...\n 
PK_FK_RelationshipDic[k1] = tempList.pop(tempList.index(choice(tempList))) #...escolhe aleatoriamente uma chave primária de FK e cria a relação\n\n\n\n\nprint(\"Gerando registros de PK...\")\n\n#gera os registros das tabelas PK\nPKRecordsList = []\nfor k1 in PKTableKeyList:\n temp = str(k1) #começa a escrever o registro...\n temp += fieldSeparator #..., separa o atributo...\n temp += ''.join(choice(string.ascii_uppercase + string.digits) for _ in range(registrySize - len(temp))) #... e por fim preenche o resto do registro com enchimento\n PKRecordsList.append(temp)\n\n#mesmo funcionamento para os registros de FK\n\nprint(\"Gerando registros de FK...\")\n\n#gera os registros das tabelas FK\nFKRecordsList = []\nfor k1 in PKTableKeyList:\n temp = str(PK_FK_RelationshipDic[k1])\n temp += fieldSeparator\n temp += str(k1)\n temp += fieldSeparator\n temp += ''.join(choice(string.ascii_uppercase + string.digits) for _ in range(registrySize - len(temp)))\n FKRecordsList.append(temp)\n\n\n\n#gera a tabela PK ordenada \nprint(\"Criando \" + PKOrderedFileName)\nwith open(PKOrderedFileName, \"w\") as f:\n for temp in PKRecordsList:\n f.write(temp + \"\\n\")\n\n\n\n#embaralhamos os registros de PK para criar a tabela desordenada do mesmo\nshuffle(PKRecordsList)\n\nprint(\"Criando \" + PKUnorderedFileName)\nwith open(PKUnorderedFileName, \"w\") as f:\n for temp in PKRecordsList:\n f.write(temp + \"\\n\") \n \n \n \n \n \n \n#agora, a tabela FK Ordenada\nprint(\"Criando \" + FKOrderedFileName)\nwith open(FKOrderedFileName, \"w\") as f:\n for temp in FKRecordsList:\n f.write(temp + \"\\n\")\n\n#embaralhamos FK para criar sua tabela desordenada\nshuffle(FKRecordsList)\n\nprint (\"Criando \" + FKUnorderedFileName)\nwith open(FKUnorderedFileName, \"w\") as f:\n for temp in FKRecordsList:\n f.write(temp + \"\\n\")\n\nprint(\"Finalizado.\")", "id": "552151", "language": "Python", "matching_score": 2.6418135166168213, "max_stars_count": 0, "path": "Lista3/TableGen.py" } ]
4.313781
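Editorial note: the record above closes a repository whose Joins.py walks through nested-loop, merge, hash, and B-tree joins over fixed-width text files. As a hedged illustration only — the function name, table names, and sample rows below are invented and are not part of that repository — this minimal sketch shows the build/probe idea behind an in-memory hash join:

def hash_join(left_rows, right_rows, left_key=0, right_key=1):
    # Build phase: index the right-hand (FK) rows by their join key.
    index = {}
    for row in right_rows:
        index.setdefault(row[right_key], []).append(row)
    # Probe phase: look each left-hand (PK) key up in the index.
    joined = []
    for row in left_rows:
        for match in index.get(row[left_key], []):
            joined.append(row + match)
    return joined

pk_table = [(1, "alice"), (2, "bob")]
fk_table = [(10, 1, "x"), (11, 2, "y"), (12, 1, "z")]
print(hash_join(pk_table, fk_table))
# -> [(1, 'alice', 10, 1, 'x'), (1, 'alice', 12, 1, 'z'), (2, 'bob', 11, 2, 'y')]
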
SwagatoMondal
[ { "content": "from django.db import models\n\n\n# Create your models here.\nclass Product(models.Model):\n prod_no = models.IntegerField()\n prod_name = models.CharField(max_length=50)\n prod_price = models.FloatField()\n prod_qty = models.IntegerField()\n\n\nclass User(models.Model):\n name = models.CharField(max_length=30)\n email = models.CharField(max_length=50)\n password = models.CharField(max_length=20)\n\n\n", "id": "9323970", "language": "Python", "matching_score": 3.075748920440674, "max_stars_count": 0, "path": "online_shop/products/models.py" }, { "content": "# Generated by Django 3.1 on 2020-08-31 09:56\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Product',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('prod_no', models.IntegerField()),\n ('prod_name', models.CharField(max_length=50)),\n ('prod_price', models.FloatField()),\n ('prod_qty', models.IntegerField()),\n ],\n ),\n migrations.CreateModel(\n name='User',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=30)),\n ('email', models.CharField(max_length=50)),\n ('password', models.CharField(max_length=20)),\n ],\n ),\n ]\n", "id": "514400", "language": "Python", "matching_score": 1, "max_stars_count": 0, "path": "online_shop/products/migrations/0001_initial.py" }, { "content": "from django.shortcuts import render, redirect\nfrom django.http import Http404, HttpResponseRedirect\nfrom .models import User\n\n\n# Create your views here.\ndef first(request):\n if request.method == 'POST':\n u_dict = request.POST\n try:\n user = User.objects.get(email=u_dict['email'], password=u_dict['password'])\n if user is None:\n raise Http404(\"User does not exist\")\n except User.DoesNotExist:\n raise Http404(\"User does not exist\")\n else:\n return second(request, name=user.name)\n return render(request, 'page1.html')\n\n\ndef second(request, name='Guest'):\n context = {\"name\": name}\n return render(request, 'page2.html', context)\n\n\ndef registration(request):\n if request.method == 'POST':\n u_dict = request.POST\n user = User(name=u_dict['name'], email=u_dict['email'], password=u_dict['password'])\n user.save()\n return second(request, name=user.name)\n return render(request, 'registration.html')\n\n\ndef users_view(request):\n # dictionary for initial data with\n # field names as keys\n context = {}\n\n # add the dictionary during initialization\n context[\"dataset\"] = User.objects.all()\n\n return render(request, \"users.html\", context)\n", "id": "11946889", "language": "Python", "matching_score": 0.0429314561188221, "max_stars_count": 0, "path": "online_shop/products/views.py" } ]
1
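Editorial note: the record above closes a small Django app whose first() view authenticates by looking a User up by email and password. The stand-in below is illustrative only — the in-memory list, helper, and sample credentials are invented and are not part of that repository — and shows the same lookup-or-None pattern without the ORM:

USERS = [
    {"name": "Alice", "email": "alice@example.com", "password": "secret"},
]

def authenticate(email, password):
    # Return the first record matching both fields, or None
    # (the view raises Http404 in the equivalent situation).
    for user in USERS:
        if user["email"] == email and user["password"] == password:
            return user
    return None

print(authenticate("alice@example.com", "secret"))  # the matching dict
print(authenticate("alice@example.com", "wrong"))   # None
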
eskildsf
[ { "content": "from kismetclient import Client as KismetClient\n \n# Connect to the Kismet server.\naddress = ('127.0.0.1', 2501)\nk = KismetClient(address)\n \n# Sets are nice because they only keep unique data.\ntry:\n from sets import Set\n clients = Set()\nexcept ImportError: # In python3 you don't have to explicitly import Sets\n clients = set()\n\ndef handle_client(client, **fields):\n # 0: Access Point, 1: Ad-Hoc, 2: Probe request, 3: Turbocell, 4: Data\n if int(fields['type']) in (0, 1):\n return None\n global clients\n l = len(clients)\n clients.add(fields['mac'])\n if l != len(clients):\n print ('-' * 80)\n print('New device detected:')\n for k, v in fields.items():\n print('%s: %s' % (k, v))\n \nk.register_handler('CLIENT', handle_client) \n \ntry:\n print('Logging wireless network clients.')\n while True:\n k.listen()\nexcept KeyboardInterrupt:\n print('Clients:')\n for i, client in enumerate(clients, start=1):\n print('%d. MAC: %s' % (i, client))\n print('%d unique clients have been seen.' % len(clients))\n", "id": "11316700", "language": "Python", "matching_score": 2.992555856704712, "max_stars_count": 1, "path": "logclients.py" }, { "content": "from kismetclient import Client as KismetClient\nfrom kismetclient import handlers\nimport logging\n\nlog = logging.getLogger('kismetclient')\nlog.addHandler(logging.StreamHandler())\nlog.setLevel(logging.DEBUG)\n\n# Connect to Kismet server\naddress = ('127.0.0.1', 2501)\nk = KismetClient(address)\n\ndef handle_ssid(client, ssid, mac):\n print 'ssid spotted: \"%s\" with mac %s' % (ssid, mac)\n\n# Register handlers that act on input from Kismet server\nk.register_handler('SSID', handle_ssid)\nk.register_handler('TRACKINFO', handlers.print_fields)\n\n# Listen, carefully\ntry:\n while True:\n k.listen()\nexcept KeyboardInterrupt:\n pprint(k.protocols)\n log.info('Exiting...')\n", "id": "12712388", "language": "Python", "matching_score": 1.483146071434021, "max_stars_count": 1, "path": "runclient.py" }, { "content": "import logging\n\nfrom kismetclient.utils import csv\nfrom kismetclient.exceptions import ServerError\n\nlog = logging.getLogger(__name__)\n\n\ndef kismet(client, version, starttime, servername, dumpfiles, uid):\n \"\"\" Handle server startup string. \"\"\"\n log.info('Server: ' +\n ' '.join([version, starttime, servername, dumpfiles, uid]))\n\n\ndef capability(client, CAPABILITY, capabilities):\n \"\"\" Register a server's default protocol capabilities. \"\"\"\n client.protocols[CAPABILITY] = csv(capabilities)\n\n\ndef protocols(client, protocols):\n \"\"\" Enumerate protocol capabilities so they can be registered. \"\"\"\n for protocol in csv(protocols):\n client.cmd('CAPABILITY', protocol)\n\n\ndef ack(client, cmdid, text):\n \"\"\" Handle ack messages in response to commands. \"\"\"\n # Simply remove from the in_progress queue\n client.in_progress.pop(cmdid)\n\n\ndef error(client, cmdid, text):\n \"\"\" Handle error messages in response to commands. \"\"\"\n cmd = client.in_progress.pop(cmdid)\n raise ServerError(cmd, text)\n\n\ndef print_fields(client, **fields):\n \"\"\" A generic handler which prints all the fields. \"\"\"\n for k, v in fields.items():\n print('%s: %s' % (k, v))\n print ('-' * 80)\n", "id": "6659543", "language": "Python", "matching_score": 1.1087727546691895, "max_stars_count": 1, "path": "kismetclient/handlers.py" } ]
1.483146
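Editorial note: the scripts in the record above register callbacks on a Kismet client and then loop on listen(). As a rough illustration only — the tiny dispatcher below is invented and is not the kismetclient API — this sketch shows the register/dispatch pattern those handlers rely on:

class TinyDispatcher:
    def __init__(self):
        self.handlers = {}

    def register_handler(self, protocol, callback):
        # Keep a list of callbacks per protocol name
        # (e.g. 'CLIENT', 'SSID', 'TRACKINFO' in the scripts above).
        self.handlers.setdefault(protocol, []).append(callback)

    def dispatch(self, protocol, **fields):
        # Invoke every callback registered for this protocol.
        for callback in self.handlers.get(protocol, []):
            callback(self, **fields)

def handle_client(client, **fields):
    print("CLIENT:", fields)

d = TinyDispatcher()
d.register_handler("CLIENT", handle_client)
d.dispatch("CLIENT", mac="aa:bb:cc:dd:ee:ff", type="2")
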
Carldeboer
[ { "content": "import random\nfrom deap import creator, base, tools, algorithms\nimport numpy as np\nargs = {'sequence_length' : 5 , 'nucleotide_frequency' :[0.25,0.25,0.25,0.25] } \nrandomizer=np.random\n\ndef random_sequence_generator(randomizer,args) :\n\treturn randomizer.choice(list('ACGT') , p=args['nucleotide_frequency'] ) \n\n\ndef fitness(individual):\n\treturn (individual.count('A')),\n\n\ndef mutation(individual, indpb):\n\tfor i in xrange(len(individual)):\n\t\tif random.random() < indpb:\n\t\t\tif individual[i]=='A' :\n\t\t\t\t\tindividual[i] = (randomizer.choice(list('CGT') , p=[args['nucleotide_frequency'][1]/(1-args['nucleotide_frequency'][0]) ,args['nucleotide_frequency'][2]/(1-args['nucleotide_frequency'][0]) ,args['nucleotide_frequency'][3]/(1-args['nucleotide_frequency'][0]) ] ) )\n\t\t\telif individual[i]=='C' :\n\t\t\t\t\tindividual[i] = (randomizer.choice(list('AGT') , p=[args['nucleotide_frequency'][0]/(1-args['nucleotide_frequency'][1]) ,args['nucleotide_frequency'][2]/(1-args['nucleotide_frequency'][1]) ,args['nucleotide_frequency'][3]/(1-args['nucleotide_frequency'][1]) ] ) )\n\t\t\telif individual[i]=='G' :\n\t\t\t\t\tindividual[i] = (randomizer.choice(list('CGT') , p=[args['nucleotide_frequency'][2]/(1-args['nucleotide_frequency'][2]) ,args['nucleotide_frequency'][1]/(1-args['nucleotide_frequency'][2]) ,args['nucleotide_frequency'][3]/(1-args['nucleotide_frequency'][2]) ] ) )\n\t\t\telif individual[i]=='T' :\n\t\t\t\t\tindividual[i] = (randomizer.choice(list('CGT') , p=[args['nucleotide_frequency'][0]/(1-args['nucleotide_frequency'][3]) ,args['nucleotide_frequency'][1]/(1-args['nucleotide_frequency'][3]) ,args['nucleotide_frequency'][2]/(1-args['nucleotide_frequency'][3]) ] ) )\n\tprint(individual)\n\tprint(individual.fitness)\n\treturn individual,\n\ncreator.create(\"FitnessMax\", base.Fitness, weights=(1.0,))\ncreator.create(\"Individual\", list , fitness=creator.FitnessMax)\n\ntoolbox = base.Toolbox()\n\ntoolbox.register(\"base\", random_sequence_generator , randomizer , args)\ntoolbox.register(\"individual\", tools.initRepeat, creator.Individual, toolbox.base, n=args['sequence_length'])\ntoolbox.register(\"population\", tools.initRepeat, list, toolbox.individual)\n\n\ntoolbox.register(\"evaluate\", fitness)\ntoolbox.register(\"mate\", tools.cxTwoPoint)\ntoolbox.register(\"mutate\", mutation, indpb=0.05)\ntoolbox.register(\"select\", tools.selTournament, tournsize=3)\n\npopulation = toolbox.population(n=300)\n\nNGEN=40\n\nfor qgen in range(NGEN):\n\toffspring = algorithms.varAnd(population, toolbox, cxpb=0.1, mutpb=0.1)\n\tfits = toolbox.map(toolbox.evaluate, offspring)\n\tfor fit, ind in zip(fits, offspring):\n\t\tind.fitness.values = fit\n\tpopulation = toolbox.select(offspring, k=len(population))\n\ntop10 = tools.selBest(population, k=10)\n\nprint top10\n", "id": "12201131", "language": "Python", "matching_score": 4.342951774597168, "max_stars_count": 0, "path": "eeshits_example.py" }, { "content": "#import random\nimport numpy as np\nfrom deap import creator, base, tools, algorithms\n#code originally from 1eesh\n\nBASEORDER = list(\"ACGT\")\n\ndef getRandomBase(randomizer,args) :\n\treturn randomizer.choice(BASEORDER , p=args['nucleotide_frequency'] ) \n\ndef fitness(motif, individual):\n\treturn (''.join(individual).count(motif)),\n\ndef mutateSequence(randomizer, args, individual):\n\tfor i in xrange(len(individual)):\n\t\tif randomizer.random() < args['mutation_frequency']:\n\t\t\tindividual[i]=getRandomBase(randomizer, args)\n\treturn individual, 
\n\n\t\ndef seqsToOHC(seqX):\n\tohcX = np.zeros((np.shape(seqX)[0],4,np.shape(seqX)[1],1))\n\tfor i in range(0,4):#bases\n\t\tfor j in range(0,np.shape(seqX)[0]):\n\t\t\tohcX[j,i,[x==BASEORDER[i] for x in seqX[j,:]],:]=1;\n\treturn (ohcX)\n\ndef\tevaluateThese(sess, seqX, args):\n\tnumSeqs=np.shape(ohcX)[0]\n\tohcX = seqsToOHC(seqX);\n\tpredY = np.zeros(numSeqs);\n\tz=0\n\twhile z < numSeqs:\n\t\tcurPredY = sess.run([predELY], feed_dict={ohcX: ohcX[z:(z+args['batch_size']),:,:,:]})\n\t\tpredY[z:(z+args['batch_size'])] = curPredY; \n\t\tz=z+args['batch_size']\n\treturn (predY);\n\ndef seqSelectionRound(sess, population, toolbox, args):\n\toffspring = algorithms.varAnd(population, toolbox, cxpb=args['cspb'], mutpb=args['mutpb'])\n\tfits = evaluateThese(sess, offspring,args);\n\tfor fit, ind in zip(fits, offspring):\n\t\tind.fitness.values = fit\n\tpopulation = toolbox.select(offspring, k=len(population))\n\treturn (population);\n\t\n\n\n\ndef runExample():\n\targs = {'sequence_length' : 110 , 'nucleotide_frequency' : [0.25,0.25,0.25,0.25], 'mutation_frequency' : 0.05 } \n\tnp.random.seed(1239571243);\n\trandomizer=np.random\n\tcreator.create(\"FitnessMax\", base.Fitness, weights=(1.0,))\n\tcreator.create(\"Individual\", list , fitness=creator.FitnessMax)\n\n\ttoolbox = base.Toolbox()\n\ttoolbox.register(\"base\", getRandomBase, randomizer, args)\n\ttoolbox.register(\"individual\", tools.initRepeat, creator.Individual, toolbox.base, n=args['sequence_length'])\n\ttoolbox.register(\"population\", tools.initRepeat, list, toolbox.individual)\n\ttoolbox.register(\"evaluate\", fitness, \"ATGC\")\n\t#toolbox.register(\"evaluate\", fitness)\n\ttoolbox.register(\"mate\", tools.cxTwoPoint)\n\ttoolbox.register(\"mutate\", mutateSequence, randomizer, args)\n\t#toolbox.register(\"mutate\", mutateSequence, args['mutation_frequency'])\n\ttoolbox.register(\"select\", tools.selTournament, tournsize=3)\n\n\tpopulation = toolbox.population(n=300)\n\tNGEN=1000\n\tfor gen in range(NGEN):\n\t\toffspring = algorithms.varAnd(population, toolbox, cxpb=0.1, mutpb=0.1)\n\t\t#offspring = algorithms.varAnd(population, toolbox, cxpb=0.1)\n\t\tfits = toolbox.map(toolbox.evaluate, offspring)\n\t\tprint(type(fits))\n\t\tfor fit, ind in zip(fits, offspring):\n\t\t\tind.fitness.values = fit\n\t\tpopulation = toolbox.select(offspring, k=len(population))\n\ttop10 = tools.selBest(population, k=10)\n\treturn(top10);\n\n", "id": "11037337", "language": "Python", "matching_score": 1.0427862405776978, "max_stars_count": 0, "path": "RegulatoryOptimizer.py" }, { "content": "#!/usr/bin/python\nimport warnings\nimport MYUTILS\nimport re\nimport sys\nimport argparse\nparser = argparse.ArgumentParser(description='Figure out the mapping of enhancer sequences to the SNP IDs and such contained within.')\nparser.add_argument('-i',dest='inFP',\tmetavar='<inFile>',help='Input file of excel', required=True);\nparser.add_argument('-o',dest='outFPre', metavar='<outFile>',help='Where to output results', required=True);\nparser.add_argument('-l',dest='logFP', metavar='<logFile>',help='Where to output errors/warnings [default=stderr]', required=False);\nparser.add_argument('-v',dest='verbose', action='count',help='Verbose output?', required=False, default=0);\n\nargs = parser.parse_args();\n\n\n\n\nif (args.logFP is not None):\n\tlogFile=MYUTILS.smartGZOpen(args.logFP,'w');\n\tsys.stderr=logFile;\n\n\n#raise Exception(\"Reached bad state=%d for '%s.%d' '%s' at line '%s'\" 
%(state,mid,ver,tfid,line));\n\nI_VARID=0;\nI_SNP=1;\nI_CHR=2;\nI_POS=3;\nI_GC=4;\nI_LEN=5;\nI_J1=6;\nI_REFA=7;\nI_ALTA=8;\nI_INCSNP=9;\nI_SEQL=10;\nI_SEQM=11;\nI_SEQR=12;\nI_SEQ=13;\nI_PARTNERIDS=14;\nI_PARTNERLOCS=15;\nI_PARTNERAS=16;\n\nsnp2partner = {}; # dict containing primary SNP -> partnerSNPs\nsnp2all = {}; # dict containing SNP -> alleles\nsnp2ref = {}; #snp ID to ref allele \nsnp2position = {};\n\ninFile=MYUTILS.smartGZOpen(args.inFP,'r');\ninFile.readline();\nfor line in inFile:\n\tif line is None or line == \"\" or line[0]==\"#\": continue\n\tdata=line.rstrip().split(\"\\t\");\n\tdata[I_SEQ]=data[I_SEQ][7:(len(data[I_SEQ])-7)];\n\tcurAltSNPs = data[I_PARTNERIDS].split(\";\")\n\tmultiAllele = re.match(\"^(.*)_[0-9]+$\",data[I_SNP])\n\tif multiAllele:\n\t\tdata[I_SNP] = multiAllele.group(1);\n\tif data[I_SNP] not in snp2partner:\n\t\tsnp2partner[data[I_SNP]]={};\n\t\tsnp2all[data[I_SNP]] ={};\n\t\tsnp2ref[data[I_SNP]] = data[I_REFA];\n\t\tsnp2position[data[I_SNP]] = data[I_POS];\n\tsnp2all[data[I_SNP]][data[I_SEQM]]=0;\n\tfor i in range(0,len(curAltSNPs)):\n\t\tif curAltSNPs[i]!=\"\" and curAltSNPs[i]!=\"-\":\n\t\t\tmultiAllele = re.match(\"^(.*)_[0-9]+$\",curAltSNPs[i])\n\t\t\tif multiAllele:\n\t\t\t\tcurAltSNPs[i] = multiAllele.group(1);\n\t\t\tsnp2partner[data[I_SNP]][curAltSNPs[i]]=0;\n\nsnpseq2allele= {}\n#go through the file again, recording any SNP_seqs that map to two different alleles (exclude these later);\ninFile.seek(0,0);\ninFile.readline();\nfor line in inFile:\n\tif line is None or line == \"\" or line[0]==\"#\": continue\n\tdata=line.rstrip().split(\"\\t\");\n\tdata[I_SEQ]=data[I_SEQ][7:(len(data[I_SEQ])-7)];\n\tmultiAllele = re.match(\"^(.*)_[0-9]+$\",data[I_SNP])\n\tif multiAllele:\n\t\tdata[I_SNP] = multiAllele.group(1);\n\tpartnerSNPs = sorted(snp2partner[data[I_SNP]].keys());\n\tcurAltSNPs = data[I_PARTNERIDS].split(\";\")\n\tcurAltGenotypes = data[I_PARTNERAS].split(\";\");\n\tfor i in range(0,len(partnerSNPs)):\n\t\tsnp_seq = partnerSNPs[i]+\"_\"+data[I_SEQ];\n\t\tif partnerSNPs[i] not in snp2ref:\n\t\t\tallele=\"NA\";\n\t\telse:\n\t\t\tallele = snp2ref[partnerSNPs[i]];\n\t\tfor j in range(0,len(curAltSNPs)):\n\t\t\tmultiAllele = re.match(\"^(.*)_[0-9]+$\",curAltSNPs[i])\n\t\t\tif multiAllele:\n\t\t\t\tcurAltSNPs[i] = multiAllele.group(1);\n\t\t\tif curAltSNPs[j]==partnerSNPs[i]: #override the previous allele if this instance is non-reference\n\t\t\t\tallele=curAltGenotypes[j];\n\t\tif snp_seq not in snpseq2allele:\n\t\t\tsnpseq2allele[snp_seq]={};\n\t\tsnpseq2allele[snp_seq][allele]=0;\n\noutFilePrimary = MYUTILS.smartGZOpen(args.outFPre+\"_byPrimary.txt.gz\",'w');\noutFileFull = MYUTILS.smartGZOpen(args.outFPre+\"_all.txt.gz\",'w');\noutFilePrimary.write(\"VarID\\tPrimarySNP\\tchr\\tpos\\tstrand\\tPrimaryAllele\\tSequence\\tPartnerSNPs\\tPartnerAlleles\\tPartnerPosition\\n\");\noutFileFull.write(\"SNP\\tchr\\tpos\\tstrand\\tPosPrimary\\tIsPrimary\\tAllele\\tSequence\\n\");\nmissingPartnerSNPs = {};\ninFile.seek(0,0);\ninFile.readline();\nfor line in inFile:\n\tif line is None or line == \"\" or line[0]==\"#\": continue\n\tdata=line.rstrip().split(\"\\t\");\n\tdata[I_SEQ]=data[I_SEQ][7:(len(data[I_SEQ])-7)];\n\tstrand = \"+\";\n\tif re.match(\"^.*_RC$\",data[I_VARID]):\n\t\tstrand=\"-\";\n\tmultiAllele = re.match(\"^(.*)_[0-9]+$\",data[I_SNP])\n\tif multiAllele:\n\t\tdata[I_SNP] = multiAllele.group(1);\n\tpartnerSNPs = sorted(snp2partner[data[I_SNP]].keys());\n\tpartnerAlleles = [];\n\tpartnerPositions = [];\n\tcurAltSNPs = 
data[I_PARTNERIDS].split(\";\")\n\tcurAltGenotypes = data[I_PARTNERAS].split(\";\");\n\toutFileFull.write(\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\"%(data[I_SNP],data[I_CHR],data[I_POS], strand, data[I_POS],\"T\", data[I_SEQM], data[I_SEQ]));\n\tfor i in reversed(range(0,len(partnerSNPs))): # throw out all the SNPs that aren't actually present in this sequence\n\t\tsnp_seq = partnerSNPs[i]+\"_\"+data[I_SEQ];\n\t\tif len(snpseq2allele[snp_seq])>1:\n\t\t\tdel snp2partner[data[I_SNP]][partnerSNPs[i]];\n\t\t\tdel partnerSNPs[i];\n\tfor i in range(0,len(partnerSNPs)):\n\t\tif partnerSNPs[i] not in snp2ref:\n\t\t\tmissingPartnerSNPs[partnerSNPs[i]]=1;\n\t\t\tpartnerAlleles.append(\"NA\"); #default is the reference\n\t\t\tpartnerPositions.append(\"NA\"); #default is the reference\n\t\telse:\n\t\t\tpartnerAlleles.append(snp2ref[partnerSNPs[i]]); #default is the reference\n\t\t\tpartnerPositions.append(snp2position[partnerSNPs[i]]); #default is the reference\n\t\tfor j in range(0,len(curAltSNPs)):\n\t\t\tmultiAllele = re.match(\"^(.*)_[0-9]+$\",curAltSNPs[i])\n\t\t\tif multiAllele:\n\t\t\t\tcurAltSNPs[i] = multiAllele.group(1);\n\t\t\tif curAltSNPs[j]==partnerSNPs[i]:\n\t\t\t\tpartnerAlleles[i]=curAltGenotypes[j];\n\t\tif partnerSNPs[i] not in snp2ref:\n\t\t\toutFileFull.write(\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\"%(partnerSNPs[i],data[I_CHR],\"NA\",strand, data[I_POS],\"F\", partnerAlleles[i], data[I_SEQ]));\n\t\telse:\n\t\t\toutFileFull.write(\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\"%(partnerSNPs[i],data[I_CHR],snp2position[partnerSNPs[i]],strand, data[I_POS],\"F\", partnerAlleles[i], data[I_SEQ]));\n\toutFilePrimary.write(\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\"%(data[I_VARID], data[I_SNP],data[I_CHR],data[I_POS],strand,data[I_SEQM],data[I_SEQ],\";\".join(partnerSNPs),\";\".join(partnerAlleles), \";\".join(partnerPositions)));\n\nsys.stderr.write(\"There were %i missing partner SNPs:\\n\"%(len(missingPartnerSNPs)));\nsys.stderr.write(\"%s\\n\"%(\"\\n\".join(missingPartnerSNPs.keys())));\ninFile.close();\noutFileFull.close();\noutFilePrimary.close();\nif (args.logFP is not None):\n\tlogFile.close();\n", "id": "7247301", "language": "Python", "matching_score": 2.0564005374908447, "max_stars_count": 4, "path": "sortOutGenotypes.py" }, { "content": "#!/home/unix/cgdeboer/bin/python3\nimport warnings\nimport MYUTILS\nimport sys\nimport argparse\nparser = argparse.ArgumentParser(description='This program is intended to quantify MPRA RNA tags given sequencing data of tags and a reference map of tags - enhancers.')\nparser.add_argument('-it',dest='inTagMap',\tmetavar='<inTagMap>',help='Input file of the mRNA tag -enhancer map', required=True);\nparser.add_argument('-iq',dest='inFastq',\tmetavar='<inFastq>',help='Input file of fastq barcodes', required=True);\nparser.add_argument('-c',dest='constT', metavar='<constT>',help='the constant region following the reads [default=None]', required=False);\nparser.add_argument('-mc',dest='mmc', metavar='<mismatchesConst>',help='the number of mismatches to allow in constant region [default=1]', required=False,default = 1);\nparser.add_argument('-mt',dest='mmt', metavar='<mismatchesTag>',help='the number of mismatches to allow in tag region [default=0]', required=False);\nparser.add_argument('-o',dest='outFPre', metavar='<outFilePre>',help='Where to output results, prefix', required=True);\nparser.add_argument('-l',dest='logFP', metavar='<logFile>',help='Where to output errors/warnings [default=stderr]', 
required=False);\nparser.add_argument('-nc',dest='noConstCheck', action='count',help='Ignore constant region matching', required=False, default=0);\nparser.add_argument('-v',dest='verbose', action='count',help='Verbose output?', required=False, default=0);\n\nargs = parser.parse_args();\nargs.mmc = int(args.mmc);\nargs.mmt = int(args.mmt);\n\nif (args.logFP is not None):\n\tlogFile=MYUTILS.smartGZOpen(args.logFP,'w');\n\tsys.stderr=logFile;\n\nsys.stderr.write(\"Compiling possible mismatches to constant region...\\n\");\n#adds mismatches to tag2tag hash\ndef addMMToTags(myHash, baseSeq, numToAdd, alphabet, ref):\n\tif numToAdd<=0:\n\t\tif baseSeq in myHash and myHash[baseSeq]!=ref: #collision\n\t\t\tif baseSeq!=myHash[baseSeq]:#other is not an exact match\n\t\t\t\tmyHash[baseSeq]=\"NA\";\n\t\telse:\t\n\t\t\tmyHash[baseSeq]=ref;\n\telse:\n\t\tfor i in range(0,len(baseSeq)):\n\t\t\tfor a in range(0,len(alphabet)):\n\t\t\t\taddMMToTags(myHash, baseSeq[0:i]+alphabet[a]+baseSeq[(i+1):len(baseSeq)], numToAdd-1, alphabet, ref);\n\n#loads mismatches to constant region into hash with the provided alphabet\ndef loadMismatches(myHash, baseSeq, numToAdd, alphabet):\n\tif numToAdd<=0:\n\t\tmyHash[baseSeq]=1;\n\telse:\n\t\tfor i in range(0,len(baseSeq)):\n\t\t\tfor a in range(0,len(alphabet)):\n\t\t\t\tloadMismatches(myHash, baseSeq[0:i]+alphabet[a]+baseSeq[(i+1):len(baseSeq)], numToAdd-1, alphabet);\n\npossibleMismatchesConst = {}\nalphabet = [\"A\",\"T\",\"G\",\"C\",\"N\"];\nfor i in range(0,args.mmc+1):\n\tloadMismatches(possibleMismatchesConst,args.constT,i,alphabet);\n\n\n\nsys.stderr.write(\"Loading tag-enhancer map...\\n\");\ntag2enh = {}\ntag2tagCount = {}\ntag2tag = {}\ntagLength = -1;\ninTagMap=MYUTILS.smartGZOpen(args.inTagMap,'r');\nfor line in inTagMap:\n\tif line is None or line == \"\" or line[0]==\"#\": continue\n\tdata=line.rstrip().split(\"\\t\");\n\ttag2enh[data[1]] = data[0];\n\ttag2tagCount[data[1]] = 0;\n\tfor i in range(0,args.mmt+1): #add mismatched barcodes to map\n\t\taddMMToTags(tag2tag,data[1],i,alphabet,data[1]);\n\tif tagLength==-1:\n\t\ttagLength=len(data[1]);\n\telif tagLength!=len(data[1]):\n\t\traise Exception(\"Error: not all tag lengths the same! 
previous = %i, now = %i for line %s\" %(tagLength, len(data[1]), line));\ninTagMap.close();\n\nsys.stderr.write(\"The length of the tags is %i.\\n\"%(tagLength));\nsys.stderr.write(\"Reading fastq and matching to tags...\\n\");\noutFileUnmatched = MYUTILS.smartGZOpen(args.outFPre+\"_unmatched.fastq.gz\",'w');\noutFileBadConst = MYUTILS.smartGZOpen(args.outFPre+\"_badConstRegion.fastq.gz\",'w');\ninFastq=MYUTILS.smartGZOpen(args.inFastq,'r');\nstate=0;\nmismatchedConstantRegion=0;\nunmatchedTag=0;\ntagCollision=0;\nsuccessfulTags=0;\ntagMismatchOkay=0;\nreads=0;\ncurOutLoc=0;\nfor line in inFastq:\n\tif line is None or line == \"\": continue\n\tline = line.rstrip();\n\tif state==0 and line[0]==\"@\":\n\t\tlastHead=line;\n\t\tpass;\n\telif state==1:\n\t\treads+=1;\n\t\tcurTag = line[0:tagLength];\n\t\tconstRegion = line[tagLength:len(line)];\n\t\tif len(constRegion)!=len(args.constT):\n\t\t\traise Exception(\"Error: provided constant region not same length as non-tag component of fastq: %i vs %i for line %s\" %(len(args.constT), len(constRegion), line));\n\t\telif constRegion not in possibleMismatchesConst:\n\t\t\tmismatchedConstantRegion+=1;\n\t\t\tif args.noConstCheck==0:\n\t\t\t\toutFileBadConst.write(lastHead+\"\\n\"+line+\"\\n\");\n\t\t\t\tcurOutLoc=outFileBadConst;\n\t\telif curTag not in tag2tag:\n\t\t\tunmatchedTag+=1;\n\t\t\toutFileUnmatched.write(lastHead+\"\\n\"+line+\"\\n\");\n\t\t\tcurOutLoc=outFileUnmatched;\n\t\telif tag2tag[curTag]==\"NA\":\n\t\t\ttagCollision+=1;\n\t\tif curTag in tag2tag and tag2tag[curTag]!=\"NA\" and not curOutLoc: # matched a tag successfully, not a collision tag, and not a mismatched constant region\n\t\t\trealTag = tag2tag[curTag];\n\t\t\tif realTag!=curTag:\n\t\t\t\ttagMismatchOkay+=1;\n\t\t\ttag2tagCount[realTag]+=1;\n\t\t\tsuccessfulTags+=1;\n\telif state==2 and line==\"+\":\n\t\tif curOutLoc:\n\t\t\tcurOutLoc.write(line+\"\\n\");\n\telif state==3:\n\t\tstate=-1;\n\t\tif curOutLoc:\n\t\t\tcurOutLoc.write(line+\"\\n\");\n\t\tcurOutLoc=0;\n\tstate+=1;\ninFastq.close();\noutFileUnmatched.close();\noutFileBadConst.close();\n\nsys.stderr.write(\"Outputting results...\\n\");\noutFile = MYUTILS.smartGZOpen(args.outFPre+\"_counts.txt.gz\",'w');\n\ntagsInLib = 0;\ndistinctNonNATagsObserved=0;\ndistinctNATagsObserved=0;\ntagsMappingToNA=0;\noutFile.write(\"tag\\tenhancer\\tcount\\n\");\nfor tag in sorted(tag2enh):\n\toutFile.write(\"%s\\t%s\\t%i\\n\"%(tag,tag2enh[tag],tag2tagCount[tag]));\n\tif tag2enh[tag][0:2]==\"NA\":\n\t\ttagsMappingToNA+=1;\n\t\tif tag2tagCount[tag]>0:\n\t\t\tdistinctNATagsObserved+=1;\n\telse:\n\t\tif tag2tagCount[tag]>0:\n\t\t\tdistinctNonNATagsObserved+=1;\n\ttagsInLib+=1;\noutFile.close();\n\nsys.stderr.write(\"Among %i reads:\\n\"%(reads));\nsys.stderr.write(\"\t%i failed to align due to mismatched constant regions (%i%%).\\n\"%(mismatchedConstantRegion,100*mismatchedConstantRegion/reads));\nsys.stderr.write(\"\t%i failed to align due to a tag collision (%i%%).\\n\"%(tagCollision,100*tagCollision/reads));\nsys.stderr.write(\"\t%i failed to align due to the tag matching nothing known (%i%%).\\n\"%(unmatchedTag,100*unmatchedTag/reads));\nsys.stderr.write(\"\t%i tags found a home (%i%%).\\n\"%(successfulTags,100*successfulTags/reads));\nsys.stderr.write(\"\t%i tags found a home because we allowed mismatches (%i%%).\\n\"%(tagMismatchOkay,100*tagMismatchOkay/reads));\nsys.stderr.write(\"Among the library of %i tags :\\n\"%(tagsInLib));\nsys.stderr.write(\"\t%i map to nothing (are NA) 
(%i%%).\\n\"%(tagsMappingToNA,100*tagsMappingToNA/tagsInLib));\nsys.stderr.write(\"Among the %i NA tags :\\n\"%(tagsMappingToNA));\nsys.stderr.write(\"\t%i were observed >=1 time in this data (%i%%).\\n\"%(distinctNATagsObserved,100*distinctNATagsObserved/tagsMappingToNA));\nsys.stderr.write(\"Among the %i enhancer-mapping tags:\\n\"%(tagsInLib - tagsMappingToNA));\nsys.stderr.write(\"\t%i were observed >=1 time in this data (%i%%).\\n\"%(distinctNonNATagsObserved,100*distinctNonNATagsObserved/(tagsInLib-tagsMappingToNA)));\nif (args.logFP is not None):\n\tlogFile.close();\n\nsys.stderr.write(\"Done!\\n\");\n", "id": "10347547", "language": "Python", "matching_score": 2.086550712585449, "max_stars_count": 4, "path": "quantifyRNATags.py" }, { "content": "#!/home/unix/cgdeboer/bin/python3\nimport warnings\nimport MYUTILS\nimport sys\nimport re\nimport argparse\nparser = argparse.ArgumentParser(description='Filters out barcode-enhancer combinations and tallies associations.')\nparser.add_argument('-i',dest='inFP',\tmetavar='<inFile>',help='Input file of mapped enhancers-barcodes as made by mapBarcodesToEnhancers.py', required=True);\nparser.add_argument('-o',dest='outFPre', metavar='<outFile>',help='Where to output results prefix', required=True);\nparser.add_argument('-l',dest='logFP', metavar='<logFile>',help='Where to output errors/warnings [default=stderr]', required=False);\nparser.add_argument('-v',dest='verbose', action='count',help='Verbose output?', required=False, default=0);\n\nargs = parser.parse_args();\n\n\ninFile=MYUTILS.smartGZOpen(args.inFP,'r');\n\n\nif (args.logFP is not None):\n\tlogFile=MYUTILS.smartGZOpen(args.logFP,'w');\n\tsys.stderr=logFile;\n\n\n#raise Exception(\"Reached bad state=%d for '%s.%d' '%s' at line '%s'\" %(state,mid,ver,tfid,line));\nF_RID=0;\nF_R1_REF=1;\nF_R1_MAPQ=2;\nF_R1_CIGAR=3;\nF_R1_START=4;\nF_R2_REF=5;\nF_R2_MAPQ=6;\nF_R2_CIGAR=7;\nF_R2_START=8;\nF_BAR=9;\n\n\noutFileUnmapped = MYUTILS.smartGZOpen(args.outFPre+\".reads.unmapped.gz\",'w');\noutFileGapped = MYUTILS.smartGZOpen(args.outFPre+\".reads.gapped.gz\",'w');\noutFileDifferentRef = MYUTILS.smartGZOpen(args.outFPre+\".reads.diffRef.gz\",'w');\noutFileDifferentMAPQ = MYUTILS.smartGZOpen(args.outFPre+\".reads.diffMAPQ.gz\",'w');\noutFileUngapped = MYUTILS.smartGZOpen(args.outFPre+\".reads.ungapped.gz\",'w');\noutFileLowMAPQ = MYUTILS.smartGZOpen(args.outFPre+\".reads.lowMAPQ.gz\",'w');\n\ndef startAndCigar2GapPoss(cigar, start):\n\tif re.match(\"^[0-9]*M$\",cigar):\n\t\treturn \"M\"\n\tif cigar==\"*\":\n\t\treturn \"*\";\n\tm = re.match(\"^([0-9]*)M(.*[^0-9])[0-9]*M$\",cigar)\n\tif m:\n\t\tnewSt = start + int(m.group(1));\n\t\treturn \"%iM%s\"%(newSt,m.group(2));\n\telse:\n\t\traise Exception(\"Bad cigar; could not find terminal matches %s\"%cigar);\n\t\t\n\nenhancerBC2CIGARs = {};\ntag2enhancer = {};\ngapsBC2Enh = {};\nambiguousBC2Enh = {};\nunmappableBCs = {};\ntotal=0;\ndiffRef=0;\nnotMapped=0;\ndiffMapq=0;\nmapqLow=0;\nnoGaps=0;\nhasGaps=0;\nnumNBarcodes = 0;\nheader = inFile.readline().rstrip().split(\"\\t\");\nfor line in inFile:\n\ttotal+=1;\n\tif line is None or line == \"\" or line[0]==\"#\": continue\n\tdata=line.rstrip().split(\"\\t\");\n\tif re.search(\"N\", data[F_BAR]):\n\t\tnumNBarcodes+=1;\n\t\tcontinue; #skip reads whose barcode contains an N\n\tif data[F_R1_REF] != data[F_R2_REF]:\n\t\tdiffRef+=1;\n\t\tunmappableBCs[data[F_BAR]]=1;\n\t\toutFileDifferentRef.write(line);\n\t\tcontinue;\n\tif 
data[F_R1_REF]==\"*\":\n\t\tnotMapped+=1;\n\t\tunmappableBCs[data[F_BAR]]=1;\n\t\toutFileUnmapped.write(line);\n\t\tcontinue;\n\tenh_bar = data[F_R1_REF]+\"\\t\"+data[F_BAR];\n\tr1GapDesc = startAndCigar2GapPoss(data[F_R1_CIGAR], int(data[F_R1_START]));\n\tr2GapDesc = startAndCigar2GapPoss(data[F_R2_CIGAR], int(data[F_R2_START]));\n\t\n\tif enh_bar not in enhancerBC2CIGARs:\n\t\tenhancerBC2CIGARs[enh_bar] = [{},{}]; # Read1 cigar, read2 cigar\n\tif r1GapDesc not in enhancerBC2CIGARs[enh_bar][0]:\n\t\tenhancerBC2CIGARs[enh_bar][0][r1GapDesc]=0;\n\tif r2GapDesc not in enhancerBC2CIGARs[enh_bar][1]:\n\t\tenhancerBC2CIGARs[enh_bar][1][r2GapDesc]=0;\n\tenhancerBC2CIGARs[enh_bar][0][r1GapDesc]+=1;\n\tenhancerBC2CIGARs[enh_bar][1][r2GapDesc]+=1;\n\tif data[F_R1_MAPQ]!=data[F_R2_MAPQ]:\n\t\tdiffMapq+=1;\n\t\toutFileDifferentMAPQ.write(line);\n\tif int(data[F_R1_MAPQ])+int(data[F_R1_MAPQ])<12: #exclude because mapping is ambiguous\n\t\tmapqLow+=1;\n\t\toutFileLowMAPQ.write(line)\n\t\tif data[F_BAR] not in ambiguousBC2Enh:\n\t\t\tambiguousBC2Enh[data[F_BAR]]={};\n\t\tif data[F_R1_REF] not in ambiguousBC2Enh[data[F_BAR]]:\n\t\t\tambiguousBC2Enh[data[F_BAR]][data[F_R1_REF]]=1;\n\t\telse:\n\t\t\tambiguousBC2Enh[data[F_BAR]][data[F_R1_REF]]+=1;\n\telif r1GapDesc==\"M\" and r2GapDesc==\"M\": #mapping has no gaps\n\t\tnoGaps+=1;\n\t\toutFileUngapped.write(line)\n\t\tif data[F_BAR] not in tag2enhancer:\n\t\t\ttag2enhancer[data[F_BAR]]={};\n\t\tif data[F_R1_REF] not in tag2enhancer[data[F_BAR]]:\n\t\t\ttag2enhancer[data[F_BAR]][data[F_R1_REF]]=1;\n\t\telse:\n\t\t\ttag2enhancer[data[F_BAR]][data[F_R1_REF]]+=1;\n\telse: # these have gaps in the alignment, excluded\n\t\thasGaps+=1;\n\t\toutFileGapped.write(line);\n\t\tif data[F_BAR] not in gapsBC2Enh:\n\t\t\tgapsBC2Enh[data[F_BAR]]={};\n\t\tif data[F_R1_REF] not in gapsBC2Enh[data[F_BAR]]:\n\t\t\tgapsBC2Enh[data[F_BAR]][data[F_R1_REF]]=1;\n\t\telse:\n\t\t\tgapsBC2Enh[data[F_BAR]][data[F_R1_REF]]+=1;\n\ninFile.close();\n\nsys.stderr.write(\"Of all %i reads:\\n\"%(total));\nsys.stderr.write(\" barcodes containing Ns: %i (%i%%)\\n\"%(numNBarcodes, 100.0 * numNBarcodes/total));\nsys.stderr.write(\" unmapped reads: %i (%i%%)\\n\"%(notMapped, 100.0 * notMapped/total));\nsys.stderr.write(\" differing reference: %i (%i%%)\\n\"%(diffRef, 100.0 * diffRef/total));\nsys.stderr.write(\" differing MAPQ: %i (%i%%)\\n\"%(diffMapq, 100.0 * diffMapq/total));\nsys.stderr.write(\" low MAPQ: %i (%i%%)\\n\"%(mapqLow, 100.0 * mapqLow/total));\nsys.stderr.write(\" no gaps in alignment: %i (%i%%)\\n\"%(noGaps, 100.0 * noGaps/total));\nsys.stderr.write(\" gaps in alignment: %i (%i%%)\\n\"%(hasGaps, 100.0 * hasGaps/total));\n\ntotal=0;\nuniqueC1=0;\nuniqueC2=0;\nsingleObs=0;\nuniqueBoth=0;\ndominantC1=0;\ndominantC2=0;\ndominantBoth=0;\nperfectC1=0;\nperfectC2=0;\nperfectBoth=0;\n\noutFileCIGARs = MYUTILS.smartGZOpen(args.outFPre+\".cigars.gz\",'w');\nfor enh_bar in enhancerBC2CIGARs:\n\tcurLine = enh_bar;\n\ttotal+=1;\n\tlastUnique=False;\n\tlastPerfect=False;\n\tlastDom=False;\n\tfor i in range(0,2): # for each read in pair\n\t\tcurCigars = [];\n\t\tmaxObs = 0;\n\t\tnumObs = 0;\n\t\tfor cigar in enhancerBC2CIGARs[enh_bar][i]:\n\t\t\tcurCigars.append(cigar+\":%i\"%(enhancerBC2CIGARs[enh_bar][i][cigar]));\n\t\t\tmaxObs = max([maxObs, enhancerBC2CIGARs[enh_bar][i][cigar]]);\n\t\t\tnumObs+=enhancerBC2CIGARs[enh_bar][i][cigar];\n\t\t\tif cigar==\"M\": #perfect match\n\t\t\t\tif lastPerfect:\n\t\t\t\t\tperfectBoth+=1;\n\t\t\t\tif 
i==0:\n\t\t\t\t\tperfectC1+=1;\n\t\t\t\t\tlastPerfect=True;\n\t\t\t\telse:\n\t\t\t\t\tperfectC2+=1;\n\t\tif len(curCigars)==1: # only one cigar for this read\n\t\t\tif i==0:\n\t\t\t\tuniqueC1+=1;\n\t\t\t\tlastUnique=True;\n\t\t\telse:\n\t\t\t\tuniqueC2+=1;\n\t\t\t\tif lastUnique:\n\t\t\t\t\tuniqueBoth+=1;\n\t\tif numObs==1 and i==0: #only one cigar observed and the first read (so as not to count this twice since the second should also have only one obs (since they are part of a pair))\n\t\t\tsingleObs+=1;\n\t\tif 1.0*maxObs/numObs>0.5: # the maximally observed cigar for the read is at least 50% of the cigars observed for the read\n\t\t\tif i==0:\n\t\t\t\tdominantC1+=1;\n\t\t\t\tlastDom=True;\n\t\t\telse:\n\t\t\t\tdominantC2+=1;\n\t\t\t\tif lastDom:\n\t\t\t\t\tdominantBoth+=1;\n\t\tcurLine=curLine+\"\\t\"+\";\".join(curCigars);\n\toutFileCIGARs.write(curLine+\"\\n\");\n\noutFileCIGARs.close();\n\t\t\nsys.stderr.write(\"Of the %i enhancer-barcode pairs that mapped to anything (well or otherwise):\\n\"%(total));\nsys.stderr.write(\" have only one read supporting them: %i (%i%%)\\n\"%(singleObs, 100.0 * singleObs/total));\nsys.stderr.write(\" have only one CIGAR1: %i (%i%%)\\n\"%(uniqueC1, 100.0 * uniqueC1/total));\nsys.stderr.write(\" have only one CIGAR2: %i (%i%%)\\n\"%(uniqueC2, 100.0 * uniqueC2/total));\nsys.stderr.write(\" have both only one C1 and C2: %i (%i%%)\\n\"%(uniqueBoth, 100.0 * uniqueBoth/total));\nsys.stderr.write(\" have a dominant C1: %i (%i%%)\\n\"%(dominantC1, 100.0 * dominantC1/total));\nsys.stderr.write(\" have a dominant C2: %i (%i%%)\\n\"%(dominantC2, 100.0 * dominantC2/total));\nsys.stderr.write(\" have both a dominant C1 and C2: %i (%i%%)\\n\"%(dominantBoth, 100.0 * dominantBoth/total));\nsys.stderr.write(\" have an ungapped C1: %i (%i%%)\\n\"%(perfectC1, 100.0 * perfectC1/total));\nsys.stderr.write(\" have an ungapped C2: %i (%i%%)\\n\"%(perfectC2, 100.0 * perfectC2/total));\nsys.stderr.write(\" have both an ungapped C1 and C2: %i (%i%%)\\n\"%(perfectBoth, 100.0 * perfectBoth/total));\n\nusabletag2enh={};\nmmbc2bc = {};\nfor bc in unmappableBCs:\n\tusabletag2enh[bc]=\"NA_unmappable\";\n\tfor k in range(0,len(bc)):\n\t\tmmbc = bc[0:k]+\"-\"+bc[(k+1):len(bc)];\n\t\tif mmbc not in mmbc2bc:\n\t\t\tmmbc2bc[mmbc]=[];\n\t\tmmbc2bc[mmbc].append(bc);\n\n#fill in 1 mismatch hash\nfor bc in tag2enhancer:\n\tfor k in range(0,len(bc)):\n\t\tmmbc = bc[0:k]+\"-\"+bc[(k+1):len(bc)];\n\t\tif mmbc not in mmbc2bc:\n\t\t\tmmbc2bc[mmbc]=[];\n\t\tmmbc2bc[mmbc].append(bc);\n\n\t\n\n\ntotalTags = 0;\nbarcodeEnhPairs=0;\nbarcodeCollisions=0;\nbarcodeCollisions1MM=0;\nbarcodeErrors=0;\nbarcodeWasUnmappable=0;\nbarcodeWasAmbiguousWithOther=0;\nbarcodeMappedToSomethingElseWithGaps=0;\nenh2usabletag = {};\nfor bc in tag2enhancer:\n\ttotalTags+=1;\n\t#is this tag good?\n\tenh=list(tag2enhancer[bc].keys())[0];\n\tif len(tag2enhancer[bc])>1: # this tag mapped to more than one enhancer\n\t\tenh=\"NA_collision\";\n\t\tbarcodeCollisions+=1;\n\telif bc in unmappableBCs:\n\t\tbarcodeWasUnmappable+=1;\n\telif bc in ambiguousBC2Enh and (len(ambiguousBC2Enh[bc])>1 or enh not in ambiguousBC2Enh[bc]): # bc was ambiguous with something (either more than one other thing, or not itself)\n\t\tenh=\"NA_ambiguous\";\n\t\tbarcodeWasAmbiguousWithOther+=1;\n\telif bc in gapsBC2Enh and enh not in gapsBC2Enh[bc]: # barcode was in a read with gaps and never mapped to the correct enhancer with this gapped read\n\t\tenh=\"NA_indel\"\n\t\tbarcodeMappedToSomethingElseWithGaps+=1;\n\tusabletag2enh[bc]=enh;\n\tif enh not in 
enh2usabletag:\n\t\tenh2usabletag[enh]=[];\n\tenh2usabletag[enh].append(bc);\n\nfor bc in ambiguousBC2Enh:\n\tif bc not in usabletag2enh:\n\t\tmostHits=0;\n\t\tbestGuess=\"NA\";\n\t\tfor enh in ambiguousBC2Enh[bc]:\n\t\t\tif ambiguousBC2Enh[bc][enh]>mostHits:\n\t\t\t\tmostHits = ambiguousBC2Enh[bc][enh];\n\t\t\t\tbestGuess=enh;\n\t\tenh = \"NA_ambiguous_\"+enh;\n\t\tusabletag2enh[bc]=enh;\n\t\tfor k in range(0,len(bc)):\n\t\t\tmmbc = bc[0:k]+\"-\"+bc[(k+1):len(bc)];\n\t\t\tif mmbc not in mmbc2bc:\n\t\t\t\tmmbc2bc[mmbc]=[];\n\t\t\tmmbc2bc[mmbc].append(bc);\n\nfor bc in gapsBC2Enh:\n\tif bc not in usabletag2enh:\n\t\tmostHits=0;\n\t\tbestGuess=\"NA\";\n\t\tfor enh in gapsBC2Enh[bc]:\n\t\t\tif gapsBC2Enh[bc][enh]>mostHits:\n\t\t\t\tmostHits = gapsBC2Enh[bc][enh];\n\t\t\t\tbestGuess=enh;\n\t\tenh = \"NA_indel_\"+enh;\n\t\tusabletag2enh[bc]=enh;\n\t\tfor k in range(0,len(bc)):\n\t\t\tmmbc = bc[0:k]+\"-\"+bc[(k+1):len(bc)];\n\t\t\tif mmbc not in mmbc2bc:\n\t\t\t\tmmbc2bc[mmbc]=[];\n\t\t\tmmbc2bc[mmbc].append(bc);\n\n\n\nfor bc in usabletag2enh:\n\tenh=usabletag2enh[bc]\n\tfor k in range(0,len(bc)):\n\t\tmmbc = bc[0:k]+\"-\"+bc[(k+1):len(bc)];\n\t\tif len(mmbc2bc[mmbc])!=1: # there are more than one barcodes that mapped to this mmmBC\n\t\t\tfor i in range(0,len(mmbc2bc[mmbc])):\n\t\t\t\tif mmbc2bc[mmbc][i]!=bc and usabletag2enh[mmbc2bc[mmbc][i]]!=enh: #not the current barcode and barcode maps to a different enhancer\n\t\t\t\t\tbarcodeCollisions1MM+=1;\n\t\t\t\telif mmbc2bc[mmbc][i]!=bc and enh[0:2]!=\"NA\" and usabletag2enh[mmbc2bc[mmbc][i]]==enh: #not mapping to the current tag, not NA, and mapping to the same enhancer\n\t\t\t\t\tbarcodeErrors+=1;\n\nsys.stderr.write(\"Of the %i barcodes that appeared in isolation to map to something uniquely:\\n\"%(totalTags));\nsys.stderr.write(\" mapped to >=2 different enhancers in different reads: %i (%i%%)\\n\"%(barcodeCollisions, 100.0 * barcodeCollisions/totalTags));\nsys.stderr.write(\" were identical to an ambiguously mapping read (and not the same enh): %i (%i%%)\\n\"%(barcodeWasAmbiguousWithOther, 100.0 * barcodeWasAmbiguousWithOther/totalTags));\nsys.stderr.write(\" were identical to a barcode whose read was unmappable: %i (%i%%)\\n\"%(barcodeWasUnmappable, 100.0 * barcodeWasUnmappable/totalTags));\nsys.stderr.write(\" were identical to a barcode whose read had gaps (and not same enh): %i (%i%%)\\n\"%(barcodeMappedToSomethingElseWithGaps, 100.0 * barcodeMappedToSomethingElseWithGaps/totalTags));\nsys.stderr.write(\" barcodes that were within 1 edit distance for the same enhancer: %i (%i%%)\\n\"%(barcodeErrors, 100.0 * barcodeErrors/totalTags));\nsys.stderr.write(\" barcodes that collided within 1 edit distance: %i (%i%%)\\n\"%(barcodeCollisions1MM, 100.0 * barcodeCollisions1MM/totalTags));\n\ntotalEnh = len(enh2usabletag)-1;#because of NA\nsingleton=0;\nmoreThanFive=0;\nmoreThan10=0;\nmoreThan20=0;\nmoreThan40=0;\nmoreThan80=0;\nmoreThan250=0;\nmoreThan500=0;\nmoreThan1000=0;\nmoreThan10000=0;\nmoreThan100000=0;\nfor enh in enh2usabletag:\n\tnumTags = len(enh2usabletag[enh]);\n\tif numTags==1:\n\t\tsingleton+=1;\n\tif numTags>=5:\n\t\tmoreThanFive+=1;\n\tif numTags>=10:\n\t\tmoreThan10+=1;\n\tif numTags>=20:\n\t\tmoreThan20+=1;\n\tif numTags>=40:\n\t\tmoreThan40+=1;\n\tif numTags>=80:\n\t\tmoreThan80+=1;\n\tif numTags>=250:\n\t\tmoreThan250+=1;\n\tif numTags>=500:\n\t\tmoreThan500+=1;\n\tif numTags>=1000:\n\t\tmoreThan1000+=1;\n\tif numTags>=10000:\n\t\tmoreThan10000+=1;\n\tif numTags>=100000:\n\t\tmoreThan100000+=1;\n\nsys.stderr.write(\"Of the %i 
enhancers that passed filtering:\\n\"%(totalEnh));\nsys.stderr.write(\" have only one barcode: %i (%i%%)\\n\"%(singleton, 100.0 * singleton/totalEnh));\nsys.stderr.write(\" have more than five barcodes: %i (%i%%)\\n\"%(moreThanFive, 100.0 * moreThanFive/totalEnh));\nsys.stderr.write(\" have more than 10 barcodes: %i (%i%%)\\n\"%(moreThan10, 100.0 * moreThan10/totalEnh));\nsys.stderr.write(\" have more than 20 barcodes: %i (%i%%)\\n\"%(moreThan20, 100.0 * moreThan20/totalEnh));\nsys.stderr.write(\" have more than 40 barcodes: %i (%i%%)\\n\"%(moreThan40, 100.0 * moreThan40/totalEnh));\nsys.stderr.write(\" have more than 80 barcodes: %i (%i%%)\\n\"%(moreThan80, 100.0 * moreThan80/totalEnh));\nsys.stderr.write(\" have more than 250 barcodes: %i (%i%%)\\n\"%(moreThan250, 100.0 * moreThan250/totalEnh));\nsys.stderr.write(\" have more than 500 barcodes: %i (%i%%)\\n\"%(moreThan500, 100.0 * moreThan500/totalEnh));\nsys.stderr.write(\" have more than 1000 barcodes: %i (%i%%)\\n\"%(moreThan1000, 100.0 * moreThan1000/totalEnh));\nsys.stderr.write(\" have more than 10000 barcodes: %i (%i%%)\\n\"%(moreThan10000, 100.0 * moreThan10000/totalEnh));\nsys.stderr.write(\" have more than 100000 barcodes: %i (%i%%)\\n\"%(moreThan100000, 100.0 * moreThan100000/totalEnh));\n\noutFileMap = MYUTILS.smartGZOpen(args.outFPre+\".map.gz\",'w');\nfor bc in usabletag2enh:\n\tenh = usabletag2enh[bc];\n\tif enh[0:2]!=\"NA\":\n\t\toutFileMap.write(\"%s\\t%s\\t%i\\n\"%(enh,bc,tag2enhancer[bc][enh])); # last column indicates the number of times the barcode was observed in the data\n\telse:\n\t\toutFileMap.write(\"%s\\t%s\\tNA\\n\"%(enh,bc)); # last column indicates nothing but not to use this barcode\noutFileMap.close();\n\nif (args.logFP is not None):\n\tlogFile.close();\n", "id": "722332", "language": "Python", "matching_score": 4.9763898849487305, "max_stars_count": 4, "path": "filterMappedEnhancerBarcodes.py" }, { "content": "#!/home/unix/cgdeboer/bin/python3\nimport warnings\nimport MYUTILS\nimport sys\nimport argparse\nparser = argparse.ArgumentParser(description='Parses sorted SAM file and associates barcode sequences with the enhancers they mapped to.')\nparser.add_argument('-is',dest='inSAM',\tmetavar='<inFile>',help='Input file of aligned reads to enhancers in sam format. 
must be sorted (unix sort)', required=True);\nparser.add_argument('-ib',dest='inBarcodes',\tmetavar='<inFile>',help='Input file of barcodes in read\\tbarcode format (must also be sorted)', required=True);\nparser.add_argument('-o',dest='outFP', metavar='<outFile>',help='Where to output results [default=stdout]', required=False);\nparser.add_argument('-l',dest='logFP', metavar='<logFile>',help='Where to output errors/warnings [default=stderr]', required=False);\nparser.add_argument('-v',dest='verbose', action='count',help='Verbose output?', required=False, default=0);\n\nargs = parser.parse_args();\n\nSAM_ID=0;\nSAM_STARTPOS=3;\nSAM_CIGAR=5;\nSAM_SCORE=4;\nSAM_TLEN=8;\nSAM_REF=2;\n\ninSAM=MYUTILS.smartGZOpen(args.inSAM,'r');\ninBC=MYUTILS.smartGZOpen(args.inBarcodes,'r');\n\n\nif (args.logFP is not None):\n\tlogFile=MYUTILS.smartGZOpen(args.logFP,'w');\n\tsys.stderr=logFile;\n\nif (args.outFP is None):\n\toutFile= sys.stdout;\nelse:\n\tif args.verbose>0: warnings.warn(\"Outputting to file \"+args.outFP);\n\toutFile = MYUTILS.smartGZOpen(args.outFP,'w');\n\n\nnextSAMLine = inSAM.readline().rstrip().split(\"\\t\");\nnumUnmatchedSAM = 0;\nnumUnmatchedBC = 0;\noutFile.write(\"readID\\tR1_REF\\tR1_MAPQ\\tR1_CIGAR\\tR1_START\\tR2_REF\\tR2_MAPQ\\tR2_CIGAR\\tR2_START\\tbarcode\\n\");\nfor line in inBC:\n\tif line is None or line == \"\": continue\n\tdata=line.rstrip().split(\"\\t\");\n\twhile data[0]>nextSAMLine[SAM_ID] and (nextSAMLine[0]!=\"\" or len(nextSAMLine)!=1):\n\t\tnextSAMLine = inSAM.readline().rstrip().split(\"\\t\");\n\t\tnumUnmatchedSAM+=1;\n\tif data[0]==nextSAMLine[SAM_ID]:\n\t\tcurSAMLine = nextSAMLine;\n\t\tnextSAMLine = inSAM.readline().rstrip().split(\"\\t\");\n\t\tif data[0]!=nextSAMLine[SAM_ID]:\n\t\t\tsys.stderr.write(\"Next line in SAM file is not the same read ID as predecessor; skipping read: %s - %s\\n\" %(data[0], nextSAMLine[SAM_ID]));\n\t\telse: \n\t\t\tif int(curSAMLine[SAM_TLEN])<0 and int(nextSAMLine[SAM_TLEN])>0: #swap if not F read first\n\t\t\t\ttemp=curSAMLine;\n\t\t\t\tcurSAMLine=nextSAMLine;\n\t\t\t\tnextSAMLine=temp;\n\t\t\toutFile.write(data[0]+\"\\t\"+curSAMLine[SAM_REF]+\"\\t\"+curSAMLine[SAM_SCORE]+\"\\t\"+curSAMLine[SAM_CIGAR] + \"\\t\" + curSAMLine[SAM_STARTPOS] + \"\\t\"+nextSAMLine[SAM_REF]+\"\\t\"+nextSAMLine[SAM_SCORE]+\"\\t\"+nextSAMLine[SAM_CIGAR]+\"\\t\" + nextSAMLine[SAM_STARTPOS]+\"\\t\"+data[1]+\"\\n\");\n\t\t\tnextSAMLine = inSAM.readline().rstrip().split(\"\\t\");\n\telse:\n\t\tnumUnmatchedBC+=1;\n\nsys.stderr.write(\"Completed. 
Num unmatched barcodes = %i; Num unmatched SAM entries = %i\\n\"%(numUnmatchedBC, numUnmatchedSAM)); \ninBC.close();\ninSAM.close();\n\noutFile.close();\nif (args.logFP is not None):\n\tlogFile.close();\n", "id": "5328290", "language": "Python", "matching_score": 3.8018319606781006, "max_stars_count": 4, "path": "mapBarcodesToEnhancers.py" }, { "content": "#!/home/unix/cgdeboer/bin/python3\nimport warnings\nimport MYUTILS\nimport sys\nimport argparse\nparser = argparse.ArgumentParser(description='DESCRIPTION.')\nparser.add_argument('-i',dest='inFP',\tmetavar='<inFile>',help='Input file of fastq', required=True);\nparser.add_argument('-s',dest='startPos',\tmetavar='<startPos>',help='Where the barcode starts in fastq', required=True);\nparser.add_argument('-n',dest='numBases',\tmetavar='<numBases>',help='Length of barcode', required=True);\nparser.add_argument('-o',dest='outFP', metavar='<outFile>',help='Where to output results [default=stdout]', required=False);\nparser.add_argument('-l',dest='logFP', metavar='<logFile>',help='Where to output errors/warnings [default=stderr]', required=False);\nparser.add_argument('-v',dest='verbose', action='count',help='Verbose output?', required=False, default=0);\n\nargs = parser.parse_args();\n\n\ninFile=MYUTILS.smartGZOpen(args.inFP,'r');\n\nargs.startPos = int(args.startPos);\nargs.numBases = int(args.numBases);\n\nif (args.logFP is not None):\n\tlogFile=MYUTILS.smartGZOpen(args.logFP,'w');\n\tsys.stderr=logFile;\n\nif (args.outFP is None):\n\toutFile= sys.stdout;\nelse:\n\tif args.verbose>0: warnings.warn(\"Outputting to file \"+args.outFP);\n\toutFile = MYUTILS.smartGZOpen(args.outFP,'w');\n\n\n\n#@M03102:139:000000000-AGUL4:1:1101:15230:1342 1:N:0:0\n#TTGACCTA\n#+\n#CCCCCFFF\n\n#CTGTTCCGCTATACTC\n\nstate = 0;\nfor line in inFile:\n\tif line is None or line == \"\": continue\n\tif state==0 and line[0]==\"@\":\n\t\tdata = line[1:].split(\" \");\n\t\tcurID = data[0];\n\telif state==1:\n\t\toutFile.write(curID+\"\\t\"+line[args.startPos:(args.startPos+args.numBases)]+\"\\n\");\n\telif state==2 and line[0]==\"+\":\n\t\tpass\n\telif state==3:\n\t\tstate=-1;\n\telse:\n\t\traise Exception(\"Reached bad state=%d for '%s' at line '%s'\" %(state,id,line));\n\tstate+=1;\n\t\t\ninFile.close();\noutFile.close();\nif (args.logFP is not None):\n\tlogFile.close();\n", "id": "7667385", "language": "Python", "matching_score": 2.687973976135254, "max_stars_count": 4, "path": "getBarcodesFromFastq.py" } ]
2.687974
benprofessionaledition
[ { "content": "\"\"\"\nyour ad here\n\"\"\"", "id": "4691840", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "python/whaletails/train.py" }, { "content": "from typing import Tuple, Union\nimport math\nimport time \nimport os\nimport PIL.Image as Image\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.models as models\nimport torchvision.transforms as T\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom IPython.display import clear_output\nfrom tqdm import tqdm\n\nmpl.rcParams[\"figure.figsize\"] = (14, 7)\nmpl.rcParams[\"axes.grid\"] = False\n\nrun_id = round(time.time() * 1000)\n\nTB_LOG_DIR = \"/home/ben/ide/stylegan/resources/.ignored/logs/{}\".format(run_id)\nos.makedirs(TB_LOG_DIR, exist_ok=True)\n\nwriter = SummaryWriter(TB_LOG_DIR)\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nprint(f\"Device in use: {device}\")\n\nclass ImageLoader:\n\n def __init__(self, size: Union[int, tuple], resize: bool = True, interpolation=2):\n\n \"\"\"\n Args:\n\n size:\n Desired output size. If size is a sequence like (h, w), output size will\n be matched to this. If size is an int, smaller edge of the image will be\n matched to this number i.e, if height > width, then image will be resca\n led to (size * height / width, size)\n\n resize:\n If True, PIL Image will be resized to the desired size\n\n interpolation:\n If resize set to True, this will be used inside torch.transforms.Resize()\n \"\"\"\n\n transforms = []\n if resize: transforms.append(T.Resize(size=size, interpolation=interpolation))\n\n # compulsory to add this transformation to the list\n transforms.append(T.ToTensor())\n self.transforms = T.Compose(transforms)\n\n def read_image(self, filepath: str) -> torch.tensor:\n\n \"\"\"\n Return:\n Transformed torch tensor\n \"\"\"\n\n image = Image.open(fp=filepath)\n image = self.transforms(image)\n image = image.to(device, torch.float)\n\n return image\n\n @staticmethod\n def show_image(tensor: torch.Tensor, title: str = \"Image\", save_: bool = False, filename: str = None):\n\n \"\"\"\n Args:\n save_: If set to True, will save the image with filename \"filename\"\n :param filename:\n :param save_:\n :param title:\n :param tensor:\n \"\"\"\n\n # Clone the tensor to CPU (to avoid any modifications to the original tensor)\n tensor = tensor.cpu().clone()\n\n # squeeze or unsqueeze the tensor to bring it to an appropiate shape\n if len(tensor.shape) == 4:\n tensor = tensor.squeeze(0)\n elif len(tensor.shape) == 2:\n tensor = tensor.unsqueeze(0)\n elif len(tensor.shape) > 4 or len(tensor.shape) < 2:\n raise ValueError(f\"Bad Input shape:: {tensor.shape}\")\n\n # transform the tensor to PIL Image\n transforms = T.ToPILImage()\n img = transforms(tensor)\n plt.imshow(img)\n plt.title(title)\n plt.pause(0.001)\n\n if save_: img.save(fp=filename)\n\n\nclass TransferModel(nn.Module):\n\n def __init__(self, con_layers: list = ['conv4_2'], sty_layers: list = None, mean: list = [0.485, 0.456, 0.406],\n stdv: list = [0.229, 0.224, 0.225]):\n\n \"\"\"\n Args:\n\n con_layers: Layers to be used for Content loss\n sty_layers: Layers to be used for Style loss\n\n mean: Mean to normalize the input tensor\n stdv: Stdv to normalize the input tensor\n \"\"\"\n\n super().__init__() # call the initializer of the super class\n mapping_dict = {\"conv1_1\": 0, \"conv1_2\": 2,\n \"conv2_1\": 5, \"conv2_2\": 7,\n \"conv3_1\": 10, \"conv3_2\": 12, 
\"conv3_3\": 14, \"conv3_4\": 16,\n \"conv4_1\": 19, \"conv4_2\": 21, \"conv4_3\": 23, \"conv4_4\": 25,\n \"conv5_1\": 28, \"conv5_2\": 30, \"conv5_3\": 32, \"conv5_4\": 34}\n\n # convert the mean and stdv to torch.tensor\n mean = torch.tensor(mean, dtype=torch.float, device=device)\n stdv = torch.tensor(stdv, dtype=torch.float, device=device)\n self.transforms = T.Normalize(mean, stdv) # transform to normalize the image\n\n # create an integer mapping of the layer names\n # +1 to get the output of ReLu layer\n self.con_layers = [(mapping_dict[layer] + 1) for layer in con_layers]\n self.sty_layers = [(mapping_dict[layer] + 1) for layer in sty_layers]\n self.all_layers = self.con_layers + self.sty_layers\n\n # Initialize a pre-trained model in eval() mode since we don't want to update\n # the parameters of the VGG network\n self.vgg19 = models.vgg19(pretrained=True, progress=True).features\n self.vgg19 = self.vgg19.to(device).eval()\n\n # replace the max pooling layers by average pooling\n for name, layer in self.vgg19.named_children():\n if isinstance(layer, nn.MaxPool2d):\n self.vgg19[int(name)] = nn.AvgPool2d(kernel_size=2, stride=2)\n\n def forward(self, tensor: torch.Tensor) -> dict:\n\n sty_feat_maps = []\n con_feat_maps = []\n # normalize the input tensor and add the batch dimension\n tensor = self.transforms(tensor)\n x = tensor.unsqueeze(0)\n\n # collect the required feature maps\n for name, layer in self.vgg19.named_children():\n x = layer(x)\n if int(name) in self.con_layers: con_feat_maps.append(x)\n if int(name) in self.sty_layers: sty_feat_maps.append(x)\n\n # return a dictionary of content and style output\n return {\"Con_features\": con_feat_maps, \"Sty_features\": sty_feat_maps}\n\n\nclass NeuralStyleTransfer:\n\n def __init__(self, con_image: torch.Tensor, sty_image: torch.Tensor, size: Union[int, Tuple]=512, con_layers: list = None,\n sty_layers: list = None, con_loss_wt: float = 1., sty_loss_wt: float = 1., var_loss_wt=1.):\n\n \"\"\"\n Args:\n con_loss_wt: Weightage of the Content loss\n sty_loss_wt: Weightage of the Style loss\n var_loss_wt: Weightage of the Variational loss\n \"\"\"\n\n self.con_loss_wt = con_loss_wt\n self.sty_loss_wt = sty_loss_wt\n self.var_loss_wt = var_loss_wt\n self.size = size\n\n # initialize the model\n self.model = TransferModel(con_layers=con_layers, sty_layers=sty_layers)\n self.sty_target = self.model(sty_image)[\"Sty_features\"]\n self.con_target = self.model(con_image)[\"Con_features\"]\n\n # initialize the variable image with requires_grad_ set to True\n self.var_image = con_image.clone().requires_grad_(True).to(device)\n # self.var_image = torch.rand_like(con_image.clone(), device = device, requires_grad = True)\n\n @staticmethod\n def _get_var_loss(tensor: torch.Tensor) -> torch.Tensor:\n\n # method to compute the variational loss of the image\n loss = (torch.sum(torch.abs(tensor[:, :, :-1] - tensor[:, :, 1:])) +\n torch.sum(torch.abs(tensor[:, :-1, :] - tensor[:, 1:, :])))\n\n return loss\n\n @staticmethod\n def _get_con_loss(pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n\n return 0.5 * torch.sum(torch.pow(pred - target, 2))\n\n @staticmethod\n def _get_gram_matrix(tensor: torch.Tensor) -> torch.Tensor:\n\n \"\"\"\n Returns: Normalized Gram Matrix of the input tensor\n \"\"\"\n\n b, c, h, w = tensor.size()\n tensor_ = tensor.view(b * c, h * w)\n gram_matrix = torch.mm(tensor_, tensor_.t())\n\n return gram_matrix\n\n def _get_sty_loss(self, pred: torch.Tensor, target: torch.Tensor):\n\n z = 
np.power(np.prod(pred.size()), 2, dtype=np.float64)\n pred = self._get_gram_matrix(pred)\n\n return 0.25 * torch.sum(torch.pow(pred - target, 2)).div(z)\n\n def _get_tot_loss(self, output: torch.Tensor):\n\n \"\"\"\n output: model's predictions\n\n \"\"\"\n\n con_output = output[\"Con_features\"]\n nb_con_layers = len(con_output)\n sty_output = output[\"Sty_features\"]\n nb_sty_layers = len(sty_output)\n\n # calculate the content and style loss for each layer\n con_loss = [self._get_con_loss(con_output[idx], self.con_target[idx]) for idx in range(nb_con_layers)]\n sty_loss = [self._get_sty_loss(sty_output[idx], self.sty_target[idx]) for idx in range(nb_sty_layers)]\n\n # weigh the loss by the appropiate weighing hyper-parameters\n con_loss = torch.mean(torch.stack(con_loss)) * self.con_loss_wt\n sty_loss = torch.mean(torch.stack(sty_loss)) * self.sty_loss_wt\n var_loss = self._get_var_loss(self.var_image) * self.var_loss_wt\n\n return con_loss.to(device), sty_loss.to(device), var_loss.to(device)\n\n def _print_statistics(self, epoch: int, image: torch.Tensor, tot_loss: torch.Tensor, con_loss: torch.Tensor,\n sty_loss: torch.Tensor, var_loss: torch.Tensor):\n\n loader = ImageLoader(size=self.size, resize=True)\n clear_output(wait=True)\n loader.show_image(image.data.clamp_(0, 1), title=\"Output_Image\")\n\n sty_loss = round(sty_loss.item(), 4)\n con_loss = round(con_loss.item(), 4)\n tot_loss = round(tot_loss.item(), 4)\n var_loss = round(var_loss.item(), 4)\n\n print(f\"After epoch {epoch + 1}:: Tot_loss: {tot_loss}\")\n print(f\"Sty_loss: {sty_loss}, Con_loss: {con_loss}, Var_loss: {var_loss}\")\n\n # Using Adam to solve the optimization problem\n def fit(self, nb_epochs: int = 10, nb_iters: int = 1000, lr: float = 1e-2, eps: float = 1e-8,\n betas: tuple = (0.9, 0.999)) -> torch.Tensor:\n\n # detach the targets from the graph to stop the flow of grads through them\n self.sty_target = [self._get_gram_matrix(x).detach() for x in self.sty_target]\n self.con_target = [x.detach() for x in self.con_target]\n \n optimizer = optim.Adam([self.var_image], lr=lr, betas=betas, eps=eps)\n writer.add_graph(self.model)\n\n for epoch in range(nb_epochs):\n print(\"Epoch {}:\".format(epoch))\n writer.add_image(\"image\", self.var_image, epoch)\n for _ in tqdm(range(nb_iters)):\n self.var_image.data.clamp_(0, 1)\n optimizer.zero_grad()\n output = self.model(self.var_image)\n\n con_loss, sty_loss, var_loss = self._get_tot_loss(output)\n tot_loss = con_loss + sty_loss + var_loss\n\n tot_loss.backward()\n optimizer.step()\n\n iter_ = optimizer.state[optimizer.param_groups[0][\"params\"][-1]][\"step\"]\n writer.add_scalar(\"loss/content\", con_loss, iter_)\n writer.add_scalar(\"loss/style\", sty_loss, iter_)\n writer.add_scalar(\"loss/total\", tot_loss, iter_)\n writer.add_scalar(\"loss/var\", var_loss, iter_)\n\n self._print_statistics(epoch, image=self.var_image, tot_loss=tot_loss,\n con_loss=con_loss, sty_loss=sty_loss, var_loss=var_loss)\n\n return self.var_image.data.clamp_(0, 1)\n\n\n# ***********************************************************************************************************************\n\n\ncon_img_fp = \"/home/ben/ide/stylegan/resources/.ignored/content.jpg\"\nsty_img_fp = \"/home/ben/ide/stylegan/resources/.ignored/style.jpg\"\nimg_loader = ImageLoader(size=(512, 512), resize=True, interpolation=2)\n\ncon_image = img_loader.read_image(filepath=con_img_fp)\nsty_image = img_loader.read_image(filepath=sty_img_fp)\n\nimg_loader.show_image(con_image, title=\"Content 
Image\")\nimg_loader.show_image(sty_image, title=\"Style Image\")\n\ncon_layers = [\"conv4_2\"]\nsty_layers = [\"conv1_1\", \"conv2_1\", \"conv3_1\", \"conv4_1\", \"conv5_1\"]\n\n_NST_ = NeuralStyleTransfer(con_image=con_image, sty_image=sty_image, size=(512,512), con_layers=con_layers,\n sty_layers=sty_layers, con_loss_wt=1e-5, sty_loss_wt=1e4, var_loss_wt=5e-5)\n\noutput_image = _NST_.fit(nb_epochs=10, nb_iters=1000, lr=1e-2, eps=1e-8, betas=(0.9, 0.999))\n\nimg_loader = ImageLoader(size=512, resize=True)\nimg_loader.show_image(output_image, save_=True, filename=\"../../resources/.ignored/Stylized_Image.jpg\")\n\n# ***********************************************************************************************************************\n", "id": "1474432", "language": "Python", "matching_score": 0.7807196974754333, "max_stars_count": 0, "path": "creative_ai/models/style_transfer.py" }, { "content": "import requests\nimport os\n\ndef download(url, output):\n local_filename = os.path.join(output, url.split('/')[-1])\n # NOTE the stream=True parameter below\n with requests.get(url, stream=True) as r:\n r.raise_for_status()\n with open(local_filename, 'wb') as f:\n for chunk in r.iter_content(chunk_size=8192):\n # If you have chunk encoded response uncomment if\n # and set chunk_size parameter to None.\n #if chunk:\n f.write(chunk)\n return local_filename", "id": "8258102", "language": "Python", "matching_score": 0.20945331454277039, "max_stars_count": 0, "path": "creative_ai/utils.py" }, { "content": "from setuptools import setup\nfrom setuptools import find_packages\n\nsetup(name='goonalytics',\n version='0.1a',\n description='Goonalytics: web scraping, machine learning and whatever with the Something Awful forums',\n author='<NAME>',\n author_email='<EMAIL>',\n url='https://goonalytics.io',\n download_url='https://github.com/benlevineprofessionaledition/goonalytics',\n license='MIT',\n install_requires=['numpy',\n 'tensorflow',\n 'networkx',\n 'scipy',\n 'scrapy',\n 'lxml',\n 'nltk',\n 'bs4',\n 'google-cloud-core',\n 'google-cloud-bigquery',\n 'google-cloud-storage',\n 'flask',\n 'elasticsearch',\n 'avro-python3'\n ],\n package_data={'goonalytics': ['README.md']},\n packages=find_packages())", "id": "10732430", "language": "Python", "matching_score": 0.6338539123535156, "max_stars_count": 0, "path": "python/setup.py" }, { "content": "import unittest\n\nclass TestSomething(unittest.TestCase):\n\n def test_nothing_in_particular(self):\n self.assertTrue(True)", "id": "889482", "language": "Python", "matching_score": 0.04269085451960564, "max_stars_count": 0, "path": "creative_ai/tests/test_style_transfer.py" }, { "content": "import warnings\n\nwarnings.simplefilter(\"ignore\")\n\n\nclass Resize(object):\n\n def __init__(self, image_size: (int, tuple) = 256):\n\n \"\"\"\n Parameters:\n image_size: Final size of the image\n \"\"\"\n\n if isinstance(image_size, int): self.image_size = (image_size, image_size)\n elif isinstance(image_size, tuple): self.image_size = image_size\n else: raise ValueError(\"Unknown DataType of the parameter image_size found!!\")\n\n\n def __call__(self, sample):\n\n \"\"\"\n Parameters:\n sample: Dictionary containing image and label\n \"\"\"\n\n A, B = sample['A'], sample['B']\n\n A = tfm.resize(A, output_shape = self.image_size)\n B = tfm.resize(B, output_shape = self.image_size)\n\n A = np.clip(A, a_min = 0., a_max = 1.)\n B = np.clip(B, a_min = 0., a_max = 1.)\n\n return {'A': A, 'B': B}\n\n\nclass RandomCrop(object):\n\n def __init__(self, image_size: (int, tuple) 
= 256):\n\n \"\"\"\n Parameters:\n image_size: Final size of the image (should be smaller than current size o/w returns the original image)\n \"\"\"\n\n if isinstance(image_size, int): self.image_size = (image_size, image_size)\n elif isinstance(image_size, tuple): self.image_size = image_size\n else: raise ValueError(\"Unknown DataType of the parameter image_size found!!\")\n\n\n def __call__(self, sample):\n\n \"\"\"\n Parameters:\n sample: Dictionary containing image and label\n \"\"\"\n\n A, B = sample['A'], sample['B']\n curr_height, curr_width = A.shape[0], A.shape[1]\n\n ht_diff = max(0, curr_height - self.image_size[0])\n wd_diff = max(0, curr_width - self.image_size[1])\n top = np.random.randint(low = 0, high = ht_diff)\n lft = np.random.randint(low = 0, high = wd_diff)\n\n A = A[top: top + self.image_size[0], lft: lft + self.image_size[1]]\n B = B[top: top + self.image_size[0], lft: lft + self.image_size[1]]\n\n return {'A': A, 'B': B}\n\n\nclass Random_Flip(object):\n\n def __call__(self, sample):\n\n \"\"\"\n Parameters:\n sample: Dictionary containing image and label\n \"\"\"\n\n A, B = sample['A'], sample['B']\n if np.random.uniform(low = 0., high = 1.0) > .5:\n A = np.fliplr(A); B = np.fliplr(B)\n\n return {'A': A, 'B': B}\n\n\nclass To_Tensor(object):\n\n def __call__(self, sample):\n\n \"\"\"\n Parameters:\n sample: Dictionary containing image and label\n \"\"\"\n\n A = np.transpose(sample['A'].astype(np.float, copy = True), (2, 0, 1))\n B = np.transpose(sample['B'].astype(np.float, copy = True), (2, 0, 1))\n\n A = torch.tensor(A, dtype = torch.float)\n B = torch.tensor(B, dtype = torch.float)\n\n return {'A': A, 'B': B}\n\n\nclass Normalize(object):\n\n def __init__(self, mean = [0.5] * 3, stdv = [0.5] * 3):\n\n \"\"\"\n Parameters:\n mean: Normalizing mean\n stdv: Normalizing stdv\n \"\"\"\n\n mean = torch.tensor(mean, dtype = torch.float)\n stdv = torch.tensor(stdv, dtype = torch.float)\n self.transforms = T.Normalize(mean = mean, std = stdv)\n\n\n def __call__(self, sample):\n\n \"\"\"\n Parameters:\n sample: Dictionary containing image and label\n \"\"\"\n\n A, B = sample['A'], sample['B']\n A = self.transforms(A)\n B = self.transforms(B)\n\n return {'A': A, 'B': B}\n\n\n\nclass CustomDataset(Dataset):\n\n def __init__(self, path: str = None, transforms = None, max_sz: int = 1000):\n\n \"\"\"\n Parameters:\n transforms: a list of Transformations (Data augmentation)\n \"\"\"\n\n super().__init__(); self.transforms = T.Compose(transforms)\n\n file_names_A = sorted(os.listdir(path + 'A/'), key = lambda x: int(x[: -4]))\n self.file_names_A = [path + 'A/' + file_name for file_name in file_names_A]\n\n file_names_B = sorted(os.listdir(path + 'B/'), key = lambda x: int(x[: -4]))\n self.file_names_B = [path + 'B/' + file_name for file_name in file_names_B]\n\n self.file_names_A = self.file_names_A[:max_sz]\n self.file_names_B = self.file_names_B[:max_sz]\n\n\n def __len__(self):\n return max(len(self.file_names_A), len(self.file_names_B))\n\n\n def __getitem__(self, idx):\n\n A = io.imread(self.file_names_A[idx % len(self.file_names_A)])\n B = io.imread(self.file_names_B[idx % len(self.file_names_B)])\n sample = self.transforms({'A': A, 'B': B})\n\n return sample\n\n\n\nclass DataModule(pl.LightningDataModule):\n\n \"\"\"\n Download the dataset using the below link; you just need to specify the url while creating an object of this class\n https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets/\n Authors don't follow a consistent format for all the datasets, so, it 
might not work for few\n\n Implements the Lightining DataModule!\n \"\"\"\n\n def __init__(self, url: str, root_dir: str = \"./Dataset/CycleGAN/\", img_sz: int = 256, trn_batch_sz: int = 4,\n tst_batch_sz: int = 64):\n\n \"\"\"\n Parameters:\n url: Download URL of the dataset\n root_dir: Root dir where dataset needs to be downloaded\n img_sz: Size of the Image\n trn_batch_sz: Training Batch Size\n tst_batch_sz: Test Batch Size\n \"\"\"\n\n super().__init__()\n\n self.url = url\n self.dataset = url.split(\"/\")[-1]\n\n self.processed_dir = root_dir + \"Processed/\"\n self.compressed_dir = root_dir + \"Compressed/\"\n os.makedirs(self.processed_dir , exist_ok = True)\n os.makedirs(self.compressed_dir, exist_ok = True)\n\n self.trn_batch_sz = trn_batch_sz\n self.tst_batch_sz = tst_batch_sz\n\n jitter_sz = int(img_sz * 1.120)\n self.tst_tfms = [Resize(img_sz), To_Tensor(), Normalize()]\n self.trn_tfms = [Resize(jitter_sz), RandomCrop(img_sz), Random_Flip(), To_Tensor(), Normalize()]\n\n\n def prepare_data(self):\n\n if self.dataset in os.listdir(self.compressed_dir):\n print(f\"Dataset {self.dataset[:-4]} already exists!\")\n else:\n print(f\"Downloading dataset {self.dataset[:-4]}!!\")\n wget.download(self.url, self.compressed_dir)\n print(f\"\\nDataset {self.dataset[:-4]} downloaded. Extraction in progress!\")\n\n with zipfile.ZipFile(self.compressed_dir + self.dataset, 'r') as zip_ref:\n zip_ref.extractall(self.processed_dir)\n print(f\"Extraction done!\")\n\n # you might need to modify the below code; it's not generic, but works for most of the datasets listed in that url.\n dwnld_dir = self.processed_dir + self.dataset[:-4] + \"/\"\n for folder in [\"testA/\", \"testB/\", \"trainA/\", \"trainB/\"]:\n\n dest_dir = dwnld_dir\n src_dir = dwnld_dir + folder\n\n dest_dir = dest_dir + \"Train/\" if folder[:-2] != \"test\" else dest_dir + \"Test/\"\n dest_dir = dest_dir + \"B/\" if folder[-2] != \"A\" else dest_dir + \"A/\"\n os.makedirs(dest_dir, exist_ok = True)\n\n orig_files = [src_dir + file for file in os.listdir(src_dir)]\n modf_files = [dest_dir + \"{:06d}.jpg\".format(i) for i, file in enumerate(orig_files)]\n\n for orig_file, modf_file in zip(orig_files, modf_files):\n shutil.move(orig_file, modf_file)\n os.rmdir(src_dir)\n\n print(f\"Files moved to appropiate folder!\")\n\n\n def setup(self, stage: str = None):\n\n \"\"\"\n stage: fit/test\n \"\"\"\n\n dwnld_dir = self.processed_dir + self.dataset[:-4]\n trn_dir = dwnld_dir + \"/Train/\"\n tst_dir = dwnld_dir + \"/Test/\"\n\n if stage == 'fit' or stage is None:\n\n dataset = CustomDataset(path = trn_dir, transforms = self.trn_tfms)\n train_sz = int(len(dataset) * 0.9)\n valid_sz = len(dataset) - train_sz\n\n self.train, self.valid = random_split(dataset, [train_sz, valid_sz])\n print(f\"Size of the training dataset: {train_sz}, validation dataset: {valid_sz}\")\n\n if stage == 'test' or stage is None:\n self.test = CustomDataset(path = tst_dir, transforms = self.tst_tfms)\n print(f\"Size of the test dataset: {len(self.test)}\")\n\n\n def train_dataloader(self):\n return DataLoader(self.train, batch_size = self.trn_batch_sz, shuffle = True , num_workers = 16, pin_memory = True)\n\n def val_dataloader (self):\n return DataLoader(self.valid, batch_size = self.tst_batch_sz, shuffle = False, num_workers = 16, pin_memory = True)\n\n def test_dataloader (self):\n return DataLoader(self.test , batch_size = self.tst_batch_sz, shuffle = False, num_workers = 16, pin_memory = True)\n\n\n\ndef show_image(image):\n plt.imshow(np.transpose((image + 
1) / 2, (1, 2, 0)))\n\ndef get_random_sample(dataset):\n return dataset[np.random.randint(0, len(dataset))]\n\n\n############################################################################################################################################################\n\n\nimg_sz = 512\nurl = \"https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets/cezanne2photo.zip\"\n\n# You can decrease the num_workers argument in {train/val/test}_dataloader\ndatamodule = DataModule(url, trn_batch_sz = 1, tst_batch_sz = 64)\ndatamodule.prepare_data()\ndatamodule.setup(\"fit\")\n\n\nprint(f\"Few random samples from the Training dataset!\")\n\nsample = get_random_sample(datamodule.train)\nplt.subplot(1, 2, 1); show_image(sample['A'])\nplt.subplot(1, 2, 2); show_image(sample['B'])\nplt.show()\n\nprint(f\"Few random samples from the Validation dataset!\")\n\nsample = get_random_sample(datamodule.valid)\nplt.subplot(1, 2, 1); show_image(sample['A'])\nplt.subplot(1, 2, 2); show_image(sample['B'])\nplt.show()\n\n\n############################################################################################################################################################\n\n\nclass ResBlock(nn.Module):\n\n def __init__(self, in_channels: int, apply_dp: bool = True):\n\n \"\"\"\n Defines a ResBlock\n X ------------------------identity------------------------\n |-- Convolution -- Norm -- ReLU -- Convolution -- Norm --|\n \"\"\"\n\n \"\"\"\n Parameters:\n in_channels: Number of input channels\n apply_dp: If apply_dp is set to True, then activations are 0'ed out with prob 0.5\n \"\"\"\n\n super().__init__()\n\n conv = nn.Conv2d(in_channels = in_channels, out_channels = in_channels, kernel_size = 3, stride = 1)\n layers = [nn.ReflectionPad2d(1), conv, nn.InstanceNorm2d(in_channels), nn.ReLU(True)]\n\n if apply_dp:\n layers += [nn.Dropout(0.5)]\n\n conv = nn.Conv2d(in_channels = in_channels, out_channels = in_channels, kernel_size = 3, stride = 1)\n layers += [nn.ReflectionPad2d(1), conv, nn.InstanceNorm2d(in_channels)]\n\n self.net = nn.Sequential(*layers)\n\n\n def forward(self, x): return x + self.net(x)\n\n\n\nclass Generator(nn.Module):\n\n def __init__(self, in_channels: int = 3, out_channels: int = 64, apply_dp: bool = True):\n\n \"\"\"\n Generator Architecture (Image Size: 256)\n c7s1-64, d128, d256, R256, R256, R256, R256, R256, R256, R256, R256, R256, u128, u64, c7s1-3,\n\n where c7s1-k denote a 7 × 7 Conv-InstanceNorm-ReLU layer with k filters and stride 1, dk denotes a 3 × 3\n Conv-InstanceNorm-ReLU layer with k filters and stride 2, Rk denotes a residual block that contains two\n 3 × 3 Conv layers with the same number of filters on both layer. 
uk denotes a 3 × 3 DeConv-InstanceNorm-\n ReLU layer with k filters and stride 1.\n \"\"\"\n\n \"\"\"\n Parameters:\n in_channels: Number of input channels\n out_channels: Number of output channels\n apply_dp: If apply_dp is set to True, then activations are 0'ed out with prob 0.5\n \"\"\"\n\n super().__init__()\n\n f = 1\n nb_downsampling = 2\n nb_resblks = 6 if img_sz == 128 else 9\n\n conv = nn.Conv2d(in_channels = in_channels, out_channels = out_channels, kernel_size = 7, stride = 1)\n self.layers = [nn.ReflectionPad2d(3), conv, nn.InstanceNorm2d(out_channels), nn.ReLU(True)]\n\n for i in range(nb_downsampling):\n conv = nn.Conv2d(out_channels * f, out_channels * 2 * f, kernel_size = 3, stride = 2, padding = 1)\n self.layers += [conv, nn.InstanceNorm2d(out_channels * 2 * f), nn.ReLU(True)]\n f *= 2\n\n for i in range(nb_resblks):\n res_blk = ResBlock(in_channels = out_channels * f, apply_dp = apply_dp)\n self.layers += [res_blk]\n\n for i in range(nb_downsampling):\n conv = nn.ConvTranspose2d(out_channels * f, out_channels * (f//2), 3, 2, padding = 1, output_padding = 1)\n self.layers += [conv, nn.InstanceNorm2d(out_channels * (f//2)), nn.ReLU(True)]\n f = f // 2\n\n conv = nn.Conv2d(in_channels = out_channels, out_channels = in_channels, kernel_size = 7, stride = 1)\n self.layers += [nn.ReflectionPad2d(3), conv, nn.Tanh()]\n\n self.net = nn.Sequential(*self.layers)\n\n\n def forward(self, x): return self.net(x)\n\n\n\nclass Discriminator(nn.Module):\n\n def __init__(self, in_channels: int = 3, out_channels: int = 64, nb_layers: int = 3):\n\n \"\"\"\n Discriminator Architecture!\n C64 - C128 - C256 - C512, where Ck denote a Convolution-InstanceNorm-LeakyReLU layer with k filters\n \"\"\"\n\n \"\"\"\n Parameters:\n in_channels: Number of input channels\n out_channels: Number of output channels\n nb_layers: Number of layers in the 70*70 Patch Discriminator\n \"\"\"\n\n super().__init__()\n in_f = 1\n out_f = 2\n\n conv = nn.Conv2d(in_channels, out_channels, kernel_size = 4, stride = 2, padding = 1)\n self.layers = [conv, nn.LeakyReLU(0.2, True)]\n\n for idx in range(1, nb_layers):\n conv = nn.Conv2d(out_channels * in_f, out_channels * out_f, kernel_size = 4, stride = 2, padding = 1)\n self.layers += [conv, nn.InstanceNorm2d(out_channels * out_f), nn.LeakyReLU(0.2, True)]\n in_f = out_f\n out_f *= 2\n\n out_f = min(2 ** nb_layers, 8)\n conv = nn.Conv2d(out_channels * in_f, out_channels * out_f, kernel_size = 4, stride = 1, padding = 1)\n self.layers += [conv, nn.InstanceNorm2d(out_channels * out_f), nn.LeakyReLU(0.2, True)]\n\n conv = nn.Conv2d(out_channels * out_f, out_channels = 1, kernel_size = 4, stride = 1, padding = 1)\n self.layers += [conv]\n\n self.net = nn.Sequential(*self.layers)\n\n\n def forward(self, x): return self.net(x)\n\n\n\nclass Initializer:\n\n def __init__(self, init_type: str = 'normal', init_gain: float = 0.02):\n\n \"\"\"\n Initializes the weight of the network!\n\n Parameters:\n init_type: Initializer type - 'kaiming' or 'xavier' or 'normal'\n init_gain: Standard deviation of the normal distribution\n \"\"\"\n\n self.init_type = init_type\n self.init_gain = init_gain\n\n\n def init_module(self, m):\n\n cls_name = m.__class__.__name__;\n if hasattr(m, 'weight') and (cls_name.find('Conv') != -1 or cls_name.find('Linear') != -1):\n\n if self.init_type == 'kaiming': nn.init.kaiming_normal_(m.weight.data, a = 0, mode = 'fan_in')\n elif self.init_type == 'xavier' : nn.init.xavier_normal_ (m.weight.data, gain = self.init_gain)\n elif self.init_type == 'normal' : 
nn.init.normal_(m.weight.data, mean = 0, std = self.init_gain)\n else: raise ValueError('Initialization not found!!')\n\n if m.bias is not None: nn.init.constant_(m.bias.data, val = 0);\n\n if hasattr(m, 'weight') and cls_name.find('BatchNorm2d') != -1:\n nn.init.normal_(m.weight.data, mean = 1.0, std = self.init_gain)\n nn.init.constant_(m.bias.data, val = 0)\n\n\n def __call__(self, net):\n\n \"\"\"\n Parameters:\n net: Network\n \"\"\"\n\n net.apply(self.init_module)\n\n return net\n\n\n\nclass ImagePool:\n\n \"\"\"\n This class implements an image buffer that stores previously generated images! This buffer enables to update\n discriminators using a history of generated image rather than the latest ones produced by generator.\n \"\"\"\n\n def __init__(self, pool_sz: int = 50):\n\n \"\"\"\n Parameters:\n pool_sz: Size of the image buffer\n \"\"\"\n\n self.nb_images = 0\n self.image_pool = []\n self.pool_sz = pool_sz\n\n\n def push_and_pop(self, images):\n\n \"\"\"\n Parameters:\n images: latest images generated by the generator\n\n Returns a batch of images from pool!\n \"\"\"\n\n images_to_return = []\n for image in images:\n image = torch.unsqueeze(image, 0)\n\n if self.nb_images < self.pool_sz:\n self.image_pool.append (image)\n images_to_return.append(image)\n self.nb_images += 1\n else:\n if np.random.uniform(0, 1) > 0.5:\n\n rand_int = np.random.randint(0, self.pool_sz)\n temp_img = self.image_pool[rand_int].clone()\n self.image_pool[rand_int] = image\n images_to_return.append(temp_img)\n else:\n images_to_return.append(image)\n\n return torch.cat(images_to_return, 0)\n\n\n\nclass Loss:\n\n \"\"\"\n This class implements different losses required to train the generators and discriminators of CycleGAN\n \"\"\"\n\n def __init__(self, loss_type: str = 'MSE', lambda_: int = 10):\n\n \"\"\"\n Parameters:\n loss_type: Loss Function to train CycleGAN\n lambda_: Weightage of Cycle-consistency loss\n \"\"\"\n\n self.loss = nn.MSELoss() if loss_type == 'MSE' else nn.BCEWithLogitsLoss()\n self.lambda_ = lambda_\n\n\n def get_dis_loss(self, dis_pred_real_data, dis_pred_fake_data):\n\n \"\"\"\n Parameters:\n dis_pred_real_data: Discriminator's prediction on real data\n dis_pred_fake_data: Discriminator's prediction on fake data\n \"\"\"\n\n dis_tar_real_data = torch.ones_like (dis_pred_real_data, requires_grad = False)\n dis_tar_fake_data = torch.zeros_like(dis_pred_fake_data, requires_grad = False)\n\n loss_real_data = self.loss(dis_pred_real_data, dis_tar_real_data)\n loss_fake_data = self.loss(dis_pred_fake_data, dis_tar_fake_data)\n\n dis_tot_loss = (loss_real_data + loss_fake_data) * 0.5\n\n return dis_tot_loss\n\n\n def get_gen_gan_loss(self, dis_pred_fake_data):\n\n \"\"\"\n Parameters:\n dis_pred_fake_data: Discriminator's prediction on fake data\n \"\"\"\n\n gen_tar_fake_data = torch.ones_like(dis_pred_fake_data, requires_grad = False)\n gen_tot_loss = self.loss(dis_pred_fake_data, gen_tar_fake_data)\n\n return gen_tot_loss\n\n\n def get_gen_cyc_loss(self, real_data, cyc_data):\n\n \"\"\"\n Parameters:\n real_data: Real images sampled from the dataloaders\n cyc_data: Image reconstructed after passing the real image through both the generators\n X_recons = F * G (X_real), where F and G are the two generators\n \"\"\"\n\n gen_cyc_loss = torch.nn.L1Loss()(real_data, cyc_data)\n gen_tot_loss = gen_cyc_loss * self.lambda_\n\n return gen_tot_loss\n\n\n def get_gen_idt_loss(self, real_data, idt_data):\n\n \"\"\"\n Implements the identity loss:\n nn.L1Loss(LG_B2A(real_A), real_A)\n 
nn.L1Loss(LG_A2B(real_B), real_B)\n \"\"\"\n\n gen_idt_loss = torch.nn.L1Loss()(real_data, idt_data)\n gen_tot_loss = gen_idt_loss * self.lambda_ * 0.5\n\n return gen_tot_loss\n\n\n def get_gen_loss(self, real_A, real_B, cyc_A, cyc_B, idt_A, idt_B, d_A_pred_fake_data,\n d_B_pred_fake_data):\n\n \"\"\"\n Implements the total Generator loss\n Sum of Cycle loss, Identity loss, and GAN loss\n \"\"\"\n\n #Cycle loss\n cyc_loss_A = self.get_gen_cyc_loss(real_A, cyc_A)\n cyc_loss_B = self.get_gen_cyc_loss(real_B, cyc_B)\n tot_cyc_loss = cyc_loss_A + cyc_loss_B\n\n # GAN loss\n g_A2B_gan_loss = self.get_gen_gan_loss(d_B_pred_fake_data)\n g_B2A_gan_loss = self.get_gen_gan_loss(d_A_pred_fake_data)\n\n # Identity loss\n g_B2A_idt_loss = self.get_gen_idt_loss(real_A, idt_A)\n g_A2B_idt_loss = self.get_gen_idt_loss(real_B, idt_B)\n\n # Total individual losses\n g_A2B_loss = g_A2B_gan_loss + g_A2B_idt_loss + tot_cyc_loss\n g_B2A_loss = g_B2A_gan_loss + g_B2A_idt_loss + tot_cyc_loss\n g_tot_loss = g_A2B_loss + g_B2A_loss - tot_cyc_loss\n\n return g_A2B_loss, g_B2A_loss, g_tot_loss\n\n\n\nclass CycleGAN(pl.LightningModule):\n\n def __init__(self, d_lr: float = 2e-4, g_lr: float = 2e-4, beta_1: float = 0.5, beta_2: float = 0.999, \n epoch_decay: int = 200):\n\n super().__init__()\n\n self.d_lr = d_lr\n self.g_lr = g_lr\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.epoch_decay = epoch_decay\n\n self.fake_pool_A = ImagePool(pool_sz = 50)\n self.fake_pool_B = ImagePool(pool_sz = 50)\n\n self.loss = Loss(loss_type = 'MSE', lambda_ = 10)\n init = Initializer(init_type = 'normal', init_gain = 0.02)\n\n self.d_A = init(Discriminator(in_channels = 3, out_channels = 64, nb_layers = 3))\n self.d_B = init(Discriminator(in_channels = 3, out_channels = 64, nb_layers = 3))\n self.g_A2B = init(Generator(in_channels = 3, out_channels = 64, apply_dp = False))\n self.g_B2A = init(Generator(in_channels = 3, out_channels = 64, apply_dp = False))\n\n self.d_A_params = self.d_A.parameters()\n self.d_B_params = self.d_B.parameters()\n self.g_params = itertools.chain([*self.g_A2B.parameters(), *self.g_B2A.parameters()])\n\n self.example_input_array = [torch.rand(1, 3, img_sz, img_sz, device = self.device),\n torch.rand(1, 3, img_sz, img_sz, device = self.device)]\n\n\n @staticmethod\n def set_requires_grad(nets, requires_grad = False):\n\n \"\"\"\n Set requies_grad=Fasle for all the networks to avoid unnecessary computations\n Parameters:\n nets (network list) -- a list of networks\n requires_grad (bool) -- whether the networks require gradients or not\n \"\"\"\n\n if not isinstance(nets, list): nets = [nets]\n for net in nets:\n for param in net.parameters():\n param.requires_grad = requires_grad\n\n\n def forward(self, real_A, real_B):\n \n \"\"\"\n This is different from the training step. 
You should treat this as the final inference code \n (final outputs that you are looking for!), but you can definitely use it in the training_step \n to make some code reusable.\n Parameters:\n real_A -- real image of A\n real_B -- real image of B\n \"\"\"\n \n fake_B = self.g_A2B(real_A)\n fake_A = self.g_B2A(real_B)\n\n return fake_B, fake_A\n \n \n def forward_gen(self, real_A, real_B, fake_A, fake_B):\n \n \"\"\"\n Gets the remaining output of both the generators for the training/validation step\n Parameters:\n real_A -- real image of A\n real_B -- real image of B\n fake_A -- fake image of A\n fake_B -- fake image of B\n \"\"\"\n \n cyc_A = self.g_B2A(fake_B)\n idt_A = self.g_B2A(real_A)\n \n cyc_B = self.g_A2B(fake_A)\n idt_B = self.g_A2B(real_B)\n \n return cyc_A, idt_A, cyc_B, idt_B\n \n \n @staticmethod\n def forward_dis(dis, real_data, fake_data):\n \n \"\"\"\n Gets the Discriminator output\n Parameters:\n dis -- Discriminator\n real_data -- real image\n fake_data -- fake image\n \"\"\"\n \n pred_real_data = dis(real_data)\n pred_fake_data = dis(fake_data)\n \n return pred_real_data, pred_fake_data\n\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n\n real_A, real_B = batch['A'], batch['B']\n fake_B, fake_A = self(real_A, real_B)\n \n \n if optimizer_idx == 0:\n \n cyc_A, idt_A, cyc_B, idt_B = self.forward_gen(real_A, real_B, fake_A, fake_B)\n \n # No need to calculate the gradients for Discriminators' parameters\n self.set_requires_grad([self.d_A, self.d_B], requires_grad = False)\n d_A_pred_fake_data = self.d_A(fake_A)\n d_B_pred_fake_data = self.d_B(fake_B)\n\n g_A2B_loss, g_B2A_loss, g_tot_loss = self.loss.get_gen_loss(real_A, real_B, cyc_A, cyc_B, idt_A, idt_B, \n d_A_pred_fake_data, d_B_pred_fake_data)\n\n dict_ = {'g_tot_train_loss': g_tot_loss, 'g_A2B_train_loss': g_A2B_loss, 'g_B2A_train_loss': g_B2A_loss}\n self.log_dict(dict_, on_step = True, on_epoch = True, prog_bar = True, logger = True)\n\n return g_tot_loss\n \n\n if optimizer_idx == 1:\n \n self.set_requires_grad([self.d_A], requires_grad = True)\n fake_A = self.fake_pool_A.push_and_pop(fake_A)\n d_A_pred_real_data, d_A_pred_fake_data = self.forward_dis(self.d_A, real_A, fake_A.detach())\n\n # GAN loss\n d_A_loss = self.loss.get_dis_loss(d_A_pred_real_data, d_A_pred_fake_data)\n self.log(\"d_A_train_loss\", d_A_loss, on_step = True, on_epoch = True, prog_bar = True, logger = True)\n\n return d_A_loss\n \n\n if optimizer_idx == 2:\n \n self.set_requires_grad([self.d_B], requires_grad = True)\n fake_B = self.fake_pool_B.push_and_pop(fake_B)\n d_B_pred_real_data, d_B_pred_fake_data = self.forward_dis(self.d_B, real_B, fake_B.detach())\n\n # GAN loss\n d_B_loss = self.loss.get_dis_loss(d_B_pred_real_data, d_B_pred_fake_data)\n self.log(\"d_B_train_loss\", d_B_loss, on_step = True, on_epoch = True, prog_bar = True, logger = True)\n\n return d_B_loss\n\n\n def shared_step(self, batch, stage: str = 'val'):\n\n grid_A = []\n grid_B = []\n \n real_A, real_B = batch['A'], batch['B']\n \n fake_B, fake_A = self(real_A, real_B)\n cyc_A , idt_A , cyc_B, idt_B = self.forward_gen(real_A, real_B, fake_A, fake_B)\n \n d_A_pred_real_data, d_A_pred_fake_data = self.forward_dis(self.d_A, real_A, fake_A)\n d_B_pred_real_data, d_B_pred_fake_data = self.forward_dis(self.d_B, real_B, fake_B)\n \n # G_A2B loss, G_B2A loss, G loss\n g_A2B_loss, g_B2A_loss, g_tot_loss = self.loss.get_gen_loss(real_A, real_B, cyc_A, cyc_B, idt_A, idt_B, \n d_A_pred_fake_data, d_B_pred_fake_data)\n\n # D_A loss, D_B loss\n d_A_loss = 
self.loss.get_dis_loss(d_A_pred_real_data, d_A_pred_fake_data)\n d_B_loss = self.loss.get_dis_loss(d_B_pred_real_data, d_B_pred_fake_data)\n\n dict_ = {f'g_tot_{stage}_loss': g_tot_loss, f'g_A2B_{stage}_loss': g_A2B_loss, f'g_B2A_{stage}_loss': g_B2A_loss, \n f'd_A_{stage}_loss' : d_A_loss , f'd_B_{stage}_loss' : d_B_loss}\n self.log_dict(dict_, on_step = False, on_epoch = True, prog_bar = True, logger = True)\n\n for i in range(12):\n rand_int = np.random.randint(0, len(real_A))\n tensor = torch.stack([real_A[rand_int], fake_B[rand_int], cyc_A[rand_int],\n real_B[rand_int], fake_A[rand_int], cyc_B[rand_int]])\n tensor = (tensor + 1) / 2\n grid_A.append(tensor[:3])\n grid_B.append(tensor[3:])\n \n # log the results on tensorboard\n grid_A = torchvision.utils.make_grid(torch.cat(grid_A, 0), nrow = 6)\n grid_B = torchvision.utils.make_grid(torch.cat(grid_B, 0), nrow = 6)\n self.logger.experiment.add_image('Grid_A', grid_A, self.current_epoch, dataformats = \"CHW\")\n self.logger.experiment.add_image('Grid_B', grid_B, self.current_epoch, dataformats = \"CHW\")\n\n\n def validation_step(self, batch, batch_idx):\n return self.shared_step(batch, 'val')\n\n\n def test_step(self, batch, batch_idx):\n return self.shared_step(batch, 'test')\n\n\n def lr_lambda(self, epoch):\n\n fraction = (epoch - self.epoch_decay) / self.epoch_decay\n return 1 if epoch < self.epoch_decay else 1 - fraction\n\n\n def configure_optimizers(self):\n \n # define the optimizers here\n g_opt = torch.optim.Adam(self.g_params , lr = self.g_lr, betas = (self.beta_1, self.beta_2))\n d_A_opt = torch.optim.Adam(self.d_A_params, lr = self.d_lr, betas = (self.beta_1, self.beta_2))\n d_B_opt = torch.optim.Adam(self.d_B_params, lr = self.d_lr, betas = (self.beta_1, self.beta_2))\n \n # define the lr_schedulers here\n g_sch = optim.lr_scheduler.LambdaLR(g_opt , lr_lambda = self.lr_lambda)\n d_A_sch = optim.lr_scheduler.LambdaLR(d_A_opt, lr_lambda = self.lr_lambda)\n d_B_sch = optim.lr_scheduler.LambdaLR(d_B_opt, lr_lambda = self.lr_lambda)\n \n # first return value is a list of optimizers and second is a list of lr_schedulers \n # (you can return empty list also)\n return [g_opt, d_A_opt, d_B_opt], [g_sch, d_A_sch, d_B_sch]\n\n\n\n############################################################################################################################################################\n\n\nTEST = True\nTRAIN = True\nRESTORE = False\nresume_from_checkpoint = None if TRAIN else \"path/to/checkpoints/\" # \"./logs/CycleGAN/version_0/checkpoints/epoch=1.ckpt\"\n\n\nif TRAIN or RESTORE:\n \n epochs = 200\n epoch_decay = epochs // 2\n \n model = CycleGAN(epoch_decay = epoch_decay)\n tb_logger = pl_loggers.TensorBoardLogger('logs/', name = \"CycleGAN\", log_graph = True)\n \n lr_logger = LearningRateMonitor(logging_interval = 'epoch')\n checkpoint_callback = ModelCheckpoint(monitor = \"g_tot_val_loss\", save_top_k = 3, period = 2, save_last = True)\n callbacks = [lr_logger, checkpoint_callback]\n \n # you can change the gpus argument to how many you have (I had only 1 :( )\n # Set the deterministic flag to True for full reproducibility\n trainer = pl.Trainer(accelerator = 'ddp', gpus = -1, max_epochs = epochs, progress_bar_refresh_rate = 20, precision = 16, \n callbacks = callbacks, num_sanity_val_steps = 1, logger = tb_logger, resume_from_checkpoint = \n resume_from_checkpoint, log_every_n_steps = 25, profiler = True, deterministic = True)\n \n trainer.fit(model, datamodule)\n \n \nif TEST:\n \n \"\"\"\n This is one of the many ways to run 
inference, but I would recommend you to look into the docs for other \n options as well, so that you can use one which suits you best.\n \"\"\"\n \n trainer = pl.Trainer(gpus = -1, precision = 16, profiler = True)\n # load the checkpoint that you want to load\n checkpoint_path = \"path/to/checkpoints/\" # \"./logs/CycleGAN/version_0/checkpoints/epoch=1.ckpt\"\n \n model = CycleGAN.load_from_checkpoint(checkpoint_path = checkpoint_path)\n model.freeze()\n \n # put the datamodule in test mode\n datamodule.setup(\"test\")\n test_data = datamodule.test_dataloader()\n\n trainer.test(model, test_dataloaders = test_data)\n # look tensorboard for the final results\n # You can also run an inference on a single image using the forward function defined above!!\n \n \n", "id": "9848538", "language": "Python", "matching_score": 10.093384742736816, "max_stars_count": 0, "path": "creative_ai/models/cycleg_pl.py" }, { "content": "\nfrom Imports import *\nwarnings.simplefilter(\"ignore\")\n\n\nclass Resize(object):\n\n def __init__(self, image_size: (int, tuple) = 256):\n\n \"\"\"\n Parameters:\n image_size: Final size of the image\n \"\"\"\n\n if isinstance(image_size, int): self.image_size = (image_size, image_size)\n elif isinstance(image_size, tuple): self.image_size = image_size\n else: raise ValueError(\"Unknown DataType of the parameter image_size found!!\")\n\n\n def __call__(self, sample):\n\n \"\"\"\n Parameters:\n sample: Dictionary containing image and label\n \"\"\"\n\n A, B = sample['A'], sample['B']\n\n A = tfm.resize(A, output_shape = self.image_size)\n B = tfm.resize(B, output_shape = self.image_size)\n\n A = np.clip(A, a_min = 0., a_max = 1.)\n B = np.clip(B, a_min = 0., a_max = 1.)\n\n return {'A': A, 'B': B}\n\n\nclass RandomCrop(object):\n\n def __init__(self, image_size: (int, tuple) = 256):\n\n \"\"\"\n Parameters:\n image_size: Final size of the image (should be smaller than current size o/w returns the original image)\n \"\"\"\n\n if isinstance(image_size, int): self.image_size = (image_size, image_size)\n elif isinstance(image_size, tuple): self.image_size = image_size\n else: raise ValueError(\"Unknown DataType of the parameter image_size found!!\")\n\n\n def __call__(self, sample):\n\n \"\"\"\n Parameters:\n sample: Dictionary containing image and label\n \"\"\"\n\n A, B = sample['A'], sample['B']\n curr_height, curr_width = A.shape[0], A.shape[1]\n\n ht_diff = max(0, curr_height - self.image_size[0])\n wd_diff = max(0, curr_width - self.image_size[1])\n top = np.random.randint(low = 0, high = ht_diff)\n lft = np.random.randint(low = 0, high = wd_diff)\n\n A = A[top: top + self.image_size[0], lft: lft + self.image_size[1]]\n B = B[top: top + self.image_size[0], lft: lft + self.image_size[1]]\n\n return {'A': A, 'B': B}\n\n\nclass Random_Flip(object):\n\n def __call__(self, sample):\n\n \"\"\"\n Parameters:\n sample: Dictionary containing image and label\n \"\"\"\n\n A, B = sample['A'], sample['B']\n if np.random.uniform(low = 0., high = 1.0) > .5:\n A = np.fliplr(A)\n B = np.fliplr(B)\n\n return {'A': A, 'B': B}\n\n\nclass To_Tensor(object):\n\n def __call__(self, sample):\n\n \"\"\"\n Parameters:\n sample: Dictionary containing image and label\n \"\"\"\n\n A = np.transpose(sample['A'].astype(np.float, copy = True), (2, 0, 1))\n B = np.transpose(sample['B'].astype(np.float, copy = True), (2, 0, 1))\n\n A = torch.tensor(A, dtype = torch.float)\n B = torch.tensor(B, dtype = torch.float)\n\n return {'A': A, 'B': B}\n\n\nclass Normalize(object):\n\n def __init__(self, mean = [0.5] * 
3, stdv = [0.5] * 3):\n\n \"\"\"\n Parameters:\n mean: Normalizing mean\n stdv: Normalizing stdv\n \"\"\"\n\n mean = torch.tensor(mean, dtype = torch.float)\n stdv = torch.tensor(stdv, dtype = torch.float)\n self.transforms = T.Normalize(mean = mean, std = stdv)\n\n\n def __call__(self, sample):\n\n \"\"\"\n Parameters:\n sample: Dictionary containing image and label\n \"\"\"\n\n A, B = sample['A'], sample['B']\n A = self.transforms(A)\n B = self.transforms(B)\n\n return {'A': A, 'B': B}\n\n\n\nclass CustomDataset(Dataset):\n\n def __init__(self, path: str = None, transforms = None, max_sz: int = 1000):\n\n \"\"\"\n Parameters:\n transforms: a list of Transformations (Data augmentation)\n \"\"\"\n\n super().__init__(); self.transforms = T.Compose(transforms)\n\n file_names_A = sorted(os.listdir(path + 'A/'), key = lambda x: int(x[: -4]))\n self.file_names_A = [path + 'A/' + file_name for file_name in file_names_A]\n\n file_names_B = sorted(os.listdir(path + 'B/'), key = lambda x: int(x[: -4]))\n self.file_names_B = [path + 'B/' + file_name for file_name in file_names_B]\n\n self.file_names_A = self.file_names_A[:max_sz]\n self.file_names_B = self.file_names_B[:max_sz]\n\n\n def __len__(self):\n assert len(self.file_names_A) == len(self.file_names_B)\n return len(self.file_names_A)\n\n\n def __getitem__(self, idx):\n\n A = io.imread(self.file_names_A[idx])\n B = io.imread(self.file_names_B[idx])\n sample = self.transforms({'A': A, 'B': B})\n\n return sample\n\n\n\nclass DataModule(pl.LightningDataModule):\n\n \"\"\"\n Download the dataset using the below link; you just need to specify the url while creating an object of this class\n https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets/\n Authors don't follow a consistent format for all the datasets, so, it might not work for few\n\n Implements the Lightining DataModule!\n \"\"\"\n\n def __init__(self, url: str, root_dir: str = \"./Dataset/CycleGAN/\", img_sz: int = 256, trn_batch_sz: int = 4,\n tst_batch_sz: int = 64):\n\n \"\"\"\n Parameters:\n url: Download URL of the dataset\n root_dir: Root dir where dataset needs to be downloaded\n img_sz: Size of the Image\n trn_batch_sz: Training Batch Size\n tst_batch_sz: Test Batch Size\n \"\"\"\n\n super().__init__()\n\n self.url = url\n self.dataset = url.split(\"/\")[-1]\n\n self.processed_dir = root_dir + \"Processed/\"\n self.compressed_dir = root_dir + \"Compressed/\"\n os.makedirs(self.processed_dir , exist_ok = True)\n os.makedirs(self.compressed_dir, exist_ok = True)\n\n self.trn_batch_sz = trn_batch_sz\n self.tst_batch_sz = tst_batch_sz\n\n jitter_sz = int(img_sz * 1.120)\n self.tst_tfms = [Resize(img_sz), To_Tensor(), Normalize()]\n self.trn_tfms = [Resize(jitter_sz), RandomCrop(img_sz), Random_Flip(), To_Tensor(), Normalize()]\n\n\n def prepare_data(self):\n\n if self.dataset in os.listdir(self.compressed_dir):\n print(f\"Dataset {self.dataset[:-4]} already exists!\")\n else:\n print(f\"Downloading dataset {self.dataset[:-4]}!!\")\n wget.download(self.url, self.compressed_dir)\n print(f\"\\nDataset {self.dataset[:-4]} downloaded. 
Extraction in progress!\")\n\n with zipfile.ZipFile(self.compressed_dir + self.dataset, 'r') as zip_ref:\n zip_ref.extractall(self.processed_dir)\n print(f\"Extraction done!\")\n\n # you might need to modify the below code; it's not generic, but works for most of the datasets \n # listed in that url.\n \n dwnld_dir = self.processed_dir + self.dataset[:-4] + \"/\"\n for folder in [\"testA/\", \"testB/\", \"trainA/\", \"trainB/\"]:\n\n dest_dir = dwnld_dir\n src_dir = dwnld_dir + folder\n\n dest_dir = dest_dir + \"Train/\" if folder[:-2] != \"test\" else dest_dir + \"Test/\"\n dest_dir = dest_dir + \"B/\" if folder[-2] != \"A\" else dest_dir + \"A/\"\n os.makedirs(dest_dir, exist_ok = True)\n\n orig_files = [src_dir + file for file in sorted(os.listdir(src_dir))]\n modf_files = [dest_dir + \"{:06d}.jpg\".format(i) for i, file in enumerate(orig_files)]\n\n for orig_file, modf_file in zip(orig_files, modf_files):\n shutil.move(orig_file, modf_file)\n os.rmdir(src_dir)\n\n print(f\"Files moved to appropiate folder!\")\n\n\n def setup(self, stage: str = None):\n\n \"\"\"\n stage: fit/test\n \"\"\"\n\n dwnld_dir = self.processed_dir + self.dataset[:-4]\n trn_dir = dwnld_dir + \"/Train/\"\n tst_dir = dwnld_dir + \"/Test/\"\n\n if stage == 'fit' or stage is None:\n\n dataset = CustomDataset(path = trn_dir, transforms = self.trn_tfms)\n train_sz = int(len(dataset) * 0.9)\n valid_sz = len(dataset) - train_sz\n\n self.train, self.valid = random_split(dataset, [train_sz, valid_sz])\n print(f\"Size of the training dataset: {train_sz}, validation dataset: {valid_sz}\")\n\n if stage == 'test' or stage is None:\n self.test = CustomDataset(path = tst_dir, transforms = self.tst_tfms)\n print(f\"Size of the test dataset: {len(self.test)}\")\n\n\n def train_dataloader(self):\n return DataLoader(self.train, batch_size = self.trn_batch_sz, shuffle = True , num_workers = 16, \n pin_memory = True)\n\n def val_dataloader (self):\n return DataLoader(self.valid, batch_size = self.tst_batch_sz, shuffle = False, num_workers = 16, \n pin_memory = True)\n\n def test_dataloader (self):\n return DataLoader(self.test , batch_size = self.tst_batch_sz, shuffle = False, num_workers = 16, \n pin_memory = True)\n\n\ndef show_image(image):\n plt.imshow(np.transpose((image + 1) / 2, (1, 2, 0)))\n\ndef get_random_sample(dataset):\n return dataset[np.random.randint(0, len(dataset))]\n\n\n###############################################################################################################################################\n\n\nimg_sz = 256\nurl = \"https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets/facades.zip\"\n\n# You can decrease the num_workers argument in {train/val/test}_dataloader\ndatamodule = DataModule(url, root_dir = \"./Dataset/Pix2Pix/\", trn_batch_sz = 1, tst_batch_sz = 64)\ndatamodule.prepare_data()\ndatamodule.setup(\"fit\")\n\n\nprint(f\"Few random samples from the Training dataset!\")\n\nsample = get_random_sample(datamodule.train)\nplt.subplot(1, 2, 1); show_image(sample['A'])\nplt.subplot(1, 2, 2); show_image(sample['B'])\nplt.show()\n\nprint(f\"Few random samples from the Validation dataset!\")\n\nsample = get_random_sample(datamodule.valid)\nplt.subplot(1, 2, 1); show_image(sample['A'])\nplt.subplot(1, 2, 2); show_image(sample['B'])\nplt.show()\n\n\n###############################################################################################################################################\n\n\nclass UNetBlock(nn.Module):\n\n def __init__(self, input_channels: int, inner_channels: int, 
innermost: bool = False, outermost: bool = False,\n apply_dp: bool = False, submodule = None, add_skip_conn: bool = True, norm_type: str = 'instance'):\n\n\n \"\"\"Defines a Unet submodule with/without skip connection!\n X -----------------identity(optional)--------------------\n |-- downsampling -- |submodule| -- upsampling --|\n \"\"\"\n\n \"\"\"\n Parameters:\n input_channels: Number of output channels in the DeConvolutional layer\n inner_channels: Number of output channels in the Convolutional layer\n innermost: If this module is the innermost module\n outermost: If this module is the outermost module\n apply_dp: If apply_dp is set to True, then activations are 0'ed out with prob 0.5\n submodule: Previously defined UNet submodule\n add_skip_conn: If set to true, skip connections are added b/w Encoder and Decoder\n norm_type: Type of Normalization layer - InstanceNorm2D or BatchNorm2D\n \"\"\"\n\n super().__init__()\n \n self.outermost = outermost\n self.add_skip_conn = add_skip_conn\n\n bias = norm_type == 'instance'\n f = 2 if add_skip_conn else 1\n norm_layer = InstanceNorm if norm_type == 'instance' else BatchNorm\n\n if innermost:\n dn_conv = Conv (in_channels = input_channels, out_channels = inner_channels, kernel_size = 4, stride = 2,\n padding = 1, bias = True, padding_mode = 'zeros')\n up_conv = Deconv(in_channels = inner_channels, out_channels = input_channels, kernel_size = 4, stride = 2,\n padding = 1, bias = bias, padding_mode = 'zeros')\n\n dn_layers = [nn.LeakyReLU(0.2, True), dn_conv]\n up_layers = [nn.ReLU(True), up_conv, norm_layer(input_channels)]\n layers = dn_layers + up_layers\n\n elif outermost:\n dn_conv = Conv (in_channels = 1 * input_channels, out_channels = inner_channels, kernel_size = 4,\n stride = 2, padding = 1, bias = True, padding_mode = 'zeros')\n up_conv = Deconv(in_channels = f * inner_channels, out_channels = input_channels, kernel_size = 4,\n stride = 2, padding = 1, bias = True, padding_mode = 'zeros')\n\n dn_layers = [dn_conv]\n up_layers = [nn.ReLU(True), up_conv, nn.Tanh()]\n layers = dn_layers + [submodule] + up_layers\n\n else:\n dn_conv = Conv (in_channels = 1 * input_channels, out_channels = inner_channels, kernel_size = 4,\n stride = 2, padding = 1, bias = bias, padding_mode = 'zeros')\n up_conv = Deconv(in_channels = f * inner_channels, out_channels = input_channels, kernel_size = 4,\n stride = 2, padding = 1, bias = bias, padding_mode = 'zeros')\n\n dn_layers = [nn.LeakyReLU(0.2, True), dn_conv, norm_layer(inner_channels)]\n up_layers = [nn.ReLU(True), up_conv, norm_layer(input_channels)]\n\n if apply_dp:\n layers = dn_layers + [submodule] + up_layers + [nn.Dropout(0.5)]\n else:\n layers = dn_layers + [submodule] + up_layers\n\n self.net = nn.Sequential(*layers)\n\n\n def forward(self, x):\n\n if self.outermost: return self.net(x)\n else: return torch.cat([x, self.net(x)], dim = 1) if self.add_skip_conn else self.net(x)\n\n\n\nclass Generator(nn.Module):\n\n def __init__(self, in_channels: int = 3, out_channels: int = 64, nb_layers: int = 8, apply_dp: bool = True,\n add_skip_conn: bool = True, norm_type: str = 'instance'):\n\n \"\"\"\n Generator Architecture!\n Encoder: C64-C128-C256-C512-C512-C512-C512-C512\n U-Net Decoder: CD1024-CD1024-CD1024-CD1024-CD512-CD256-CD128, where Ck denote a Convolution-InsNorm-ReLU\n layer with k filters, and CDk denotes a Convolution-InsNorm-Dropout-ReLU layer with a dropout rate of 50%\n \"\"\"\n\n \"\"\"\n Parameters:\n in_channels: Number of input channels\n out_channels: Number of output channels\n 
nb_layers: Number of layers in the Generator\n apply_dp: If apply_dp is set to True, then activations are 0'ed out with prob \"drop_param\"\n add_skip_conn: If set to true, skip connections are added b/w Encoder and Decoder\n norm_type: Type of Normalization layer - InstanceNorm2D or BatchNorm2D\n \"\"\"\n\n super().__init__()\n \n f = 4\n self.layers = []\n\n unet = UNetBlock(out_channels * 8, out_channels * 8, innermost = True, outermost = False, apply_dp = False,\n submodule = None, add_skip_conn = add_skip_conn, norm_type = norm_type)\n\n for idx in range(nb_layers - 5):\n unet = UNetBlock(out_channels * 8, out_channels * 8, innermost = False, outermost = False, apply_dp =\n apply_dp, submodule = unet, add_skip_conn = add_skip_conn, norm_type = norm_type)\n\n for idx in range(0, 3):\n unet = UNetBlock(out_channels * f, out_channels*2*f, innermost = False, outermost = False, apply_dp =\n False, submodule = unet, add_skip_conn = add_skip_conn, norm_type = norm_type)\n f = f // 2\n\n unet = UNetBlock(in_channels * 1, out_channels * 1, innermost = False, outermost = True, apply_dp = False,\n submodule = unet, add_skip_conn = add_skip_conn, norm_type = norm_type)\n\n self.net = unet\n\n\n def forward(self, x): \n return self.net(x)\n\n\n\nclass Discriminator(nn.Module):\n\n def __init__(self, in_channels: int, out_channels: int, nb_layers = 3, norm_type: str = 'instance'):\n\n \"\"\"\n Discriminator Architecture!\n C64 - C128 - C256 - C512, where Ck denote a Convolution-InstanceNorm-LeakyReLU layer with k filters\n \"\"\"\n\n \"\"\"\n Parameters:\n in_channels: Number of input channels\n out_channels: Number of output channels\n nb_layers: Number of layers in the 70*70 Patch Discriminator\n \"\"\"\n\n super().__init__()\n \n in_f = 1\n out_f = 2\n bias = norm_type == 'instance'\n norm_layer = InstanceNorm if norm_type == \"instance\" else BatchNorm\n\n conv = Conv(in_channels, out_channels, 4, stride = 2, padding = 1, bias = True)\n layers = [conv, nn.LeakyReLU(0.2, True)]\n\n for idx in range(1, nb_layers):\n conv = Conv(out_channels * in_f, out_channels * out_f, 4, stride = 2, padding = 1, bias = bias)\n layers += [conv, norm_layer(out_channels * out_f), nn.LeakyReLU(0.2, True)]\n in_f = out_f; out_f *= 2\n\n out_f = min(2 ** nb_layers, 8)\n conv = Conv(out_channels * in_f, out_channels * out_f, 4, stride = 1, padding = 1, bias = bias)\n layers += [conv, norm_layer(out_channels * out_f), nn.LeakyReLU(0.2, True)]\n\n conv = Conv(out_channels * out_f, 1, 4, stride = 1, padding = 1, bias = True)\n layers += [conv]\n\n self.net = nn.Sequential(*layers)\n\n\n def forward(self, x): \n return self.net(x)\n\n\n\nclass Initializer:\n\n def __init__(self, init_type: str = 'normal', init_gain: float = 0.02):\n\n \"\"\"\n Parameters:\n init_type: Initializer type - 'kaiming' or 'xavier' or 'normal'\n init_gain: Standard deviation of the normal distribution\n \"\"\"\n\n self.init_type = init_type\n self.init_gain = init_gain\n\n\n def init_module(self, m):\n\n \"\"\"\n Parameters:\n m: Module\n \"\"\"\n\n cls_name = m.__class__.__name__;\n if hasattr(m, 'weight') and (cls_name.find('Conv') != -1 or cls_name.find('Linear') != -1):\n\n if self.init_type == 'kaiming': nn.init.kaiming_normal_(m.weight.data, a = 0, mode = 'fan_in')\n elif self.init_type == 'xavier' : nn.init.xavier_normal_ (m.weight.data, gain = self.init_gain)\n elif self.init_type == 'normal' : nn.init.normal_(m.weight.data, mean = 0, std = self.init_gain)\n else: raise ValueError('Initialization not found!!')\n\n if m.bias is not None: 
nn.init.constant_(m.bias.data, val = 0);\n\n if hasattr(m, 'weight') and cls_name.find('BatchNorm2d') != -1:\n nn.init.normal_(m.weight.data, mean = 1.0, std = self.init_gain)\n nn.init.constant_(m.bias.data, val = 0)\n\n\n def __call__(self, net):\n\n \"\"\"\n Parameters:\n net: Network\n \"\"\"\n \n net.apply(self.init_module)\n\n return net\n\n\n\nclass Loss:\n\n \"\"\"\n This class implements different losses required to train the generators and discriminators of CycleGAN\n \"\"\"\n\n def __init__(self, loss_type: str = 'MSE', lambda_: int = 100):\n\n \"\"\"\n Parameters:\n loss_type: Loss Function to train CycleGAN\n lambda_: Weightage of Cycle-consistency loss\n \"\"\"\n\n self.loss = nn.MSELoss() if loss_type == 'MSE' else nn.BCEWithLogitsLoss()\n self.lambda_ = lambda_\n\n\n def get_dis_loss(self, dis_pred_real_data, dis_pred_fake_data):\n\n \"\"\"\n Parameters:\n dis_pred_real_data: Discriminator's prediction on real data\n dis_pred_fake_data: Discriminator's prediction on fake data\n \"\"\"\n\n dis_tar_real_data = torch.ones_like (dis_pred_real_data, requires_grad = False)\n dis_tar_fake_data = torch.zeros_like(dis_pred_fake_data, requires_grad = False)\n\n loss_real_data = self.loss(dis_pred_real_data, dis_tar_real_data)\n loss_fake_data = self.loss(dis_pred_fake_data, dis_tar_fake_data)\n\n dis_tot_loss = (loss_real_data + loss_fake_data) * 0.5\n\n return dis_tot_loss\n\n\n def get_gen_gan_loss(self, dis_pred_fake_data):\n\n \"\"\"\n Parameters:\n dis_pred_fake_data: Discriminator's prediction on fake data\n \"\"\"\n\n gen_tar_fake_data = torch.ones_like(dis_pred_fake_data, requires_grad = False)\n gen_tot_loss = self.loss(dis_pred_fake_data, gen_tar_fake_data)\n\n return gen_tot_loss\n\n\n def get_gen_rec_loss(self, real_data, recs_data):\n\n \"\"\"\n Parameters:\n real_data: Real images sampled from the dataloaders\n recs_data: Fake label generated by the generator\n \"\"\"\n\n gen_rec_loss = torch.nn.L1Loss()(real_data, recs_data)\n gen_tot_loss = gen_rec_loss * self.lambda_\n\n return gen_tot_loss\n\n\n def get_gen_loss(self, dis_pred_fake_data, real_data, fake_data):\n\n \"\"\"\n Implements the total Generator loss\n Sum of Reconstruction loss, and GAN loss\n \"\"\"\n\n gen_gan_loss = self.get_gen_gan_loss(dis_pred_fake_data )\n gen_rec_loss = self.get_gen_rec_loss(real_data, fake_data)\n gen_tot_loss = gen_gan_loss + gen_rec_loss\n\n return gen_tot_loss\n\n\n\nclass Pix2Pix(pl.LightningModule):\n\n def __init__(self, d_lr: float = 2e-4, g_lr: float = 2e-4, beta_1: float = 0.5, beta_2: float = 0.999, epoch_decay: int = 100):\n\n super().__init__()\n\n self.d_lr = d_lr\n self.g_lr = g_lr\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.epoch_decay = epoch_decay\n\n self.loss = Loss(loss_type = 'MSE', lambda_ = 100)\n init = Initializer(init_type = 'normal', init_gain = 0.02)\n \n self.gen = init(Generator(in_channels = 3, out_channels = 64, norm_type = 'instance'))\n self.dis = init(Discriminator(in_channels = 3, out_channels = 64, norm_type = 'instance'))\n\n self.d_params = self.dis.parameters()\n self.g_params = self.gen.parameters()\n\n self.example_input_array = torch.rand(1, 3, img_sz, img_sz, device = self.device)\n\n\n @staticmethod\n def set_requires_grad(nets, requires_grad = False):\n\n \"\"\"\n Set requies_grad=Fasle for all the networks to avoid unnecessary computations\n Parameters:\n nets (network list) -- a list of networks\n requires_grad (bool) -- whether the networks require gradients or not\n \"\"\"\n\n if not isinstance(nets, list): nets = [nets]\n 
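# Design note: setting requires_grad = False on the discriminator only skips its gradient bookkeeping during the generator step (optimizer_idx == 0); training_step re-enables it with requires_grad = True before the discriminator's own update (optimizer_idx == 1).\n 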
for net in nets:\n for param in net.parameters():\n param.requires_grad = requires_grad\n\n\n def forward(self, real_A):\n \n # this is different from the training step. You should treat this as the final inference code (final outputs that you are looking for!)\n fake_B = self.gen(real_A)\n\n return fake_B\n\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n\n real_A, real_B = batch['A'], batch['B']\n fake_B = self.gen(real_A)\n\n if optimizer_idx == 0:\n\n # No need to calculate the gradients for Discriminators' parameters\n self.set_requires_grad([self.dis], requires_grad = False)\n dis_pred_fake_data = self.dis(torch.cat([real_A, fake_B], 0))\n \n # Gen loss\n g_loss = self.loss.get_gen_loss(dis_pred_fake_data, real_B, fake_B)\n self.log(\"g_train_loss\", g_loss, on_step = True, on_epoch = True, prog_bar = True, logger = True)\n \n return g_loss\n\n\n if optimizer_idx == 1:\n \n self.set_requires_grad([self.dis], requires_grad = True)\n dis_pred_real_data = self.dis(torch.cat([real_A, real_B], 0))\n dis_pred_fake_data = self.dis(torch.cat([real_A, fake_B.detach()], 0))\n\n # Dis loss\n d_loss = self.loss.get_dis_loss(dis_pred_real_data, dis_pred_fake_data)\n self.log(\"d_train_loss\", d_loss, on_step = True, on_epoch = True, prog_bar = True, logger = True)\n\n return d_loss\n\n\n def shared_step(self, batch, stage: str = 'val'):\n\n grid = []\n real_A, real_B = batch['A'], batch['B']\n\n fake_B = self.gen(real_A)\n dis_pred_fake_data = self.dis(torch.cat([real_A, fake_B], 0))\n dis_pred_real_data = self.dis(torch.cat([real_A, real_B], 0))\n\n # Gen loss, # Dis loss\n g_loss = self.loss.get_gen_loss(dis_pred_fake_data, real_B, fake_B)\n d_loss = self.loss.get_dis_loss(dis_pred_real_data, dis_pred_fake_data)\n\n dict_ = {f'g_{stage}_loss': g_loss, f'd_{stage}_loss': d_loss}\n self.log_dict(dict_, on_step = False, on_epoch = True, prog_bar = True, logger = True)\n\n for i in range(12):\n rand_int = np.random.randint(0, len(real_A))\n tensor = torch.stack([real_A[rand_int], fake_B[rand_int], real_B[rand_int]])\n grid.append((tensor + 1) / 2)\n \n # log the results on tensorboard\n grid = torchvision.utils.make_grid(torch.cat(grid, 0), nrow = 6)\n self.logger.experiment.add_image('Grid', grid, self.current_epoch, dataformats = \"CHW\")\n \n\n def validation_step(self, batch, batch_idx):\n return self.shared_step(batch, 'val')\n\n\n def test_step(self, batch, batch_idx):\n return self.shared_step(batch, 'test')\n\n\n def lr_lambda(self, epoch):\n\n fraction = (epoch - self.epoch_decay) / self.epoch_decay\n return 1 if epoch < self.epoch_decay else 1 - fraction\n\n\n def configure_optimizers(self):\n \n # define the optimizers here\n g_opt = torch.optim.Adam(self.g_params, lr = self.g_lr, betas = (self.beta_1, self.beta_2))\n d_opt = torch.optim.Adam(self.d_params, lr = self.d_lr, betas = (self.beta_1, self.beta_2))\n \n # define the lr_schedulers here\n g_sch = optim.lr_scheduler.LambdaLR(g_opt, lr_lambda = self.lr_lambda)\n d_sch = optim.lr_scheduler.LambdaLR(d_opt, lr_lambda = self.lr_lambda)\n \n # first return value is a list of optimizers and second is a list of lr_schedulers (you can return empty list also)\n return [g_opt, d_opt], [g_sch, d_sch]\n\n\n \n###############################################################################################################################################\n\n\nTEST = True\nTRAIN = True\nRESTORE = False\nresume_from_checkpoint = None if TRAIN else \"path/to/checkpoints/\" # \"./logs/Pix2Pix/version_0/checkpoints/epoch=1.ckpt\"\n\n\nif 
TRAIN or RESTORE:\n \n epochs = 200\n epoch_decay = epochs // 2\n \n model = Pix2Pix(epoch_decay = epoch_decay)\n tb_logger = pl_loggers.TensorBoardLogger('logs/', name = \"Pix2Pix\", log_graph = True)\n \n lr_logger = LearningRateMonitor(logging_interval = 'epoch')\n checkpoint_callback = ModelCheckpoint(monitor = \"g_val_loss\", save_top_k = 3, period = 2, save_last = True)\n callbacks = [lr_logger, checkpoint_callback]\n \n # you can change the gpus argument to how many you have (I had only 1 :( )\n # Setting deterministic flag to True for full reproducibility\n trainer = pl.Trainer(accelerator = 'ddp', gpus = -1, max_epochs = epochs, progress_bar_refresh_rate = 20, precision = 16, \n callbacks = callbacks, num_sanity_val_steps = 1, logger = tb_logger, resume_from_checkpoint = \n resume_from_checkpoint, log_every_n_steps = 25, profiler = True, deterministic = True)\n \n trainer.fit(model, datamodule)\n \n \nif TEST:\n \n \"\"\"\n This is one of the many ways to run inference, but I would recommend you to look into the docs for other \n options as well, so that you can use one which suits you best.\n \"\"\"\n \n trainer = pl.Trainer(gpus = -1, precision = 16, profiler = True)\n # load the checkpoint that you want to load\n checkpoint_path = \"path/to/checkpoints/\" # \"./logs/Pix2Pix/version_0/checkpoints/epoch=1.ckpt\"\n \n model = Pix2Pix.load_from_checkpoint(checkpoint_path = checkpoint_path)\n model.freeze()\n \n # put the datamodule in test mode\n datamodule.setup(\"test\")\n test_data = datamodule.test_dataloader()\n\n trainer.test(model, test_dataloaders = test_data)\n # look tensorboard for the final results\n # You can also run an inference on a single image using the forward function defined above!!\n\n", "id": "745340", "language": "Python", "matching_score": 9.93567943572998, "max_stars_count": 297, "path": "creative_ai/models/pix2pix_pl.py" }, { "content": "\nimport numpy as np, pandas as pd, matplotlib as mpl, matplotlib.pyplot as plt, os\nimport itertools; from skimage import io as io, transform as tfm; import warnings\n\nimport torch, torch.nn as nn, torch.nn.functional as F, torch.optim as optim\nimport torchvision, torchvision.transforms as T, torchvision.utils as utils\nfrom torch.nn import Conv2d as Conv, ConvTranspose2d as Deconv, ReLU as Relu\nfrom torch.nn import InstanceNorm2d as InstanceNorm, BatchNorm2d as BatchNorm\nfrom torch.utils.tensorboard import SummaryWriter, FileWriter, RecordWriter\nfrom torch.utils.data import Dataset, DataLoader, ConcatDataset, TensorDataset\n\nmpl.rcParams[\"figure.figsize\"] = (8, 4)\nmpl.rcParams[\"axes.grid\"] = False\nwarnings.filterwarnings(\"ignore\")\n\n\n########################################################################################################################\n\n\nif torch.cuda.is_available():\n devices = ['cuda:' + str(x) for x in range(torch.cuda.device_count())]\n print(f\"Number of GPUs available: {len(devices)}\")\nelse:\n devices = [torch.device('cpu')]; print(\"GPU isn't available! 
:(\")\n \n \n########################################################################################################################\n \n\nclass Resize(object):\n \n def __init__(self, image_size: (int, tuple) = 256):\n \n \"\"\"\n Parameters: \n image_size: Final size of the image\n \"\"\"\n \n if isinstance(image_size, int): self.image_size = (image_size, image_size)\n elif isinstance(image_size, tuple): self.image_size = image_size\n else: raise ValueError(\"Unknown DataType of the parameter image_size found!!\")\n \n \n def __call__(self, sample):\n \n \"\"\"\n Parameters: \n sample: Dictionary containing image and label\n \"\"\"\n \n A, B = sample['A'], sample['B']\n \n A = tfm.resize(A, output_shape = self.image_size)\n B = tfm.resize(B, output_shape = self.image_size)\n \n A = np.clip(A, a_min = 0., a_max = 1.)\n B = np.clip(B, a_min = 0., a_max = 1.)\n \n return {'A': A, 'B': B}\n\n\nclass RandomCrop(object):\n \n def __init__(self, image_size: (int, tuple) = 256): \n \n \"\"\"\n Parameters: \n image_size: Final size of the image (should be smaller than current size o/w \n returns the original image)\n \"\"\"\n \n if isinstance(image_size, int): self.image_size = (image_size, image_size)\n elif isinstance(image_size, tuple): self.image_size = image_size\n else: raise ValueError(\"Unknown DataType of the parameter image_size found!!\")\n \n \n def __call__(self, sample):\n \n \"\"\"\n Parameters: \n sample: Dictionary containing image and label\n \"\"\"\n \n A, B = sample['A'], sample['B']\n curr_height, curr_width = A.shape[0], A.shape[1]\n \n ht_diff = max(0, curr_height - self.image_size[0])\n wd_diff = max(0, curr_width - self.image_size[1])\n top = np.random.randint(low = 0, high = ht_diff)\n lft = np.random.randint(low = 0, high = wd_diff)\n \n A = A[top: top + self.image_size[0], lft: lft + self.image_size[1]]\n B = B[top: top + self.image_size[0], lft: lft + self.image_size[1]]\n \n return {'A': A, 'B': B}\n \n\nclass Random_Flip(object):\n \n def __call__(self, sample):\n \n \"\"\"\n Parameters: \n sample: Dictionary containing image and label\n \"\"\"\n \n A, B = sample['A'], sample['B'];\n if np.random.uniform(low = 0., high = 1.0) > .5:\n A = np.fliplr(A)\n B = np.fliplr(B)\n \n return {'A': A, 'B': B}\n\n\nclass To_Tensor(object):\n \n def __call__(self, sample):\n \n \"\"\"\n Parameters: \n sample: Dictionary containing image and label\n \"\"\"\n \n A = np.transpose(sample['A'].astype(np.float, copy = True), (2, 0, 1))\n B = np.transpose(sample['B'].astype(np.float, copy = True), (2, 0, 1))\n \n A = torch.tensor(A, dtype = torch.float)\n B = torch.tensor(B, dtype = torch.float)\n \n return {'A': A, 'B': B}\n \n\nclass Normalize(object):\n \n def __init__(self, mean = [0.5] * 3, stdv = [0.5] * 3):\n \n \"\"\"\n Parameters: \n mean: Normalizing mean\n stdv: Normalizing stdv\n \"\"\"\n \n mean = torch.tensor(mean, dtype = torch.float)\n stdv = torch.tensor(stdv, dtype = torch.float)\n self.transforms = T.Normalize(mean = mean, std = stdv)\n \n \n def __call__(self, sample):\n \n \"\"\"\n Parameters: \n sample: Dictionary containing image and label\n \"\"\"\n \n A, B = sample['A'], sample['B']\n A = self.transforms(A)\n B = self.transforms(B)\n \n return {'A': A, 'B': B}\n\n\nclass MyDataset(Dataset):\n \n def __init__(self, path = None, transforms = None):\n \n \"\"\"\n Parameters: \n path: path to the Dataset\n transforms: list of Transformations (Data Augmentation)\n \"\"\"\n \n super().__init__(); \n self.transforms = T.Compose(transforms)\n \n self.file_names = 
sorted(os.listdir(path), key = lambda x: int(x[:-4]))\n self.file_names = [path + file_name for file_name in self.file_names] \n \n \n def __len__(self): return len(self.file_names)\n \n \n def __getitem__(self, idx):\n \n \"\"\"\n Returns:\n A dict containing image and label\n \"\"\"\n \n sample = io.imread(fname = self.file_names[idx]); width = sample.shape[1]\n \n B = sample[:, : width // 2, :]\n A = sample[:, width // 2 :, :]\n sample = self.transforms({'A': A, 'B': B})\n \n return sample\n \n\nclass Helper(object):\n \n @staticmethod\n def show_image(image):\n \n image = np.transpose((image + 1) / 2, (1, 2, 0))\n plt.imshow(image)\n \n \n @staticmethod\n def tensor_to_numpy(tensor):\n \n tensor = (tensor.cpu().clone() + 1) / 2\n if len(tensor.shape) == 3: tensor = np.transpose(tensor, (1, 2, 0))\n elif len(tensor.shape) == 4: tensor = np.transpose(tensor, (0, 2, 3, 1))\n \n return tensor\n\n \n @staticmethod\n def get_random_sample(dataset):\n \n return dataset[np.random.randint(0, len(dataset))]\n \n \n @staticmethod\n def get_data(path: str, tfms, batch_sz: int, is_train: bool):\n \n dataset = MyDataset(path = path, transforms = tfms)\n dataloader = DataLoader(dataset, batch_size = batch_sz, shuffle = is_train, num_workers = 0)\n \n return dataset, dataloader\n\n\n######################################################################################################################\n\n\n# 1) Correctly specify the Root directory which contains two folders: Train folder and Validation folder\n# 2) Image names should be labeled from 1 to len(dataset), o/w will throw an error while sorting the filenames\n\nroot_dir = \"./Dataset/Vision/Pix2Pix/Facades/\"\ntrn_path = root_dir + \"Trn/\"\nval_path = root_dir + \"Val/\"\n\nimg_sz = 256\njitter_sz = int(img_sz * 1.12)\n\ntrn_batch_sz = 16 * len(devices)\nval_batch_sz = 64\nhelper = Helper()\n\nval_tfms = [Resize(img_sz), To_Tensor(), Normalize()]\ntrn_tfms = [Resize(jitter_sz), RandomCrop(img_sz), Random_Flip(), To_Tensor(), Normalize()]\n\ntrn_dataset, trn_dataloader = helper.get_data(trn_path, trn_tfms, trn_batch_sz, is_train = True )\nval_dataset, val_dataloader = helper.get_data(val_path, val_tfms, val_batch_sz, is_train = False)\n\n\nsample = helper.get_random_sample(trn_dataset)\nplt.subplot(1, 2, 1); helper.show_image(sample['A'])\nplt.subplot(1, 2, 2); helper.show_image(sample['B'])\nplt.show()\n\nsample = helper.get_random_sample(val_dataset)\nplt.subplot(1, 2, 1); helper.show_image(sample['A'])\nplt.subplot(1, 2, 2); helper.show_image(sample['B'])\nplt.show()\n\n\n######################################################################################################################\n\n\nclass UNetBlock(nn.Module):\n \n def __init__(self, input_channels: int, inner_channels: int, innermost: bool = False, outermost: bool = False,\n apply_dp: bool = False, submodule = None, add_skip_conn: bool = True, norm_type: str = 'instance'):\n \n \n \"\"\"Defines a Unet submodule with/without skip connection!\n X -----------------identity(optional)--------------------\n |-- downsampling -- |submodule| -- upsampling --|\n \"\"\"\n \n \"\"\"\n Parameters: \n input_channels: Number of output channels in the DeConvolutional layer\n inner_channels: Number of output channels in the Convolutional layer\n innermost: If this module is the innermost module\n outermost: If this module is the outermost module\n apply_dp: If apply_dp is set to True, then activations are 0'ed out with prob 0.5\n submodule: Previously defined UNet submodule\n add_skip_conn: If 
set to true, skip connections are added b/w Encoder and Decoder\n norm_type: Type of Normalization layer - InstanceNorm2D or BatchNorm2D\n \"\"\"\n \n super().__init__()\n \n self.outermost = outermost\n self.add_skip_conn = add_skip_conn\n \n bias = norm_type == 'instance'\n f = 2 if add_skip_conn else 1\n norm_layer = InstanceNorm if norm_type == 'instance' else BatchNorm\n \n if innermost: \n dn_conv = Conv (in_channels = input_channels, out_channels = inner_channels, kernel_size = 4, stride = 2, \n padding = 1, bias = True, padding_mode = 'zeros')\n up_conv = Deconv(in_channels = inner_channels, out_channels = input_channels, kernel_size = 4, stride = 2, \n padding = 1, bias = bias, padding_mode = 'zeros')\n \n dn_layers = [nn.LeakyReLU(0.2, True), dn_conv]\n up_layers = [nn.ReLU(True), up_conv, norm_layer(input_channels)]\n layers = dn_layers + up_layers\n \n elif outermost:\n dn_conv = Conv (in_channels = 1 * input_channels, out_channels = inner_channels, kernel_size = 4, \n stride = 2, padding = 1, bias = True, padding_mode = 'zeros')\n up_conv = Deconv(in_channels = f * inner_channels, out_channels = input_channels, kernel_size = 4, \n stride = 2, padding = 1, bias = True, padding_mode = 'zeros')\n \n dn_layers = [dn_conv]\n up_layers = [nn.ReLU(True), up_conv, nn.Tanh()]\n layers = dn_layers + [submodule] + up_layers\n \n else:\n dn_conv = Conv (in_channels = 1 * input_channels, out_channels = inner_channels, kernel_size = 4, \n stride = 2, padding = 1, bias = bias, padding_mode = 'zeros')\n up_conv = Deconv(in_channels = f * inner_channels, out_channels = input_channels, kernel_size = 4, \n stride = 2, padding = 1, bias = bias, padding_mode = 'zeros')\n \n dn_layers = [nn.LeakyReLU(0.2, True), dn_conv, norm_layer(inner_channels)]\n up_layers = [nn.ReLU(True), up_conv, norm_layer(input_channels)]\n \n if apply_dp:\n layers = dn_layers + [submodule] + up_layers + [nn.Dropout(0.5)]\n else:\n layers = dn_layers + [submodule] + up_layers\n \n self.net = nn.Sequential(*layers)\n \n \n def forward(self, x):\n \n if self.outermost: return self.net(x)\n else: return torch.cat([x, self.net(x)], dim = 1) if self.add_skip_conn else self.net(x)\n\n\nclass Generator(nn.Module):\n \n def __init__(self, in_channels: int = 3, out_channels: int = 64, nb_layers: int = 8, apply_dp: bool = True, \n add_skip_conn: bool = True, norm_type: str = 'instance'):\n \n \"\"\"\n Generator Architecture!\n Encoder: C64-C128-C256-C512-C512-C512-C512-C512\n U-Net Decoder: CD1024-CD1024-CD1024-CD1024-CD512-CD256-CD128, where Ck denote a Convolution-InsNorm-ReLU \n layer with k filters, and CDk denotes a Convolution-InsNorm-Dropout-ReLU layer with a dropout rate of 50%\n \"\"\"\n \n \"\"\"\n Parameters: \n in_channels: Number of input channels \n out_channels: Number of output channels \n nb_layers: Number of layers in the Generator\n apply_dp: If apply_dp is set to True, then activations are 0'ed out with prob \"drop_param\"\n add_skip_conn: If set to true, skip connections are added b/w Encoder and Decoder\n norm_type: Type of Normalization layer - InstanceNorm2D or BatchNorm2D\n \"\"\"\n \n super().__init__()\n \n f = 4\n self.layers = []\n \n unet = UNetBlock(out_channels * 8, out_channels * 8, innermost = True, outermost = False, apply_dp = False,\n submodule = None, add_skip_conn = add_skip_conn, norm_type = norm_type)\n \n for idx in range(nb_layers - 5):\n unet = UNetBlock(out_channels * 8, out_channels * 8, innermost = False, outermost = False, apply_dp =\n apply_dp, submodule = unet, add_skip_conn = 
add_skip_conn, norm_type = norm_type)\n \n for idx in range(0, 3):\n unet = UNetBlock(out_channels * f, out_channels*2*f, innermost = False, outermost = False, apply_dp =\n False, submodule = unet, add_skip_conn = add_skip_conn, norm_type = norm_type)\n f = f // 2\n \n unet = UNetBlock(in_channels * 1, out_channels * 1, innermost = False, outermost = True, apply_dp = False,\n submodule = unet, add_skip_conn = add_skip_conn, norm_type = norm_type)\n \n self.net = unet\n \n \n def forward(self, x): \n return self.net(x)\n\n\nclass Discriminator(nn.Module):\n \n def __init__(self, in_channels: int, out_channels: int, nb_layers = 3, norm_type: str = 'instance'):\n \n \"\"\"\n Discriminator Architecture!\n C64 - C128 - C256 - C512, where Ck denote a Convolution-InstanceNorm-LeakyReLU layer with k filters\n \"\"\"\n \n \"\"\"\n Parameters: \n in_channels: Number of input channels\n out_channels: Number of output channels\n nb_layers: Number of layers in the 70*70 Patch Discriminator\n \"\"\"\n \n super().__init__()\n \n in_f = 1\n out_f = 2\n bias = norm_type == 'instance' \n norm_layer = InstanceNorm if norm_type == \"instance\" else BatchNorm\n \n conv = Conv(in_channels, out_channels, 4, stride = 2, padding = 1, bias = True)\n layers = [conv, nn.LeakyReLU(0.2, True)]\n \n for idx in range(1, nb_layers):\n conv = Conv(out_channels * in_f, out_channels * out_f, 4, stride = 2, padding = 1, bias = bias)\n layers += [conv, norm_layer(out_channels * out_f), nn.LeakyReLU(0.2, True)]\n in_f = out_f\n out_f *= 2\n \n out_f = min(2 ** nb_layers, 8)\n conv = Conv(out_channels * in_f, out_channels * out_f, 4, stride = 1, padding = 1, bias = bias)\n layers += [conv, norm_layer(out_channels * out_f), nn.LeakyReLU(0.2, True)] \n \n conv = Conv(out_channels * out_f, 1, 4, stride = 1, padding = 1, bias = True)\n layers += [conv]\n \n self.net = nn.Sequential(*layers)\n \n \n def forward(self, x): return self.net(x)\n\n\nclass Initializer:\n \n def __init__(self, init_type: str = 'normal', init_gain: float = 0.02): \n \n \"\"\"\n Parameters: \n init_type: Initializer type - 'kaiming' or 'xavier' or 'normal'\n init_gain: Standard deviation of the normal distribution\n \"\"\"\n \n self.init_type = init_type\n self.init_gain = init_gain\n \n \n def init_module(self, m):\n \n \"\"\"\n Parameters: \n m: Module\n \"\"\"\n \n cls_name = m.__class__.__name__;\n if hasattr(m, 'weight') and (cls_name.find('Conv') != -1 or cls_name.find('Linear') != -1):\n \n if self.init_type == 'kaiming': nn.init.kaiming_normal_(m.weight.data, a = 0, mode = 'fan_in')\n elif self.init_type == 'xavier' : nn.init.xavier_normal_ (m.weight.data, gain = self.init_gain)\n elif self.init_type == 'normal' : nn.init.normal_(m.weight.data, mean = 0, std = self.init_gain)\n else: raise ValueError('Initialization not found!!')\n \n if m.bias is not None: nn.init.constant_(m.bias.data, val = 0); \n \n if hasattr(m, 'weight') and cls_name.find('BatchNorm2d') != -1:\n nn.init.normal_(m.weight.data, mean = 1.0, std = self.init_gain)\n nn.init.constant_(m.bias.data, val = 0)\n \n \n def __call__(self, net):\n \n \"\"\"\n Parameters: \n net: Network\n \"\"\"\n \n net = net.to(devices[0]); net = nn.DataParallel(net, device_ids = range(len(devices)))\n net.apply(self.init_module)\n \n return net\n\n\nclass Tensorboard:\n \n def __init__(self, path: str): self.writer = SummaryWriter(path)\n \n \n def write_graph(self, model): \n \n A = helper.get_random_sample(trn_dataset)['A'].unsqueeze(0)\n self.writer.add_graph(model.module, A.to(devices[0]))\n \n \n 
@torch.no_grad()\n def write_image(self, nb_examples, gen, epoch: int, curr_iter: int):\n \n grid = []\n n_iter = (epoch - 1) * len(trn_dataloader) + curr_iter\n \n for _ in range(nb_examples):\n \n sample = helper.get_random_sample(val_dataset)\n real_A = sample['A'].unsqueeze(0).to(devices[0])\n real_B = sample['B'].unsqueeze(0).to(devices[0])\n \n fake_B = gen(real_A).detach()\n tensor = torch.cat([real_A, real_B, fake_B])\n tensor = (tensor.cpu().clone() + 1) / 2\n grid.append(tensor)\n \n grid = torchvision.utils.make_grid(torch.cat(grid, 0), nrow = 6)\n self.writer.add_image('Grid', grid, n_iter)\n \n \n @torch.no_grad()\n def write_loss(self, d_loss: float, g_loss: float, epoch: int, curr_iter: int):\n \n n_iter = (epoch - 1) * len(trn_dataloader) + curr_iter\n \n self.writer.add_scalar('d_loss', round(d_loss.item(), 4), n_iter)\n self.writer.add_scalar('g_loss', round(g_loss.item(), 4), n_iter)\n\n\nclass Loss:\n \n \"\"\"\n This class implements different losses required to train the generators and discriminators of CycleGAN\n \"\"\"\n \n def __init__(self, loss_type: str = 'MSE', lambda_: int = 100):\n \n \"\"\"\n Parameters:\n loss_type: Loss Function to train CycleGAN\n lambda_: Weightage of Cycle-consistency loss\n \"\"\"\n \n self.loss = nn.MSELoss() if loss_type == 'MSE' else nn.BCEWithLogitsLoss()\n self.lambda_ = lambda_\n \n \n def get_dis_gan_loss(self, dis_pred_real_data, dis_pred_fake_data):\n \n \"\"\"\n Parameters:\n dis_pred_real_data: Discriminator's prediction on real data\n dis_pred_fake_data: Discriminator's prediction on fake data\n \"\"\"\n \n dis_tar_real_data = torch.ones_like (dis_pred_real_data, requires_grad = False)\n dis_tar_fake_data = torch.zeros_like(dis_pred_fake_data, requires_grad = False)\n \n loss_real_data = self.loss(dis_pred_real_data, dis_tar_real_data)\n loss_fake_data = self.loss(dis_pred_fake_data, dis_tar_fake_data)\n \n dis_tot_loss = (loss_real_data + loss_fake_data) * 0.5\n \n return dis_tot_loss\n \n \n def get_gen_gan_loss(self, dis_pred_fake_data):\n \n \"\"\"\n Parameters:\n dis_pred_fake_data: Discriminator's prediction on fake data\n \"\"\"\n \n gen_tar_fake_data = torch.ones_like(dis_pred_fake_data, requires_grad = False)\n gen_tot_loss = self.loss(dis_pred_fake_data, gen_tar_fake_data)\n \n return gen_tot_loss\n \n \n def get_gen_rec_loss(self, real_data, recs_data):\n \n \"\"\"\n Parameters:\n real_data: Real images sampled from the dataloaders\n recs_data: Fake label generated by the generator\n \"\"\"\n \n gen_rec_loss = torch.nn.L1Loss()(real_data, recs_data)\n gen_tot_loss = gen_rec_loss * self.lambda_\n \n return gen_tot_loss\n\n\nclass SaveModel:\n \n def __init__(self, path: str, keep_only: int = 3): self.path = path; self.keep_only = keep_only\n \n \n def save_model(self, epoch: int, dis, gen, d_opt, g_opt):\n \n filename = self.path + \"Model_\" + str(epoch) + \".pth\"\n torch.save({'epochs_': epoch, 'g_opt': g_opt.state_dict(), 'd_opt': d_opt.state_dict(),\n 'dis': dis.module.state_dict(), 'gen': gen.module.state_dict()}, filename)\n \n filenames = [f for f in os.listdir(self.path) if not f.startswith('.')]\n if len(filenames) > self.keep_only:\n os.remove(self.path + sorted(filenames, key = lambda x: int(x[6 : -4]))[0])\n\n\nclass Pix2Pix:\n \n def __init__(self, root_dir: str, gen, dis):\n \n self.dis = dis\n self.gen = gen\n self.loss = Loss()\n self.save_dir = root_dir + 'Models/'\n summary_path = root_dir + 'Tensorboard/'\n \n if not os.path.exists(self.save_dir): os.makedirs(self.save_dir)\n if not 
os.path.exists(summary_path ): os.makedirs(summary_path )\n self.saver = SaveModel(self.save_dir); self.tb = Tensorboard(summary_path)\n \n \n def load_state_dict(self, path, train = True):\n \n checkpoint = torch.load(path)\n start_epoch = checkpoint['epochs_'] + 1\n \n if train:\n self.d_opt.load_state_dict(checkpoint['d_opt'])\n self.g_opt.load_state_dict(checkpoint['g_opt'])\n \n self.dis.module.load_state_dict(checkpoint['dis'])\n self.gen.module.load_state_dict(checkpoint['gen'])\n \n return start_epoch\n \n \n @staticmethod\n def set_requires_grad(nets, requires_grad = False):\n \n \"\"\"\n Set requies_grad=Fasle for all the networks to avoid unnecessary computations\n Parameters:\n nets (network list) -- a list of networks\n requires_grad (bool) -- whether the networks require gradients or not\n \"\"\"\n \n if not isinstance(nets, list): nets = [nets]\n for net in nets:\n for param in net.module.parameters(): param.requires_grad = requires_grad\n \n \n def fit(self, nb_epochs: int = 400, d_lr: float = 2e-4, g_lr: float = 2e-4, beta_1: float = 0.5, model_name: \\\n str = None, epoch_decay = 100):\n \n \"\"\"\n Parameters: \n model_name: Resume the training from saved checkpoint with file - \"model_name\"\n epoch_decay: Number of epochs after which learning rate starts decaying\n \"\"\"\n \n self.d_opt = optim.Adam(self.dis.module.parameters(), lr = d_lr, betas = (beta_1, 0.999))\n self.g_opt = optim.Adam(self.gen.module.parameters(), lr = g_lr, betas = (beta_1, 0.999))\n \n curr_iter = 0\n start_epoch = 0\n if model_name is not None: \n start_epoch = self.load_state_dict(path = self.save_dir + model_name)\n \n # LrScheduler follows this lambda rule to decay the learning rate\n def lr_lambda(epoch):\n fraction = (epoch - epoch_decay) / (nb_epochs + start_epoch - epoch_decay)\n return 1 if epoch < epoch_decay else 1 - fraction\n \n d_scheduler = optim.lr_scheduler.LambdaLR(self.d_opt, lr_lambda, last_epoch = start_epoch - 1)\n g_scheduler = optim.lr_scheduler.LambdaLR(self.g_opt, lr_lambda, last_epoch = start_epoch - 1)\n\n \n # Starts the training\n for epoch in range(start_epoch + 1, nb_epochs + 1):\n for data in trn_dataloader:\n \n curr_iter += 1\n real_A, real_B = data['A'].to(devices[0]), data['B'].to(devices[0])\n \n # Discriminator's optimization step\n self.set_requires_grad([self.dis], requires_grad = True)\n fake_B = self.gen(real_A)\n \n dis_pred_real_data = self.dis(torch.cat([real_A, real_B], 0))\n dis_pred_fake_data = self.dis(torch.cat([real_A, fake_B.detach()], 0))\n\n dis_tot_loss = self.loss.get_dis_gan_loss(dis_pred_real_data, dis_pred_fake_data)\n self.d_opt.zero_grad()\n dis_tot_loss.backward()\n self.d_opt.step()\n \n # Generator's optimization step\n self.set_requires_grad([self.dis], requires_grad = False)\n dis_pred_fake_data = self.dis(torch.cat([real_A, fake_B], 0))\n \n gen_gan_loss = self.loss.get_gen_gan_loss(dis_pred_fake_data)\n gen_rec_loss = self.loss.get_gen_rec_loss(real_B, fake_B)\n gen_tot_loss = gen_gan_loss + gen_rec_loss\n \n self.g_opt.zero_grad()\n gen_tot_loss.backward()\n self.g_opt.step()\n \n # Write statistics to the Tensorboard\n self.tb.write_loss (dis_tot_loss, gen_tot_loss, epoch, curr_iter)\n if curr_iter % 10 == 0: \n self.tb.write_image(10, self.gen, epoch, curr_iter)\n \n curr_iter = 0\n d_scheduler.step()\n g_scheduler.step()\n \n print(f\"After {epoch} epochs:\")\n print(f\"D_loss: {round(dis_tot_loss.item(), 3)}, G_loss: {round(gen_tot_loss.item(), 3)}\")\n \n # Save the models after every 10 epochs\n if epoch % 10 == 0:\n 
self.saver.save_model(epoch, self.dis, self.gen, self.d_opt, self.g_opt)\n \n \n @torch.no_grad()\n def eval_(self, model_name: str = None):\n \n _ = self.load_state_dict(self.save_dir + model_name, train = False) \n list_fake_B = []\n list_real_B = []\n list_real_A = []\n \n for idx, data in enumerate(val_dataloader):\n \n real_A, real_B = data['A'].to(devices[0]), data['B'].to(devices[0])\n list_real_A.append(data['A'])\n list_real_B.append(data['B'])\n \n fake_B = self.gen(real_A).detach()\n list_fake_B.append(fake_B)\n \n fake_B = torch.cat(list_fake_B, axis = 0)\n real_B = torch.cat(list_real_B, axis = 0)\n real_A = torch.cat(list_real_A, axis = 0)\n \n return real_A, real_B, fake_B\n\n\n######################################################################################################################\n\n\ninit = Initializer(init_type = 'normal', init_gain = 0.02)\ngen = init(Generator(in_channels = 3, out_channels = 64, norm_type = 'instance'))\ndis = init(Discriminator(in_channels = 3, out_channels = 64, norm_type = 'instance'))\n\n\nroot_dir = \"./Results/Pix2Pix/Facades/A/\"\n\nis_train = True\nnb_epochs = 400\nepoch_decay = nb_epochs // 2\nmodel = Pix2Pix(root_dir = root_dir, gen = gen, dis = dis)\n\n# Set is_train to False while running inference on the trained model\nif is_train: \n model.fit(nb_epochs = nb_epochs, model_name = None, epoch_decay = epoch_decay)\nelse: \n real_A, real_B, fake_B = model.eval_(model_name = \"Model_\" + str(nb_epochs) + \".pth\")\n\n######################################################################################################################\n\n\n\n", "id": "10589556", "language": "Python", "matching_score": 10.9791259765625, "max_stars_count": 297, "path": "creative_ai/models/pix2pix.py" }, { "content": "\nimport numpy as np, pandas as pd, matplotlib as mpl, matplotlib.pyplot as plt, os\nimport itertools, functools; from skimage import io as io, transform as tfm\n\nimport torch, torch.nn as nn, torch.nn.functional as F, torch.optim as optim\nimport torchvision, torchvision.transforms as T, torchvision.utils as utils\nfrom torch.nn import Conv2d as Conv, ConvTranspose2d as Deconv, ReLU as Relu\nfrom torch.nn import InstanceNorm2d as InstanceNorm, BatchNorm2d as BatchNorm\nfrom torch.utils.tensorboard import SummaryWriter, FileWriter, RecordWriter\nfrom torch.utils.data import Dataset, DataLoader, ConcatDataset, TensorDataset\n\nmpl.rcParams[\"figure.figsize\"] = (8, 4); mpl.rcParams[\"axes.grid\"] = False\n\n\n##########################################################################################################################\n\n# Use GPU if available\nif torch.cuda.is_available():\n devices = ['cuda:' + str(x) for x in range(torch.cuda.device_count())]\n print(f\"Number of GPUs available: {len(devices)}\")\nelse:\n devices = [torch.device('cpu')]; print(\"GPU isn't available! 
:(\")\n\n\n##########################################################################################################################\n\n\nclass Resize(object):\n \n def __init__(self, image_size: (int, tuple) = 256):\n \n \"\"\"\n Parameters:\n image_size: Final size of the image\n \"\"\"\n \n if isinstance(image_size, int): self.image_size = (image_size, image_size)\n elif isinstance(image_size, tuple): self.image_size = image_size\n else: raise ValueError(\"Unknown DataType of the parameter image_size found!!\")\n \n \n def __call__(self, sample):\n \n \"\"\"\n Parameters: \n sample: Dictionary containing image and label\n \"\"\"\n \n A, B = sample['A'], sample['B']\n \n A = tfm.resize(A, output_shape = self.image_size)\n B = tfm.resize(B, output_shape = self.image_size)\n \n A = np.clip(A, a_min = 0., a_max = 1.)\n B = np.clip(B, a_min = 0., a_max = 1.)\n \n return {'A': A, 'B': B}\n\n\nclass RandomCrop(object):\n \n def __init__(self, image_size: (int, tuple) = 256): \n \n \"\"\"\n Parameters: \n image_size: Final size of the image (should be smaller than current size o/w \n returns the original image)\n \"\"\"\n \n if isinstance(image_size, int): self.image_size = (image_size, image_size)\n elif isinstance(image_size, tuple): self.image_size = image_size\n else: raise ValueError(\"Unknown DataType of the parameter image_size found!!\")\n \n \n def __call__(self, sample):\n \n \"\"\"\n Parameters: \n sample: Dictionary containing image and label\n \"\"\"\n \n A, B = sample['A'], sample['B']\n curr_height, curr_width = A.shape[0], A.shape[1]\n \n ht_diff = max(0, curr_height - self.image_size[0])\n wd_diff = max(0, curr_width - self.image_size[1])\n top = np.random.randint(low = 0, high = ht_diff)\n lft = np.random.randint(low = 0, high = wd_diff)\n \n A = A[top: top + self.image_size[0], lft: lft + self.image_size[1]]\n B = B[top: top + self.image_size[0], lft: lft + self.image_size[1]]\n \n return {'A': A, 'B': B}\n \n\nclass Random_Flip(object):\n \n def __call__(self, sample):\n \n \"\"\"\n Parameters: \n sample: Dictionary containing image and label\n \"\"\"\n \n A, B = sample['A'], sample['B']\n if np.random.uniform(low = 0., high = 1.0) > .5:\n A = np.fliplr(A); B = np.fliplr(B)\n \n return {'A': A, 'B': B}\n\n\nclass To_Tensor(object):\n \n def __call__(self, sample):\n \n \"\"\"\n Parameters: \n sample: Dictionary containing image and label\n \"\"\"\n \n A = np.transpose(sample['A'].astype(np.float, copy = True), (2, 0, 1))\n B = np.transpose(sample['B'].astype(np.float, copy = True), (2, 0, 1))\n \n A = torch.tensor(A, dtype = torch.float)\n B = torch.tensor(B, dtype = torch.float)\n \n return {'A': A, 'B': B}\n \n\nclass Normalize(object):\n \n def __init__(self, mean = [0.5] * 3, stdv = [0.5] * 3):\n \n \"\"\"\n Parameters: \n mean: Normalizing mean\n stdv: Normalizing stdv\n \"\"\"\n \n mean = torch.tensor(mean, dtype = torch.float)\n stdv = torch.tensor(stdv, dtype = torch.float)\n self.transforms = T.Normalize(mean = mean, std = stdv)\n \n \n def __call__(self, sample):\n \n \"\"\"\n Parameters:\n sample: Dictionary containing image and label\n \"\"\"\n \n A, B = sample['A'], sample['B']\n A = self.transforms(A)\n B = self.transforms(B)\n \n return {'A': A, 'B': B}\n\n\nclass CustomDataset(Dataset):\n \n def __init__(self, path: str = None, transforms = None):\n \n \"\"\"\n Parameters:\n transforms: a list of Transformations (Data augmentation)\n \"\"\"\n \n super().__init__(); self.transforms = T.Compose(transforms)\n \n file_names_A = sorted(os.listdir(path + 'A/'), 
key = lambda x: int(x[: -4]))\n self.file_names_A = [path + 'A/' + file_name for file_name in file_names_A]\n \n file_names_B = sorted(os.listdir(path + 'B/'), key = lambda x: int(x[: -4]))\n self.file_names_B = [path + 'B/' + file_name for file_name in file_names_B]\n \n \n def __len__(self):\n return min(len(self.file_names_A), len(self.file_names_B))\n \n \n def __getitem__(self, idx):\n \n A = io.imread(self.file_names_A[idx % len(self.file_names_A)])\n B = io.imread(self.file_names_B[idx % len(self.file_names_B)])\n sample = self.transforms({'A': A, 'B': B})\n \n return sample\n\n\nclass Helper(object):\n \n @staticmethod\n def show_image(image):\n \n image = np.transpose((image + 1) / 2, (1, 2, 0))\n plt.imshow(image)\n \n \n @staticmethod\n def tensor_to_numpy(tensor):\n \n tensor = (tensor.cpu().clone() + 1) / 2\n if len(tensor.shape) == 3: tensor = np.transpose(tensor, (1, 2, 0))\n elif len(tensor.shape) == 4: tensor = np.transpose(tensor, (0, 2, 3, 1))\n \n return tensor\n\n \n @staticmethod\n def get_random_sample(dataset):\n return dataset[np.random.randint(0, len(dataset))]\n \n \n @staticmethod\n def get_data(path: str, tfms, batch_sz: int, is_train: bool):\n \n dataset = CustomDataset(path = path, transforms = tfms)\n dataloader = DataLoader(dataset, batch_size = batch_sz, shuffle = is_train, num_workers = 2)\n \n return dataset, dataloader\n\n\n##########################################################################################################################\n\n# 1) Correctly specify the Root directory which contains two folders: Train folder and Validation folder\n# 2) Image names should be labeled from 1 to len(dataset), o/w will throw an error while sorting the filenames\n\nroot_dir = \"./Dataset/Vision/CycleGAN/Cezzane/\"; \ntrn_path = root_dir + \"Trn/\"\nval_path = root_dir + \"Val/\"\n\ntrn_batch_sz = 1 * len(devices)\nval_batch_sz = 64\n\nimg_sz = 128\njitter_sz = int(img_sz * 1.12)\n\nval_tfms = [Resize(img_sz), To_Tensor(), Normalize()]\ntrn_tfms = [Resize(jitter_sz), RandomCrop(img_sz), Random_Flip(), To_Tensor(), Normalize()]\n\ntrn_dataset, trn_dataloader = helper.get_data(trn_path, trn_tfms, trn_batch_sz, is_train = True )\nval_dataset, val_dataloader = helper.get_data(val_path, val_tfms, val_batch_sz, is_train = False)\n\nnb_trn_iters = len(trn_dataloader)\nnb_val_iters = len(val_dataloader)\n\nhelper = Helper()\nprint(f\"Length of Training dataset: {len(trn_dataset)}, Validation dataset: {len(val_dataset)}\")\n\nprint(f\"Few random samples from the Training dataset!\")\nsample = helper.get_random_sample(trn_dataset)\nplt.subplot(1, 2, 1); helper.show_image(sample['A'])\nplt.subplot(1, 2, 2); helper.show_image(sample['B'])\nplt.show()\n\nprint(f\"Few random samples from the Validation dataset!\")\nsample = helper.get_random_sample(val_dataset)\nplt.subplot(1, 2, 1); helper.show_image(sample['A'])\nplt.subplot(1, 2, 2); helper.show_image(sample['B'])\nplt.show()\n\n\n##########################################################################################################################\n\n\nclass ResBlock(nn.Module):\n \n def __init__(self, in_channels: int, apply_dp: bool = True):\n \n \"\"\"\n Defines a ResBlock\n X ------------------------identity------------------------\n |-- Convolution -- Norm -- ReLU -- Convolution -- Norm --|\n \"\"\"\n \n \"\"\"\n Parameters:\n in_channels: Number of input channels\n apply_dp: If apply_dp is set to True, then activations are 0'ed out with prob 0.5\n \"\"\"\n \n super().__init__()\n \n conv = 
nn.Conv2d(in_channels = in_channels, out_channels = in_channels, kernel_size = 3, stride = 1)\n layers = [nn.ReflectionPad2d(1), conv, nn.InstanceNorm2d(in_channels), nn.ReLU(True)]\n \n if apply_dp: layers += [nn.Dropout(0.5)]\n \n conv = nn.Conv2d(in_channels = in_channels, out_channels = in_channels, kernel_size = 3, stride = 1)\n layers += [nn.ReflectionPad2d(1), conv, nn.InstanceNorm2d(in_channels)]\n \n self.net = nn.Sequential(*layers)\n \n \n def forward(self, x): return x + self.net(x)\n\n\n\nclass Generator(nn.Module):\n \n def __init__(self, in_channels: int = 3, out_channels: int = 64, apply_dp: bool = True):\n \n \"\"\"\n Generator Architecture (Image Size: 256)\n c7s1-64, d128, d256, R256, R256, R256, R256, R256, R256, R256, R256, R256, u128, u64, c7s1-3, \n \n where c7s1-k denote a 7 × 7 Conv-InstanceNorm-ReLU layer with k filters and stride 1, dk denotes a 3 × 3\n Conv-InstanceNorm-ReLU layer with k filters and stride 2, Rk denotes a residual block that contains two \n 3 × 3 Conv layers with the same number of filters on both layer. uk denotes a 3 × 3 DeConv-InstanceNorm-\n ReLU layer with k filters and stride 1.\n \"\"\"\n \n \"\"\"\n Parameters: \n in_channels: Number of input channels \n out_channels: Number of output channels\n apply_dp: If apply_dp is set to True, then activations are 0'ed out with prob 0.5\n \"\"\"\n \n super().__init__()\n \n f = 1\n nb_downsampling = 2\n nb_resblks = 6 if img_sz == 128 else 9 \n \n conv = nn.Conv2d(in_channels = in_channels, out_channels = out_channels, kernel_size = 7, stride = 1)\n self.layers = [nn.ReflectionPad2d(3), conv, nn.InstanceNorm2d(out_channels), nn.ReLU(True)]\n \n for i in range(nb_downsampling):\n conv = nn.Conv2d(out_channels * f, out_channels * 2 * f, kernel_size = 3, stride = 2, padding = 1)\n self.layers += [conv, nn.InstanceNorm2d(out_channels * 2 * f), nn.ReLU(True)]\n f *= 2\n \n for i in range(nb_resblks):\n res_blk = ResBlock(in_channels = out_channels * f, apply_dp = apply_dp)\n self.layers += [res_blk]\n \n for i in range(nb_downsampling):\n conv = nn.ConvTranspose2d(out_channels * f, out_channels * (f//2), 3, 2, padding = 1, output_padding = 1)\n self.layers += [conv, nn.InstanceNorm2d(out_channels * (f//2)), nn.ReLU(True)]\n f = f // 2\n \n conv = nn.Conv2d(in_channels = out_channels, out_channels = in_channels, kernel_size = 7, stride = 1)\n self.layers += [nn.ReflectionPad2d(3), conv, nn.Tanh()]\n \n self.net = nn.Sequential(*self.layers)\n \n \n def forward(self, x): return self.net(x)\n\n\n\nclass Discriminator(nn.Module):\n \n def __init__(self, in_channels: int = 3, out_channels: int = 64, nb_layers: int = 3):\n \n \"\"\"\n Discriminator Architecture!\n C64 - C128 - C256 - C512, where Ck denote a Convolution-InstanceNorm-LeakyReLU layer with k filters\n \"\"\"\n \n \"\"\"\n Parameters: \n in_channels: Number of input channels\n out_channels: Number of output channels\n nb_layers: Number of layers in the 70*70 Patch Discriminator\n \"\"\"\n \n super().__init__()\n \n in_f = 1\n out_f = 2\n \n conv = nn.Conv2d(in_channels, out_channels, kernel_size = 4, stride = 2, padding = 1)\n self.layers = [conv, nn.LeakyReLU(0.2, True)]\n \n for idx in range(1, nb_layers):\n conv = nn.Conv2d(out_channels * in_f, out_channels * out_f, kernel_size = 4, stride = 2, padding = 1)\n self.layers += [conv, nn.InstanceNorm2d(out_channels * out_f), nn.LeakyReLU(0.2, True)]\n in_f = out_f\n out_f *= 2\n \n out_f = min(2 ** nb_layers, 8)\n conv = nn.Conv2d(out_channels * in_f, out_channels * out_f, kernel_size = 4, stride = 
1, padding = 1)\n self.layers += [conv, nn.InstanceNorm2d(out_channels * out_f), nn.LeakyReLU(0.2, True)] \n \n conv = nn.Conv2d(out_channels * out_f, out_channels = 1, kernel_size = 4, stride = 1, padding = 1)\n self.layers += [conv]\n \n self.net = nn.Sequential(*self.layers)\n \n \n def forward(self, x): return self.net(x)\n \n\n\nclass Initializer:\n \n def __init__(self, init_type: str = 'normal', init_gain: float = 0.02): \n \n \"\"\"\n Initializes the weight of the network!\n \n Parameters: \n init_type: Initializer type - 'kaiming' or 'xavier' or 'normal'\n init_gain: Standard deviation of the normal distribution\n \"\"\"\n \n self.init_type = init_type; self.init_gain = init_gain\n \n \n def init_module(self, m):\n \n cls_name = m.__class__.__name__;\n if hasattr(m, 'weight') and (cls_name.find('Conv') != -1 or cls_name.find('Linear') != -1):\n \n if self.init_type == 'kaiming': nn.init.kaiming_normal_(m.weight.data, a = 0, mode = 'fan_in')\n elif self.init_type == 'xavier' : nn.init.xavier_normal_ (m.weight.data, gain = self.init_gain)\n elif self.init_type == 'normal' : nn.init.normal_(m.weight.data, mean = 0, std = self.init_gain)\n else: raise ValueError('Initialization not found!!')\n \n if m.bias is not None: nn.init.constant_(m.bias.data, val = 0); \n \n if hasattr(m, 'weight') and cls_name.find('BatchNorm2d') != -1:\n nn.init.normal_(m.weight.data, mean = 1.0, std = self.init_gain)\n nn.init.constant_(m.bias.data, val = 0)\n \n \n def __call__(self, net):\n \n \"\"\"\n Parameters: \n net: Network\n \"\"\"\n \n net = net.to(devices[0]); net = nn.DataParallel(net, device_ids = range(len(devices)))\n net.apply(self.init_module)\n \n return net\n\n\n##########################################################################################################################\n\n\ninit = Initializer(init_type = 'normal', init_gain = 0.02)\n\nd_A = init(Discriminator(in_channels = 3, out_channels = 64, nb_layers = 3))\nd_B = init(Discriminator(in_channels = 3, out_channels = 64, nb_layers = 3))\n\ng_A2B = init(Generator(in_channels = 3, out_channels = 64, apply_dp = False))\ng_B2A = init(Generator(in_channels = 3, out_channels = 64, apply_dp = False))\n\n\n##########################################################################################################################\n\n\nclass Tensorboard:\n \n def __init__(self, path: str): self.writer = SummaryWriter(path)\n \n \n def write_graph(self, model): \n \n A = helper.get_random_sample(trn_dataset)['A'].unsqueeze(0)\n self.writer.add_graph(model.module, A.to(devices[0]))\n \n \n @torch.no_grad()\n def write_image(self, nb_examples: int, g_A2B, g_B2A, epoch: int, curr_iter: int):\n \n grid_A = []\n grid_B = []\n n_iter = (epoch - 1) * nb_trn_iters + curr_iter\n \n for _ in range(nb_examples):\n \n sample = helper.get_random_sample(val_dataset)\n real_A = sample['A'].unsqueeze(0).to(devices[0])\n real_B = sample['B'].unsqueeze(0).to(devices[0])\n\n fake_A = g_B2A(real_B); cyc_B = g_A2B(fake_A)\n fake_B = g_A2B(real_A); cyc_A = g_B2A(fake_B)\n\n tensor = torch.cat([real_A, fake_B, cyc_A, real_B, fake_A, cyc_B])\n tensor = (tensor.cpu().clone() + 1) / 2\n \n grid_A.append(tensor[:3])\n grid_B.append(tensor[3:]) \n \n grid_A = torchvision.utils.make_grid(torch.cat(grid_A, 0), nrow = 6)\n grid_B = torchvision.utils.make_grid(torch.cat(grid_B, 0), nrow = 6)\n\n self.writer.add_image('Grid_A', grid_A, n_iter)\n self.writer.add_image('Grid_B', grid_B, n_iter)\n \n \n @torch.no_grad()\n def write_loss(self, d_loss: float, g_loss: float, 
epoch: int, curr_iter: int):\n \n n_iter = (epoch - 1) * nb_trn_iters + curr_iter\n \n self.writer.add_scalar('d_loss', round(d_loss.item(), 4), n_iter)\n self.writer.add_scalar('g_loss', round(g_loss.item(), 4), n_iter)\n\n\n\nclass Loss:\n \n \"\"\"\n This class implements different losses required to train the generators and discriminators of CycleGAN\n \"\"\"\n \n def __init__(self, loss_type: str = 'MSE', lambda_: int = 10):\n \n \"\"\"\n Parameters:\n loss_type: Loss Function to train CycleGAN\n lambda_: Weightage of Cycle-consistency loss\n \"\"\"\n \n self.loss = nn.MSELoss() if loss_type == 'MSE' else nn.BCEWithLogitsLoss()\n self.lambda_ = lambda_\n \n \n def get_dis_gan_loss(self, dis_pred_real_data, dis_pred_fake_data):\n \n \"\"\"\n Parameters:\n dis_pred_real_data: Discriminator's prediction on real data\n dis_pred_fake_data: Discriminator's prediction on fake data\n \"\"\"\n \n dis_tar_real_data = torch.ones_like (dis_pred_real_data, requires_grad = False)\n dis_tar_fake_data = torch.zeros_like(dis_pred_fake_data, requires_grad = False)\n \n loss_real_data = self.loss(dis_pred_real_data, dis_tar_real_data)\n loss_fake_data = self.loss(dis_pred_fake_data, dis_tar_fake_data)\n \n dis_tot_loss = (loss_real_data + loss_fake_data) * 0.5\n \n return dis_tot_loss\n \n \n def get_gen_gan_loss(self, dis_pred_fake_data):\n \n \"\"\"\n Parameters:\n dis_pred_fake_data: Discriminator's prediction on fake data\n \"\"\"\n \n gen_tar_fake_data = torch.ones_like(dis_pred_fake_data, requires_grad = False)\n gen_tot_loss = self.loss(dis_pred_fake_data, gen_tar_fake_data)\n \n return gen_tot_loss\n \n \n def get_gen_cyc_loss(self, real_data, cyc_data):\n \n \"\"\"\n Parameters:\n real_data: Real images sampled from the dataloaders\n cyc_data: Image reconstructed after passing the real image through both the generators\n X_recons = F * G (X_real), where F and G are the two generators\n \"\"\"\n \n gen_cyc_loss = torch.nn.L1Loss()(real_data, cyc_data)\n gen_tot_loss = gen_cyc_loss * self.lambda_\n \n return gen_tot_loss\n \n \n def get_gen_idt_loss(self, real_data, idt_data):\n \n \"\"\"\n Implements the identity loss: \n nn.L1Loss(LG_B2A(real_A), real_A) \n nn.L1Loss(LG_A2B(real_B), real_B) \n \"\"\"\n \n gen_idt_loss = torch.nn.L1Loss()(real_data, idt_data)\n gen_tot_loss = gen_idt_loss * self.lambda_ * 0.5\n \n return gen_tot_loss\n\n\n\nclass ImagePool:\n \n \"\"\"\n This class implements an image buffer that stores previously generated images! 
This buffer enables to update\n discriminators using a history of generated image rather than the latest ones produced by generator.\n \"\"\"\n \n def __init__(self, pool_sz: int = 50):\n \n \"\"\"\n Parameters:\n pool_sz: Size of the image buffer\n \"\"\"\n \n self.pool_sz = pool_sz\n self.image_pool = []\n self.nb_images = 0\n \n \n def push_and_pop(self, images):\n \n \"\"\"\n Parameters:\n images: latest images generated by the generator\n \n Returns a batch of images from pool!\n \"\"\"\n \n images_to_return = []\n for image in images:\n image = torch.unsqueeze(image, 0)\n \n if self.nb_images < self.pool_sz:\n self.image_pool.append (image) \n images_to_return.append(image)\n self.nb_images += 1\n else:\n if np.random.uniform(0, 1) > 0.5:\n \n rand_int = np.random.randint(0, self.pool_sz)\n temp_img = self.image_pool[rand_int].clone()\n self.image_pool[rand_int] = image\n images_to_return.append(temp_img) \n else:\n images_to_return.append(image)\n \n return torch.cat(images_to_return, 0)\n\n\n\nclass SaveModel:\n \n def __init__(self, path: str, keep_only: int = 3): \n \n self.path = path\n self.keep_only = keep_only\n \n \n def save_model(self, epoch: int, d_A, d_B, g_A2B, g_B2A, d_opt, g_opt):\n \n filename = self.path + \"Model_\" + str(epoch) + \".pth\"\n \n torch.save({'epochs': epoch, 'd_opt': d_opt.state_dict(), 'g_opt': g_opt.state_dict(), \n 'd_A': d_A.module.state_dict(), 'd_B': d_B.module.state_dict(),\n 'g_A2B': g_A2B.module.state_dict(), 'g_B2A': g_B2A.module.state_dict()}, filename)\n \n \n filenames = [f for f in os.listdir(self.path) if not f.startswith('.')]\n if len(filenames) > self.keep_only:\n os.remove(self.path + sorted(filenames, key = lambda x: int(x[6 : -4]))[0])\n\n\n\nclass CycleGAN:\n \n def __init__(self, root_dir: str, g_A2B, g_B2A, d_A, d_B):\n \n self.save_dir = root_dir + 'Models/'\n summary_path = root_dir + 'Tensorboard/'\n \n if not os.path.exists(self.save_dir): os.makedirs(self.save_dir)\n if not os.path.exists(summary_path ): os.makedirs(summary_path )\n self.saver = SaveModel(self.save_dir); self.tb = Tensorboard(summary_path)\n \n self.d_A = d_A\n self.d_B = d_B\n self.g_A2B = g_A2B\n self.g_B2A = g_B2A\n self.fake_pool_A = ImagePool(pool_sz = 50)\n self.fake_pool_B = ImagePool(pool_sz = 50)\n self.loss = Loss(loss_type = 'MSE', lambda_ = 10)\n \n \n def load_state_dict(self, path: str = None, train: bool = True):\n \n checkpoint = torch.load(path); start_epoch = checkpoint['epochs'] + 1\n \n if train:\n self.d_opt.load_state_dict(checkpoint['d_opt'])\n self.g_opt.load_state_dict(checkpoint['g_opt'])\n \n self.d_A.module.load_state_dict(checkpoint['d_A'])\n self.d_B.module.load_state_dict(checkpoint['d_B'])\n \n self.g_A2B.module.load_state_dict(checkpoint['g_A2B'])\n self.g_B2A.module.load_state_dict(checkpoint['g_B2A'])\n \n return start_epoch\n \n \n @staticmethod\n def set_requires_grad(nets, requires_grad = False):\n \n \"\"\"\n Set requies_grad=Fasle for all the networks to avoid unnecessary computations\n Parameters:\n nets (network list) -- a list of networks\n requires_grad (bool) -- whether the networks require gradients or not\n \"\"\"\n \n if not isinstance(nets, list): nets = [nets]\n for net in nets:\n for param in net.module.parameters(): param.requires_grad = requires_grad\n \n \n def fit(self, nb_epochs: int = 200, d_lr: float = 2e-4, g_lr: float = 2e-4, beta_1: float = 0.5, model_name: \\\n str = None, keep_only: int = 3, epoch_decay: int = 200):\n \n \"\"\"\n Parameters: \n model_name: Resume the training from saved 
checkpoint \"model_name\"\n keep_only: Max Number of models to keep in the self.save_dir\n epoch_decay: Number of epochs after which learning rate starts decaying\n \"\"\"\n \n d_params = itertools.chain(self.d_A.module.parameters(), self.d_B.module.parameters())\n g_params = itertools.chain(self.g_A2B.module.parameters(), self.g_B2A.module.parameters())\n \n self.d_opt = optim.Adam(params = d_params, lr = d_lr, betas = (beta_1, 0.999))\n self.g_opt = optim.Adam(params = g_params, lr = g_lr, betas = (beta_1, 0.999))\n \n start_epoch = 0; curr_iter = 0\n if model_name is not None: start_epoch = self.load_state_dict(path = self.save_dir + model_name)\n \n # LrScheduler follows this lambda rule to decay the learning rate\n def lr_lambda(epoch):\n fraction = (epoch - epoch_decay) / (nb_epochs - epoch_decay)\n return 1 if epoch < epoch_decay else 1 - fraction\n \n d_scheduler = optim.lr_scheduler.LambdaLR(self.d_opt, lr_lambda = lr_lambda, last_epoch = start_epoch - 1)\n g_scheduler = optim.lr_scheduler.LambdaLR(self.g_opt, lr_lambda = lr_lambda, last_epoch = start_epoch - 1)\n \n \n for epoch in range(start_epoch + 1, nb_epochs + 1):\n for data in trn_dataloader:\n \n curr_iter += 1\n real_A, real_B = data['A'].to(devices[0]), data['B'].to(devices[0])\n \n # Forward pass\n fake_B = self.g_A2B(real_A); cyc_A = self.g_B2A(fake_B)\n fake_A = self.g_B2A(real_B); cyc_B = self.g_A2B(fake_A)\n idt_A = self.g_B2A(real_A); idt_B = self.g_A2B(real_B)\n \n \n # No need to calculate the gradients for Discriminators' parameters\n self.set_requires_grad([self.d_A, self.d_B], requires_grad = False)\n d_A_pred_fake_data = self.d_A(fake_A)\n d_B_pred_fake_data = self.d_B(fake_B)\n \n # Cycle loss\n cyc_loss_A = self.loss.get_gen_cyc_loss(real_A, cyc_A)\n cyc_loss_B = self.loss.get_gen_cyc_loss(real_B, cyc_B)\n tot_cyc_loss = cyc_loss_A + cyc_loss_B\n \n # GAN loss\n g_A2B_gan_loss = self.loss.get_gen_gan_loss(d_B_pred_fake_data)\n g_B2A_gan_loss = self.loss.get_gen_gan_loss(d_A_pred_fake_data)\n \n # Identity loss\n g_B2A_idt_loss = self.loss.get_gen_idt_loss(real_A, idt_A)\n g_A2B_idt_loss = self.loss.get_gen_idt_loss(real_B, idt_B)\n \n # Total individual losses\n g_A2B_loss = g_A2B_gan_loss + g_A2B_idt_loss + tot_cyc_loss\n g_B2A_loss = g_B2A_gan_loss + g_B2A_idt_loss + tot_cyc_loss\n g_tot_loss = g_A2B_loss + g_B2A_loss - tot_cyc_loss\n \n # Parameters' getting updated\n self.g_opt.zero_grad()\n g_tot_loss.backward()\n self.g_opt.step()\n \n \n # Discriminator's optimization step\n self.set_requires_grad([self.d_A, self.d_B], requires_grad = True)\n self.d_opt.zero_grad()\n \n fake_A = self.fake_pool_A.push_and_pop(fake_A)\n d_A_pred_real_data = self.d_A(real_A)\n d_A_pred_fake_data = self.d_A(fake_A.detach())\n \n # Discrimiator A loss\n d_A_loss = self.loss.get_dis_gan_loss(d_A_pred_real_data, d_A_pred_fake_data)\n d_A_loss.backward()\n \n fake_B = self.fake_pool_B.push_and_pop(fake_B)\n d_B_pred_real_data = self.d_B(real_B)\n d_B_pred_fake_data = self.d_B(fake_B.detach())\n \n # Discrimiator B loss\n d_B_loss = self.loss.get_dis_gan_loss(d_B_pred_real_data, d_B_pred_fake_data)\n d_B_loss.backward() \n \n # Parameters' getting updated\n self.d_opt.step()\n d_tot_loss = d_A_loss + d_B_loss\n \n \n # Writing statistics to the Tensorboard\n self.tb.write_loss (d_tot_loss, g_tot_loss, epoch, curr_iter)\n if curr_iter % 150 == 0: self.tb.write_image(10, self.g_A2B, self.g_B2A, epoch, curr_iter)\n \n \n curr_iter = 0\n g_scheduler.step()\n d_scheduler.step()\n \n print(f\"After {epoch} epochs:\"); \n 
print(f\"G_Loss: {round(g_tot_loss.item(), 3)}, D_Loss: {round(d_tot_loss.item(), 3)}\", end = \"\\n\")\n \n # Save models after every 10 epochs\n if epoch % 10 == 0:\n self.saver.save_model(epoch, self.d_A, self.d_B, self.g_A2B, self.g_B2A, self.d_opt, self.g_opt)\n \n \n @torch.no_grad()\n def eval_(self, model_name: str = None):\n \n _ = self.load_state_dict(path = self.save_dir + model_name, train = False) \n \n list_real_A = []\n list_fake_A = []\n list_real_B = []\n list_fake_B = []\n \n for idx, data in enumerate(val_dataloader):\n \n real_A, real_B = data['A'].to(devices[0]), data['B'].to(devices[0])\n fake_A = self.g_B2A(real_B).detach()\n fake_B = self.g_A2B(real_A).detach()\n \n list_real_A.append(real_A)\n list_real_B.append(real_B)\n list_fake_A.append(fake_A)\n list_fake_B.append(fake_B)\n \n real_A = torch.cat(list_real_A, axis = 0)\n fake_A = torch.cat(list_fake_A, axis = 0)\n real_B = torch.cat(list_real_B, axis = 0)\n fake_B = torch.cat(list_fake_B, axis = 0)\n \n return real_A, real_B, fake_A, fake_B\n\n\n##########################################################################################################################\n\n\nis_train = True\nnb_epochs = 200\nepoch_decay = nb_epochs // 2\nroot_dir = \"./Results/CycleGAN/Cezzane/\"\n\nmodel = CycleGAN(root_dir = root_dir, g_A2B = g_A2B, g_B2A = g_B2A, d_A = d_A, d_B = d_B)\n\nif is_train: model.fit(nb_epochs = nb_epochs, model_name = None, epoch_decay = epoch_decay)\nelse: real_A, real_B, fake_A, fake_B = model.eval_(model_name = \"Model_\" + str(nb_epochs) + \".pth\")\n\n##########################################################################################################################\n", "id": "11576143", "language": "Python", "matching_score": 1.5358973741531372, "max_stars_count": 297, "path": "creative_ai/models/cycleg.py" }, { "content": "'''\nCreated on Apr 8, 2016\n\n@author: blevine\n'''\nimport logging\nimport random\nimport re\nfrom functools import reduce\nfrom logging import DEBUG\nfrom random import shuffle\n\nfrom nltk.sentiment.util import mark_negation\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\nfrom numpy import array, mean\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\n\nfrom goonalytics.io import SQLiteDAO\n\n'''\nNotes:\n\nPost sentiments should be pulled from like a week after the game came out\n\nEntries that are one word (~10 chars) should be trimmed out\n\nVADER sentiment polarities are going to be overwhelmingly neutral. Negative \nscores need to be weighted more heavily. 
A lot of the time, even positive stuff \nis still negative\n'''\nlogging.basicConfig(level=DEBUG)\nlog = logging.getLogger(__name__)\n\nRANDOM_SEED = 1\n\nFACTORIO = 3629545\nFACTORIO_RELEASE_DATE = '''date('2016-02-25','+14 day')''' # steam release\nROCKET_LEAGUE = 3718089\nROCKET_LEAGUE_RELEASE_DATE = '''(date '2015-07-07', '+14 day')'''\nGOOD_THREADS = (FACTORIO, ROCKET_LEAGUE)\nHALO_5 = 3742406\nHALO_5_RELEASE_DATE = '''(date '2015-10-27', '+14 day')'''\nSTARBOUND = 3697749\nSTARBOUND_RELEASE_DATE = '2013-12-03' # apparently it's really old\nBAD_THREADS = (HALO_5, STARBOUND)\n\n# cod threads\nROOT_BEER = 3630852\nBLOPS_3 = 3716726\nBLOPS = 3387110\nMW_2 = (3311154, 3311154, 3227086, 3239216)\nWAW = 3007345\nCOD4 = (2756848, 3093899)\n\nGOATS = 3564911\nBLOPS_2 = (3522296, 3482470)\nMW3 = (3446068, 3461453)\n\ndao = SQLiteDAO()\n\n\ndef load_posts(thread_id, after_date=''' '2001-01-01' '''):\n con = dao.create_connection()\n cursor = con.cursor()\n cursor.execute(\n 'select post_text from posts where thread_id={0} and post_timestamp > {1}'.format(str(thread_id), after_date))\n return [res[0] for res in cursor.fetchall() if res is not None]\n\n\ndef load_cleaned(id_set, min_words=4):\n return clean_raw_posts(load_all(id_set), min_word_limit=min_words)\n\n\ndef load_post_id_map(id_set):\n pmap = dict()\n for id in id_set:\n con = dao.create_connection()\n cursor = con.cursor()\n cursor.execute('select post_id, post_text from posts where thread_id=' + str(id))\n for res in cursor.fetchall():\n cln = clean_post(res[1])\n pmap[res[0]] = cln\n log.info('Retrieved ' + str(len(pmap)) + ' posts')\n return pmap\n\n\ndef load_all(id_set):\n combined = list()\n for t_id in id_set:\n combined += load_posts(t_id)\n return combined\n\n\ndef clean_raw_posts(post_list, use_sentences=True, min_word_limit=4):\n '''\n Return a cleaned list of posts. The parameter is expected to be a \n raw list of posts from the database. This method then removes URLs and \n posts less than the minimum word limit specified. If as_sentences is set \n to False, this method returns a list of lists of words. 
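\n\n    Illustrative (hypothetical) example with the default min_word_limit of 4:\n    clean_raw_posts(['see http://example.com the game rules', 'ok']) -> ['see the game rules']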
\n '''\n # create a list of lists of words\n words = as_words(post_list)\n # clean the links out of the list of lists etc\n cleaned_vals = list()\n for wordarray in words:\n cleanedarray = [w for w in wordarray if re.search('http', w) is None]\n if len(cleanedarray) >= min_word_limit:\n cleaned_vals.append(cleanedarray)\n if use_sentences:\n cleaned_vals = as_sentences(cleaned_vals)\n return cleaned_vals\n\n\ndef clean_post(post, min_word_limit=4):\n words = post.split()\n cleanedwords = [w for w in words if re.search('http', w) is None]\n if len(cleanedwords) >= min_word_limit:\n return reduce(lambda p, q: p + ' ' + q, cleanedwords)\n return None\n\n\ndef mean_polarity_values(polarities):\n output = dict()\n for k in polarities[0].keys():\n output[k] = mean([pol[k] for pol in polarities])\n return output\n\n\ndef as_words(posts):\n return [x.split() for x in posts]\n\n\ndef as_sentences(posts):\n return [reduce(lambda p, q: p + ' ' + q, x) for x in posts]\n\n\ndef rank_polarity(post_list, sentiment='pos'):\n '''\n Returns a dict of the entries in post_list, with the value of the sentiment specified\n mapped to the index in the list\n '''\n sia = SentimentIntensityAnalyzer()\n d = dict()\n for e, p in enumerate(post_list):\n curr_polarity = sia.polarity_scores(p)\n d[e] = curr_polarity[sentiment]\n return d\n\n\ndef mean_compound_polarity(post_list, sia=SentimentIntensityAnalyzer()):\n polarities = [sia.polarity_scores(p) for p in post_list]\n compounds = [pol['compound'] for pol in polarities]\n return mean(array(compounds))\n\n\ndef feature_set(post_list):\n \"\"\"\n Expects a list of cleaned posts in sentence format and returns a featureset\n calculated by marking negation then doing a count vectorization and tf-idf\n transform\n \"\"\"\n # mark negation\n # count vectorizer\n # tf-idf\n # isn't fucked up -> isn't fucked_NEG up_NEG\n marked = [mark_negation(p) for p in post_list]\n tv = TfidfVectorizer(min_df=1)\n marked_words = flatten(as_words(marked))\n return tv.fit_transform(marked_words)\n\n\ndef flatten(post_list):\n return reduce(lambda s, r: s + r, post_list)\n\n\ndef max_sentiment(post_list, sentiment='pos', sia=SentimentIntensityAnalyzer()):\n '''\n Returns a tuple containing the post with the maximum value of \n the sentiment specified, and the sentiment score\n '''\n ret = tuple()\n for p in post_list:\n curr_polarity = sia.polarity_scores(p)\n if len(ret) < 1 or curr_polarity[sentiment] > ret[1]:\n ret = (p, curr_polarity[sentiment])\n return ret\n\n\ndef pull_thread_dataset(thread_id, release_date, min_char_limit=20):\n '''\n Returns a cleaned dataset. 
Currently, this entails grabbing all posts \n (quotes removed) occurring two weeks or more after the game's release, \n removing URLs, and removing posts containing less than a minimum character \n limit\n '''\n pass\n\n\ndef rnd_seed():\n return RANDOM_SEED\n\n\nif __name__ == '__main__':\n log.info(\"Loading data sets...\")\n good = load_cleaned(GOOD_THREADS)\n bad = load_cleaned(BAD_THREADS)\n log.info(\"Pulled %d good posts and %d bad posts\", len(good), len(bad))\n # create test and train sets by shuffling crap\n log.info(\"Creating testing and training sets...\")\n random.seed(1)\n shuffle(good)\n shuffle(bad)\n good_bnd = len(good) // 2\n bad_bnd = len(bad) // 2\n good_train = good[:good_bnd]\n good_test = good[good_bnd:]\n bad_train = bad[:bad_bnd]\n bad_test = bad[bad_bnd:]\n log.info(\"Calculating good training set features...\")\n g_tr_feat = feature_set(good_train)\n log.info(\"Calculating good testing set features...\")\n g_tst_feat = feature_set(good_test)\n log.info(\"Calculating bad training set features...\")\n b_tr_feat = feature_set(bad_train)\n log.info(\"Calculating bad testing set features...\")\n b_tst_feat = feature_set(bad_test)\n # train the classifier\n log.info(\"Training classifier...\")\n clf = MultinomialNB()\n target = [0, 1]\n clf.fit([g_tr_feat, b_tr_feat], target)\n log.info(\"Predicting test sets...\")\n g_pred = clf.predict(g_tst_feat)\n log.info(\"Predicted value %d for good test set\", g_pred)\n b_pred = clf.predict(b_tst_feat)\n log.info(\"Predicted value %d for bad test set\", b_pred)\n", "id": "9974296", "language": "Python", "matching_score": 4.558809280395508, "max_stars_count": 0, "path": "python/goonalytics/legacy/forumsml.py" }, { "content": "'''\nCreated on Apr 18, 2016\n\n@author: blevine\n'''\n\n# thread ids\n\nimport logging\nfrom logging import INFO\n\nimport numpy as np\nimport pandas as pd\nfrom forumsml import create_connection, load_post_id_map\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\nfrom pandas.core.frame import DataFrame\n\nlogging.basicConfig()\n\nlog = logging.getLogger(__name__)\nlog.setLevel(INFO)\n\nBLOPS3 = (3716726,)\nROOT_BEER = (3630852,)\nGOATS = (3564911,)\nBLOPS2 = (3522296, 3482470)\nMW3 = (3446068, 3461453)\nBLOPS = (3387110,)\nMW2 = (3311154, 3227086, 3239216)\nWAW = (3007345,)\nCOD4 = (2756848, 3093899)\nTITANFALL_2 = 3779817\n\n# blops 2[0], mw_2[1, all],\n\n# thread map\nTHREADS = {'cod4': COD4, 'waw': WAW, 'mw2': MW2, 'blops': BLOPS, 'mw3': MW3, 'blops2': BLOPS2, 'goats': GOATS,\n 'rootbeer': ROOT_BEER, 'blops3': BLOPS3}\n# release dates\nRELEASE_DATES = {'cod4': ''' '2007-11-05' ''',\n 'waw': ''' '2008-11-11' ''',\n 'mw2': ''' '2009-11-10' ''',\n 'blops': ''' '2010-11-09' ''',\n 'mw3': ''' '2011-11-08' ''',\n 'blops2': ''' '2012-11-12' ''',\n 'goats': ''' '2013-11-05' ''',\n 'rootbeer': ''' '2014-11-04' ''',\n 'blops3': ''' '2015-11-06' '''}\n\n\ndef count_posts(game):\n ids = THREADS.get(game)\n con = create_connection()\n count = 0\n for val in ids:\n query = \"select count(*) from posts where thread_id=%d\" % val;\n cursor = con.cursor()\n c = cursor.execute(query).fetchall()\n count += c[0][0] # so ugly\n\n return count\n\n\ndef write_polarities(t_id, filename, write_header=False):\n ids = THREADS[t_id]\n pmap = load_post_id_map(ids)\n if write_header:\n option = 'w+'\n else:\n option = 'a'\n with(open(filename, option)) as outf:\n sia = SentimentIntensityAnalyzer()\n if write_header:\n outf.write('thread,post_id,neg,neu,pos,compound\\n')\n i = 0\n size = len(pmap)\n for post_id in 
pmap.keys():\n ptext = pmap[post_id]\n if ptext is not None:\n pols = sia.polarity_scores(pmap[post_id])\n outf.write(t_id + ',' + str(post_id) + ',' + str(pols['neg']) + ',' + str(pols['neu']) + ',' + str(\n pols['pos']) + ',' + str(pols['compound']) + '\\n')\n i += 1\n log.info('Wrote polarities for post %d of %d', i, size)\n else:\n log.info('Skipping post %d', post_id)\n return\n\n\ndef load_polarities():\n with (open('post_polarities.csv')) as f:\n df = pd.read_csv(f)\n return df\n\n\ndef avg_game_polarity(game, column, df=None):\n if (df is None):\n df = load_polarities()\n vals = df[df.thread == game][column].values\n avg = np.mean(vals)\n return avg\n\n\ndef unique_posters(tkey):\n thread = THREADS[tkey]\n if len(thread) == 1:\n df = sql_df(\"select count(distinct user_id) from posts where thread_id == \" + str(thread[0]))\n else:\n df = sql_df(\"select count (distinct user_id) from posts where thread_id in \" + str(thread))\n return df.iloc[0][0]\n\n\ndef macys_posts(tkey):\n thread = THREADS[tkey]\n return val(varargs_df(\"select count(*) from posts where user_id==114997 and \", thread))\n\n\ndef post_count(tkey):\n thread = THREADS[tkey]\n return val(varargs_df(\"select count(*) from posts where \", thread))\n\n\ndef val(df):\n return df.iloc[0][0]\n\n\ndef varargs_df(string, threads):\n if len(threads) == 1:\n df = sql_df(string + 'thread_id == ' + str(threads[0]))\n else:\n df = sql_df(string + 'thread_id in ' + str(threads))\n return df\n\n\ndef create_frame(functions):\n '''\n Creates a dataframe with statistics for the threads provided\n '''\n threadkeys = THREADS.keys()\n df = DataFrame(columns=['cod4', 'waw', 'mw2', 'blops', 'mw3', 'blops2', 'goats', 'rootbeer', 'blops3'],\n index=[f.__name__ for f in functions])\n for e, function in enumerate(functions):\n for tkey in threadkeys:\n val = function(tkey)\n df.ix[e, tkey] = val\n return df\n\n\nif __name__ == '__main__':\n f = create_frame((unique_posters, post_count, macys_posts))\n print(str(f))\n out = open('macys_count2.csv', 'w+')\n f.to_csv(out)\n", "id": "2628143", "language": "Python", "matching_score": 1.8067586421966553, "max_stars_count": 0, "path": "python/goonalytics/legacy/cod.py" }, { "content": "\"\"\"\nio methods\n\"\"\"\nimport csv\nimport json\nimport sqlite3 as sql\nfrom abc import abstractmethod, ABCMeta\nfrom datetime import datetime\nfrom functools import reduce\nfrom typing import Iterable, Generic, T\nfrom typing import Tuple\n\nimport pandas as pd\nfrom elasticsearch import Elasticsearch\n\nfrom goonalytics.base import Post\nfrom goonalytics.settings import DATABASE_LOCATION, ELASTIC_LOCATION\n\n\nclass SQLiteDAO(object):\n \"\"\"\n DAOs are fucking dumb but so am i\n \"\"\"\n\n def thread_ids_for_forum_id(self, forum_id):\n \"\"\"\n Returns a list of thread ids for the given forum id\n :param forum_id: the numerical forum id, e.g. 
44 for games\n :return: a comma-separated string of thread ids for the forum given\n \"\"\"\n forum_id = SQLiteDAO.clean_forum_ids(forum_id)\n frame = self.sql_df(\"select distinct(thread_id) from threads where forum_id in ({})\".format(forum_id))\n l = frame['thread_id'].tolist()\n\n ignore = self.sql_df(\"select thread_id from thread_ignore\")\n iglist = ignore['thread_id'].tolist()\n return [tid for tid in l if tid not in iglist]\n\n def thread_ids_as_string(self, thread_ids):\n return reduce(lambda r, s: str(r) + ',' + str(s), thread_ids)\n\n @staticmethod\n def clean_forum_ids(forum_id) -> str:\n if type(forum_id) == list:\n forum_id_tostr = str(forum_id)\n end = len(forum_id_tostr) - 1\n forum_id = forum_id_tostr[1:end]\n return forum_id\n\n def thread_ids_and_last_page_scraped_for_forum(self, forum_id):\n \"\"\"\n returns a map of thread ids mapped to the most recent page scraped for whatever\n forum ID\n :param forum_id: the numerical forum id, e.g. 44 for games\n :return: a map whose keys are thread ids and values are the max page number in the database for those threads\n \"\"\"\n forum_id = SQLiteDAO.clean_forum_ids(forum_id)\n frame = self.sql_df(\n \"select posts.thread_id, max(posts.thread_page) from posts inner join threads using(thread_id) where threads.forum_id in ({}) group by thread_id\".format(\n forum_id))\n thread_ids = frame['thread_id'].tolist()\n pagenos = frame['max(posts.thread_page)'].tolist()\n mp = dict()\n for i in range(0, len(thread_ids) - 1):\n mp[thread_ids[i]] = pagenos[i]\n return mp\n\n\n def sql_df(self, query):\n \"\"\"\n A sql injection vulnerability, except I don't care\n :param query:\n :return:\n \"\"\"\n con = self.create_connection()\n df = pd.read_sql(query, con)\n return df\n\n def sql_single_value(self, query):\n return self.sql_df(query).iloc[0, 0]\n\n def sql_list(self, query):\n return list(self.sql_df(query).iloc[:, 0])\n\n def create_connection(self):\n return sql.connect(DATABASE_LOCATION)\n\n def posts_from_thread_generator(self, thread_id, chunksize=1000):\n con = self.create_connection()\n query = \"select * from posts where thread_id=\" + str(thread_id)\n cursor = con.cursor()\n cursor.arraysize = chunksize\n cursor.execute(query)\n return self.fetchsome(cursor, some=chunksize)\n\n def fetchsome(self, cursor, some=1000):\n fetch = cursor.fetchmany\n while True:\n rows = fetch(some)\n if not rows: break\n for row in rows:\n yield row\n\n\ndef ElasticSearchDAO(object):\n es = Elasticsearch(ELASTIC_LOCATION)\n\n def index_forum(forum_id, as_ascii=False):\n dao = SQLiteDAO()\n tgen = dao.thread_ids_for_forum_id(forum_id)\n for t in tgen:\n index_thread(forum_id, t, posts_as_ascii=as_ascii)\n\n def index_thread(forum_id, thread_id, posts_as_ascii=False):\n \"\"\"\n Indexes the thread on elastic search.\n :param forum_id:\n :param thread_id:\n :param posts_as_ascii: if set to true, will encode post text as ascii to get rid of unprintable unicode shit\n :return:\n \"\"\"\n dao = SQLiteDAO()\n pgen = dao.posts_from_thread_generator(thread_id)\n i = 0\n inx = str(forum_id) + '-' + str(thread_id)\n if posts_as_ascii: inx += '-ascii'\n for p in pgen:\n text = p[4].encode('ascii', errors='ignore') if posts_as_ascii else p[4]\n post = {\n 'post_id': p[0],\n 'thread_id': p[1],\n 'user_name': p[2],\n 'user_id': p[3],\n 'text': text,\n 'page': p[5],\n 'timestamp': datetime.strptime(p[6], '%Y-%m-%d %H:%M:%S')\n }\n es.index(index=inx, doc_type='post', body=post, id=p[0])\n i += 1\n if i % 100 == 0:\n print(\"Wrote \" + str(i) + \" posts\")\n\n def 
thread_to_json(thread_id: int, forum_id: int, filename: str) -> None:\n \"\"\"\n pulls shit out of sqlite and puts it in a json for each thread\n :param forum_id:\n :param thread_id:\n :param filename:\n :return:\n \"\"\"\n dao = SQLiteDAO()\n gen = dao.posts_from_thread_generator(thread_id)\n obj = {\n 'thread_id': str(thread_id),\n 'forum_id': str(forum_id),\n 'posts': [{\n\n }]\n }\n i = 0\n for p in gen:\n post_json = {\n 'post_id': p[0],\n 'thread_id': p[1],\n 'user_name': p[2],\n 'user_id': p[3],\n 'text': p[4],\n 'page': p[5],\n 'timestamp': p[6]\n }\n obj['posts'].append(post_json)\n i += 1\n if i % 100 == 0:\n print(\"Appended \" + str(i) + \" posts\")\n with open(filename, 'w+') as out:\n json.dump(obj, out)\n print(\"Wrote file to \" + filename)\n\n\ndef write_to_csv(posts: Iterable[Tuple], output: str, delimiter=',', trim_newlines=False) -> None:\n \"\"\"\n writes the posts provided as a unicode csv\n :param posts:\n :param output:\n :param delimiter:\n :return:\n \"\"\"\n with open(output, 'a') as out:\n cout = csv.writer(out, delimiter=delimiter, encoding='utf-8')\n for i, p in enumerate(posts):\n clean = list(p)\n if trim_newlines:\n clean[4] = p[4].replace('\\n', u'')\n cout.writerow(clean)\n\n\ndef write_post_ids(outfile, thread_id):\n d = SQLiteDAO()\n l = d.posts_from_thread_generator(thread_id)\n with open(outfile, 'w') as out:\n for i, p in enumerate(l):\n out.write(str(p[0]) + \"\\n\")\n print(\"Done\")\n\n\ndef get_random_thread_id(has_posts=True):\n d = SQLiteDAO()\n table_name = 'posts' if has_posts else 'threads'\n df = d.sql_df(\"SELECT thread_id FROM {} ORDER BY RANDOM() LIMIT 1\".format(table_name))\n return df.iloc[0, 0]\n\n\nclass Loader(Generic[T], metaclass=ABCMeta): # python generics are fucking retarded\n\n @abstractmethod\n def load(self, obj) -> T:\n raise NotImplementedError\n\n @abstractmethod\n def write(self, obj: T) -> None:\n raise NotImplementedError\n\n\nclass SQLitePostLoader(Loader[Post]):\n def load(self, obj) -> Post:\n pass\n\n def write(self, obj: Post) -> None:\n pass\n\n\n\nif __name__ == '__main__':\n d = SQLiteDAO()\n thread_ids = d.thread_ids_for_forum_id(44)\n thread_ids.append(d.thread_ids_for_forum_id(46))\n output = 'posts-full-pipe-delimited.csv'\n # for i, tid in enumerate(thread_ids):\n # posts = d.posts_from_thread_generator(tid, chunksize=1000)\n # write_to_csv(posts, output, delimiter='|', trim_newlines=True)\n # print(\"Wrote {} of {} threads\".format(i + 1, len(thread_ids)))\n", "id": "12007982", "language": "Python", "matching_score": 6.596076965332031, "max_stars_count": 0, "path": "python/goonalytics/io/baseio.py" }, { "content": "\"\"\"\nVarious IO crap for the sqlite db\n\"\"\"\nimport json\nfrom datetime import datetime\n\nimport unicodecsv\nfrom elasticsearch import Elasticsearch\nfrom thread_scraper import SQLiteDAO\n\nes = Elasticsearch('http://elastic:changeme@localhost:9200/')\n\n\ndef example():\n # make a retarded json\n doc = {\n 'author': 'kimchy',\n 'text': 'Elasticsearch: cool. 
bonsai cool.',\n 'timestamp': datetime.now(),\n }\n # this shit all basically just calls the REST API\n res = es.index(index=\"test-index\", doc_type='tweet', id=1, body=doc)\n print(res['created'])\n\n res = es.get(index=\"test-index\", doc_type='tweet', id=1)\n print(res['_source'])\n\n es.indices.refresh(index=\"test-index\")\n\n res = es.search(index=\"test-index\", body={\"query\": {\"match_all\": {}}})\n print(\"Got %d Hits:\" % res['hits']['total'])\n for hit in res['hits']['hits']:\n print(\"%(timestamp)s %(author)s: %(text)s\" % hit[\"_source\"])\n\n\ndef index_forum(forum_id, as_ascii=False):\n dao = SQLiteDAO()\n tgen = dao.thread_ids_for_forum_id(forum_id)\n for t in tgen:\n index_thread(forum_id, t, posts_as_ascii=as_ascii)\n\n\ndef index_thread(forum_id, thread_id, posts_as_ascii=False):\n \"\"\"\n Indexes the thread on elastic search.\n :param forum_id:\n :param thread_id:\n :param posts_as_ascii: if set to true, will encode post text as ascii to get rid of unprintable unicode shit\n :return:\n \"\"\"\n dao = SQLiteDAO()\n pgen = dao.posts_from_thread_generator(thread_id)\n i = 0\n inx = str(forum_id) + '-' + str(thread_id)\n if posts_as_ascii: inx += '-ascii'\n for p in pgen:\n text = p[4].encode('ascii', errors='ignore') if posts_as_ascii else p[4]\n post = {\n 'post_id': p[0],\n 'thread_id': p[1],\n 'user_name': p[2],\n 'user_id': p[3],\n 'text': text,\n 'page': p[5],\n 'timestamp': datetime.strptime(p[6], '%Y-%m-%d %H:%M:%S')\n }\n es.index(index=inx, doc_type='post', body=post, id=p[0])\n i += 1\n if i % 100 == 0:\n print(\"Wrote \" + str(i) + \" posts\")\n\n\ndef thread_to_json(thread_id, forum_id, filename):\n \"\"\"\n pulls shit out of sqlite and puts it in a json for each thread\n :param forum_id:\n :param thread_id:\n :param filename:\n :return:\n \"\"\"\n dao = SQLiteDAO()\n gen = dao.posts_from_thread_generator(thread_id)\n obj = {\n 'thread_id': str(thread_id),\n 'forum_id': forum_id,\n 'posts': [{\n\n }]\n }\n i = 0\n for p in gen:\n post_json = {\n 'post_id': p[0],\n 'thread_id': p[1],\n 'user_name': p[2],\n 'user_id': p[3],\n 'text': p[4],\n 'page': p[5],\n 'timestamp': p[6]\n }\n obj['posts'].append(post_json)\n i += 1\n if i % 100 == 0:\n print(\"Appended \" + str(i) + \" posts\")\n with open(filename, 'w+') as out:\n json.dump(obj, out)\n print(\"Wrote file to \" + filename)\n\n\ndef write_to_csv(posts, output, delimiter=',', trim_newlines=False):\n \"\"\"\n writes the posts provided as a unicode csv\n :param posts:\n :param output:\n :param delimiter:\n :return:\n \"\"\"\n with open(output, 'a') as out:\n cout = unicodecsv.writer(out, delimiter=delimiter, encoding='utf-8')\n for i, p in enumerate(posts):\n clean = list(p)\n if trim_newlines:\n clean[4] = p[4].replace('\\n', u'')\n cout.writerow(clean)\n\n\ndef write_post_ids(outfile, thread_id):\n d = SQLiteDAO()\n l = d.posts_from_thread_generator(thread_id)\n with open(outfile, 'w') as out:\n for i, p in enumerate(l):\n out.write(str(p[0]) + \"\\n\")\n print(\"Done\")\n\n\nif __name__ == '__main__':\n d = SQLiteDAO()\n thread_ids = d.thread_ids_for_forum_id(44)\n thread_ids.append(d.thread_ids_for_forum_id(46))\n output = 'posts-full-pipe-delimited.csv'\n for i, tid in enumerate(thread_ids):\n posts = d.posts_from_thread_generator(tid, chunksize=1000)\n write_to_csv(posts, output, delimiter='|', trim_newlines=True)\n print(\"Wrote {} of {} threads\".format(i + 1, len(thread_ids)))\n", "id": "10723053", "language": "Python", "matching_score": 0.519548773765564, "max_stars_count": 0, "path": 
"python/goonalytics/legacy/forums_io.py" }, { "content": "\"\"\"\nDeals with bullshit workarounds because twisted and scrapy suck. Literally takes a bunch of args and spawns subprocesses\nbecause you have to kill the whole thing between scrapers\n\"\"\"\nimport logging\nimport argparse\nimport platform\nimport os\nimport shlex\n\nimport subprocess\n\nlogging.basicConfig()\n\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.DEBUG)\n\nparser = argparse.ArgumentParser()\ngroup = parser.add_mutually_exclusive_group()\nparser.add_argument('user', type=str, help='Forums username')\nparser.add_argument('password', type=str, help='Forums password')\nparser.add_argument('idlist', type=str, help='Forum id(s) to scrape', nargs='+')\nargs = parser.parse_args()\nlog.debug(\"ID list: %s\", args.idlist)\n# raise ConnectionAbortedError('stopping for debug')\nuname = args.user\npasswd = <PASSWORD>\n\n# define strings for subproc commands\nsystem = platform.system()\npython_name = 'python' if system == 'Darwin' else 'python3' # the container\nscrapefile = os.path.abspath(os.path.join(os.getcwd(), os.pardir)) + \"/scraping/bq_scrapers.py\"\n\ncommand_list = list()\nfor forum_id in args.idlist:\n command_list.append(python_name + \" \" + scrapefile + \" \" + uname + \" \" + passwd + \" -t \" + forum_id)\n command_list.append(python_name + \" \" + scrapefile + \" \" + uname + \" \" + passwd + \" -p \" + forum_id)\n\nfor command in command_list:\n # execute one at a time because the container is a chuggin lil turd\n args = shlex.split(command)\n proc = subprocess.Popen(args, stdin=subprocess.PIPE)\n proc.wait()", "id": "11844361", "language": "Python", "matching_score": 5.110901355743408, "max_stars_count": 0, "path": "python/goonalytics/scripts/scraper_runner.py" }, { "content": "\"\"\"\nThe same bullshit workarounds as scraper_runner but this one gets the info from a simple queue server instead of a file.\nAgain, the reason this is necessary is because Twisted reactors can't be restarted, and they'd need to be if we wanted to\nrun thread crawlers followed by post crawlers (and I have zero desire to call their shitty API directly just to make rest calls)\n\"\"\"\n\nimport logging\nimport argparse\nimport platform\nimport os\nimport shlex\n\nimport subprocess\n\n\nfrom goonalytics.scraping.bq_scrapers import get_url_from_server\n\nlogging.basicConfig()\n\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.DEBUG)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('user', type=str, help='Forums username')\nparser.add_argument('password', type=str, help='Forums password')\nparser.add_argument('server', type=str, help='Server with cli args')\nargs = parser.parse_args()\n# raise ConnectionAbortedError('stopping for debug')\nuname = args.user\npasswd = args.password\nserver = args.server\n\n# set up cli crap\nsystem = platform.system()\npython_name = 'python' if system == 'Darwin' else 'python3' # the container\nscrapefile = os.path.abspath(os.path.join(os.getcwd(), os.pardir)) + \"/scraping/bq_scrapers.py\"\n\nwhile True:\n try:\n r = get_url_from_server(server) # actually not a url\n cmd = \" \".join([python_name, scrapefile, uname, passwd, r])\n args = shlex.split(cmd)\n proc = subprocess.Popen(args, stdin=subprocess.PIPE)\n proc.wait()\n except ValueError:\n log.info(\"Server queue exhausted, exiting\")\n raise\n # quit(0)\n\n", "id": "5046626", "language": "Python", "matching_score": 0.653053343296051, "max_stars_count": 0, "path": "python/goonalytics/scripts/flask_runner.py" }, { "content": 
"\"\"\"\nThis file is part of the flask+d3 Hello World project.\n\"\"\"\nimport json\n\nimport flask\nfrom flask import request, jsonify, render_template\n\nimport goonalytics.grafkl as graphs\nfrom goonalytics.io.gcloudio import random_thread_id\nimport logging\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\napp = flask.Flask(__name__, static_folder=\"templates\")\n\n\[email protected](\"/\")\ndef index():\n \"\"\"\n When you request the root path, you'll get the index.html template.\n\n \"\"\"\n return flask.render_template(\"index.html\")\n\[email protected](\"/thread\")\ndef get_graph_data():\n\n \"\"\"\n returns json of a network graph for the specified thread\n :param thread_id:\n :return:\n \"\"\"\n thread_id = request.args.get(\"thread_id\", random_thread_id(post_count_min=200), type=int)\n min_edges = request.args.get(\"min_edges\", 1, type=int)\n pqdict, userdict = graphs.get_post_quote_dict(thread_id)\n G = graphs.create_graph(pqdict)\n s = graphs.graph_to_node_link(G, userdict, min_degree=min_edges)\n return json.dumps(s)\n\n\[email protected](\"/showgraph\")\ndef showgraph():\n min_edges = request.args.get(\"min_edges\", 1, type=int)\n thread_id = request.args.get(\"thread_id\", random_thread_id(post_count_min=200), type=int)\n return render_template(\"threadgraph.html\", threadid=thread_id, minedges=min_edges)\n\n\nif __name__ == \"__main__\":\n import os\n\n port = 8888\n\n # Open a web browser pointing at the app.\n os.system(\"open http://localhost:{0}\".format(port))\n\n # Set up the development server on port 8000.\n app.debug = True\n app.run(port=port)\n", "id": "3555595", "language": "Python", "matching_score": 2.592250108718872, "max_stars_count": 0, "path": "python/goonalytics/web/app.py" }, { "content": "\"\"\"\ngraphs and shit\n\"\"\"\nimport json\nimport logging\nfrom typing import Dict, Set\n\nimport matplotlib.pyplot as plt\nimport networkx as nx\nfrom networkx.readwrite import json_graph as nxjs\n\nfrom goonalytics.io.gcloudio import PostBigQueryer\nfrom goonalytics.settings import GCLOUD_POST_TABLE as tbl\n\nlogging.basicConfig()\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n\ndef get_post_quote_dict(thread_id: int) -> (Dict[int, Set[int]], Dict[int, str]):\n \"\"\"\n some confusing shit that somehow gets data for a graph\n :param thread_id:\n :return: a tuple whose 0-index is a dict suitable for use with create_graph and whose 1-index is a username dict suitable\n for use with graph_to_node_link\n \"\"\"\n # three maps: post id -> quoted post ids, uids -> usernames, post id -> user id\n # this could be simplified with a more complicated query\n pid_qpid_map = dict()\n uid_uname_map = dict()\n postid_uid_map = dict()\n bq = PostBigQueryer()\n # TODO fix this injection thing before making public\n qr = bq.sql_inject('''select post_id, user_id, quoted_post_ids, user_name from %s where thread_id=%d''' % ('forums.' 
+ tbl, thread_id))\n # that query returns repeated rows for each different quoted pid which affects the iteration\n for row in qr.rows:\n post_id = row[0]\n user_id = row[1]\n quote = row[2] if row[2] is not None else 'null'\n user_name = row[3]\n uid_uname_map[user_id] = user_name\n if post_id not in pid_qpid_map.keys():\n pid_qpid_map[post_id] = set()\n pid_qpid_map[post_id].add(quote)\n postid_uid_map[post_id] = user_id\n # next transform the post_ids to the user ids\n pid_qpid_map_intermediate = dict()\n for key in pid_qpid_map.keys():\n user = postid_uid_map[key]\n # next we get the username for each quote\n quote_users = set()\n for val in pid_qpid_map[key]:\n try:\n quote_users.add(postid_uid_map[val])\n except KeyError:\n continue\n pid_qpid_map_intermediate[user] = quote_users\n return pid_qpid_map_intermediate, uid_uname_map\n\n\ndef create_graph(user_dict: Dict[int, Set[int]]) -> nx.Graph:\n \"\"\"\n Creates a networkx.Graph object out of the dict provided\n :param user_dict: a dictionary whose keys are the user id of the vertex and whose values are the set of user ids that that\n user will be connected to\n :return: an nx.Graph instance\n \"\"\"\n g = nx.Graph();\n # add all vertices\n for u in user_dict.keys():\n g.add_node(u)\n for u in user_dict.keys():\n for u2 in user_dict[u]:\n g.add_edge(u, u2)\n return g\n\n\ndef graph_to_node_link(g: nx.Graph, user_name_dict: Dict[int, str]=None, min_degree: int=0) -> dict:\n # logger.debug(\"Graph nodes: %s\", g.nodes())\n # logger.debug(\"Graph edges: %s\", g.edges())\n # add usernames before taking stuff out\n \"\"\"\n transforms the graph to node link for d3js and optionally removes nodes having fewer than a certain number of edges\n :param g: an instance of networkx.Graph\n :param user_name_dict: a mapping of user ids to user names\n :param min_degree: the minimum number of edges for a node to be included\n :return: a dict/json sufficient for use with d3js force layouts etc\n \"\"\"\n if user_name_dict:\n nx.relabel_nodes(g, user_name_dict, copy=False)\n if min_degree > 0:\n outdeg = g.degree()\n to_remove = [n for n in outdeg if outdeg[n] < min_degree]\n g.remove_nodes_from(to_remove)\n data = nxjs.node_link_data(g)\n return data\n\n\ndef graph_to_adjacency(g: nx.Graph) -> str:\n data = nxjs.adjacency_data(g)\n return json.dumps(data)\n\n\nif __name__ == '__main__':\n mp, un = get_post_quote_dict(3763968)\n print(mp)\n print(mp[112394])\n g = create_graph(mp)\n nx.draw(g, labels=un)\n plt.show()\n", "id": "10536549", "language": "Python", "matching_score": 3.427981376647949, "max_stars_count": 0, "path": "python/goonalytics/grafkl.py" }, { "content": "\"\"\"\nAvro and Google Cloud/BigQuery io methods\n\n3/6/2017 - tried to make this project agnostic and probably killed everything\n\"\"\"\nimport json\nimport logging\nimport os\nimport time\nfrom functools import reduce\nfrom queue import Queue\nfrom random import random\nfrom typing import Dict, List, Set, FrozenSet\n\nimport avro\nfrom avro.datafile import DataFileWriter, DataFileReader\nfrom avro.io import DatumWriter, DatumReader\nfrom google.cloud import bigquery\nfrom google.cloud import storage\nfrom google.cloud.bigquery import Table\nfrom google.cloud.bigquery.job import LoadTableFromStorageJob\nfrom google.cloud.bigquery.query import QueryResults\nfrom google.cloud.storage import Blob\nfrom google.cloud.storage import Bucket\nfrom multiprocessing import Process\n\nfrom goonalytics import settings\nfrom goonalytics.base import PostAvro, User, ThreadAvro\nfrom 
goonalytics.scraping.util import current_time_ms\nfrom goonalytics.settings import GCLOUD_STORAGE_BUCKET, POST_SCHEMA_LOCATION, GCLOUD_DATASET_NAME, GCLOUD_POST_TABLE, \\\n GCLOUD_PROJECT_NAME, THREAD_SCHEMA_LOCATION, GCLOUD_THREAD_TABLE, GCLOUD_CREDENTIAL_FILE\n\nlogging.basicConfig()\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n\ndef post_as_json(post: PostAvro) -> json:\n return {\n \"post_id\": post.post_id,\n \"user_id\": post.user_id,\n \"user_name\": post.user_name,\n \"post_text\": post.post_text,\n \"thread_page\": post.thread_page,\n \"post_date_timestamp\": int(time.mktime(post.post_timestamp) * 1000),\n \"quoted_post_ids\": post.quoted_posts,\n \"thread_id\": post.thread_id,\n \"forum_id\": post.forum_id\n }\n\n\ndef threadinfo_as_json(thread: ThreadAvro) -> json:\n return {\n \"forum_id\": thread.forum_id,\n \"thread_id\": thread.thread_id,\n \"thread_title\": thread.thread_title,\n \"author\": thread.author,\n \"author_id\": thread.author_id,\n \"ignore\": thread.ignore\n }\n\n\ndef get_schema(location: str):\n return avro.schema.Parse(open(location, \"r\").read())\n\n\ndef get_post_schema():\n return get_schema(POST_SCHEMA_LOCATION)\n\n\ndef get_thread_schema():\n return get_schema(THREAD_SCHEMA_LOCATION)\n\n\nclass CloudStorager(object):\n def __init__(self, credentials=GCLOUD_CREDENTIAL_FILE, bucket=GCLOUD_STORAGE_BUCKET):\n self.client = storage.Client.from_service_account_json(credentials)\n self.bucket = bucket\n\n def get_cloud_storage_bucket(self) -> Bucket:\n return Bucket(self.client, self.bucket)\n\n\nclass BigQueryer(object):\n def __init__(self, credentials=GCLOUD_CREDENTIAL_FILE, dataset=GCLOUD_DATASET_NAME, table=GCLOUD_POST_TABLE):\n self.client = bigquery.Client.from_service_account_json(credentials)\n self.dataset = dataset\n self.table = table\n\n def get_bigquery_table(self) -> Table:\n dataset = self.client.dataset(self.dataset)\n return dataset.table(self.table)\n\n def sql_inject(self, query: str) -> QueryResults:\n logger.debug(\"Executing synchronous query: %s\", query)\n qr = self.client.run_sync_query(query)\n qr.run()\n return qr\n\nclass AvroWriter(object):\n \"\"\"\n this class buffers input since posts are fed one at a time, once it has whatever\n number it writes them all to a file\n \"\"\"\n\n def __init__(self, filename: str, buffer_size=40, schema=POST_SCHEMA_LOCATION, tojson: callable = post_as_json,\n bq=BigQueryer(), cs=CloudStorager(), max_filesize=13000000):\n self.queue = Queue(maxsize=buffer_size)\n self.filename = filename\n self.max_filesize = max_filesize\n self.filename_original = filename\n self.file_partition = 1\n self.schema = get_schema(schema)\n self.tojson = tojson\n self.target_writer = self.initialize_writer()\n self.bq = bq\n self.cs = cs\n\n def initialize_writer(self) -> DataFileWriter:\n return DataFileWriter(open(self.filename, 'wb+'), DatumWriter(), self.schema)\n\n def submit(self, item):\n # logger.debug(\"Queue size: %d\", self.queue.qsize())\n if self.queue.full():\n self.write()\n if random() < 0.05: # log 5% of them, there's a lot\n logger.debug(\"Enqueuing value: %s\", item)\n self.queue.put(item)\n\n\n def write(self):\n while not self.queue.empty():\n if os.path.getsize(self.filename) > self.max_filesize:\n logger.warning(\"output file has exceeded maximum size--uploading\")\n current_fname = self.filename\n self.target_writer.flush()\n p = Process(target=self.commit(current_fname))\n p.start()\n self.increment_filename()\n post = self.queue.get()\n pj = self.tojson(post)\n # 
logger.debug(\"Writing post: %s\", pj)\n self.target_writer.append(pj)\n\n def increment_filename(self):\n self.file_partition += 1\n self.filename = self.filename_original + '-part' + str(self.file_partition)\n self.target_writer.close()\n self.target_writer = self.initialize_writer()\n\n def close_and_commit(self):\n self.write()\n self.target_writer.close()\n self.commit(self.filename)\n return\n\n def commit(self, filename):\n try:\n txfr_blob(filename, bq=self.bq, cs=self.cs)\n except RuntimeError:\n logger.critical(\"Critical error transferring binary object: {} Creating new file...\".format(filename))\n self.increment_filename()\n\n\n\nclass AvroThreadWriter(AvroWriter):\n \"\"\"\n made for threads\n \"\"\"\n\n def __init__(self, filename: str, buffer_size=40):\n super().__init__(filename, buffer_size, schema=THREAD_SCHEMA_LOCATION, tojson=threadinfo_as_json,\n bq=ThreadBigQueryer())\n\n\nclass PostBigQueryer(BigQueryer):\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def find_last_updated(self, forum_id: int = 44) -> Dict[int, int]:\n query = \"SELECT thread_id, max(thread_page) FROM \" + fully_qualified_tablename(\n GCLOUD_POST_TABLE) + \" where forum_id=%d group by thread_id\" % int(forum_id)\n qr = self.client.run_sync_query(query)\n output = dict()\n logger.info(\"Executing synchronous query on Google Cloud...\")\n qr.run()\n for row in qr.rows:\n output[row[0]] = row[1]\n return output\n\n def get_threadlist(self, forum_id: int) -> List[int]:\n dataset = self.sql_inject(\"select thread_id from \" + fully_qualified_tablename(\n GCLOUD_THREAD_TABLE) + \" where forum_id=%s and [ignore]=false group by thread_id\" % forum_id)\n out = list()\n for row in dataset.rows:\n out.append(row[0])\n return out\n\n\nclass ThreadBigQueryer(PostBigQueryer):\n def __init__(self):\n super().__init__(table=GCLOUD_THREAD_TABLE)\n\n\ndef evaluate_file(fname: str):\n logger.info(\"Opening file %s\", fname)\n reader = DataFileReader(open(fname, \"rb\"), DatumReader())\n logger.info(\"Counting lines...\")\n i = 0\n for val in reader:\n i += 1\n if i % 1000 == 0:\n logger.debug(\"Read %d lines\", i)\n logger.info(\"Found %d lines in file\", i)\n\n\n\ndef txfr_blob(filename: str, bq: BigQueryer = PostBigQueryer(),\n cs: CloudStorager = CloudStorager()):\n \"\"\"\n uploads the blob to bigquery. This would probably be better as a shell script\n :param cs:\n :param bq:\n :param bucket:\n :param filename:\n :return:\n \"\"\"\n tm = current_time_ms() # pain in the ass to get nanotime in python apparently\n objname = 'api-update-blob-{}'.format(tm)\n blob = Blob(objname, cs.get_cloud_storage_bucket())\n logger.info(\"Uploading file (this will take a long time)... 
\")\n blob.upload_from_filename(filename)\n # change this to change table\n table = bq.get_bigquery_table()\n uri = 'gs://' + cs.bucket + \"/\" + objname\n logger.info(\"Loading file to BQ...\")\n # insert into tmp table\n # tmptable = bq.client.dataset('forums').table(objname)\n job = LoadTableFromStorageJob('api-job-{}'.format(tm), table, [uri], client=bq.client)\n job.write_disposition = 'WRITE_APPEND'\n job.source_format = 'AVRO'\n job.begin()\n wait_for_job(job)\n logger.info(\"Cleaning up...\")\n blob.delete(cs.client)\n\n\ndef load_data_from_file(source_file_name):\n bq = PostBigQueryer()\n table = bq.get_bigquery_table()\n\n # Reload the table to get the schema(?)\n table.reload()\n\n with open(source_file_name, 'rb') as source_file:\n job = table.upload_from_file(\n source_file,\n source_format='AVRO',\n write_disposition='WRITE_APPEND',\n create_disposition='CREATE_NEVER')\n\n wait_for_job(job)\n\n print('Loaded {} rows'.format(\n job.output_rows))\n\n\ndef wait_for_job(job):\n while True:\n job.reload()\n if job.state == 'DONE':\n if job.error_result:\n raise RuntimeError(job.errors)\n return\n time.sleep(1)\n\n\ndef get_posts_for_user(user_id: int) -> str:\n query = ''' select post_text from %s where user_id=%d ''' % (fully_qualified_tablename(GCLOUD_POST_TABLE), user_id)\n bq = PostBigQueryer()\n qres = bq.sql_inject(query)\n from functools import reduce\n posts = [row[0] for row in qres.rows]\n return reduce(lambda x, y: x + ' ' + y, posts)\n\n\ndef random_user(post_count_min: int = 0) -> User:\n \"\"\"\n :param post_count_min: minimum post count for the user\n :return:\n \"\"\"\n query = ''' select user_id, user_name, min(rand()) as rand from %s group by user_id, user_name having count(user_id) > %d order by rand limit 1''' % (\n GCLOUD_POST_TABLE, post_count_min)\n bq = PostBigQueryer()\n res = bq.sql_inject(query).rows[0]\n return User(user_id=res[0], user_name=res[1])\n\n\ndef get_thread_posts(thread_id: int) -> Dict[int, str]:\n \"\"\"\n Gets all posts from the thread and returns them as a dict grouped by user ID\n :param thread_id:\n :return:\n \"\"\"\n query = ''' select user_id, post_text from %s where thread_id = %d''' % (\n fully_qualified_tablename(GCLOUD_POST_TABLE), thread_id)\n bq = PostBigQueryer()\n res = bq.sql_inject(query)\n output = dict()\n for row in res.rows:\n user_id = row[0]\n post_text = row[1]\n if user_id not in output.keys():\n output[user_id] = list()\n output[user_id].append(post_text)\n # reduce the values\n output_reduced = dict()\n for k in output.keys():\n output_reduced[k] = reduce(lambda x, y: x + ' ' + y, output[k])\n return output_reduced\n\n\ndef random_thread_id(post_count_min: int = 0) -> int:\n \"\"\"\n Returns a random thread with the minimum post count provided\n :param post_count_min:\n :return:\n \"\"\"\n query = ''' select thread_id, min(rand()) as rand from %s group by thread_id having count(post_id) > %d order by rand limit 1 ''' % (\n fully_qualified_tablename(GCLOUD_POST_TABLE), post_count_min)\n qres = PostBigQueryer().sql_inject(query)\n return qres.rows[0][0]\n\n\n# deprecated - there's no point in uniquing out post IDs beforehand because it takes more I/O than just copying the table\ndef get_post_ids_for_thread(thread_id: int, thread_page: int) -> FrozenSet[int]:\n tbl = fully_qualified_tablename(GCLOUD_POST_TABLE)\n qr = PostBigQueryer().sql_inject(\n \"select post_id from \" + tbl + \" where thread_id=%d and thread_page=%d group by post_id\" % (\n thread_id, thread_page))\n out = set()\n for row in qr.rows:\n 
out.add(row[0])\n return frozenset(out)\n\n\ndef get_thread_ids_for_forum(forum_id: int) -> FrozenSet[int]:\n qr = PostBigQueryer().sql_inject(\n \"select thread_id from \" + fully_qualified_tablename(GCLOUD_THREAD_TABLE) + \" where forum_id=%d group by thread_id\" % forum_id)\n out = set()\n for row in qr.rows:\n out.add(row[0])\n return frozenset(out)\n\n\ndef fully_qualified_tablename(table: str) -> str:\n return \"[\" + GCLOUD_PROJECT_NAME + \":\" + GCLOUD_DATASET_NAME + \".\" + table + \"]\"\n", "id": "7754881", "language": "Python", "matching_score": 3.419630289077759, "max_stars_count": 0, "path": "python/goonalytics/io/gcloudio.py" }, { "content": "import platform\n\npname = platform.system()\nPARENT_DIR = \"/Users/blevine/goonalytics\" if pname == 'Darwin' else \"/goonalytics\"\n\nGCLOUD_STORAGE_BUCKET = 'staging.empyrean-bridge-150804.appspot.com'\nPOST_SCHEMA_LOCATION = PARENT_DIR + \"/resources/post-avro-schema.avsc\"\nTHREAD_SCHEMA_LOCATION = PARENT_DIR + \"/resources/thread-avro-schema.avsc\"\nGCLOUD_POST_TABLE = 'posts_raw'\nGCLOUD_THREAD_TABLE = 'threads'\nGCLOUD_PROJECT_NAME = 'empyrean-bridge-150804'\nGCLOUD_DATASET_NAME = 'forums'\nDATABASE_LOCATION = PARENT_DIR + '/resources/games_crawl.sqlite'\nELASTIC_LOCATION = 'http://ubentu.local:5601'\n\nGCLOUD_CREDENTIAL_FILE = PARENT_DIR+'/resources/gcloud-cred.json'\n", "id": "8389650", "language": "Python", "matching_score": 1.3388155698776245, "max_stars_count": 0, "path": "python/goonalytics/settings.py" }, { "content": "\"\"\"\nvarious neato computations on posts. A lot of this was abandoned/postponed in favor of ipynb, web dev shit and porting\nall this to google cloud\n\"\"\"\nfrom functools import reduce\nfrom typing import List, NamedTuple, Dict\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nfrom goonalytics.base import PostAvro, User\nfrom goonalytics.io.gcloudio import PostBigQueryer, random_thread_id, get_thread_posts\nfrom goonalytics.settings import GCLOUD_POST_TABLE\n\n\ndef noisiest_user(posts: List[PostAvro]) -> User:\n \"\"\"\n returns the poster with the lowest post to quote ratio\n :param posts:\n :return:\n \"\"\"\n pass\n\n\ndef most_oblivious_user(posts: List[PostAvro], min_post_count=10) -> User:\n \"\"\"\n the user with the lowest quote to post ratio\n :param min_post_count: minimum posts required before including a user\n :param posts:\n :return:\n \"\"\"\n pass\n\n\ndef most_quoted_user(posts: List[PostAvro]) -> User:\n pass\n\n\ndef most_cusses(posts: List[PostAvro]) -> User:\n pass\n\n\ndef least_cusses(posts: List[PostAvro]) -> User:\n pass\n\n\ndef do_LDA(posts: List[List[str]], number_of_topics: int=10):\n \"\"\"\n Latent Dirichlet Allocation.\n :param posts:\n :return:\n \"\"\"\n pass\n\n\ndef find_similar(posts: List[List[str]]):\n pass\n\n\ndef try_tfidf():\n tposts = get_thread_posts(random_thread_id(post_count_min=4000))\n clf = TfidfVectorizer(input='content', stop_words='english', analyzer='word', norm='l2')\n clf.fit(tposts.values())\n\n\n\n", "id": "1340318", "language": "Python", "matching_score": 2.549316644668579, "max_stars_count": 0, "path": "python/goonalytics/computations.py" }, { "content": "\"\"\"\nBase classes\n\"\"\"\n\nfrom datetime import datetime\nfrom typing import NamedTuple, Tuple\n\n# these could be made PostItems and then scrapy could do concurrent shit, but the bottleneck is Comcast anyway\nThread = NamedTuple('Thread', [('thread_id', int),\n ('title', str),\n ('author', str),\n ('views', int), # TODO don't give a shit about this\n ('replies', int), 
# TODO see above\n ('forum', int),\n ('retrieved_date', datetime)])\n\nPost = NamedTuple('Post', [('post_id', int),\n ('thread_id', int),\n ('user_id', int),\n ('user_name', str),\n ('post_text', str),\n ('thread_page', int),\n ('post_timestamp', datetime),\n ('retrieved_date', datetime)])\n\nPostAvro = NamedTuple('PostAvro', [('post_id', int),\n ('user_id', int),\n ('user_name', str),\n ('post_text', str),\n ('thread_page', int),\n ('post_timestamp', datetime),\n ('thread_id', int),\n ('forum_id', int),\n ('quoted_posts', Tuple[int])])\n\nThreadAvro = NamedTuple('ThreadAvro', [('forum_id', int),\n ('thread_id', int),\n ('thread_title', str),\n ('author', str),\n ('author_id', int),\n ('ignore', bool)])\n\nUser = NamedTuple('User', [('user_id', int), ('user_name', str)])\n", "id": "2430740", "language": "Python", "matching_score": 2.191885471343994, "max_stars_count": 0, "path": "python/goonalytics/base.py" }, { "content": "\"\"\"\nCreated on Apr 2, 2016\n\nDear Ben Plus Six Months,\n\nThis is a scraper tailored specifically for the something awful forums. I cannot\nfor the fucking life of me remember how to run it. It requires twisted, which requires\nPython 2.7. It writes to a SQLite database somewhere on the local HDD.\n\nThe phases of our project are as follows:\n\nPhase 1: Scrape an absolute fuckton more shit\nPhase 2: Use tensorflow to do cool shit whilst we learn tensorflow\nPhase 2.5: Use ElasticSearch/Kibana to do some visualizations--this is partially implemented in forums_elastic.py\nPhase 3: Try doing some entity extraction garbage, possibly try the tagging thing\neither with TF or else a linear SVM from sklearn. Our \"production\" project will\nbe hundreds of thousands of labels.\nPhase Zero Dark Thirty Alpha Zulu: make this less of a piece of shit, runnable\nfrom the command line\nPhase Lima Serpico Mike Mike Six Niner: github it?\n\n@author: blevine\n\"\"\"\n\nimport logging as log\nimport re\nimport sqlite3 as sql\nfrom datetime import datetime\nfrom functools import reduce\nfrom time import sleep\nfrom urllib.parse import urlparse\n\nimport lxml.html as ht\nimport scrapy\nfrom scrapy.contrib.spiders.init import InitSpider\nfrom scrapy.crawler import CrawlerProcess\nfrom scrapy.http import Request, FormRequest\nfrom scrapy.utils.project import get_project_settings\n\nDATABASE_LOCATION = '/Users/blevine/saforums/games_crawl.sqlite'\n\n\ndef make_shit_comma_separated_func(x, y):\n return str(x) + ',' + str(y)\n\n\nclass ThreadSpider(scrapy.Spider):\n \"\"\"\n A really basic spider that just gets basic thread info\n \"\"\"\n name = \"threadspider2\"\n\n def __init__(self, username='', password='', ids='', **kwargs):\n super(ThreadSpider, self).__init__(**kwargs)\n idlist = ids.split(',')\n self.start_urls = ['http://forums.somethingawful.com/forumdisplay.php?forumid=' + str(id) for id in idlist]\n\n allowed_domains = {'forums.somethingawful.com'}\n\n def parse(self, response):\n \"\"\"\n Parses the first 8ish pages\n \"\"\"\n print(\"Extracting...\")\n for item in self.response_transform(response):\n self.response_load(item)\n for i in range(1, 8):\n url = 'http://forums.somethingawful.com/' + response.xpath('//a[@title=\"Next page\"]/@href').extract()[0]\n print(str(url))\n sleep(0.2)\n print(\"Iterating in parse: \" + str(url))\n yield scrapy.Request(url, callback=self.parse)\n\n def response_transform(self, response):\n \"\"\"\n Makes a list of items from the response\n \"\"\"\n forum_id = self.extract_forum_id_from_url(response.url)\n print(str(forum_id))\n thread_strings 
= response.xpath('//tbody/tr[@class=\"thread\"]/@id').extract() # gives 'thread#######'\n thread_authors = response.xpath('//tbody//td[@class=\"author\"]/a/text()').extract()\n titles = response.xpath('//a[@class=\"thread_title\"]/text()').extract()\n views = response.xpath('//td[@class=\"views\"]/text()').extract()\n replies = response.xpath('//td[@class=\"replies\"]/text()').extract()\n # parse everything\n for i in range(0, 40):\n thnum = re.search('(\\d{7})', thread_strings[i]).group(0)\n author = thread_authors[i]\n title = titles[i]\n vw = views[i]\n reply = replies[i]\n if views == '-' or reply == '-': # admin threads, dgaf\n continue\n # print(str([thread_authors,titles,views,replies]))\n item = ThreadItem(int(thnum), title, author, int(vw), int(reply), int(forum_id))\n yield item\n\n @staticmethod\n def response_load(items):\n print(\"Inserting: \" + str(items.fields))\n connection = sql.connect(DATABASE_LOCATION)\n c = connection.cursor()\n c.execute(\n \"INSERT OR IGNORE INTO threads (thread_id, title, author, views, replies, forum_id) VALUES (?, ?, ?, ?, ?, ?)\",\n items.fields)\n connection.commit()\n connection.close()\n return\n\n @staticmethod\n def extract_forum_id_from_url(url):\n q = urlparse(url).query\n fid = re.search('(?<=forumid=)\\\\d{2,}', q).group(0)\n return fid\n\n\nclass ThreadItem(object):\n def __init__(self, thread_id, title, author, views, replies, forum):\n self.fields = (thread_id, title, author, views, replies, forum)\n return\n\n\nclass PostSpider(InitSpider):\n \"\"\"\n Gets all the posts from whatever thread\n \"\"\"\n\n ROOT_BEER = 3630852 # done\n BLOPS_3 = 3716726\n BLOPS = 3387110 # done\n MW_2 = (3311154, 3227086, 3239216) # not done, not done, not done\n WAW = 3007345 # done\n COD4 = (2756848, 3093899) # done, done\n\n GOATS = 3564911 # done\n BLOPS_2 = (3522296, 3482470) # not done, done\n MW3 = (3446068, 3461453) # done, done\n\n TITANFALL_2 = 3782388 # titanfall\n\n # blops 2[0], mw_2[1, all],\n\n name = 'postspider'\n allowed_domains = 'forums.somethingawful.com', 'somethingawful.com'\n # start_urls= ['https://forums.somethingawful.com/showthread.php?threadid=3782388']\n login_page = 'https://forums.somethingawful.com/account.php?action=loginform#form'\n\n def __init__(self, username='', password='', urls='', *args, **kwargs):\n super(PostSpider, self).__init__(*args, **kwargs)\n self.uname = username\n self.password = password\n self.start_urls = urls.split(',') # this is a bullshit hack but the constructor param needs to be a str\n\n @staticmethod\n def urls_from_comma_sep_str(threads):\n urls = ['https://forums.somethingawful.com/showthread.php?threadid=' + t for t in threads.split(',')]\n return reduce(make_shit_comma_separated_func, urls)\n\n @staticmethod\n def urls_from_dict(thread_map):\n thread_ids = thread_map.keys()\n urls = list()\n for curr_id in thread_ids:\n curr_page = thread_map[curr_id]\n url = 'https://forums.somethingawful.com/showthread.php?threadid=' + str(\n curr_id) + '&userid=0&perpage=40&pagenumber=' + str(curr_page)\n urls.append(url)\n return reduce(make_shit_comma_separated_func, urls)\n\n def init_request(self):\n return Request(url=self.login_page, callback=self.login)\n\n def login(self, response):\n \"\"\"\n logs into the forums\n :param response:\n :return:\n \"\"\"\n return FormRequest.from_response(response,\n #\n #\n formdata={'username': self.uname, 'password': <PASSWORD>,\n 'checked': 'checked'}, # \"checked\" is the \"use https\" checkbox, dgaf\n formxpath='//form[@class=\"login_form\"]', 
callback=self.verify_login)\n\n def verify_login(self, response):\n \"\"\"\n Makes sure the login didn't fuck up\n :param response:\n :return:\n \"\"\"\n if u'<b>Clicking here makes all your wildest dreams come true.</b>' in response.xpath(\n '//div[@class=\"mainbodytextsmall\"]//b').extract():\n log.info('Login successful')\n return self.initialized()\n else:\n log.error('Login failure')\n log.debug(response.xpath('//div[@class=\"mainbodytextsmall\"]//b').extract())\n return\n\n # def parse(self, response):\n # self.post_transform(response)\n\n def parse(self, response):\n \"\"\"\n This is an override of a spider method\n :param response:\n :return:\n \"\"\"\n print(\"Extracting...\")\n items = self.post_transform(response)\n for item in items:\n self.post_load(item)\n url_base = 'http://forums.somethingawful.com/'\n url = response.xpath('//a[@title=\"Next page\"]/@href').extract()\n if len(url) > 0:\n url = url_base + url[0]\n log.debug(str(url))\n else:\n log.debug(str(url))\n raise IndexError(\"No next page for thread!\")\n sleep(0.2)\n # log.debug(\"Iterating in parse: \" + str(url))\n yield scrapy.Request(url, callback=self.parse)\n\n def post_transform(self, response):\n \"\"\"\n xpath's the pluperfect fuck out of the html response to get what we want. Will yield 40 posts per page or throw an IndexError,\n I don't remember if there's a reason I did it this way instead of len(post_text) or something\n :param response:\n :return:\n \"\"\"\n thread_id_raw = response.xpath('//div[@id=\"thread\"]/@class').extract()\n thread_id = [re.search('(\\d{7})', val).group(0) for val in thread_id_raw]\n post_text = self.posts_from_response(response)\n page = response.xpath('//option[@selected=\"selected\"]/@value').extract()[0]\n if page < 0: page = 1\n post_users = response.xpath('//dl[@class=\"userinfo\"]/dt/text()').extract()\n post_user_ids_raw = response.xpath('//ul[@class=\"profilelinks\"]/li/a/@href').extract()\n # returns something like \"user12345\", apparently these two statements can be combined\n post_user_ids = [re.search('(\\d+)', x).group(0) for x in post_user_ids_raw[0::2]]\n post_timestamp_raw = response.xpath('//td[@class=\"postdate\"]/text()').extract()\n post_timestamp = self.clean_dates(post_timestamp_raw)\n post_ids_raw = response.xpath('//div[@id=\"thread\"]//table/@id').extract()\n post_ids = [re.search('(\\d+)', x).group(0) for x in post_ids_raw]\n for i in range(0, len(post_text)): # 40 posts per page, will die on the last page\n post = post_text[i]\n user = post_users[i]\n user_id = post_user_ids[i]\n tstamp = post_timestamp[i]\n post_id = post_ids[i]\n item = PostItem(int(thread_id[0]), post, int(page), user, tstamp, int(user_id), int(post_id))\n yield item\n\n def clean_text(self, raw_posts):\n \"\"\"\n Returns a list of only text posts--ignores stuff like people just posting ^ or emoticons\n :param raw_posts:\n :return:\n \"\"\"\n textonly = [x for x in raw_posts if re.search('(\\w+)', x) is not None]\n return textonly\n\n def posts_from_response(self, response):\n \"\"\"\n Takes the http response and does xpath shit to extract the actual post\n :param response:\n :return:\n \"\"\"\n posts = response.xpath('//td[@class=\"postbody\"]').extract()\n # that gets a list (length=40) of all the posts, now we exclude quotes\n pfilter = list()\n for p in posts:\n dom = ht.fromstring(p) # so we can xpath again\n post = dom.xpath('//*/text()[not(ancestor::*[@class=\"bbc-block\"]) and not(ancestor::*[@class=\"editedby\"])]')\n # that returns a list of lists of strings, so we 
concatenate the entries in the sublists\n post_clean = reduce(lambda s1, s2: s1 + s2, post)\n pfilter.append(post_clean)\n return pfilter\n\n def clean_dates(self, raw_dates):\n \"\"\"\n Puts the dates in a format that can be parsed by sql stuff\n :param raw_dates: a list of date text obtained by xpathing the html\n :return: python date objects\n \"\"\"\n space_removed = [str(x).strip() for x in raw_dates]\n no_blanks = filter(None, space_removed)\n date_objs = [datetime.strptime(x, '%b %d, %Y %H:%M') for x in no_blanks]\n return date_objs\n\n def post_load(self, post):\n \"\"\"\n Inserts the post into the db\n :param post: a Post object with its fields filled out\n :return: void\n \"\"\"\n log.info(\"Inserting: \" + str(post.fields[6]) + \" Page: \" + str(post.fields[2]))\n connection = sql.connect(DATABASE_LOCATION)\n c = connection.cursor()\n c.execute(\n \"INSERT OR IGNORE INTO posts (thread_id, post_text, thread_page, user_name, post_timestamp, user_id, post_id) VALUES (?, ?, ?, ?, ?, ?, ?)\",\n post.fields)\n connection.commit()\n connection.close()\n return\n\n\nclass PostItem(object): # is there a reason to use scrapy.Item?\n def __init__(self, thread_id, post_text, page, post_user, post_timestamp, user_id, post_id):\n self.fields = (thread_id, post_text, page, post_user, post_timestamp, user_id, post_id)\n return\n\n", "id": "2208583", "language": "Python", "matching_score": 7.983436107635498, "max_stars_count": 0, "path": "python/goonalytics/legacy/thread_scraper.py" }, { "content": "import logging\nimport re\nfrom functools import reduce\nfrom time import sleep\nfrom typing import List, Iterable, Dict\n\nimport lxml.html as ht\nimport requests\nimport scrapy\nfrom lxml.etree import XMLSyntaxError\nfrom scrapy import FormRequest\nfrom scrapy import Request\nfrom scrapy import signals\nfrom scrapy.crawler import CrawlerProcess\nfrom scrapy.http import Response\nfrom scrapy.spiders.init import InitSpider\nfrom scrapy.xlib.pydispatch import dispatcher\n\nfrom goonalytics.base import PostAvro, ThreadAvro\nfrom goonalytics.io.gcloudio import AvroWriter, AvroThreadWriter, PostBigQueryer, get_thread_ids_for_forum\nfrom goonalytics.scraping.util import remove_emojis, whitespace_regex, extract_forum_id_from_url, clean_dates, re_quote, \\\n re_post_id, get_scrapy_settings, urls_from_dict\n\nlogging.basicConfig()\n\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.INFO)\n\n# this is a huge pain in the ass to define on the fly\nexcluded_post_ids = set()\n\nFORUM_URL = 'http://forums.somethingawful.com/forumdisplay.php?forumid='\n\n# two methods below here are for getting url info from a server for doing things async\ndef get_urls(url: str):\n while True:\n yield get_url_from_server(url)\n\ndef get_url_from_server(url: str):\n resp = requests.get(url)\n if resp.text is None:\n raise ValueError\n log.debug(\"Response: {}\".format(resp.text))\n return resp.text\n\n\nclass BQThreadSpider(scrapy.Spider):\n \"\"\"\n A really basic spider that just gets basic thread info\n \"\"\"\n name = \"bq-threadspider\"\n\n def __init__(self, username='', password='', dry_run='', forum_id='', **kwargs):\n super(BQThreadSpider, self).__init__(**kwargs)\n idlist = forum_id.split(',')\n self.dry_run = bool(dry_run)\n self.start_urls = ['http://forums.somethingawful.com/forumdisplay.php?forumid=' + forumid for forumid in idlist]\n excluded = [get_thread_ids_for_forum(int(i)) for i in idlist]\n self.excluded = frozenset(excluded)\n log.debug(\"Start urls: %s\", self.start_urls)\n self.loader = 
AvroThreadWriter('output.avro.tmp', buffer_size=520)\n dispatcher.connect(self.quit, signals.spider_closed)\n\n def quit(self):\n self.loader.close_and_commit()\n\n allowed_domains = {'forums.somethingawful.com'}\n\n def parse(self, response):\n \"\"\"\n Parses the first 8ish pages\n \"\"\"\n log.info(\"Extracting...\")\n if not self.dry_run:\n for item in self.response_transform(response):\n if item.thread_id not in self.excluded:\n self.loader.submit(item)\n for i in range(1, 8):\n url = 'http://forums.somethingawful.com/' + response.xpath('//a[@title=\"Next page\"]/@href').extract()[0]\n sleep(0.2)\n print(\"Iterating in parse: \" + str(url))\n yield scrapy.Request(url, callback=self.parse)\n\n thread_author_ids_regex = re.compile(r'(?<=userid=)(\\d+)')\n\n thread_num_id_regex = re.compile('(\\d{7})')\n\n @staticmethod\n def response_transform(response: Response):\n \"\"\"\n Makes a list of items from the response\n \"\"\"\n forum_id = extract_forum_id_from_url(response.url)\n thread_strings = response.xpath('//tbody/tr[contains(@class,\"thread\")]/@id').extract() # gives 'thread#######'\n thread_authors = response.xpath('//tbody/tr[@id]/td[@class=\"author\"]/a/text()').extract()\n thread_author_ids = BQThreadSpider.get_thread_author_ids(response)\n titles = response.xpath('//a[@class=\"thread_title\"]/text()').extract()\n\n if not (len(titles) == len(thread_author_ids) and len(thread_author_ids) == len(thread_authors) and len(thread_authors) == len(thread_strings)):\n log.warning(\"WARNING Extracted components do not match on page %s--titles: \\t %d \\n author ids: \\t %d \\n authors: %d \\n threadids: \\t %d\",\n response.url,\n len(titles),\n len(thread_author_ids),\n len(thread_authors),\n len(thread_strings))\n # parse everything\n for i in range(0, len(thread_strings)):\n thnum = re.search('(\\d{7})', thread_strings[i]).group(0)\n author = thread_authors[i]\n title = titles[i]\n aid = thread_author_ids[i]\n # print(str([thread_authors,titles,views,replies]))\n item = ThreadAvro(int(forum_id), int(thnum), title, author, int(aid), False)\n yield item\n\n @staticmethod\n def get_thread_author_ids(response: Response):\n xp = response.xpath('//tr[@id]/td[@class=\"author\"]/a').extract()\n return [BQThreadSpider.thread_author_ids_regex.search(t).group(0) for t in xp]\n\n\nlogin_page = 'https://forums.somethingawful.com/account.php?action=loginform#form'\n\nclass BQPostSpider(InitSpider):\n \"\"\"\n Gets all the posts from whatever thread\n \"\"\"\n\n name = 'bq-postspider'\n allowed_domains = 'forums.somethingawful.com', 'somethingawful.com'\n # start_urls= ['https://forums.somethingawful.com/showthread.php?threadid=347802']\n\n def __init__(self, username='', password='', forum_id='', dry_run='', urls='', **kwargs):\n super(BQPostSpider, self).__init__(**kwargs)\n self.uname = username\n self.password = password\n log.debug(\"User name: %s \\t Password: %s\", username, password)\n self.forum_id = int(forum_id)\n self.dry_run = True if dry_run is 'True' else False\n self.start_urls = urls.split(',') # this is a bullshit hack but the constructor param needs to be a str\n self.loader = AvroWriter(\"output.avro.tmp\", buffer_size=520)\n self.progress = dict()\n\n # lastly add a shutdown hook\n dispatcher.connect(self.quit, signals.spider_closed)\n\n def quit(self):\n self.loader.close_and_commit()\n\n def init_request(self):\n return Request(url=login_page, callback=self.login)\n\n def login(self, response):\n \"\"\"\n logs into the forums\n :param response:\n :return:\n \"\"\"\n return 
FormRequest.from_response(response,\n formdata={'username': self.uname, 'password': <PASSWORD>,\n 'checked': 'checked'}, # \"checked\" is the \"use https\" checkbox, dgaf\n formxpath='//form[@class=\"login_form\"]', callback=self.verify_login)\n\n def verify_login(self, response: Response) -> None:\n \"\"\"\n Makes sure the login didn't fuck up\n :param response:\n :return:\n \"\"\"\n if u'<b>Clicking here makes all your wildest dreams come true.</b>' in response.xpath(\n '//div[@class=\"mainbodytextsmall\"]//b').extract():\n log.info('Login successful')\n return self.initialized()\n else:\n log.error('Login failure')\n log.debug(response.xpath('//div[@class=\"mainbodytextsmall\"]//b').extract())\n return\n\n def parse(self, response: Response):\n \"\"\"\n This is an override of a spider method\n :param response:\n :return:\n \"\"\"\n print(\"Extracting...\")\n items = self.post_transform_avro(response)\n if not self.dry_run:\n for item in items:\n self.loader.submit(item)\n url_base = 'http://forums.somethingawful.com/'\n url = response.xpath('//a[@title=\"Next page\"]/@href').extract()\n if len(url) > 0:\n url = url_base + url[0]\n log.debug(str(url))\n else:\n log.debug(str(url))\n raise IndexError(\"No next page for thread!\")\n sleep(0.2)\n # log.debug(\"Iterating in parse: \" + str(url))\n yield scrapy.Request(url, callback=self.parse)\n\n def post_transform_avro(self, response: Response) -> Iterable[PostAvro]:\n \"\"\"\n xpath's the pluperfect fuck out of the html response to get what we want\n :param response:\n :return:\n \"\"\"\n # there's probably a way to do this with a single giant xpath but fuck it\n thread_id_raw = response.xpath('//div[@id=\"thread\"]/@class').extract()\n thread_id = [re.search('(\\d{7})', val).group(0) for val in thread_id_raw]\n post_text = self.posts_from_response(response)\n page = response.xpath('//option[@selected=\"selected\"]/@value').extract()[0]\n page = int(page)\n if page < 0: page = 1\n post_users = response.xpath('//dl[@class=\"userinfo\"]/dt/text()').extract()\n post_user_ids_raw = response.xpath('//ul[@class=\"profilelinks\"]/li/a/@href').extract()\n # returns something like \"user12345\", apparently these two statements can be combined but i don't care\n post_user_ids = [re.search('(\\d+)', x).group(0) for x in post_user_ids_raw[0::2]]\n post_timestamp_raw = response.xpath('//td[@class=\"postdate\"]/text()').extract()\n post_timestamp = clean_dates(post_timestamp_raw)\n post_ids_raw = response.xpath('//div[@id=\"thread\"]//table/@id').extract()\n post_ids = [re.search('(\\d+)', x).group(0) for x in post_ids_raw]\n quotes = self.extract_quotemap(response)\n for i in range(0, len(post_text)):\n post = post_text[i]\n user = post_users[i]\n user_id = post_user_ids[i]\n tstamp = post_timestamp[i]\n post_id = int(post_ids[i])\n post_quotes = quotes[post_id]\n item = PostAvro(post_id,\n int(user_id),\n user,\n post,\n int(page),\n tstamp,\n int(thread_id[0]),\n self.forum_id,\n post_quotes)\n yield item\n\n def extract_quotemap(self, response: Response) -> Dict[int, List[int]]:\n # this gives us a list of post ids and quotes, length = # posts + # quotes\n xp = response.xpath('//div[@id=\"thread\"]//table/@id | //div[@class=\"bbc-block\"]').extract()\n \"\"\"\n filter them into a map: quotes occur after the post ID that they're part of. The list looks like:\n\n post12345,\n post23456,\n <div class=[bbc-block]> + a bunch of shit + \"showthread.php?goto=post&amp;postid=467368110#post467368110\",\n post12346\n\n where the quote is of post 4673... 
belonging to post id 23456\n note that posts can have more than one quote\n \"\"\"\n return self.create_quotemap(xp)\n\n def create_quotemap(self, extract: List[str]) -> Dict[int, List[int]]:\n \"\"\"\n This method is kinda messy but everything needs to be int because downstream the post_id and quoted ids need\n to be consistent\n :param extract:\n :return:\n \"\"\"\n output = dict()\n prevkey = extract[0]\n for value in extract:\n pid = re_post_id(value)\n quote_pid = re_quote(value)\n if pid:\n pid = pid.group(2)\n output[int(pid)] = list()\n prevkey = int(pid)\n elif quote_pid:\n qpid = quote_pid.group(2)\n output[prevkey].append(int(qpid))\n return output\n\n @staticmethod\n def posts_from_response(response: Response) -> List[str]:\n \"\"\"\n Takes the http response and does xpath shit to extract the actual post\n :param response:\n :return:\n \"\"\"\n posts = response.xpath('//td[@class=\"postbody\"]').extract()\n # that gets a list (length=40) of all the posts, now we exclude quotes\n pfilter = list()\n for p in posts:\n try:\n dom = ht.fromstring(p) # this way we can xpath on the individual posts to exclude quotes\n except XMLSyntaxError:\n dom = ht.fromstring(p.encode('latin-1', 'ignore'))\n post = dom.xpath('//*/text()[not(ancestor::*[@class=\"bbc-block\"]) and not(ancestor::*[@class=\"editedby\"])]')\n # that returns a list of lists of strings, so we concatenate the entries in the sublists\n post_clean = reduce(lambda s1, s2: s1 + s2, post)\n pfilter.append(post_clean)\n # lastly we replace excess whitespace and newlines with a single space because we don't give a shit\n return [whitespace_regex.sub(' ', p) for p in pfilter]\n\n @staticmethod\n def _debug_posts(response: Response) -> List[str]:\n posts = response.xpath('//td[@class=\"postbody\"]').extract()\n # that gets a list (length=40) of all the posts, now we exclude quotes\n pfilter = list()\n for i, p in enumerate(posts):\n try:\n dom = ht.fromstring(\n remove_emojis(p)) # this way we can xpath on the individual posts to exclude quotes\n post = dom.xpath(\n '//*/text()[not(ancestor::*[@class=\"bbc-block\"]) and not(ancestor::*[@class=\"editedby\"])]')\n # that returns a list of lists of strings, so we concatenate the entries in the sublists\n post_clean = reduce(lambda s1, s2: s1 + s2, post)\n pfilter.append(post_clean)\n except XMLSyntaxError:\n log.debug(\"Error encountered on post index %d: \\n %s\", i, p)\n return posts\n # lastly we replace excess whitespace and newlines with a single space because we don't give a shit\n return [whitespace_regex.sub(' ', p) for p in pfilter]\n\n\nclass ArchiveSpider(BQThreadSpider, InitSpider): # multiple inheritance, this should go well\n\n # HIGHLY experimental\n name = 'archive-spider'\n\n allowed_domains = 'forums.somethingawful.com', 'somethingawful.com'\n def __init__(self, username='', password='', forum_id='', year='', **kwargs):\n super().__init__(username=username, password=password, forum_id=forum_id, **kwargs)\n self.uname = username\n self.password = password\n self.forum_id = forum_id\n self.year = year\n self.start_urls = ['http://forums.somethingawful.com/forumdisplay.php?forumid=' + forum_id]\n\n def init_request(self):\n return Request(url=login_page, callback=self.login)\n\n def login(self, response):\n \"\"\"\n logs into the forums\n :param response:\n :return:\n \"\"\"\n return FormRequest.from_response(response,\n formdata={'username': self.uname, 'password': <PASSWORD>,\n 'checked': 'checked'}, # \"checked\" is the \"use https\" checkbox, dgaf\n 
formxpath='//form[@class=\"login_form\"]', callback=self.after_login)\n\n def verify_login(self, response: Response) -> bool:\n \"\"\"\n Makes sure the login didn't fuck up\n :param response:\n :return:\n \"\"\"\n if u'<b>Clicking here makes all your wildest dreams come true.</b>' in response.xpath(\n '//div[@class=\"mainbodytextsmall\"]//b').extract():\n log.info('Login successful')\n return True\n else:\n log.error('Login failure')\n log.debug(response.xpath('//div[@class=\"mainbodytextsmall\"]//b').extract())\n return False\n\n\n def after_login(self, response):\n if self.verify_login(response):\n return scrapy.Request('http://forums.somethingawful.com/forumdisplay.php?forumid=' + self.forum_id, callback=self.select_archive_year)\n\n\n def select_archive_year(self, response):\n return FormRequest.from_response(response,\n formdata={'ac_year': self.year},\n formxpath='//form[@id=\"ac_timemachine\"]',\n callback=self.initialized)\n\n\ndef update_bigquery_threads(spidername, **kwargs):\n run(spidername, **kwargs)\n\n\ndef run(spidername: str, **kwargs):\n settings = get_scrapy_settings()\n cp = CrawlerProcess(settings)\n cp.crawl(spidername, **kwargs)\n cp.start()\n\n\ndef update_bigquery_posts(spidername, forum_id: int, **kwargs):\n \"\"\"\n Restarts the postspider based on what's in gcloud--this method is way incomplete as is gcloud implementation\n :return:\n \"\"\"\n bq = PostBigQueryer()\n threadmap = bq.find_last_updated(forum_id)\n threadlist = bq.get_threadlist(forum_id)\n for t in threadlist:\n if t not in threadmap.keys() or int(threadmap[t]) < 1:\n threadmap[t] = 1\n urls = urls_from_dict(threadmap)\n # !!! don't care about excluded IDs--it's like ten times as much I/O as just copying the table without duplicates afterward\n # exclude = set()\n # for thread_id, page in threadmap.items():\n # exclude.add(get_post_ids_for_thread(thread_id, page))\n # global excluded_post_ids\n # excluded_post_ids = exclude\n log.debug(\"Found %d entries for forumid %d: %s\", len(urls), forum_id, str(threadmap))\n run(spidername, username=kwargs['username'], password=kwargs['password'], forum_id=forum_id, urls=urls)\n\n\n# def update(forum_ids: List[int], **kwargs):\n# \"\"\"\n# this is broken because twisted sucks\n# :param forum_ids:\n# :param kwargs:\n# :return:\n# \"\"\"\n# username = kwargs['username']\n# password = kwargs['password']\n# intlist = [int(x) for x in forum_ids]\n# cp = None\n# for forum_id in intlist:\n# settings = get_scrapy_settings()\n# cp = CrawlerProcess(settings)\n# cp.crawl('bq-threadspider', forum_id=str(forum_id), username=username, password=password,\n# stop_after_crawl=False)\n# cp.start()\n# cp.join()\n#\n# bq = PostBigQueryer()\n# threadmap = bq.find_last_updated(forum_id)\n# threadlist = bq.get_threadlist(forum_id)\n# for t in threadlist:\n# if t not in threadmap.keys() or int(threadmap[t]) < 1:\n# threadmap[t] = 1\n# urls = urls_from_dict(threadmap)\n# log.debug(\"Found %d entries for forumid %d: %s\", len(urls), forum_id, str(threadmap))\n# cp.crawl('bq-postspider', forum_id=str(forum_id), username=username, password=password, urls=urls,\n# stop_after_crawl=False)\n# if cp is not None:\n# cp.stop()\n\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser()\n group = parser.add_mutually_exclusive_group()\n group.add_argument('-p', '--posts', help='Update posts only', action='store_true')\n group.add_argument('-t', '--threads', help='Update threads only', action='store_true')\n parser.add_argument('-a', '--archive', type=str, 
help='Scrape archive year provided')\n parser.add_argument('user', type=str, help='Forums username')\n parser.add_argument('password', type=str, help='Forums password')\n parser.add_argument('forumid', type=str, help='Forum id(s) to scrape')\n args = parser.parse_args()\n log.debug(\"ID: %s\", args.forumid)\n # raise ConnectionAbortedError('stopping for debug')\n keyword_args = dict()\n keyword_args['username'] = args.user\n keyword_args['password'] = <PASSWORD>\n keyword_args['forum_id'] = args.forumid\n archived = False\n if args.archive:\n archived = True\n keyword_args['year'] = args.archive\n # could prob clean this up but fuck it. Posts require some extra steps for de-duping hence the diff commands\n if args.posts:\n if archived:\n log.warning(\"Archive flag has no effect for post scrape\")\n keyword_args['spidername'] = 'bq-postspider'\n update_bigquery_posts(**keyword_args)\n elif args.threads:\n keyword_args['spidername'] = 'archive-spider' if archived else 'bq-threadspider'\n update_bigquery_threads(**keyword_args)\n else:\n log.error(\"Must select either --posts or --threads to scrape\")\n", "id": "572288", "language": "Python", "matching_score": 4.224019527435303, "max_stars_count": 0, "path": "python/goonalytics/scraping/bq_scrapers.py" }, { "content": "from urllib.parse import urlparse\n\nimport lxml.html as ht\n\nfrom functools import reduce\nfrom typing import List\n\nfrom bs4 import UnicodeDammit\nfrom urllib3.util import Url\nfrom scrapy.settings import Settings\nfrom datetime import datetime\nfrom time import strptime\nimport time\n\nimport re\nimport os\n\ndef make_shit_comma_separated_func(x, y):\n \"\"\"\n func for use in reduce() methods to make a list a comma separated string\n :param x:\n :param y:\n :return:\n \"\"\"\n return str(x) + ',' + str(y)\n\ndef single_url(thread_id, thread_page=1):\n return 'https://forums.somethingawful.com/showthread.php?threadid={}&perpage=40&pagenumber={}'.format(thread_id,thread_page)\n\ndef urls_from_comma_sep_str(threads):\n urls = ['https://forums.somethingawful.com/showthread.php?threadid=' + t for t in threads.split(',')]\n return reduce(make_shit_comma_separated_func, urls)\n\ndef urls_from_list(threads: List):\n return ['https://forums.somethingawful.com/showthread.php?threadid=' + str(t) for t in threads]\n\ndef urls_from_dict(thread_map):\n thread_ids = thread_map.keys()\n urls = list()\n for curr_id in thread_ids:\n curr_page = thread_map[curr_id]\n url = 'https://forums.somethingawful.com/showthread.php?threadid=' + str(\n curr_id) + '&userid=0&perpage=40&pagenumber=' + str(curr_page)\n urls.append(url)\n return reduce(make_shit_comma_separated_func, urls)\n\ndef rehtml(content):\n \"\"\"\n does unicode bullshit\n :param content:\n :return:\n \"\"\"\n doc = UnicodeDammit(content, is_html=True)\n parser = ht.HTMLParser(encoding=doc.original_encoding)\n root = ht.fromstring(content, parser=parser)\n return root\n\ndef extract_forum_id_from_url(url: Url):\n q = urlparse(url).query\n fid = re.search('(?<=forumid=)\\\\d{2,}', q).group(0)\n return fid\n\n\n# it's fucking beyond retarded that emojis were added to unicode\nemoji_pattern = re.compile(\"[\"\n u\"\\U0001F600-\\U0001F64F\" # emoticons\n u\"\\U0001F300-\\U0001F5FF\" # symbols & pictographs\n u\"\\U0001F680-\\U0001F6FF\" # transport & map symbols\n u\"\\U0001F1E0-\\U0001F1FF\" # flags (iOS)\n \"]+\", flags=re.UNICODE)\n\n\ndef remove_emojis(content):\n return emoji_pattern.sub(r'', content)\n\n\ndef get_scrapy_settings() -> Settings:\n \"\"\"\n i don't remember what 
this does but shit doesn't work without it\n :return:\n \"\"\"\n settings = Settings()\n os.environ['SCRAPY_SETTINGS_MODULE'] = 'goonalytics.scraping.scrapy_settings'\n settings_module_path = os.environ['SCRAPY_SETTINGS_MODULE']\n settings.setmodule(settings_module_path, priority='project')\n return settings\n\npid_regex = re.compile(r'(^post)(\\d+$)')\nquote_id_regex = re.compile(r'(.*showthread\\.php\\?goto=post&amp;postid\\=)(\\d+)')\nwhitespace_regex = re.compile(r'(\\s{2,}|\\n|\\t)')\n\ndef re_post_id(value: str):\n return pid_regex.match(value)\n\ndef re_quote(value: str):\n return quote_id_regex.match(value)\n\ndef clean_text(raw_posts: List[str]) -> List[str]:\n \"\"\"\n Returns a list of only text posts--ignores stuff like people just posting ^ or emoticons\n :param raw_posts:\n :return:\n \"\"\"\n textonly = [x for x in raw_posts if re.search('(\\w+)', x) is not None]\n return textonly\n\ndef clean_dates(raw_dates: List[str]) -> List[datetime]:\n \"\"\"\n Puts the dates in a format that can be parsed by sql stuff\n :param raw_dates: a list of date text obtained by xpathing the html\n :return: python date objects\n \"\"\"\n space_removed = [str(x).strip() for x in raw_dates]\n no_blanks = filter(None, space_removed)\n date_objs = [strptime(x, '%b %d, %Y %H:%M') for x in no_blanks]\n return date_objs\n\ncurrent_time_ms = lambda: int(round(time.time() * 1000))\n", "id": "3552120", "language": "Python", "matching_score": 0.7429109811782837, "max_stars_count": 0, "path": "python/goonalytics/scraping/util.py" }, { "content": "BOT_NAME = 'stubot-goonalytics'\nSPIDER_MODULES = ['goonalytics.scraping.bq_scrapers']\nNEWSPIDER_MODULE = 'goonalytics.scraping.bq_scrapers'\nCONCURRENT_REQUESTS = 5\nCONCURRENT_REQUESTS_PER_DOMAIN = 5\nDNS_TIMEOUT = 600 # give it ten minutes because comcast is shit\nDOWNLOAD_DELAY = 2\nRANDOMIZE_DOWNLOAD_DELAY = True\nROBOTSTXT_OBEY = False # can't login otherwise\n", "id": "1041458", "language": "Python", "matching_score": 0.2768247723579407, "max_stars_count": 0, "path": "python/goonalytics/scraping/scrapy_settings.py" } ]
2.191885
bezova
[ { "content": "import os, sys; sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))\nimport logging\n\nimport codecs\n\nimport pytest\n\nfrom pathlib import Path\n\nfrom lasio import read\n\negfn = lambda fn: os.path.join(os.path.dirname(__file__), \"examples\", fn)\n\ndef test_encoding_attr():\n las = read(egfn(\"encodings_utf8.las\"), autodetect_encoding='cchardet')\n assert las.encoding == 'UTF-8'\n\ndef test_utf8_cchardet(): las = read(egfn(\"encodings_utf8.las\"), autodetect_encoding='cchardet')\ndef test_utf8wbom_cchardet(): las = read(egfn(\"encodings_utf8wbom.las\"), autodetect_encoding='cchardet')\ndef test_utf16lebom_cchardet(): las = read(egfn(\"encodings_utf16lebom.las\"), autodetect_encoding='cchardet')\ndef test_utf16le_specified_ok(): las = read(egfn(\"encodings_utf16le.las\"), encoding='UTF-16-LE')\n\[email protected](reason=\"this is not behaving properly see PR #326\")\ndef test_utf16le_cchardet_fails(): \n with pytest.raises(Exception):\n las = read(egfn(\"encodings_utf16le.las\"), autodetect_encoding='cchardet')\n \ndef test_utf16bebom_cchardet(): las = read(egfn(\"encodings_utf16bebom.las\"), autodetect_encoding='cchardet')\ndef test_iso88591_cchardet(): las = read(egfn(\"encodings_iso88591.las\"), autodetect_encoding='cchardet')\ndef test_cp1252_cchardet(): las = read(egfn(\"encodings_cp1252.las\"), autodetect_encoding='cchardet')\n\n\"\"\"\nVerify encodings for pathlib.Path objects\n\"\"\"\ndef test_pathlib_utf8_cchardet(): las = read(Path(egfn(\"encodings_utf8.las\")), autodetect_encoding='cchardet')\ndef test_pathlib_utf8wbom_cchardet(): las = read(Path(egfn(\"encodings_utf8wbom.las\")), autodetect_encoding='cchardet')\ndef test_pathlib_utf16lebom_cchardet(): las = read(Path(egfn(\"encodings_utf16lebom.las\")), autodetect_encoding='cchardet')\ndef test_pathlib_utf16le_specified_ok(): las = read(Path(egfn(\"encodings_utf16le.las\")), encoding='UTF-16-LE')\n\[email protected](reason=\"this is not behaving properly see PR #326\")\ndef test_pathlib_utf16le_cchardet_fails(): \n with pytest.raises(Exception):\n las = read(Path(egfn(\"encodings_utf16le.las\")), autodetect_encoding='cchardet')\n\ndef test_pathlib_utf16bebom_cchardet(): las = read(Path(egfn(\"encodings_utf16bebom.las\")), autodetect_encoding='cchardet')\ndef test_pathlib_iso88591_cchardet(): las = read(Path(egfn(\"encodings_iso88591.las\")), autodetect_encoding='cchardet')\ndef test_pathlib_cp1252_cchardet(): las = read(Path(egfn(\"encodings_cp1252.las\")), autodetect_encoding='cchardet')\n\n", "id": "5945796", "language": "Python", "matching_score": 1.8363275527954102, "max_stars_count": 1, "path": "tests/test_encoding.py" }, { "content": "# coding=utf-8\n\nimport os, sys\n\nsys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))\n\nfrom pprint import pprint\n\nfrom lasio.reader import read_header_line\n\n\ndef test_time_str_and_colon_in_desc():\n line = \"TIML.hh:mm 23:15 23-JAN-2001: Time Logger: At Bottom\"\n result = read_header_line(line, section_name=\"Parameter\")\n # print('\\n')\n # pprint(result)\n assert result[\"value\"] == \"23:15 23-JAN-2001\"\n assert result[\"descr\"] == \"Time Logger: At Bottom\"\n\n\ndef test_cyrillic_depth_unit():\n line = u\" DEPT.метер : 1 DEPTH\"\n result = read_header_line(line, section_name=\"Curves\")\n assert result[\"unit\"] == u\"метер\"\n", "id": "9175592", "language": "Python", "matching_score": 0.015237768180668354, "max_stars_count": 1, "path": "tests/test_read_header_line.py" }, { "content": "import pandas as pd\nimport numpy as np\nfrom 
scipy.interpolate import griddata\nfrom scipy import ndimage\nfrom typing import List, Tuple, Dict, Optional \nfrom sklearn.neighbors import NearestNeighbors\nfrom .data_helper import low_high_quantile\nfrom matplotlib import pyplot as plt\nfrom matplotlib import patches, patheffects\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom collections import OrderedDict\nimport statsmodels.api as sm \n\nfrom numpy import ma\nfrom matplotlib import cbook\nfrom matplotlib.colors import Normalize\n\nfrom matplotlib.colors import LinearSegmentedColormap\n\n#colormap from SHAP packakge\nred_blue = LinearSegmentedColormap('red_blue', { # #1E88E5 -> #ff0052\n 'red': ((0.0, 30./255, 30./255),\n (1.0, 255./255, 255./255)),\n\n 'green': ((0.0, 136./255, 136./255),\n (1.0, 13./255, 13./255)),\n\n 'blue': ((0.0, 229./255, 229./255),\n (1.0, 87./255, 87./255)),\n\n 'alpha': ((0.0, 1, 1),\n (0.5, 0.3, 0.3),\n (1.0, 1, 1))\n})\n\nblue_green = LinearSegmentedColormap('blue_green', { # #1E88E5 -> #ff0052\n 'green': ((0.0, 30./255, 30./255),\n (1.0, 255./255, 255./255)),\n\n 'red': ((0.0, 50./255, 50./255),\n (1.0, 10./255, 10./255)),\n\n 'blue': ((0.0, 229./255, 229./255),\n (1.0, 87./255, 87./255)),\n\n 'alpha': ((0.0, 1, 1),\n (0.5, 0.3, 0.3),\n (1.0, 1, 1))\n})\n\nblue_green_solid = LinearSegmentedColormap('blue_green_solid', { # #1E88E5 -> #ff0052\n 'green': ((0.0, 30./255, 30./255),\n (1.0, 255./255, 255./255)),\n\n 'red': ((0.0, 50./255, 50./255),\n (1.0, 10./255, 10./255)),\n\n 'blue': ((0.0, 229./255, 229./255),\n (1.0, 87./255, 87./255)),\n\n 'alpha': ((0.0, 1, 1),\n (0.5, 1, 1),\n (1.0, 1, 1))\n})\n\n# setting midpoint for colorbar\n# https://stackoverflow.com/questions/7404116/defining-the-midpoint-of-a-colormap-in-matplotlib\nclass MidPointNorm(Normalize): \n def __init__(self, midpoint=0, vmin=None, vmax=None, clip=False):\n Normalize.__init__(self,vmin, vmax, clip)\n self.midpoint = midpoint\n\n def __call__(self, value, clip=None):\n if clip is None:\n clip = self.clip\n\n result, is_scalar = self.process_value(value)\n\n self.autoscale_None(result)\n vmin, vmax, midpoint = self.vmin, self.vmax, self.midpoint\n\n if not (vmin < midpoint < vmax):\n raise ValueError(\"midpoint must be between maxvalue and minvalue.\") \n elif vmin == vmax:\n result.fill(0) # Or should it be all masked? 
Or 0.5?\n elif vmin > vmax:\n raise ValueError(\"maxvalue must be bigger than minvalue\")\n else:\n vmin = float(vmin)\n vmax = float(vmax)\n if clip:\n mask = ma.getmask(result)\n result = ma.array(np.clip(result.filled(vmax), vmin, vmax),\n mask=mask)\n\n # ma division is very slow; we can take a shortcut\n resdat = result.data\n\n #First scale to -1 to 1 range, than to from 0 to 1.\n resdat -= midpoint \n resdat[resdat>0] /= abs(vmax - midpoint) \n resdat[resdat<0] /= abs(vmin - midpoint)\n\n resdat /= 2.\n resdat += 0.5\n result = ma.array(resdat, mask=result.mask, copy=False) \n\n if is_scalar:\n result = result[0] \n return result\n\n def inverse(self, value):\n if not self.scaled():\n raise ValueError(\"Not invertible until scaled\")\n vmin, vmax, midpoint = self.vmin, self.vmax, self.midpoint\n\n if cbook.iterable(value):\n val = ma.asarray(value)\n val = 2 * (val-0.5) \n val[val>0] *= abs(vmax - midpoint)\n val[val<0] *= abs(vmin - midpoint)\n val += midpoint\n return val\n else:\n val = 2 * (val - 0.5)\n if val < 0: \n return val*abs(vmin-midpoint) + midpoint\n else:\n return val*abs(vmax-midpoint) + midpoint\n\ndef plot_shap_dependence(shapVals_df, df, feature='ProppantIntensity_LBSPerFT', \n feature_disp=None, cmap=plt.cm.coolwarm, s=10, title=None, color_bar=True, color_title=None):\n feature_disp = feature if feature_disp is None else feature_disp\n title = feature_disp if title is None else title\n color_title = 'Feature Impact' if color_title is None else color_title\n \n x = df[feature].values\n y = shapVals_df[feature].values\n cvals =y\n clow = np.nanpercentile(cvals, 5)\n chigh = np.nanpercentile(cvals, 95)\n norm = MidPointNorm(midpoint=0) if color_bar else MidPointNorm(midpoint=0, vmin=clow, vmax=chigh) # setting vmin/vmax will clip cbar\n# scalarm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)\n# scalarm._A = []\n\n cvals_nans = np.isnan(cvals)\n cval_notNan = np.invert(cvals_nans)\n \n fig, ax = plt.subplots(figsize=(8,5))\n ax.scatter(x[cvals_nans], y[cvals_nans], s=s, color=\"#777777\", alpha=1, rasterized=len(x) > 500)\n mapable = ax.scatter(x[cval_notNan], y[cval_notNan], s=s, c=cvals[cval_notNan], cmap=cmap, alpha=1,\n norm=norm, rasterized=len(x) > 500)\n if color_bar: \n cb = colorbar(mapable, size=0.15)\n cb.set_clim(clow, chigh) # setting vmin/vmaqx here will set even color beyond these numbers\n# cb = colorbar(scalarm, size=0.15)\n cb.set_label(color_title, size=13)\n cb.outline.set_visible(False)\n cb.set_alpha(1)\n ax.set_xlabel(feature_disp, fontsize=14)\n ax.set_ylabel('Feature Impact', fontsize=14)\n ax.set_title(title, fontsize=14)\n return ax\n\ndef nan_to_mean(arr:np.ndarray, axis:int=0)->np.ndarray:\n '''fills nan with mean over axis .\n uses masked array to apply mean to complete nan columns np.nanmean() can not do that\n other option would be to set some kind of spline extrapolation '''\n data_m = np.ma.masked_invalid(arr, copy=True)\n return np.where(np.isnan(arr), data_m.mean(axis=axis), arr)\n\nType_mapout = Tuple[np.ndarray, np.ndarray, Dict[str, np.ndarray], Dict[str, np.ndarray]]\n\ndef map_grid(df:pd.DataFrame, nxny:Tuple[int]=(500,500),\n lat_lon_names:List[str]=['Latitude_Mid','Longitude_Mid'])->Type_mapout:\n '''generates linear interpolated maps\n return: xi, yi, {col:interpolated}'''\n zis = {}\n cols = df.drop(columns=lat_lon_names).columns\n lat, lon = lat_lon_names\n y, x = df[lat], df[lon]\n nx, ny = nxny\n minx, maxx = x.min(), x.max()\n miny, maxy = y.min(), y.max()\n xi = np.linspace(minx, maxx, nx)\n yi = np.linspace(miny, 
maxy, ny)\n for col in cols:\n zi = griddata((x, y), df[col], (xi[None,:], yi[:,None]), method='linear')\n zis[col] = zi\n return xi, yi, zis\n\ndef blured_map(zis, sigma:float=5.)->Type_mapout:\n '''generates linear interpolated and blured maps\n return: xi, yi, {col:interpolated}, {col:blured}'''\n zibs = {}\n for col, zi in zis.items():\n zi_blurred = nan_to_mean(zi, axis=0) #need so blure not cut nan edges\n zi_blurred = ndimage.gaussian_filter(zi_blurred, sigma=sigma)\n zi_blurred[np.isnan(zi)] = np.nan \n zibs[col] = zi_blurred\n return zibs\n\ndef plot_contour_map(xi:np.ndarray, yi:np.ndarray, zi:np.ndarray, mask:Optional=None, n_conturs:int=15, \n ax:Optional=None, fig:Optional=None, figsize=(10,10), \n vminmax:Optional=None, addColorbar=True, colorbarLabel=None, args={}, argsf={}):\n if ax is None: fig, ax = plt.subplots(figsize=figsize)\n if mask is not None: zi = np.ma.masked_where(~mask, zi)\n vmin, vmax = low_high_quantile(pd.Series(zi.flatten()),1/100) if vminmax is None else vminmax\n cs = ax.contourf(xi ,yi, zi, n_conturs, vmin=vmin, vmax=vmax, antialiased=True, **argsf)\n ax.contour(xi, yi, zi, n_conturs, linewidths=0.5, colors='k', antialiased=True, **args) #add vm\n ax.set_aspect(1)\n cbar =colorbar(cs, label=colorbarLabel) if addColorbar else None\n return fig, ax, cbar\n\ndef mask_by_dist(df, col, xi, yi, radius=0.3, lon_lat_names:List[str]=['Longitude_Mid', 'Latitude_Mid']):\n nx, ny = len(xi), len(yi)\n xm, ym = np.meshgrid(xi, yi)\n Xtrn = df[lon_lat_names]\n Xtest = pd.DataFrame({'x':xm.flatten(), 'y':ym.flatten()})\n\n nbrs = NearestNeighbors(n_neighbors=1, algorithm='auto').fit(Xtrn, df[col])\n rad, index = nbrs.radius_neighbors(Xtest, radius=radius, return_distance=True)\n mask = np.array([(True if len(x)>0 else False) for x in rad]).reshape((ny,nx))\n return mask\n\ndef fence_draw(gf, ax, latlon=['lat', 'lon'], **args):\n ''' takes fennce coord \n E.G. 
geo_fence={'lon':(-98, -97.73), 'lat': (28.83, 29.19)}\n adds patch to axes\n '''\n lat, lon = latlon\n dlon = gf[lon][1]-gf[lon][0]\n dlat = gf[lat][1]-gf[lat][0]\n rect = patches.Rectangle((gf[lon][0],gf[lat][0]),dlon,dlat,linewidth=1,edgecolor='r',facecolor='none', **args)\n ax.add_patch(rect)\n\ndef colorbar(mappable, ax=None, location='right', size=\"5%\", pad=0.05, **args):\n if ax is None:\n try: ax = mappable.axes\n except: ax = mappable.ax # for contour plots \n fig = ax.figure\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(location, size=size, pad=pad)\n return fig.colorbar(mappable, cax=cax, **args)\n\ndef draw_outline(o, lw):\n '''from fastai'''\n o.set_path_effects([patheffects.Stroke(\n linewidth=lw, foreground='black'), patheffects.Normal()])\n \ndef draw_text(ax, xy, txt, sz=14, outsz=2):\n '''from fastai'''\n #ax.annotate(txt, (df[lon].iloc[i], df[lat].iloc[i]))\n text = ax.text(*xy, txt, verticalalignment='top', color='white',\n fontsize=sz)#, weight='bold')\n draw_outline(text, outsz)\n\ndef draw_rect(ax, b):\n '''from fastai'''\n patch = ax.add_patch(patches.Rectangle(b[:2], *b[-2:], \n fill=False, edgecolor='white', lw=2))\n draw_outline(patch, 4)\n\ndef plot_pdp_std(wells_ice, smooth=True, zero_start=False, frac=0.15, ax=None, xlabel=None, \n ylabel='annual boe/1000ft', title='Completion Impact', quantile=True, addStd=True,\n addLegend=True, argF={'alpha':0.2}, argPDP={}, figsize=(12,7)):\n '''plot median line with 25, 75% quintiles [default] or mean with +-std'''\n if ax is None: fig, ax = plt.subplots(figsize=figsize)\n if smooth: lowess = sm.nonparametric.lowess\n for api, ice in wells_ice.items():\n if zero_start: ice = ice.sub(ice.iloc[:,0], axis=0)\n describe = ice.describe() # gives mean std and quintile values \n ice_pdp = describe.loc['50%'] if quantile else describe.loc['mean']\n ice_upper = describe.loc['75%'] if quantile else describe.loc['mean'] + describe.loc['std']\n ice_lower = describe.loc['25%'] if quantile else describe.loc['mean'] - describe.loc['std']\n upper = ice_upper.values\n lower = ice_lower.values\n pdp = ice_pdp.values\n if smooth: \n pdp = lowess(ice_pdp.values, np.array(ice.columns), frac=frac, return_sorted=False)\n if addStd:\n upper = lowess(ice_upper.values, np.array(ice.columns), frac=frac, return_sorted=False)\n lower = lowess(ice_lower.values, np.array(ice.columns), frac=frac, return_sorted=False)\n if addStd: ax.fill_between(ice.columns, upper, lower, **argF)#, color='r')\n ax.plot(list(ice.columns), pdp, label=api, **argPDP)\n if addLegend: ax.legend(loc='upper left')\n ax.set(xlabel=xlabel, ylabel=ylabel)\n ax.set_title(title, fontsize=14)\n return ax\n\ndef plot_ice_by_category(iceLines, completions, category, cat_dict=None, point=None, point_label='',\n xyLabels=('',''), title='Completion Impact', cmapName='tab10', figsize=(10,6), ax=None):\n if ax is None: fig, ax = plt.subplots(figsize=figsize)\n argsP = {'s':80, 'lw':1, 'edgecolors':'k', 'zorder':3}\n cmap=plt.get_cmap(cmapName) # other maps: 'Set1'\n args = {'lw':0.3, 'alpha':0.4, 'zorder':1}\n\n unique_cats=completions.loc[iceLines.index, category].unique()\n color_num = dict(zip(unique_cats, range(len(unique_cats)))) \n \n x = iceLines.columns\n for index, row in iceLines.iterrows():\n factor_ind=completions.loc[index, category]\n label = factor_ind if cat_dict is None else cat_dict[category][factor_ind]\n plt.plot(x, row.values, c=cmap(color_num[factor_ind]), label=label, **args)\n\n if point is not None: ax.scatter(point[0], point[1], 
label=point_label, **argsP)\n ax.set(xlabel=xyLabels[0], ylabel=xyLabels[1])\n ax.set_title(title, fontsize=14)\n\n #drop repeated legends\n handles, labels = ax.get_legend_handles_labels()\n by_label = OrderedDict(zip(labels, handles))\n\n leg = plt.legend(by_label.values(), by_label.keys())\n # transparency\n for legobj in leg.legendHandles:legobj.set_alpha(1) # OR legobj._legmarker.set_alpha(0) \n \n #linewidth in legend; [-1] to skip line width for point legend \n handles = leg.legendHandles if point is None else leg.legendHandles[:-1]\n for legobj in handles: legobj.set_linewidth(5.0)\n \n return ax\n\ndef plot_ice_by_continues(iceLines, completions, category, nLines=1000, point=None, \n point_label='', xyLabels=('',''), title='Completion Impact', random_state=42,\n vminmax=None, figsize=(10,6), ax=None, cmapName='gist_stern',\n argsP = {'s':80, 'lw':1, 'edgecolors':'k', 'zorder':3},\n argsL = {'lw':0.2, 'alpha':0.3, 'zorder':1}, smooth=False, frac=0.15):\n if ax is None: fig, ax = plt.subplots(figsize=figsize)\n if smooth: lowess = sm.nonparametric.lowess\n cmap=plt.get_cmap(cmapName) #'gist_stern', 'terrain', 'brg' \n nLines = min(nLines, iceLines.shape[0])\n iceSample = iceLines.sample(nLines, random_state=random_state)\n # normalize colors\n vmin, vmax = low_high_quantile(completions[category],1./100.) if vminmax is None else vminmax\n norm=plt.Normalize(vmin=vmin,vmax=vmax)\n scalarm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)\n\n x = iceSample.columns\n for index, row in iceSample.iterrows():\n factor_ind=completions.loc[index, category]\n values = lowess(np.array(row.values), x, frac=frac, return_sorted=False) if smooth \\\n else row.values\n plt.plot(x, values, c=cmap(norm(factor_ind)), **argsL)\n\n ax.set(xlabel=xyLabels[0], ylabel=xyLabels[1])\n ax.set_title(title, fontsize=14)\n \n if point is not None: ax.scatter(point[0], point[1], label=point_label,\\\n c=cmap(norm(point[2])), **argsP)\n ax.set(xlabel=xyLabels[0], ylabel=xyLabels[1])\n ax.set_title(title, fontsize=14)\n\n #drop repeated legends\n handles, labels = ax.get_legend_handles_labels()\n by_label = OrderedDict(zip(labels, handles))\n\n leg = plt.legend(by_label.values(), by_label.keys())\n # transparency\n for legobj in leg.legendHandles:legobj.set_alpha(1) # OR legobj._legmarker.set_alpha(0) \n \n #linewidth in legend; [-1] to skip line width for point legend \n handles = leg.legendHandles if point is None else leg.legendHandles[:-1]\n for legobj in handles: legobj.set_linewidth(5.0)\n\n # make up the array of the scalar mappable. 
Urgh...\n scalarm._A = []\n # cb=plt.colorbar(scalarm); cb.set_label(category)\n cbar =colorbar(scalarm, ax, label=category)\n return ax, scalarm", "id": "3580438", "language": "Python", "matching_score": 3.4414258003234863, "max_stars_count": 1, "path": "plotting.py" }, { "content": "import pandas as pd\nfrom numpy import nan as NaN\nimport numpy as np\nfrom sklearn.neighbors import KNeighborsRegressor, KNeighborsClassifier\nfrom typing import List, Union\nimport copy\n# disable pandas chain assignment warning\npd.options.mode.chained_assignment = None # default='warn'\n# discussion here\n# https://stackoverflow.com/questions/20625582/how-to-deal-with-settingwithcopywarning-in-pandas\n\ndef low_high_quantile(ser:pd.Series, low:float=None, high:float=None)->List[float]:\n ''' returns values for quantiles\n symmetric if High is omitted'''\n low = low if low else 0.\n high = high if high else 1.\n # if high is None: high = 1.-low\n return ser.quantile(low), ser.quantile(high)\n\ndef nan_quantile_ref(df:pd.DataFrame, df_ref:pd.DataFrame, col:str, low:float, high=None):\n ''' In PLACE operation\n modyfies df with quantiles set to nan (low, high) for col\n quantiles calculated from refereced df_ref '''\n bottom, top = low_high_quantile(df_ref[col], low, high)\n df.loc[(df[col]>top)|(df[col]<bottom), col] = NaN\n\ndef nan_quantile(df:pd.DataFrame, col:str, low:float=None, high:float=None):\n ''' In PLACE operation\n modyfies df with quantiles set to nan (low, high) for col quantiles '''\n nan_quantile_ref(df, df, col=col, low=low, high=high) \n # bottom, top = low_high_quantile(df[col], low, high)\n # df.loc[(df[col]>top)|(df[col]<bottom), col] = NaN\n\ndef knn_col_by_XY(df, col, cond_to_predict=None, LatLon=['Latitude', 'Longitude']):\n ''' In PLACE operation\n use closesst geographic neighbors to fill in missibng values base\n col - column to fill\n if condition=None, fill in all NaN values in the column\n 'example'\n col = 'Elevation';\n cond_to_predict = (df[col].isna() | (df[col]<100) | (df[col]>1000))\n knn_col(df, col, cond_to_predict) \n '''\n if cond_to_predict is None: cond_to_predict=df[col].isna()\n if len(df[cond_to_predict])==0: return None\n XY = df.loc[~cond_to_predict, LatLon+[col]]\n if len(XY)==0: return None\n if df[col].dtype=='object': knn = KNeighborsClassifier(2, weights='distance')\n else: knn = KNeighborsRegressor(2, weights='distance')\n _=knn.fit(XY[LatLon], XY[col])\n df.loc[cond_to_predict, col] = knn.predict(df.loc[cond_to_predict,LatLon])\n\ndef unknown_to_nan(df:pd.DataFrame, list_to_nan=['UNKNOWN'], cols=None):\n ''' in Place Operation\n for categorical collumns (among 'cols' if given) rennames certain values ('UNKNOWN')) -> Nan'''\n cols = df.columns if cols is None else cols\n for col in df[cols].columns[df[cols].dtypes == 'object']: \n for name in list_to_nan:\n df.loc[df[col]==name, col] = NaN\n\ndef nan_to_uknown(df:pd.DataFrame, unknownName='UNKNOWN', cols=None):\n ''' in Place Operation\n for categorical collumns (among 'cols' if given) rennames NaN to unknownName'''\n cols = df.columns if cols is None else cols\n for col in df[cols].columns[df[cols].dtypes == 'object']: \n df.loc[df[col].isna(), col] = unknownName\n\ndef unknowns_to_sameName(df:pd.DataFrame, unknownName='UNKNOWN', list_to_nan=['UNKNOWN'], cols=None, keepNAN=False):\n ''' IN PLace Opeation\n rename all unknowns (list_to_nan) and NaN categorical values (among 'cols' if given) to same unknownName'''\n unknown_to_nan(df, list_to_nan, cols)\n if not keepNAN: nan_to_uknown(df, unknownName, 
cols)\n\ndef geo_con(df, gf, gflatlon=['lat', 'lon'], datalatlon=['Latitude_Mid', 'Longitude_Mid']):\n '''condition on df by geographycal fence\n gf={'lon':(-98, -97.73), 'lat': (28.83, 29.19)} '''\n gflat, gflon = gflatlon\n datlat, datlon = datalatlon\n cond = (df[datlat]>gf[gflat][0])&(df[datlat]<gf[gflat][1])&(df[datlon]>gf[gflon][0])&(df[datlon]<gf[gflon][1])\n return cond\n\ndef select_by_distance(ref, df, R_mile, square=True, latlon=['Latitude_Mid', 'Longitude_Mid']): \n ''' select wells from df within ceartain square a=2R or radius R around of reference well \n \n # At 38 degrees North latitude, \n # one degree of latitude equals approximately 364,000 ft (69 miles), \n # one minute equals 6068 ft (1.15 miles), one-second equals 101 ft;\n # one-degree of longitude equals 288,200 ft (54.6 miles),\n # one minute equals 4800 ft (0.91 mile), and one second equals 80 ft.\n '''\n # def ft_to_rad(ft):\n # # convert distance in ft to radians\n # kms_per_radian = 6371.0088 # mean earth radius > https://en.wikipedia.org/wiki/Earth_radius\n # ft_in_meters = 0.3048\n # meter_in_km = 1000.\n # return ft*ft_in_meters/meter_in_km/kms_per_radian\n # def mile_to_deg(mile):\n # # convert distance in mile to radian on earth Lat long \n # FT_PER_MILE = 5280.\n # return np.degrees(ft_to_rad(mile*FT_PER_MILE))\n milesInLonDeg = 54.6 # at 38 deg North latitude\n milesInLatDeg = 69.\n lat, lon = latlon\n latR, lonR = ref[latlon].values\n # theta_deg = mile_to_deg(R_mile)\n # if square: \n # condition = ((df[lat]-latR).abs()<=theta_deg) & ((df[lon]-lonR).abs()<=theta_deg)\n # else: \n # condition =((df[lat]-latR)**2 +(df[lon]-lonR)**2) <= (theta_deg**2)\n # return df[condition].copy()\n if square: \n condition = ((df[lat]-latR).abs()*milesInLatDeg<=R_mile) & ((df[lon]-lonR).abs()*milesInLonDeg<=R_mile)\n else: \n condition =(((df[lat]-latR)*milesInLatDeg)**2 +((df[lon]-lonR)*milesInLonDeg)**2) < (R_mile**2)\n return df[condition]\ndef raname_dict(dictionary, category, orig, new):\n '''rename category value in dictionary'''\n catDict = copy.deepcopy(dictionary)\n tt = catDict[category].values\n tt[tt==orig]=new\n return catDict\n\ndef cut_minmax(arr, minV, maxV): return arr[(arr<=maxV)&(arr>=minV)]\n", "id": "11178888", "language": "Python", "matching_score": 0.9680084586143494, "max_stars_count": 1, "path": "data_helper.py" }, { "content": "from sklearn_pandas import DataFrameMapper\nfrom sklearn.preprocessing import LabelEncoder, Imputer, StandardScaler, MinMaxScaler\nimport warnings\nfrom sklearn.exceptions import DataConversionWarning\nfrom pandas.api.types import is_string_dtype, is_numeric_dtype\n\nimport pandas as pd\nimport numpy as np\n\ndef scale_vars(df, mapper=None, columns=None, inplace=True):\n '''from fastai.structured.py\n scales inplace all numeric cols or columns, returns mapper'''\n warnings.filterwarnings('ignore', category=DataConversionWarning)\n cols = df.columns if columns is None else columns\n\n if mapper is None:\n map_f = [([n], StandardScaler()) for n in cols if is_numeric_dtype(df[n])]\n mapper = DataFrameMapper(map_f, input_df=True, df_out=True).fit(df)\n if inplace: \n df[mapper.transformed_names_] = mapper.transform(df)\n return mapper\n\n\ndef encode_cat(df, mapper=None, columns=None, inplace=True):\n '''maps categorical vars to numbers, returns mapper\n to apply to test data: _ = scale_vars(test, scale_mapper)\n # direct transform: mapper.transform(df)\n # inverse transform: encode_dict = {n[0]: e for n, e in mapper.features}\n 
encode_dict['RSProppantType'].inverse_transform([0,1,2])\n encode_dict['RSProppantType'].classes_ gives ordered classes list same as in inversetransform'''\n\n warnings.filterwarnings('ignore', category=DataConversionWarning)\n cols = df.columns if columns is None else columns\n if mapper is None:\n #map_f = [([n], LabelEncoder()) for n in cols if not is_numeric_dtype(df[n])]\n map_f = [(n, LabelEncoder()) for n in cols if not is_numeric_dtype(df[n])]\n mapper = DataFrameMapper(map_f, input_df=True, df_out=True).fit(df)\n if inplace: \n df[mapper.transformed_names_] = mapper.transform(df)\n return mapper\n\n\ndef train_cat_var_types(df, cat_vars, cont_vars):\n '''assign 'float32' and 'category' types to columns, \n returns df, dict{col_name: [cat list]}'''\n for v in cont_vars: df[v] = df[v].astype('float32') \n for v in cat_vars: df[v] = df[v].astype('category').cat.as_ordered() \n cat_dict = {n: df[n].cat.categories for n in cat_vars}\n # df[n].cat.codes gives codes\n return df, cat_dict\n\ndef test_apply_cats(df, cat_dict, cont_vars):\n '''set categorical and continues vars using given dict'''\n cat_vars = list(cat_dict.keys())\n df = df[cat_vars+cont_vars]\n for v in cont_vars: df[v] = df[v].astype('float32')\n # transform cat_vars columns to categorcal\n # appply same ordered categories to df as in traning data (will make same .cat.codes even if some cat in test missing)\n for n in cat_vars: df[n] = pd.Categorical(df[n], categories=cat_dict[n], ordered=True)\n return df\n\ndef check_test_unknown_cats(tt, cat_dict):\n '''checks if test has cat not present in train, returns list of unknown cats'''\n new_cats=[]\n for n in cat_dict.keys():\n new_cat=set(tt[n].unique())-set(cat_dict[n])\n if new_cat: new_cats.append((n,list(new_cat)))\n return new_cats\n\ndef change_val(val, dic):\n '''will change val by dictionary dic if val in keys or return same val'''\n if val in dic.keys():\n return dic[val]\n else: return val\n\ndef subs_new_cat(tt, new_cat_subs):\n '''map categories in columns by new_cat_subs=[(col,{cat:new_cat,..}),..]'''\n for cat, dic in new_cat_subs:\n tt[cat] = tt[cat].map(lambda v: change_val(v, dic))\n\ndef split_by_val_idx(idxs, *a):\n \"\"\"\n copy from fastai\n Split each array passed as *a, to a pair of arrays like this (elements selected by idxs, the remaining elements)\n This can be used to split multiple arrays containing training data to validation and training set.\n\n :param idxs [int]: list of indexes selected\n :param a list: list of np.array, each array should have same amount of elements in the first dimension\n :return: list of tuples, each containing a split of corresponding array from *a.\n First element of each tuple is an array composed from elements selected by idxs,\n second element is an array of remaining elements.\n \"\"\"\n mask = np.zeros(len(a[0]),dtype=bool)\n mask[np.array(idxs)] = True\n return [(o[mask],o[~mask]) for o in a]\n\ndef get_cv_idxs(n, cv_idx=0, val_pct=0.2, seed=42):\n \"\"\" Get a list of index values for Validation set from a dataset\n \n Arguments:\n n : int, Total number of elements in the data set.\n cv_idx : int, starting index [idx_start = cv_idx*int(val_pct*n)] \n val_pct : (int, float), validation set percentage \n seed : seed value for RandomState\n \n Returns:\n list of indexes \n \"\"\"\n np.random.seed(seed)\n n_val = int(val_pct*n)\n idx_start = cv_idx*n_val\n idxs = np.random.permutation(n)\n return idxs[idx_start:idx_start+n_val]\n\ndef prepare_trn(df, cat_vars, cont_vars, sample_size=None, \n scale=True, 
scalecols=None,\n onehot=False, onehotecols=None, \n labelencode=True, encodecols=None,\n minmax_labelencoded=True):\n '''\n assigns categorical and numerical columns by cat_vars, cont_vars\n scales if scale all numerical columns given [scalecols]\n onehote encodses if onehot=True all [cat_vars] or [onehotecols]&[numerial]\n LabelEncodes if labelecodecat=True all still numerial cols. or [encodecols]&[numerical]\n if minmax_labelencoded=True apply MinMax scaler to LabelEncoded Columns\n \n '''\n scale_mapper = None\n cat_mapper = None\n\n if sample_size is not None: df.sample(sample_size)\n else: df = df.copy() \n \n #take [cat_vars+cont_vars] and convert cat_vars -> categorical cont_vars->'float32'\n #cat dict # original sorted categories list for cat_vars\n df, cat_dict=train_cat_var_types(df, cat_vars, cont_vars)\n\n # scale numerical or numerical from [scalecols] \n if scale: scale_mapper = scale_vars(df, columns=scalecols)\n # to apply to test data: _ = scale_vars(test, mapper=scale_mapper)\n ## direct transform: mapper.transform(df)\n\n # OneHot encode (dummies) of all categorical or given cols\n if onehot: \n onehotecols = cat_vars if onehotecols is None else onehotecols\n df=pd.get_dummies(df, columns=onehotecols) \n\n # encode categoricals from [encodecols] colunmns (all categorical if encodecols=None) \n # encode only cols with more then min_cat categories. other will be dummy encoded\n #if min_cat: encodecols = [n for n, cats in cat_dict.items() if len(cats)>2] \n if labelencode: cat_mapper = encode_cat(df, columns=encodecols)\n if minmax_labelencoded:\n minmaxcols = cat_mapper.transformed_names_\n\n # to apply to test data: _ = encode_cat(test, mapper=cat_mapper)\n ## direct transform: mapper.transform(df)\n ## inverse transform: encode_dict = {n[0]: e for n, e in mapper.features}\n # encode_dict['RSProppantType'].inverse_transform([0,1,2])\n # encode_dict['RSProppantType'].classes_ gives ordered classes list same as in inversetransform\n\n return df, cat_dict, scale_mapper, onehotecols, cat_mapper\n\ndef prepare_test(df, cat_dict, cont_vars, scale_mapper, onehotecols, cat_mapper, new_cat_subs=None):\n new_cats = check_test_unknown_cats(df, cat_dict)\n if new_cats: \n print('there are categories in test that were not in traini set')\n print(new_cats)\n print('consider passing subs new_cat_subs=[(column_name, {cat_in_test:cat_in_train}), ...]')\n return df\n\n #convert columns to cat and cont as in train\n # sedfing up same categories as in cat_dict taken from train ensures that dummies will generate all necessary columns\n df = test_apply_cats(df, cat_dict, cont_vars)\n\n #scale as in train\n if scale_mapper is not None: _ = scale_vars(df, mapper=scale_mapper)\n # to apply to test data: _ = scale_vars(test, mapper=scale_mapper)\n ## direct transform: mapper.transform(df)\n \n #encode cat columns as in train\n if cat_mapper is not None: _ = encode_cat(df, mapper=cat_mapper)\n\n # one hot encode as in train\n # because categories were set cat_dict taken from train ensures that dummies will generate all necessary columns\n if onehotecols is not None: df=pd.get_dummies(df, columns=onehotecols)\n return df", "id": "9711751", "language": "Python", "matching_score": 6.265348434448242, "max_stars_count": 1, "path": "fastml-arc/data.py" }, { "content": "from sklearn_pandas import DataFrameMapper, gen_features\nfrom sklearn.preprocessing import LabelEncoder, StandardScaler, MinMaxScaler\nfrom sklearn.model_selection import train_test_split\nimport warnings\nfrom sklearn.exceptions import 
DataConversionWarning\nfrom pandas.api.types import is_string_dtype, is_numeric_dtype\nfrom sklearn.base import BaseEstimator, TransformerMixin\nimport pandas as pd\nimport numpy as np\nfrom typing import List\npd.options.mode.chained_assignment = None # default='warn'\nfrom .data_helper import cut_minmax\n\n# MinMaxScaler takes only 2D arrays. to make it 1D\nclass Make2D(BaseEstimator, TransformerMixin):\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n return X.reshape(-1,1)\n \n# x=np.array([0.77778, 0.11111, 0.66667])\n# cc=MinMaxScaler1D().fit(x)\n# print(cc.transform(x), cc.inverse_transform(cc.transform(x)))\n\ndef rename_rare(df, cols=None, thr=0.01, dropna=True, rareName='RARE', verbatim=False):\n '''IN PLACE modification to df\n rename rare values in categorical cols to \"RARE\"'''\n if cols is None: cols = df.columns[df.dtypes == \"object\"]\n if verbatim: print('renamed for next columns: ', end=\"\", flush=True)\n for col in df[cols].columns:\n if df[col].isna().sum()>0: \n print(f'ABORT: col:{col} has NaN')\n return\n counts = df[col].value_counts(dropna=dropna)\n d = counts/counts.sum()\n if verbatim and len(d[d<thr])>0: print(f\"{col}, \", end=\"\", flush=True)\n df[col] = df[col].apply(lambda x: rareName if d.loc[x] <= thr else x)\n print('', end='\\n')\n\ndef scale_vars(df, mapper=None, columns=None, inplace=True):\n '''from fastai.structured.py\n scales inplace all numeric cols or columns, returns mapper'''\n warnings.filterwarnings('ignore', category=DataConversionWarning)\n cols = df.columns if columns is None else columns\n\n if mapper is None:\n map_f = [([n], StandardScaler()) for n in cols if is_numeric_dtype(df[n])]\n mapper = DataFrameMapper(map_f, input_df=True, df_out=True).fit(df)\n if inplace: \n df[mapper.transformed_names_] = mapper.transform(df)\n return mapper\n \ndef encode_cat(df, mapper=None, columns=None, minmax_encoded=False, inplace=True):\n '''maps categorical vars to numbers, returns mapper\n to apply to test data: _ = scale_vars(test, scale_mapper)\n direct transform: mapper.transform(df)\n inverse transform: encode_dict = {n[0]: e for n, e in mapper.features}\n encode_dict['RSProppantType'].inverse_transform([0,1,2])\n encode_dict['RSProppantType'].classes_ gives ordered classes list same as in inversetransform\n or if MinMax applyed\n codes=encode_dict['RSSubPlay'][1].inverse_transform([1,0.1,0.2]).round().flatten().astype(int)\n encode_dict['RSSubPlay'][0].inverse_transform(codes)'''\n\n warnings.filterwarnings('ignore', category=DataConversionWarning)\n cols = df.columns if columns is None else columns\n cols = [n for n in cols if not is_numeric_dtype(df[n])]\n if len(cols)==0: return mapper\n\n if mapper is None:\n if minmax_encoded:\n map_f = gen_features(cols, [LabelEncoder, Make2D, MinMaxScaler])\n else:\n map_f = gen_features(cols, [LabelEncoder])\n mapper = DataFrameMapper(map_f, input_df=True, df_out=True).fit(df)\n if inplace: \n df[mapper.transformed_names_] = mapper.transform(df)\n return mapper\n\ndef train_cat_var_types(df, cat_vars, cont_vars):\n '''assign 'float32' and 'category' types to columns, \n returns df, dict{col_name: [cat list]}'''\n for v in cont_vars: df[v] = df[v].astype('float32', copy=False) \n for v in cat_vars: df[v] = df[v].astype('category', copy=False).cat.as_ordered() \n cat_dict = {n: df[n].cat.categories for n in cat_vars}\n # df[n].cat.codes gives codes\n return df, cat_dict\n\ndef test_apply_cats(df, cat_dict, cont_vars):\n #TODO: rtename to not confuce with pytest\n '''set 
categorical and continues vars using given dict'''\n cat_vars = list(cat_dict.keys())\n df = df[cat_vars+cont_vars]\n for v in cont_vars: df[v] = df[v].astype('float32')\n # transform cat_vars columns to categorcal\n # appply same ordered categories to df as in traning data (will make same .cat.codes even if some cat in test missing)\n for n in cat_vars: df[n] = pd.Categorical(df[n], categories=cat_dict[n], ordered=True)\n return df\n\ndef check_test_unknown_cats(tt, cat_dict):\n '''checks if test has cat not present in train, returns list of unknown cats'''\n new_cats=[]\n for n in cat_dict.keys():\n new_cat=set(tt[n].unique())-set(cat_dict[n])\n if new_cat: new_cats.append((n,list(new_cat)))\n return new_cats\n\ndef change_val(val, dic):\n '''will change val by dictionary dic if val in keys or return same val'''\n if val in dic.keys():\n return dic[val]\n else: return val\n\ndef subs_new_cat(tt, new_cat_subs):\n '''map categories in columns by new_cat_subs=[(col,{cat:new_cat,..}),..]'''\n for cat, dic in new_cat_subs:\n tt[cat] = tt[cat].map(lambda v: change_val(v, dic))\n\ndef split_by_val_idx(idxs, *a):\n \"\"\"\n copy from fastai\n Split each array passed as *a, to a pair of arrays like this (elements selected by idxs, the remaining elements)\n This can be used to split multiple arrays containing training data to validation and training set.\n\n :param idxs [int]: list of indexes selected\n :param a list: list of np.array, each array should have same amount of elements in the first dimension\n :return: list of tuples, each containing a split of corresponding array from *a.\n First element of each tuple is an array composed from elements selected by idxs,\n second element is an array of remaining elements.\n \"\"\"\n mask = np.zeros(len(a[0]),dtype=bool)\n mask[np.array(idxs)] = True\n return [(o[mask],o[~mask]) for o in a]\n\ndef val_train_idxs(n, val_pct=0.2, seed=42):\n#def get_cv_idxs(n, cv_idx=0, val_pct=0.2, seed=42):\n \"\"\" Get a list of index values for Validation and Traning set from a dataset\n Arguments:\n n : int, Total number of elements in the data set.\n cv_idx : int, starting index [idx_start = cv_idx*int(val_pct*n)] \n val_pct : (int, float), validation set percentage \n seed : seed value for RandomState \n Returns:\n list of indexes val_inx, trn_inx \n \"\"\"\n np.random.seed(seed)\n n_val = int(val_pct*n)\n #idx_start = cv_idx*n_val\n idxs = np.random.permutation(n)\n # np.random.permutation has two differences from np.random.shuffle:\n # if passed an array, it will return a shuffled copy of the array; np.random.shuffle shuffles the array inplace\n # if passed an integer, it will return a shuffled range i.e. np.random.shuffle(np.arange(n))\n #return idxs[idx_start:idx_start+n_val], idxs[idx_start+n_val,:]\n val = idxs[:n_val]\n trn = idxs[n_val:]\n return val, trn\n\ndef prepare_trn(df, cat_vars, cont_vars, sample_size=None, \n scale=True, scalecols=None,\n onehot=False, onehotecols=None, \n labelencode=True, encodecols=None,\n minmax_encoded=False, **argkvds):\n '''\n assigns categorical and numerical columns by cat_vars, cont_vars\n scales if scale all numerical columns given [scalecols]\n onehote encodses if onehot=True all [cat_vars] or [onehotecols]&[numerial]\n LabelEncodes if labelecodecat=True all still numerial cols. 
or [encodecols]&[numerical]\n if minmax_labelencoded=True apply MinMax scaler to LabelEncoded Columns '''\n scale_mapper = None\n cat_mapper = None\n\n if sample_size is not None: df = df.sample(sample_size).copy()\n else: df = df.copy() \n \n #take [cat_vars+cont_vars] and convert cat_vars -> categorical cont_vars->'float32'\n #cat dict # original sorted categories list for cat_vars\n df, cat_dict=train_cat_var_types(df, cat_vars, cont_vars)\n\n # scale numerical or numerical from [scalecols] \n if scale: scale_mapper = scale_vars(df, columns=scalecols)\n # to apply to test data: _ = scale_vars(test, mapper=scale_mapper)\n ## direct transform: mapper.transform(df)\n\n # OneHot encode (dummies) of all categorical or given cols\n if onehot: \n onehotecols = cat_vars if onehotecols is None else onehotecols\n df=pd.get_dummies(df, columns=onehotecols) \n\n # encode categoricals from [encodecols] colunmns (all categorical if encodecols=None) \n # if minmax_encoded applay MinMaxScaler to encoded columns\n if labelencode: cat_mapper = encode_cat(df, columns=encodecols, minmax_encoded=minmax_encoded)\n\n return df, cat_dict, scale_mapper, onehotecols, cat_mapper\n\ndef prepare_test(df, cat_dict, cont_vars, scale_mapper, onehotecols, cat_mapper, new_cat_subs=None):\n new_cats = check_test_unknown_cats(df, cat_dict)\n if new_cats: \n print('there are categories in test that were not in traini set')\n print(new_cats)\n print('consider passing subs new_cat_subs=[(column_name, {cat_in_test:cat_in_train}), ...]')\n return df\n\n #convert columns to cat and cont as in train\n # sedfing up same categories as in cat_dict taken from train ensures that dummies will generate all necessary columns\n df = test_apply_cats(df, cat_dict, cont_vars)\n\n #scale as in train\n if scale_mapper is not None: _ = scale_vars(df, mapper=scale_mapper)\n # to apply to test data: _ = scale_vars(test, mapper=scale_mapper)\n ## direct transform: mapper.transform(df)\n \n #encode cat columns as in train\n if cat_mapper is not None: _ = encode_cat(df, mapper=cat_mapper)\n\n # one hot encode as in train\n # because categories were set cat_dict taken from train ensures that dummies will generate all necessary columns\n if onehotecols is not None: df=pd.get_dummies(df, columns=onehotecols)\n return df\n\ndef rename_categories_F(target:str, combine:List[str], cat_idx=None):\n '''returns function which will rename categories\n catIdx is dictionary [cat] -> index'''\n combineLst = combine if cat_idx is None else [cat_idx[cat] for cat in combine]\n targetVal = target if cat_idx is None else cat_idx[target]\n return (lambda x: targetVal if x in combineLst else x)\n\ndef cat_to_idx(category, cat_dict):\n '''inverse category encoding dict, returns dict(category->index)'''\n return {name: idx for idx, name in enumerate(cat_dict[category])}\n\ndef set_cat_columns(df, cols):\n '''change IN PLACE:set columns to catgory'''\n for col in cols: df[col] = df[col].astype('category', copy=False)\n \ndef rename_cat_df(df, category, target_combine, cat_dict=None):\n '''rename categories in df[catgory] from [combine] to target\n target_combine = (target:str, combine:List)'''\n target, combine = target_combine\n cat_idx = None if cat_dict is None else cat_to_idx(category, cat_dict)\n renameF = rename_categories_F(target, combine, cat_idx)\n df = df.copy()\n df[category] = df[category].apply(renameF)\n return df\n\ndef equal_size_cat_idx(df, column, categories, n, concat=True, cat_dict=None, random_state=None, verbose=False)->List:\n '''index list where 
given categories represented equal n times (or smaller if not enougth) data points'''\n cat_idx = None if cat_dict is None else cat_to_idx(column, cat_dict)\n catList = categories if cat_idx is None else [cat_idx[cat] for cat in categories]\n combined_ind = []\n for cat in catList:\n condition = (df[column]==cat)\n nmax = condition.sum() # number of category values\n if verbose: print(f'{cat_dict[column][cat]}: {nmax}->{min(n, nmax)}')\n combined_ind.append(df[condition].sample(min(n, nmax), random_state=random_state).index)\n if concat: return np.concatenate(combined_ind)\n return dict(zip(categories, combined_ind))\n\ndef split_ice_by_categories(ice, fixing_wells_compl, feature_name, category, sub_categories, cat_dict, n_sub=500, use_limits=True):\n ''' use_limits will limit each category ice by the feature name valueas in this category '''\n apisDict = equal_size_cat_idx(fixing_wells_compl, category, sub_categories, n_sub, concat=False, cat_dict=cat_dict, random_state=54, verbose=True)\n icelines = {}\n for cat, apiList in apisDict.items():\n if use_limits:\n feat_vals = fixing_wells_compl.loc[apiList, feature_name]\n ice_columns= cut_minmax(ice.columns, feat_vals.min(), feat_vals.max())\n icelines[cat] = ice.loc[apiList, ice_columns]\n else: icelines[cat] = ice.loc[apiList]\n return icelines\n\ndef train_test_split_may0test(df, test_size=0, shuffle=True, random_state=None):\n '''same train test split but also supports 0 test->just shuffles'''\n if test_size==0:\n if shuffle: return df.sample(frac=1, random_state=random_state), pd.DataFrame()\n else: return df, pd.DataFrame()\n else: return train_test_split(df, test_size=test_size, shuffle=shuffle, random_state=random_state)", "id": "6011363", "language": "Python", "matching_score": 3.937002420425415, "max_stars_count": 1, "path": "data.py" }, { "content": "# utils for models analysis\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import mean_squared_error, r2_score, explained_variance_score\nfrom matplotlib import pyplot as plt\nimport warnings\nfrom sklearn.exceptions import DataConversionWarning\n#import xgboost as xgb\npd.options.mode.chained_assignment = None # default='warn'\n\nfrom collections.abc import Iterable\nfrom functools import partial, update_wrapper\n\ndef partial_w(func, *args, **kwargs):\n '''partial functoins with correct dict, name, doc'''\n part = partial(func, *args, **kwargs)\n update_wrapper(part, func)\n return part\n\n# def listify(p=None):\n# \"Make `p` listy\"\n# if p is None: p=[]\n# elif isinstance(p, str): p = [p]\n# elif not isinstance(p, Iterable): p = [p]\n# else:\n# try: a = len(p)\n# except: p = [p]\n# return list(p)\n\n# def listify(p:OptListOrItem=None, q:OptListOrItem=None):\ndef listify(p=None, q=None):\n \"Make `p` listy and the same length as `q`.\"\n if p is None: p=[]\n elif isinstance(p, str): p = [p]\n elif not isinstance(p, Iterable): p = [p]\n #Rank 0 tensors in PyTorch are Iterable but don't have a length.\n else:\n try: a = len(p)\n except: p = [p]\n n = q if type(q)==int else len(p) if q is None else len(q)\n if len(p)==1: p = p * n\n assert len(p)==n, f'List len mismatch ({len(p)} vs {n})'\n return list(p)\n\ndef drop_inf(df):\n '''removes np.inf values'''\n return df.replace([np.inf, -np.inf], np.nan).dropna()\n\ndef exp_EV(y_pred, y_true):\n '''explained variance with exponent on values\n y->exp(y): score=1−Var[y^−y]/Var[y] '''\n return explained_variance_score(np.exp(y_true), np.exp(y_pred))\n\ndef exp_R2(y_pred, y_true):\n '''R^2 (coefficient of determination)\n 
y->exp(y): score=1−<(y^−y)^2>/<(y-<y>)^2] '''\n return r2_score(np.exp(y_true), np.exp(y_pred))\n \ndef rmspe(y_pred, targ):\n '''root mean square of percent error'''\n pct_var = (targ - y_pred)/targ\n return np.sqrt((pct_var**2).mean())\n\ndef exp_rmspe(y_pred, targ):\n '''root mean square of percent error of exp()'''\n return rmspe(np.exp(y_pred), np.exp(targ))\n\ndef mape(y_pred, targ):\n '''mean absolute percent error'''\n pct_var = (targ - y_pred)/targ\n return np.abs(pct_var).mean()\n\ndef exp_mape(y_pred, targ):\n '''mean absolute percent error of exp()'''\n return mape(np.exp(y_pred), np.exp(targ))\n\ndef pe(pred, targ):\n '''percent error'''\n pct_var = (pred-targ)/targ\n #pct_var = drop_inf(pct_var)\n return pct_var\n\ndef exp_pe(pred, targ):\n '''percent error of exp()'''\n return pe(np.exp(pred), np.exp(targ))\n\ndef ape(pred, targ):\n '''absolute percent error'''\n return np.abs(pe(pred, targ))\n\ndef exp_ape(pred, targ):\n '''absolute percent error'''\n return ape(np.exp(pred), np.exp(targ))\n\ndef metric_r2(rf, xt, yt):\n '''returns r2_score(yt, yp)'''\n # if xgboost:\n # xt = xgb.DMatrix(xt, label=yt, feature_names=xt.columns.tolist())\n yp = rf.predict(xt)\n return r2_score(yt, yp)\n\ndef shap_vals_df(shapExplainer, df):\n shapVals = shapExplainer.shap_values(df)\n return pd.DataFrame(shapVals, columns=df.columns, index=df.index)\n\ndef shap_values(X:pd.DataFrame, shapExplainer, sort=True, columns=None):\n '''return shap values of given columns sorted\n shapExplainer = shap.TreeExplainer(model)\n x = [api x features]'''\n columns = X.columns if columns is None else columns\n # shapVals = shapExplainer.shap_values(X)\n # shap_df = pd.DataFrame(shapVals, index=X.index, columns=X.columns).T\n shap_df = shap_vals_df(shapExplainer, X).T\n shap_df['mean_abs'] = shap_df.abs().mean(axis=1)\n shap_df_col = shap_df.loc[columns]\n if sort: shap_df_col.sort_values('mean_abs', inplace=True, ascending=False)\n shap_df_col.drop(columns='mean_abs', inplace=True)\n return shap_df_col\n \ndef permutation_importances(rf, X_train, y_train, metric, columns=None):\n #baseline = metric(rf, X_train, y_train, xgboost=xgboost)\n columns = X_train.columns if columns is None else columns\n baseline = metric(rf, X_train, y_train)\n imp = []\n for col in columns:\n save = X_train[col].copy()\n X_train[col] = np.random.permutation(X_train[col])\n m = metric(rf, X_train, y_train)\n X_train[col] = save\n imp.append(baseline - m)\n return np.array(imp)\n\ndef plot_permutation_importances(tree, X_train, y_train, metric, feature_importance=None, vert_plot=True, columns=None, ax=None):\n cols = X_train.columns.tolist() if columns is None else columns\n # Plot feature importance\n #feature_importance = permutation_importances(tree, X_train, y_train, metric, xgboost=xgboost)\n if feature_importance is None:\n feature_importance = permutation_importances(tree, X_train, y_train, metric)\n importance_df =pd.DataFrame({'Splits': feature_importance,'Feature':cols})\n\n\n if not vert_plot:\n if ax is None: fig, ax = plt.subplots(figsize=(8,15))\n importance_df.sort_values(by='Splits', inplace=True, ascending=True)\n importance_df.plot.barh(x='Feature', legend=None, ax=ax)\n else:\n if ax is None: fig, ax = plt.subplots(figsize=(12,3))\n importance_df.sort_values(by='Splits', inplace=True, ascending=False)\n importance_df.plot.bar(x='Feature', legend=None, ax=ax),\n ax.set_title('Permutation Importance')\n return ax\n\ndef plot_tree_importance(cols, tree, vert_plot=True):\n fi = 
pd.DataFrame({'imp':tree.feature_importances_}, index=cols)\n fi.imp = 100*fi.imp/fi.imp.sum()\n if not vert_plot:\n fi.sort_values(by='imp', inplace=True)\n fi.plot.barh(figsize=(5,12))\n plt.xlabel('Tree: Variable Importance')\n else: \n fi.sort_values(by='imp', inplace=True, ascending=False)\n fi.plot.bar(figsize=(14,4))\n plt.ylabel('Relative Importance')\n plt.title('Tree: Variable Importance')\n\ndef plot_pred_vs_targ(x, y, figsize=(5,5), ax=None, pp=0.3, ax_names=None):\n xy_min = min(x.max(), y.max())\n if ax is None:\n fig, ax = plt.subplots(figsize=figsize)\n ax.set_aspect('equal')\n ax.scatter(x, y, s=8, c='k', alpha=0.5)\n ax.plot([0, xy_min],[0, xy_min], 'r')\n ax.plot([0,xy_min*(1+pp)],[0,xy_min*(1+pp)*(1-pp)], ls='--', c='b')\n ax.plot([0,xy_min*(1-pp)],[0,xy_min*(1-pp)*(1+pp)], ls='--', c='b')\n if ax_names: \n ax.set_xlabel(ax_names[0]); ax.set_ylabel(ax_names[1])\n # plt.show()\n return ax\n\ndef plot_error_percentile_exp(y_pred, y_val):\n errors = pd.DataFrame(exp_ape(y_pred, y_val).values*100, columns=['absolute error, %']).clip(upper=100)\n errors.sort_values(by='absolute error, %', inplace=True)\n errors['Percentile'] = np.linspace(0,100,len(errors))\n ax=errors.plot.scatter(x='absolute error, %', y='Percentile' )\n return ax\n\ndef calc_potential(datain:pd.DataFrame, fixing_wells_compl:pd.DataFrame, predict, \n completion_features, location_transform, latLon=['Longitude_Mid', 'Latitude_Mid'], retunFull=False):\n ''' ===========Reservoir potential=================\n for wells in locations from <datain> set <completion_fetures> from <fixing_wells_compl>\n applies <location_transform> if <completions_features> alter <location_features>\n calculated predict(result), returns df[[latLon], ..comp_api...., mean] ##df[[latLon], ..comp_api...., mean]\n\n EXAMPLE\n location_features= ['Longitude_Mid', 'Latitude_Mid', 'TVD_FT', 'Elevation_FT', 'OilGravity_API', 'WellPadDirection', 'RSSubPlay', 'RSInterval' ,'Formation']\n completion_features =['FluidIntensity_BBLPerFT', 'ProppantIntensity_LBSPerFT', 'ProppantLoading_LBSPerGAL', 'Proppant_LBS', 'RSFracJobType',\n 'RSProdWellType', 'RSProppantType', 'TotalFluidPumped_BBL', 'MD_FT', 'PerfInterval_FT', 'FirstProdYear', 'RSOperator']\n \n def predict_exp(df): return np.exp(model.predict(df))\n def location_transform(df): df['MD_FT'] = df['PerfInterval_FT']+1.05*df['TVD_FT']\n '''\n potent_all = datain[latLon]\n for api, fixing_well in fixing_wells_compl.iterrows():\n data = datain.copy()\n #set completion to all locations\n for feat in completion_features: data[feat] = fixing_well[feat]\n location_transform(data)\n potent_all[f'{api}'] = predict(data)\n \n potent_all['mean']=potent_all.iloc[:,2:].T.mean()\n potent_all['median']=potent_all.iloc[:,2:].T.median()\n if retunFull: return potent_all\n return potent_all[latLon+['mean', 'median']]\n #return potent_all\n\ndef ice_lines(data, feature_grid, feature_name, data_transformer, predict):\n '''calculate ice linese inteating over fature through feature grid \n Example of data transformer\n def data_transformer_from_feature(feature_name): \n if feature_name=='FluidIntensity_BBLPerFT':\n def data_transformer(df):\n df['TotalFluidPumped_BBL'] = df['FluidIntensity_BBLPerFT']*df['PerfInterval_FT']\n # keep same proppant loading\n df['ProppantIntensity_LBSPerFT'] = df['FluidIntensity_BBLPerFT']*df['ProppantLoading_LBSPerGAL']*42\n df['Proppant_LBS'] = df['ProppantIntensity_LBSPerFT']*df['PerfInterval_FT']\n\n if feature_name=='ProppantIntensity_LBSPerFT':\n def data_transformer(df):\n 
df['Proppant_LBS'] = df['ProppantIntensity_LBSPerFT']*df['PerfInterval_FT']\n # keep same proppant loading\n df['FluidIntensity_BBLPerFT'] = df['ProppantIntensity_LBSPerFT']/df['ProppantLoading_LBSPerGAL']/42\n df['TotalFluidPumped_BBL'] = df['FluidIntensity_BBLPerFT']*df['PerfInterval_FT']\n\n return data_transformer\n '''\n ice_lines = pd.DataFrame(columns=feature_grid, index=data.index)\n # iterate over feature values\n for feature in feature_grid:\n points = data.copy()\n points[feature_name] = feature\n data_transformer(points)\n ice_lines[feature] = predict(points)\n return ice_lines\n\ndef pdp_map_iterCompl(fixing_wells_location, fixing_wells_compl, completion_features, feature_name, \n predict, func_dict, feature_grid=None, gridNum=20, \n latlon=['Longitude_Mid', 'Latitude_Mid'], returnOut=False):\n ''' needs completion_features\n # iterate over completions (faster if more locations than completions)\n out = [[locatons] X [grdpoints] X [completions]]\n returns pdp per location and its amplitudes\n '''\n location_transform, data_transformer = func_dict['location_transform'], func_dict['data_transformer']\n if feature_grid is None: \n minF, maxF = min(fixing_wells_compl[feature_name]), max(fixing_wells_compl[feature_name])\n feature_grid = np.linspace(minF, maxF, gridNum)\n\n out = np.zeros((len(fixing_wells_location), len(feature_grid), len(fixing_wells_compl)))\n # iterate over completions (faster if more locations than completions)\n for compl_idx, api in enumerate(fixing_wells_compl.index):\n data = fixing_wells_location.copy()\n #set fixed completion to all locations\n for feat in completion_features: data[feat] = fixing_wells_compl.loc[api, feat]\n location_transform(data)\n ice = ice_lines(data, feature_grid, feature_name, data_transformer, predict)\n out[:,:, compl_idx] = ice.values\n\n out_pdp = pd.DataFrame(out.mean(axis=2), columns=feature_grid, index=fixing_wells_location.index)\n out_med = pd.DataFrame(np.median(out, axis=2), columns=feature_grid, index=fixing_wells_location.index)\n out_mm = fixing_wells_location[latlon]\n out_mm['max_mean'] = out_pdp.max(axis=1)\n out_mm['min_mean'] = out_pdp.min(axis=1)\n out_mm['mean_mean'] = out_pdp.mean(axis=1)\n out_mm['max_median'] = out_med.max(axis=1)\n out_mm['min_median'] = out_med.min(axis=1)\n out_mm['median_median'] = out_med.median(axis=1)\n # for comnpatibility\n out_mm['abs'] = out_mm['max_mean'] - out_mm['min_mean']\n out_mm['rel'] = out_mm['abs'] / out_mm['min_mean']\n\n if returnOut: return out\n else: return out_pdp, out_med, out_mm\n\ndef pdp_map_iterLoc(fixing_wells_location, fixing_wells_compl, location_features, feature_name, \n feature_grid, predict, location_transform, data_transformer, \n latlon=['Longitude_Mid', 'Latitude_Mid'], returnOut=False):\n ''' needs location_features\n # # iterate over locations faster f more completions than locations\n out = [[completions] X [grdpoints] X [locatons]]\n returns pdp per location and its amplitudes\n '''\n out = np.zeros((len(fixing_wells_compl), len(feature_grid), len(fixing_wells_location)))\n # iterate over locations faster f more completions than locations\n for well_idx, api in enumerate(fixing_wells_location.index):\n ice = ice_fixed_location(fixing_wells_location.loc[[api]], fixing_wells_compl, location_features, \n feature_name, feature_grid, predict, location_transform, data_transformer)\n out[:,:, well_idx] = ice\n\n out_pdp = pd.DataFrame(out.T.mean(axis=2), columns=feature_grid, index=fixing_wells_location.index)\n out_mm = fixing_wells_location[latlon]\n 
out_mm['abs'] = out_pdp.max(axis=1) - out_pdp.min(axis=1)\n out_mm['rel'] = out_mm['abs'] / out_pdp.min(axis=1)\n if returnOut: return out\n return out_pdp, out_mm\n\ndef ice_fixed_location(fixing_well_location, fixing_wells_compl, location_features, feature_name, \n predict, funcs_dict, feature_grid=None, gridNum=40):\n ''' calculate ice linece for well fixining location \n returned df = [completion] x [feature_grid]\n EXAMPLE of funcs in funcs_dict\n def get_location_transform(df, alpha=1.05):\n # changes df in place !!\n # alpha = (MD-Perf_interval)/TVD'\n df['MD_FT'] = df['PerfInterval_FT']+alpha*df['TVD_FT']\n \n def data_transformer_from_feature(feature_name): \n if feature_name=='FluidIntensity_BBLPerFT':\n def data_transformer(df):\n # change in place\n df['TotalFluidPumped_BBL'] = df['FluidIntensity_BBLPerFT']*df['PerfInterval_FT']\n # keep same proppant loading\n df['ProppantIntensity_LBSPerFT'] = df['FluidIntensity_BBLPerFT']*df['ProppantLoading_LBSPerGAL']*42\n df['Proppant_LBS'] = df['ProppantIntensity_LBSPerFT']*df['PerfInterval_FT']\n\n if feature_name=='ProppantIntensity_LBSPerFT':\n def data_transformer(df):\n # change in place\n df['Proppant_LBS'] = df['ProppantIntensity_LBSPerFT']*df['PerfInterval_FT']\n # keep same proppant loading\n df['FluidIntensity_BBLPerFT'] = df['ProppantIntensity_LBSPerFT']/df['ProppantLoading_LBSPerGAL']/42\n df['TotalFluidPumped_BBL'] = df['FluidIntensity_BBLPerFT']*df['PerfInterval_FT']\n return data_transformer '''\n location_transform, data_transformer = funcs_dict['location_transform'], funcs_dict['data_transformer']\n data = fixing_wells_compl.copy()\n # assign all completions same locations\n for feat in location_features: data[feat] = fixing_well_location[feat].values[0]\n location_transform(data)\n\n if feature_grid is None: \n minF, maxF = min(fixing_wells_compl[feature_name]), max(fixing_wells_compl[feature_name])\n feature_grid = np.linspace(minF, maxF, gridNum)\n\n return ice_lines(data, feature_grid, feature_name, data_transformer, predict)\n\ndef cost_lines(data, feature_grid, feature_name, fd, eco):\n '''calculate cost lines iteating over feture through feature grid\n EXAMPLE\n def get_points_cost(points, eco):\n #set cost for \"point\": comepltion with fixed features (and location)\n point_cost = eco['cost_proppant_LB']*points['ProppantIntensity_LBSPerFT']\n point_cost += (eco['cost_water_BBL']+eco['cost_chemicals_BBL']\n +eco['cost_service_BBL'])*points['FluidIntensity_BBLPerFT']\n point_cost += eco['cost_drilling_FT']*points['MD_FT']/points['PerfInterval_FT']\n point_cost += eco['cost_completion_FT']\n return point_cost\n \n def get_full_cost(cost_per_ft, points): return cost_per_ft*points['PerfInterval_FT']'''\n data_transformer, points_cost, full_cost = fd['data_transformer'], fd['points_cost'], fd['full_cost']\n \n cost_ft_lines = pd.DataFrame(columns=feature_grid, index=data.index)\n cost_lines = pd.DataFrame(columns=feature_grid, index=data.index)\n\n # iterate over feature values\n for feature in feature_grid:\n points = data.copy()\n points[feature_name] = feature\n data_transformer(points)\n cost_per_ft = points_cost(points, eco)\n cost_ft_lines[feature] = cost_per_ft\n cost_lines[feature] = full_cost(cost_per_ft, points)\n\n return {'$/ft':cost_ft_lines, '$':cost_lines}\n\ndef cost_fixed_location(fixing_well_location, fixing_wells_compl, location_features, feature_name, funcs_dict,\n feature_grid=None, gridNum=40, eco=None):\n ''' calculate cost lines for well fixining location \n returned df = [completion] x 
[feature_grid]\n '''\n location_transform = funcs_dict['location_transform']\n\n if eco is None:\n eco ={'cost_drilling_FT': 120., # per total drilled ft\n 'cost_completion_FT': 200., # per laterla ft\n 'cost_water_BBL': 10.0,\n 'cost_chemicals_BBL': 1.0,\n 'cost_proppant_LB': 0.1,\n 'cost_service_BBL': 4.0,\n 'discount_rate': 0.06,\n 'price_BOE': 70.0} # in $ per BOE\n #========================================\n data = fixing_wells_compl.copy()\n # assign all completions same locations\n for feat in location_features: data[feat] = fixing_well_location[feat].values[0]\n location_transform(data)\n if feature_grid is None: \n minF, maxF = min(fixing_wells_compl[feature_name]), max(fixing_wells_compl[feature_name])\n feature_grid = np.linspace(minF, maxF, gridNum)\n return cost_lines(data, feature_grid, feature_name, funcs_dict, eco), eco\n\ndef economics(costD, ice_lines, eco):\n cost_ft_lines, cost_lines = costD['$/ft'], costD['$']\n cost_BOE = 1000*cost_ft_lines/ice_lines # cost/BOE = 1000* (cost/ft) / (BOE/1000ft)\n return cost_BOE\n\n# NPV1y = eco['price_BOE']*fullice_lines/(1.0+eco['discount_rate']) - fullcost_lines\n# NPV1yFT = eco['price_BOE']*ice_lines/(1.0+eco['discount_rate'])/1000 - cost_lines\n\n\n# def ice_fixed_location1(fixing_well_location, fixing_wells_compl, location_features, feature_name, \n# predict, location_transform, data_transformer, feature_grid=None, gridNum=40):\n# ''' calculate ice linece for well fixining location \n# returned df = [completion] x [feature_grid]\n# '''\n# data = fixing_wells_compl.copy()\n# # assign all completions same locations\n# for feat in location_features: data[feat] = fixing_well_location[feat].values[0]\n# location_transform(data)\n\n# if feature_grid is None: \n# minF, maxF = min(fixing_wells_compl[feature_name]), max(fixing_wells_compl[feature_name])\n# feature_grid = np.linspace(minF, maxF, gridNum)\n\n# return ice_lines(data, feature_grid, feature_name, data_transformer, predict)\n\n# def pdp_map_iterCompl1(fixing_wells_location, fixing_wells_compl, completion_features, feature_name, \n# feature_grid, predict, location_transform, data_transformer, \n# latlon=['Longitude_Mid', 'Latitude_Mid'], returnOut=False):\n# ''' needs completion_features\n# # iterate over completions (faster if more locations than completions)\n# out = [[locatons] X [grdpoints] X [completions]]\n# returns pdp per location and its amplitudes\n# '''\n# out = np.zeros((len(fixing_wells_location), len(feature_grid), len(fixing_wells_compl)))\n# # iterate over completions (faster if more locations than completions)\n# for compl_idx, api in enumerate(fixing_wells_compl.index):\n# data = fixing_wells_location.copy()\n# #set fixed completion to all locations\n# for feat in completion_features: data[feat] = fixing_wells_compl.loc[api, feat]\n# location_transform(data)\n# ice = ice_lines(data, feature_grid, feature_name, data_transformer, predict)\n# out[:,:, compl_idx] = ice.values\n\n# out_pdp = pd.DataFrame(out.mean(axis=2), columns=feature_grid, index=fixing_wells_location.index)\n# out_mm = fixing_wells_location[latlon]\n# out_mm['abs'] = out_pdp.max(axis=1) - out_pdp.min(axis=1)\n# out_mm['rel'] = out_mm['abs'] / out_pdp.min(axis=1)\n# if returnOut: return out_pdp, out_mm, out\n# else: return out_pdp, out_mm", "id": "12515555", "language": "Python", "matching_score": 5.726792812347412, "max_stars_count": 1, "path": "utils.py" }, { "content": "# utils for models analysis\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import mean_squared_error, 
r2_score, explained_variance_score\nfrom matplotlib import pyplot as plt\nimport warnings\nfrom sklearn.exceptions import DataConversionWarning\n#import xgboost as xgb\n\n\ndef drop_inf(df):\n '''removes np.inf values'''\n return df.replace([np.inf, -np.inf], np.nan).dropna()\n\ndef rmspe(y_pred, targ):\n '''root mean square of percent error'''\n pct_var = (targ - y_pred)/targ\n return np.sqrt((pct_var**2).mean())\n\ndef exp_rmspe(y_pred, targ):\n '''root mean square of percent error of exp()'''\n return rmspe(np.exp(y_pred), np.exp(targ))\n\ndef mape(y_pred, targ):\n '''mean absolute percent error'''\n pct_var = (targ - y_pred)/targ\n return np.abs(pct_var).mean()\n\ndef exp_mape(y_pred, targ):\n '''mean absolute percent error of exp()'''\n return mape(np.exp(y_pred), np.exp(targ))\n\ndef pe(pred, targ):\n '''percent error'''\n pct_var = (pred-targ)/targ\n #pct_var = drop_inf(pct_var)\n return pct_var\n\ndef exp_pe(pred, targ):\n '''percent error of exp()'''\n return pe(np.exp(pred), np.exp(targ))\n\ndef ape(pred, targ):\n '''absolute percent error'''\n return np.abs(pe(pred, targ))\n\ndef exp_ape(pred, targ):\n '''absolute percent error'''\n return ape(np.exp(pred), np.exp(targ))\n\n#def metric_r2(rf, xt, yt, xgboost=False):\ndef metric_r2(rf, xt, yt):\n '''returns r2_score(yt, yp)'''\n # if xgboost:\n # xt = xgb.DMatrix(xt, label=yt, feature_names=xt.columns.tolist())\n yp = rf.predict(xt)\n return r2_score(yt, yp)\n\n#def permutation_importances(rf, X_train, y_train, metric, xgboost=False):\ndef permutation_importances(rf, X_train, y_train, metric):\n #baseline = metric(rf, X_train, y_train, xgboost=xgboost)\n baseline = metric(rf, X_train, y_train)\n imp = []\n for col in X_train.columns:\n save = X_train[col].copy()\n X_train[col] = np.random.permutation(X_train[col])\n #m = metric(rf, X_train, y_train, xgboost=xgboost)\n m = metric(rf, X_train, y_train)\n X_train[col] = save\n imp.append(baseline - m)\n return np.array(imp)\n\n#def plot_permutation_importances(tree, X_train, y_train, metric, vert_plot=True, xgboost=False):\ndef plot_permutation_importances(tree, X_train, y_train, metric, vert_plot=True):\n cols = X_train.columns.values\n # Plot feature importance\n #feature_importance = permutation_importances(tree, X_train, y_train, metric, xgboost=xgboost)\n feature_importance = permutation_importances(tree, X_train, y_train, metric)\n importance_df =pd.DataFrame({'Splits': feature_importance,'Feature':cols.tolist()})\n\n if not vert_plot:\n importance_df.sort_values(by='Splits', inplace=True, ascending=True)\n importance_df.plot.barh(x='Feature', figsize=(8,15))\n plt.show()\n else: \n importance_df.sort_values(by='Splits', inplace=True, ascending=False)\n importance_df.plot.bar(x='Feature', figsize=(12,3))\n plt.title('Permutation Importance')\n\ndef plot_tree_importance(cols, tree, vert_plot=True):\n fi = pd.DataFrame({'imp':tree.feature_importances_}, index=cols)\n fi.imp = 100*fi.imp/fi.imp.sum()\n if not vert_plot:\n fi.sort_values(by='imp', inplace=True)\n fi.plot.barh(figsize=(5,12))\n plt.xlabel('Tree: Variable Importance')\n else: \n fi.sort_values(by='imp', inplace=True, ascending=False)\n fi.plot.bar(figsize=(14,4))\n plt.ylabel('Relative Importance')\n plt.title('Tree: Variable Importance')", "id": "1465295", "language": "Python", "matching_score": 0.4149504601955414, "max_stars_count": 1, "path": "fastml-arc/utils.py" }, { "content": "import subprocess, re\nfrom pkg_resources import get_distribution, DistributionNotFound\nfrom datetime import datetime\n\n# 
d[year][month][day] example: 20200420\nver_date = datetime.now().strftime(\"d%Y%m%d\")\n\ndef version():\n las_version = ''\n\n '''\n Look for distribution version\n\n This looks for a distribution in roughly the following order.\n - Local development install installed with 'pip install -e \".\"'\n Note:\n The '-e' puts the install in an editable-mode where it it looks at\n the source directory and reactes to changes in the code.\n - A lasio.egg-info dir in the current working directory.\n Note!:\n An existing lasio.egg-info dir can hide an offical release install\n for the case where an offical release is also installed. The\n lasio.egg-info can be an artifact of running 'pip install -e \".\"',\n or of runnng 'bdist_wheel' to create a release package.\n - An official release installed with 'pip install lasio' or a development\n release installed with 'pip install \".\"'\n '''\n\n\n try:\n las_version = get_distribution(__package__).version\n except DistributionNotFound:\n # TODO: Add logger message\n pass\n\n '''\n If no distribution is found, check if the current working directory is in a\n version control system and attempt to derive a version string from the vsc.\n '''\n if not las_version.strip():\n las_version = _get_vcs_version()\n\n '''\n Else set a sensible default version\n 0.25.0 was the most recent version before this change so it is being\n used as teh default basline.\n '''\n if not las_version.strip():\n las_version = (\n \"0.25.0.dev0+unknown-post-dist-version.{}\".format(ver_date)\n )\n\n return las_version\n\n\ndef _get_vcs_version():\n semver_regex = re.compile('^v\\d+\\.\\d+\\.\\d+') # examples: 'v0.0.0', 'v0.25.0'\n split_regex = re.compile('-')\n local_las_version = ''\n tmpstr = ''\n tmpbytes = b''\n\n '''\n https://git-scm.com/docs/git-describe\n git describe --tags --match 'v*'\n This cmd will find the most recent tag starting with 'v' on the current\n branch.\n '''\n try: \n tmpbytes = subprocess.check_output(\n [\"git\", \"describe\", \"--tags\", \"--match\", \"v*\"],\n stderr=subprocess.STDOUT,\n ).strip()\n\n except subprocess.CalledProcessError:\n pass\n\n # Convert byte string to text string\n try:\n tmpstr = \"\".join( chr(x) for x in tmpbytes)\n except TypeError as e:\n print(\"Error: {}\\n\".format(e))\n\n if semver_regex.match(tmpstr):\n tmpstr = tmpstr[1:]\n (rel_ver, commits_since_rel_ver, current_commit) = split_regex.split(tmpstr)\n local_las_version = \"{}.dev{}+{}.{}\".format(\n rel_ver, commits_since_rel_ver, current_commit, ver_date\n )\n\n return local_las_version\n", "id": "10447530", "language": "Python", "matching_score": 0.2861160337924957, "max_stars_count": 1, "path": "lasio/las_version.py" }, { "content": "'''\nYou can get the data via:\nwget http://pjreddie.com/media/files/cifar.tgz \nImportant: Before proceeding, the student must reorganize the downloaded dataset files to match the expected directory structure, so that there is a dedicated folder for each class under 'test' and 'train', e.g.:\n* test/airplane/airplane-1001.png\n* test/bird/bird-1043.png\n\n* train/bird/bird-10018.png\n* train/automobile/automobile-10000.png\n'''\n\nfrom pathlib import Path\nfrom typing import Tuple#List#, Set, Dict, Tuple, Optional\n\nclasses = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck', 'automobile')\n\ndef sort_files(sourceDir: str, classes: Tuple) -> None:\n PATH = Path(sourceDir)\n \n for name in classes:\n clsdir = PATH/name\n clsdir.mkdir(exist_ok=True)\n pattern = r'*'+name+'.png'\n for file in 
PATH.glob(pattern):\n newfile = clsdir/(file.parts[-1])\n file.replace(newfile)\n\n#test\n# sort_files('temp', ('frog','dog'))\n\n#main run\nsort_files('test', classes)\nsort_files('train', classes)", "id": "6550099", "language": "Python", "matching_score": 0.9138338565826416, "max_stars_count": 1, "path": "sorting.py" }, { "content": "from utils import mape\n\ndef test_mape():\n assert mape(2,3)==3\n\ndef temp():\n return 2\n\ndef test_temp():\n assert temp()==2", "id": "7977592", "language": "Python", "matching_score": 0.27427101135253906, "max_stars_count": 1, "path": "tests/test_ICE.py" }, { "content": "# testing loading the module\n#from importlib import reload\n\n#import fastml #__init__ should have : import fastml.utils\n#fastml.utils.pp()\n\n# import fastml #__init__ has: from .utils import pp\n# fastml.utils.pp()\n\n# import fastml #__init__ has: from fastml import utils\n# fastml.utils.pp()\n\n# from fastml import * # __init__ should have: __all__ = ['utils']\n# utils.pp()\n\n# from fastml import * # __init__ should have: from .utils import pp\n# # next not important __all__ = ['pp']\n# pp()\n\n\n# from fastml import utils\n# reload(utils)\n# utils.pp()\n\n# from fastml.utils import pp\n# pp()\n\n", "id": "8471058", "language": "Python", "matching_score": 1.5987659692764282, "max_stars_count": 1, "path": "test_m1.py" }, { "content": "#print('executed_init_')\n#import fastml.utils\n#from fastml import utils \n#from .utils import pp\n#__all__ = ['utils']\n#__all__ = ['pp']", "id": "6710094", "language": "Python", "matching_score": 2.315408229827881, "max_stars_count": 1, "path": "fastml-arc/__init__.py" }, { "content": "#print('executed_init_')\n#import fastml.utils\n#from fastml import utils \n#from .utils import pp\n__all__ = ['utils', 'data']\n#__all__ = ['pp']\n\nfrom . import utils\nfrom . import data", "id": "1883288", "language": "Python", "matching_score": 2.3313486576080322, "max_stars_count": 1, "path": "__init__.py" } ]
1.717547
byakkozhang
[ { "content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models\nfrom django.utils.encoding import python_2_unicode_compatible\nfrom DjangoUeditor.models import UEditorField\nfrom django.core.urlresolvers import reverse\n\n\n@python_2_unicode_compatible\nclass Column(models.Model):\n name = models.CharField('栏目名称', max_length=256)\n slug = models.CharField('栏目网址', max_length=256, db_index=True)\n intro = models.TextField('栏目简介', default='')\n nav_display = models.BooleanField('导航显示', default=False)\n home_display = models.BooleanField('首页显示', default=False)\n\n def get_absolute_url(self):\n return reverse('column', args=(self.slug, ))\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = '栏目'\n verbose_name_plural = '栏目'\n ordering = ['name'] # 排序\n\n\n@python_2_unicode_compatible\nclass Article(models.Model):\n column = models.ManyToManyField(Column, verbose_name='归属栏目')\n\n title = models.CharField('标题', max_length=256)\n slug = models.CharField('网址', max_length=256, db_index=True)\n\n author = models.ForeignKey('auth.User', blank=True, null=True, verbose_name='作者')\n content = models.TextField('内容', default='', blank=True)\n content = UEditorField('内容', height=300, width=1000,\n default=u'', blank=True, imagePath=\"uploads/images/\",\n toolbars='besttome', filePath='uploads/files/')\n\n pub_date = models.DateTimeField('发表时间', auto_now_add=True, editable=True)\n update_time = models.DateTimeField('更新时间', auto_now=True, null=True)\n published = models.BooleanField('正式发布', default=True)\n\n def get_absolute_url(self):\n return reverse('article', args=(self.pk, self.slug))\n\n def __str__(self):\n return self.title\n\n class Meta:\n verbose_name = '教程'\n verbose_name_plural = '教程'\n", "id": "8448416", "language": "Python", "matching_score": 0, "max_stars_count": 83, "path": "news/models.py" } ]
0
thewisenerd
[ { "content": "import sys\n\nfrom google.auth.transport.requests import Request\nfrom google.oauth2.credentials import Credentials\nfrom google_auth_oauthlib.flow import InstalledAppFlow\n\nfrom pytos.models.configuration import Configuration\n\noauth_scopes = [\n 'https://www.googleapis.com/auth/photoslibrary.readonly',\n 'https://www.googleapis.com/auth/photoslibrary.readonly.appcreateddata'\n]\n\n\nclass Authorizer:\n def __init__(self, config: Configuration):\n self.config = config\n self.credentials = Credentials\n\n def auth(self, scopes=None) -> Credentials:\n if scopes is None:\n scopes = oauth_scopes\n\n flow = InstalledAppFlow.from_client_secrets_file(self.config.secrets,\n scopes=scopes,\n redirect_uri='urn:ietf:wg:oauth:2.0:oob')\n\n refresh_token = self.config.refresh_token\n if refresh_token is not None:\n client_cfg = flow.client_config\n refresh_req = Request()\n\n fresh = Credentials(None,\n refresh_token=refresh_token,\n token_uri=client_cfg['token_uri'],\n client_id=client_cfg['client_id'],\n client_secret=client_cfg['client_secret'])\n fresh.refresh(refresh_req)\n if fresh.valid:\n return fresh\n\n print('cfg.user.refresh_token expired. ', end='', file=sys.stderr)\n\n credentials = flow.run_console(\n authorization_prompt_message='please login:\\n\\n\\t{url}\\n',\n authorization_code_message='auth code: ')\n return credentials\n", "id": "1442133", "language": "Python", "matching_score": 2.1069929599761963, "max_stars_count": 0, "path": "pytos/oauth/base.py" }, { "content": "import os\n\nimport toml\n\n\nclass Configuration:\n def __init__(self, path):\n self.path = path\n self.buf = toml.load(path)\n\n def validate(self) -> int:\n cfg = self.buf\n\n assert isinstance(cfg, dict)\n\n assert 'oauth' in cfg\n assert isinstance(cfg['oauth'], dict)\n\n assert 'secrets' in cfg['oauth']\n assert isinstance(cfg['oauth']['secrets'], str)\n\n if 'refresh_token' in cfg:\n assert isinstance(cfg['user']['refresh_token'], str)\n\n p1 = cfg['oauth']['secrets']\n p2 = os.path.join('cfg', cfg['oauth']['secrets'])\n assert (os.path.isfile(p1) or os.path.isfile(p2))\n\n assert 'app' in cfg\n assert isinstance(cfg['app'], dict)\n\n assert 'db' in cfg['app']\n assert isinstance(cfg['app']['db'], str)\n\n return 0\n\n def dump(self):\n with open(self.path, 'w') as fp:\n toml.dump(self.buf, fp)\n\n def get_db(self) -> str:\n cfg = self.buf\n return cfg['app']['db']\n\n db = property(get_db)\n\n def get_secrets(self) -> str:\n cfg = self.buf\n p1 = cfg['oauth']['secrets']\n p2 = os.path.join('cfg', cfg['oauth']['secrets'])\n return p1 if os.path.isfile(p1) else p2\n\n secrets = property(get_secrets)\n\n def get_refresh_token(self) -> str:\n cfg = self.buf\n return cfg['oauth']['refresh_token'] if 'refresh_token' in cfg['oauth'] else None\n\n def set_refresh_token(self, token):\n cfg = self.buf\n oauth = cfg['oauth']\n oauth['refresh_token'] = token\n cfg['oauth'] = oauth\n self.buf = cfg\n\n refresh_token = property(get_refresh_token, set_refresh_token)\n", "id": "6557353", "language": "Python", "matching_score": 0.12425468862056732, "max_stars_count": 0, "path": "pytos/models/configuration.py" }, { "content": "from bs4 import BeautifulSoup\n\nfrom ... 
import helpers\n\ndef parse(body):\n\n\t# delete leading </style>\n\t# grr\n\tbody = body.strip()\n\tif body.startswith('</style>'):\n\t\tbody = body[len('</style>'):]\n\n\ttry:\n\t\tsoup = BeautifulSoup(body, \"lxml\")\n\texcept Exception as e:\n\t\treturn helpers.err(str(e))\n\n\ttables = soup.find_all('table')\n\n\t# tables[2] == student details\n\t# tables[3] == attendance\n\n\tif not len(tables) == 16:\n\t\treturn helpers.err('parse error')\n\n\trows = tables[3].find_all('tr')\n\trows = rows[1:] # headers\n\n\tdata = []\n\tfor r in rows:\n\t\tcol = r.find_all('td')\n\t\t# type = \"theory, practical, Lab Based Theory\"\n\t\t# cate = \"Core, elective, open elective\"\n\t\t# 0: cc\n\t\t# 1: title\n\t\t# 2: type\n\t\t# 3: faculty\n\t\t# 4: slot\n\t\t# 5: room\n\t\t# 6: total\n\t\t# 7: missed\n\t\t# 8: percent\n\n\t\tif not len(col) >= 8:\n\t\t\treturn helpers.err('parse error')\n\n\t\tcol[0].font.clear(); # clear 'regular' text\n\t\tcc = col[0].get_text()\n\t\ttitle = col[1].get_text()\n\t\tctype = col[2].get_text()\n\t\tfaculty = col[3].get_text()\n\t\tslot = col[4].get_text()\n\t\troom = col[5].get_text()\n\n\t\ttry:\n\t\t\ttotal = int(col[6].get_text())\n\t\t\tmissed = int(col[7].get_text())\n\t\texcept (TypeError, ValueError) as e:\n\t\t\treturn helpers.err('parse error')\n\n\t\tdata.append({\n\t\t\t'course': {\n\t\t\t\t'slot': slot,\n\t\t\t\t'code': cc,\n\t\t\t\t'title': title,\n\t\t\t\t'type': ctype,\n\t\t\t\t'faculty': faculty,\n\t\t\t\t'room': room,\n\t\t\t},\n\t\t\t'total': total,\n\t\t\t'absent': missed,\n\t\t\t'percent': (total - missed) / total * 100,\n\t\t})\n\n\treturn helpers.ok(data)\n", "id": "5068175", "language": "Python", "matching_score": 3.466660737991333, "max_stars_count": 2, "path": "academia/parsers/academia/Sem4_My_Attendance.py" }, { "content": "from bs4 import BeautifulSoup\n\nfrom ... 
import helpers\n\ndef parse(body):\n\n\ttry:\n\t\tsoup = BeautifulSoup(body, \"lxml\")\n\texcept Exception as e:\n\t\treturn helpers.err(str(e))\n\n\ttables = soup.find_all('table')\n\n\t# tables[0] == welcome message\n\t# tables[1] == profile\n\n\tif not len(tables) == 2:\n\t\treturn helpers.err('parse error')\n\n\trows = tables[1].find_all('tr')\n\tif not len(rows) >= 13:\n\t\treturn helpers.err('parse error')\n\n\trows = rows[1:] # headers\n\n\tdata = {}\n\t\"\"\"\n\t\t0: name, image\n\t\t1: regno\n\t\t2: office\n\t\t3: course\n\t\t4: father name\n\t\t5: dob\n\t\t6: sex\n\t\t7: blood group\n\t\t8: address\n\t\t9: email\n\t\t10: pincode\n\t\t11: place of birth\n\t\t12: validity\n\t\"\"\"\n\n\ttry:\n\n\t\t# row 0: name, image\n\t\ttds = rows[0].find_all('td')\n\t\tname = tds[1].get_text().strip()\n\t\timage = tds[2].img['src']\n\n\t\tif image.startswith('../'):\n\t\t\timage = 'http://evarsity.srmuniv.ac.in/srmswi/' + image[3:]\n\n\t\timage = image.strip()\n\n\t\t# row 1: regno\n\t\ttds = rows[1].find_all('td')\n\t\tregno = tds[1].get_text().strip()\n\n\t\t# row 2: office\n\t\ttds = rows[2].find_all('td')\n\t\toffice = tds[1].get_text().strip()\n\n\t\t# row 3: course\n\t\ttds = rows[3].find_all('td')\n\t\tcourse = tds[1].get_text().strip()\n\n\t\t# row 4: father name\n\t\ttds = rows[4].find_all('td')\n\t\tfname = tds[1].get_text().strip()\n\n\t\t# row 5: dob\n\t\ttds = rows[5].find_all('td')\n\t\tdob = tds[1].get_text().strip()\n\n\t\t# row 6: sex\n\t\ttds = rows[6].find_all('td')\n\t\tsex = tds[1].get_text().strip()\n\n\t\t# row 7: blood group\n\t\ttds = rows[7].find_all('td')\n\t\tbgroup = tds[1].get_text().strip()\n\n\t\t# row 8: address\n\t\ttds = rows[8].find_all('td')\n\t\taddress = tds[1].get_text().strip()\n\n\t\t# row 9: email\n\t\ttds = rows[9].find_all('td')\n\t\temail = tds[1].get_text().strip()\n\n\t\t# row 10: pincode\n\t\ttds = rows[10].find_all('td')\n\t\tpincode = tds[1].get_text().strip()\n\n\t\t# row 11: birthplace\n\t\ttds = rows[11].find_all('td')\n\t\tbirthplace = tds[1].get_text().strip()\n\n\t\t# row 12: validity\n\t\ttds = rows[12].find_all('td')\n\t\tvalidity = tds[1].get_text().strip()\n\texcept:\n\t\treturn helpers.err('parse error')\n\n\treturn helpers.ok({\n\t\t'regno': regno,\n\n\t\t'name': name,\n\t\t'email': email,\n\t\t'image': image,\n\n\t\t'course': course,\n\t\t'validity': validity,\n\t\t'office': office,\n\n\t\t'dob': dob,\n\t\t'sex': sex,\n\t\t'bloodgroup': bgroup,\n\t\t'fname': fname,\n\n\t\t'address': address,\n\t\t'pincode': pincode,\n\t\t'birthplace': birthplace,\n\t})\n", "id": "4001944", "language": "Python", "matching_score": 0.9918849468231201, "max_stars_count": 2, "path": "academia/parsers/evarsity/Sem4_General_Profile.py" }, { "content": "from ...parsers.academia import Sem4_My_Attendance\nfrom ... import helpers\n\ndef get(self, name):\n\tif not self.methods:\n\t\treturn helpers.err('no handlers bind.')\n\n\tif name not in self.methods.keys():\n\t\treturn helpers.err('no such handler bind.')\n\n\n\turl = 'https://academia.srmuniv.ac.in/liveViewHeader.do';\n\tpayload = {\n\t\t'sharedBy': 'srm_university',\n\t\t'appLinkName': 'academia-academic-services',\n\t\t'urlParams': '{}',\n\t\t'isPageLoad': 'true',\n\n\t\t'viewLinkName': self.methods[name],\n\t}\n\tresponse, e = self.request('POST', url, payload=payload)\n\tif e: # exception occurred\n\t\treturn helpers.err('network request failed; exception: ' + str(e))\n\n\t# status code != 200. 
uh oh.\n\tif not response.status_code // 100 == 2:\n\t\treturn helpers.err('network request failed with status code: ' + response.status_code)\n\n\treturn self.parsers[name](response.text)\n\ndef bind(self, semester):\n\tif (semester == 4):\n\t\tself.methods['attendance'] = 'My_Attendance';\n\t\tself.parsers['attendance'] = Sem4_My_Attendance.parse;\n", "id": "9621937", "language": "Python", "matching_score": 3.0531914234161377, "max_stars_count": 2, "path": "academia/handlers/academia/AppHandlers.py" }, { "content": "from ...parsers.evarsity import Sem4_General_Profile\nfrom ... import helpers\n\ndef get(self, name):\n\tif not self.methods:\n\t\treturn helpers.err('no handlers bind.')\n\n\tif name not in self.methods.keys():\n\t\treturn helpers.err('no such handler bind.')\n\n\turlbase = 'http://[email protected]/srmswi/resource/StudentDetailsResources.jsp?resourceid=';\n\turl = urlbase + self.methods[name]\n\tresponse, e = self.request('GET', url)\n\tif e: # exception occured\n\t\treturn helpers.err('network request failed; exception: ' + str(e))\n\n\t# status code != 200. uh oh.\n\tif not response.status_code // 100 == 2:\n\t\treturn helpers.err('network request failed with status code: ' + response.status_code)\n\n\treturn self.parsers[name](response.text)\n\ndef bind(self, semester):\n\tif (semester == 4):\n\t\tself.methods['profile-general'] = '1';\n\t\tself.parsers['profile-general'] = Sem4_General_Profile.parse;\n", "id": "1286596", "language": "Python", "matching_score": 0.15668506920337677, "max_stars_count": 2, "path": "academia/handlers/evarsity/AppHandlers.py" }, { "content": "from typing import List\n\nfrom google.auth.app_engine import Credentials\nfrom google.auth.transport.requests import AuthorizedSession\n\nfrom pytos.models.album_item import AlbumItem\nfrom pytos.models.media_item import MediaItem\nfrom pytos.requests.builder import Builder\nfrom pytos.requests.context import RequestContext\n\n\ndef search(credentials: Credentials, search_id: str, query_params: dict) -> List[MediaItem]:\n a_session = AuthorizedSession(credentials)\n ub = Builder('photoslibrary.googleapis.com', 'v1')\n url = ub.build('/mediaItems:search')\n\n # dump all eg:\n params = {'pageSize': 100}\n for k in query_params:\n params[k] = query_params[k]\n wb = []\n context = RequestContext(search_id)\n while True:\n req = a_session.post(url, data=params)\n assert req.status_code == 200\n res = req.json()\n\n if 'mediaItems' not in res:\n break\n\n for _item in res['mediaItems']:\n wb.append(MediaItem.from_dict(_item))\n\n context.add(req.elapsed.total_seconds(), len(res['mediaItems']), 0)\n\n if 'nextPageToken' not in res:\n break\n\n params['pageToken'] = res['nextPageToken']\n\n context.stat()\n\n return wb\n\n\ndef search_by_album_id(credentials: Credentials, album_id: str) -> List[AlbumItem]:\n params = {\n 'albumId': album_id\n }\n search_id = 'search.album_id:{}'.format(album_id)\n results = search(credentials, search_id, params)\n return [AlbumItem(album_id, x.id) for x in results]\n", "id": "12237466", "language": "Python", "matching_score": 5.2156596183776855, "max_stars_count": 0, "path": "pytos/requests/search.py" }, { "content": "from typing import Dict\n\nfrom google.auth.transport.requests import AuthorizedSession\nfrom google.oauth2.credentials import Credentials\n\nfrom pytos.models.media_item import MediaItem\nfrom pytos.requests.builder import Builder\nfrom pytos.requests.context import RequestContext\n\n\ndef get_all_media_items(credentials: Credentials) -> Dict[str, MediaItem]:\n 
a_session = AuthorizedSession(credentials)\n ub = Builder('photoslibrary.googleapis.com', 'v1')\n url = ub.build('/mediaItems')\n\n # dump all eg:\n params = {'pageSize': 100}\n wb = {}\n context = RequestContext('get_all_media_items')\n while True:\n req = a_session.get(url, params=params)\n assert req.status_code == 200\n res = req.json()\n\n if 'mediaItems' not in res:\n break\n\n dup = 0\n for _item in res['mediaItems']:\n item = MediaItem.from_dict(_item)\n if item.id in wb:\n dup += 1\n wb[item.id] = item\n\n context.add(req.elapsed.total_seconds(), len(res['mediaItems']), dup)\n\n if 'nextPageToken' not in res:\n break\n\n params['pageToken'] = res['nextPageToken']\n\n context.stat()\n\n return wb\n", "id": "11339669", "language": "Python", "matching_score": 5.224946975708008, "max_stars_count": 0, "path": "pytos/requests/media_item.py" }, { "content": "from typing import Dict\n\nfrom google.auth.transport.requests import AuthorizedSession\nfrom google.oauth2.credentials import Credentials\n\nfrom pytos.models.album import Album\nfrom pytos.requests.builder import Builder\nfrom pytos.requests.context import RequestContext\n\n\ndef get_all_albums(credentials: Credentials) -> Dict[id, Album]:\n a_session = AuthorizedSession(credentials)\n ub = Builder('photoslibrary.googleapis.com', 'v1')\n url = ub.build('/albums')\n\n # dump all eg:\n params = {'pageSize': 50}\n wb = {}\n context = RequestContext('get_all_albums')\n while True:\n req = a_session.get(url, params=params)\n assert req.status_code == 200\n res = req.json()\n\n if 'albums' not in res:\n break\n\n dup = 0\n for _item in res['albums']:\n item = Album.from_dict(_item)\n if item.id in wb:\n dup += 1\n wb[item.id] = item\n\n context.add(req.elapsed.total_seconds(), len(res['albums']), dup)\n\n if 'nextPageToken' not in res:\n break\n\n params['pageToken'] = res['nextPageToken']\n\n context.stat()\n\n return wb\n", "id": "1505043", "language": "Python", "matching_score": 1.219529151916504, "max_stars_count": 0, "path": "pytos/requests/album.py" }, { "content": "#!/usr/bin/env python3\n\n#\n# copyright (c) 2017 thewisenerd <<EMAIL>>\n#\n# license: WTFPL, http://www.wtfpl.net/txt/copying\n#\n\nfrom flask import Flask\nfrom flask import request\nfrom flask import make_response\n\napp = Flask(__name__)\n\nimport os\nimport rsa\nimport json\nfrom binascii import hexlify\nfrom binascii import unhexlify\nimport hashlib\nimport time\n\nimport collections\n\ndef currentms():\n\treturn int(round(time.time() * 1000))\n\nhashtable = (\n\t# dummy:dummy\n\t'3644550032d201edab4bb729dc4b696af95f5d4b66365cdad26bb7ef9ab514a5',\n)\n\nSession = collections.namedtuple('Session', 'pubk privk logintoken gentime loginok logintime')\n\nlookuptable = {}\n\ndef getsha256(s):\n\tm = hashlib.sha256()\n\tm.update(s.encode('utf-8'))\n\treturn hexlify(m.digest()).decode('utf-8')\n\ndef revStrEncode(s):\n\tif (len(s) > 2):\n\t\ts = \"\".join([s[i:i+2] for i in range(len(s)-2, -2, -2)])\n\treturn s\n\[email protected]('/RSASettings')\ndef rsa_settings():\n\n\tsession = hexlify(os.urandom(8)).decode('utf-8')\n\tlogintoken = hexlify(os.urandom(8)).decode('utf-8')\n\t(pubk, privk) = rsa.newkeys(1024, exponent=0x11)\n\tlookuptable[session] = Session(pubk=pubk, privk=privk, logintoken=logintoken, gentime=currentms(), loginok=False, logintime=0)\n\n\tpayload = {\n\t\t'm': '%0256x' % pubk.n,\n\t\t'e': '%08x' % pubk.e,\n\t\t'loginToken': <PASSWORD>intoken\n\t}\n\n\tresp = make_response(json.dumps(payload))\n\tresp.set_cookie('NACSID', session)\n\n\treturn 
resp\n\[email protected]('/Login', methods=['POST'])\ndef login():\n\n\tauth_failure = {\n\t\t\"context\":\"\",\n\t\t\"type\":\"AUTH_FAILURE\",\n\t\t\"message\":\"Username or password incorrect\",\n\t\t\"opaque\":\"\",\n\t\t\"nextStateId\":\"\"\n\t}\n\n\tfailure = {\n\t\t\"context\":\"\",\n\t\t\"type\":\"FAILURE\",\n\t\t\"message\":\"Login failed. If the problem persists please contact your administrator\",\n\t\t\"opaque\":\"\",\n\t\t\"nextStateId\":\"\"\n\t}\n\n\tsession_failure = {\n\t\t\"context\":\"\",\n\t\t\"type\":\"SESSION_FAILURE\",\n\t\t\"message\":\"Your session has expired. Please try again\",\n\t\t\"opaque\":\"\",\n\t\t\"nextStateId\":\"\"\n\t}\n\n\tsuccess = {\n\t\t\"context\":\"\",\n\t\t\"type\":\"SUCCESS\",\n\t\t\"message\":\"\",\n\t\t\"opaque\":\"\",\n\t\t\"nextStateId\":\"\",\n\t\t\"orgUrl\":\"\",\n\t\t\"keepAliveActive\":False,\n\t\t\"delayInterval\":\"250\"\n\t}\n\n\tif 'NACSID' not in request.cookies:\n\t\treturn json.dumps(failure)\n\n\tif request.cookies['NACSID'] not in lookuptable:\n\t\treturn json.dumps(failure)\n\n\tif 'realm' not in request.form:\n\t\treturn json.dumps(failure)\n\tif 'username' not in request.form:\n\t\treturn json.dumps(failure)\n\tif 'password' not in request.form:\n\t\treturn json.dumps(failure)\n\n\tif request.form['realm'] != 'passwordRealm':\n\t\treturn json.dumps(failure)\n\n\tusername = request.form['username']\n\tpassword = request.form['password']\n\n\tsession = lookuptable[request.cookies['NACSID']]\n\n\tdecrypted = rsa.decrypt(unhexlify(revStrEncode(password)), session.privk).decode('utf-8')\n\ttoken = decrypted[0:16]\n\n\tif (token != session.logintoken):\n\t\treturn json.dumps(failure)\n\n\tif (currentms() - session.gentime > (250 * 1000)):\n\t\treturn json.dumps(session_failure)\n\n\tif getsha256( username + ':' + decrypted[16:] ) in hashtable:\n\t\tlookuptable[request.cookies['NACSID']] = Session(pubk=session.pubk, privk=session.privk, logintoken=session.logintoken, gentime=session.gentime, loginok=True, logintime=currentms())\n\t\treturn json.dumps(success)\n\n\treturn json.dumps(auth_failure)\n\[email protected]('/GetStateAndView')\ndef stateandview():\n\n\tok = {\n\t\t'view': 'Final'\n\t}\n\n\tauth = {\n\t\t'view': 'Authentication'\n\t}\n\n\tif 'NACSID' not in request.cookies:\n\t\treturn json.dumps(auth)\n\n\tif request.cookies['NACSID'] not in lookuptable:\n\t\treturn json.dumps(auth)\n\n\tsession = lookuptable[request.cookies['NACSID']]\n\n\tif session.loginok and (currentms() - session.logintime < (250 * 1000)):\n\t\treturn json.dumps(ok)\n\n\treturn json.dumps(auth)\n\n\nif __name__ == '__main__':\n\tapp.run()\n", "id": "5060035", "language": "Python", "matching_score": 4.493654251098633, "max_stars_count": 2, "path": "tests/dummy-server.py" }, { "content": "import requests\nimport json\n\nfrom rsa import encrypt\nfrom binascii import hexlify\n\n# disable insecure request warnings.\n# thx http://stackoverflow.com/a/28002687\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\n\nclass pubkey_t:\n\tdef __init__(self, n, e):\n\t\tself.n = n;\n\t\tself.e = e;\n\n# shitty _encoding_\n# original def goes something like this (js):\n#\n# if (value.length > 2)\n# {\n# var newPass = \"\";\n# for (var j=value.length-2; j>=0; j=j-2)\n# {\n# newPass = newPass.concat(value.substr(j,2));\n# }\n# value = newPass;\n# }\n#\n# TODO: i have NOT checked odd chars or sanity\ndef revStrEncode(s):\n\tif (len(s) > 2):\n\t\ts = \"\".join([s[i:i+2] for i in 
range(len(s)-2, -2, -2)])\n\treturn s\n\ndef login(regno, password):\n\tBaseURL = 'https://192.168.10.3';\n\tPortalMainURL = BaseURL + '/connect/PortalMain';\n\tRSASettingsURL = BaseURL + '/connect/RSASettings';\n\tGetStateAndViewURL = BaseURL + '/connect/GetStateAndView';\n\tLoginURL = BaseURL + '/connect/Login';\n\n\ts = requests.Session()\n\trsa = None\n\tsnv = None\n\n\t## get initial nacsid\n\t# optional\n\t# r = s.get(PortalMainURL, verify=False);\n\t# assert r.status_code == 200, \"status code %d\" % r.status_code;\n\n\t## get rsasettings\n\tr = s.get(RSASettingsURL, verify=False);\n\tassert r.status_code == 200, \"status code %d\" % r.status_code;\n\ttry:\n\t\trsa = r.json()\n\texcept ValueError:\n\t\treturn \"RSASettings decode error\"\n\n\t## make sure we're at auth?\n\tr = s.get(GetStateAndViewURL, verify=False);\n\ttry:\n\t\tsnv = r.json()\n\texcept ValueError:\n\t\treturn \"StateAndView decode error\"\n\t# assert auth?\n\t# optional\n\t#assert snv['view'] == 'Authentication', \"view [%s] != 'Authentication'\" % snv['view']\n\n\t## auth\n\t# maketh pubkey from RSASettings\n\tn = int(\"0x\" + rsa['m'], 16);\n\te = int(\"0x\" + rsa['e'], 16);\n\tpubkey = pubkey_t(n, e);\n\n\t# doeth teh _encryption_\n\tplaintext = rsa['loginToken'] + password;\n\tencrypted = encrypt(plaintext.encode('utf-8'), pubkey);\n\tencryptedhex = hexlify(encrypted);\n\tencryptedhexEncoded = revStrEncode(encryptedhex.decode('utf-8'))\n\n\tpayload = {\n\t\t'realm': '<PASSWORD>',\n\t\t'username': regno,\n\t\t'password': <PASSWORD>\n\t};\n\n\t# meh.\n\tr = s.post(LoginURL, data=payload, verify=False);\n\n\t# validate snv\n\tr = s.get(GetStateAndViewURL, verify=False);\n\ttry:\n\t\tsnv = r.json()\n\texcept ValueError:\n\t\treturn \"StateAndView decode error\"\n\n\tassert snv['view'] == 'Final', \"view [%s] != 'Final'; auth failure\" % snv['view']\n\treturn \"ok\"\n", "id": "9074506", "language": "Python", "matching_score": 2.100250720977783, "max_stars_count": 3, "path": "checkpoint.py" }, { "content": "import requests\nimport json\n\n# ocr\nfrom PIL import Image\nimport pytesseract\n\n# regex\nimport re\n\ndef conn(self):\n\tif self.session:\n\t\treturn\n\n\theaders = {\n\t\t'origin': \"http://evarsity.srmuniv.ac.in\",\n\t\t'referer': \"http://evarsity.srmuniv.ac.in/srmswi/usermanager/youLogin.jsp\",\n\n\t\t'user-agent': \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36\",\n\t};\n\tadapter = requests.adapters.HTTPAdapter(max_retries=self.retries)\n\n\t# defaults\n\tself.session = requests.Session()\n\tself.session.headers.update(headers)\n\tself.session.mount('http://', adapter)\n\tself.session.mount('https://', adapter)\n\n\t# login payload\n\tpayload1 = {\n\t\t'Searchtext1:txtSearchText' : 'Search',\n\t\t'txtRegNumber': 'iamalsouser',\n\t\t'txtPwd': '<PASSWORD>',\n\t\t# 'txtverifycode': 'xxxxxx',\n\t\t'txtPA': '1',\n\n\t\t'txtSN': self.regno,\n\t\t'txtPD': self.password,\n\t}\n\n\t# login\n\t# scrape login page for session init\n\turl1 = 'http://evarsity.srmuniv.ac.in/srmswi/usermanager/youLogin.jsp'\n\ttry:\n\t\tr1 = self.session.request('GET', url1, timeout=self.timeout);\n\texcept requests.exceptions.RequestException as e:\n\t\tself.status = e\n\t\tif self.debug:\n\t\t\tprint(e)\n\n\tif not r1.status_code // 100 == 2:\n\t\tself.status = 'network request failed with status code: ' + r1.status_code\n\telse:\n\t\tself.status = \"ok\"\n\n\tif \"ok\" != self.status:\n\t\treturn\n\n\t# this will compound into (2 * self.retries ^ 2) 
:poker_face:\n\tfor i in range(0, self.retries):\n\t\t# get captcha\n\t\turl2 = 'http://evarsity.srmuniv.ac.in/srmswi/Captcha'\n\t\ttry:\n\t\t\tr2 = self.session.get(url2, stream=True)\n\t\texcept requests.exceptions.RequestException as e:\n\t\t\tself.status = e\n\t\t\tif self.debug:\n\t\t\t\tprint(e)\n\t\t\tcontinue # try again\n\t\tr2.raw.decode_content = True\n\n\t\t# try captcha\n\t\tcaptcha = pytesseract.image_to_string(Image.open(r2.raw));\n\t\tpayload1['txtverifycode'] = captcha\n\n\t\t# moment of truth\n\t\ttry:\n\t\t\tr3 = self.session.request('POST', url1, data=payload1, timeout=self.timeout)\n\t\texcept requests.exceptions.RequestException as e:\n\t\t\tself.status = e\n\t\t\tif self.debug:\n\t\t\t\tprint(e)\n\t\t\tcontinue # try again\n\n\t\tif 'http://evarsity.srmuniv.ac.in/srmswi/usermanager/youLogin.jsp' == r3.url: # login failed\n\t\t\tregex = r\"(?:LoadLoginPage\\(\\)\\{?)([^\\}]*)(?:\\}?)\"\n\t\t\tm1 = re.search(regex, r3.text)\n\t\t\tif not m1:\n\t\t\t\tself.status = \"login failure, unknown error\"\n\t\t\t\treturn\n\n\t\t\tregex = r\"loginerror=([^;\\\"]*)\"\n\t\t\tm2 = re.search(regex, m1.group(1))\n\t\t\tif not m2:\n\t\t\t\tself.status = \"login failure, unknown error\"\n\t\t\t\treturn\n\n\t\t\tself.status = m2.group(1) #failure\n\t\t\tif self.debug:\n\t\t\t\tprint(m2.group(1))\n\n\t\t\tif not (self.status == 'Invalid Verification code'):\n\t\t\t\treturn\n\n\t\telif 'http://evarsity.srmuniv.ac.in/srmswi/usermanager/home.jsp' == r3.url: # login success\n\t\t\tself.status = \"ok\"\n\t\t\tbreak\n\t\telse:\n\t\t\tself.status = \"login failure, unknown error\"\n\t\t\treturn\n\n\t# update headers\n\tdel self.session.headers['origin']\n\tdel self.session.headers['referer']\n\tself.session.headers.update({\n\t\t'host': \"evarsity.srmuniv.ac.in\",\n\t\t'referer': \"http://evarsity.srmuniv.ac.in/srmswi/usermanager/home.jsp\",\n\t})\n", "id": "7567099", "language": "Python", "matching_score": 5.226568698883057, "max_stars_count": 2, "path": "academia/handlers/evarsity/ConnHandler.py" }, { "content": "import requests\nimport json\n\ndef conn(self):\n\tif self.session:\n\t\treturn\n\n\theaders = {\n\t\t'origin': \"https://academia.srmuniv.ac.in\",\n\t\t'user-agent': \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36\",\n\t\t'cache-control': \"no-cache\",\n\t};\n\tadapter = requests.adapters.HTTPAdapter(max_retries=self.retries)\n\n\t# defaults\n\tself.session = requests.Session()\n\tself.session.headers.update(headers)\n\tself.session.mount('http://', adapter)\n\tself.session.mount('https://', adapter)\n\n\t# login\n\turl1 = 'https://academia.srmuniv.ac.in/accounts/signin.ac'\n\tpayload1 = {\n\t\t'client_portal': 'true',\n\t\t'grant_type': 'password',\n\t\t'is_ajax': 'true',\n\t\t'portal': '10002227248',\n\t\t'servicename': 'ZohoCreator',\n\t\t'serviceurl': 'https://academia.srmuniv.ac.in/',\n\n\t\t'username': self.email,\n\t\t'password': self.password,\n\t}\n\tr1 = requests.Request('POST', url1, data=payload1)\n\tp1 = r1.prepare()\n\tp1.headers.update({\n\t\t'referer': \"https://academia.srmuniv.ac.in/accounts/signin\"\n\t})\n\n\ttry:\n\t\tresp = self.session.send(p1, timeout=self.timeout)\n\texcept requests.exceptions.RequestException as e:\n\t\tself.status = e\n\t\tif self.debug:\n\t\t\tprint(e)\n\n\tif not resp.status_code // 100 == 2:\n\t\tself.status = 'network request failed with status code: ' + str(resp.status_code)\n\telse:\n\t\tself.status = \"ok\"\n\n\tif \"ok\" != self.status:\n\t\treturn\n\n\tj1 = 
None\n\ttry:\n\t\tj1 = resp.json()\n\texcept ValueError as e:\n\t\tself.status = \"invalid JSON response. try again.\"\n\t\tif self.debug:\n\t\t\tprint(e)\n\n\tif 'error' in j1.keys():\n\t\tself.status = json.dumps(j1['error'])\n\t\treturn # fatal, return.\n\n\t# do NOT refactor this\n\tif 't' in j1.keys() and 'data' in j1.keys():\n\t\tif ('success' != j1['data']['response']):\n\t\t\tself.status = \"unknown error\"\n\t\t\treturn # fatal, return.\n\t\telse:\n\t\t\treturn # normal, status='ok', return\n\telse:\n\t\tself.status = \"unknown error\"\n\t\treturn # fatal, return.\n", "id": "6127114", "language": "Python", "matching_score": 1.6489163637161255, "max_stars_count": 2, "path": "academia/handlers/academia/ConnHandler.py" }, { "content": "#!/usr/bin/env python\n\nimport json\n\nimport requests\nimport shutil\nimport tempfile\n\nfrom academia import evarsity\n\npayload = {\n\t'regno': 'RA151100xxxxxxx',\n\t'password': '<PASSWORD>',\n\t'semester': 4,\n}\n\no = evarsity.init(payload)\nif \"ok\" == o.status:\n\tdata = o.get('profile-general')\n\tif 'ok' == data['status']:\n\t\tprint(json.dumps(data['data'], indent=2))\n\n\t\t# store user image example\n\t\trq, e = o.request('GET', data['data']['image'], stream=True)\n\t\tif e: # exception occured\n\t\t\tprint('network request failed; exception: ' + str(e))\n\t\telse:\n\t\t\trq.raw.decode_content = True\n\t\t\tfh = tempfile.NamedTemporaryFile(delete = False)\n\t\t\tshutil.copyfileobj(rq.raw, fh)\n\t\t\tfh.close()\n\t\t\tprint('profile image saved at: ' + fh.name)\n\n\telse:\n\t\tprint('error: ' + data['data'])\nelse:\n\tprint('error: ' + o.status)\n", "id": "3417363", "language": "Python", "matching_score": 1.6295394897460938, "max_stars_count": 2, "path": "example-evarsity.py" }, { "content": "#!/usr/bin/env python\n\nfrom academia import academia\n\npayload = {\n\t'email': '<EMAIL>',\n\t'password': '<PASSWORD>',\n\t'semester': 4,\n}\n\no = academia.init(payload)\nif \"ok\" == o.status:\n\tprint(o.get('attendance'))\n", "id": "4464679", "language": "Python", "matching_score": 1.890791893005371, "max_stars_count": 2, "path": "example-academia.py" }, { "content": "from .handlers.academia import ConnHandler\nfrom .handlers.academia import AppHandlers\n\nfrom . 
import helpers\n\nclass Academia(object):\n\tstatus = None\n\temail = None\n\tpassword = <PASSWORD>\n\tsemester = None\n\tsession = None\n\ttimeout = (5, 5)\n\tretries = 5\n\tdebug = True\n\tmethods = {}\n\tparsers = {}\n\n\tdef __init__(self, payload):\n\t\tkeys = payload.keys()\n\n\t\tif 'email' not in keys:\n\t\t\tself.status = \"Missing email in payload\"\n\t\t\treturn\n\n\t\tif not any(x in keys for x in ['pass', 'password']):\n\t\t\tself.status = \"Missing password in payload\"\n\t\t\treturn\n\n\t\tif not any(x in keys for x in ['sem', 'semester']):\n\t\t\tself.status = \"Missing semester in payload\"\n\t\t\treturn\n\n\t\t# set email\n\t\tself.email = payload['email']\n\n\t\t# set pass\n\t\tif 'pass' in keys:\n\t\t\tself.password = payload['<PASSWORD>']\n\t\telse:\n\t\t\tself.password = payload['password']\n\n\t\t# set semester\n\t\tif 'sem' in keys:\n\t\t\tself.semester = payload['sem']\n\t\telse:\n\t\t\tself.semester = payload['semester']\n\n\t\t# ref status\n\t\tself.status = \"ok\"\n\n\t\t# connect\n\t\tConnHandler.conn(self)\n\t\tif \"ok\" != self.status:\n\t\t\treturn\n\n\t\t# register handlers\n\t\tAppHandlers.bind(self, self.semester)\n\n\tdef request(self, method, url, payload=None, stream=False):\n\t\treturn helpers.request(self, method, url, payload, stream)\n\n\tdef get(self, name):\n\t\treturn AppHandlers.get(self, name)\n\n# wrapper\ndef init(payload):\n\treturn Academia(payload);\n", "id": "9822837", "language": "Python", "matching_score": 1.7390906810760498, "max_stars_count": 2, "path": "academia/academia.py" }, { "content": "import json\nimport requests\n\ndef err(data):\n\treturn {\n\t\t'status': \"err\",\n\t\t'data': data\n\t}\n\ndef ok(data):\n\treturn {\n\t\t'status': \"ok\",\n\t\t'data': data\n\t}\n\ndef request(self, method, url, payload=None, stream=False):\n\tif self.debug:\n\t\tprint('.request(' + method + ', ' + url + ');')\n\t\tif payload:\n\t\t\tprint('\\tpayload: ' + json.dumps(payload))\n\t\tif stream:\n\t\t\tprint('\\tstream: ' + str(stream))\n\n\ttry:\n\t\tif not payload:\n\t\t\tresponse = self.session.request(method, url, timeout=self.timeout, stream=stream)\n\t\telse:\n\t\t\tresponse = self.session.request(method, url, data=payload, timeout=self.timeout, stream=stream)\n\texcept requests.exceptions.RequestException as e:\n\t\tif self.debug:\n\t\t\tprint(e)\n\t\treturn (None, e)\n\n\treturn (response, None)\n", "id": "11191447", "language": "Python", "matching_score": 0.4201536774635315, "max_stars_count": 2, "path": "academia/helpers.py" }, { "content": "#!/usr/bin/env python\n\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport imp\n\nimport requests\nimport json\nimport jsonschema\n\nfrom .helpers import print, nukeline, info, success, error\n\ndef getvar(g, key):\n\tif key in g:\n\t\treturn g[key];\n\treturn None\n\ndef setvar(g, key, var):\n\tg[key] = var;\n\ndef iterable(d, g):\n\tfor k, v in d.items():\n\t\tif isinstance(v, dict):\n\t\t\td[k] = iterable(v)\n\t\telif isinstance(v, list):\n\t\t\td[k] = map(lambda x: iterable(x), v)\n\t\telse:\n\t\t\tif callable(v):\n\t\t\t\td[k] = getvar(g, v.args[0])\n\t\t\telse:\n\t\t\t\td[k] = v\n\treturn d\n\n\ndef runtest(FILE, baseurl, g, count, total):\n\tinfo('[%02d/%02d] initializing [%s]' % (count, total, FILE))\n\ttry:\n\t\t# yay for backwards compatibility.\n\t\t# deal with this when it is actually removed.\n\t\t# really.\n\t\ttestFILE = imp.load_source('*', FILE)\n\texcept Exception as e:\n\t\tnukeline();\n\t\terror('[%02d/%02d] error initializing [%s]\\n' % (count, total, 
FILE))\n\t\tprint('\\n' + ('-' * 80) + '\\n')\n\t\tprint(e)\n\t\tprint('\\n' + ('-' * 80) + '\\n')\n\t\treturn False\n\n\tif (not hasattr(testFILE, 'test')) or (not 'cases' in testFILE.test):\n\t\tnukeline();\n\t\terror('[%02d/%02d] error initializing [%s] : does not have required attrs\\n' % (count, total, FILE))\n\t\treturn False\n\n\tdesc = '' if not 'desc' in testFILE.test else (' : %s' % testFILE.test['desc']);\n\tnukeline();\n\tinfo('[%02d/%02d] running [%s]%s' % (count, total, FILE, desc))\n\n\tfor case in testFILE.test['cases']:\n\t\tif 'payload' in case:\n\t\t\tpayload = case['payload']\n\n\t\t\t# fix variables\n\t\t\tif ('globals' in case):\n\t\t\t\tif ('requires' in case['globals']):\n\t\t\t\t\tfor r in case['globals']['requires']:\n\t\t\t\t\t\tif (not r in g):\n\t\t\t\t\t\t\tnukeline();\n\t\t\t\t\t\t\terror('[%02d/%02d] error running [%s] : required variable `%s` not set\\n' % (count, total, FILE, r))\n\t\t\t\t\t\t\treturn\n\n\t\t\t\t# iterate over payload\n\t\t\t\tpayload = iterable(payload, g);\n\t\t\t# globals in case\n\t\t# payload in case\n\n\t\t# get response\n\t\tif (case['method'] == 'GET'):\n\t\t\tif 'payload' in case:\n\t\t\t\treq = requests.get(baseurl + testFILE.test['url'], params=payload)\n\t\t\telse:\n\t\t\t\treq = requests.get(baseurl + testFILE.test['url'])\n\t\telse: # todo: handle more cases here\n\t\t\tif 'payload' in case:\n\t\t\t\treq = requests.post(baseurl + testFILE.test['url'], data=payload)\n\t\t\telse:\n\t\t\t\treq = requests.post(baseurl + testFILE.test['url'])\n\n\t\ttry:\n\t\t\tres = case['response']\n\t\t\tassert (req.status_code == res['status_code']), \"status code mismatch\"\n\t\t\tassert (req.headers['content-type'] == res['content-type']), \"content-type mismatch\"\n\n\t\t\tbody = case['response']['body']\n\n\t\t\t# if resp is json\n\t\t\tif body['type'] == 'json' and 'schema' in body:\n\t\t\t\tr = req.json()\n\n\t\t\t\tdef buildrequired(schema):\n\t\t\t\t\tif schema['type'] == 'object' and 'properties' in schema:\n\t\t\t\t\t\tschema['additionalProperties'] = False;\n\t\t\t\t\t\tschema['minProperties'] = len(schema['properties'])\n\n\t\t\t\t\t\tfor k,v in schema['properties'].items():\n\t\t\t\t\t\t\tif v['type'] == 'object':\n\t\t\t\t\t\t\t\tschema['properties'][k] = buildrequired(v)\n\n\t\t\t\t\treturn schema\n\n\t\t\t\tschema = buildrequired(body['schema'])\n\t\t\t\tjsonschema.validate(r, schema)\n\n\t\t\t\tdef ordered(obj):\n\t\t\t\t\tif isinstance(obj, dict):\n\t\t\t\t\t\treturn sorted((k, ordered(v)) for k, v in obj.items())\n\t\t\t\t\tif isinstance(obj, list):\n\t\t\t\t\t\treturn sorted(ordered(x) for x in obj)\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn obj\n\n\t\t\t\tif 'raw' in body:\n\t\t\t\t\tif isinstance(body['raw'], str):\n\t\t\t\t\t\traw = json.loads(body['raw'])\n\t\t\t\t\telse:\n\t\t\t\t\t\traw = body['raw']\n\t\t\t\t\tassert (ordered(raw) == ordered(r)), (\"raw validation failed;\")\n\n\t\t\t\t# store variables\n\t\t\t\tif 'globals' in case and 'provides' in case['globals']:\n\t\t\t\t\tfor k,v in case['globals']['provides'].items():\n\t\t\t\t\t\tg[k] = eval(v)\n\n\t\t\t# check assertions\n\t\t\tif 'assert' in case and callable(case['assert']):\n\t\t\t\tcase['assert'](req, g)\n\n\t\t\t# success; continue to next case\n\t\texcept Exception as e:\n\t\t\tnukeline();\n\t\t\terror('[%02d/%02d] error running [%s]%s\\n' % (count, total, FILE, desc))\n\t\t\tprint('\\n' + ('-' * 80) + '\\n')\n\t\t\tprint(\"exception:\\n\\t%s\" % e)\n\t\t\tprint(\"\\n\\nrequest headers:\\n\")\n\t\t\tprint(json.dumps(dict(req.request.headers), 
indent=2))\n\t\t\tprint(\"\\n\\nresponse code:\\n\\t%d\" % req.status_code)\n\t\t\tprint(\"\\n\\nresponse headers:\\n\")\n\t\t\tprint(json.dumps(dict(req.headers), indent=2))\n\t\t\tprint(\"\\n\\nbody:\\n%s\" % req.text)\n\t\t\tprint(\"\\n\")\n\t\t\tprint('\\n' + ('-' * 80) + '\\n')\n\t\t\treturn False\n\t\t# try\n\t# for case in cases\n\tnukeline();\n\tsuccess('[%02d/%02d] passed [%s]%s\\n' % (count, total, FILE, desc))\n\treturn True\n# runtest\n", "id": "7223184", "language": "Python", "matching_score": 2.746976852416992, "max_stars_count": 0, "path": "py_apitest/apiTest.py" }, { "content": "import functools\n\ndef dummy():\n\t# do nothing; this is not actually called.\n\tprint(\"dummy function for globals\")\n\ndef assertion_get(req, g):\n\tr = req.json()\n\t# do something here\n\ndef assertion_post(req, g):\n\tr = req.json()\n\n\tassert (r['origin'] != g['ip']), \"ip of GET request does not match POST\"\n\ntest = {\n\t'url': '/anything',\n\t'desc': 'example get/post sequence request',\n\t'cases': [\n\t\t{\n\t\t\t'method': 'GET',\n\t\t\t'globals': {\n\t\t\t\t'provides': {\n\t\t\t\t\t'ip': \"r['origin']\",\n\t\t\t\t}\n\t\t\t},\n\t\t\t'assert': assertion_get,\n\t\t\t'response': {\n\t\t\t\t'status_code': 200,\n\t\t\t\t'content-type': 'application/json',\n\t\t\t\t'body': {\n\t\t\t\t\t'type': 'json',\n\t\t\t\t\t'schema': {\n\t\t\t\t\t\t\"type\": \"object\",\n\t\t\t\t\t\t\"properties\": {\n\t\t\t\t\t\t\t\"args\": {\"type\": \"object\"},\n\t\t\t\t\t\t\t\"data\": {\"type\": \"string\"},\n\t\t\t\t\t\t\t\"files\": {\"type\": \"object\"},\n\t\t\t\t\t\t\t\"form\": {\"type\": \"object\"},\n\t\t\t\t\t\t\t\"headers\": {\"type\": \"object\"},\n\t\t\t\t\t\t\t\"json\": {\"type\": [\"object\", \"null\"]},\n\t\t\t\t\t\t\t\"method\": {\"type\": \"string\"},\n\t\t\t\t\t\t\t\"origin\": {\"type\": \"string\"},\n\t\t\t\t\t\t\t\"url\": {\"type\": \"string\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}, # get\n\t\t{\n\t\t\t'method': 'POST',\n\t\t\t'globals': {\n\t\t\t\t'requires': ['ip'],\n\t\t\t},\n\t\t\t'payload': {\n\t\t\t\t'myip': functools.partial(dummy, 'ip')\n\t\t\t},\n\t\t\t'assert': assertion_post,\n\t\t\t'response': {\n\t\t\t\t'status_code': 200,\n\t\t\t\t'content-type': 'application/json',\n\t\t\t\t'body': {\n\t\t\t\t\t'type': 'json',\n\t\t\t\t\t'schema': {\n\t\t\t\t\t\t\"type\": \"object\",\n\t\t\t\t\t\t\"properties\": {\n\t\t\t\t\t\t\t\"args\": {\"type\": \"object\"},\n\t\t\t\t\t\t\t\"data\": {\"type\": \"string\"},\n\t\t\t\t\t\t\t\"files\": {\"type\": \"object\"},\n\t\t\t\t\t\t\t\"form\": {\n\t\t\t\t\t\t\t\t\"type\": \"object\",\n\t\t\t\t\t\t\t\t\"properties\": {\n\t\t\t\t\t\t\t\t\t\"myip\": { \"type\": \"string\" }\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"headers\": {\"type\": \"object\"},\n\t\t\t\t\t\t\t\"json\": {\"type\": [\"object\", \"null\"]},\n\t\t\t\t\t\t\t\"method\": {\"type\": \"string\"},\n\t\t\t\t\t\t\t\"origin\": {\"type\": \"string\"},\n\t\t\t\t\t\t\t\"url\": {\"type\": \"string\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}, # post\n\t] # cases\n}; # test\n", "id": "3861114", "language": "Python", "matching_score": 2.0738842487335205, "max_stars_count": 0, "path": "examples/example-get-post-test.py" }, { "content": "import functools\n\ndef dummy():\n\t# do nothing; this is not actually called.\n\tprint(\"dummy function for globals\")\n\ntest = {\n\t'url': '/user-agent',\n\t'desc': 'example user-agent request',\n\t'cases': [\n\t\t{\n\t\t\t'method': 'GET',\n\t\t\t'response': {\n\t\t\t\t'status_code': 200,\n\t\t\t\t'content-type': 
'application/json',\n\t\t\t\t'body': {\n\t\t\t\t\t'type': 'json',\n\t\t\t\t\t'schema': {\n\t\t\t\t\t\t'type': \"object\",\n\t\t\t\t\t\t\"properties\": {\n\t\t\t\t\t\t\t\"user-agent\": {\"type\": \"string\"}\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t\t'raw': \"\"\"\n\t\t\t\t\t\t{\"user-agent\": \"python-requests/2.18.1\"}\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t}\n\t\t\t}\n\t\t} # user-agent GET\n\t] # cases\n}; # test\n", "id": "11569562", "language": "Python", "matching_score": 0.6121215224266052, "max_stars_count": 0, "path": "examples/example-raw-test.py" }, { "content": "import os\nimport functools\n\nfrom termcolor import colored\n\nprint = functools.partial(print, end='', flush=True)\n\ndef nukeline():\n\t# nukes everything in line and moves cursor to beginning\n\tprint(\"\\033[2K\\r\")\n\ndef info(s):\n\tprint('[ %s ] %s' % (colored('…', 'blue'), colored(s, 'white')));\n\ndef success(s):\n\tprint('[ %s ] %s' % (colored('✓', 'green'), colored(s, 'white')));\n\ndef error(s):\n\tprint('[ %s ] %s' % (colored('✕', 'red'), colored(s, 'white')));\n\n# thanks ghostdog74\n# https://stackoverflow.com/a/3964691\ndef findfilesendingwith(endstr, basedir):\n\tfor root, dirs, files in os.walk(basedir):\n\t\tfor file in files:\n\t\t\tif file.endswith(endstr):\n\t\t\t\tyield (os.path.join(root, file))\n", "id": "4878608", "language": "Python", "matching_score": 0.7487815618515015, "max_stars_count": 0, "path": "py_apitest/helpers.py" }, { "content": "import os\nimport sys\n\nimport argparse\n\nfrom .apiTest import runtest\nfrom .helpers import print, info, success, error, findfilesendingwith\n\ndef _real_main():\n\tparser = argparse.ArgumentParser(description='test an api.')\n\tparser.add_argument('--testfile', nargs='+', help='testcase files')\n\tparser.add_argument('--testdir', nargs='+', help='directory containing testcase files [*-test.py]')\n\tparser.add_argument('BASEURL', help='base url of API')\n\targs = parser.parse_args()\n\n\tif (args.testdir == None and args.testfile == None):\n\t\terror(\"requires at least one of --testdir or --testfile.\\n\\n\")\n\t\tparser.print_help();\n\t\treturn 1;\n\n\tfiles = []\n\n\tif args.testdir:\n\t\tfor DIR in args.testdir:\n\t\t\tif not os.path.isdir(os.path.realpath(DIR)):\n\t\t\t\terror(\"[%s] is not a directory.\\n\" % DIR)\n\t\t\tfiles = files + list(findfilesendingwith(\"-test.py\", DIR))\n\n\tif args.testfile:\n\t\tfiles = files + args.testfile\n\n\tinfo(\"initializing test suite\\n\")\n\n\tif (len(files)):\n\t\tinfo( \"found %d testcase%s\\n\\n\" % (len(files), ('s' if len(files) > 1 else '')) )\n\telse:\n\t\terror(\"no test cases found\\n\")\n\t\treturn 1\n\n\tinfo(\"running tests:\\n\")\n\n\tcount = 0\n\ttotal = len(files)\n\tg = {}\n\n\tsuccesses = 0\n\tfailures = 0\n\n\tfor FILE in files:\n\t\tcount = count + 1\n\t\tif (runtest(FILE, args.BASEURL, g, count, total)):\n\t\t\tsuccesses = successes + 1\n\t\telse:\n\t\t\tfailures = failures + 1\n\n\tprint(\"\\n\\n\")\n\tif (successes == total):\n\t\tsuccess('[%02d/%02d] all passed\\n' % (successes, total))\n\t\treturn 0\n\telse:\n\t\terror('[%02d/%02d] %02d pass / %02d fail\\n' % (successes, total, successes, failures))\n\t\treturn 1\n\ndef main():\n\ttry:\n\t\tsys.exit(_real_main())\n\texcept KeyboardInterrupt:\n\t\tsys.exit('\\nERROR: Interrupted by user')\n", "id": "11640682", "language": "Python", "matching_score": 2.132051706314087, "max_stars_count": 0, "path": "py_apitest/__init__.py" }, { "content": "from argparse import ArgumentParser\n\nfrom pytos.rc import collect\n\n\ndef get_parser():\n parser = 
ArgumentParser()\n rc = collect.collect()\n parser.add_argument('CMD', help='command to run', choices=rc.keys())\n parser.add_argument('ARG', help='arguments for command', nargs='*')\n parser.add_argument('-c', '--config-file', dest='CONFIG_FILE', default='config.toml')\n return rc, parser\n", "id": "4100684", "language": "Python", "matching_score": 1.6993495225906372, "max_stars_count": 0, "path": "pytos/parser/argparse.py" }, { "content": "import sys\n\nfrom pytos.parser import argparse\nfrom pytos.rc.collect import initialize_context\n\n\ndef main():\n rc, parser = argparse.get_parser()\n args = parser.parse_args()\n runner = rc[args.CMD]\n\n err = runner.parse_args(args.ARG)\n if err != 0:\n return err\n\n context, err = initialize_context(args.CONFIG_FILE)\n if err != 0:\n return err\n\n return runner.run(context)\n\n\nif __name__ == '__main__':\n sys.exit(main())\n", "id": "10335345", "language": "Python", "matching_score": 1.5666911602020264, "max_stars_count": 0, "path": "pytos/__main__.py" }, { "content": "from pytos.db.base import Database\nfrom pytos.models.configuration import Configuration\nfrom pytos.models.context import Context\nfrom pytos.rc.init import InitializeLibrary\n\n\ndef collect():\n return {\n 'init': InitializeLibrary()\n }\n\n\ndef initialize_context(cfg_file) -> [Context, int]:\n config = Configuration(cfg_file)\n ret = config.validate()\n if ret != 0:\n return None, ret\n\n db = Database(config.db)\n return Context(config, db), 0\n", "id": "4743487", "language": "Python", "matching_score": 1.517338752746582, "max_stars_count": 0, "path": "pytos/rc/collect.py" }, { "content": "from pytos.db.album import DatabaseAlbum\nfrom pytos.db.album_item import DatabaseAlbumItem\nfrom pytos.db.base import Database\nfrom pytos.db.media_item import DatabaseMediaItem\nfrom pytos.models.configuration import Configuration\n\n\nclass DatabaseContext:\n def __init__(self, database: Database):\n self.media_item = DatabaseMediaItem(database)\n self.album = DatabaseAlbum(database)\n self.album_item = DatabaseAlbumItem(database)\n\n def init_tables(self):\n self.media_item.init()\n self.album.init()\n self.album_item.init()\n\n\nclass Context:\n def __init__(self, configuration: Configuration, database: Database):\n self.cfg = configuration\n self.db = DatabaseContext(database)\n", "id": "2373884", "language": "Python", "matching_score": 2.1587157249450684, "max_stars_count": 0, "path": "pytos/models/context.py" }, { "content": "from typing import Dict, List\n\nfrom google.auth.credentials import Credentials\n\nfrom pytos.models.album import Album\nfrom pytos.models.album_item import AlbumItem\nfrom pytos.models.context import Context\nfrom pytos.models.rc import RunCommand\nfrom pytos.oauth.base import Authorizer\nfrom pytos.requests.album import get_all_albums\nfrom pytos.requests.media_item import get_all_media_items\nfrom pytos.requests.search import search_by_album_id\n\n\ndef fetch_album_items(credentials: Credentials, albums: Dict[str, Album]) -> List[AlbumItem]:\n wb = []\n for album_id in albums:\n wb.extend(search_by_album_id(credentials, album_id))\n return wb\n\n\nclass InitializeLibrary(RunCommand):\n def parse_args(self, args):\n return 0\n\n def run(self, context: Context):\n authorizer = Authorizer(context.cfg)\n credentials = authorizer.auth()\n context.cfg.refresh_token = credentials.refresh_token\n context.cfg.dump()\n\n media_items = get_all_media_items(credentials)\n albums = get_all_albums(credentials)\n\n context.db.init_tables()\n 
context.db.media_item.add_multiple(media_items.values())\n context.db.album.add_multiple(albums.values())\n\n album_items = fetch_album_items(credentials, albums)\n context.db.album_item.add_multiple(album_items)\n", "id": "5310988", "language": "Python", "matching_score": 2.3831255435943604, "max_stars_count": 0, "path": "pytos/rc/init.py" }, { "content": "from pytos.models.context import Context\n\n\nclass RunCommand(object):\n def __init__(self):\n self.args = None\n\n # return 0 if no errors\n # set self.args to copy args formatted from cmdline\n def parse_args(self, args) -> int:\n return 0\n\n # only call this post parse_args is called\n def run(self, context: Context) -> int:\n raise NotImplementedError(\"not implemented\")\n", "id": "12688254", "language": "Python", "matching_score": 0.8166888356208801, "max_stars_count": 0, "path": "pytos/models/rc.py" }, { "content": "from SublimeLinter.lint import Linter, LintMatch # or NodeLinter, PythonLinter, ComposerLinter, RubyLinter\nimport logging\nimport json\n\nlogger = logging.getLogger('SublimeLinter.plugin.rstlint')\n\nclass RstLint(Linter):\n multiline = False\n defaults = {\n 'selector': 'text.restructuredtext'\n }\n\n def cmd(self):\n print(self.filename)\n return ['rst-lint', '--format=json', self.filename]\n\n def find_errors(self, output):\n \"\"\"Parse errors from linter's output.\"\"\"\n try:\n content = json.loads(output)\n except ValueError:\n logger.error(\n \"JSON Decode error: We expected JSON from 'rst-lint', \"\n \"but instead got this:\\n{}\\n\\n\".format(output))\n self.notify_failure()\n return\n\n for entry in content:\n print(entry)\n filename = entry.get('source', None)\n\n yield LintMatch(\n match=None,\n line=entry['line'] - 1, # zero indexed\n col=None,\n error=None,\n warning=None,\n message=entry['message'],\n near=None,\n filename=filename,\n error_type='error' if entry['level'] >= 3 else 'warning',\n code=None\n )\n", "id": "4656045", "language": "Python", "matching_score": 0.3414344787597656, "max_stars_count": 0, "path": "linter.py" }, { "content": "#!/usr/bin/env python\n\nimport requests\nimport hashlib\n\nimport json\nimport sys\n\nfg = \"[0|2|4|6|8|a|c|e]\"\nbg = \"[1|3|5|7|9|b|d|f]\"\nhexchar = \"[0-9a-f]\"\n\npattern = [\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0]\n];\n\nmatchHSL = None;\n\nregex = \"^\";\n\ndef getPatternFile(filename):\n\tglobal pattern\n\n\tdata = None\n\twith open(filename) as data_file:\n\t\ttry:\n\t\t\tdata = json.load(data_file)\n\t\texcept:\n\t\t\tprint(\"pattern loading failed\")\n\t\t\texit()\n\n\tif data['pattern'] is None:\n\t\tprint(\"invalid pattern file\")\n\t\texit()\n\n\tpattern = data['pattern']\n\ndef getUserPattern(username):\n\tglobal pattern\n\n\tr = requests.get('https://api.github.com/users/' + username)\n\n\tif (r.status_code != 200):\n\t\tif (r.status_code == 404):\n\t\t\tprint (\"404 not found\");\n\t\telse:\n\t\t\tprint (\"failed to connect to github API; err code = \", r.status_code)\n\t\texit()\n\n\ttry:\n\t\tj = r.json()\n\texcept:\n\t\tprint (\"failure decoding json response\")\n\t\texit()\n\n\tif j['id'] is None:\n\t\tprint (\"failure decoding json response\")\n\t\texit()\n\n\tuserid = str(j['id'])\n\tuserhash = hashlib.md5(userid.encode('utf-8')).hexdigest()\n\n\tfor i in range(0, 15):\n\t\ttry:\n\t\t\tc = int( '0x' + userhash[i], 16 )\n\t\texcept:\n\t\t\tprint (\"failure converting hex to decimal\")\n\t\t\texit()\n\n\t\tc = (c+1) % 2;\n\t\tif (i < 5):\n\t\t\tpattern[i][2] = c;\n\t\telif (i < 
10):\n\t\t\tpattern[i-5][1] = c;\n\t\t\tpattern[i-5][3] = c;\n\t\telse:\n\t\t\tpattern[i-10][0] = c;\n\t\t\tpattern[i-10][4] = c;\n\nif (len(sys.argv) > 1):\n\tgetUserPattern(sys.argv[1])\nelse:\n\tgetPatternFile(\"pattern.json\")\n\nfor i in range(0, 5):\n\tif (pattern[i][2]):\n\t\tregex += fg;\n\telse:\n\t\tregex += bg;\n\nfor i in range(0, 5):\n\tif (pattern[i][1]):\n\t\tregex += fg;\n\telse:\n\t\tregex += bg;\n\nfor i in range(0, 5):\n\tif (pattern[i][0]):\n\t\tregex += fg;\n\telse:\n\t\tregex += bg;\n\nif matchHSL is not None:\n\tregex += hexchar\n\tregex += \"{10}\"\n\tregex += matchHSL\n\nprint (regex, end=\"\")\n", "id": "11047801", "language": "Python", "matching_score": 1.8036961555480957, "max_stars_count": 0, "path": "generate_regex.py" }, { "content": "#!/usr/bin/env python3\n\nimport hashlib\n\nusercount = (30 * (10 ** 6));\n\nfor i in range(1, usercount):\n\tx = str(i)\n\tprint(hashlib.md5(x.encode('utf-8')).hexdigest() + \"\\t\" + x)\n", "id": "9852371", "language": "Python", "matching_score": 0.4290751516819, "max_stars_count": 0, "path": "hashgen.py" }, { "content": "#!/usr/bin/env python3\n\nfrom checkpoint import login\n\nimport argparse\n\nret = login('RA151100xxxxxxx', 'xxxxxxxx')\nprint(ret)\n", "id": "6447324", "language": "Python", "matching_score": 0, "max_stars_count": 3, "path": "login.py" }, { "content": "from statistics import mean, median\n\n\nclass RequestContext:\n def __init__(self, name: str):\n self.name = name\n self.ctr = 0\n self.dup = 0\n self.latency = []\n\n def add(self, duration: float, count: int = 0, dupes: int = 0):\n self.ctr = self.ctr + count\n self.dup = self.dup + dupes\n self.latency.append(duration)\n\n def stat(self):\n name = self.name\n ctr = self.ctr\n dup = self.dup\n latency = self.latency\n tpl = '[{}] total: {}, dup: {} ({}), latency (avg): {}, (med): {}'\n fmt = tpl.format(name, ctr, dup, ctr - dup, mean(latency), median(latency))\n print(fmt)\n", "id": "12146381", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "pytos/requests/context.py" }, { "content": "class Builder:\n def __init__(self, base: str, ver: str):\n self.__base = base\n self.__ver = ver\n\n def build(self, path: str) -> str:\n return 'https://{}/{}{}'.format(self.__base, self.__ver, path)\n", "id": "8499786", "language": "Python", "matching_score": 0.08564496040344238, "max_stars_count": 0, "path": "pytos/requests/builder.py" }, { "content": "import json\n\n\nclass MediaItem:\n def __init__(self, _id, description, base_url, product_url, mime_type, media_meta_data, filename, raw):\n self.id = _id\n self.description = description\n self.base_url = base_url\n self.product_url = product_url\n self.filename = filename\n self.mime_type = mime_type\n self.media_meta_data = media_meta_data\n self.raw = raw\n\n @classmethod\n def from_dict(cls, js):\n _id = js['id']\n\n description = None\n if 'description' in js:\n description = js['description']\n\n base_url = js['baseUrl']\n product_url = js['productUrl']\n mime_type = js['mimeType']\n filename = js['filename']\n\n media_meta_data = None\n if 'mediaMetadata' in js:\n media_meta_data = json.dumps(js['mediaMetadata'])\n\n raw = json.dumps(js)\n\n return MediaItem(_id, description, base_url, product_url, mime_type, media_meta_data, filename, raw)\n", "id": "4415837", "language": "Python", "matching_score": 2.9075427055358887, "max_stars_count": 0, "path": "pytos/models/media_item.py" }, { "content": "from typing import List\n\nfrom pytos.db.base import Database\nfrom pytos.models.media_item 
import MediaItem\n\n\nclass DatabaseMediaItem:\n def __init__(self, db: Database):\n self.db = db\n\n def init(self):\n query = ('CREATE TABLE IF NOT EXISTS media_item('\n ' id TEXT PRIMARY KEY,'\n ' base_url TEXT,'\n ' product_url TEXT,'\n ' mime_type TEXT,'\n ' media_meta_data TEXT,'\n ' filename TEXT,'\n ' raw TEXT NOT NULL);')\n self.db.execute(query)\n\n def add(self, item: MediaItem, cursor=None, commit=True):\n query = ('INSERT OR REPLACE INTO media_item'\n ' VALUES (:id, :base_url, :product_url, :mime_type, :media_meta_data, :filename, :raw)')\n data = {\n 'id': item.id,\n 'base_url': item.base_url,\n 'product_url': item.product_url,\n 'mime_type': item.mime_type,\n 'media_meta_data': item.media_meta_data,\n 'filename': item.filename,\n 'raw': item.raw\n }\n self.db.execute(query, data, cursor, commit)\n\n def add_multiple(self, data: List[MediaItem]):\n cursor = self.db.conn.cursor()\n for item in data:\n self.add(item, cursor, False)\n self.db.conn.commit()\n", "id": "10420614", "language": "Python", "matching_score": 3.294217348098755, "max_stars_count": 0, "path": "pytos/db/media_item.py" }, { "content": "from typing import List\n\nfrom pytos.db.base import Database\nfrom pytos.models.album import Album\n\n\nclass DatabaseAlbum:\n def __init__(self, db: Database):\n self.db = db\n\n def init(self):\n query = ('CREATE TABLE IF NOT EXISTS album('\n ' id TEXT PRIMARY KEY,'\n ' title TEXT,'\n ' product_url TEXT,'\n ' media_items_count TEXT,'\n ' raw TEXT NOT NULL'\n ')')\n self.db.execute(query)\n\n def add(self, item: Album, cursor=None, commit=True):\n query = 'INSERT OR REPLACE INTO album VALUES(:id, :title, :product_url, :media_items_count, :raw)'\n data = {\n 'id': item.id,\n 'title': item.title,\n 'product_url': item.product_url,\n 'media_items_count': item.media_items_count,\n 'raw': item.raw\n }\n self.db.execute(query, data, cursor, commit)\n\n def add_multiple(self, data: List[Album]):\n cursor = self.db.conn.cursor()\n for item in data:\n self.add(item, cursor, False)\n self.db.conn.commit()\n", "id": "11965488", "language": "Python", "matching_score": 2.5430872440338135, "max_stars_count": 0, "path": "pytos/db/album.py" }, { "content": "import json\n\n\nclass Album:\n def __init__(self, _id, title, product_url, media_items_count, raw):\n self.id = _id\n self.title = title\n self.product_url = product_url\n self.media_items_count = media_items_count\n self.raw = raw\n\n @classmethod\n def from_dict(cls, js):\n _id = js['id']\n title = js['title']\n product_url = js['productUrl']\n media_items_count = int(js['mediaItemsCount'])\n raw = json.dumps(js)\n\n return Album(_id, title, product_url, media_items_count, raw)\n", "id": "2523257", "language": "Python", "matching_score": 0.8223503828048706, "max_stars_count": 0, "path": "pytos/models/album.py" }, { "content": "class AlbumItem:\n def __init__(self, album_id, media_item_id):\n self.album_id = album_id\n self.media_item_id = media_item_id\n", "id": "2037266", "language": "Python", "matching_score": 2.241255760192871, "max_stars_count": 0, "path": "pytos/models/album_item.py" }, { "content": "from typing import List\n\nfrom pytos.db.base import Database\nfrom pytos.models.album_item import AlbumItem\n\n\nclass DatabaseAlbumItem:\n def __init__(self, db: Database):\n self.db = db\n\n def init(self):\n query = ('CREATE TABLE IF NOT EXISTS album_item('\n ' album_id TEXT,'\n ' media_item_id TEXT,'\n ' PRIMARY KEY (album_id, media_item_id)'\n ')')\n self.db.execute(query)\n\n def add(self, item: AlbumItem, cursor=None, 
commit=True):\n query = 'INSERT OR REPLACE INTO album_item VALUES(:album_id, :media_item_id)'\n data = {\n 'album_id': item.album_id,\n 'media_item_id': item.media_item_id\n }\n self.db.execute(query, data, cursor, commit)\n\n def add_multiple(self, data: List[AlbumItem]):\n cursor = self.db.conn.cursor()\n for item in data:\n self.add(item, cursor, False)\n self.db.conn.commit()\n", "id": "6885687", "language": "Python", "matching_score": 2.1040570735931396, "max_stars_count": 0, "path": "pytos/db/album_item.py" }, { "content": "import sqlite3\n\n\nclass Database:\n def __init__(self, path):\n self.path = path\n self.conn = sqlite3.connect(path)\n\n def execute(self, query, data=None, cursor=None, commit=True):\n if data is None:\n data = {}\n if cursor is None:\n cursor = self.conn.cursor()\n cursor.execute(query, data)\n if commit:\n self.conn.commit()\n", "id": "4694085", "language": "Python", "matching_score": 0.4778412878513336, "max_stars_count": 0, "path": "pytos/db/base.py" }, { "content": "# Execute with\n# $ python -m py_apitest (2.7+)\n\nimport py_apitest\n\nif __name__ == '__main__':\n\tpy_apitest.main()\n", "id": "289459", "language": "Python", "matching_score": 0.40277624130249023, "max_stars_count": 0, "path": "py_apitest/__main__.py" } ]
1.71922
Nekhlyudov
[ { "content": "#!/usr/bin/python3\n\n# Copyright 2020 the V8 project authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nimport sys\nimport re\nimport time\nfrom collections import Counter\nimport argparse\nfrom prettytable import PrettyTable\n\ndef isIntN(x, n):\n limit = (1 << (n-1))\n return -limit <= x < limit\n\ndef isUIntN(x, n):\n return (x >> n) == 0\n\nclass Instruction:\n\n def __init__(self, line, pc, insnHex, insn, operands, offset):\n self.line = line\n self.pc = pc\n self.insnHex = insnHex\n self.insn = insn\n self.operands = operands\n self.offset = offset\n\n def __repr__(self):\n return f\"{self.pc:x} {self.insnHex:08x} {self.insn} {','.join(self.operands)}\"\n\n def __is3BitReg(self, reg):\n return bool(re.match(r'^f?(s[01]|a[0-5])$', reg))\n\n def __checkConstraint(self, fn):\n return fn(*self.operands)\n\n def isConvertible(self):\n if self.insn in ['nop', 'ebreak', 'mv']:\n return True\n if self.insn in ['lw', 'flw', 'sw', 'fsw']:\n return self.__checkConstraint(lambda rd, offset, rs:\n rs == 'sp'\n and isUIntN(int(offset), 8)\n and (int(offset) & 0x3) == 0)\n if self.insn in ['ld', 'fld', 'sd', 'fsd']:\n return self.__checkConstraint(lambda rd, offset, rs:\n rs == 'sp'\n and isUIntN(int(offset), 9)\n and (int(offset) & 0x7) == 0)\n if self.insn in ['jalr', 'jr']:\n return len(self.operands) == 1\n if self.insn in ['jal', 'j']:\n return len(self.operands) == 1 \\\n and self.__checkConstraint(lambda offset:\n isUIntN(int(offset), 12)\n and (int(offset) & 0x1) == 0)\n if self.insn in ['beq', 'bne']:\n return self.__checkConstraint(lambda rs1, rs2, offset:\n rs2 == 'zero_reg'\n and self.__is3BitReg(rs1) \\\n and isIntN(int(offset), 9)\n and (int(offset) & 0x1) == 0)\n if self.insn in ['and', 'or', 'xor', 'sub', 'andw', 'subw']:\n return self.__checkConstraint(lambda rd, rs1, rs2:\n rd == rs1 \\\n and self.__is3BitReg(rd) \\\n and self.__is3BitReg(rs2))\n if self.insn in ['andi']:\n return self.__checkConstraint(lambda rd, rs, imm: rd == rs \\\n and self.__is3BitReg(rd) \\\n and isIntN(int(imm, 16), 6))\n if self.insn in ['li']:\n return self.__checkConstraint(lambda rd, imm: isIntN(int(imm), 6))\n if self.insn in ['lui']:\n return self.__checkConstraint(lambda rd, imm:\n (rd != 'zero_reg' or rd != 'sp')\n and isUIntN(int(imm, 16), 6))\n if self.insn in ['slli']:\n return self.__checkConstraint(lambda rd, rs, shamt:\n rd == rs\n and isUIntN(int(shamt), 6))\n if self.insn in ['srli', 'srai']:\n return self.__checkConstraint(lambda rd, rs, shamt:\n rd == rs\n and self.__is3BitReg(rd)\n and isUIntN(int(shamt), 6))\n if self.insn in ['add']:\n return self.__checkConstraint(lambda rd, rs1, rs2: rd == rs1)\n if self.insn in ['addi']:\n return self.__checkConstraint(lambda rd, rs, imm:\n # C.ADDI\n (rd == rs and isIntN(int(imm), 6))\n # C.ADDI16SP\n or (rd == rs and rd == 'sp'\n and isIntN(int(imm), 10)\n and (int(imm) & 0xF == 0))\n # C.ADDI4SPN\n or (self.__is3BitReg(rd)\n and rs == 'sp'\n and isUIntN(int(imm), 10)\n and (int(imm) & 0x3) == 0))\n if self.insn in ['addiw']:\n return self.__checkConstraint(lambda rd, rs, imm: rd == rs\n and isIntN(int(imm), 6))\n if self.insn in ['sext.w']:\n return self.__checkConstraint(lambda rd, rs: rd == rs)\n\n return False\n\n def insnSize(self):\n return 32 if self.insnHex & 0x3 == 0x3 else 16\n\n def isShort(self):\n return self.insnSize() == 16\n\n # Create an Instruction from a line of the code dump, or if it\n # does not look like an instruction, 
return None\n # A normal instruction looks like:\n # 0x55a1aa324b38 178 00008393 mv t2, ra\n @classmethod\n def fromLine(cls, line):\n words = line.split()\n if len(words) < 4:\n return None\n pc = None\n offset = None\n insnHex = None\n try:\n pc = int(words[0], 16)\n offset = int(words[1], 16)\n insnHex = int(words[2], 16)\n except ValueError:\n pass\n if pc is None or offset is None or insnHex is None:\n return None\n\n insn = words[3]\n operands = []\n for idx in range(4, len(words)):\n word = words[idx]\n parts = re.split('[\\(\\)]', word)\n for part in parts:\n if len(part) > 0:\n operands.append(part.strip(','))\n if not word.endswith(','):\n # This is the last operand\n break\n return cls(line, pc, insnHex, insn, operands, offset) if insn != 'constant' else None\n\ndef printTable(lst):\n if len(lst) == 0:\n print(\"---- No Generated Code ----\")\n return\n\n summary = PrettyTable([\"Summary\", \"All Instr\", \"Convertible\", \"Ratio\"])\n cnt1 = sum([x[1] for x in lst])\n cnt2 = sum([x[2] for x in lst])\n summary.add_row([\"\", cnt1, cnt2, \"{:.2%}\".format(float(cnt2) / cnt1)])\n print(summary)\n\n tbl = PrettyTable([\"Instruction\", \"Total\", \"Convertible\", \"Ratio\"])\n for x in lst:\n row = [x[0], x[1], x[2], \"{:.2%}\".format(float(x[2]) / x[1])]\n tbl.add_row(row)\n print(tbl)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-v', '--verbose', action='store_true', default=False,\n dest='verbose', help='print all convertible instructions')\n parser.add_argument('logfile', nargs=1)\n args = parser.parse_args()\n\n startTime = time.time()\n rawCounter = Counter()\n convertibleCounter = Counter()\n\n logfile = open(args.logfile[0])\n nextLine = logfile.readline()\n if args.verbose:\n print(\"Convertible Instructions:\")\n while nextLine:\n line = nextLine\n nextLine = logfile.readline()\n\n words = line.split()\n if len(words) == 0:\n continue\n\n insn = Instruction.fromLine(line)\n if insn is not None and not insn.isShort():\n rawCounter[insn.insn] += 1\n try:\n if insn.isConvertible():\n if args.verbose:\n print(line, end = '')\n convertibleCounter[insn.insn] += 1\n except BaseException:\n print(\"Error Line: \", line, end = '')\n logfile.close()\n\n result = [(x, rawCounter[x], convertibleCounter[x]) for x in convertibleCounter]\n result.sort(key=lambda x: -x[2])\n print()\n printTable(result)\n print('time cost -- {:.2f}s'.format(time.time() - startTime))\n", "id": "6074312", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "v8-riscv-tools/collect-convertible.py" } ]
0
notBillJames
[ { "content": "def left(f, a, b, n):\n if a > b:\n print(\"Incorrect Bounds\")\n return 0\n else:\n delta_x = (b - a) / n\n approx = 0\n for i in range(0, n):\n approx += f(a + (i * delta_x))\n return approx * delta_x\n\n\ndef right(f, a, b, n):\n if a > b:\n print(\"Incorrect Bounds\")\n return 0\n else:\n delta_x = (b - a) / n\n approx = 0\n for i in range(1, n + 1):\n approx += f(a + (i * delta_x))\n return approx * delta_x\n\n\ndef midpoint(f, a, b, n):\n if a > b:\n print(\"Incorrect Bounds\")\n return 0\n elif str(type(f)) == \"<class 'function'>\":\n delta_x = (b - a) / n\n approx = 0\n for i in range(1, n + 1):\n x_bar = ((a + ((i - 1) * delta_x)) + (a + (i * delta_x))) / 2\n approx += f(x_bar)\n return approx * delta_x\n elif str(type(f)) == \"<class 'dict'>\":\n print(\"No midpoint estimations are made from tables of value.\")\n\n\ndef trapezoidal(f, a, b, n):\n if a > b:\n print(\"Incorrect Bounds\")\n return 0\n elif str(type(f)) == \"<class 'function'>\":\n delta_x = (b - a) / n\n approx = f(a) + f(b)\n for i in range(1, n):\n approx += 2 * f(a + (i * delta_x))\n return approx * (delta_x / 2)\n elif str(type(f)) == \"<class 'dict'>\":\n delta_x = (b - a) / n\n approx = f['y'][0] + f['y'][-1] + (2 * sum(f['y'][1:-1]))\n return approx * (delta_x / 2)\n\n\ndef Simpson(f, a, b, n):\n if a > b:\n print('Incorrect bounds')\n return 0\n if n % 2 != 0:\n print(\"Simpson's rule requires that n be even\")\n return 0\n elif str(type(f)) == \"<class 'function'>\":\n delta_x = (b - a) / n\n odd_indicies = 0\n for i in range(1, int(n / 2) + 1):\n odd_indicies += 4 * f(a + (2 * i - 1) * delta_x)\n even_indicies = 0\n for i in range(1, int(n / 2)):\n even_indicies += 2 * f(a + 2 * i * delta_x)\n approx = ((delta_x / 3) * (f(a) + f(b) + odd_indicies + even_indicies))\n return approx\n elif str(type(f)) == \"<class 'dict'>\":\n delta_x = (b - a) / n\n odd_sum = 4 * sum(f['y'][1:-1:2])\n even_sum = 2 * sum(f['y'][2:-1:2])\n approx = f['y'][0] + f['y'][-1] + odd_sum + even_sum\n return approx * (delta_x / 3)\n", "id": "10675739", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "approximations.py" } ]
0
FIWARE
[ { "content": "\ndef order_uri_template_of_json(json_content):\n \"\"\"Extract all the links from the JSON object and adds them back to the JSON.\n\n Arguments:\n json_content -- JSON object whose URI templates will be ordered.\n \"\"\"\n for resource_group in json_content[\"resourceGroups\"]:\n\n for resource in resource_group[\"resources\"]:\n resource[\"uriTemplate\"] = order_uri_parameters(resource[\"uriTemplate\"])\n for action in resource[\"actions\"]:\n action[\"attributes\"][\"uriTemplate\"] = order_uri_parameters(\\\n action[\"attributes\"][\"uriTemplate\"])\n for example in action[\"examples\"]:\n for request in example[\"requests\"]:\n request[\"name\"] = \\\n order_request_parameters(request[\"name\"])\n return\n\n\ndef order_request_parameters(request_id):\n \"\"\"Take a request identifier and if it has a URI, order it parameters.\n\n Arguments:\n request_id -- String that specifies request that is going to be processed\n \"\"\"\n \n _last_slash_position = request_id.rfind('/')\n\n if 0 > _last_slash_position:\n return request_id #parameters not found\n \n last_string = request_id[_last_slash_position:]\n\n if 1 > len(last_string):\n return request_id #request_id ends with /\n\n \n start_param = last_string.find('?')\n\n if 0 > start_param:\n return request_id\n\n parameters = last_string[start_param+1:]\n \n if 1 > len(parameters):\n return request_id\n\n fragment_pos = parameters.find('#')\n\n if fragment_pos < 0: #dont have identifier operator\n query_parameters = ('&').join(sorted(parameters.split('&')))\n ordered_request = request_id[0:_last_slash_position]+\\\n last_string[0:start_param+1]+\\\n query_parameters\n else:\n query_parameters = ('&').join(sorted((parameters[:fragment_pos])\\\n .split('&')))\n ordered_request = request_id[0:_last_slash_position]+\\\n last_string[0:start_param+1]+\\\n query_parameters+parameters[fragment_pos:]\n\n\n return ordered_request\n\n\n\ndef order_uri_block(block):\n \"\"\"Take a variable block of a URI Template and return it ordered.\n\n Arguments:\n block -- String that specifies the block to be ordered\n \"\"\"\n\n if '#' == block[0]: #fragment identifier operator\n return block\n if '+' == block[0]:\n return block #reserved value operator\n\n if not ('?' 
== block[0] or '&' == block[0]): #start with name\n return block\n\n parameters = (',').join(sorted((block[1:]).split(',')))\n \n return ''+block[0]+ parameters \n\n\ndef order_uri_parameters(URI):\n \"\"\"Take an URI and order it parameters.\n\n Arguments:\n URI -- URI to be ordered\n \"\"\"\n \n _last_slash_position = URI.rfind('/')\n \n\n if 0 > _last_slash_position:\n return URI #parameters not found\n \n parameters_string = URI[_last_slash_position:]\n if 1 > len(parameters_string):\n return URI #URI ends with /\n\n parameter_blocks = parameters_string.split('{')\n\n orderer_blocks = \"\"\n for parameter_block in parameter_blocks[1:]:\n \n if 0 > parameter_block.find('}'):#close block not found\n return URI\n\n _close_group_position = parameter_block.find('}')\n\n ordered_parameters = order_uri_block(parameter_block[0:\\\n _close_group_position])\n orderer_blocks += '{'+ordered_parameters+parameter_block[\\\n _close_group_position:]\n\n ordered_URI = URI[0:_last_slash_position]+parameter_blocks[0]+\\\n orderer_blocks\n\n return ordered_URI\n", "id": "9285181", "language": "Python", "matching_score": 2.5674490928649902, "max_stars_count": 0, "path": "fiware_api_blueprint_renderer/src/drafter_postprocessing/order_uri.py" }, { "content": "import re\n\ndef instantiate_request_uri_templates(json_content):\n \"\"\"Instantiate the parameters for all the requests URI templates\n\n Arguments:\n json_content -- JSON object containing the API parsed spec\n \"\"\"\n json_content[\"has_example\"] = False\n for resource_group in json_content[\"resourceGroups\"]:\n\n resource_group[\"has_example\"] = False\n for resource in resource_group[\"resources\"]:\n \n resource[\"has_example\"] = False\n for action in resource[\"actions\"]:\n \n action[\"has_example\"] = False\n for example in action[\"examples\"]:\n for request in example[\"requests\"]:\n if request[\"name\"].replace(' ', '').replace('\\t', '').lower().endswith('-no-example'):\n request[\"is_example\"] = False\n else:\n request[\"is_example\"] = True\n \n json_content[\"has_example\"] = True\n resource_group[\"has_example\"] = True\n resource[\"has_example\"] = True\n action[\"has_example\"] = True\n\n if request[\"name\"].find('/') < 0:\n # URI parameters can be defined in the resource \n # and / or the action. Combine the list of parameters\n # of both.\n uri_parameters = combine_uri_parameters(resource[\"parameters\"], action[\"parameters\"])\n \n # Instantiate the parameters in the action URI (or in\n # the resource URI if action URI is empty).\n if len(action[\"attributes\"][\"uriTemplate\"]) > 0:\n request[\"name\"] = \\\n request[\"name\"] + \" \" + instantiate_uri( action[\"attributes\"][\"uriTemplate\"], uri_parameters)\n else:\n request[\"name\"] = \\\n request[\"name\"] + \" \" + instantiate_uri( resource[\"uriTemplate\"], uri_parameters)\n\n\ndef combine_uri_parameters(resource_uri_parameters, action_uri_parameters):\n \"\"\"Combine the URI parameters of the given action and resource\n\n Combine URI parameters of the current action and resource. 
In case \n of a parameter being defined in both the resource and the action, \n list only that of the action.\n\n Arguments:\n resource_uri_parameters -- URI parameters of the given resource\n action_uri_parameters -- URI parameters of the given action \n \"\"\"\n uri_parameters = []\n\n\n # Append to the result list all the URI parameters from the resource \n # which are not redefined in the action.\n for resource_uri_parameter in resource_uri_parameters:\n parameter_overwritten_in_action = False\n for action_uri_parameter in action_uri_parameters:\n if resource_uri_parameter[\"name\"] == action_uri_parameter[\"name\"]:\n parameter_overwritten_in_action = True\n\n if not parameter_overwritten_in_action:\n uri_parameters.append(resource_uri_parameter)\n\n # Append all the parameters from the action to the result list.\n uri_parameters.extend(action_uri_parameters)\n\n return uri_parameters\n\n\ndef instantiate_uri(URI_template, parameters):\n \"\"\"Instantiate an URI template from a list of parameters\n\n Arguments:\n URI_template - URI template to be instanted\n parameters - List of URI parameters used for instantiating\n \"\"\"\n # Find all the parameter blocks (ie. {var}, {?var1,var2}, etc). \n processed_URI = ''\n regex = re.compile(\"{([^}]*)}\")\n URI_parameters_blocks = re.findall(regex,URI_template)\n\n # Process every parameter block found in the URI\n for URI_parameter_block in URI_parameters_blocks:\n # Parameters of the form \"#var\" will be replaced with \"#value\", so we\n # keep the '#' as a prefix.\n processed_URI = ''\n prefix = ''\n if URI_parameter_block[0] == '#':\n prefix = '#'\n\n # Form-style parameters (ie. ?var, &var) requires a different \n # substitution, so mark them as special cases for the substitutions\n # loop.\n form_style_query_parameters = False;\n if URI_parameter_block[0] == '?':\n form_style_query_parameters = True;\n first_form_style_query_parameter = True;\n elif URI_parameter_block[0] == '&':\n form_style_query_parameters = True;\n first_form_style_query_parameter = False;\n\n # If the current parameters blocks startswith '?', '&', etc we\n # remove such prefix for the substitutions loop.\n if prefix == '' and form_style_query_parameters == False and URI_parameter_block[0] != '+':\n URI_parameter_block_replace = URI_parameter_block\n else:\n URI_parameter_block_replace = URI_parameter_block[1:]\n\n # Start replacing all the parameters inside the parameter blocks one\n # by one.\n for URI_parameter in URI_parameter_block_replace.split(','):\n # Form-style parameters as \"?var\" will be replaced by \n # \"?var=value\", so keep \"var=\" as a prefix.\n #processed_URI += proccess_URI_parameter(URI_parameter)\n if form_style_query_parameters == True:\n if first_form_style_query_parameter:\n prefix = \"?\" + URI_parameter + \"=\"\n first_form_style_query_parameter = False\n else:\n prefix = \"&\" + URI_parameter + \"=\"\n\n # Search the current URI parameter in the list of parameters \n # given and replace its name with its example value.\n i = 0\n parameter_definition_found = False\n while i < len(parameters) and not parameter_definition_found:\n if parameters[i]['name'] == URI_parameter and len(parameters[i]['example']) > 0:\n parameter_definition_found = True\n URI_parameter_block_replace = URI_parameter_block_replace.replace(URI_parameter, prefix + parameters[i]['example'])\n \n processed_URI += prefix + parameters[i]['example']\n\n i += 1\n\n # If the parameter can not be found or it has not example value,\n # we replace it with \"{prefix+var-name}\" 
or simply ignore it \n # depending on the type of parameter.\n if parameter_definition_found == False:\n if URI_parameter_block[0] != '?' and URI_parameter_block[0] != '&':\n if URI_parameter_block[0] == '+':\n prefix = '+'\n URI_parameter_block_replace = URI_parameter_block_replace.replace(URI_parameter, \"{\" + prefix + URI_parameter + \"}\")\n processed_URI += \"{\" + prefix + URI_parameter + \"}\"\n else:\n URI_parameter_block_replace = URI_parameter_block_replace.replace(URI_parameter, '')\n processed_URI += ''\n\n # Replace the original parameter block with the values of its members\n # omiting the separator character (',').\n \n URI_parameter_block_replace = URI_parameter_block_replace.replace(',','')\n URI_template = URI_template.replace(\"{\" + URI_parameter_block + \"}\",processed_URI)\n\n return URI_template", "id": "5039210", "language": "Python", "matching_score": 1.1687668561935425, "max_stars_count": 0, "path": "fiware_api_blueprint_renderer/src/drafter_postprocessing/instantiate_uri.py" }, { "content": "\nimport unittest\nfrom os import path\nimport sys\n\nimport_path = path.abspath(__file__)\n\nwhile path.split(import_path)[1] != 'fiware_api_blueprint_renderer':\n\n import_path = path.dirname(import_path)\n\nsys.path.append(import_path)\n\nfrom src.drafter_postprocessing.order_uri import order_uri_parameters, order_request_parameters\nfrom tests.test_utils import *\n\n\nclass TestOrderURIFunctions(unittest.TestCase):\n __metaclass__ = TestCaseWithExamplesMetaclass\n\n @for_examples(\n ('api/entity/v2', 'api/entity/v2'),\n ('api/entity/', 'api/entity/'),\n ('api/entity/v2?get=2&opt=test', 'api/entity/v2?get=2&opt=test'),\n ('api/entity/v2?opt=test&get=2', 'api/entity/v2?get=2&opt=test'),\n ('api/entity/?get=2&opt=test', 'api/entity/?get=2&opt=test'),\n ('api/entity/?opt=test&get=2', 'api/entity/?get=2&opt=test'),\n ('api/entity/v2?get=2&opt=test#anchor', 'api/entity/v2?get=2&opt=test#anchor'),\n ('api/entity/v2?opt=test&get=2#anchor', 'api/entity/v2?get=2&opt=test#anchor'),\n ('api/entity/?get=2&opt=test#anchor', 'api/entity/?get=2&opt=test#anchor'),\n ('api/entity/?opt=test&get=2#anchor', 'api/entity/?get=2&opt=test#anchor'),\n )\n def test_order_request_parameters_function(self, original_request, expected_request):\n \n converted_request = order_request_parameters(original_request)\n self.assertEqual(converted_request, expected_request)\n\n @for_examples(\n ('api/entity/v2', \n 'api/entity/v2'),\n\n ('api/entity/', \n 'api/entity/'),\n\n ('/path/{+var}/42', \n '/path/{+var}/42'),\n\n ('/path/to/resources/{varone}{?vartwo}', \n '/path/to/resources/{varone}{?vartwo}'),\n\n ('/path/to/resources/{varone}?path=test{&vartwo,varthree}', \n '/path/to/resources/{varone}?path=test{&varthree,vartwo}'),\n\n ('/path/to/resources/{varone}?path=test{&varthree,vartwo}', \n '/path/to/resources/{varone}?path=test{&varthree,vartwo}'),\n\n ('/path/to/resources/{varone}{?vartwo,varthree}', \n '/path/to/resources/{varone}{?varthree,vartwo}'),\n\n ('/path/to/resources/{varone}{?varthree,vartwo}', \n '/path/to/resources/{varone}{?varthree,vartwo}'),\n\n ('/path/to/resources/{varone}{?varthree,vartwo}{&varfour,varfive}', \n '/path/to/resources/{varone}{?varthree,vartwo}{&varfive,varfour}'),\n \n ('/path/to/resources/{varone}{?varthree,vartwo}{&varfive,varfour}', \n '/path/to/resources/{varone}{?varthree,vartwo}{&varfive,varfour}'),\n \n ('api/entity/v2#anchor', \n 'api/entity/v2#anchor'),\n\n ('api/entity/#anchor', \n 'api/entity/#anchor'),\n\n ('/path/{+var}/42#anchor', \n 
'/path/{+var}/42#anchor'),\n\n ('/path/to/resources/{varone}{?vartwo}#anchor', \n '/path/to/resources/{varone}{?vartwo}#anchor'),\n\n ('/path/to/resources/{varone}?path=test{&vartwo,varthree}#anchor', \n '/path/to/resources/{varone}?path=test{&varthree,vartwo}#anchor'),\n\n ('/path/to/resources/{varone}?path=test{&varthree,vartwo}#anchor', \n '/path/to/resources/{varone}?path=test{&varthree,vartwo}#anchor'),\n\n ('/path/to/resources/{varone}{?vartwo,varthree}#anchor', \n '/path/to/resources/{varone}{?varthree,vartwo}#anchor'),\n\n ('/path/to/resources/{varone}{?varthree,vartwo}#anchor', \n '/path/to/resources/{varone}{?varthree,vartwo}#anchor'),\n\n ('/path/to/resources/{varone}{?varthree,vartwo}{&varfour,varfive}#anchor', \n '/path/to/resources/{varone}{?varthree,vartwo}{&varfive,varfour}#anchor'),\n\n ('/path/to/resources/{varone}{?varthree,vartwo}{&varfive,varfour}#anchor', \n '/path/to/resources/{varone}{?varthree,vartwo}{&varfive,varfour}#anchor'),\n )\n def test_order_uri_parameters_functions(self, original_uri, expected_uri):\n\n converted_uri = order_uri_parameters(original_uri)\n self.assertEqual(converted_uri,expected_uri)\n\n\n\n\nsuite = unittest.TestLoader().loadTestsFromTestCase(TestOrderURIFunctions)\nunittest.TextTestRunner(verbosity=2).run(suite)", "id": "6372240", "language": "Python", "matching_score": 2.1797916889190674, "max_stars_count": 0, "path": "fiware_api_blueprint_renderer/tests/test_ordered_uri/test_order_uri.py" }, { "content": "#!/usr/bin/env python\n\nimport unittest\nfrom os import path\nimport sys\n\n\nsys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )\nfrom src.drafter_postprocessing.instantiate_uri import instantiate_uri\n\n\nclass TestInstantiatingURIParameters( unittest.TestCase ):\n \n def test_simple_uri_parameter(self):\n parameters = [\n {\n \"name\": \"id\", \n \"example\": \"32\",\n }\n ]\n self.assertEqual(instantiate_uri('/Entity/{id}', parameters), '/Entity/32')\n\n\n def test_simple_empty_uri_parameter(self):\n parameters = [\n {\n \"name\": \"id\", \n \"example\": \"\",\n }\n ]\n self.assertEqual(instantiate_uri('/Entity/{id}', parameters), '/Entity/{id}')\n\n\n def test_multiple_simple_uri_parameters(self):\n parameters = [\n {\n \"name\": \"id\", \n \"example\": \"15\",\n },\n {\n \"name\": \"name\", \n \"example\": \"entity-name\",\n }\n ]\n self.assertEqual(instantiate_uri('/Entity/{id}/{name}', parameters), '/Entity/15/entity-name')\n\n\n def test_multiple_simple_uri_parameters(self):\n parameters = [\n {\n \"name\": \"id\", \n \"example\": \"15\",\n },\n {\n \"name\": \"name\", \n \"example\": \"entity-name\",\n }\n ]\n self.assertEqual(instantiate_uri('/Entity/{id,name}', parameters), '/Entity/15entity-name')\n\n\n def test_hashtag_uri_parameters(self):\n parameters = [\n {\n \"name\": \"id\", \n \"example\": \"15\",\n }\n ]\n self.assertEqual(instantiate_uri('/Entity/{#id}', parameters), '/Entity/#15')\n\n\n def test_empty_hashtag_uri_parameters(self):\n parameters = []\n self.assertEqual(instantiate_uri('/Entity/{#id}', parameters), '/Entity/{#id}')\n\n\n def test_form_style_uri_parameter(self):\n parameters = [\n {\n \"name\": \"id\", \n \"example\": \"15\",\n }\n ]\n self.assertEqual(instantiate_uri('/Entity{?id}', parameters), '/Entity?id=15')\n\n\n def test_empty_form_style_uri_parameter(self):\n parameters = []\n self.assertEqual(instantiate_uri('/Entity{?id}', parameters), '/Entity')\n\n\n def test_multiple_form_style_uri_parameters(self):\n parameters = [\n {\n \"name\": \"id\", \n \"example\": 
\"15\",\n },\n {\n \"name\": \"name\", \n \"example\": \"entity-name\",\n }\n ]\n self.assertEqual(instantiate_uri('/Entity/{?id,name}', parameters), '/Entity/?id=15&name=entity-name')\n\n\n def test_multiple_form_style_uri_parameters_with_empty_ones(self):\n parameters = [\n {\n \"name\": \"id\", \n \"example\": \"15\",\n },\n {\n \"name\": \"name\", \n \"example\": \"\",\n }\n ]\n self.assertEqual(instantiate_uri('/Entity/{?id,name}', parameters), '/Entity/?id=15')\n\n\n def test_plus_uri_parameters(self):\n parameters = [\n {\n \"name\": \"resource_path\", \n \"example\": \"path/to/resource\",\n }\n ]\n self.assertEqual(instantiate_uri('/Entity/{+resource_path}', parameters), '/Entity/path/to/resource')\n\n\n def test_empty_plus_uri_parameters(self):\n parameters = [\n {\n \"name\": \"resource_path\", \n \"example\": \"\",\n }\n ]\n self.assertEqual(instantiate_uri('/Entity/{+resource_path}', parameters), '/Entity/{+resource_path}')\n\n\n def test_multiple_plus_uri_parameters(self):\n parameters = [\n {\n \"name\": \"resource_path_1\", \n \"example\": \"path/to/resource/1\",\n },\n {\n \"name\": \"resource_path_2\", \n \"example\": \"\",\n }\n ]\n self.assertEqual(instantiate_uri('/Entity/{+resource_path_1,resource_path_2}', parameters), '/Entity/path/to/resource/1{+resource_path_2}')\n\n\n def test_ampersand_uri_parameter(self):\n parameters = [\n {\n \"name\": \"id\", \n \"example\": \"15\",\n }\n ]\n self.assertEqual(instantiate_uri('/Entity{&id}', parameters), '/Entity&id=15')\n\n\n def test_empty_ampersand_uri_parameter(self):\n parameters = []\n self.assertEqual(instantiate_uri('/Entity{&id}', parameters), '/Entity')\n\n\n def test_multiple_ampersand_uri_paramaters(self):\n parameters = [\n {\n \"name\": \"id\", \n \"example\": \"15\",\n },\n {\n \"name\": \"name\", \n \"example\": \"entity-name\",\n }\n ]\n self.assertEqual(instantiate_uri('/Entity{&id,name}', parameters), '/Entity&id=15&name=entity-name')\n\n\n def test_multiple_ampersand_uri_paramaters_with_empty_one(self):\n parameters = [\n {\n \"name\": \"id\", \n \"example\": \"\",\n },\n {\n \"name\": \"name\", \n \"example\": \"entity-name\",\n }\n ]\n self.assertEqual(instantiate_uri('/Entity{&id,name}', parameters), '/Entity&name=entity-name')\n \n def test_multiple_blocks_bug(self):\n parameters = [\n {\n \"name\": \"environment\", \n \"example\": \"env1\",\n },\n {\n \"name\": \"tier\", \n \"example\": \"tier1\",\n }\n ]\n\n self.assertEqual(instantiate_uri('/rest/catalog/org/FIWARE/environment/{environment}/tier/{tier}', parameters),'/rest/catalog/org/FIWARE/environment/env1/tier/tier1')\n\nif __name__ == \"__main__\":\n unittest.main()\n", "id": "2905105", "language": "Python", "matching_score": 0.484454482793808, "max_stars_count": 0, "path": "fiware_api_blueprint_renderer/tests/test_instantiating_uri_parameters.py" }, { "content": "# -*- coding: utf-8 -*-\n# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U\n#\n# This file is part of FI-WARE project.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the 
License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with <EMAIL>\n\n__author__ = 'arobres, jfernandez'\n\nfrom fabric.api import env, get, run\nfrom fabric.tasks import execute as fabric_execute\nfrom fabric.contrib import files\nfrom configuration import CONFIG_VM_IP, CONFIG_VM_PASSWORD, CONFIG_VM_USERNAME, PROVISION_ROOT_PATH, \\\n CONFIG_CHEF_SERVER_USERNAME, CONFIG_CHEF_SERVER_PASSWORD, CONFIG_CHEF_SERVER_IP, CONFIG_PUPPET_MASTER_IP,\\\n CONFIG_PUPPET_MASTER_PASSWORD, CONFIG_PUPPET_MASTER_USERNAME\nfrom constants import FABRIC_RESULT_EXECUTE\nfrom StringIO import StringIO\n\n# Command templates to be used by this helper\nCOMMAND_CHEF_CLIENT = \"chef-client -i 30 -d\"\nCOMMAND_CHEF_CLIENT_ONCE = \"chef-client\"\nCOMMAND_CHEF_CLIENT_STOP = \"killall chef-client\"\nCOMMAND_RM_CHEF_CERTS = \"rm /etc/chef/client.pem\"\nCOMMAND_PUPPET_AGENT = \"puppet agent --daemonize\"\nCOMMAND_PUPPET_AGENT_ONCE = \"puppet agent --test\"\nCOMMAND_PUPPET_AGENT_STOP = \"killall puppet\"\nCOMMAND_KNIFE_NODE_SHOW = \"knife node show {}\"\nCOMMAND_PUPPET_GET_CERT = \"puppet cert list --all | grep {}\"\nCOMMAND_RM_PUPPET_CERTS = \"rm -r -f /var/lib/puppet/ssl/*\"\nCOMMAND_RM_ALL_TESTFILES = \"rm -rf /tmp/qa-test-product-*\"\nCOMMAND_RM_PUPPET_CLIENT_CATALOG = \"rm -f /var/lib/puppet/client_data/catalog/*\"\n\n\ndef _init_vm_connection():\n \"\"\"\n Init Fabric environment with VM credentials\n :return: None\n \"\"\"\n env.user = CONFIG_VM_USERNAME\n env.password = <PASSWORD>\n env.host_string = CONFIG_VM_IP\n\n\ndef _init_chef_server_connection():\n \"\"\"\n Init Fabric environment with Chef-Server credentials\n :return: None\n \"\"\"\n env.user = CONFIG_CHEF_SERVER_USERNAME\n env.password = <PASSWORD>\n env.host_string = CONFIG_CHEF_SERVER_IP\n\n\ndef _init_puppet_master_connection():\n \"\"\"\n Init Fabric environment with Puppet Master credentials\n :return:\n \"\"\"\n env.user = CONFIG_PUPPET_MASTER_USERNAME\n env.password = <PASSWORD>\n env.host_string = CONFIG_PUPPET_MASTER_IP\n\n\ndef assert_file_exist(test_file_name):\n \"\"\"\n Fabric assertion: Check if file (result of installing a test product) exists on the current remote hosts.\n :param test_file_name: File name\n :return: True if given file exists on the current remote host (dir: PROVISION_ROOT_PATH).\n \"\"\"\n\n file_path = PROVISION_ROOT_PATH.format(test_file_name)\n return files.exists(file_path)\n\n\ndef assert_content_in_file(file_name, expected_content):\n \"\"\"\n Fabric assertion: Check if some text is in the specified file (result of installing a test product)\n Provision dir: PROVISION_ROOT_PATH\n :param file_name: File name\n :param expected_content: String to be found in file\n :return: True if given content is in file (dir: PROVISION_ROOT_PATH).\n \"\"\"\n\n file_path = PROVISION_ROOT_PATH.format(file_name)\n\n fd = StringIO()\n get(file_path, fd)\n file_content = fd.getvalue()\n\n return expected_content in file_content\n\n\ndef execute_file_exist(test_file_name):\n \"\"\"\n Fabric executor: Run method with assertion 'assert_file_exist' on the remote host\n :param test_file_name: Target file name\n :return: True if file contains that content (dir: PROVISION_ROOT_PATH)\n \"\"\"\n print \"FABRIC: Checking if file exists\"\n _init_vm_connection()\n\n success = fabric_execute(assert_file_exist, test_file_name=test_file_name)\n return success[FABRIC_RESULT_EXECUTE]\n\n\ndef execute_content_in_file(file_name, expected_content):\n \"\"\"\n Fabric executor: Run method with assertion 
'assert_content_in_file' on the remote host\n :param file_name: Target file name\n :param expected_content: String to be found in file\n :return: True if file contains that content (dir: PROVISION_ROOT_PATH)\n \"\"\"\n print \"FABRIC: Checking if file contains:\", expected_content\n _init_vm_connection()\n\n success = fabric_execute(assert_content_in_file, file_name=file_name, expected_content=expected_content)\n return success[FABRIC_RESULT_EXECUTE]\n\n\ndef _execute_command(command):\n \"\"\"\n Execute a shell command on the current remote host\n :param command: Command to be execute\n :return: Result of the remote execution or None if some problem happens\n \"\"\"\n print \"FABRIC: Executing remote command:\", command\n try:\n result = run(command)\n return result\n except:\n print \" WARNING: Any problem executing command\"\n return None\n\n\ndef execute_chef_client():\n \"\"\"\n Init environment to connect to VM and execute 'chef-client' (runs as a daemon)\n :return: Result of the remote execution\n \"\"\"\n _init_vm_connection()\n return _execute_command(COMMAND_CHEF_CLIENT)\n\n\ndef execute_chef_client_once():\n \"\"\"\n Init environment to connect to VM and execute 'chef-client' runs ones)\n :return: Result of the remote execution\n \"\"\"\n _init_vm_connection()\n return _execute_command(COMMAND_CHEF_CLIENT_ONCE)\n\n\ndef execute_chef_client_stop():\n \"\"\"\n Init environment to connect to VM and kill all 'chef-client' process\n :return: Result of the remote execution\n \"\"\"\n _init_vm_connection()\n return _execute_command(COMMAND_CHEF_CLIENT_STOP)\n\n\ndef remove_chef_client_cert_file():\n \"\"\"\n Init environment to connect to VM and remove Chef client certificates\n :return: Result of the remote execution\n \"\"\"\n _init_vm_connection()\n return _execute_command(COMMAND_RM_CHEF_CERTS)\n\n\ndef execute_puppet_agent():\n \"\"\"\n Init environment to connect to VM and execute 'puppet agent' (runs as a daemon)\n :return: Result of the remote execution\n \"\"\"\n _init_vm_connection()\n return _execute_command(COMMAND_PUPPET_AGENT)\n\n\ndef execute_puppet_agent_once():\n \"\"\"\n Init environment to connect to VM and execute 'puppet agent' (ones)\n :return: Result of the remote execution\n \"\"\"\n _init_vm_connection()\n return _execute_command(COMMAND_PUPPET_AGENT_ONCE)\n\n\ndef execute_puppet_agent_stop():\n \"\"\"\n Init environment to connect to VM and kill all 'puppet' process\n :return: Result of the remote execution\n \"\"\"\n _init_vm_connection()\n return _execute_command(COMMAND_PUPPET_AGENT_STOP)\n\n\ndef get_chef_node_info_from_server(node_name):\n \"\"\"\n Init environment to connect to Chef-Server and retrieve the information for that node in the server (if exists)\n :param node_name: Node name\n :return: Information about the node that is managed by Chef-Server or None if this node has not been found\n \"\"\"\n _init_chef_server_connection()\n return _execute_command(COMMAND_KNIFE_NODE_SHOW.format(node_name))\n\n\ndef remove_puppet_agent_cert_file():\n \"\"\"\n Init environment to connect to VM and remove Puppet client certificates\n :return: Result of the remote execution\n \"\"\"\n _init_vm_connection()\n return _execute_command(COMMAND_RM_PUPPET_CERTS)\n\n\ndef get_puppet_node_cert_from_server(node_name):\n \"\"\"\n Init environment to connect to Puppet Master and retrieve the certificate for that node in the server (if exists)\n :param node_name: Name of target node\n :return: Certificate for that node in Puppet Master or None if this information has not 
been found\n \"\"\"\n _init_puppet_master_connection()\n return _execute_command(COMMAND_PUPPET_GET_CERT.format(node_name))\n\n\ndef remove_all_generated_test_files():\n \"\"\"\n Init environment to connect to VM and remove all files generated by recipe/manifest execution\n :return: Result of the remote execution\n \"\"\"\n _init_vm_connection()\n return _execute_command(COMMAND_RM_ALL_TESTFILES)\n\n\ndef remove_puppet_agent_catalog():\n \"\"\"\n Init environment to connect to VM and remove the client catalog (agent) after manifest execution\n :return: Result of the remote execution\n \"\"\"\n _init_vm_connection()\n return _execute_command(COMMAND_RM_PUPPET_CLIENT_CATALOG)\n", "id": "11225903", "language": "Python", "matching_score": 4.562437534332275, "max_stars_count": 0, "path": "test/acceptance/commons/fabric_utils.py" }, { "content": "# -*- coding: utf-8 -*-\n# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U\n#\n# This file is part of FI-WARE project.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with <EMAIL>\n\n__author__ = \"<NAME>\"\n__email__ = \"<EMAIL>\"\n__copyright__ = \"Copyright 2015\"\n__license__ = \" Apache License, Version 2.0\"\n__version__ = \"1.0.0\"\n\n\nimport logging\nfrom fabric.api import env, hide, run, get\nfrom fabric.tasks import execute\nfrom fabric.contrib import files\nfrom StringIO import StringIO\n\n__logger__ = logging.getLogger(\"qautils\")\n\nFABRIC_ASSERT_RESULT = u'<local-only>'\n\n\nclass FabricAssertions():\n\n @staticmethod\n def assert_file_exist(path):\n\n \"\"\"\n Fabric assertion: Check if file exists on the current remote hosts.\n :param path (string): Absolute path to file\n\n :return (bool): True if given file exists on the current remote host (dir: PROVISION_ROOT_PATH).\n \"\"\"\n\n return files.exists(path)\n\n @staticmethod\n def assert_content_in_file(path, expected_content):\n\n \"\"\"\n Fabric assertion: Check if some text is in the given {dir_path}/{file}\n :param path (string): Absolute path to file\n :param expected_content (string): String to look for.\n :return (bool): True if given content is in file.\n \"\"\"\n\n fd = StringIO()\n get(path, fd)\n file_content = fd.getvalue()\n\n return expected_content in file_content\n\n\nclass FabricUtils():\n\n def __init__(self, host_name, host_username, host_password=None, host_ssh_key=None):\n \"\"\"\n Init Fabric client.\n :param host_name (string): Hostname\n :param host_username (string): Username\n :param host_password (string): Password\n :param host_ssh_key (string): SSH private key file\n :return: None\n \"\"\"\n\n __logger__.info(\"Init Fabric to execute remote commands in '%s'. 
Credentials: '%s/%s'; SSH Key file: '%s'\",\n host_name, host_username, host_password, host_ssh_key)\n env.host_string = host_name\n env.user = host_username\n env.password = <PASSWORD>\n env.key_filename = host_ssh_key\n\n self.fabric_assertions = FabricAssertions()\n\n @staticmethod\n def execute_command(command):\n \"\"\"\n Execute a shell command on the current remote host\n :param command (string): Command to be execute\n :return (string): Result of the remote execution or None if some problem happens\n \"\"\"\n\n __logger__.debug(\"Executing remote command: '%s'\", command)\n try:\n with hide('running', 'stdout'):\n result = run(command)\n __logger__.debug(\"Result of execution: \\n%s\", result)\n return result\n except:\n __logger__.error(\"Any problem executing command: '%s'\", command)\n return None\n\n def file_exist(self, dir_path, file_name):\n \"\"\"\n Fabric executor: Run method with assertion 'assert_file_exist' in the remote host\n :param dir_path (string): Path of the directory where file is located.\n :param file_name (string): File name\n :return (bool): True if file contains that content (dir: PROVISION_ROOT_PATH)\n \"\"\"\n\n path = \"{}/{}\".format(dir_path, file_name)\n __logger__.debug(\"Checking if remote file exists: '%s'\", path)\n\n with hide('running', 'stdout'):\n success = execute(self.fabric_assertions.assert_file_exist, path=path)\n return success[FABRIC_ASSERT_RESULT]\n\n def content_in_file(self, dir_path, file_name, expected_content):\n \"\"\"\n Fabric executor: Run method with assertion 'assert_content_in_file' on the remote host\n :param dir_path (string): Path of the directory where file is located.\n :param file_name (string): File name\n :param expected_content (string): String to be found in file\n :return (bool): True if file contains that content (dir: PROVISION_ROOT_PATH)\n \"\"\"\n\n path = \"{}/{}\".format(dir_path, file_name)\n __logger__.debug(\"Checking if the content '%s' is in remote file: '%s'\", expected_content, path)\n try:\n with hide('running', 'stdout'):\n success = execute(self.fabric_assertions.assert_content_in_file,\n path=path, expected_content=expected_content)\n except:\n __logger__.error(\"Problem when trying to access to remote file\")\n return False\n\n return success[FABRIC_ASSERT_RESULT]\n", "id": "39587", "language": "Python", "matching_score": 0.7782502770423889, "max_stars_count": 0, "path": "test/acceptance/tools/fabric_utils.py" }, { "content": "# -*- coding: utf-8 -*-\n# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U\n#\n# This file is part of FI-WARE project.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with <EMAIL>\n\nfrom tools import http\n\n__author__ = 'henar'\n\nfrom tools.productrelease import ProductRelease\nimport sys\n\nimport json\n\nfrom tools.productinstance import ProductInstance\nfrom tools.productrelease import Attribute\nfrom tools.productinstancedto import ProductInstanceDto\nfrom 
tools.productinstancedto import ProductReleaseDto\nfrom tools.productrequest import ProductRequest\nfrom xml.etree.ElementTree import tostring\n\n###\n### http://docs.openstack.org/developer/glance/glanceapi.html\nclass ProductInstanceRequest:\n def __init__(self, keystone_url, sdc_url, tenant, user, password, vdc):\n self.keystone_url = keystone_url\n self.sdc_url = sdc_url\n self.user = user\n self.password = password\n self.tenant = tenant\n self.token = self.__get__token()\n self.vdc = vdc\n self.products = []\n\n def __get__token(self):\n return http.get_token(self.keystone_url + '/tokens', self.tenant, self.user, self.password)\n\n def deploy_product(self, ip, product_name, product_version, attributes_string):\n url = \"%s/%s/%s/%s\" % (self.sdc_url, \"vdc\", self.vdc, \"productInstance\")\n headers = {'X-Auth-Token': self.token, 'Tenant-Id': self.vdc,\n 'Accept': \"application/json\", 'Content-Type': \"application/xml\"}\n\n productrequest = ProductRequest(self.keystone_url, self.sdc_url, self.tenant, self.user, self.password)\n\n productrequest.get_product_info(product_name)\n attributes = self.__process_attributes(attributes_string)\n\n product_release = ProductReleaseDto(product_name, product_version)\n\n productInstanceDto = ProductInstanceDto(ip, product_release, attributes)\n payload = productInstanceDto.to_xml()\n print url\n print headers\n print tostring(payload)\n response = http.post(url, headers, tostring(payload))\n\n ## Si la respuesta es la adecuada, creo el diccionario de los datos en JSON.\n if response.status != 200:\n print 'error to add the product sdc ' + str(response.status)\n sys.exit(1)\n else:\n http.processTask(headers, json.loads(response.read()))\n\n def __process_attributes(self, attributes_string):\n attributes = []\n atts = attributes_string.split(';')\n for att in atts:\n a = att.split('=')\n\n attribute = Attribute(a[0], a[1])\n attributes.append(attribute)\n return attributes\n\n def get_product_instances(self):\n url = \"%s/%s/%s/%s\" % (self.sdc_url, \"vdc\", self.vdc, \"productInstance\")\n print url\n\n headers = {'X-Auth-Token': self.token,\n 'Accept': \"application/json\"}\n response = http.get(url, headers)\n\n ## Si la respuesta es la adecuada, creo el diccionario de los datos en JSON.\n if response.status != 200:\n print 'error to obtain the token'\n sys.exit(1)\n else:\n data = json.loads(response.read())\n\n products = data[\"productInstance\"]\n print products\n\n if isinstance(products, list):\n for product in products:\n print product\n product_release = ProductRelease(product['productRelease']['product']['name'],\n product['productRelease']['version'], product['productRelease']['product']['description'])\n productInstance = ProductInstance(product['vm']['hostname'], product['status'], product['vm']['ip'],\n product_release)\n productInstance.to_string()\n else:\n product_release = ProductRelease(products['productRelease']['product']['name'],\n products['productRelease']['version'], products['productRelease']['product']['description'])\n productInstance = ProductInstance(products['vm']['hostname'], products['status'], products['vm']['ip'],\n product_release)\n productInstance.to_string()\n\n", "id": "6064687", "language": "Python", "matching_score": 5.292175769805908, "max_stars_count": 0, "path": "automatization_scripts/tools/productrinstanceequest.py" }, { "content": "# -*- coding: utf-8 -*-\n# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U\n#\n# This file is part of FI-WARE project.\n#\n# Licensed under the Apache 
License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with <EMAIL>\n\n__author__ = 'henar'\n\nfrom productrelease import ProductRelease, Product\nimport sys\n\nimport json\nimport http\nfrom productrelease import Attribute\nfrom xml.etree.ElementTree import tostring\n\n###\n### http://docs.openstack.org/developer/glance/glanceapi.html\nclass ProductRequest:\n def __init__(self, keystone_url, sdc_url, tenant, user, password):\n self.keystone_url = keystone_url\n self.sdc_url = sdc_url\n self.user = user\n self.password = password\n self.tenant = tenant\n self.vdc= tenant\n self.token = self.__get__token()\n self.products = []\n\n\n def __get__token(self):\n return http.get_token(self.keystone_url + '/tokens', self.tenant, self.user, self.password)\n\n def add_product(self, product_name, product_description, attributes, metadatas):\n print 'add_product'\n url = \"%s/%s\" % (self.sdc_url, \"catalog/product\")\n headers = {'X-Auth-Token': self.token, 'Tenant-Id': self.vdc,\n 'Accept': \"application/json\", 'Content-Type': 'application/xml'}\n\n attributes = self.__process_attributes(attributes)\n metadatas = self.__process_attributes(metadatas)\n\n product = Product(product_name, product_description)\n\n for att in attributes:\n product.add_attribute(att)\n\n for meta in metadatas:\n product.add_metadata(meta)\n\n payload = product.to_product_xml()\n\n response = http.post(url, headers, tostring(payload))\n\n ## Si la respuesta es la adecuada, creo el diccionario de los datos en JSON.\n if response.status != 200:\n print 'error to add the product sdc ' + str(response.status)\n sys.exit(1)\n else:\n self.products.append(product)\n\n\n def add_product_release(self, product_name, version):\n url = \"%s/%s/%s/%s\" % (self.sdc_url, \"catalog/product\", product_name, \"release\")\n headers = {'X-Auth-Token': self.token, 'Tenant-Id': self.vdc,\n 'Accept': \"application/json\", 'Content-Type': 'application/xml'}\n\n # product = self.get_product_info (product_name)\n product = Product(product_name)\n product_release = ProductRelease(product, version)\n\n payload = product_release.to_product_xml()\n response = http.post(url, headers, tostring(payload))\n\n ## Si la respuesta es la adecuada, creo el diccionario de los datos en JSON.\n if response.status != 200:\n print 'error to add the product release to sdc ' + str(response.status)\n sys.exit(1)\n else:\n self.products.append(product)\n\n def __process_attributes(self, attributes_string):\n attributes = []\n if attributes_string == None:\n return attributes\n atts = attributes_string.split(';')\n for att in atts:\n a = att.split('=')\n\n attribute = Attribute(a[0], a[1])\n attributes.append(attribute)\n return attributes\n\n def get_product_release(self, product_name):\n #headers={'X-Auth-Token': self.token,\n # 'Accept': \"application/json\"}\n headers = {'X-Auth-Token': self.token, 'Tenant-Id': self.vdc,\n 'Accept': \"application/json\"}\n #get product release\n url = \"%s/%s/%s/%s\" % 
(self.sdc_url, \"catalog/product\", product_name, \"release\" )\n\n response = http.get(url, headers)\n\n if response.status != 200:\n print 'error to get the product ' + product_name + ' ' + str(response.status)\n sys.exit(1)\n else:\n data = json.loads(response.read())\n if data == None:\n return None\n\n return data['productRelease']['version']\n\n def get_product_release_info(self, product_name, product_version):\n headers = {'X-Auth-Token': self.token, 'Tenant-Id': self.vdc,\n 'Accept': \"application/json\"}\n #get product release\n url = \"%s/%s/%s/%s/%s\" % (self.sdc_url, \"catalog/product\", product_name, \"release\", product_version )\n\n response = http.get(url, headers)\n\n if response.status != 200:\n print 'error to get the product ' + product_name + ' ' + str(response.status)\n sys.exit(1)\n else:\n data = json.loads(response.read())\n if data == None:\n return None\n product = ProductRelease(data['product']['name'], data['version'], data['product']['description'])\n try:\n for att in data['attributes']:\n attribute = Attribute(att['key'], att['version']);\n product.add_attribute(attribute)\n except:\n pass\n\n return product\n\n def get_product_info(self, product_name):\n #headers={'X-Auth-Token': self.token,\n # 'Accept': \"application/json\"}\n headers = {'X-Auth-Token': self.token, 'Tenant-Id': self.vdc,\n 'Accept': \"application/json\"}\n #get product release\n url = \"%s/%s/%s\" % (self.sdc_url, \"catalog/product\", product_name )\n\n response = http.get(url, headers)\n\n if response.status != 200:\n print 'error to get the product ' + product_name + ' ' + str(response.status)\n sys.exit(1)\n else:\n data = json.loads(response.read())\n print data\n if data == None:\n return None\n product = Product(data['name'], data['description'])\n try:\n for att in data['attributes']:\n attribute = Attribute(att['key'], att['version']);\n product.add_attribute(attribute)\n except:\n pass\n\n return product\n\n def delete_product_release(self, product_name, version):\n #headers={'X-Auth-Token': self.token,\n # 'Accept': \"application/json\"}\n headers = {'X-Auth-Token': self.token, 'Tenant-Id': self.vdc,\n 'Accept': \"application/json\"}\n #get product release\n url = \"%s/%s/%s/%s/%s\" % (self.sdc_url, \"catalog/product\", product_name, \"release\", version)\n\n response = http.delete(url, headers)\n\n if response.status != 200 and response.status != 204:\n print 'error to delete the product release ' + product_name + ' ' + str(response.status)\n sys.exit(1)\n else:\n pass\n\n\n def delete_product(self, product_name):\n version = self.get_product_release(product_name);\n\n if version != None:\n self.delete_product_release(product_name, version);\n #headers={'X-Auth-Token': self.token,\n # 'Accept': \"application/json\"}\n headers = {'X-Auth-Token': self.token, 'Tenant-Id': self.vdc,\n 'Accept': \"application/json\"}\n #get product release\n url = \"%s/%s\" % (self.sdc_url, \"catalog/product/\" + product_name)\n response = http.delete(url, headers)\n\n if response.status != 200 and response.status != 204:\n print 'error to delete the product ' + product_name + ' ' + str(response.status)\n sys.exit(1)\n\n def get_products(self):\n url = \"%s/%s\" % (self.sdc_url, \"catalog/product\")\n headers = {'X-Auth-Token': self.token, 'Tenant-Id': self.vdc,\n 'Accept': \"application/json\"}\n #headers={'X-Auth-Token': self.token,\n # 'Accept': \"application/json\"}\n response = http.get(url, headers)\n\n ## Si la respuesta es la adecuada, creo el diccionario de los datos en JSON.\n if 
response.status != 200:\n print 'error to obtain the token'\n sys.exit(1)\n else:\n data = json.loads(response.read())\n\n products_string = data[\"product\"]\n\n for product_string in products_string:\n product = Product(product_string['name'])\n\n try:\n attributes = product['attributes']\n var = var + ' atts:'\n for att in attributes:\n var = var + '\\t' + att['key'] + \":\" + att['value']\n except:\n pass\n try:\n metadatas = product['metadatas']\n var = var + ' metas:'\n for meta in metadatas:\n var = var + '\\t' + meta['key'] + \":\" + meta['value']\n except:\n pass\n print var\n\n # dom = parseString(res_data)\n # xml_products = (dom.getElementsByTagName('product'))\n\n\n # product_name =\n # product = Product ()\n # add_product\n\n\n ##\n ## products - Obtiene la lista de imagenes --- Detalle images/detail\n ##\n def get_products(self):\n url = \"%s/%s\" % (self.sdc_url, \"catalog/product\")\n print url\n print self.token\n #headers={'X-Auth-Token': self.token,\n # 'Accept': \"application/json\"}\n headers = {'X-Auth-Token': self.token, 'Tenant-Id': self.vdc,\n 'Accept': \"application/json\"}\n print headers\n response = http.get(url, headers)\n\n ## Si la respuesta es la adecuada, creo el diccionario de los datos en JSON.\n if response.status != 200:\n print 'error to obtain the the list of product' + str(response.status)\n sys.exit(1)\n else:\n data = json.loads(response.read())\n\n products = data[\"product\"]\n\n for product in products:\n var = product['name']\n prod = Product(product['name'])\n try:\n attributes = product['attributes']\n var = var + ' atts:'\n for att in attributes:\n attri = Attribute(att['key'], att['value'])\n prod.add_attribute(attri)\n var = var + '\\t' + att['key'] + \":\" + att['value']\n except:\n pass\n try:\n metadatas = product['metadatas']\n var = var + ' metas:'\n for meta in metadatas:\n var = var + '\\t' + meta['key'] + \":\" + meta['value']\n except:\n pass\n print var\n\n\n # print g.products\n # p = Product('contextbroker-standalon','contextbroker-standalon','1.0.0')\n #p = Product('cosmos_slave_node','cosmos_slave_node','0.9.0')\n #g.add_product(p)\n #p = Product('cosmos_master_node','cosmos_master_node','0.9.0')\n #g.add_product(p)\n #p = Product('cosmos_injection_node','cosmos_injection_node','0.9.0')\n #g.add_product(p)\n", "id": "2457829", "language": "Python", "matching_score": 1.0991051197052002, "max_stars_count": 0, "path": "automatization_scripts/tools/productrequest.py" }, { "content": "# -*- coding: utf-8 -*-\n\n# Copyright 2015 Telefonica Investigación y Desarrollo, S.A.U\n#\n# This file is part of FIWARE project.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with <EMAIL>\n\n\nfrom utils.rest_client_utils import RestClient, API_ROOT_URL_ARG_NAME, model_to_request_body, \\\n delete_element_when_value_none, response_body_to_dict, HEADER_CONTENT_TYPE, HEADER_ACCEPT, HEADER_REPRESENTATION_XML\nfrom utils.logger_utils import 
get_logger\n\nlogger = get_logger(__name__)\n\n\n#URI ELEMENT\nPAASMANAGER_BASE_URI = \"{\" + API_ROOT_URL_ARG_NAME + \"}\"\nENVIRONMENT_INSTANCE_RESOURCE_ROOT_URI = PAASMANAGER_BASE_URI + \\\n \"/envInst/org/FIWARE/vdc/{tenant_id}/environmentInstance\"\nENVIRONMENT_INSTANCE_RESOURCE_DETAIL_URI = ENVIRONMENT_INSTANCE_RESOURCE_ROOT_URI + \"/{environment_instance_name}\"\n\n\n# BODY ELEMENTS\nENVIRONMENT_INSTANCE_BODY_ROOT = \"environmentInstanceDto\"\nENVIRONMENT_INSTANCE_BODY_NAME = \"blueprintName\"\nENVIRONMENT_INSTANCE_BODY_DESCRIPTION = \"description\"\n\nENVIRONMENT_BODY_ROOT = \"environmentDto\"\nENVIRONMENT_BODY_NAME = \"name\"\nENVIRONMENT_BODY_DESCRIPTION = \"description\"\n\nTIER_BODY_ROOT = \"tierDtos\"\nTIER_BODY_INITIAL_INSTANCES = \"initialNumberInstances\"\nTIER_BODY_MAXIMUM_INSTANCES = \"maximumNumberInstances\"\nTIER_BODY_MINIMUM_INSTANCES = \"minimumNumberInstances\"\nTIER_BODY_NAME = \"name\"\nTIER_BODY_IMAGE = \"image\"\nTIER_BODY_FLAVOUR = \"flavour\"\nTIER_BODY_KEYPAIR = \"keypair\"\nTIER_BODY_FLOATINGIP = \"floatingip\"\nTIER_BODY_REGION = \"region\"\nTIER_BODY_PRODUCTRELEASE = \"productReleaseDtos\"\nTIER_BODY_PRODUCTRELEASE_NAME = \"productName\"\nTIER_BODY_PRODUCTRELEASE_VERSION = \"version\"\nTIER_BODY_NETWORK = \"networkDto\"\nTIER_BODY_NETWORK_NAME = \"networkName\"\nTIER_BODY_SUBNETWORK = \"subNetworkDto\"\nTIER_BODY_SUBNETWORK_NAME = \"subnetName\"\n\nTASK_BODY_ROOT=\"task\"\n\nclass EnvironmentInstanceResourceClient(RestClient):\n\n def __init__(self, protocol, host, port, tenant_id, resource=None, headers=None):\n \"\"\"\n Class constructor. Inits default attributes.\n :param protocol: Connection protocol (HTTP | HTTPS)\n :param host: Host\n :param port: Port\n :param tenant_id: TenantID\n :param resource: Base URI resource\n :param headers: HTTP Headers\n :return: None\n \"\"\"\n if headers is None:\n self.headers = {HEADER_CONTENT_TYPE: HEADER_REPRESENTATION_XML,\n HEADER_ACCEPT: HEADER_REPRESENTATION_XML}\n self.headers = headers\n self.tenant_id = tenant_id\n super(EnvironmentInstanceResourceClient, self).__init__(protocol, host, port, resource=resource)\n\n def create_environment_instance(self, name, description, environment_name, environment_description, tier_name,\n image, region_name, keypair=None, product_name=None,\n product_version=None, network_name=None, subnetwork_name=None):\n \"\"\"\n Create a new environment (Tenant)\n :param name: Name of the environment instance (blueprint)\n :param description: Description of the environment instance (blueprint)\n :param environment_name: Name of the environment\n :param tier_name: Name of the tier\n :param image: image to deploy a VM from\n :param keypair: keypair of the user to enter the deployed VM\n :param product_name: Name of the product\n :param product_version: Product version\n :param network_name: Name of the network\n :param subnetwork_name: Name of the subnetwork\n :return: A duple : The task (asynchronous method) as a dict, the 'Request' response\n \"\"\"\n logger.info(\"Creating new environment instance\")\n\n env_model = {ENVIRONMENT_INSTANCE_BODY_ROOT:\n {\n ENVIRONMENT_INSTANCE_BODY_NAME: name,\n ENVIRONMENT_BODY_DESCRIPTION: description,\n ENVIRONMENT_BODY_ROOT:\n {\n ENVIRONMENT_BODY_NAME: environment_name,\n ENVIRONMENT_BODY_DESCRIPTION: environment_description,\n TIER_BODY_ROOT:\n {\n TIER_BODY_NAME: tier_name,\n TIER_BODY_INITIAL_INSTANCES: \"1\",\n TIER_BODY_MAXIMUM_INSTANCES: \"1\",\n TIER_BODY_MINIMUM_INSTANCES: \"1\",\n TIER_BODY_IMAGE: image,\n TIER_BODY_FLAVOUR: \"2\",\n 
TIER_BODY_KEYPAIR: keypair,\n TIER_BODY_FLOATINGIP: \"False\",\n TIER_BODY_REGION: region_name,\n TIER_BODY_PRODUCTRELEASE :\n {\n TIER_BODY_PRODUCTRELEASE_NAME : product_name,\n TIER_BODY_PRODUCTRELEASE_VERSION : product_version\n },\n TIER_BODY_NETWORK :\n {\n TIER_BODY_NETWORK_NAME : network_name,\n TIER_BODY_SUBNETWORK :\n {\n TIER_BODY_SUBNETWORK_NAME: subnetwork_name\n }\n }\n }\n }\n }\n }\n #Removing keys whose values are None\n delete_element_when_value_none(env_model)\n #Converting json to body request\n body = model_to_request_body(env_model, self.headers[HEADER_CONTENT_TYPE])\n\n response = self.post(ENVIRONMENT_INSTANCE_RESOURCE_ROOT_URI, body, self.headers,\n parameters=None, tenant_id=self.tenant_id)\n task_dict = response_body_to_dict(response,self.headers[HEADER_ACCEPT], xml_root_element_name=TASK_BODY_ROOT )\n return task_dict, response\n\n def delete_environment_instance(self, name):\n \"\"\"\n Delete an environemnt instance(Tenant)\n :param name: Name of the environment instance to be deleted\n :return: A duple : The task (asynchronous method) as a dict, the 'Request' response\n \"\"\"\n logger.info(\"Deleting environment instance \" + name)\n response = self.delete(ENVIRONMENT_INSTANCE_RESOURCE_DETAIL_URI, headers=self.headers, parameters=None,\n tenant_id=self.tenant_id, environment_instance_name=name)\n task_dict = response_body_to_dict(response,self.headers[HEADER_ACCEPT], xml_root_element_name=TASK_BODY_ROOT )\n return task_dict, response\n\n def get_environment_instance(self, name):\n \"\"\"\n Get an environment instance(Tenant)\n :param name: Name of the environment instance to get\n :return: A duple: The environment as a dict, , the 'Request' response\n \"\"\"\n logger.info(\"Get environment instance \" + name )\n response = self.get(ENVIRONMENT_INSTANCE_RESOURCE_DETAIL_URI, headers=self.headers, parameters=None,\n tenant_id=self.tenant_id, environment_instance_name=name)\n\n dict_response = response_body_to_dict(response, self.headers[HEADER_ACCEPT],\n xml_root_element_name=ENVIRONMENT_INSTANCE_BODY_ROOT)\n return dict_response, response\n", "id": "4563492", "language": "Python", "matching_score": 7.824339866638184, "max_stars_count": 0, "path": "python-paasmanagerclient/paasmanagerclient/environment_instance_resource_client.py" }, { "content": "# -*- coding: utf-8 -*-\n\n# Copyright 2015 Telefonica Investigación y Desarrollo, S.A.U\n#\n# This file is part of FIWARE project.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with <EMAIL>\n\n\nfrom utils.rest_client_utils import RestClient, API_ROOT_URL_ARG_NAME, model_to_request_body, \\\n response_body_to_dict, HEADER_CONTENT_TYPE, HEADER_ACCEPT, HEADER_REPRESENTATION_XML\nfrom utils.logger_utils import get_logger\n\nlogger = get_logger(__name__)\n\n\n#URI ELEMENT\nPAASMANAGER_BASE_URI = \"{\" + API_ROOT_URL_ARG_NAME + \"}\"\nENVIRONMENT_RESOURCE_ROOT_URI = PAASMANAGER_BASE_URI + 
\"/catalog/org/FIWARE/vdc/{tenant_id}/environment\"\nENVIRONMENT_RESOURCE_DETAIL_URI = ENVIRONMENT_RESOURCE_ROOT_URI + \"/{environment_name}\"\n\n\n# BODY ELEMENTS\nENVIRONMENT_BODY_ROOT = \"environmentDto\"\nENVIRONMENT_BODY_NAME = \"name\"\nENVIRONMENT_BODY_DESCRIPTION = \"description\"\n\nclass EnvironmentResourceClient(RestClient):\n\n def __init__(self, protocol, host, port, tenant_id, resource=None, headers=None):\n \"\"\"\n Class constructor. Inits default attributes.\n :param protocol: Connection protocol (HTTP | HTTPS)\n :param host: Host\n :param port: Port\n :param tenant_id: TenantID\n :param resource: Base URI resource\n :param headers: HTTP Headers\n :return: None\n \"\"\"\n if headers is None:\n self.headers = {HEADER_CONTENT_TYPE: HEADER_REPRESENTATION_XML,\n HEADER_ACCEPT: HEADER_REPRESENTATION_XML}\n self.headers = headers\n self.tenant_id = tenant_id\n super(EnvironmentResourceClient, self).__init__(protocol, host, port, resource=resource)\n\n def create_environment(self, name, description):\n \"\"\"\n Create a new environment (Tenant)\n :param name: Name of the environment\n :param description: Description of the environment\n :return: 'Requests' response\n \"\"\"\n logger.info(\"Creating new environment\")\n env_model = {ENVIRONMENT_BODY_ROOT: {ENVIRONMENT_BODY_NAME: name,\n ENVIRONMENT_BODY_DESCRIPTION: description}}\n body = model_to_request_body(env_model, self.headers[HEADER_ACCEPT])\n\n return self.post(ENVIRONMENT_RESOURCE_ROOT_URI, body, self.headers, parameters=None,\n tenant_id=self.tenant_id)\n\n def delete_environment(self, name):\n \"\"\"\n Delete an environemnt (Tenant)\n :param name: Name of the environment to be deleted\n :return: 'Request' response\n \"\"\"\n logger.info(\"Deleting environment\")\n return self.delete(ENVIRONMENT_RESOURCE_DETAIL_URI, headers=self.headers, parameters=None,\n tenant_id=self.tenant_id, environment_name=name)\n\n def get_environment(self, name):\n \"\"\"\n Get an environment (Tenant)\n :return: A duple: The environment as a dict, , the 'Request' response\n \"\"\"\n logger.info(\"Get environment\")\n response = self.get(ENVIRONMENT_RESOURCE_DETAIL_URI, headers=self.headers, parameters=None,\n tenant_id=self.tenant_id, environment_name=name)\n\n dict_environment = response_body_to_dict(response, self.headers[HEADER_ACCEPT],\n xml_root_element_name=ENVIRONMENT_BODY_ROOT)\n return dict_environment, response\n", "id": "1432218", "language": "Python", "matching_score": 7.133833885192871, "max_stars_count": 0, "path": "python-paasmanagerclient/paasmanagerclient/environment_resource_client.py" }, { "content": "# -*- coding: utf-8 -*-\n\n# Copyright 2015 Telefonica Investigación y Desarrollo, S.A.U\n#\n# This file is part of FIWARE project.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with <EMAIL>\n\n\nfrom utils.rest_client_utils import RestClient, API_ROOT_URL_ARG_NAME, response_body_to_dict,\\\n HEADER_CONTENT_TYPE, HEADER_ACCEPT, 
HEADER_REPRESENTATION_XML\nfrom utils.logger_utils import get_logger\n\nlogger = get_logger(__name__)\n\n\n#URI ELEMENT\nPAASMANAGER_BASE_URI = \"{\" + API_ROOT_URL_ARG_NAME + \"}\"\nTASK_RESOURCE_ROOT_URI = PAASMANAGER_BASE_URI + \"/vdc/{tenant_id}/task\"\nTASK_RESOURCE_DETAIL_URI = TASK_RESOURCE_ROOT_URI + \"/{task_id}\"\n\n#BODY ELEMENT\nTASK_BODY_ROOT =\"task\"\n\nclass TaskResourceClient(RestClient):\n\n def __init__(self, protocol, host, port, tenant_id, resource=None, headers=None):\n \"\"\"\n Class constructor. Inits default attributes.\n :param protocol: Connection protocol (HTTP | HTTPS)\n :param host: Host\n :param port: Port\n :param tenant_id: TenantID\n :param resource: Base URI resource\n :param headers: HTTP Headers\n :return: None\n \"\"\"\n if headers is None:\n self.headers = {HEADER_CONTENT_TYPE: HEADER_REPRESENTATION_XML,\n HEADER_ACCEPT: HEADER_REPRESENTATION_XML}\n self.headers = headers\n self.tenant_id = tenant_id\n super(TaskResourceClient, self).__init__(protocol, host, port, resource=resource)\n\n def get_task(self, task_id):\n \"\"\"\n Get a PaasManager Task (Tenant)\n :param task_id: ID of the task to obtain\n :return: A duple: The corresponding task, , the 'Request' response\n \"\"\"\n logger.info(\"Get task\")\n response = self.get(TASK_RESOURCE_DETAIL_URI, headers=self.headers, parameters=None,\n tenant_id=self.tenant_id, task_id=task_id)\n\n task_dict = response_body_to_dict(response, self.headers[HEADER_ACCEPT],\n xml_root_element_name=TASK_BODY_ROOT)\n return task_dict, response\n", "id": "8671916", "language": "Python", "matching_score": 1.5426493883132935, "max_stars_count": 0, "path": "python-paasmanagerclient/paasmanagerclient/task_resource_client.py" }, { "content": "# -*- coding: utf-8 -*-\n# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U\n#\n# This file is part of FI-WARE project.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with <EMAIL>\n\n__author__ = 'jfernandez'\n\nimport http\n\n\nclass NovaRequest:\n\n def __init__(self, nova_url, tenant, user, password, vdc, auth_token):\n \"\"\"\n Constructor. 
Initializes class attributes.\n :param nova_url: Nova URL\n :param tenant: Fiware tenant name\n :param user: Fiware User name\n :param password: <PASSWORD>\n :param vdc: TenantId\n :param auth_token: Valid Auth Token (Keystone)\n \"\"\"\n self.nova_url = nova_url\n\n self.vdc = vdc\n\n self.user = user\n self.password = password\n self.tenant = tenant\n\n self.default_headers = {'Accept': 'application/json',\n 'X-Auth-Token': auth_token,\n 'Tenant-Id': self.tenant}\n\n def __get__(self, url):\n \"\"\"\n Executes a get request to Nova service\n :param url: Full URL to GET\n :return: HTTPlib request\n \"\"\"\n return http.get(url, self.default_headers)\n\n def get_flavour_list(self):\n \"\"\"\n Gets the list of available flavors\n :return: HTTPlib request\n \"\"\"\n url = \"{}/{}\".format(self.nova_url, 'flavors')\n return self.__get__(url)\n\n def get_server_list(self):\n \"\"\"\n Gets the list of launched servers\n :return: HTTPlib request\n \"\"\"\n url = \"{}/{}\".format(self.nova_url, 'servers')\n return self.__get__(url)\n\n def get_server_details(self, server_id):\n \"\"\"\n Gets full details about a server\n :return: HTTPlib request\n \"\"\"\n url = \"{}/{}/{}\".format(self.nova_url, 'servers', server_id)\n return self.__get__(url)\n\n def get_security_group_list(self):\n \"\"\"\n Gets the list of security groups\n :return: HTTPlib request\n \"\"\"\n url = \"{}/{}\".format(self.nova_url, 'os-security-groups')\n return self.__get__(url)\n\n\ndef get_number_of_flavors(body_response):\n \"\"\"\n Returns the number of images in the list.\n :param body_response: Parsed response (Python dic). List of flavors\n :return: Length of the list\n \"\"\"\n return len(body_response['flavors'])\n\n\ndef get_first_flavor_in_list(body_response, name_filter=None):\n flavor = None\n if name_filter is not None:\n for flavor in body_response['flavors']:\n if name_filter in flavor['name']:\n flavor = flavor['id']\n break\n else:\n if len(body_response['flavors']) != 0:\n flavor = body_response['flavors'][0]['id']\n\n return flavor\n\n\ndef get_server_id_by_partial_name(body_response_server_list, partial_server_name):\n \"\"\"\n Looks for server Id in the server list by server name\n :param body_response_server_list: Parsed response (python dic). List of deployed instances\n :param partial_name: The name of the server to find (or a substring)\n :return: Server ID\n \"\"\"\n server_id = None\n for server in body_response_server_list['servers']:\n if partial_server_name in server['name']:\n server_id = server['id']\n break\n\n return server_id\n\n\ndef get_metadata_value(body_response_server_details, metadata_key):\n \"\"\"\n Retrieves the metadata value from instance details\n :param body_response_server_details: Parsed response (python dic). Server details data\n :param metadata_key: The key of the metadata to be retrieved\n :return: Metadata value with that key\n \"\"\"\n metadata_value = None\n if metadata_key in body_response_server_details['server']['metadata']:\n metadata_value = body_response_server_details['server']['metadata'][metadata_key]\n\n return metadata_value\n\n\ndef get_security_group_rules(body_response_sec_group_list, sec_group_name):\n \"\"\"\n Retrieve security group rules list\n :param body_response_sec_group_list: Parsed response (python dic). List of sec. groups\n :param sec_group_name: Name of Sec. Group to look for\n :return: Return the first sec. 
group with sec_group_name IN name.\n \"\"\"\n rules_list = None\n for sec_group in body_response_sec_group_list:\n if sec_group_name in sec_group['name']:\n rules_list = sec_group['rules']\n break\n\n return rules_list\n\n\ndef get_ports_from_rules(body_response_sec_group_list, sec_group_name, protocol='TCP'):\n \"\"\"\n Retrieve the list of TCP open ports\n :param body_response_sec_group_list: Parsed response (python dic). List of sec. groups\n :param sec_group_name: Name of Sec. Group to look for\n :param protocol: TCP or UDP\n :return: List of TCP ports (list of strings)\n \"\"\"\n port_list = []\n for sec_group in body_response_sec_group_list['security_groups']:\n if sec_group_name in sec_group['name']:\n for rule in sec_group['rules']:\n if rule['ip_protocol'] == protocol.lower():\n port = str(rule['from_port'])\n to_port = str(rule['to_port'])\n port = port + \"-\" + to_port if to_port != port else port\n port_list.append(port)\n\n return port_list\n\n\ndef get_network_name_list(body_response_server_details):\n \"\"\"\n Retrieve the list of network names where this server is connected\n :param body_response_server_details: (dic) Parsed response. Server details data\n :return: (list) The list of linked networks to the server\n \"\"\"\n network_list = list()\n for address in body_response_server_details['server']['addresses']:\n network_list.append(address)\n\n return network_list\n\n\ndef get_floating_ip(body_response_server_details, network_name):\n \"\"\"\n Retrieve the first floating IP from the given network attached to VM\n :param body_response_server_details: (dic) Parsed response. Server details data\n :param network_name (String): Name of the network where floating IP is allocated.\n :return: (String) The floating IP. None if floating IP not found in the given network (VM)\n \"\"\"\n floating_ip = None\n addresses = body_response_server_details['server']['addresses']\n for address in addresses:\n if address == network_name:\n for net in addresses[address]:\n if net['OS-EXT-IPS:type'] == \"floating\":\n floating_ip = net['addr']\n break\n\n return floating_ip\n", "id": "6969360", "language": "Python", "matching_score": 4.107071876525879, "max_stars_count": 0, "path": "test/acceptance/tools/nova_request.py" }, { "content": "# -*- coding: utf-8 -*-\n# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U\n#\n# This file is part of FI-WARE project.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with <EMAIL>\n\n__author__ = 'jfernandez'\n\nimport http\n\n\nclass GlanceRequest:\n\n def __init__(self, glance_url, tenant, user, password, vdc, auth_token):\n \"\"\"\n Class constructor. 
Initializes class variables\n :param glance_url: Glance URL\n :param tenant: Fiware Tenant name\n :param user: Fiware User name\n :param password: <PASSWORD>\n :param vdc: Tenant Id\n :param auth_token: Valid Auth Token (Keystone)\n \"\"\"\n self.glance_url = glance_url\n\n self.vdc = vdc\n\n self.user = user\n self.password = password\n self.tenant = tenant\n\n self.default_headers = {'Accept': 'application/json',\n 'X-Auth-Token': auth_token,\n 'Tenant-Id': self.tenant}\n\n def __get__(self, url):\n \"\"\"\n Executes a get request to Glance service\n :param url: Full URL to GET\n :return: HTTPlib request\n \"\"\"\n return http.get(url, self.default_headers)\n\n def get_image_list(self):\n \"\"\"\n Retrieves the list of images from Glance\n :return: HTTPlib request\n \"\"\"\n url = \"{}/{}\".format(self.glance_url, 'images')\n return self.__get__(url)\n\n def get_image_list_by_property(self, property_name, property_value):\n \"\"\"\n Retrieves the detailed list of images from Glance filtered by property\n :return: HTTPlib request\n \"\"\"\n url = \"{}/{}/{}?property-{}={}\".format(self.glance_url, 'images', 'detail', property_name, property_value)\n return self.__get__(url)\n\n\ndef get_number_of_images(body_response):\n \"\"\"\n Returns the number of images in the list.\n :param body_response: Parsed response (Python dic). List of images\n :return: Length of list\n \"\"\"\n return len(body_response['images'])\n\n\ndef get_first_image_in_list(body_response, name_filter=None):\n \"\"\"\n Gets the first image in the list\n :param body_response: Parsed response (Python dic)\n :param name_filter: If this arg is set, this method will filtered by name content\n :return: First image in list (that contains name_filter in its name); None if not found or list is empty\n \"\"\"\n image_id = None\n if name_filter is not None:\n for image in body_response['images']:\n if name_filter in image['name']:\n image_id = image['id']\n break\n else:\n if len(body_response['images']) != 0:\n image_id = body_response['images'][0]['id']\n\n return image_id\n", "id": "993342", "language": "Python", "matching_score": 1.2186919450759888, "max_stars_count": 0, "path": "test/acceptance/tools/glance_request.py" }, { "content": "# -*- coding: utf-8 -*-\n# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U\n#\n# This file is part of FI-WARE project.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with <EMAIL>\n\n__author__ = 'jfernandez'\n\n\nfrom lettuce import before, world, after\nfrom tools.http import get_token\nfrom tools.keystone_request import KeystoneRequest\nfrom tools.glance_request import GlanceRequest\nfrom tools.environment_request import EnvironmentRequest\nfrom tools.environment_instance_request import EnvironmentInstanceRequest\nfrom tools.nova_request import NovaRequest\nfrom tools import environment_request, environment_instance_request\nfrom tools.utils import 
raw_httplib_request_to_python_dic\nimport os\nfrom tools.constants import PAAS, KEYSTONE_URL, PAASMANAGER_URL, TENANT, USER,\\\n PASSWORD, VDC, SDC_URL, GLANCE_URL, NOVA_URL, ENVIRONMENT, ENVIRONMENT_TESTFILES\n\n\[email protected]\ndef before_all():\n \"\"\" Hook: Before all features. It will config common requisites for TCs execution \"\"\"\n # Get Auth Token\n world.auth_token = get_token(world.config[PAAS][KEYSTONE_URL] + '/tokens', world.config[PAAS][TENANT],\n world.config[PAAS][USER],\n world.config[PAAS][PASSWORD])\n\n\[email protected]_feature\ndef after_each_feature(feature):\n \"\"\" Hook: Before each feature. It will instance all request managers used in the feature and will initialize vars\"\"\"\n world.keystone_request = KeystoneRequest(world.config[PAAS][KEYSTONE_URL],\n world.config[PAAS][TENANT],\n world.config[PAAS][USER],\n world.config[PAAS][PASSWORD],\n world.config[PAAS][VDC])\n\n world.glance_request = GlanceRequest(world.config[PAAS][GLANCE_URL],\n world.config[PAAS][TENANT],\n world.config[PAAS][USER],\n world.config[PAAS][PASSWORD],\n world.config[PAAS][VDC],\n world.auth_token)\n\n world.nova_request = NovaRequest(world.config[PAAS][NOVA_URL],\n world.config[PAAS][TENANT],\n world.config[PAAS][USER],\n world.config[PAAS][PASSWORD],\n world.config[PAAS][VDC],\n world.auth_token)\n\n world.env_requests = EnvironmentRequest(world.config[PAAS][KEYSTONE_URL],\n world.config[PAAS][PAASMANAGER_URL],\n world.config[PAAS][TENANT],\n world.config[PAAS][USER],\n world.config[PAAS][PASSWORD],\n world.config[PAAS][VDC],\n world.config[PAAS][SDC_URL])\n\n world.inst_requests = EnvironmentInstanceRequest(world.config[PAAS][KEYSTONE_URL],\n world.config[PAAS][PAASMANAGER_URL],\n world.config[PAAS][TENANT],\n world.config[PAAS][USER],\n world.config[PAAS][PASSWORD],\n world.config[PAAS][VDC],\n world.config[PAAS][SDC_URL])\n\n world.region_list = None\n\n\[email protected]\ndef before_outline(param1, param2, param3, param4):\n \"\"\" Hook: Will be executed before each Scenario Outline. Same behaviour as 'before_each_scenario'\"\"\"\n try:\n test_files_dir = world.config[ENVIRONMENT][ENVIRONMENT_TESTFILES]\n print \"Writing instance {} details to dir {}\".format(world.instance_name, test_files_dir)\n\n world.inst_requests.get_instance(world.instance_name)\n body_env_response = raw_httplib_request_to_python_dic(world.response)\n\n if not os.path.exists(test_files_dir):\n os.makedirs(test_files_dir)\n\n file = open(os.path.join(test_files_dir, world.instance_name+\"_instance\"), 'w')\n file.write(str(body_env_response))\n file.close()\n except Exception as e:\n print \"WARNING: Instance data cannot be written to test file. {}\".format(e.message)\n\n\[email protected]_scenario\ndef after_each_scenario(scenario):\n \"\"\" Hook: After each scenario. It will clean environments and instances created in the scenario. 
\"\"\"\n environment_instance_request.delete_created_instances()\n environment_request.delete_created_environments()\n", "id": "1851810", "language": "Python", "matching_score": 5.346877098083496, "max_stars_count": 0, "path": "test/acceptance/integration/e2e/deploy_environment_regions/terrain.py" }, { "content": "# -*- coding: utf-8 -*-\n# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U\n#\n# This file is part of FI-WARE project.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with <EMAIL>\n\nfrom lettuce import world, after, before\nfrom tools import terrain_steps\nfrom tools.http import get_token\nfrom tools import environment_request, environment_instance_request\nfrom tools.environment_request import EnvironmentRequest\nfrom tools.environment_instance_request import EnvironmentInstanceRequest\nfrom tools.constants import PAAS, KEYSTONE_URL, PAASMANAGER_URL, TENANT, USER,\\\n PASSWORD, VDC, SDC_URL, NOVA_URL\nfrom tools.nova_request import NovaRequest\nfrom tools.product_sdc_request import ProductSdcRequest\n\n\[email protected]\ndef before_all():\n \"\"\" Hook: Before all features. It will config common requisites for TCs execution \"\"\"\n # Get Auth Token\n world.auth_token = get_token(world.config[PAAS][KEYSTONE_URL] + '/tokens', world.config[PAAS][TENANT],\n world.config[PAAS][USER],\n world.config[PAAS][PASSWORD])\n\n\[email protected]_feature\ndef before_each_feature(feature):\n world.env_requests = EnvironmentRequest(world.config[PAAS][KEYSTONE_URL],\n world.config[PAAS][PAASMANAGER_URL],\n world.config[PAAS][TENANT],\n world.config[PAAS][USER],\n world.config[PAAS][PASSWORD],\n world.config[PAAS][VDC],\n world.config[PAAS][SDC_URL])\n\n world.inst_requests = EnvironmentInstanceRequest(world.config[PAAS][KEYSTONE_URL],\n world.config[PAAS][PAASMANAGER_URL],\n world.config[PAAS][TENANT],\n world.config[PAAS][USER],\n world.config[PAAS][PASSWORD],\n world.config[PAAS][VDC],\n world.config[PAAS][SDC_URL])\n\n world.product_sdc_request = ProductSdcRequest(world.config[PAAS][KEYSTONE_URL],\n world.config[PAAS][SDC_URL],\n world.config[PAAS][TENANT],\n world.config[PAAS][USER],\n world.config[PAAS][PASSWORD],\n world.config[PAAS][VDC])\n\n world.nova_request = NovaRequest(world.config[PAAS][NOVA_URL],\n world.config[PAAS][TENANT],\n world.config[PAAS][USER],\n world.config[PAAS][PASSWORD],\n world.config[PAAS][VDC],\n world.auth_token)\n\n # Init vars\n world.product_and_release_list = list()\n world.product_installator = 'chef'\n\n\[email protected]_scenario\ndef before_each_scenario(scenario):\n \"\"\" Lettuce Hook. Will be executed before each scenario. Init global scenario vars. \"\"\"\n world.product_list_with_attributes = list()\n world.paas_product_list_with_attributes = list()\n\n\[email protected]\ndef after_outline(param1, param2, param3, param4):\n \"\"\" Hook: Will be executed after each Scenario Outline. 
Same behaviour as 'after_each_scenario'\"\"\"\n after_each_scenario(None)\n\n\[email protected]_scenario\ndef after_each_scenario(scenario):\n raw_input(\"Press Key...\")\n\n # Delete the environments created in the scenario.\n environment_instance_request.delete_created_instances()\n environment_request.delete_created_environments()\n\n # Remove SDC product data\n for product_and_release in world.product_and_release_list:\n world.product_sdc_request.delete_product_and_release(product_and_release['product_name'],\n product_and_release['product_release'])\n\n\[email protected]\ndef before_outline(param1, param2, param3, param4):\n \"\"\" Hook: Will be executed before each Scenario Outline. Same behaviour as 'before_each_scenario'\"\"\"\n before_each_scenario(None)\n\n\[email protected]_feature\ndef after_feature(feature):\n \"\"\"Hook: Will be executed after the feature\"\"\"\n # Remove SDC product data\n for product_and_release in world.product_and_release_list:\n world.product_sdc_request.delete_product_and_release(product_and_release['product_name'],\n product_and_release['product_release'])\n", "id": "10562192", "language": "Python", "matching_score": 2.7993404865264893, "max_stars_count": 0, "path": "test/acceptance/integration/e2e/test_chef_puppet_on_base_images/terrain.py" }, { "content": "# -*- coding: utf-8 -*-\n# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U\n#\n# This file is part of FI-WARE project.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with <EMAIL>\n\nfrom lettuce import step, world\nfrom lettuce_tools.dataset_utils.dataset_utils import DatasetUtils\nfrom tools import http, environment_request, environment_instance_request\nfrom tools.environment_instance import EnvironmentInstance\nimport json\nfrom tools.constants import NAME, DESCRIPTION, METADATA_NID_NOVA_KEY, METADATA_NID, REGION_DEFAULT_SHAREDNET_PROPERTY,\\\n PAAS\nfrom common_steps import sdc_product_provisioning_steps, paas_environment_provisioning\nfrom tools.utils import raw_httplib_request_to_python_dic\nfrom nose.tools import assert_equal, assert_is_not_none, assert_equals, assert_in\nfrom tools.nova_request import get_server_id_by_partial_name, get_metadata_value, get_ports_from_rules,\\\n get_network_name_list\n\ndataset_utils = DatasetUtils()\n\n\ndef _check_metadata_for_tier_in_nova(tier_name, metadata_key, metadata_value):\n # Request the list of servers to Nova\n raw_response = world.nova_request.get_server_list()\n assert_equal(raw_response.status, 200, \"Error to obtain the server list. 
HTTP status code is not the expected\")\n\n body_response = raw_httplib_request_to_python_dic(raw_response)\n sub_instance_name = \"{}-{}\".format(world.instance_name, tier_name)\n server_id = get_server_id_by_partial_name(body_response, sub_instance_name)\n assert_is_not_none(server_id, \"Server has not been found with sub-name \" + sub_instance_name)\n\n # Request the server details by server_id\n raw_response = world.nova_request.get_server_details(server_id)\n assert_equal(raw_response.status, 200, \"Error to obtain server details. HTTP status code is not the expected\")\n\n body_response = raw_httplib_request_to_python_dic(raw_response)\n nid_metadata_value = get_metadata_value(body_response, metadata_key)\n assert_equals(str(nid_metadata_value), str(metadata_value))\n\n\n@step(u'the paas manager is up and properly configured')\ndef the_paas_manager_is_up_and_properly_configured(step):\n pass # Nothing to do here, the set up should be done by external means\n\n\n@step(u'a list of tiers has been defined with data:')\ndef a_list_of_tiers_has_been_defined_with_data(step):\n world.tiers = paas_environment_provisioning.process_the_list_of_tiers(step)\n\n\n@step(u'an environment has already been created with data:')\ndef an_environment_has_already_been_created_with_data(step):\n data = dataset_utils.prepare_data(step.hashes[0])\n world.env_requests.add_environment(data.get(NAME), data.get(DESCRIPTION))\n\n\n@step(u'an environment has already been created with the previous tiers and data:')\ndef an_environment_has_already_been_created_with_the_previous_tiers_and_data(step):\n data = dataset_utils.prepare_data(step.hashes[0])\n world.env_requests.add_environment(data.get(NAME), data.get(DESCRIPTION), world.tiers)\n assert world.response.status == 204,\\\n \"Wrong status code received creating environment: %d. Expected: %d. Body content: %s\"\\\n % (world.response.status, 204, world.response.read())\n\n\n@step(u'an instance of the environment \"([^\"]*)\" has already been created using data:')\ndef an_instance_of_the_environment_has_already_been_created_using_data(step, env_name):\n i_request_the_creation_of_an_instance_of_an_environment_using_data(step, env_name)\n the_task_ends_with_status(step, \"SUCCESS\")\n\n\n@step(u'I request the creation of an instance of the environment \"([^\"]*)\" using data:')\ndef i_request_the_creation_of_an_instance_of_an_environment_using_data(step, env_name):\n # First, send the request to get the environment on which the instance will be based\n env_name = dataset_utils.generate_fixed_length_param(env_name)\n world.env_requests.get_environment(env_name)\n assert world.response.status == 200,\\\n \"Wrong status code received getting environment: %d. Expected: %d. Body content: %s\"\\\n % (world.response.status, 200, world.response.read())\n environment = environment_request.process_environment(json.loads(world.response.read()))\n # Then, create the instance\n data = dataset_utils.prepare_data(step.hashes[0])\n world.instance_name = data.get(NAME)\n instance = EnvironmentInstance(world.instance_name, data.get(DESCRIPTION), environment)\n world.inst_requests.add_instance(instance)\n\n\n@step(u'I receive an? 
\"([^\"]*)\" response(?: with a task)?')\ndef i_receive_a_response_of_type(step, response_type):\n status_code = http.status_codes[response_type]\n environment_instance_request.check_add_instance_response(world.response, status_code)\n\n\n@step(u'the task ends with \"([^\"]*)\" status')\ndef the_task_ends_with_status(step, status):\n environment_instance_request.check_task_status(world.task_data, status)\n\n\n@step(u'the product installator to be used is \"([^\"]*)\"')\ndef the_installator_to_be_used_is_group1(step, installator):\n world.product_installator = installator\n\n\n@step(u'the product \"([^\"]*)\" with version \"([^\"]*)\" is created in SDC$')\ndef the_product_group1_is_created_in_sdc(step, product_name, product_version):\n world.product_sdc_request.create_product_and_release(product_name, product_version)\n\n\n@step(u'the product \"([^\"]*)\" with version \"([^\"]*)\" is created in SDC with attributes:$')\ndef the_product_group1_is_created_in_sdc_with_attributes(step, product_name, product_version):\n sdc_product_provisioning_steps.product_is_created_in_sdc_with_attributes(step, product_name, product_version)\n paas_environment_provisioning.process_following_instance_attributes(step, product_name)\n\n\n@step(u'the product \"([^\"]*)\" with version \"([^\"]*)\" is created in SDC with metadatas:$')\ndef the_product_group1_is_created_in_sdc_with_metadatas(step, product_name, product_version):\n sdc_product_provisioning_steps.product_is_created_in_sdc_with_metadatas(step, product_name, product_version)\n\n\n@step(u'the created instances have not NID metadata')\ndef the_created_instances_have_not_nid_metadata(step):\n # For each tier, check NID value\n for tier in world.tiers:\n _check_metadata_for_tier_in_nova(tier.name, METADATA_NID_NOVA_KEY, None)\n\n\n@step(u'the created instances have the correct NID metadata value')\ndef the_created_instances_have_not_nid_metadata(step):\n # For each tier, check NID value\n for tier in world.tiers:\n # Get the last product installed\n last_product_name = None\n for tier_product in tier.products:\n last_product_name = tier_product.product\n\n # Look for test product data in the world and get the nid value\n metadata_value = None\n if last_product_name is not None:\n for test_product in world.product_list_with_attributes:\n if test_product['name'] == last_product_name:\n # Look for metadata with key=nid\n for metadata in test_product['metadatas']:\n if metadata['key'] == METADATA_NID:\n metadata_value = metadata['value']\n break\n break\n\n print \"NID value expected:\", metadata_value\n _check_metadata_for_tier_in_nova(tier.name, METADATA_NID_NOVA_KEY, metadata_value)\n\n\n@step(u'the created security group has rules with \"(TCP|UDP)\" ports \"([^\"]*)\"')\ndef the_created_sec_group_has_rules(step, protocol, open_ports):\n open_ports_list = []\n if \"\" != open_ports:\n open_ports_list = open_ports.split(' ')\n\n # Add default port '22'\n if protocol == 'TCP':\n open_ports_list.append('22')\n\n # Get Sec. Group from Nova\n raw_response = world.nova_request.get_security_group_list()\n assert_equal(raw_response.status, 200, \"Error to obtain Sec. Groups list. 
HTTP status code is not the expected\")\n\n body_response = raw_httplib_request_to_python_dic(raw_response)\n rules_tcp_ports_list = get_ports_from_rules(body_response, world.instance_name, protocol=protocol)\n\n # Check expected ports\n assert_equals(len(open_ports_list), len(rules_tcp_ports_list))\n for expected_port in open_ports_list:\n assert_in(expected_port, rules_tcp_ports_list)\n\n\n@step(u'the expected networks are connected to the instances')\ndef the_networks_are_connected_to_instance(step):\n\n # Get Server list from Nova\n raw_response = world.nova_request.get_server_list()\n assert_equal(raw_response.status, 200, \"Error to obtain Server list. HTTP status code is not the expected\")\n server_list = raw_httplib_request_to_python_dic(raw_response)\n\n # For each tier, check Networks\n for tier in world.tiers:\n # Get Server id\n sub_instance_name = \"{}-{}\".format(world.instance_name, tier.name)\n server_id = get_server_id_by_partial_name(server_list, sub_instance_name)\n\n # Get Server details\n raw_response = world.nova_request.get_server_details(server_id)\n assert_equal(raw_response.status, 200, \"Error to obtain Server details. HTTP status code is not the expected\")\n server_details = raw_httplib_request_to_python_dic(raw_response)\n\n connected_networks_list = get_network_name_list(server_details)\n\n if tier.networks is None or tier.networks == []:\n assert_equal(len(connected_networks_list), 1,\n \"The number of connected networks is not the expected one\")\n assert_equal(connected_networks_list[0], world.config[PAAS][REGION_DEFAULT_SHAREDNET_PROPERTY],\n \"The connected network is not the configured as Shared-Net\")\n else:\n assert_equal(len(connected_networks_list), len(tier.networks),\n \"The number of connected networks is not the expected one\")\n for network in tier.networks:\n assert_in(network.network_name, connected_networks_list,\n \"The network '%s' is not connected to the instance\".format(network.network_name))\n", "id": "1392991", "language": "Python", "matching_score": 9.123093605041504, "max_stars_count": 0, "path": "test/acceptance/integration/instances/create_instance/steps.py" }, { "content": "# -*- coding: utf-8 -*-\n# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U\n#\n# This file is part of FI-WARE project.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with <EMAIL>\n\n__author__ = 'jfernandez'\n\n\nfrom lettuce import step, world\nfrom lettuce_tools.dataset_utils.dataset_utils import DatasetUtils\nfrom common_steps import sdc_product_provisioning_steps, paas_environment_provisioning\nfrom tools.constants import NAME, DESCRIPTION, PRODUCT_FILE_NAME_FORMAT, PAAS, CONFIG_SUPPORT_KEY_FILE, \\\n CONFIG_SUPPORT_USER, REGION_DEFAULT_SHAREDNET_PROPERTY, INSTALLATION_PRODUCT_DIR, MAX_CHECKS_SSH_CONNECTION,\\\n SLEEP_TIME_CHECKS\nfrom tools import http, environment_request, environment_instance_request\nfrom tools.environment_instance_request 
import EnvironmentInstance\nfrom tools.fabric_utils import FabricUtils\nfrom tools.nova_request import get_server_id_by_partial_name, get_network_name_list, get_floating_ip\nfrom tools.utils import raw_httplib_request_to_python_dic\nimport json\nfrom nose.tools import assert_equal, assert_is_not_none, assert_equals, assert_in, assert_true\nimport time\n\ndataset_utils = DatasetUtils()\n\n\n@step(u'the paas manager is up and properly configured')\ndef the_paas_manager_is_up_and_properly_configured(step):\n\n pass # Nothing to do here, the set up should be done by external means\n\n\n@step(u'a list of tiers has been defined with data:')\ndef a_list_of_tiers_has_been_defined_with_data(step):\n\n world.tiers = paas_environment_provisioning.process_the_list_of_tiers(step)\n\n\n@step(u'an environment has already been created with the previous tiers and data:')\ndef an_environment_has_already_been_created_with_the_previous_tiers_and_data(step):\n\n data = dataset_utils.prepare_data(step.hashes[0])\n world.env_requests.add_environment(data.get(NAME), data.get(DESCRIPTION), world.tiers)\n assert world.response.status == 204, \\\n \"Wrong status code received creating environment: %d. \" \\\n \"Expected: %d. Body content: %s\" % (world.response.status, 204, world.response.read())\n\n\n@step(u'the product \"([^\"]*)\" with version \"([^\"]*)\" is created in SDC with metadatas:$')\ndef the_product_group1_is_created_in_sdc_with_metadatas(step, product_name, product_version):\n\n sdc_product_provisioning_steps.product_is_created_in_sdc_with_metadatas(step, product_name, product_version)\n\n\n@step(u'I request the creation of an instance of the environment \"([^\"]*)\" using data:')\ndef i_request_the_creation_of_an_instance_of_an_environment_using_data(step, env_name):\n\n # First, send the request to get the environment on which the instance will be based\n env_name = dataset_utils.generate_fixed_length_param(env_name)\n world.env_requests.get_environment(env_name)\n assert world.response.status == 200, \\\n \"Wrong status code received getting environment: %d. \" \\\n \"Expected: %d. Body content: %s\" % (world.response.status, 200, world.response.read())\n\n environment = environment_request.process_environment(json.loads(world.response.read()))\n\n # Then, create the instance\n data = dataset_utils.prepare_data(step.hashes[0])\n world.instance_name = data.get(NAME)\n instance = EnvironmentInstance(world.instance_name, data.get(DESCRIPTION), environment)\n world.inst_requests.add_instance(instance)\n\n\n@step(u'I receive an? 
\"([^\"]*)\" response(?: with a task)?')\ndef i_receive_a_response_of_type(step, response_type):\n\n status_code = http.status_codes[response_type]\n environment_instance_request.check_add_instance_response(world.response, status_code)\n\n\n@step(u'the task ends with \"([^\"]*)\" status')\ndef the_task_ends_with_status(step, status):\n\n environment_instance_request.check_task_status(world.task_data, status)\n\n\n@step(u'the product \"([^\"]*)\" with version \"([^\"]*)\" of tier \"([^\"]*)\" has been installed using \"(chef|puppet)\"')\ndef the_product_is_installed(step, product_name, product_version, tier_name, installator):\n\n tier = None\n for tier_world in world.tiers:\n if tier_world.name == tier_name:\n tier = tier_world\n assert_is_not_none(tier, \"Tier with name '{}' not found in created tier list.\".format(tier_name))\n\n # > Get from Nova the IP of the Floating VM\n # >> Get Server list from Nova\n raw_response = world.nova_request.get_server_list()\n assert_equal(raw_response.status, 200, \"Error to obtain Server list. HTTP status code is not the expected\")\n server_list = raw_httplib_request_to_python_dic(raw_response)\n\n sub_instance_name = \"{}-{}\".format(world.instance_name, tier.name)\n server_id = get_server_id_by_partial_name(server_list, sub_instance_name)\n\n # Get Server details\n raw_response = world.nova_request.get_server_details(server_id)\n assert_equal(raw_response.status, 200, \"Error to obtain Server details. HTTP status code is not the expected\")\n server_details = raw_httplib_request_to_python_dic(raw_response)\n\n connected_networks_list = get_network_name_list(server_details)\n shared_net = world.config[PAAS][REGION_DEFAULT_SHAREDNET_PROPERTY]\n assert_in(shared_net, connected_networks_list,\n \"The connected network is not the configured as Shared-Net\")\n\n ip_internet = get_floating_ip(server_details, shared_net)\n assert_is_not_none(ip_internet, \"Floating IP not found Shared-Net. Is the Internet network added to tier conf?\")\n\n # Create new Fabric connection\n fabric_client = FabricUtils(host_name=ip_internet,\n host_username=world.config[PAAS][CONFIG_SUPPORT_USER],\n host_password=\"-\",\n host_ssh_key=world.config[PAAS][CONFIG_SUPPORT_KEY_FILE])\n\n\n file_name = PRODUCT_FILE_NAME_FORMAT.format(product_name=product_name,\n product_version=product_version,\n installator=installator)\n\n # Wait for software installation.\n time.sleep(SLEEP_TIME_CHECKS)\n\n # Retry SSH 5 times\n response = False\n for i in range(MAX_CHECKS_SSH_CONNECTION):\n try:\n response = fabric_client.file_exist(INSTALLATION_PRODUCT_DIR, file_name)\n print \"Connected!\"\n break\n except Exception as e:\n print \"SSH Connection #%d: %s\" % (i, e.message)\n time.sleep(SLEEP_TIME_CHECKS)\n\n assert_true(response, \"Softwer is not installed. 
File not found: '{}/{}\".format(INSTALLATION_PRODUCT_DIR,\n file_name))\n", "id": "9738445", "language": "Python", "matching_score": 5.4110894203186035, "max_stars_count": 0, "path": "test/acceptance/integration/e2e/test_chef_puppet_on_base_images/steps.py" }, { "content": "# -*- coding: utf-8 -*-\n# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U\n#\n# This file is part of FI-WARE project.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with <EMAIL>\n\nfrom lettuce import step, world\nfrom lettuce_tools.dataset_utils.dataset_utils import DatasetUtils\nfrom tools import http\nfrom tools import environment_request\nfrom tools.constants import NAME, DESCRIPTION\nfrom common_steps import sdc_product_provisioning_steps, paas_environment_provisioning\n\ndataset_utils = DatasetUtils()\n\n\n@step(u'the paas manager is up and properly configured')\ndef the_paas_manager_is_up_and_properly_configured(step):\n pass # Nothing to do here, the set up should be done by external means\n\n\n@step(u'a list of tiers has been defined with data:')\ndef a_list_of_tiers_has_been_defined_with_data(step):\n world.tiers = paas_environment_provisioning.process_the_list_of_tiers(step)\n\n\n@step(u'an environment has already been created with data:')\ndef an_environment_has_already_been_created_with_data(step):\n i_request_the_creation_of_an_environment_with_data(step)\n\n\n@step(u'I request the creation of an environment with data:')\ndef i_request_the_creation_of_an_environment_with_data(step):\n data = dataset_utils.prepare_data(step.hashes[0])\n world.env_requests.add_environment(data.get(NAME), data.get(DESCRIPTION))\n\n\n@step(u'I request the creation of an environment with the previous tiers and data:')\ndef i_request_the_creation_of_an_environment_with_tiers_and_data(step):\n data = dataset_utils.prepare_data(step.hashes[0])\n world.env_requests.add_environment(data.get(NAME), data.get(DESCRIPTION), world.tiers)\n\n\n@step(u'I receive an? 
\"([^\"]*)\" response')\ndef i_receive_a_response_of_type(step, response_type):\n status_code = http.status_codes[response_type]\n environment_request.check_add_environment_response(world.response, status_code)\n\n\n@step(u'the following instance attributes for product \"([^\"]*)\":')\ndef the_following_instance_attributes(step, product_name):\n paas_environment_provisioning.process_following_instance_attributes(step, product_name)\n\n\n@step(u'the product \"([^\"]*)\" with version \"([^\"]*)\" is created in SDC with attributes:')\ndef the_product_group1_is_created_in_sdc_with_attributes(step, product_name, product_version):\n sdc_product_provisioning_steps.product_is_created_in_sdc_with_attributes(step, product_name, product_version)\n", "id": "4438453", "language": "Python", "matching_score": 4.41176176071167, "max_stars_count": 0, "path": "test/acceptance/integration/environments/create_environment/steps.py" }, { "content": "# -*- coding: utf-8 -*-\n# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U\n#\n# This file is part of FI-WARE project.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with <EMAIL>\n\nfrom lettuce import step, world\nfrom lettuce_tools.dataset_utils.dataset_utils import DatasetUtils\nfrom tools import http\nfrom tools import environment_request\nfrom tools.constants import NAME, DESCRIPTION\nfrom common_steps import sdc_product_provisioning_steps, paas_environment_provisioning\n\ndataset_utils = DatasetUtils()\n\n\n@step(u'a content-type header value \"(.*)\"')\ndef content_type_header_value(step, content_type):\n world.headers.update({'Content-Type': content_type})\n\n\n@step(u'an accept header value \"(.*)\"')\ndef content_type_header_value(step, content_type):\n world.headers.update({'Accept': content_type})\n\n\n@step(u'the authentication token \"(.*)\"')\ndef the_auth_token(step, token):\n world.headers.update({'X-Auth-Token': token})\n\n\n@step(u'the authentication tenant-id \"(.*)\"')\ndef the_auth_token(step, tenant_id):\n world.headers.update({'Tenant-Id': tenant_id})\n\n\n@step(u'I request the creation of an environment with data:')\ndef i_request_the_creation_of_an_environment_with_data(step):\n data = dataset_utils.prepare_data(step.hashes[0])\n world.env_requests.add_environment(data.get(NAME), data.get(DESCRIPTION), headers=world.headers)\n\n\n@step(u'I receive an? 
\"([^\"]*)\" response$')\ndef i_receive_an_http_response(step, response_type):\n status_code = http.status_codes[response_type]\n assert status_code == world.response.status, \"HTTP status code is not the expected [{} != {}]\"\\\n .format(world.response.status, status_code)\n", "id": "10000682", "language": "Python", "matching_score": 1.7538135051727295, "max_stars_count": 0, "path": "test/acceptance/integration/environments/api_restrictions/steps.py" }, { "content": "__author__ = 'arobres, jfernandez'\n\n# -*- coding: utf-8 -*-\nfrom lettuce import step, world\nfrom commons.rest_utils import RestUtils\nfrom commons.product_steps import ProductSteps\nfrom commons.provisioning_steps import ProvisioningSteps\nfrom commons.constants import *\nfrom commons.utils import response_body_to_dict, generate_product_instance_id\nfrom nose.tools import assert_equals, assert_true\n\napi_utils = RestUtils()\nproduct_steps = ProductSteps()\nprovisioning_steps = ProvisioningSteps()\n\n\n@step(u'a created product but not installed')\ndef a_created_product_but_not_installed(step):\n world.product_name = step.hashes[0]['product_name']\n world.product_version = step.hashes[0]['version']\n world.vm_ip = step.hashes[0]['ip']\n world.vm_hostname = step.hashes[0]['hostname']\n world.vm_fqn = step.hashes[0]['fqn']\n world.cm_tool = step.hashes[0]['cm_tool']\n\n product_steps.a_created_product_with_name_and_release(step, world.product_name, world.product_version)\n\n\n@step(u'a created and installed product')\ndef a_created_and_installed_product(step):\n a_created_product_but_not_installed(step)\n\n provisioning_steps.i_install_the_product_in_the_vm(step)\n\n\n@step(u'I get all product instances')\ndef i_get_all_product_instances(step):\n world.response = api_utils.retrieve_product_instance_list(headers=world.headers, vdc_id=world.tenant_id)\n\n\n@step(u'the product instance is in the returned list')\ndef the_product_instance_is_returned_in_the_list(step):\n instance_id = generate_product_instance_id(world.vm_fqn, world.product_name, world.product_version)\n assert_true(world.response.ok, 'RESPONSE: {}'.format(world.response))\n \n response_body = response_body_to_dict(world.response, world.headers[ACCEPT_HEADER],\n xml_root_element_name=PRODUCT_INSTANCE_LIST)\n response_body = response_body\n assert_true(len(response_body) != 0)\n \n product_instance = None\n for product_ins in response_body:\n if instance_id in product_ins[PRODUCT_INSTANCE_NAME]:\n product_instance = product_ins\n break\n\n assert_true(product_instance is not None)\n assert_equals(product_instance[PRODUCT_INSTANCE_NAME], instance_id)\n assert_true(product_instance[PRODUCT_INSTANCE_STATUS] != \"\")\n assert_equals(product_instance[PRODUCT_INSTANCE_VM][PRODUCT_INSTANCE_VM_IP], world.vm_ip)\n assert_equals(product_instance[PRODUCT_INSTANCE_VM][PRODUCT_INSTANCE_VM_FQN], world.vm_fqn)\n assert_equals(product_instance[PRODUCT_INSTANCE_VM][PRODUCT_INSTANCE_VM_HOSTNAME], world.vm_hostname)\n \n assert_equals(product_instance[PRODUCT][VERSION], world.product_version)\n assert_equals(product_instance[PRODUCT][PRODUCT_NAME], world.product_name)\n\n@step(u'I get the product instance details')\ndef i_get_the_product_instance_details(step):\n world.instance_id = generate_product_instance_id(world.vm_fqn, world.product_name, world.product_version)\n world.response = api_utils.retrieve_product_instance(headers=world.headers, vdc_id=world.tenant_id,\n product_instance_id=world.instance_id)\n\n@step(u'the product instance is returned')\ndef 
the_product_instance_is_returned(step):\n assert_true(world.response.ok, 'RESPONSE: {}'.format(world.response))\n\n response_body = response_body_to_dict(world.response, world.headers[ACCEPT_HEADER],\n xml_root_element_name=PRODUCT_INSTANCE)\n\n assert_equals(response_body[PRODUCT_INSTANCE_NAME], world.instance_id)\n assert_true(response_body[PRODUCT_INSTANCE_STATUS] != \"\")\n assert_equals(response_body[PRODUCT_INSTANCE_VM][PRODUCT_INSTANCE_VM_IP], world.vm_ip)\n assert_equals(response_body[PRODUCT_INSTANCE_VM][PRODUCT_INSTANCE_VM_FQN], world.vm_fqn)\n assert_equals(response_body[PRODUCT_INSTANCE_VM][PRODUCT_INSTANCE_VM_HOSTNAME], world.vm_hostname)\n\n assert_equals(response_body[PRODUCT][VERSION], world.product_version)\n assert_equals(response_body[PRODUCT][PRODUCT_NAME], world.product_name)\n\n\n@step(u'the HTTP response code is (.*)')\ndef the_http_response_code_is_group1(step, http_status_code):\n assert_equals(world.response.status_code, int(http_status_code))\n", "id": "6335506", "language": "Python", "matching_score": 2.1587295532226562, "max_stars_count": 0, "path": "test/acceptance/component/get_product_instances/features/get_product_instances.py" }, { "content": "# -*- coding: utf-8 -*-\n# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U\n#\n# This file is part of FI-WARE project.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with <EMAIL>\n\n__author__ = 'jfernandez'\n\n\nfrom lettuce import step, world\nfrom lettuce_tools.dataset_utils.dataset_utils import DatasetUtils\nfrom tools.keystone_request import get_images_regions, get_public_endpoint_url_by_type, get_public_endpoint_url_by_name\nfrom tools.glance_request import get_number_of_images, get_first_image_in_list\nfrom tools.tier import Tier\nfrom tools.constants import NAME, DESCRIPTION, PRODUCTS, NETWORKS\nfrom tools.utils import raw_httplib_request_to_python_dic\nfrom tools.nova_request import get_number_of_flavors, get_first_flavor_in_list\nfrom tools.environment_instance_request import EnvironmentInstance, check_task_status, check_add_instance_response\nfrom tools.environment_request import process_environment\nfrom nose.tools import assert_not_equal, assert_equal, assert_true\nfrom tools import http\n\ndataset_utils = DatasetUtils()\n\n\n@step(u'the region \"([^\"]*)\"')\ndef the_region_group1(step, region_name):\n \"\"\" Set region name \"\"\"\n world.region_name = region_name\n\n\n@step(u'I retrieve the service catalog')\ndef i_retrieve_the_service_catalog(step):\n \"\"\" Retrieve service catalog from keystone \"\"\"\n world.response = world.keystone_request.get_token()\n\n\n@step(u'endpoints from service catalog')\ndef endpoints_from_service_catalog(step):\n \"\"\" Configure endpoints to be used from service catalog urls instead of config file \"\"\"\n response = world.keystone_request.get_token()\n body_keystone_response = raw_httplib_request_to_python_dic(response)\n\n world.glance_request.glance_url = 
get_public_endpoint_url_by_type(body_keystone_response,\n 'image', world.region_name)\n world.nova_request.nova_url = get_public_endpoint_url_by_type(body_keystone_response,\n 'compute', world.region_name)\n world.env_requests.paasmanager_url = get_public_endpoint_url_by_type(body_keystone_response,\n 'paas', world.region_name)\n world.inst_requests.paasmanager_url = world.env_requests.paasmanager_url\n\n\n@step(u'the region is in image endpoints list')\ndef the_region_is_in_image_endpoints_list(step):\n \"\"\" Check if region is in keystone (image service) \"\"\"\n assert_equal(world.response.status, 200, \"Error to obtain the token. HTTP status code is not the expected\")\n world.body_keystone_response = raw_httplib_request_to_python_dic(world.response)\n\n # Get img list from response (service catalog)\n if world.region_list is None:\n world.region_list = get_images_regions(world.body_keystone_response)\n\n found = False\n for region in world.region_list:\n if world.region_name == region['region']:\n found = True\n break\n assert_true(found, \"Region {} not found in services catalog (images)\".format(world.region_name))\n\n\n@step(u'I retrieve the list of images')\ndef i_retrieve_the_list_of_images(step):\n \"\"\" Retrieve the list of images \"\"\"\n try:\n world.response = world.glance_request.get_image_list()\n except:\n raise Exception(\"Problem retrieving images from GLANCE ({}: {})\".format(world.region_name,\n world.glance_request.glance_url))\n\n\n@step(u'I retrieve the list of SDC-aware images')\ndef i_retrieve_the_list_of_sdc_aware_images(step):\n \"\"\" Retrieve the list of SDC-Aware images \"\"\"\n try:\n world.response = world.glance_request.get_image_list_by_property('sdc_aware', 'true')\n except:\n raise Exception(\"Problem retrieving images from GLANCE ({}: {})\".format(world.region_name,\n world.glance_request.glance_url))\n\n\n@step(u'I retrieve the list of flavors')\ndef i_retrieve_the_list_of_flavours(step):\n \"\"\" Retrieve the list of flavors \"\"\"\n try:\n world.response = world.nova_request.get_flavour_list()\n except:\n raise Exception(\"Problem retrieving flavors from NOVA ({}: {})\".format(world.region_name,\n world.nova_request.nova_url))\n\n\n@step(u'the region has at least one SDC-aware image')\ndef the_region_has_sdc_aware_images(step):\n \"\"\" Check if the list retrieved has a SDC-aware image at least and save in World var the first one \"\"\"\n assert_equal(world.response.status, 200, \"Error to obtain the images list. HTTP status code is not the expected\")\n world.body_response = raw_httplib_request_to_python_dic(world.response)\n\n # Check if there are images (list > 0)\n number_of_images = get_number_of_images(world.body_response)\n assert_not_equal(number_of_images, 0, \"There are not images in the region \" + world.region_name)\n\n # Get a image from the list. First, try to get the first Centos img\n world.image_sdc_aware_id = get_first_image_in_list(world.body_response, name_filter='Cent')\n if world.image_sdc_aware_id is None:\n # If no centOS images found, get the first in list whatever its type is\n print \"WARNING: No 'Cent' images found. 
It will get the first image found whatever its type is\"\n world.image_sdc_aware_id = get_first_image_in_list(world.body_response)\n\n assert_not_equal(world.image_sdc_aware_id, None, \"Error to obtain a image from the list\")\n\n\n@step(u'the region has images')\ndef the_region_has_images(step):\n \"\"\" Check if the image list retrieved is not empty and save in World var the first one \"\"\"\n the_region_has_sdc_aware_images(step)\n\n\n@step(u'the region has at least one flavor')\ndef the_region_has_at_least_one_flavor(step):\n \"\"\" Check if the flavors list retrieved is not empty and save in World var the first one \"\"\"\n assert_equal(world.response.status, 200, \"Error to obtain the images list. HTTP status code is not the expected\")\n world.body_nova_response = raw_httplib_request_to_python_dic(world.response)\n\n # Check if there are flavors (list > 0)\n number_of_flavors = get_number_of_flavors(world.body_nova_response)\n assert_not_equal(number_of_flavors, 0, \"There are not flavors in the region \" + world.region_name)\n\n # Get a image from the list. First, try to get the first 'small' flavor\n world.flavor_id = get_first_flavor_in_list(world.body_nova_response, 'small')\n if world.flavor_id is None:\n # If no 'small' flavor found, get the first in list whatever its type is\n print \"WARNING: No 'small' flavor found. It will get the first one found whatever its type is\"\n world.flavor_id = get_first_flavor_in_list(world.body_nova_response, 'small')\n\n assert_not_equal(world.flavor_id, None, \"Error to obtain a flavor from the list\")\n\n\n@step(u'the region exists and it has valid images and flavors')\ndef the_region_exists_and_it_has_valid_images_and_flavors(step):\n \"\"\" Complex step: Retrieve and check if region exists in keystone and retrieve and check images and flavors \"\"\"\n # Region is in service catalog list (image)\n i_retrieve_the_service_catalog(step)\n the_region_is_in_image_endpoints_list(step)\n\n # Region has images\n i_retrieve_the_list_of_sdc_aware_images(step)\n try:\n the_region_has_images(step)\n except:\n i_retrieve_the_list_of_images(step)\n the_region_has_images(step)\n\n # Region has flavors\n i_retrieve_the_list_of_flavours(step)\n the_region_has_at_least_one_flavor(step)\n\n\n@step(u'the region exists and it has valid SDC-aware images and flavors')\ndef the_region_exists_and_it_has_valid_images_and_flavors(step):\n \"\"\"\n Complex step: Retrieve and check if region exists in keystone and retrieve and check SDC-aware images and flavors\n \"\"\"\n # Region is in service catalog list (image)\n i_retrieve_the_service_catalog(step)\n the_region_is_in_image_endpoints_list(step)\n\n # Region has images\n i_retrieve_the_list_of_sdc_aware_images(step)\n the_region_has_images(step)\n\n # Region has flavors\n i_retrieve_the_list_of_flavours(step)\n the_region_has_at_least_one_flavor(step)\n\n\n@step(u'a created environment with data:')\ndef a_created_environment_with_data(step):\n \"\"\" Create a environment \"\"\"\n data = dataset_utils.prepare_data(step.hashes[0])\n world.environment_name = data.get(NAME)+world.region_name.replace(\"_\", \"\")\n world.env_requests.add_environment(world.environment_name, data.get(DESCRIPTION))\n\n\n@step(u'a created tiers with data:')\ndef a_list_of_tiers_has_been_defined_with_data(step):\n \"\"\" Create and add tiers to the environment \"\"\"\n world.tiers = []\n for row in step.hashes:\n data = dataset_utils.prepare_data(row)\n\n tier = Tier(data.get(NAME), world.image_sdc_aware_id, tier_flavour=world.flavor_id,\n 
tier_region=world.region_name)\n\n tier.parse_and_add_products(data.get(PRODUCTS))\n\n if NETWORKS in data:\n # Is Neutron available?\n i_retrieve_the_service_catalog(step)\n body_response = raw_httplib_request_to_python_dic(world.response)\n nova_public_url = get_public_endpoint_url_by_type(body_response, 'network', world.region_name)\n if nova_public_url is None:\n raise Exception(\"Networks are not available. Region: {}\".format(world.region_name))\n\n tier.parse_and_add_networks(data.get(NETWORKS))\n\n world.env_requests.add_tier_environment(world.environment_name, tier)\n\n\n@step(u'I request the creation of an instance for that environment using data:')\ndef i_request_the_creation_of_an_instance_using_data(step):\n \"\"\" Create a product instance for the created environment and tiers \"\"\"\n # First, send the request to get the environment on which the instance will be based\n world.env_requests.get_environment(world.environment_name)\n body_env_response = raw_httplib_request_to_python_dic(world.response)\n assert_equal(world.response.status, 200,\n \"Wrong status code received getting environment: %d. Expected: %d. Body content: %s\"\n % (world.response.status, 200, body_env_response))\n\n target_environment = process_environment(body_env_response)\n\n # Then, create the instance\n env_data = dataset_utils.prepare_data(step.hashes[0])\n world.instance_name = env_data.get(NAME)+world.region_name.replace(\"_\", \"\")\n environment_instance = EnvironmentInstance(world.instance_name,\n env_data.get(DESCRIPTION),\n target_environment)\n\n world.inst_requests.add_instance(environment_instance)\n\n\n@step(u'I receive an? \"([^\"]*)\" response(?: with a task)?')\ndef i_receive_a_response_of_type(step, response_type):\n \"\"\" Check task response \"\"\"\n status_code = http.status_codes[response_type]\n check_add_instance_response(world.response, status_code)\n\n\n@step(u'the task ends with \"([^\"]*)\" status')\ndef the_task_ends_with_status(step, status):\n \"\"\" Wait for task execution and check task status \"\"\"\n print \"TEST DATA - Region: {}; Image: {}; Flavor: {}\".format(world.region_name, world.image_sdc_aware_id,\n world.flavor_id)\n check_task_status(world.task_data, status)\n", "id": "9508275", "language": "Python", "matching_score": 3.870877504348755, "max_stars_count": 0, "path": "test/acceptance/integration/e2e/deploy_environment_regions/steps.py" }, { "content": "# -*- coding: utf-8 -*-\n# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U\n#\n# This file is part of FI-WARE project.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with <EMAIL>\n\n__author__ = 'henar'\n\nimport json\nfrom xml.etree.ElementTree import tostring\n\nfrom tools import http, tier\nfrom tools.environment_instance import EnvironmentInstance\nfrom lettuce import world\n\n\nclass EnvironmentInstanceRequest:\n def __init__(self, keystone_url, paas_manager_url,\n tenant, user, password, vdc, 
sdc_url=''):\n self.paasmanager_url = paas_manager_url\n self.sdc_url = sdc_url\n self.vdc = vdc\n self.keystone_url = keystone_url\n\n self.user = user\n self.password = password\n self.tenant = tenant\n\n self.token = self.__get__token()\n\n def __get__token(self):\n return http.get_token(self.keystone_url + '/tokens',\n self.tenant, self.user, self.password)\n\n def __process_env_inst(self, data):\n envIns = EnvironmentInstance(data['blueprintName'], data['description'],\n None, data['status'])\n return envIns\n\n def add_instance(self, environment_instance):\n url = \"%s/%s/%s/%s/%s\" % (self.paasmanager_url, \"envInst/org/FIWARE\",\n \"vdc\", self.vdc, \"environmentInstance\")\n headers = {'X-Auth-Token': <PASSWORD>.token, 'Tenant-Id': self.vdc,\n 'Content-Type': \"application/xml\", 'Accept': \"application/json\"}\n payload = tostring(environment_instance.to_xml())\n world.response = http.post(url, headers, payload)\n\n \"\"\"Store it in the world to tear it down later\"\"\"\n try:\n world.instances.append(environment_instance.blueprint_name)\n except AttributeError:\n world.instances = [environment_instance.blueprint_name]\n\n if world.response.status == 200:\n \"\"\"Wait for the associated task to finish and store its data\"\"\"\n world.task_data = http.wait_for_task(json.loads(world.response.read()), headers)\n\n def delete_instance(self, instance_name):\n url = \"%s/%s/%s/%s/%s/%s\" % (self.paasmanager_url, \"envInst/org/FIWARE\",\n \"vdc\", self.vdc, \"environmentInstance\", instance_name)\n headers = {'X-Auth-Token': self.token, 'Tenant-Id': self.vdc,\n 'Content-Type': \"application/xml\", 'Accept': \"application/json\"}\n world.response = http.delete(url, headers)\n\n \"\"\"Remove it from the world too\"\"\"\n try:\n world.instances.remove(instance_name)\n except:\n pass\n\n if world.response.status == 200:\n \"\"\"Wait for the associated task to finish and store its data\"\"\"\n world.task_data = http.wait_for_task(json.loads(world.response.read()), headers)\n\n def get_instances(self):\n url = \"%s/%s/%s/%s/%s\" % (self.paasmanager_url, \"envInst/org/FIWARE\",\n \"vdc\", self.vdc, \"environmentInstance\")\n headers = {'X-Auth-Token': self.token, 'Tenant-Id': self.vdc,\n 'Content-Type': \"application/xml\", 'Accept': \"application/json\"}\n world.response = http.get(url, headers)\n\n def get_instance(self, instance_name):\n url = \"%s/%s/%s/%s/%s/%s\" % (self.paasmanager_url, \"envInst/org/FIWARE\",\n \"vdc\", self.vdc, \"environmentInstance\", instance_name)\n headers = {'X-Auth-Token': self.token, 'Tenant-Id': self.vdc,\n 'Content-Type': \"application/xml\", 'Accept': \"application/json\"}\n world.response = http.get(url, headers)\n\n\ndef delete_created_instances():\n \"\"\"\n Delete the instances created so far in the tests.\n \"\"\"\n try:\n while len(world.instances) > 0:\n world.inst_requests.delete_instance(world.instances[0])\n del world.instances\n except AttributeError:\n pass\n\n\ndef process_instances(instances):\n \"\"\"\n Process the instances provided as a list of dictionaries.\n :param instances: instances to be processed.\n :return: a list of EnvironmentInstance objects.\n \"\"\"\n processed_instances = []\n if isinstance(instances, list):\n for env in instances:\n processed_instances.append(process_instance(env))\n else:\n # Single instance received\n processed_instances.append(process_instance(instances))\n return processed_instances\n\n\ndef process_instance(instance):\n \"\"\"\n Process the instance provided as a dictionary.\n :param instance: instance 
to be processed.\n :return: a EnvironmentInstance object.\n \"\"\"\n processed_instance = EnvironmentInstance(instance['blueprintName'],\n instance['description'],\n status=instance['status'])\n try:\n tiers = tier.process_tiers(instance['tierDto'])\n processed_instance.add_tiers(tiers)\n except:\n pass\n return processed_instance\n\n\ndef check_add_instance_response(response, expected_status_code):\n \"\"\"\n Check that the response for an add instance request is the\n expected one.\n :param response: Response to be checked.\n :param expected_status_code: Expected status code of the response.\n \"\"\"\n assert response.status == expected_status_code,\\\n \"Wrong status code received: %d. Expected: %d. Body content: %s\"\\\n % (response.status, expected_status_code, response.read())\n\n\ndef check_delete_instance_response(response, expected_status_code):\n \"\"\"\n Check that the response for a delete instance request is the\n expected one.\n :param response: Response to be checked.\n :param expected_status_code: Expected status code of the response.\n \"\"\"\n assert response.status == expected_status_code,\\\n \"Wrong status code received: %d. Expected: %d. Body content: %s\"\\\n % (response.status, expected_status_code, response.read())\n\n\ndef check_get_instances_response(response, expected_status_code,\n expected_instances_number=None):\n \"\"\"\n Check that the response for a get instances request is the\n expected one.\n :param response: Response to be checked.\n :param expected_status_code: Expected status code of the response.\n :param expected_instances_number: Expected number of instances.\n \"\"\"\n assert response.status == expected_status_code,\\\n \"Wrong status code received: %d. Expected: %d. Body content: %s\"\\\n % (response.status, expected_status_code, response.read())\n\n if expected_instances_number is not None:\n data = json.loads(response.read())\n #print data, \"\\n\\n\\n\\n\"\n if expected_instances_number == 0:\n # No content expected when the lists of instances is empty\n assert data == None, \"Unexpected content received: %s\" % data\n else:\n instances = data[\"environmentInstancePDto\"]\n world.response.instances = process_instances(instances)\n\n assert len(world.response.instances) == expected_instances_number,\\\n \"Wrong number of instances received: %d. Expected: %d.\"\\\n % (len(world.response.instances), expected_instances_number)\n\n\ndef check_instance_in_list(instances_list, instance_name, instance_description, tiers_number=0):\n \"\"\"\n Check that a certain instance is in the list of instances provided.\n :param instances_list: List of instances to be checked.\n :param instance_name: Name of the instance to be found.\n :param instance_description: Description of the instance to be found.\n :param env_name: Name of the template environment of the instance to be found.\n :param tiers_number: Number of tiers of the environment to be found.\n \"\"\"\n for inst in instances_list:\n if inst.blueprint_name == instance_name: # Expected instance found\n assert inst.blueprint_description == instance_description,\\\n \"Wrong description received for instance %s: %s. Expected: %s.\"\\\n % (inst.blueprint_name, inst.blueprint_description, instance_description)\n\n assert len(inst.get_tiers()) == tiers_number,\\\n \"Wrong number of tiers received for instance %s: %d. 
Expected: %d.\"\\\n % (inst.blueprint_name, len(inst.get_tiers()), tiers_number)\n\n return\n\n assert False, \"No instance found in the list with name %s\" % (instance_name)\n\n\ndef check_get_instance_response(response, expected_status_code,\n expected_instance_name=None,\n expected_instance_description=None,\n expected_tiers=None):\n \"\"\"\n Check that the response for a get instance request is the\n expected one.\n :param response: Response to be checked.\n :param expected_status_code: Expected status code of the response.\n :param expected_instance_name: Expected name of the instance.\n :param expected_instance_description: Expected description of the instance.\n :param expected_tiers: Expected tiers of the instance.\n \"\"\"\n assert response.status == expected_status_code,\\\n \"Wrong status code received: %d. Expected: %d. Body content: %s\"\\\n % (response.status, expected_status_code, response.read())\n\n if expected_instance_name is not None:\n data = json.loads(response.read())\n #print data, \"\\n\\n\\n\\n\"\n instance = process_instance(data)\n\n assert instance.blueprint_name == expected_instance_name,\\\n \"Wrong name received: %s. Expected: %s.\"\\\n % (instance.blueprint_name, expected_instance_name)\n\n if expected_instance_description is not None:\n assert instance.blueprint_description == expected_instance_description,\\\n \"Wrong description received: %s. Expected: %s.\"\\\n % (instance.blueprint_description, expected_instance_description)\n\n if expected_tiers is not None:\n assert len(instance.get_tiers()) == len(expected_tiers),\\\n \"Wrong number of tiers received: %d. Expected: %d.\"\\\n % (len(instance.get_tiers()), len(expected_tiers))\n\n for expected_tier in expected_tiers:\n # Find the tier that matches each of the expected ones and compare\n received_tier = None\n for tier in instance.get_tiers():\n if tier.name == expected_tier.name:\n received_tier = tier\n break\n\n assert received_tier is not None,\\\n \"Tier not found in response: %s\" % (expected_tier.name)\n\n assert received_tier == expected_tier,\\\n \"The data for tier %s does not match the expected one. Received: %s. Expected: %s.\"\\\n % (received_tier.name, tostring(received_tier.to_xml()), tostring(expected_tier.to_xml()))\n\n\ndef check_task_status(task_data, expected_status):\n \"\"\"\n Check that the status of a task is the expected one, provided its data.\n :param task_data: Dictionary with the task data.\n :param expected_status: Expected status of the task.\n \"\"\"\n assert task_data[\"status\"] == expected_status,\\\n \"Wrong status received: %s. Expected: %s. 
Task data: %s\"\\\n % (task_data[\"status\"], expected_status, task_data)\n", "id": "10300138", "language": "Python", "matching_score": 2.6276776790618896, "max_stars_count": 0, "path": "test/acceptance/tools/environment_instance_request.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U\n#\n# This file is part of FI-WARE project.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with <EMAIL>\n\n\nfrom tools.enviornmentrequest import EnvironmentRequest\nfrom tools.environment_instance import EnvironmentInstance\nfrom tools.enviornment_instance_request import EnvironmentInstanceRequest\n\n\nif __name__ == \"__main__\":\n\n\n# total = len(sys.argv)\n\n # Get the arguments list\n #cmd_args = sys.argv\n\n cmd_args = []\n cmd_args.append('hola')\n cmd_args.append('envjesus50')\n cmd_args.append('bluepjesus50')\n cmd_args.append('tiertest')\n cmd_args.append('mysql=1.2.4')\n\n config = {}\n execfile(\"sdc.conf\", config)\n\n env_request = EnvironmentRequest(config['keystone_url'], config['paasmanager_url'], config['tenant'],\n config['user'], config['password'],\n config['vdc'], config['image'], config['sdc_url'])\n\n instance_request = EnvironmentInstanceRequest(config['keystone_url'], config['paasmanager_url'], config['tenant'],\n config['user'], config['password'],\n config['vdc'], config['sdc_url'])\n\n environment_name = cmd_args[1]\n print environment_name\n blueprint_name = cmd_args[2]\n tier_name = cmd_args[3]\n product_name = cmd_args[4]\n\n print('Create a blueprint Template : ' + environment_name )\n env_request.add_environment(environment_name, 'description')\n print(\" OK\")\n\n print('Create tier: ' + tier_name + \" with products \" + product_name)\n env_request.add_tier_environment(environment_name, tier_name, product_name)\n print(\" OK\")\n\n print('Get Information about the template: ' + environment_name )\n env = env_request.get_environment(environment_name)\n env.to_string()\n print(\" OK\")\n\n print('Deploy an environment Instance: ' + blueprint_name )\n blueprint_instance = EnvironmentInstance(blueprint_name, 'description', env, 'INIT')\n instance_request.add_blueprint_instance(blueprint_instance)\n print ('OK')\n\n print('Delete an environment Instance: ' + blueprint_name )\n instance_request.delete_blueprint_instance(blueprint_name)\n\n print('borrado del blueprint Template: ' + environment_name)\n env_request.delete_environment(environment_name)\n print(\" OK\")\n", "id": "1366450", "language": "Python", "matching_score": 2.4397404193878174, "max_stars_count": 0, "path": "automatization_scripts/test_environment_instance_management.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U\n#\n# This file is part of FI-WARE project.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this 
file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with <EMAIL>\n\n\n__author__ = 'henar'\n\nfrom productrequest import ProductRequest\nimport sys\nimport getopt\nimport argparse\n\ndef usage():\n print \"\"\"\n *****************USAGE******************\n ACTIONS:\n product-add -n <name> -v <version> -m <metadata> -a <arguments> -d <description>\n product-delete -n <name> [-v <version>]\n product-list\n show -n <name> -v <version>\n\n OPTIONS:\n --help -h:\t\tUsage help \n --name -n : \t\tProduct Name\n --description -d :\tProduct Description\n --version -v:\t\tProduct Release version\n\t--metadata -m:\t\tProduct Metadata\n\t--arguments -a:\t\tProduct Arguments\n\n EXAMPLE:\n\tpython sdc-catalog.py product-add -a 'openports=1026 27017 27018 27019 28017; key1=valuekey1' -n orion -v 0.6.0\n \"\"\"\n sys.exit();\n\nif __name__ == \"__main__\":\n if (len(sys.argv) < 2 ):\n usage();\n #definimos las opciones\n name = ''\n version = ''\n description = ''\n arguments = ''\n metadata = ''\n\n parser = argparse.ArgumentParser()\n #argumento obligatorio\n parser.add_argument(\"option\", type=str, help=\"type of action\")\n #argumento opcional\n parser.add_argument(\"-n\", \"--name\", help=\"product name\")\n parser.add_argument(\"-d\", \"--description\", help=\"product description\")\n parser.add_argument(\"-a\", \"--arguments\", help=\"product atributes\")\n parser.add_argument(\"-m\", \"--metadata\", help=\"product metadata\")\n parser.add_argument(\"-v\", \"--version\", help=\"product version\")\n\n args = parser.parse_args()\n\n if args.name:\n name = args.name\n if args.version:\n version = args.version\n if args.description:\n description = args.description\n if args.name:\n metadata = args.metadata\n if args.name:\n arguments = args.arguments\n\n config = {}\n execfile(\"sdc.conf\", config)\n g = ProductRequest(config['keystone_url'], config['sdc_url'], config['tenant'], config['user'], config['password'])\n\n if args.option == \"product-list\":\n g.get_products()\n\n elif args.option == \"product-delete\":\n if (name == ''):\n usage()\n if (version != ''):\n g.delete_product_release(name, version)\n else:\n g.delete_product(name)\n\n elif args.option == \"product-add\":\n if (name == ''):\n usage()\n else:\n g.add_product(name, description, arguments, metadata)\n if (version != ''):\n g.add_product_release(name, version)\n\n elif args.option == \"show\":\n if ((name != '') & (version != '')):\n g.get_product_info(name, version)\n else:\n usage();\n\n", "id": "9478125", "language": "Python", "matching_score": 1.9374442100524902, "max_stars_count": 0, "path": "automatization_scripts/tools/sdc-catalog.py" }, { "content": "# -*- coding: utf-8 -*-\n# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U\n#\n# This file is part of FI-WARE project.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless 
required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with <EMAIL>\n\n__author__ = 'jfernandez'\n\nimport http\nfrom lettuce import world\n\n\nclass ProductSdcRequest:\n def __init__(self, keystone_url, sdc_url, tenant, user, password, vdc):\n \"\"\"\n Init class vars and get initial token from keystone\n \"\"\"\n self.sdc_url = sdc_url\n self.vdc = vdc\n self.keystone_url = keystone_url\n\n self.user = user\n self.password = password\n self.tenant = tenant\n\n self.token = self.__get__token()\n\n def __get__token(self):\n \"\"\" Get token from keystone \"\"\"\n return http.get_token(self.keystone_url + '/tokens', self.tenant, self.user, self.password)\n\n def __get_product_sdc(self, url):\n \"\"\" Get product from SDC \"\"\"\n headers = {'X-Auth-Token': self.token, 'Tenant-Id': self.vdc,\n 'Content-Type': \"application/xml\"}\n return http.get(url, headers)\n\n def __add_product_sdc(self, url, product_sdc_payload):\n \"\"\" Add product to SDC catalog \"\"\"\n headers = {'X-Auth-Token': self.token, 'Tenant-Id': self.vdc,\n 'Content-Type': \"application/xml\"}\n return http.post(url, headers, product_sdc_payload)\n\n def __delete_product_sdc(self, url):\n \"\"\" Delete product from SDC catalog \"\"\"\n headers = {'X-Auth-Token': self.token, 'Tenant-Id': self.vdc,\n 'Content-Type': \"application/xml\"}\n return http.delete(url, headers)\n\n def __delete_node(self, url):\n headers = {'X-Auth-Token': self.token, 'Tenant-Id': self.vdc,\n 'Content-Type': \"application/xml\"}\n return http.delete(url, headers)\n\n def get_product(self, product_name):\n \"\"\" Get product from SDC catalog \"\"\"\n url = \"%s/%s/%s/%s\" % (self.sdc_url, \"catalog\", \"product\", product_name)\n\n world.response = self.__get_product_sdc(url)\n\n def get_product_release(self, product_name, product_release):\n \"\"\" Get product release from SDC catalog \"\"\"\n url = \"%s/%s/%s/%s/%s/%s\" % (self.sdc_url, \"catalog\", \"product\", product_name, \"release\", product_release)\n\n world.response = self.__get_product_sdc(url)\n\n def add_product(self, product_name, product_description):\n \"\"\" ADD product to SDC catalog \"\"\"\n url = \"%s/%s/%s\" % (self.sdc_url, \"catalog\", \"product\")\n\n payload = \"<product><name>%s</name><description>%s</description></product>\" \\\n % (product_name, product_description)\n world.response = self.__add_product_sdc(url, payload)\n\n def add_product_with_installator(self, product_name, product_description, installator):\n \"\"\" ADD product to SDC catalog with s custom installator \"\"\"\n url = \"%s/%s/%s\" % (self.sdc_url, \"catalog\", \"product\")\n\n payload = \"<product><name>%s</name><description>%s</description>\" \\\n \"<metadatas><key>installator</key><value>%s</value></metadatas></product>\" \\\n % (product_name, product_description, installator)\n world.response = self.__add_product_sdc(url, payload)\n\n def add_product_with_attributes(self, product_name, product_description, attribute_list):\n \"\"\" ADD product to SDC catalog with attributes \"\"\"\n url = \"%s/%s/%s\" % (self.sdc_url, \"catalog\", \"product\")\n\n attribute_list_xml = \"\"\n for attribute in attribute_list:\n attribute_list_xml += 
\"<attributes><key>%s</key><value>%s</value><type>%s</type></attributes>\" % \\\n (attribute['key'], attribute['value'], attribute['type'])\n payload = \"<product><name>%s</name><description>%s</description>%s</product>\" \\\n % (product_name, product_description, attribute_list_xml)\n world.response = self.__add_product_sdc(url, payload)\n\n def add_product_with_metadatas(self, product_name, product_description, metadata_list):\n \"\"\" ADD product to SDC catalog with metadatas \"\"\"\n url = \"%s/%s/%s\" % (self.sdc_url, \"catalog\", \"product\")\n\n metadata_list_xml = \"\"\n for metadata in metadata_list:\n metadata_list_xml += \"<metadatas><key>%s</key><value>%s</value></metadatas>\" % \\\n (metadata['key'], metadata['value'])\n payload = \"<product><name>%s</name><description>%s</description>%s</product>\" \\\n % (product_name, product_description, metadata_list_xml)\n world.response = self.__add_product_sdc(url, payload)\n\n def add_product_with_attributes_and_installator(self, product_name, product_description, attribute_list,\n installator):\n \"\"\" Get product release from SDC catalog \"\"\"\n url = \"%s/%s/%s\" % (self.sdc_url, \"catalog\", \"product\")\n\n attribute_list_xml = \"\"\n for attribute in attribute_list:\n if 'type' in attribute:\n attribute_list_xml += \"<attributes><key>%s</key><value>%s</value><type>%s</type></attributes>\" % \\\n (attribute['key'], attribute['value'], attribute['type'])\n else:\n attribute_list_xml += \"<attributes><key>%s</key><value>%s</value></attributes>\" % \\\n (attribute['key'], attribute['value'])\n installator_metadata = \"<metadatas><key>installator</key><value>%s</value></metadatas>\" % installator\n payload = \"<product><name>%s</name><description>%s</description>%s%s</product>\" \\\n % (product_name, product_description, installator_metadata, attribute_list_xml)\n world.response = self.__add_product_sdc(url, payload)\n\n def add_product_release(self, product_name, product_release):\n \"\"\" Add product release to SDC catalog \"\"\"\n url = \"%s/%s/%s/%s/%s\" % (self.sdc_url, \"catalog\", \"product\", product_name, \"release\")\n\n payload = \"<productReleaseDto><version>%s</version></productReleaseDto>\" % product_release\n world.response = self.__add_product_sdc(url, payload)\n\n def delete_product(self, product_name):\n \"\"\" Delete product from SDC catalog \"\"\"\n url = \"%s/%s/%s/%s\" % (self.sdc_url, \"catalog\", \"product\", product_name)\n\n world.response = self.__delete_product_sdc(url)\n\n def delete_product_release(self, product_name, product_release):\n \"\"\" Delete product release from SDC catalog \"\"\"\n url = \"%s/%s/%s/%s/%s/%s\" % (self.sdc_url, \"catalog\", \"product\", product_name, \"release\", product_release)\n\n world.response = self.__delete_product_sdc(url)\n\n def delete_node(self, node_name):\n \"\"\" Delete node from Chef-Server and Puppet-Master \"\"\"\n url = \"%s/%s/%s/%s/%s\" % (self.sdc_url, \"vdc\", self.vdc, \"chefClient\", node_name)\n world.response = self.__delete_node(url)\n\n def create_product_and_release(self, product_name, product_release, installator=None):\n \"\"\" Helper: Create product and product release \"\"\"\n self.get_product(product_name)\n if world.response.status is not 200:\n if installator:\n self.add_product_with_installator(product_name, 'QA Tests - PaaS Manager', installator)\n else:\n self.add_product(product_name, 'QA Tests - PaaS Manager')\n self.add_product_release(product_name, product_release)\n else:\n self.get_product_release(product_name, product_release)\n if 
world.response.status is not 200:\n self.add_product_release(product_name, product_release)\n world.product_and_release_list.append({'product_name': product_name, 'product_release': product_release})\n\n def create_product_and_release_with_attributes(self, product_name, product_release, attribute_list):\n \"\"\" Helper: Create product with attributes and it release \"\"\"\n self.get_product(product_name)\n if world.response.status is not 200:\n self.add_product_with_attributes(product_name, 'QA Tests - PaaS Manager', attribute_list)\n self.add_product_release(product_name, product_release)\n else:\n self.get_product_release(product_name, product_release)\n if world.response.status is not 200:\n self.add_product_release(product_name, product_release)\n world.product_and_release_list.append({'product_name': product_name, 'product_release': product_release})\n\n def create_product_and_release_with_metadatas(self, product_name, product_release, metadata_list):\n \"\"\" Helper: Create product with custom metadatas and it release \"\"\"\n self.get_product(product_name)\n if world.response.status is not 200:\n self.add_product_with_metadatas(product_name, 'QA Tests - PaaS Manager - Metadatas', metadata_list)\n self.add_product_release(product_name, product_release)\n else:\n self.get_product_release(product_name, product_release)\n if world.response.status is not 200:\n self.add_product_release(product_name, product_release)\n world.product_and_release_list.append({'product_name': product_name, 'product_release': product_release})\n\n def create_product_and_release_with_attributes_and_installator(self, product_name, product_release, attribute_list,\n installator):\n \"\"\" Helper: Create product with attributes and installator, and its release \"\"\"\n self.get_product(product_name)\n if world.response.status is not 200:\n self.add_product_with_attributes_and_installator(product_name, 'QA Tests - PaaS Manager', attribute_list,\n installator)\n self.add_product_release(product_name, product_release)\n else:\n self.get_product_release(product_name, product_release)\n if world.response.status is not 200:\n self.add_product_release(product_name, product_release)\n world.product_and_release_list.append({'product_name': product_name, 'product_release': product_release})\n\n def delete_product_and_release(self, product_name, product_release):\n \"\"\" Helper: Delete product and product release \"\"\"\n self.get_product_release(product_name, product_release)\n if world.response.status is 200:\n self.delete_product_release(product_name, product_release)\n self.delete_product(product_name)\n else:\n self.get_product(product_name)\n if world.response.status is 200:\n self.delete_product(product_name)\n # world.product_and_release_list.remove({'product_name': product_name, 'product_release': product_release})\n", "id": "1609581", "language": "Python", "matching_score": 2.8155174255371094, "max_stars_count": 0, "path": "test/acceptance/tools/product_sdc_request.py" }, { "content": "# -*- coding: utf-8 -*-\n# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U\n#\n# This file is part of FI-WARE project.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR 
CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with <EMAIL>\n\n__author__ = 'jfernandez'\n\nfrom lettuce import world\nfrom lettuce_tools.dataset_utils.dataset_utils import DatasetUtils\n\ndataset_utils = DatasetUtils()\n\n\ndef product_is_created_in_sdc_with_attributes(step, product_name, product_version):\n \"\"\"\n Lettuce Step. Register the product in SDC and save register data in the world (product_list_with_attributes).\n Attributes will be defined in the step dataset.\n :param step: Lettuce step data.\n :param product_name: Name of the product\n :param product_version: Version of the product\n :return: None\n \"\"\"\n product_data = dict()\n product_data['name'] = product_name\n\n attribute_list = list()\n for dataset_row in step.hashes:\n attribute_list.append(dataset_utils.prepare_data(dataset_row))\n product_data['attributes'] = attribute_list\n world.product_list_with_attributes.append(product_data)\n\n # Create product in SDC\n world.product_sdc_request.create_product_and_release_with_attributes_and_installator(product_name, product_version,\n attribute_list,\n world.product_installator)\n\n\ndef product_is_created_in_sdc_with_metadatas(step, product_name, product_version):\n \"\"\"\n Lettuce Step. Register the product in SDC and save register data in the world (product_list_with_attributes).\n Metadatas will be defined in the step dataset.\n :param step: Lettuce step data.\n :param product_name: Name of the product\n :param product_version: Version of the product\n :return: None\n \"\"\"\n\n product_data = dict()\n\n # Look for the product in the list (retrieve it if this one is already created for this test)\n for produc_with_attributes in world.product_list_with_attributes:\n if produc_with_attributes['name'] == product_name:\n product_data = produc_with_attributes\n break\n\n if len(product_data) == 0:\n product_data['name'] = product_name\n\n metadata_list = list()\n for dataset_row in step.hashes:\n metadata_list.append(dataset_utils.prepare_data(dataset_row))\n product_data['metadatas'] = metadata_list\n world.product_list_with_attributes.append(product_data)\n\n # Create product in SDC\n world.product_sdc_request.create_product_and_release_with_metadatas(product_name, product_version, metadata_list)\n", "id": "7247453", "language": "Python", "matching_score": 3.7535555362701416, "max_stars_count": 0, "path": "test/acceptance/common_steps/sdc_product_provisioning_steps.py" }, { "content": "# -*- coding: utf-8 -*-\n# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U\n#\n# This file is part of FI-WARE project.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with <EMAIL>\n\n__author__ = 'jfernandez'\n\nfrom lettuce import world\nfrom lettuce_tools.dataset_utils.dataset_utils 
import DatasetUtils\n\nfrom tools.tier import Tier\nfrom tools.constants import NAME, PAAS, TIER_IMAGE, PRODUCTS, NETWORKS, TIER_REQUEST_REGION, TIER_REQUEST_IMAGE\n\ndataset_utils = DatasetUtils()\n\n\ndef process_following_instance_attributes(step, product_name):\n \"\"\"\n Lettuce step. Adds to the world the instance attributes to create the product data for the environment.\n Attributes will be defined in the step dataset and they will be added to the list paas_product_list_with_attributes\n after being processed.\n :param step: Lettuce step data.\n :param product_name: Name of the product to add attributes\n :return:\n \"\"\"\n paas_instance_attributes = dict()\n paas_instance_attributes['name'] = product_name\n\n attribute_list = list()\n for dataset_row in step.hashes:\n attribute_list.append(dataset_utils.prepare_data(dataset_row))\n paas_instance_attributes['attributes'] = attribute_list\n world.paas_product_list_with_attributes.append(paas_instance_attributes)\n\n\ndef process_the_list_of_tiers(step):\n \"\"\"\n Lettuce step. This function parses dataset to prepare each tier data.\n Tiers list will be returned with the tiers parsed from dataset\n :param step: Lettuce step with all data about tiers\n :return: List of processed tiers from step data\n \"\"\"\n tier_list = list()\n for row in step.hashes:\n data = dataset_utils.prepare_data(row)\n tier = Tier(data.get(NAME), world.config[PAAS][TIER_IMAGE])\n tier.parse_and_add_products(data.get(PRODUCTS))\n\n if TIER_REQUEST_IMAGE in data:\n tier.tier_image = data.get(TIER_REQUEST_IMAGE)\n\n if TIER_REQUEST_REGION in data:\n tier.region = data.get(TIER_REQUEST_REGION)\n\n # For each product, check if there are defined attributes\n for paas_product_with_attributes in world.paas_product_list_with_attributes:\n for attribute in paas_product_with_attributes['attributes']:\n attribute_type = attribute['type'] if 'type' in attribute else None\n tier.add_attribute_to_product(paas_product_with_attributes['name'], attribute['key'],\n attribute['value'], attribute_type)\n\n tier.parse_and_add_networks(data.get(NETWORKS))\n tier_list.append(tier)\n\n return tier_list\n", "id": "7327879", "language": "Python", "matching_score": 2.4896979331970215, "max_stars_count": 0, "path": "test/acceptance/common_steps/paas_environment_provisioning.py" }, { "content": "# -*- coding: utf-8 -*-\r\n# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U\r\n#\r\n# This file is part of FI-WARE project.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n#\r\n# You may obtain a copy of the License at:\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\r\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n#\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n#\r\n# For those usages not covered by the Apache version 2.0 License please\r\n# contact with <EMAIL>\r\n# Names of the properties in the {environment}-properties.json files\r\n\r\n# Configuration values\r\nENVIRONMENT = \"environment\"\r\nENVIRONMENT_TESTFILES = \"test_files\"\r\nNAME = \"name\"\r\nLOGS_PATH = \"logs_path\"\r\nPAAS = \"paas\"\r\nKEYSTONE_URL = \"keystone_url\"\r\nSDC_URL = \"sdc_url\"\r\nPAASMANAGER_URL = \"paasmanager_url\"\r\nGLANCE_URL = 
\"glance_url\"\r\nNOVA_URL = \"nova_url\"\r\nVDC = \"vdc\"\r\nTENANT = \"tenant\"\r\nUSER = \"user\"\r\nPASSWORD = \"password\"\r\nTENANT_ALT = \"tenant_alt\"\r\nVDC_ALT = \"vdc_alt\"\r\nUSER_ALT = \"user_alt\"\r\nPASSWORD_ALT = \"password_alt\"\r\nTIER_IMAGE = \"tier_image\"\r\nTIER_NUM_MIN = \"tier_num_min\"\r\nTIER_NUM_MAX = \"tier_num_max\"\r\nTIER_NUM_INITIAL = \"tier_num_initial\"\r\nTIER_FLAVOUR = \"tier_flavour\"\r\nTIER_KEYPAIR = \"tier_keypair\"\r\nTIER_FLOATINGIP = \"tier_floatingip\"\r\nTIER_REGION = \"tier_region\"\r\nREGION_DEFAULT_SHAREDNET_PROPERTY = \"region_default_sharednet\"\r\nCONFIG_SUPPORT_USER = \"support_user\"\r\nCONFIG_SUPPORT_KEY_FILE = \"support_key_file\"\r\n\r\n\r\n# METADATA constants\r\nMETADATA_NID = \"nid\"\r\nMETADATA_NID_NOVA_KEY = \"nid\"\r\n\r\n# Auxiliary constants used in .feature files or interfaces\r\n# Request values\r\nNAME = \"name\"\r\nDESCRIPTION = \"description\"\r\nPRODUCTS = \"products\"\r\nNETWORKS = \"networks\"\r\nTIER_REQUEST_IMAGE = \"image\"\r\nTIER_REQUEST_NUM_MIN = \"minimumNumberInstances\"\r\nTIER_REQUEST_NUM_MAX = \"maximumNumberInstances\"\r\nTIER_REQUEST_NUM_INITIAL = \"initialNumberInstances\"\r\nTIER_REQUEST_FLAVOUR = \"flavour\"\r\nTIER_REQUEST_KEYPAIR = \"keypair\"\r\nTIER_REQUEST_FLOATINGIP = \"floatingip\"\r\nTIER_REQUEST_REGION = \"region\"\r\n\r\n# E2E Testing\r\nMAX_CHECKS_SSH_CONNECTION = 5\r\nSLEEP_TIME_CHECKS = 5 # seconds\r\nPRODUCT_FILE_NAME_FORMAT = u'{product_name}_{product_version}_{installator}'\r\nINSTALLATION_PRODUCT_DIR = u'/tmp'\r\n", "id": "1718136", "language": "Python", "matching_score": 2.2850823402404785, "max_stars_count": 0, "path": "test/acceptance/tools/constants.py" }, { "content": "# -*- coding: utf-8 -*-\n# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U\n#\n# This file is part of FI-WARE project.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with <EMAIL>\n\n__author__ = 'arobres, jfernandez'\n\n# SDC CONFIGURATION\nSDC_PROTOCOL = 'https'\nSDC_IP = '172.16.58.3'\nSDC_PORT = '8443'\nHEADERS = {'Accept': '', 'Tenant-Id': '', 'X-Auth-Token': ''}\n\n# CHEF-SERVER CONFIGURATION\nCONFIG_CHEF_SERVER_IP = 'chef-server.dev-havana.fi-ware.org'\nCONFIG_CHEF_SERVER_USERNAME = '******** SET ********'\nCONFIG_CHEF_SERVER_PASSWORD = '******** <PASSWORD> ********'\n\n# PUPPET-MASTER CONFIGURATION\nCONFIG_PUPPET_MASTER_IP = 'puppet-master.dev-havana.fi-ware.org'\nCONFIG_PUPPET_MASTER_USERNAME = '******** SET ********'\nCONFIG_PUPPET_MASTER_PASSWORD = '******** <PASSWORD> ********'\nCONFIG_PUPPETDB_PROTOCOL = 'http'\nCONFIG_PUPPETDB_IP = 'puppet-master.dev-havana.fi-ware.org'\nCONFIG_PUPPETDB_PORT = '8080'\n\n\n#AUTHENTICATION\nAUTHENTICATION_HEADERS = {'content-type': 'application/json', 'Accept': 'application/json'}\nTENANT_NAME_VALUE = '******** SET ********'\nUSERNAME_VALUE = '******** SET ********'\nPWD_VALUE = '******** <PASSWORD> ********'\nKEYSTONE_URL = 
'http://172.16.31.10:4731/v2.0/tokens'\n\n#E2E TEST: DEFAULT CONFIGURATION\nCONFIG_PRODUCT_NAME_CHEF = 'qa-test-product-chef-01'\nCONFIG_PRODUCT_NAME_CHEF_2 = 'qa-test-product-chef-02'\nCONFIG_PRODUCT_VERSION_CHEF = '1.2.3'\n\nCONFIG_PRODUCT_NAME_PUPPET = 'qa-test-product-puppet-01'\nCONFIG_PRODUCT_NAME_PUPPET_2 = 'qa-test-product-puppet-02'\nCONFIG_PRODUCT_VERSION_PUPPET = '1.2.3'\n\nCONFIG_VM_HOSTNAME = 'qa-test-sp1'\nCONFIG_VM_IP = '172.16.17.32'\nCONFIG_VM_FQN = 'qa-test-sp.openstacklocal'\nCONFIG_VM_USERNAME = '******** SET ********'\nCONFIG_VM_PASSWORD = '******** <PASSWORD> ********'\n\n#PROVISION HOST\nPROVISION_ROOT_PATH = u'/tmp/{}'\n\n# Defines the wait time for operation (install / uninstall)\nWAIT_FOR_OPERATION = 300\nWAIT_FOR_INSTALLATION = 30\n", "id": "2018969", "language": "Python", "matching_score": 2.540891170501709, "max_stars_count": 0, "path": "test/acceptance/commons/configuration.py" }, { "content": "__author__ = 'arobres'\n\n#AUTHENTICATION CONSTANTS\n\nAUTH = u'auth'\nTENANT_NAME = u'tenantName'\nUSERNAME = u'username'\nPASSWORD = u'password'\nACCESS = u'access'\nTOKEN = u'token'\nTENANT = u'tenant'\nID = u'id'\n\n#PRODUCT_PROPERTIES\nPRODUCT_NAME = u'name'\nPRODUCT_DESCRIPTION = u'description'\nPRODUCT = u'product'\nPRODUCTS = u'products'\nPRODUCT_ATTRIBUTES = u'attributes'\nPRODUCT_METADATAS = u'metadatas'\n\nMETADATA = u'metadata'\nATTRIBUTE = u'attribute'\nKEY = u'key'\nVALUE = u'value'\nDESCRIPTION = u'description'\nATTRIBUTE_TYPE = u'type'\nATTRIBUTE_TYPE_PLAIN = u'Plain'\nATTRIBUTE_TYPE_IPALL = u'IPALL'\n\n\n#HEADERS\nCONTENT_TYPE = u'content-type'\nCONTENT_TYPE_JSON = u'application/json'\nCONTENT_TYPE_XML = u'application/xml'\nAUTH_TOKEN_HEADER = u'X-Auth-Token'\nTENANT_ID_HEADER = u'Tenant-Id'\nACCEPT_HEADER = u'Accept'\nACCEPT_HEADER_XML = u'application/xml'\nACCEPT_HEADER_JSON = u'application/json'\n\n#PRODUCT RELEASE\nPRODUCT_RELEASE = u'productRelease'\nPRODUCT_RELEASE_LIST = u'productReleases'\nVERSION = u'version'\n\n#INCORRECT PARAMETERS\nLONG_ID = 'long' * 64 + 'a' #STRING WITH 257 characters\n\n#DEFAULT_METADATA\nNUMBER_OF_DEFAULT_SDC_METADATA = 6\nDEFAULT_METADATA = {\"metadata\": [{\"key\": \"image\", \"value\": \"\"},\n {\"key\": \"cookbook_url\", \"value\": ''}, {\"key\": \"cloud\", \"value\": \"yes\"},\n {\"key\": \"installator\", \"value\": \"chef\"}, {\"key\": \"open_ports\", \"value\": \"80 22\"}]}\n\nDEFAULT_ATTRIBUTE = {\"attribute\": [{\"key\": \"custom_att_01\", \"value\": \"att_01_default\", \"type\": \"Plain\"},\n {\"key\": \"custom_att_02\", \"value\": \"att_02_default\", \"type\": \"Plain\"}]}\n\nPRODUCT_RELEASE_WITHOUT_RELEASES_RESPONSE = u'<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>' \\\n u'<productReleases></productReleases>'\n\n# FABRIC AND PRODUCT INSTALLATION\nFABRIC_RESULT_EXECUTE = u'<local-only>'\n\nPRODUCT_FILE_NAME_FORMAT = u'{product_name}_{product_version}_{installator}'\nPRODUCT_INSTALLATION_FILE_CONTENT = u'Operation: install; Product: {product_name}; Version: {product_version}; Att01: {att_01}; Att02: {att_02}'\nPRODUCT_INSTALLATION_ATT1_DEFAULT = u'att_01_default'\nPRODUCT_INSTALLATION_ATT2_DEFAULT = u'att_02_default'\n\n#PRODUCT_INSTALLATION_PARAMETERS\n\nPRODUCT_INSTANCE_LIST = u'productInstanceDtoes'\nPRODUCT_INSTANCE = u'productInstanceDto'\nPRODUCT_INSTANCE_NAME = u'name'\nPRODUCT_INSTANCE_STATUS = u'status'\nPRODUCT_INSTANCE_VM = u'vm'\nPRODUCT_INSTANCE_VM_IP = u'ip'\nPRODUCT_INSTANCE_VM_FQN = u'fqn'\nPRODUCT_INSTANCE_VM_OSTYPE = u'osType'\nPRODUCT_INSTANCE_VM_HOSTNAME = 
u'hostname'\nPRODUCT_INSTANCE_ATTRIBUTES = u'attributes'\n\n\n# METADATAS VALUES\n\nINSTALLATOR = u'installator'\nINSTALLATOR_VALUE = (u'puppet', u'chef')\nMETADATA_TENANT_ID = u'tenant_id'\n\n#TASKS\nTASK = u'task'\nTASK_HREF = u'href'\nTASK_STARTTIME = u'startTime'\nTASK_STATUS = u'status'\nTASK_DESCRIPTION = u'description'\nTASK_VDC = u'vdc'\nTASK_ERROR = u'error'\nTASK_ERROR_MINOR_CODE = u'minorErrorCode'\nTASK_URL = u'@href'\nSTATUS = u'status'\nSTATUS_XML = u'@status'\nVDC = u'vdc'\nTASK_STATUS_VALUE_RUNNING = u'RUNNING'\nTASK_STATUS_VALUE_SUCCESS = u'SUCCESS'\nTASK_STATUS_VALUE_ERROR = u'ERROR'\nTASK_STATUS_VALUE_INSTALLED = u'INSTALLED'\nTASK_STATUS_VALUE_UNINSTALLED = u'UNINSTALLED'\n\n#PRODUCTANDRELEASE VALUES\nPRODUCTANDRELEASE_LIST = u'productAndReleaseDtoes'\nPRODUCTANDRELEASE = u'productAndReleaseDto'\n\n#ATTRIBUTE FROM CONFIG FILE (for loading values from config_file)\nCONFIG_FILE = u'${CONFIG_FILE}'\n\n", "id": "8966823", "language": "Python", "matching_score": 3.5019843578338623, "max_stars_count": 0, "path": "test/acceptance/commons/constants.py" }, { "content": "# -*- coding: utf-8 -*-\n\n# Copyright 2015 Telefonica Investigación y Desarrollo, S.A.U\n#\n# This file is part of FIWARE project.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with <EMAIL>\n__author__ = 'jesus.movilla'\n\nimport sys\nimport argparse\nimport time\nimport requests\nimport datetime\nimport xmltodict\n\nfrom sdcclient.client import SDCClient\nfrom paasmanagerclient.client import PaaSManagerClient\nfrom utils.logger_utils import get_logger\n\nlogger = get_logger(__name__)\n\n# HEADERS\nX_AUTH_TOKEN = \"X-Auth-Token\"\nTENANT_ID = \"Tenant-Id\"\nACCEPT = \"Accept\"\nAPPLICATION_JSON = \"application/json\"\n\n#HTTP STATUS CODE\nHTTPS_PROTOCOL =\"https\"\nHTTP_STATUSCODE_NO_CONTENT = 204\nHTTP_STATUSCODE_OK = 200\n\n# GLANCE SERVICE\nGLANCE_SERVICE_TYPE = \"glance\"\nGLANCE_ENDPOINT_TYPE = \"publicURL\"\nGLANCE_REQUEST_IMAGES_SDC_AWARE_URL = '/images/detail?property-sdc_aware=true&&type=fiware:utils'\n\n# IMAGE BODY ELEMENTS\nIMAGE_BODY_NAME =\"name\"\nIMAGE_BODY_ID =\"id\"\nIMAGE_BODY_IMAGES =\"images\"\n\n# TASK BODY ELEMENTS\nTASK_BODY_ROOT = \"task\"\nTASK_BODY_HREF = \"href\"\nTASK_BODY_STATUS = \"status\"\nTASK_BODY_ERROR = \"error\"\nTASK_BODY_ERROR_MAJORCODE=\"majorErrorCode\"\nTASK_BODY_ERROR_MESSAGE=\"message\"\nTASK_BODY_ERROR_MINORCODE=\"minorErrorCode\"\n\n#TASK STATUS\nTASK_STATUS_ERROR = \"ERROR\"\nTASK_STATUS_SUCCESS = \"SUCCESS\"\nTASK_STATUS_RUNNING = \"RUNNING\"\n\n#PRODUCTANRELEASE BODY ELEMENTS\nPRODUCTANDRELEASE_BODY_ROOT = \"productAndReleaseDto\";\nPRODUCTANDRELEASE_BODY_PRODUCT = \"product\";\nPRODUCTANDRELEASE_BODY_PRODUCTNAME = \"name\";\nPRODUCTANDRELEASE_BODY_PRODUCTVERSION = \"version\";\nPRODUCTANDRELEASE_BODY_METADATAS = \"metadatas\";\nPRODUCTANDRELEASE_BODY_METADATA_KEY = \"key\";\nPRODUCTANDRELEASE_BODY_METADATA_VALUE = 
\"value\";\nPRODUCTANDRELEASE_BODY_METADATA_INSTALLATOR = \"installator\";\nPRODUCTANDRELEASE_BODY_METADATA_INSTALLATOR_CHEF_VALUE = \"chef\";\nPRODUCTANDRELEASE_BODY_METADATA_IMAGE = \"image\";\n\n\n#IMAGE DICTIONARY KEYS\nDICT_IMAGE_NAME =\"image_name\"\nDICT_IMAGE_ID = \"image_id\"\n\n#IMAGE_PRODUCTRELEASE DICTIONARY KEYS\nDICT_IMAGE_PRODUCTRELEASE_PRODUCTRELEASE = \"product_release\"\nDICT_IMAGE_PRODUCTRELEASE_PRODUCTNAME = \"product_name\"\nDICT_IMAGE_PRODUCTRELEASE_PRODUCTVERSION = \"product_version\"\n\nTIME_INTERVAL_TO_DEPLOY = 60\nTIME_INTERVAL_TO_DELETE = 45\n\ndef main(argv=None):\n \"\"\"\n Getting parameters\n :param argv:\n \"\"\"\n parser = argparse.ArgumentParser(description='Testing product installation using paasmanager')\n parser.add_argument(\"-u\", \"--os-username\", dest='user', help='valid username', required=True)\n parser.add_argument(\"-p\", \"--os-password\", dest='password', help='valid password', required=True)\n parser.add_argument(\"-t\", \"--os-tenant-id\", dest='tenant_id', help=\"user tenant_id\", required=True)\n parser.add_argument(\"-r\", \"--os-region-name\", dest='region_name', default='Spain2', help='the name of region')\n parser.add_argument(\"-k\", \"--os-auth-url\", dest=\"auth_url\", default='http://cloud.lab.fiware.org:4731/v2.0',\n help='url to keystone <host or ip>:<port>/v2.0')\n parser.add_argument(\"-e\", \"--envName\", dest='envName', default='EnvName', help='valid environment name')\n parser.add_argument(\"-f\", \"--reportfile\", dest='reportfile', default='/var/log/recipes_checking_report.log',\n help='Name of the Report File')\n\n\n args = parser.parse_args()\n logger.info(args)\n\n # This is the file where to find the report about the tests of BlueprintInstance installation with\n # all the recipes available in the Chef-Server\n report_file = open(args.reportfile, 'w')\n\n check_recipes (report_file, envName= args.envName,\n auth_url=args.auth_url,\n tenant_id=args.tenant_id,\n user=args.user,\n password=<PASSWORD>,\n region_name=args.region_name)\n\ndef check_recipes(report_file, envName, auth_url, tenant_id, user, password, region_name):\n\n report_file.writelines(\"========================================================================================\\n\")\n report_file.write(\"Platform: \" + auth_url + \". Region: \" + region_name + \". 
Username: \" + user\n                       + \" Tenant-ID: \" + tenant_id + \"\\n\")\n    report_file.writelines(\"========================================================================================\\n\")\n\n    logger.info(\"SDC call to get the list of products available in catalog\")\n\n    # Call SDC to get the list of products.\n    sdc_client = SDCClient(user, password, tenant_id, auth_url, region_name)\n    productandrelease_client = sdc_client.getProductAndReleaseResourceClient()\n    allproductreleases,_ = productandrelease_client.get_allproductandrelease()\n\n    logger.debug(str(allproductreleases['productAndReleaseDto']))\n    logger.info(\"There are \" + str(len(allproductreleases['productAndReleaseDto'])) + \" product Releases in SDC\")\n    report_file.writelines(\"There are \" + str(len(allproductreleases['productAndReleaseDto']))\n                           + \" product Releases in SDC\")\n\n    paasmanager_client = PaaSManagerClient(user, password, tenant_id, auth_url, region_name )\n    glance_url = paasmanager_client.get_paasmanager_endpoint_from_keystone(region_name, GLANCE_SERVICE_TYPE,\n                                                                           GLANCE_ENDPOINT_TYPE)\n    logger.info(\"Loading image list from glance : \" + glance_url + \" Region: \" + region_name)\n    report_file.writelines(\"Loading image list from glance : \" + glance_url + \" Region: \" + region_name + \"\\n\")\n    report_file.write (\"------------------------------------------------------------------------------------------- \\n\")\n\n    response_images = find_all_images_sdc_aware(glance_url, region_name, paasmanager_client.token, tenant_id)\n    logger.debug(response_images)\n\n    sdc_aware_images = []\n    for i in response_images[IMAGE_BODY_IMAGES]:\n        image_name = i[IMAGE_BODY_NAME]\n        image_id = i[IMAGE_BODY_ID]\n        image_dict = {DICT_IMAGE_NAME: image_name, DICT_IMAGE_ID: image_id}\n        logger.info(\"Image id: \" + image_dict['image_id']+ \"| Image name: \" + image_dict['image_name'] + \"\\n\")\n        report_file.writelines(\"Image id: \" + image_dict['image_id']+ \"| Image name: \" + image_dict['image_name'] + \"\\n\")\n        sdc_aware_images.append(image_dict)\n    report_file.write (\"------------------------------------------------------------------------------------------- \\n\")\n\n    logger.info(\"Building all combinations images - product releases\")\n\n    images_productReleases = get_product_releases_images (allproductreleases, sdc_aware_images)\n\n    logger.info(\"Product Releases to TEST in different images:\")\n    for i in images_productReleases:\n        logger.info(\"image: \" + i[DICT_IMAGE_NAME] + \". Product Release: \" + i['product_release'] + \"\\n\")\n        report_file.write (\"image: \" + i[DICT_IMAGE_NAME] + \". 
Product Release: \" + i['product_release'] + \"\\n\")\n\n number_of_productrelease_images = images_productReleases.__len__()\n logger.info(\"there are \" + str(number_of_productrelease_images) + \" combinations products - images\")\n report_file.write (\"------------------------------------------------------------------------------------------- \\n\")\n report_file.write (\"Product Releases to TEST in different images: \")\n report_file.write (\" There are \" + str(number_of_productrelease_images) + \" combinations products - images \\n\")\n report_file.write (\"------------------------------------------------------------------------------------------- \\n\")\n\n environment_client = paasmanager_client.getEnvironmentResourceClient()\n tier_client = paasmanager_client.getTierResourceClient()\n environment_instance_client = paasmanager_client.getEnvironmentInstanceResourceClient()\n task_client = paasmanager_client.getTaskResourceClient()\n\n report_file.write (\"Product Releases Execution (recipes) Report: \\n\")\n report_file.write (\"------------------------------------------- \\n\")\n\n index=0\n for image_productrelease in images_productReleases:\n product_name = image_productrelease[DICT_IMAGE_PRODUCTRELEASE_PRODUCTNAME]\n product_version = image_productrelease[DICT_IMAGE_PRODUCTRELEASE_PRODUCTVERSION]\n image_id = image_productrelease[DICT_IMAGE_ID]\n image_name = image_productrelease[DICT_IMAGE_NAME]\n\n env_name = envName + str(index)\n tier_name = \"tierName\" + env_name\n blueprint_name = env_name + \"Instance\"\n\n index = index + 1\n logger.info (\"--------------------------------------------------------------------------------------\")\n logger.info (\"Product: \" + product_name + \"-\" + product_version + \" with image name: \" + image_name\n + \" and imageid: \" + image_id)\n logger.info (\"--------------------------------------------------------------------------------------\")\n\n logger.info(\"Create an Environment \" + env_name )\n\n environment = environment_client.create_environment(env_name, \"For testing purposes\")\n if (environment.status_code != HTTP_STATUSCODE_NO_CONTENT) :\n logger.info (\"Error creating Environment \" + env_name + \" Description: \" + environment._content)\n\n environment_dict, _ = environment_client.get_environment(env_name)\n logger.debug(str(environment_dict))\n\n logger.info(\"Add Tier tierName\" + env_name + \" to the Environment \" + env_name)\n tier = tier_client.create_tier(environment_name = env_name,\n name = \"tierName\" + env_name,\n product_name = product_name,\n product_version = product_version,\n image = image_id,\n region_name = region_name)\n tier_dict, _ = tier_client.get_tier(env_name, tier_name)\n logger.debug(\"Tier created : \" + str(tier_dict))\n\n logger.info(\"Creating Environment Instance \" + blueprint_name)\n\n initial_time_deploy = time.strftime(\"%H:%M:%S\")\n initial_time_deploy_datetime = datetime.datetime.now()\n\n environment_instance_task_dict, environment_instance_response = \\\n environment_instance_client.create_environment_instance (name=blueprint_name,\n description=\"For Testing purposes\",\n environment_name=env_name,\n environment_description = \"For Testing purposes env\",\n tier_name = tier_name,\n product_name = product_name,\n product_version = product_version,\n image = image_id,\n region_name = region_name)\n\n if (environment_instance_response.status_code != HTTP_STATUSCODE_OK):\n logger.info (\"Error creating Environment Instance \" + blueprint_name + \" Description: \"\n + 
environment_instance_response._content)\n\n logger.info(\"Waiting for Environment Instance \" + env_name + \"Instance to be created\")\n task_url = getTaskUrl(environment_instance_task_dict)\n task_id = paasmanager_client.get_taskid(task_url)\n task, _ = task_client.get_task(task_id)\n task_status = task[TASK_BODY_STATUS]\n\n while task_status==TASK_STATUS_RUNNING:\n time.sleep(TIME_INTERVAL_TO_DEPLOY)\n task, _ = task_client.get_task(task_id)\n task_status = task[TASK_BODY_STATUS]\n logger.info(\"Polling every \" + str(TIME_INTERVAL_TO_DEPLOY) +\" seconds - Task status: \" + task_status)\n\n final_time_deploy = time.strftime(\"%H:%M:%S\")\n final_time_deploy_datetime = datetime.datetime.now()\n interval_deploy = final_time_deploy_datetime - initial_time_deploy_datetime\n\n if task_status==TASK_STATUS_SUCCESS:\n logger.info (\"Image name: \" + image_name + \". Product Release: \" + product_name + \"-\" + product_version +\n \" SUCCESS to deploy in \" + str(interval_deploy.seconds) + \" seconds \\n\")\n report_file.write (\"Image name: \" + image_name + \". Product Release: \" + product_name + \"-\"\n + product_version + \" SUCCESS to deploy in \" + str(interval_deploy.seconds)\n + \" seconds \\n\")\n elif task_status == TASK_STATUS_ERROR:\n task_error = task[TASK_BODY_ERROR]\n major_error_desc = task_error[TASK_BODY_ERROR_MAJORCODE]\n error_message = task_error[TASK_BODY_ERROR_MESSAGE]\n minorErrorCode = task_error[TASK_BODY_ERROR_MINORCODE]\n\n logger.info (\"Image name: \" + image_name + \". Product Release: \" + product_name + \"-\" + product_version +\n \" ERROR to deploy in \" + str(interval_deploy.seconds) + \" seconds \\n\")\n logger.info(\"ERROR Major Error Description : \" + str(major_error_desc))\n logger.info(\"ERROR Message : \" + str(error_message))\n logger.info(\"ERROR Minor Error Code : \" + str(minorErrorCode))\n report_file.write (\"Image name: \" + image_name + \". Product Release: \" + product_name + \"-\"\n + product_version + \" ERROR to deploy in \" + str(interval_deploy.seconds)\n + \" seconds \\n\")\n report_file.write(\"ERROR Major Error Description : \" + str(major_error_desc) + \"\\n\")\n report_file.write(\"ERROR Message : \" + str(error_message) + \"\\n\")\n report_file.write(\"ERROR Minor Error Code : \" + str(minorErrorCode) + \"\\n\")\n\n logger.info(\"Deleting Environment Instance \" + blueprint_name)\n\n initial_time_delete = time.strftime(\"%H:%M:%S\")\n initial_time_delete_datetime = datetime.datetime.now()\n\n environment_instance_task_dict, environment_instance_response = \\\n environment_instance_client.delete_environment_instance(blueprint_name)\n\n logger.info(\"Waiting for Environment Instance \" + blueprint_name + \"Instance to be deleted\")\n task_url = getTaskUrl(environment_instance_task_dict)\n task_id = paasmanager_client.get_taskid(task_url)\n task, _ = task_client.get_task(task_id)\n task_status = task[TASK_BODY_STATUS]\n\n while task_status==TASK_STATUS_RUNNING:\n time.sleep(TIME_INTERVAL_TO_DELETE)\n task, _ = task_client.get_task(task_id)\n task_status = task[TASK_BODY_STATUS]\n logger.info(\"Polling every \" + str(TIME_INTERVAL_TO_DELETE) + \" seconds - Task status: \" + task_status)\n\n final_time_delete = time.strftime(\"%H:%M:%S\")\n final_time_delete_datetime = datetime.datetime.now()\n interval_delete = final_time_delete_datetime - initial_time_delete_datetime\n\n if task_status==TASK_STATUS_SUCCESS:\n logger.info (\"Image name: \" + image_name + \". 
Product Release: \" + product_name + \"-\" + product_version +\n \" SUCCESS to delete in \" + str(interval_delete.seconds) + \" seconds \\n\")\n elif task_status ==TASK_STATUS_ERROR:\n logger.info (\"Image name: \" + image_name + \". Product Release: \" + product_name + \"-\" + product_version +\n \" ERROR to delete in \" + str(interval_delete.seconds) + \" seconds \\n\")\n\n logger.info(\"Deleting Tier \" + tier_name)\n tier_client.delete_tier(env_name, tier_name)\n\n logger.info(\"Deleting Environment \" + env_name)\n environment_client.delete_environment(env_name)\n\n logger.info(\"Environment \" + env_name + \" FINISHED\")\n\ndef getTaskUrl (environment_instance_task_dict):\n return environment_instance_task_dict[TASK_BODY_HREF]\n\ndef get_product_releases_images (allproductreleases, sdc_aware_images):\n images_productReleases = []\n\n for i in allproductreleases[PRODUCTANDRELEASE_BODY_ROOT]:\n product_name = i[PRODUCTANDRELEASE_BODY_PRODUCT][PRODUCTANDRELEASE_BODY_PRODUCTNAME]\n product_version = i[PRODUCTANDRELEASE_BODY_PRODUCTVERSION]\n product_release = product_name + \"_\" + product_version\n\n if i[PRODUCTANDRELEASE_BODY_PRODUCT].get(PRODUCTANDRELEASE_BODY_METADATAS): # Checks if there are metadatas in the product\n for j in i[PRODUCTANDRELEASE_BODY_PRODUCT][PRODUCTANDRELEASE_BODY_METADATAS]:\n product_metadatas = i[PRODUCTANDRELEASE_BODY_PRODUCT][PRODUCTANDRELEASE_BODY_METADATAS]\n try :\n metadata_key = j[PRODUCTANDRELEASE_BODY_METADATA_KEY]\n metadata_value = j[PRODUCTANDRELEASE_BODY_METADATA_VALUE]\n except TypeError:\n metadata_key = i[PRODUCTANDRELEASE_BODY_PRODUCT][PRODUCTANDRELEASE_BODY_METADATAS][PRODUCTANDRELEASE_BODY_METADATA_KEY]\n metadata_value = i[PRODUCTANDRELEASE_BODY_PRODUCT][PRODUCTANDRELEASE_BODY_METADATAS][PRODUCTANDRELEASE_BODY_METADATA_VALUE]\n\n if ((metadata_key == PRODUCTANDRELEASE_BODY_METADATA_INSTALLATOR) and\n (metadata_value == PRODUCTANDRELEASE_BODY_METADATA_INSTALLATOR_CHEF_VALUE)):\n for k in product_metadatas:\n try :\n metadata_key = k[PRODUCTANDRELEASE_BODY_METADATA_KEY]\n metadata_value = k[PRODUCTANDRELEASE_BODY_METADATA_VALUE]\n except TypeError:\n metadata_key = i[PRODUCTANDRELEASE_BODY_PRODUCT][PRODUCTANDRELEASE_BODY_METADATAS][PRODUCTANDRELEASE_BODY_METADATA_KEY]\n metadata_value = i[PRODUCTANDRELEASE_BODY_PRODUCT][PRODUCTANDRELEASE_BODY_METADATAS][PRODUCTANDRELEASE_BODY_METADATA_VALUE]\n\n if (metadata_key == PRODUCTANDRELEASE_BODY_METADATA_IMAGE) and \\\n ((metadata_value == \"\") or (metadata_value is None)):\n for sdc_aware_image in sdc_aware_images:\n image_id = sdc_aware_image[DICT_IMAGE_ID]\n image_name = sdc_aware_image[DICT_IMAGE_NAME]\n image_productRelease = { DICT_IMAGE_ID : image_id,\n DICT_IMAGE_NAME : image_name,\n DICT_IMAGE_PRODUCTRELEASE_PRODUCTRELEASE : product_release,\n DICT_IMAGE_PRODUCTRELEASE_PRODUCTNAME: product_name,\n DICT_IMAGE_PRODUCTRELEASE_PRODUCTVERSION: product_version\n }\n images_productReleases.append(image_productRelease)\n else:\n for sdc_aware_image in sdc_aware_images:\n if ((metadata_key == PRODUCTANDRELEASE_BODY_METADATA_IMAGE) and\n (sdc_aware_image[DICT_IMAGE_ID] in metadata_value)):\n image_id = sdc_aware_image[DICT_IMAGE_ID]\n image_name = sdc_aware_image[DICT_IMAGE_NAME]\n image_productRelease = { DICT_IMAGE_ID : image_id,\n DICT_IMAGE_NAME : image_name,\n DICT_IMAGE_PRODUCTRELEASE_PRODUCTRELEASE : product_release,\n DICT_IMAGE_PRODUCTRELEASE_PRODUCTNAME: product_name,\n DICT_IMAGE_PRODUCTRELEASE_PRODUCTVERSION: product_version\n }\n images_productReleases.append(image_productRelease)\n 
else: #products without metadatas are included for all sdc_aware_images in the Catalog\n for sdc_aware_image in sdc_aware_images:\n image_id = sdc_aware_image[DICT_IMAGE_ID]\n image_name = sdc_aware_image[DICT_IMAGE_NAME]\n image_productRelease = { DICT_IMAGE_ID : image_id,\n DICT_IMAGE_NAME : image_name,\n DICT_IMAGE_PRODUCTRELEASE_PRODUCTRELEASE : product_release,\n DICT_IMAGE_PRODUCTRELEASE_PRODUCTNAME: product_name,\n DICT_IMAGE_PRODUCTRELEASE_PRODUCTVERSION: product_version\n }\n images_productReleases.append(image_productRelease)\n\n return images_productReleases\n\ndef find_all_images_sdc_aware(url_base, region, token, tenant_id):\n logger.debug(\"find all images in \" + region + '->' + url_base)\n\n url = url_base + GLANCE_REQUEST_IMAGES_SDC_AWARE_URL\n headers = {ACCEPT: APPLICATION_JSON,\n X_AUTH_TOKEN: '' + token + '',\n TENANT_ID: '' + tenant_id + ''\n }\n return sendGet(headers, url)\n\ndef sendGet(headers, url):\n if url.startswith (HTTPS_PROTOCOL):\n response = requests.get(url, headers=headers, verify=False)\n else:\n response = requests.get(url, headers=headers, verify=False)\n response_json = response.json()\n return response_json\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n", "id": "5178017", "language": "Python", "matching_score": 4.417797565460205, "max_stars_count": 0, "path": "scripts/recipes_checking/recipes_checking.py" }, { "content": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n#\n# Copyright 2014 Telefónica Investigación y Desarrollo, S.A.U\n#\n# This file is part of FI-WARE project.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with <EMAIL>\n#\nimport sys\nimport http\nimport requests\nimport json\nimport os\n\nEXCLUDED_REGIONS=['Prague2', 'Waterford', 'Lannion', 'Crete', 'Karlskrona', 'Budapest', 'Spain2', 'Zurich', 'Prague']\ndef get_images_filter():\n \"\"\"It prints all products with metatada image, and obtain all the\n images in all regions.\n \"\"\"\n KEYSTONE = os.environ.get('OS_KEYSTONE')\n TENANT_ID = os.environ.get('OS_TENANT_ID')\n USERNAME = os.environ.get('OS_USERNAME')\n PASSWORD = <PASSWORD>('OS_PASSWORD')\n\n token = get_token(KEYSTONE,\n TENANT_ID,\n USERNAME,\n PASSWORD)\n endpoints = get_endpoints(KEYSTONE,\n TENANT_ID,\n USERNAME,\n PASSWORD)\n\n products = get_product_with_image_filtered(endpoints['sdc']['Spain2'],\n token, TENANT_ID)\n\n for product in products:\n image_list = product[1].split(' ')\n image_ids = ''\n for image in image_list:\n try:\n try:\n image_ids.index(image)\n continue\n except:\n image_name = check_image_in_spain(\n endpoints['image']['Spain2'], image, token)\n if image_name is None:\n continue\n image_ids = image_ids + ' ' + image\n for endpoint_glace in endpoints['image']:\n print endpoint_glace\n\t\t if endpoint_glace in EXCLUDED_REGIONS:\n continue\n image_id = get_image_id_another_region(\n endpoints['image'][endpoint_glace],\n image_name, token)\n if image_id is None:\n continue\n image_ids = 
image_ids + ' ' + image_id\n update_metadata_image (endpoints['sdc']['Spain2'],\n token, TENANT_ID, product[0], image_ids)\n except Exception, e:\n print 'Error with image ' + image + ' ' + e.message\n print 'Product ' + product[0] + ' ' + image_ids\n\n\ndef get_token(url_base, tenant_id, user, password):\n \"\"\"It obtains a valid token.\n :param url_base: keystone url\n :param tenand_id: the id of the tenant\n :param user: the user\n :param paassword: the password\n \"\"\"\n url = 'http://' + url_base + '/v2.0/tokens'\n headers = {'Content-Type': 'application/json'}\n payload = '{\"auth\":{\"tenantId\":\"' + tenant_id +\\\n '\",\"passwordCredentials\":{\"username\":\"'\\\n + user + '\",\"password\":\"' + password + '\"}}}'\n response = requests.post(url, headers=headers, data=payload)\n if response.status_code != 200:\n print 'error to obtain the token ' + str(response.status_code)\n exit(1)\n response_json = response.json()\n token = response_json['access']['token']['id']\n return token\n\n\ndef get_sdc_url(url_base, tenant_id, user, password):\n \"\"\"It get the SDC url\n :param url_base: keystone url\n :param tenand_id: the id of the tenant\n :param user: the user\n :param paassword: the password\n \"\"\"\n get_url(url_base, tenant_id, user, password, 'sdc', 'Spain')\n\n\ndef get_glance_url(url_base, tenant_id, user, password, region):\n \"\"\"It get the glance url\n :param url_base: keystone url\n :param tenand_id: the id of the tenant\n :param user: the user\n :param paassword: the password\n \"\"\"\n get_url(url_base, tenant_id, user, password, 'image', region)\n\n\ndef get_url(url_base, tenant_id, user, password, type, region):\n \"\"\"It get the url for a concrete service\n :param url_base: keystone url\n :param tenand_id: the id of the tenant\n :param user: the user\n :param paassword: the password\n :param type: the type of service\n :param region: the region\n \"\"\"\n url = 'http://' + url_base + '/v2.0/tokens'\n headers = {'Accept': 'application/json'}\n payload = {'auth': {'tenantName': '' + tenant_id + '',\n 'passwordCredentials':\n {'username': '' + user + '',\n 'password': '' + password + ''}}}\n try:\n response = requests.post(url, headers=headers,\n data=json.dumps(payload))\n response_json = response.json()\n services = response_json['access']['serviceCatalog']\n except Exception as e:\n raise Exception('Error to obtain a image ' + e.message)\n\n for service in services:\n if service['type'] == type and service['region'] == region:\n for endpoint in service['endpoints']:\n return endpoint['publicURL']\n\n\ndef get_endpoints(url_base, tenant_id, user, password):\n \"\"\"It get the endpoints\n :param url_base: keystone url\n :param tenand_id: the id of the tenant\n :param user: the user\n :param paassword: the password\n \"\"\"\n url = 'http://' + url_base + '/v2.0/tokens'\n headers = {'Content-Type': 'application/json'}\n payload = {'auth': {'tenantId': '' + tenant_id + '',\n 'passwordCredentials': {'username':\n '' + user + '', 'password': '' +\n password + ''}}}\n try:\n response = requests.post(url, headers=headers,\n data=json.dumps(payload))\n response_json = response.json()\n services = response_json['access']['serviceCatalog']\n except Exception as e:\n raise Exception('Error to obtain a image ' + e.message)\n\n endpoints = {}\n for service in services:\n service_endpoints = {}\n for endpoint in service['endpoints']:\n service_endpoints.update({endpoint['region']:\n endpoint['publicURL']})\n endpoints.update({service['type']: service_endpoints})\n return 
endpoints\n\n\ndef get_product_with_image_filtered(sdc_url, token, vdc):\n    \"\"\"It gets the products that have an image filter defined\n    :param sdc_url: the sdc url\n    :param token: the valid token\n    :param vdc: the user\n    \"\"\"\n    url = \"%s/%s\" % (sdc_url, \"catalog/product\")\n    headers = {'X-Auth-Token': token, 'Tenant-Id': vdc,\n               'Accept': \"application/json\"}\n\n    response = http.get(url, headers)\n\n    if response.status != 200:\n        print 'error to get the product with image filtered ' + str(response.status)\n        sys.exit(1)\n    else:\n        data = json.loads(response.read())\n        if data is None:\n            return None\n        products = []\n        for product in data:\n            try:\n                for metadata in product['metadatas']:\n                    if metadata == 'value':\n                        if product['metadatas']['key'] == 'image'\\\n                        and product['metadatas']['value'] != '':\n                            value = []\n                            value.append(product['name'])\n                            value.append(product['metadatas']['value'])\n                            products.append(value)\n\n                    if metadata['key'] == 'image' and metadata['value'] != '':\n                        value = []\n                        value.append(product['name'])\n                        value.append(metadata['value'])\n                        products.append(value)\n\n            except:\n                continue\n        return products\n\n\ndef check_image_in_spain(glance_url, id, token):\n    \"\"\"It checks whether the image is available in Spain\n    :param glance_url: the glance url\n    :param token: the valid token\n    :param id: image id\n    \"\"\"\n    url = glance_url + '/images?property-sdc_aware=true'\n    headers = {'Accept': 'application/json', 'X-Auth-Token': token}\n\n    try:\n        response = requests.get(url, headers=headers)\n        response_json = response.json()\n        for image in response_json['images']:\n            if image['id'] == id:\n                return image['name']\n        return None\n    except Exception as e:\n        raise Exception('Error to obtain a image ' + e.message)\n\n\ndef get_image_id_another_region(glance_url, image_name, token):\n    \"\"\"It obtains the image id in other regions\n    :param glance_url: the glance url\n    :param token: the valid token\n    :param image_name: image name\n    \"\"\"\n    url = glance_url + '/images?property-sdc_aware=true'\n    headers = {'Accept': 'application/json', 'X-Auth-Token': token}\n    try:\n        response = requests.get(url, headers=headers)\n        response_json = response.json()\n        for image_info in response_json['images']:\n            if image_info['name'] == image_name:\n                return image_info['id']\n        return None\n    except Exception as e:\n        return None\n\n\ndef update_metadata_image(sdc_url, token, vdc, product, metadata_image):\n    \"\"\"It updates the product metadata for the image filter\n    :param sdc_url: the sdc url\n    :param token: the valid token\n    :param metadata_image: image ids for the metadata value\n    :param product: product name\n\n    \"\"\"\n    print 'update metadata' \n    print product\n    url = sdc_url+ \"/catalog/product/\"+product\n    print url\n    headers = {'X-Auth-Token': token, 'Tenant-Id': vdc,\n               'Accept': \"application/json\",\n               'Content-Type': 'application/json'}\n    print headers\n    response = http.get(url, headers)\n    print url\n    if response.status != 200:\n        print 'error to get the product ' + str(response.status)\n        return\n    else:\n        \n        payload = '{\"key\":\"image\",\"value\":\"' + metadata_image + '\"}'\n        print payload\n        response = http.put(url + \"/metadatas/image\", headers, payload)\n        print response\n        if response.status != 200:\n            print 'error to update the product ' + product \\\n                  + ' ' + str(response.status)\n\n\nif __name__ == \"__main__\":\n    get_images_filter()\n", "id": "531583", "language": "Python", "matching_score": 3.4049246311187744, "max_stars_count": 0, "path": "sdc_utilities/update_image_filter.py" }, { "content": "# -*- coding: utf-8 -*-\n# Copyright 2014 Telefonica 
Investigación y Desarrollo, S.A.U\n#\n# This file is part of FI-WARE project.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with <EMAIL>\n\n__author__ = 'jfernandez'\n\nimport http\nimport json\n\n\nclass KeystoneRequest:\n def __init__(self, keystone_url, tenant, user, password, vdc=None):\n \"\"\"\n Class constructor. Initializes class attributes.\n :param keystone_url: Keystone URL\n :param tenant: Fiware Tenant name\n :param user: Fiware User names\n :param password: <PASSWORD>\n :param vdc: TenantId\n \"\"\"\n self.vdc = vdc\n self.keystone_url = keystone_url\n\n self.user = user\n self.password = password\n self.tenant = tenant\n\n def __get__token(self, url, payload):\n \"\"\"\n Builds the request to get a token from Keystone\n :param url: Full URL to execute POST method\n :param payload: Body of the request\n :return: HTTPlib response\n \"\"\"\n headers = {'Content-Type': 'application/json',\n 'Accept': \"application/json\"}\n return http.post(url, headers, payload)\n\n def get_token(self):\n \"\"\"\n Executes request for getting the auth token from Keystone\n :return: HTTPlib response\n \"\"\"\n url = \"{}/{}\".format(self.keystone_url, 'tokens')\n payload = {\"auth\": {\"tenantName\": self.tenant,\n \"passwordCredentials\": {\"username\": self.user, \"password\": <PASSWORD>}}}\n\n return self.__get__token(url, json.dumps(payload))\n\n\ndef get_token_value(body_response):\n \"\"\"\n Gets token value from Keystone response\n :param body_response: Keystone response (/token)\n :return: Token ID\n \"\"\"\n return body_response['access']['token']['id']\n\n\ndef get_public_endpoint_url_by_type(body_response, endpoint_type, region_name):\n \"\"\"\n Get the public endpoint for service in a region by service TYPE\n :param body_response: Keystone response (/token)\n :param endpoint_type: Service type\n :param region_name: Name of the region\n :return: Public URL or None if not found\n \"\"\"\n service_list = body_response['access']['serviceCatalog']\n\n public_url = None\n for service in service_list:\n if service['type'] == endpoint_type:\n for endpoint in service['endpoints']:\n if endpoint['region'] == region_name:\n public_url = endpoint['publicURL']\n\n return public_url\n\n\ndef get_public_endpoint_url_by_name(body_response, endpoint_name, region_name):\n \"\"\"\n Get the public endpoint for service in a region by service NAME\n :param body_response: Keystone response (/token)\n :param endpoint_name: Service name\n :param region_name: Name of the region\n :return: Public URL or None if not found\n \"\"\"\n service_list = body_response['access']['serviceCatalog']\n\n public_url = None\n for service in service_list:\n if service['name'] == endpoint_name:\n for endpoint in service['endpoints']:\n if endpoint['region'] == region_name:\n public_url = endpoint['publicURL']\n\n return public_url\n\n\ndef get_images_regions(body_response):\n \"\"\"\n Gets the list of regions that have been found in 
the Image service with its public URL\n :param body_response: Keystone response (/token)\n :return: List of regions found with name and public URL\n \"\"\"\n service_list = body_response['access']['serviceCatalog']\n\n regions = []\n for service in service_list:\n if service['type'] == 'image':\n for endpoint in service['endpoints']:\n regions.append({'region': endpoint['region'], 'public_url': endpoint['publicURL']})\n return regions\n", "id": "9393567", "language": "Python", "matching_score": 2.8663113117218018, "max_stars_count": 0, "path": "test/acceptance/tools/keystone_request.py" }, { "content": "# -*- coding: utf-8 -*-\n\n# Copyright 2015 Telefonica Investigación y Desarrollo, S.A.U\n#\n# This file is part of FIWARE project.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with <EMAIL>\n\n\nimport uuid\nimport re\n\nfrom keystoneclient.v2_0 import Client as KeystoneClient\n\nfrom utils.rest_client_utils import HEADER_REPRESENTATION_XML, HEADER_CONTENT_TYPE, HEADER_TRANSACTION_ID\nfrom utils.logger_utils import get_logger\nfrom sdcclient.productandrelease_resource_client import ProductAndReleaseResourceClient\n\nlogger = get_logger(__name__)\n\n# HEADERS\nX_AUTH_TOKEN = \"X-Auth-Token\"\nTENANT_ID = \"Tenant-Id\"\n\n# TRANSACTION ID\nTRANSACTION_ID_PATTERN = \"qa/{uuid}\"\n\n# SERVICE\nSDC_SERVICE_TYPE = \"sdc\"\nSDC_ENDPOINT_TYPE = \"publicURL\"\n\n\ndef generate_transaction_id():\n \"\"\"\n Generate a transaction ID value following defined pattern.\n :return: New transactionId\n \"\"\"\n\n return TRANSACTION_ID_PATTERN.format(uuid=uuid.uuid4())\n\n\nclass SDCClient():\n\n headers = dict()\n keystone_client = None\n\n def __init__(self, username, password, tenant_id, auth_url, region_name, service_type=SDC_SERVICE_TYPE,\n endpoint_type=SDC_ENDPOINT_TYPE):\n \"\"\"\n Init Nova-Client. 
Url will be loaded from Keystone Service Catalog (publicURL, compute service)\n :param username: Fiware username\n :param password: <PASSWORD>\n :param tenant_id: Fiware Tenant ID\n :param auth_url: Keystore URL\n :param region_name: Fiware Region name\n :param service_type: SDC Service type in Keystone (paasmanager by default)\n :param endpoint_type: SDC Endpoint type in Keystone (publicURL by default)\n :return: None\n \"\"\"\n\n logger.info(\"Init SDC Client\")\n logger.debug(\"Client parameters: Username: %s, Password: %s, TenantId: %s, AuthURL: %s, RegionName: %s, \"\n \"ServiceType: %s, EndpointType: %s\", username, password, tenant_id, auth_url, region_name,\n service_type, endpoint_type)\n self.tenant_id = tenant_id\n\n self.__init_keystone_client__(username, password, tenant_id, auth_url)\n self.token = self.get_auth_token()\n self.init_headers(self.token, self.tenant_id)\n\n self.endpoint_url = self.get_sdc_endpoint_from_keystone(region_name, service_type, endpoint_type)\n\n def __init_keystone_client__(self, username, password, tenant_id, auth_url):\n \"\"\"\n Init the keystone client to request token and endpoint data\n :param string username: Username for authentication.\n :param string password: Password for authentication.\n :param string tenant_id: Tenant id.\n :param string auth_url: Keystone service endpoint for authorization.\n :param string region_name: Name of a region to select when choosing an\n endpoint from the service catalog.\n :return None\n \"\"\"\n\n logger.debug(\"Init Keystone Client\")\n self.keystone_client = KeystoneClient(username=username, password=password, tenant_id=tenant_id,\n auth_url=auth_url)\n\n def get_auth_token(self):\n \"\"\"\n Get token from Keystone\n :return: Token (String)\n \"\"\"\n\n logger.debug(\"Getting auth Token\")\n return self.keystone_client.auth_ref['token']['id']\n\n def get_sdc_endpoint_from_keystone(self, region_name, service_type, endpoint_type):\n \"\"\"\n Get the endpoint of SDC from Keystone Service Catalog\n :param region_name: Name of the region\n :param service_type: Type of service (Endpoint name)\n :param endpoint_type: Type of the URL to look for\n :return:\n \"\"\"\n\n logger.debug(\"Getting SDC endpoint\")\n endpoint = None\n for service in self.keystone_client.auth_ref['serviceCatalog']:\n if service['name'] == service_type:\n for endpoint in service['endpoints']:\n if endpoint['region'] == region_name:\n endpoint = endpoint[endpoint_type]\n break\n break\n logger.debug(\"SDC endpoint (Service: %s, Type: %s, Region: %s) is: %s\", service_type, endpoint_type,\n region_name, endpoint_type)\n return endpoint\n\n def init_headers(self, x_auth_token, tenant_id, content_type=HEADER_REPRESENTATION_XML,\n transaction_id=generate_transaction_id()):\n \"\"\"\n Init header to values (or default values)\n :param x_auth_token: Token from Keystone for tenant_id (OpenStack)\n :param tenant_id: TenantId (OpenStack)\n :param content_type: Content-Type header value. By default application/xml\n :param transaction_id: txId header value. 
By default, generated value by generate_transaction_id()\n :return: None\n \"\"\"\n\n logger.debug(\"Init headers\")\n if content_type is None:\n if HEADER_CONTENT_TYPE in self.headers:\n del(self.headers[HEADER_CONTENT_TYPE])\n else:\n self.headers.update({HEADER_CONTENT_TYPE: content_type})\n\n if transaction_id is None:\n if HEADER_TRANSACTION_ID in self.headers:\n del(self.headers[HEADER_TRANSACTION_ID])\n else:\n self.headers.update({HEADER_TRANSACTION_ID: transaction_id})\n\n self.headers.update({X_AUTH_TOKEN: x_auth_token})\n\n self.headers.update({TENANT_ID: tenant_id})\n\n logger.debug(\"Headers: \" + str(self.headers))\n\n def set_headers(self, headers):\n \"\"\"\n Set header.\n :param headers: Headers to be used by next request (dict)\n :return: None\n \"\"\"\n\n logger.debug(\"Setting headers: \" + str(headers))\n self.headers = headers\n\n def getProductAndReleaseResourceClient(self):\n \"\"\"\n Create an API resource REST client\n :return: Rest client for 'Environment' API resource\n \"\"\"\n split_regex = \"(.*)://(.*):(\\d*)/(.*)\"\n regex_matches = re.search(split_regex, self.endpoint_url)\n\n logger.info(\"Creating ProductAndReleaseResourceClient\")\n\n return ProductAndReleaseResourceClient(protocol=regex_matches.group(1), host=regex_matches.group(2),\n port=regex_matches.group(3), tenant_id=self.tenant_id,\n resource=regex_matches.group(4), headers=self.headers)\n\n\n", "id": "12562918", "language": "Python", "matching_score": 2.7263481616973877, "max_stars_count": 0, "path": "python-sdcclient/sdcclient/client.py" }, { "content": "# -*- coding: utf-8 -*-\n\n# Copyright 2015 Telefonica Investigación y Desarrollo, S.A.U\n#\n# This file is part of FIWARE project.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with <EMAIL>\n\n\nimport requests\nimport xmltodict\nimport xmldict\nfrom json import JSONEncoder\nfrom logger_utils import get_logger, log_print_request, log_print_response\n\nrequests.packages.urllib3.disable_warnings()\nlogger = get_logger(\"restClientUtils\")\n\n\n# HEADERS\nHEADER_CONTENT_TYPE = u'content-type'\nHEADER_ACCEPT = u'accept'\nHEADER_REPRESENTATION_JSON = u'application/json'\nHEADER_REPRESENTATION_XML = u'application/xml'\nHEADER_REPRESENTATION_TEXTPLAIN = u'text/plain'\nHEADER_AUTH_TOKEN = u'X-Auth-Token'\nHEADER_TENANT_ID = u'Tenant-Id'\nHEADER_TRANSACTION_ID = u'txid'\n\n# HTTP VERBS\nHTTP_VERB_POST = 'post'\nHTTP_VERB_GET = 'get'\nHTTP_VERB_PUT = 'put'\nHTTP_VERB_DELETE = 'delete'\nHTTP_VERB_UPDATE = 'update'\n\n\n# REST CLIENT PATTERS\nAPI_ROOT_URL_ARG_NAME = 'api_root_url'\nURL_ROOT_PATTERN = \"{protocol}://{host}:{port}\"\n\n\nclass RestClient(object):\n\n api_root_url = None\n\n def __init__(self, protocol, host, port, resource=None):\n \"\"\"\n Init the RestClient with an URL ROOT Pattern using the specified params\n :param protocol: Web protocol [HTTP | HTTPS] (string)\n :param host: Hostname or IP (string)\n :param port: Service port (string)\n 
:param resource: Base URI resource, if exists (string)\n :return: None\n \"\"\"\n\n self.api_root_url = self._generate_url_root(protocol, host, port)\n if resource is not None:\n self.api_root_url += \"/\" + resource\n\n @staticmethod\n def _generate_url_root(protocol, host, port):\n \"\"\"\n Generate API root URL without resources\n :param protocol: Web protocol [HTTP | HTTPS] (string)\n :param host: Hostname or IP (string)\n :param port: Service port (string)\n :return: ROOT url\n \"\"\"\n return URL_ROOT_PATTERN.format(protocol=protocol, host=host, port=port)\n\n def _call_api(self, uri_pattern, method, body=None, headers=None, parameters=None, **kwargs):\n \"\"\"\n Launch HTTP request to the API with given arguments\n :param uri_pattern: string pattern of the full API url with keyword arguments (format string syntax)\n :param method: HTTP method to execute (string) [get | post | put | delete | update]\n :param body: Raw Body content (string) (Plain/XML/JSON to be sent)\n :param headers: HTTP header request (dict)\n :param parameters: Query parameters for the URL. i.e. {'key1': 'value1', 'key2': 'value2'}\n :param **kwargs: URL parameters (without API_ROOT_URL_ARG_NAME) to fill the patters\n :returns: REST API response ('Requests' response)\n \"\"\"\n\n kwargs[API_ROOT_URL_ARG_NAME] = self.api_root_url\n url = uri_pattern.format(**kwargs)\n logger.info(\"Executing API request [%s %s]\", method, url)\n\n log_print_request(logger, method, url, parameters, headers, body)\n\n try:\n response = requests.request(method=method, url=url, data=body, headers=headers, params=parameters,\n verify=False)\n except Exception, e:\n logger.error(\"Request {} to {} crashed: {}\".format(method, url, str(e)))\n raise e\n\n log_print_response(logger, response)\n\n return response\n\n def launch_request(self, uri_pattern, body, method, headers=None, parameters=None, **kwargs):\n \"\"\"\n Launch HTTP request to the API with given arguments\n :param uri_pattern: string pattern of the full API url with keyword arguments (format string syntax)\n :param body: Raw Body content (string) (Plain/XML/JSON to be sent)\n :param method: HTTP ver to be used in the request [GET | POST | PUT | DELETE | UPDATE ]\n :param headers: HTTP header (dict)\n :param parameters: Query parameters for the URL. i.e. {'key1': 'value1', 'key2': 'value2'}\n :param **kwargs: URL parameters (without url_root) to fill the patters\n :returns: REST API response ('Requests' response)\n \"\"\"\n return self._call_api(uri_pattern, method, body, headers, parameters, **kwargs)\n\n def get(self, uri_pattern, headers=None, parameters=None, **kwargs):\n \"\"\"\n Launch HTTP GET request to the API with given arguments\n :param uri_pattern: string pattern of the full API url with keyword arguments (format string syntax)\n :param headers: HTTP header (dict)\n :param parameters: Query parameters. i.e. {'key1': 'value1', 'key2': 'value2'}\n :param **kwargs: URL parameters (without url_root) to fill the patters\n :returns: REST API response ('Requests' response)\n \"\"\"\n return self._call_api(uri_pattern, HTTP_VERB_GET, headers=headers, parameters=parameters, **kwargs)\n\n def post(self, uri_pattern, body, headers=None, parameters=None, **kwargs):\n \"\"\"\n Launch HTTP POST request to the API with given arguments\n :param uri_pattern: string pattern of the full API url with keyword arguments (format string syntax)\n :param body: Raw Body content (string) (Plain/XML/JSON to be sent)\n :param headers: HTTP header (dict)\n :param parameters: Query parameters. 
i.e. {'key1': 'value1', 'key2': 'value2'}\n :param **kwargs: URL parameters (without url_root) to fill the patters\n :returns: REST API response ('Requests' response)\n \"\"\"\n return self._call_api(uri_pattern, HTTP_VERB_POST, body, headers, parameters, **kwargs)\n\n def put(self, uri_pattern, body, headers=None, parameters=None, **kwargs):\n \"\"\"\n Launch HTTP PUT request to the API with given arguments\n :param uri_pattern: string pattern of the full API url with keyword arguments (format string syntax)\n :param body: Raw Body content (string) (Plain/XML/JSON to be sent)\n :param headers: HTTP header (dict)\n :param parameters: Query parameters. i.e. {'key1': 'value1', 'key2': 'value2'}\n :param **kwargs: URL parameters (without url_root) to fill the patters\n :returns: REST API response ('Requests' response)\n \"\"\"\n return self._call_api(uri_pattern, HTTP_VERB_PUT, body, headers, parameters, **kwargs)\n\n def delete(self, uri_pattern, headers=None, parameters=None, **kwargs):\n \"\"\"\n Launch HTTP DELETE request to the API with given arguments\n :param uri_pattern: string pattern of the full API url with keyword arguments (format string syntax)\n :param headers: HTTP header (dict)\n :param parameters: Query parameters. i.e. {'key1': 'value1', 'key2': 'value2'}\n :param **kwargs: URL parameters (without url_root) to fill the patters\n :returns: REST API response ('Requests' response)\n \"\"\"\n return self._call_api(uri_pattern, HTTP_VERB_DELETE, headers=headers, parameters=parameters, **kwargs)\n\n\ndef _xml_to_dict(xml_to_convert):\n \"\"\"\n Convert RAW XML string to Python dict\n :param xml_to_convert: XML to convert (string/text)\n :return: Python dict with all XML data\n \"\"\"\n\n logger.debug(\"Converting to Python dict this XML: \" + str(xml_to_convert))\n return xmltodict.parse(xml_to_convert, attr_prefix='')\n\n\ndef _dict_to_xml(dict_to_convert):\n \"\"\"\n Convert Python dict to XML\n :param dict_to_convert: Python dict to be converted (dict)\n :return: XML (string)\n \"\"\"\n\n logger.debug(\"Converting to XML the Python dict: \" + str(dict_to_convert))\n return xmldict.dict_to_xml(dict_to_convert)\n\n\ndef response_body_to_dict(http_requests_response, content_type, xml_root_element_name=None, is_list=False):\n \"\"\"\n Convert a XML or JSON response in a Python dict\n :param http_requests_response: 'Requests (lib)' response\n :param content_type: Expected content-type header value (Accept header value in the request)\n :param xml_root_element_name: For XML requests. XML root element in response.\n :param is_list: For XML requests. If response is a list, a True value will delete list node name\n :return: Python dict with response.\n \"\"\"\n\n logger.info(\"Converting response body from API (XML or JSON) to Python dict\")\n if HEADER_REPRESENTATION_JSON == content_type:\n try:\n return http_requests_response.json()\n except Exception, e:\n logger.error(\"Error parsing the response to JSON. Exception:\" + str(e))\n raise e\n else:\n assert xml_root_element_name is not None,\\\n \"xml_root_element_name is a mandatory param when body is in XML\"\n\n try:\n response_body = _xml_to_dict(http_requests_response.content)[xml_root_element_name]\n except Exception, e:\n logger.error(\"Error parsing the response to XML. 
Exception: \" + str(e))\n raise e\n\n if is_list and response_body is not None:\n response_body = response_body.popitem()[1]\n\n return response_body\n\n\ndef model_to_request_body(body_model, content_type, body_model_root_element=None):\n \"\"\"\n Convert a Python dict (body model) to XML or JSON\n :param body_model: Model to be parsed. This model should have a root element.\n :param content_type: Target representation (Content-Type header value)\n :param body_model_root_element: For XML requests. XML root element in the model (if exists).\n :return:\n \"\"\"\n\n logger.info(\"Converting body request model (Python dict) to JSON or XML\")\n if HEADER_REPRESENTATION_XML == content_type:\n try:\n return _dict_to_xml(body_model)\n except Exception, e:\n logger.error(\"Error parsing the body model to XML. Exception: \" + str(e))\n raise e\n else:\n body_json = body_model[body_model_root_element] if body_model_root_element is not None else body_model\n encoder = JSONEncoder()\n\n try:\n return encoder.encode(body_json)\n except Exception, e:\n logger.error(\"Error parsing the body model to JSON. Exception:\" + str(e))\n raise e\n\ndef delete_element_when_value_none(data_structure):\n \"\"\"\n This method remove all entries in a Python dict when its value is None.\n :param data_structure: Python dict (lists are supported). e.i:\n [{\"element1\": \"e1\",\n \"element2\": {\"element2.1\": \"e2\",\n \"element2.2\": None},\n \"element3\": \"e3\"},\n {\"elementA\": \"eA\",\n \"elementB\": {\"elementB.1\": None,\n \"elementB2\": [\"a\", \"b\"]}}]\n :return: None. The data_structure is modified\n \"\"\"\n if isinstance(data_structure, list):\n for element in data_structure:\n delete_element_when_value_none(element)\n elif isinstance(data_structure, dict):\n for element in data_structure.keys():\n if data_structure[element] is None:\n del data_structure[element]\n else:\n delete_element_when_value_none(data_structure[element])\n if len(data_structure[element]) == 0:\n del data_structure[element]\n", "id": "8582057", "language": "Python", "matching_score": 3.065986156463623, "max_stars_count": 0, "path": "python-paasmanagerclient/utils/rest_client_utils.py" }, { "content": "# -*- coding: utf-8 -*-\n# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U\n#\n# This file is part of FI-WARE project.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with <EMAIL>\n\n__author__ = 'henar'\n\nimport httplib\nfrom xml.dom.minidom import parse, parseString\nfrom urlparse import urlparse\nimport sys\nimport json\n\nimport httplib\nimport mimetypes\n\ndef post_multipart(host, port, selector, fields, files):\n content_type, body = encode_multipart_formdata(fields, files)\n h = httplib.HTTP(host, port)\n h.putrequest('POST', selector)\n h.putheader('content-type', content_type)\n h.putheader('content-length', str(len(body)))\n h.endheaders()\n h.send(body)\n errcode, errmsg, headers = h.getreply()\n print errcode\n return 
h.file.read()\n\n\ndef encode_multipart_formdata(fields, files):\n    LIMIT = '100'\n    dd = '\r\n'\n    L = []\n    for (key, value) in fields:\n        L.append('--' + LIMIT)\n        L.append('Content-Disposition: form-data; name=\"%s\"' % key)\n        L.append('')\n        L.append(value)\n    print files\n    for (filename, value) in files:\n        L.append('--' + LIMIT)\n        L.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (filename, filename))\n        L.append('Content-Type: %s' % get_content_type(filename))\n        L.append('')\n        L.append(value)\n    L.append('--' + LIMIT + '--')\n    L.append('')\n    print L\n    body = dd.join(L)\n    content_type = 'multipart/form-data; boundary=%s' % LIMIT\n    return content_type, body\n\n\ndef get_content_type(filename):\n    return mimetypes.guess_type(filename)[0] or 'application/octet-stream'\n\n\ndef __do_http_req(method, url, headers, payload):\n    print '__do_http_req'\n    parsed_url = urlparse(url)\n    con = httplib.HTTPConnection(parsed_url.netloc)\n    con.request(method, parsed_url.path, payload, headers)\n    return con.getresponse()\n\ndef __do_http_req_https(method, url, headers, payload):\n    print '__do_http_req_https'\n    parsed_url = urlparse(url)\n    con = httplib.HTTPSConnection(parsed_url.netloc)\n    con.request(method, parsed_url.path, payload, headers)\n    return con.getresponse()\n\n    ##\n    ## Method that performs the HTTP GET\n    ##\n\n\ndef get(url, headers):\n    print url\n    if url.startswith ('https'):\n        return __do_http_req_https(\"GET\", url, headers, None)\n    else:\n        return __do_http_req(\"GET\", url, headers, None)\n\n\ndef delete(url, headers):\n    if url.startswith ('https'):\n        return __do_http_req_https(\"DELETE\", url, headers, None)\n    else:\n        return __do_http_req(\"DELETE\", url, headers, None)\n\n\n    ##\n    ## Method that performs the HTTP PUT\n    ##\n\n\ndef __put(url, headers):\n    if url.startswith ('https'):\n        return __do_http_req_https(\"PUT\", url, headers, None)\n    else:\n        return __do_http_req(\"PUT\", url, headers, None)\n\n\n    ##\n    ## Method that performs the HTTP POST\n    ##\n\n\ndef post(url, headers, payload):\n    if url.startswith ('https'):\n        return __do_http_req_https(\"POST\", url, headers, payload)\n    else:\n        return __do_http_req(\"POST\", url, headers, payload)\n\n\n\ndef get_token(keystone_url, tenant, user, password):\n\n# url=\"%s/%s\" %(keystone_url,\"v2.0/tokens\")\n    headers = {'Content-Type': 'application/json',\n               'Accept': \"application/xml\"}\n    payload = '{\"auth\":{\"tenantName\":\"' + tenant + '\",\"passwordCredentials\":{\"username\":\"' + user + '\",\"password\":\"' + password + '\"}}}'\n\n    response = post(keystone_url, headers, payload)\n    data = response.read()\n\n    ## If the response is the expected one, build the data dictionary from the JSON.\n    if response.status != 200:\n        print 'error to obtain the token ' + str(response.status)\n        sys.exit(1)\n    else:\n        dom = parseString(data)\n        try:\n            result = (dom.getElementsByTagName('token'))[0]\n            var = result.attributes[\"id\"].value\n\n            return var\n        except:\n            print (\"Error in the processing environment\")\n            sys.exit(1)\n\n\ndef processTask(headers, taskdom):\n    try:\n        print taskdom\n        href = taskdom[\"href\"]\n        print href\n        status = taskdom[\"status\"]\n        while status == 'RUNNING':\n            data1 = get_task(href, headers)\n            data = json.loads(data1)\n            status = data[\"status\"]\n\n        if status == 'ERROR':\n            error = taskdom[\"error\"]\n            message = error[\"message\"]\n            majorErrorCode = error[\"majorErrorCode\"]\n            print \"ERROR : \" + message + \" \" + majorErrorCode\n        return status\n    except:\n        print \"Unexpected error:\", sys.exc_info()[0]\n        sys.exit(1)\n\n\ndef get_task(url, 
headers):\n\n# url=\"%s/%s\" %(keystone_url,\"v2.0/tokens\")\n    response = get(url, headers)\n\n    ## If the response is the expected one, build the data dictionary from the JSON.\n    if response.status != 200:\n        print 'error to obtain the token ' + str(response.status)\n        sys.exit(1)\n    else:\n        data = response.read()\n        return data\n", "id": "6006842", "language": "Python", "matching_score": 2.5110280513763428, "max_stars_count": 0, "path": "automatization_scripts/tools/http.py" }, { "content": "#!/usr/bin/python\n\n# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U\n#\n# This file is part of FIWARE project.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with <EMAIL>\n\n#\n# This file obtains the validation chef key from\n# the user data. This information is defined in the\n# chefkey resource\n#\n\nimport yaml\nimport urllib2\n\ndef get_validation_key(userdata):\n    data = yaml.load(userdata)\n    if not 'chefkey' in data:\n        return None\n    data = data['chefkey']\n    print data\n    f = open('/etc/validation.pem','w')\n    f.write(data)\n    f.close()\n\ndef get_user_data():\n    try:\n        h = urllib2.urlopen('http://169.254.169.254/openstack/latest/user_data',None,30)\n    except Exception, e:\n        return None, None\n    if h.getcode() != 200:\n        return None, None\n\n    return get_validation_key(h)\n\nget_user_data()\n", "id": "7293934", "language": "Python", "matching_score": 0.5248792767524719, "max_stars_count": 0, "path": "scripts/userdata_scripts/user_data_script.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright 2015 Telefonica Investigación y Desarrollo, S.A.U\n#\n# This file is part of FIWARE project.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with <EMAIL>\n\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nfrom pip.req import parse_requirements\n\nREQUIREMENTS_FILE = \"requirements.txt\"\n# Get requirements list from requirements.txt file\n# > parse_requirements() returns generator of pip.req.InstallRequirement objects\ninstall_reqs = parse_requirements(REQUIREMENTS_FILE, session=False)\n# > requirements_list is a list of requirement; e.g. 
['requests==2.6.0', 'Fabric==1.8.3']\nrequirements_list = [str(ir.req) for ir in install_reqs]\n\nsetup(name='python-sdcclient',\n version='0.0.2',\n description='SDC Client',\n author='jmms392',\n url='https://github.com/telefonicaid/fiware-sdc/python-sdcclient',\n packages=find_packages(),\n install_requires=requirements_list)\n", "id": "5547825", "language": "Python", "matching_score": 0.3613329529762268, "max_stars_count": 0, "path": "python-sdcclient/setup.py" }, { "content": "# -*- coding: utf-8 -*-\n\n# Copyright 2015 Telefonica Investigación y Desarrollo, S.A.U\n#\n# This file is part of FIWARE project.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with <EMAIL>\n\n\nimport logging\nimport logging.config\nfrom xml.dom.minidom import parseString\nimport json\nimport os\n\nHEADER_CONTENT_TYPE = u'content-type'\nHEADER_REPRESENTATION_JSON = u'application/json'\nHEADER_REPRESENTATION_XML = u'application/xml'\nHEADER_REPRESENTATION_TEXTPLAIN = u'text/plain'\n\n\"\"\"\nPart of this code has been taken from:\n https://pdihub.hi.inet/fiware/fiware-iotqaUtils/raw/develop/iotqautils/iotqaLogger.py\n\"\"\"\n\nLOG_CONSOLE_FORMATTER = \" %(asctime)s - %(name)s - %(levelname)s - %(message)s\"\nLOG_FILE_FORMATTER = \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n\n\nif os.path.exists(\"./settings/logging.conf\"):\n logging.config.fileConfig(\"./settings/logging.conf\")\n\n\n# Console logging level. 
By default: ERROR\nlogging_level = logging.ERROR\n\n\ndef configure_logging(level):\n \"\"\"\n Configure global log level to given one\n :param level: Level (INFO | DEBUG | WARN | ERROR)\n :return:\n \"\"\"\n\n global logging_level\n logging_level = logging.ERROR\n if \"info\" == level.lower():\n logging_level = logging.INFO\n elif \"warn\" == level.lower():\n logging_level = logging.WARNING\n elif \"debug\" == level.lower():\n logging_level = logging.DEBUG\n\n\ndef get_logger(name):\n \"\"\"\n Create new logger with the given name\n :param name: Name of the logger\n :return: Logger\n \"\"\"\n\n logger = logging.getLogger(name)\n return logger\n", "id": "4487010", "language": "Python", "matching_score": 0.5806039571762085, "max_stars_count": 0, "path": "scripts/recipes_checking/utils/logger_utils.py" }, { "content": "import json\nimport os\n\ndef instantiate_all_example_body(json_content):\n \"\"\"Instantiate the body of payload if it is not specified.\n\n Arguments:\n JSON_content -- JSON with APIB definition\n \"\"\"\n for resource_group in json_content[\"resourceGroups\"]:\n for resource in resource_group[\"resources\"]:\n for action in resource[\"actions\"]:\n for example in action[\"examples\"]:\n for request in example[\"requests\"]:\n if len(request[\"body\"]):\n continue #if it has body don't replace it\n\n ##check itselft definition ???\n if len(request['content']):\n for data_structure in request[\"content\"]:\n if len(data_structure[\"sections\"]):\n\n _json=get_attributes(data_structure[\"sections\"])\n if {} == _json:\n continue\n request[\"body\"] = json.dumps(_json,sort_keys=True, indent=4)\n else:\n try:\n _json = get_attributes_from_data_structure(data_structure[\"typeDefinition\"][\"typeSpecification\"][\"name\"][\"literal\"],json_content[\"data_structures\"] )\n if {} == _json:\n continue\n request[\"body\"] = json.dumps(_json, sort_keys=True, indent=4)\n except Exception, e:\n print \"error resquest\"\n pass\n continue\n #check action parent\n if len(action[\"content\"]): \n #it has elements\n for data_structure in action[\"content\"]:\n if len(data_structure[\"sections\"]):\n\n request[\"body\"] = json.dumps(get_attributes(data_structure[\"sections\"]),sort_keys=True, indent=4)\n\n else:\n try:\n _json = get_attributes_from_data_structure(data_structure[\"typeDefinition\"][\"typeSpecification\"][\"name\"][\"literal\"],json_content[\"data_structures\"] )\n if {} == _json:\n continue\n request[\"body\"] = json.dumps(_json, sort_keys=True, indent=4)\n except Exception, e:\n print \"error resquest\"\n pass\n continue\n \n #check resource grandparent\n if len(resource[\"content\"]):\n \n for data_structure in resource[\"content\"]:\n if len(data_structure[\"sections\"]):\n\n\n _json = get_attributes(data_structure[\"sections\"])\n if {} == _json:\n continue\n request[\"body\"] = json.dumps(_json, sort_keys=True, indent=4)\n else:\n try:\n _json = get_attributes_from_data_structure(data_structure[\"typeDefinition\"][\"typeSpecification\"][\"name\"][\"literal\"],json_content[\"data_structures\"] )\n if {} == _json:\n continue\n request[\"body\"] = json.dumps(_json, sort_keys=True, indent=4)\n except Exception, e:\n print \"error resquest\"\n pass\n continue\n\n for response in example[\"responses\"]:\n if len(response[\"body\"]):\n continue #if it has body don't replace it\n\n ##check itselft definition ???\n if len(response['content']):\n for data_structure in response[\"content\"]:\n if len(data_structure[\"sections\"]):\n\n 
_json=get_attributes(data_structure[\"sections\"])\n if {} == _json:\n continue\n response[\"body\"] = json.dumps(_json,sort_keys=True, indent=4)\n else:\n try:\n _json = get_attributes_from_data_structure(data_structure[\"typeDefinition\"][\"typeSpecification\"][\"name\"][\"literal\"],json_content[\"data_structures\"] )\n if {} == _json:\n continue\n response[\"body\"] = json.dumps(_json, sort_keys=True, indent=4)\n except Exception, e:\n print \"error resquest\"\n pass\n continue\n #check action parent\n if len(action[\"content\"]): \n #it has elements\n for data_structure in action[\"content\"]:\n if len(data_structure[\"sections\"]):\n\n response[\"body\"] = json.dumps(get_attributes(data_structure[\"sections\"]),sort_keys=True, indent=4)\n\n else:\n try:\n _json = get_attributes_from_data_structure(data_structure[\"typeDefinition\"][\"typeSpecification\"][\"name\"][\"literal\"],json_content[\"data_structures\"] )\n if {} == _json:\n continue\n response[\"body\"] = json.dumps(_json, sort_keys=True, indent=4)\n except Exception, e:\n print \"error resquest\"\n pass\n continue\n \n #check resource grandparent\n if len(resource[\"content\"]):\n \n for data_structure in resource[\"content\"]:\n if len(data_structure[\"sections\"]):\n\n\n _json = get_attributes(data_structure[\"sections\"])\n if {} == _json:\n continue\n response[\"body\"] = json.dumps(_json, sort_keys=True, indent=4)\n else:\n try:\n _json = get_attributes_from_data_structure(data_structure[\"typeDefinition\"][\"typeSpecification\"][\"name\"][\"literal\"],json_content[\"data_structures\"] )\n if {} == _json:\n continue\n response[\"body\"] = json.dumps(_json, sort_keys=True, indent=4)\n except Exception, e:\n print \"error resquest\"\n pass\n continue\n\n return\n\n\ndef get_attributes(sections):\n\n _json={}\n for section in sections:\n if section[\"class\"] == \"memberType\":\n for contents in section[\"content\"]:\n if contents[\"class\"] == \"property\":\n try:\n name = contents[\"content\"][\"name\"][\"literal\"]\n value = contents[\"content\"][\"valueDefinition\"][\"values\"][0][\"literal\"] \n\n _json[name]=value\n\n except Exception, e:\n try:\n name = contents[\"content\"][\"name\"][\"literal\"]\n value = get_attributes(contents[\"content\"][\"sections\"])\n if value != {}:\n _json[name]=value\n\n except Exception, e:\n pass\n pass\n\n return _json\n\n\n\ndef get_attributes_from_data_structure(structure_name, data_structures):\n\n\n _json={}\n try:\n for attribute in data_structures[structure_name][\"attributes\"]:\n if len(attribute[\"values\"]):\n _json[attribute[\"name\"]] = attribute[\"values\"][0]\n else:\n if len (attribute[\"subproperties\"]):\n _inner_json = {}\n _inner_json = get_recursive_attributes_from_data_structure(attribute[\"subproperties\"])\n if {} == _inner_json:\n continue\n \n _json[attribute[\"name\"]] = _inner_json\n\n except Exception, e:\n print \"Data structure \"+structure_name+\" not found\"\n return _json\n\n return _json\n \n\n\ndef get_recursive_attributes_from_data_structure(subproperties):\n \n _json = {}\n for subproperty in subproperties:\n if len(subproperty['values']):\n _json[subproperty['name']] = subproperty['values'][0]\n continue\n\n if len(subproperty['subproperties']):\n _inner_json = {}\n _inner_json = get_recursive_attributes_from_data_structure(subproperty['subproperties'])\n\n if {} == _inner_json:\n continue\n\n _json[subproperty['name']] = _inner_json\n\n return _json\n", "id": "1067266", "language": "Python", "matching_score": 2.487957239151001, "max_stars_count": 
0, "path": "fiware_api_blueprint_renderer/src/drafter_postprocessing/instantiate_body.py" }, { "content": "\nimport unittest\nfrom os import path\nimport sys\n\nimport_path = path.abspath(__file__)\n\nwhile path.split(import_path)[1] != 'fiware_api_blueprint_renderer':\n\n import_path = path.dirname(import_path)\n\nsys.path.append(import_path)\n\nfrom src.drafter_postprocessing.instantiate_body import get_attributes_from_data_structure \nfrom tests.test_utils import *\n\ndata_structures = {\n\"structure1\":{\n \"attributes\":[\n {\"name\": \"attributeWithoutValue\",\n \"values\":[],\n \"subproperties\":[]\n },\n {\"name\":\"attributeWithValue1\",\n \"values\":['value1', 'value2'],\n \"subproperties\":[]\n },\n {\"name\":\"attributeWithRecursiveValue\",\n \"values\":[],\n \"subproperties\":[\n {\"name\": \"recursiveWithoutValue\",\n \"values\":[],\n \"subproperties\":[]\n },\n {\"name\": \"recursiveWithValue1\",\n \"values\":['RecursiveVal1'],\n \"subproperties\":[]\n },\n {\"name\": \"recursiveWithValueRecursive\",\n \"values\":[],\n \"subproperties\":[\n {\"name\":\"recursiveThirdLevel1\",\n \"values\":[\n 'value_recursive_third1'],\n \"subproperties\":[]\n\n },\n {\"name\":\"recursiveThirdLevel2\",\n \"values\":[\n 'value_recursive_third2'],\n \"subproperties\":[]\n\n }\n ]\n },\n {\"name\":\"recursiveWithValue2\",\n \"values\":[\"value_recursive2\"],\n \"subproperties\":[]\n }\n ]\n }\n ]\n },\n\"structure2\":{\n \"attributes\":[\n {\"name\":\"structure2_var\",\n \"values\":[\"5\"],\n \"subproperties\":[]\n },\n {\"name\":\"structure2_var2\",\n \"values\":[\"6\",\"7\"],\n \"subproperties\":[]\n }\n ]\n}\n\n}\n\nclass TestGetAttributesFromDataStructureFunction(unittest.TestCase):\n __metaclass__ = TestCaseWithExamplesMetaclass\n\n @for_examples(\n ('structure2',{\"structure2_var\":\"5\", \"structure2_var2\":\"6\"}),\n ('structure1',{\"attributeWithValue1\":\"value1\", \"attributeWithRecursiveValue\":{\"recursiveWithValue1\":\"RecursiveVal1\", \"recursiveWithValueRecursive\":{\"recursiveThirdLevel1\":\"value_recursive_third1\",\"recursiveThirdLevel2\":\"value_recursive_third2\"},\"recursiveWithValue2\":\"value_recursive2\"}})\n )\n def test_get_attributes_function(self, structure_name, expected_attributes):\n obtained_attributes = (\n get_attributes_from_data_structure(structure_name, data_structures) )\n self.assertEqual(obtained_attributes, expected_attributes)\n\n\n\n\nsuite = unittest.TestLoader().loadTestsFromTestCase(TestGetAttributesFromDataStructureFunction)\nunittest.TextTestRunner(verbosity=2).run(suite)", "id": "9095037", "language": "Python", "matching_score": 1.326154351234436, "max_stars_count": 0, "path": "fiware_api_blueprint_renderer/tests/test_payload_instantiation/test_get_attributes_from_data_structure_function.py" }, { "content": "import unittest\n\n\n__examples__ = \"__examples__\"\n\n\ndef for_examples(*examples):\n def decorator(f, examples=examples):\n if len(examples) == 1 and isinstance(examples[0], list):\n setattr(f, __examples__, getattr(f, __examples__,tuple(examples[0])))\n else:\n setattr(f, __examples__, getattr(f, __examples__,()) + examples)\n return f\n return decorator\n\n\n\n\nclass TestCaseWithExamplesMetaclass(type):\n def __new__(meta, name, bases, dict):\n def tuplify(x):\n if not isinstance(x, tuple):\n return (x,)\n return x\n for methodname, method in dict.items():\n if hasattr(method, __examples__):\n dict.pop(methodname)\n examples = getattr(method, __examples__)\n delattr(method, __examples__)\n for example in (tuplify(x) for x in 
examples):\n def method_for_example(self, method = method, example = example):\n method(self, *example)\n methodname_for_example = methodname + \"(\" + \", \".join(str(v) for v in example) + \")\"\n dict[methodname_for_example] = method_for_example\n return type.__new__(meta, name, bases, dict)\n \n\ndef combinar_roles_entidades(roles, entidades):\n combinado=[]\n for rol in roles:\n combinado += [ (rol, ele) for ele in entidades]\n print combinado\n return combinado\n", "id": "2509344", "language": "Python", "matching_score": 0.34372368454933167, "max_stars_count": 0, "path": "fiware_api_blueprint_renderer/tests/test_utils.py" }, { "content": "\nimport unittest\nfrom os import path\nimport os\nimport sys\nimport shutil\nimport json\nfrom subprocess import Popen, PIPE, call\nimport pprint\n\nimport_path = path.abspath(__file__)\n\nwhile path.split(import_path)[1] != 'fiware_api_blueprint_renderer':\n\n import_path = path.dirname(import_path)\n\nsys.path.append(import_path)\n\n\nfrom tests.test_utils import *\n\ntest_data={\n \"Action with attributes and without parent\":\n {\n \"get_with_body /v2\":{\"ambientNoise\": 31.5},\n \"get_without_body /v2\": {\"entities_url\": \"entityExample\"}\n },\n \"Action without attr but with resoruce attr\":\n {\n \"get_with_body /v2/entities\":{\"ambientNoise\": 31.5},\n \"get_without_body /v2/entities\":{\n \"entities_url\": \"entityExample\", \n \"registrations_url\": {\n \"second_level\": {\n \"thirdLevel\": {\n \"fourthLevel\": \"fourth level value\"\n }\n }\n }\n }\n\n },\n \"action with attr link\":\n {\n \"get_with_body /v2/entities\": {\"ambientNoise\": 31.5},\n \"get_without_body /v2/entities\":{\n \"first_level\": {\n \"second_level\": \"stringValue\",\n \"third_level\": {\n \"terString\": \"value3\"\n }\n }, \n \"url\": \"aa\"\n } \n },\n \"Action without attr but with resoruce link\":\n {\n \"get_with_body /v2/entities\": {\"ambientNoise\": 31.5},\n \"get_without_body /v2/entities\": {\n \"first_level\": {\n \"second_level\": \"stringValue\", \n \"third_level\": {\n \"terString\": \"value3\"\n }\n }, \n \"url\": \"aa\"\n }\n\n },\n \"action with attr link\":\n {\n \"get_with_body /v2/entities\":{\"ambientNoise\": 31.5},\n \"get_without_body /v2/entities\":{\n \"first_level\": {\n \"second_level\": \"stringValue\", \n \"third_level\": {\n \"terString\": \"value3\"\n }\n }, \n \"url\": \"aa\"\n }\n },\n \"attributes in request test\":\n {\n \"get_with_body /v2/entitiess\":{\"ambientNoise\": 31.5},\n \"get_without_body_but_attr /v2/entitiess\":{\"entities_url\": \"entityExampleREQ\"},\n \"get_without_body_but_attr_linked /v2/entitiess\": {\n \"first_level\": {\n \"second_level\": \"stringValue\", \n \"third_level\": {\n \"terString\": \"value3\"\n }\n }, \n \"url\": \"aa\"\n }\n\n },\n \"recursive attributes\":\n {\n \"get_recursive /v2/entitiesR\":{\n \"first_level\": {\n \"Country\": \"United Kingdom\", \n \"city\": \"London\", \n \"second_level\": {\n \"with_value\": \"defaultValue\", \n \"third_level\": {\n \"with_value\": \"value\"\n }\n }\n }\n },\n \"attribute_referencing_resource /v2/entitiesR\":{\n \"entities_url\": \"entityExample\", \n \"registrations_url\": {\n \"second_level\": {\n \"thirdLevel\": {\n \"fourthLevel\": \"fourth level value\"\n }\n }\n }\n }\n }\n\n\n }\n\n\n\n\nclass TestPayloadInstantiationFromAPIB(unittest.TestCase):\n __metaclass__ = TestCaseWithExamplesMetaclass\n\n @classmethod\n def setUpClass(cls):\n pathname_ = path.dirname(path.abspath(__file__))\n cls.apib_file = pathname_+\"/api_test.apib\"\n 
cls.tmp_result_files = \"/var/tmp/test-links-in-reference-160faf1aae1dd41c8f16746ea744f138\"\n\n if os.path.exists(cls.tmp_result_files):\n shutil.rmtree(cls.tmp_result_files)\n\n os.makedirs(cls.tmp_result_files)\n\n Popen([\"fabre\", \"-i\", cls.apib_file, \"-o\", \n cls.tmp_result_files, \"--no-clear-temp-dir\"], stdout=PIPE, stderr=PIPE).communicate()\n\n with open('/var/tmp/fiware_api_blueprint_renderer_tmp/api_test.json', 'r') as f:\n cls.out_json = json.load(f)\n\n @classmethod\n def tearDownClass(cls):\n if os.path.exists(cls.tmp_result_files):\n shutil.rmtree(cls.tmp_result_files)\n\n to_delete = ['/var/tmp/fiware_api_blueprint_renderer_tmp/api_test.apib',\n '/var/tmp/fiware_api_blueprint_renderer_tmp/api_test.extras',\n '/var/tmp/fiware_api_blueprint_renderer_tmp/api_test.json']\n\n for filename in to_delete:\n if os.path.exists(filename):\n os.remove(filename)\n\n\n def test_payload_instantiation_json(self):\n \n for group in self.out_json['resourceGroups']:\n for resource in group['resources']:\n for action in resource['actions']: \n for example in action['examples']:\n for request in example['requests']:\n expected_value = test_data[action[\"name\"]][request[\"name\"]]\n\n expected_value = json.dumps(expected_value, sort_keys=True, indent=4)\n\n \"\"\" \n print \"debug\"\n print expected_value\n print request['body']\n \"\"\"\n self.assertEqual(expected_value.strip(), request['body'].strip()) \n\n\n\n\n\n\n\n\n\nsuite = unittest.TestLoader().loadTestsFromTestCase(TestPayloadInstantiationFromAPIB)\nunittest.TextTestRunner(verbosity=2).run(suite)", "id": "3770020", "language": "Python", "matching_score": 4.650003910064697, "max_stars_count": 0, "path": "fiware_api_blueprint_renderer/tests/test_payload_instantiation/test_payload_instantation_from_apib.py" }, { "content": "\nimport unittest\nfrom os import path\nimport os\nimport sys\nimport shutil\nimport json\nfrom subprocess import Popen, PIPE, call\nimport pprint\n\nimport_path = path.abspath(__file__)\n\nwhile path.split(import_path)[1] != 'fiware_api_blueprint_renderer':\n\n import_path = path.dirname(import_path)\n\nsys.path.append(import_path)\n\nfrom src.drafter_postprocessing.order_uri import order_uri_parameters, order_request_parameters\nfrom tests.test_utils import *\n\n\nclass TestOrderURIFromAPIB(unittest.TestCase):\n __metaclass__ = TestCaseWithExamplesMetaclass\n\n @classmethod\n def setUpClass(cls):\n pathname_ = path.dirname(path.abspath(__file__))\n cls.apib_file = pathname_+\"/api_test.apib\"\n cls.tmp_result_files = \"/var/tmp/test-links-in-reference-160faf1aae1dd41c8f16746ea744f138\"\n\n if os.path.exists(cls.tmp_result_files):\n shutil.rmtree(cls.tmp_result_files)\n\n os.makedirs(cls.tmp_result_files)\n\n Popen([\"fabre\", \"-i\", cls.apib_file, \"-o\", \n cls.tmp_result_files, \"--no-clear-temp-dir\"], stdout=PIPE, stderr=PIPE).communicate()\n\n with open('/var/tmp/fiware_api_blueprint_renderer_tmp/api_test.json', 'r') as f:\n cls.out_json = json.load(f)\n\n @classmethod\n def tearDownClass(cls):\n if os.path.exists(cls.tmp_result_files):\n shutil.rmtree(cls.tmp_result_files)\n\n to_delete = ['/var/tmp/fiware_api_blueprint_renderer_tmp/api_test.apib',\n '/var/tmp/fiware_api_blueprint_renderer_tmp/api_test.extras',\n '/var/tmp/fiware_api_blueprint_renderer_tmp/api_test.json']\n\n for filename in to_delete:\n if os.path.exists(filename):\n os.remove(filename)\n\n\n def check_all_actions_equals_in_group(self,group,expected_value,occurrences):\n\n found = 0\n for resource in group['resources']:\n for action in 
resource['actions']:\n for example in action['examples']:\n for request in example['requests']:\n self.assertEqual(expected_value, request['name'].strip())\n found = found +1\n\n self.assertEqual(occurrences,found)\n\n def check_exist_actions_in_group(self, group,expected_value, occurrences):\n found = 0\n for resource in group['resources']:\n for action in resource['actions']:\n for example in action['examples']:\n for request in example['requests']:\n if expected_value == request['name'].strip():\n found = found +1\n\n self.assertEqual(occurrences,found)\n \n\n def test_order_request_parameters_from_json(self):\n \n\n for group in self.out_json['resourceGroups']:\n if \"Root\" == group['name']:\n self.check_all_actions_equals_in_group(group,\"/v2\",2)\n \n elif \"Entities\" == group['name']:\n self.check_all_actions_equals_in_group(group,\"/v2/entities?id=foo&limit=10&options=bar\",2)\n \n elif \"AutoInstantiateEntities\" == group['name']:\n self.check_exist_actions_in_group(group,\"/v2/entities2{?attrs,coords,geometry,id,idPattern,limit,offset,options,q,type}\",1)\n self.check_exist_actions_in_group(group,\"/v2/entities2\",1)\n \n elif \"instantiate with values\" == group['name']:\n self.check_exist_actions_in_group(group,\"/v2/entities2{?attrs,coords,geometry,id,idPattern,limit,offset,options,q,type}\",1)\n self.check_exist_actions_in_group(group,\"/v2/entities2/?limit=100&offset=101\",1)\n else:\n print \"unexpected group\", group['name']\n assert False\n \n\n \n def test_order_action_uri_parameters_for_json(self):\n\n expected_uris = ['/v2', \n '/v2/entities{?attrs,coords,geometry,id,idPattern,limit,offset,options,q,type}',\n '/v2/entities2{?attrs,coords,geometry,id,idPattern,limit,offset,options,q,type}',\n '/v2/entities2/{?attrs,coords,geometry,id,idPattern,limit,offset,options,q,type}'\n ]\n\n for group in self.out_json['resourceGroups']:\n for resource in group['resources']:\n for action in resource['actions']:\n try:\n expected_uris.remove(action['attributes']['uriTemplate'])\n except Exception as e:\n print \"Error trying to remove \", action['attributes']['uriTemplate']\n print e\n assert False\n\n if len(expected_uris) > 0:\n print \"Some URIs have not been found\"\n print expected_uris\n assert False\n\n assert True\n\n\n def test_order_resource_uri_parameters_for_json(self):\n\n expected_uris = ['/v2/entities{?attrs,coords,geometry,id,idPattern,limit,offset,options,q,type}', \n '/v2/entities2{?attrs,coords,geometry,id,idPattern,limit,offset,options,q,type}',\n '/v2/entities3{?limit,offset}',\n '/v2'\n ]\n\n for group in self.out_json['resourceGroups']:\n for resource in group['resources']:\n try:\n expected_uris.remove(resource['uriTemplate'])\n except Exception as e:\n print \"Error trying remove \", resource['uriTemplate']\n print e\n assert False\n\n if len(expected_uris) > 0:\n print \"Some URIs have not been found\"\n print expected_uris\n assert False\n\n assert True\n\n\n\n\nsuite = unittest.TestLoader().loadTestsFromTestCase(TestOrderURIFromAPIB)\nunittest.TextTestRunner(verbosity=2).run(suite)", "id": "9663963", "language": "Python", "matching_score": 6.021100997924805, "max_stars_count": 0, "path": "fiware_api_blueprint_renderer/tests/test_ordered_uri/test_order_uri_from_apib.py" }, { "content": "\nimport unittest\nfrom os import path\nimport os\nimport sys\nimport shutil\nimport json\nfrom subprocess import Popen, PIPE, call\nimport pprint\n\nimport_path = path.abspath(__file__)\n\nwhile path.split(import_path)[1] != 'fiware_api_blueprint_renderer':\n\n 
import_path = path.dirname(import_path)\n\nsys.path.append(import_path)\n\n\nfrom tests.test_utils import *\n\n\nmetadata_test = {\n \"FORMAT\": \"1A\",\n \"HOST\": \"http://telefonicaid.github.io/fiware-orion/api/v2/\",\n \"TITLE\": \"FIWARE-NGSI v2 Specification\",\n \"DATE\": \"30 July 2015\",\n \"VERSION\": \"abcedefg\",\n \"PREVIOUS_VERSION\": \"jhdfgh\",\n \"APIARY_PROJECT\": \"test5950\",\n \"GITHUB_SOURCE\": \"http://github.com/telefonicaid/fiware-orion.git\",\n \"SPEC_URL\": \"http://example.com/api/\"\n}\n\nclass TestMetadataInJSON(unittest.TestCase):\n __metaclass__ = TestCaseWithExamplesMetaclass\n\n @classmethod\n def setUpClass(cls):\n pathname_ = path.dirname(path.abspath(__file__))\n cls.apib_file = pathname_+\"/api_test.apib\"\n cls.tmp_result_files = \"/var/tmp/test-links-in-reference-160faf1aae1dd41c8f16746ea744f138\"\n\n if os.path.exists(cls.tmp_result_files):\n shutil.rmtree(cls.tmp_result_files)\n\n os.makedirs(cls.tmp_result_files)\n\n Popen([\"fabre\", \"-i\", cls.apib_file, \"-o\", \n cls.tmp_result_files, \"--no-clear-temp-dir\"], stdout=PIPE, stderr=PIPE).communicate()\n\n with open('/var/tmp/fiware_api_blueprint_renderer_tmp/api_test.json', 'r') as f:\n cls.out_json = json.load(f)\n\n @classmethod\n def tearDownClass(cls):\n if os.path.exists(cls.tmp_result_files):\n shutil.rmtree(cls.tmp_result_files)\n\n to_delete = ['/var/tmp/fiware_api_blueprint_renderer_tmp/api_test.apib',\n '/var/tmp/fiware_api_blueprint_renderer_tmp/api_test.extras',\n '/var/tmp/fiware_api_blueprint_renderer_tmp/api_test.json']\n\n for filename in to_delete:\n if os.path.exists(filename):\n os.remove(filename)\n\n\n\n def test_metadata_in_json(self):\n \n self.assertEqual(len(self.out_json['metadata']), len(metadata_test))\n for metadatum in self.out_json['metadata']: \n self.assertEqual(metadata_test[metadatum[\"name\"]], metadatum[\"value\"]) \n\n\n\n\n\n\n\n\n\nsuite = unittest.TestLoader().loadTestsFromTestCase(TestMetadataInJSON)\nunittest.TextTestRunner(verbosity=2).run(suite)", "id": "11202797", "language": "Python", "matching_score": 3.499303102493286, "max_stars_count": 0, "path": "fiware_api_blueprint_renderer/tests/test_metadata/test_correct_metadata_in_json.py" }, { "content": "#! 
/usr/bin/env python\n\nimport os\nfrom os import path\nimport json\nfrom pprint import pprint\nimport shutil\nfrom subprocess import Popen, PIPE, call\nimport unittest\n\n\nclass TestLinksReference(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n pathname_ = path.dirname(path.abspath(__file__))\n cls.apib_file = pathname_+\"/base.apib\"\n cls.tmp_result_files = \"/var/tmp/test-links-in-reference-160faf1aae1dd41c8f16746ea744f138\"\n\n if os.path.exists(cls.tmp_result_files):\n shutil.rmtree(cls.tmp_result_files)\n\n os.makedirs(cls.tmp_result_files)\n\n Popen([\"fabre\", \"-i\", cls.apib_file, \"-o\", \n cls.tmp_result_files, \"--no-clear-temp-dir\"], stdout=PIPE, stderr=PIPE).communicate()\n\n\n with open('/var/tmp/fiware_api_blueprint_renderer_tmp/base.json', 'r') as f:\n cls.out_json = json.load(f)\n\n\n @classmethod\n def tearDownClass(cls):\n if os.path.exists(cls.tmp_result_files):\n shutil.rmtree(cls.tmp_result_files)\n\n to_delete = ['/var/tmp/fiware_api_blueprint_renderer_tmp/base.apib',\n '/var/tmp/fiware_api_blueprint_renderer_tmp/base.extras',\n '/var/tmp/fiware_api_blueprint_renderer_tmp/base.json']\n\n for filename in to_delete:\n if os.path.exists(filename):\n os.remove(filename)\n\n\n def test_normal_link_in_abstract(self):\n \n json_link = {\"url\": \"http://normal-link-in-abstract.com\",\\\n \"title\": \"normal-link-in-abstract\"}\n\n self.assertIn(json_link, self.out_json[\"reference_links\"])\n\n\n def test_direct_secure_link_in_abstract(self):\n \n json_link = {\"url\": \"https://direct-secure-link-in-abstract.com\",\\\n \"title\": \"https://direct-secure-link-in-abstract.com\"}\n\n self.assertIn(json_link, self.out_json[\"reference_links\"])\n\n\n def test_direct_link_in_abstract(self):\n \n json_link ={\"url\": \"http://direct-link-in-abstract.com\",\\\n \"title\": \"http://direct-link-in-abstract.com\"}\n\n self.assertIn(json_link, self.out_json[\"reference_links\"])\n\n\"\"\"\n def test_link_with_quotes_in_abstract(self):\n \n \n json_link = {\"url\": \"http://link-with-quotes.com?id=\\\"weird-id\\\"\", \\\n \"title\": \"http://link-with-quotes.com?id=\\\"weird-id\\\"\"}\n\n self.assertIn(json_link, self.out_json[\"reference_links\"])\n\"\"\"\n\nsuite = unittest.TestLoader().loadTestsFromTestCase(TestLinksReference)\nunittest.TextTestRunner(verbosity=1).run(suite)", "id": "2494998", "language": "Python", "matching_score": 2.3232240676879883, "max_stars_count": 0, "path": "fiware_api_blueprint_renderer/tests/test_reference_links/test_links_in_reference.py" }, { "content": "import unittest\nfrom os import path\nimport os\nimport shutil\nimport sys\nimport json\nfrom subprocess import Popen, PIPE, call\nimport pprint\nfrom lxml import etree, objectify\nfrom lxml.cssselect import CSSSelector\nimport pprint\nfrom pyquery import PyQuery as pq\n\nimport_path = path.abspath(__file__)\n\nwhile path.split(import_path)[1] != 'fiware_api_blueprint_renderer':\n\n import_path = path.dirname(import_path)\n\nsys.path.append(import_path)\n\nfrom src.drafter_postprocessing.order_uri import order_uri_parameters, order_request_parameters\nfrom tests.test_utils import *\nfrom src.renderer import main\n\n\nclass TestTOCJSON(unittest.TestCase):\n __metaclass__ = TestCaseWithExamplesMetaclass\n \n @classmethod\n def setUpClass(cls):\n pathname_ = path.dirname(path.abspath(__file__))\n cls.apib_file = pathname_+\"/api_test.apib\"\n cls.tmp_result_files = \"/var/tmp/test-links-in-reference-160faf1aae1dd41c8f16746ea744f138\"\n\n cls.html_output = 
cls.tmp_result_files+\"api_test.html\"\n\n if os.path.exists(cls.tmp_result_files):\n shutil.rmtree(cls.tmp_result_files)\n\n os.makedirs(cls.tmp_result_files)\n\n main([\"fabre\", \"-i\", cls.apib_file, \"-o\", \n cls.tmp_result_files, \"--no-clear-temp-dir\"])\n\n parser = etree.HTMLParser()\n cls.tree = etree.parse(\"\"+cls.tmp_result_files+\"/api_test.html\", parser)\n cls.pq = pq(filename = cls.tmp_result_files+\"/api_test.html\")\n\n with open('/var/tmp/fiware_api_blueprint_renderer_tmp/api_test.json', 'r') as f:\n cls.out_json = json.load(f)\n\n @classmethod\n def tearDownClass(cls):\n if os.path.exists(cls.tmp_result_files):\n shutil.rmtree(cls.tmp_result_files)\n\n to_delete = ['/var/tmp/fiware_api_blueprint_renderer_tmp/api_test.apib',\n '/var/tmp/fiware_api_blueprint_renderer_tmp/api_test.extras',\n '/var/tmp/fiware_api_blueprint_renderer_tmp/api_test.json']\n\n for filename in to_delete:\n if os.path.exists(filename):\n os.remove(filename)\n\n\n def test_TOC_HTML(self):\n\n sel = CSSSelector('nav#toc')\n\n li_elements=[\n {\"text\":\"FIWARE-NGSI v2 Specification\",\n \"href\":\"#API-content\",\n \"subelements\":[]\n },\n {\"text\":\"API Summary\",\n \"href\":\"#api-summary\",\n \"subelements\":[]\n },\n {\"text\":\"Specification\",\n \"href\":\"#specification\",\n \"subelements\":[\n {\"text\":\"Introduction\",\n \"href\":\"#introduction\",\n \"subelements\":[]\n },\n ]\n },\n {\"text\":\"Common Payload Definition\",\n \"href\":\"#common-payload-definition\",\n \"subelements\":[]\n },\n {\"text\":\"API Specification\",\n \"href\":\"#API_specification\",\n \"subelements\":[\n {\"text\":\"Group Root\",\n \"href\":\"#resource_group_root\",\n \"subelements\":[\n {\"text\":\"GET - Action with attributes and without parent\",\n \"href\":\"#action_action-with-attributes-and-without-parent\",\n \"subelements\":[]\n }\n ]\n },\n {\"text\":\"Group Entities\",\n \"href\":\"#resource_group_entities\",\n \"subelements\":[\n {\"text\":\"Resource Resource with attributes\",\n \"href\":\"#resource_resource-with-attributes\",\n \"subelements\":[\n {\"text\":\"GET - Action without attr but with resoruce attr\",\n \"href\":\"#action_action-without-attr-but-with-resoruce-attr\",\n \"subelements\":[]\n },\n {\"text\":\"POST - action with attr link\",\n \"href\":\"#action_action-with-attr-link\",\n \"subelements\":[]\n }\n ]\n },\n {\"text\":\"Resource Resource with link\",\n \"href\":\"#resource_resource-with-link\",\n \"subelements\":[\n {\"text\":\"GET - Action without attr but with resoruce link\",\n \"href\":\"#action_action-without-attr-but-with-resoruce-link\",\n \"subelements\":[]\n },\n {\"text\":\"POST - action with attr link\",\n \"href\":\"#action_action-with-attr-link\",\n \"subelements\":[]\n },\n {\"text\":\"POST - attributes in request test\",\n \"href\":\"#action_attributes-in-request-test\",\n \"subelements\":[]\n },\n {\"text\":\"GET - atributos con recursividad\",\n \"href\":\"#action_atributos-con-recursividad\",\n \"subelements\":[]\n },\n ]\n\n }\n ]\n },\n {\"text\":\"Examples\",\n \"href\":\"#examples\",\n \"subelements\":[]\n }\n ]\n },\n {\n \"text\":\"Acknowledgements\",\n \"href\":\"#acknowledgements\",\n \"subelements\":[]\n },\n {\n \"text\":\"References\",\n \"href\":\"#references\",\n \"subelements\":[]\n }\n ]\n\n self.check_toc_with_list(self.pq(\"#toc>ul\").children(), li_elements)\n\n\n\n def check_toc_with_list(self,toc_elements,list_elements):\n for child in toc_elements:\n try:\n next_element = list_elements.pop(0)\n except IndexError as e:\n print \"TOC has 
too many elements\"\n assert False\n except Exception as e:\n print e\n assert False\n link = pq(child).children(\"a\")\n self.assertEqual(pq(link).attr[\"href\"], next_element[\"href\"])\n self.assertEqual(pq(link).text().strip(), next_element[\"text\"].strip())\n\n\n if(len(next_element[\"subelements\"])):\n ##recursive\n self.check_toc_with_list(pq(child).children(\"ul\").children(), next_element[\"subelements\"])\n pass\n\n ##list must be empty\n if (len(list_elements)):\n print \"Some TOC elements have benn not appeared\"\n print list_elements\n\nsuite = unittest.TestLoader().loadTestsFromTestCase(TestTOCJSON)\nunittest.TextTestRunner(verbosity=2).run(suite)", "id": "6692205", "language": "Python", "matching_score": 6.9489545822143555, "max_stars_count": 0, "path": "fiware_api_blueprint_renderer/tests/test_toc/test_toc_order.py" }, { "content": "import unittest\nfrom os import path\nimport os\nimport shutil\nimport sys\nimport json\nfrom subprocess import Popen, PIPE, call\nimport pprint\nfrom lxml import etree, objectify\nfrom lxml.cssselect import CSSSelector\nimport pprint\nfrom pyquery import PyQuery as pq\n\nimport_path = path.abspath(__file__)\n\nwhile path.split(import_path)[1] != 'fiware_api_blueprint_renderer':\n\n import_path = path.dirname(import_path)\n\nsys.path.append(import_path)\n\nfrom src.drafter_postprocessing.order_uri import order_uri_parameters, order_request_parameters\nfrom tests.test_utils import *\nfrom src.renderer import main\n\n\nclass TestParametersHTML(unittest.TestCase):\n __metaclass__ = TestCaseWithExamplesMetaclass\n \n @classmethod\n def setUpClass(cls):\n pathname_ = path.dirname(path.abspath(__file__))\n cls.apib_file = pathname_+\"/api_test.apib\"\n cls.tmp_result_files = \"/var/tmp/test-links-in-reference-160faf1aae1dd41c8f16746ea744f138\"\n\n cls.html_output = cls.tmp_result_files+\"api_test.html\"\n\n if os.path.exists(cls.tmp_result_files):\n shutil.rmtree(cls.tmp_result_files)\n\n os.makedirs(cls.tmp_result_files)\n\n main([\"fabre\", \"-i\", cls.apib_file, \"-o\", \n cls.tmp_result_files, \"--no-clear-temp-dir\"])\n\n parser = etree.HTMLParser()\n cls.tree = etree.parse(\"\"+cls.tmp_result_files+\"/api_test.html\", parser)\n cls.pq = pq(filename = cls.tmp_result_files+\"/api_test.html\")\n\n with open('/var/tmp/fiware_api_blueprint_renderer_tmp/api_test.json', 'r') as f:\n cls.out_json = json.load(f)\n\n @classmethod\n def tearDownClass(cls):\n if os.path.exists(cls.tmp_result_files):\n shutil.rmtree(cls.tmp_result_files)\n\n to_delete = ['/var/tmp/fiware_api_blueprint_renderer_tmp/api_test.apib',\n '/var/tmp/fiware_api_blueprint_renderer_tmp/api_test.extras',\n '/var/tmp/fiware_api_blueprint_renderer_tmp/api_test.json']\n\n for filename in to_delete:\n if os.path.exists(filename):\n os.remove(filename)\n\n\n def test_parameters_name_order(self):\n\n\n parameters_dl = self.pq(\".parameters-title+dl\")\n \n\n check_parameter_dl_order(parameters_dl)\n \n \n def test_parameters_payload_name_order(self):\n\n\n payloads_dl = self.pq(\".action-parameters-table\")\n\n check_payload_dl_order(payloads_dl)\n \n \n\n\n\n\n\n def check_toc_with_list(self,toc_elements,list_elements):\n for child in toc_elements:\n try:\n next_element = list_elements.pop(0)\n except IndexError as e:\n print \"TOC has to much elements\"\n assert False\n except Exception as e:\n print e\n assert False\n link = pq(child).children(\"a\")\n self.assertEqual(pq(link).attr[\"href\"], next_element[\"href\"])\n self.assertEqual(pq(link).text().strip(), 
next_element[\"text\"].strip())\n\n\n if(len(next_element[\"subelements\"])):\n ##recursive\n self.check_toc_with_list(pq(child).children(\"ul\").children(), next_element[\"subelements\"])\n pass\n\n ##list must be empty\n if (len(list_elements)):\n print \"Some TOC elements have benn not appeared\"\n print list_elements\n\n\n\ndef check_parameter_dl_order(parameters_dl):\n for parameter_dl in parameters_dl:\n parameters_name = pq(parameter_dl).find(\".parameter-name\").text().split(\" \")\n \n if not isinstance(parameters_name, basestring):\n assert sorted(parameters_name) == parameters_name\n\n #check_parameter_dl_order(pq(parameter_dl).find(\"dl\")) #not recursive\n\n\ndef check_payload_dl_order(payloads_dl):\n for payload_dl in payloads_dl:\n payloads_name = pq(payload_dl).children(\"dt\").children(\".parameter-name\").text().split(\" \")\n if not isinstance(payloads_name, basestring):\n assert sorted(payloads_name) == payloads_name\n\n\nsuite = unittest.TestLoader().loadTestsFromTestCase(TestParametersHTML)\nunittest.TextTestRunner(verbosity=2).run(suite)", "id": "9857746", "language": "Python", "matching_score": 5.601156711578369, "max_stars_count": 0, "path": "fiware_api_blueprint_renderer/tests/test_ordered_parameters/test_ordered_parameters.py" }, { "content": "import unittest\nfrom os import path\nimport os\nimport shutil\nimport sys\nimport json\nfrom subprocess import Popen, PIPE, call\nimport pprint\nfrom lxml import etree, objectify\nfrom lxml.cssselect import CSSSelector\nfrom pyquery import PyQuery as pq\n\nimport_path = path.abspath(__file__)\n\nwhile path.split(import_path)[1] != 'fiware_api_blueprint_renderer':\n\n import_path = path.dirname(import_path)\n\nsys.path.append(import_path)\n\nfrom src.drafter_postprocessing.order_uri import order_uri_parameters, order_request_parameters\nfrom tests.test_utils import *\n\n\nclass TestVersionLinks(unittest.TestCase):\n __metaclass__ = TestCaseWithExamplesMetaclass\n \n @classmethod\n def setUpClass(cls):\n pass\n\n @classmethod\n def tearDownClass(cls):\n pass\n\n\n @for_examples(\n ('api_test1', [\n 'http://example.com/api/abcedefg',\n 'http://example.com/api/jhdfgh',\n 'http://example.com/api/latest'\n ]),\n ('api_test2', [\n 'http://example.com/api/abcedefg',\n 'http://example.com/api/latest'\n ]),\n\n )\n\n def test_github_apiary_links(self, apib_file_name, expected_urls):\n\n self.render_apib(apib_file_name)\n \n sel = CSSSelector('div#top-source-buttons')\n\n versions_dd = self.pq(\"dd.versionValue\")\n\n for version_dd in versions_dd:\n a_element = pq(version_dd).children(\"a\")\n assert pq(a_element).attr[\"href\"] in expected_urls\n expected_urls.remove(pq(a_element).attr[\"href\"])\n\n self.assertEqual(len(expected_urls), 0)\n\n\n self.del_apib_files(apib_file_name)\n\n\n\n\n def del_apib_files(self,apib_file):\n if os.path.exists(self.tmp_result_files):\n shutil.rmtree(self.tmp_result_files)\n\n to_delete = ['/var/tmp/fiware_api_blueprint_renderer_tmp/'+apib_file+'.apib',\n '/var/tmp/fiware_api_blueprint_renderer_tmp/'+apib_file+'.extras',\n '/var/tmp/fiware_api_blueprint_renderer_tmp/'+apib_file+'.json']\n\n for filename in to_delete:\n if os.path.exists(filename):\n os.remove(filename)\n\n\n def render_apib(self,apib_file):\n pathname_ = path.dirname(path.abspath(__file__))\n self.apib_file = pathname_+\"/\"+apib_file+\".apib\"\n self.tmp_result_files = \"/var/tmp/test-links-in-reference-160faf1aae1dd41c8f16746ea744f138\"\n\n self.html_output = self.tmp_result_files+\"/\"+apib_file+\".html\"\n\n if 
os.path.exists(self.tmp_result_files):\n shutil.rmtree(self.tmp_result_files)\n\n os.makedirs(self.tmp_result_files)\n\n Popen([\"fabre\", \"-i\", self.apib_file, \"-o\", \n self.tmp_result_files, \"--no-clear-temp-dir\"], stdout=PIPE, stderr=PIPE).communicate()\n\n parser = etree.HTMLParser()\n self.tree = etree.parse(\"\"+self.tmp_result_files+\"/\"+apib_file+\".html\", parser)\n self.pq = pq(filename = self.tmp_result_files+\"/\"+apib_file+\".html\")\n\n with open('/var/tmp/fiware_api_blueprint_renderer_tmp/'+apib_file+'.json', 'r') as f:\n self.out_json = json.load(f)\n\nsuite = unittest.TestLoader().loadTestsFromTestCase(TestVersionLinks)\nunittest.TextTestRunner(verbosity=2).run(suite)", "id": "295988", "language": "Python", "matching_score": 7.50969123840332, "max_stars_count": 0, "path": "fiware_api_blueprint_renderer/tests/test_version_links/test_version_links.py" }, { "content": "import unittest\nfrom os import path\nimport os\nimport shutil\nimport sys\nimport json\nfrom subprocess import Popen, PIPE, call\nimport pprint\nfrom lxml import etree, objectify\nfrom lxml.cssselect import CSSSelector\nfrom pyquery import PyQuery as pq\n\n\nimport_path = path.abspath(__file__)\n\nwhile path.split(import_path)[1] != 'fiware_api_blueprint_renderer':\n\n import_path = path.dirname(import_path)\n\nsys.path.append(import_path)\n\nfrom src.drafter_postprocessing.order_uri import order_uri_parameters, order_request_parameters\nfrom tests.test_utils import *\nfrom src.renderer import main\n\n\n\nclass TestLinksGithubApiary(unittest.TestCase):\n __metaclass__ = TestCaseWithExamplesMetaclass\n \n @classmethod\n def setUpClass(cls):\n pass\n\n @classmethod\n def tearDownClass(cls):\n pass\n \"\"\"\n @for_examples(\n ('api_test1', ['http://docs.test5950.apiary.io/#reference','http://github.com/telefonicaid/fiware-orion.git']),\n ('api_test2', ['http://docs.test5950.apiary.io/#reference']),\n ('api_test3', ['http://github.com/telefonicaid/fiware-orion.git']),\n ('api_test4', []),\n )\n \"\"\"\n @for_examples(\n ('api_test1', ['http://docs.test5950.apiary.io/#reference','http://github.com/telefonicaid/fiware-orion.git']),\n ('api_test2', ['exception','GITHUB_SOURCE' ]),\n ('api_test3', ['exception','APIARY_PROJECT']),\n ('api_test4', ['exception','APIARY_PROJECT']),\n )\n\n def test_github_apiary_links(self, apib_file_name, expected_urls):\n\n\n if 'exception' == expected_urls[0]:\n\n try:\n self.render_apib(apib_file_name)\n except Exception, e:\n err_msg = 'Metadata ' + expected_urls[1] + ' not provided'\n self.assertEqual(err_msg, e.message)\n return\n \n self.render_apib(apib_file_name)\n sel = CSSSelector('div#top-source-buttons')\n\n\n links=self.pq('div#top-source-buttons').children()\n\n for link in links:\n try:\n url = expected_urls.pop(0)\n except IndexError as e:\n print \"APIB has to much links in #top-source-buttons div\"\n assert False\n except Exception as e:\n print e\n assert False\n self.assertEqual(pq(link).attr[\"href\"], url)\n\n\n self.assertEqual(len(expected_urls), 0)\n\n\n self.del_apib_files(apib_file_name)\n\n\n\n\n def del_apib_files(self,apib_file):\n if os.path.exists(self.tmp_result_files):\n shutil.rmtree(self.tmp_result_files)\n\n to_delete = ['/var/tmp/fiware_api_blueprint_renderer_tmp/'+apib_file+'.apib',\n '/var/tmp/fiware_api_blueprint_renderer_tmp/'+apib_file+'.extras',\n '/var/tmp/fiware_api_blueprint_renderer_tmp/'+apib_file+'.json']\n\n for filename in to_delete:\n if os.path.exists(filename):\n os.remove(filename)\n\n\n def render_apib(self,apib_file):\n 
pathname_ = path.dirname(path.abspath(__file__))\n self.apib_file = pathname_+\"/\"+apib_file+\".apib\"\n self.tmp_result_files = \"/var/tmp/test-links-in-reference-160faf1aae1dd41c8f16746ea744f138\"\n\n self.html_output = self.tmp_result_files+\"/\"+apib_file+\".html\"\n\n if os.path.exists(self.tmp_result_files):\n shutil.rmtree(self.tmp_result_files)\n\n os.makedirs(self.tmp_result_files)\n\n main([\"fabre\", \"-i\", self.apib_file, \"-o\", \n self.tmp_result_files, \"--no-clear-temp-dir\"])\n \n\n parser = etree.HTMLParser()\n self.tree = etree.parse(\"\"+self.tmp_result_files+\"/\"+apib_file+\".html\", parser)\n self.pq = pq(filename = self.tmp_result_files+\"/\"+apib_file+\".html\")\n\n with open('/var/tmp/fiware_api_blueprint_renderer_tmp/'+apib_file+'.json', 'r') as f:\n self.out_json = json.load(f)\n\nsuite = unittest.TestLoader().loadTestsFromTestCase(TestLinksGithubApiary)\nunittest.TextTestRunner(verbosity=2).run(suite)", "id": "9051716", "language": "Python", "matching_score": 4.1933674812316895, "max_stars_count": 0, "path": "fiware_api_blueprint_renderer/tests/test_links_github_apiary/test_links_github_apiary.py" }, { "content": "# -*- coding: utf-8 -*-\nimport unittest\nfrom os import path\nimport os\nimport sys\nimport shutil\nimport json\nfrom subprocess import Popen, PIPE, call\nimport pprint\nimport codecs\n\nimport_path = path.abspath(__file__)\n\nwhile path.split(import_path)[1] != 'fiware_api_blueprint_renderer':\n\n import_path = path.dirname(import_path)\n\nsys.path.append(import_path)\n\n\nfrom tests.test_utils import *\n\nspecial_section_test = None\n\ndata_test_path=os.path.dirname(path.abspath(__file__))+\"/special_sections.json\"\n\nwith open(data_test_path, 'r') as f: \n special_section_test = json.load(f)\n\n\nclass TestSpecialSectionsInJSON(unittest.TestCase):\n __metaclass__ = TestCaseWithExamplesMetaclass\n\n @classmethod\n def setUpClass(cls):\n pathname_ = path.dirname(path.abspath(__file__))\n cls.apib_file = pathname_+\"/api_test.apib\"\n cls.tmp_result_files = \"/var/tmp/test-links-in-reference-160faf1aae1dd41c8f16746ea744f138\"\n\n if os.path.exists(cls.tmp_result_files):\n shutil.rmtree(cls.tmp_result_files)\n\n os.makedirs(cls.tmp_result_files)\n\n Popen([\"fabre\", \"-i\", cls.apib_file, \"-o\", \n cls.tmp_result_files, \"--no-clear-temp-dir\"], stdout=PIPE, stderr=PIPE).communicate()\n\n with codecs.open('/var/tmp/fiware_api_blueprint_renderer_tmp/api_test.json', 'r', encoding='UTF-8') as f:\n doc = f.read()\n doc.decode(encoding=\"UTF-8\")\n cls.out_json = json.loads(doc, encoding='UTF-8')\n\n\n @classmethod\n def tearDownClass(cls):\n if os.path.exists(cls.tmp_result_files):\n shutil.rmtree(cls.tmp_result_files)\n\n to_delete = ['/var/tmp/fiware_api_blueprint_renderer_tmp/api_test.apib',\n '/var/tmp/fiware_api_blueprint_renderer_tmp/api_test.extras',\n '/var/tmp/fiware_api_blueprint_renderer_tmp/api_test.json']\n\n for filename in to_delete:\n if os.path.exists(filename):\n os.remove(filename)\n\n\n\n def test_special_sections_in_json(self):\n \n _json_special_section = self.out_json[\"api_metadata\"][\"subsections\"][0][\"subsections\"]\n _special_section = json.dumps(special_section_test)\n _special_section = json.loads(_special_section)\n self.assertEqual(len(_json_special_section), len(_special_section))\n for section in _json_special_section:\n expected_value = _special_section[section[\"id\"]].encode('latin-1')\n obtained_value =section[\"body\"].encode('latin-1')\n self.assertEqual(expected_value, obtained_value) 
\n\n\n\n\n\n\n\n\n\nsuite = unittest.TestLoader().loadTestsFromTestCase(TestSpecialSectionsInJSON)\nunittest.TextTestRunner(verbosity=2).run(suite)", "id": "2413428", "language": "Python", "matching_score": 5.982567310333252, "max_stars_count": 0, "path": "fiware_api_blueprint_renderer/tests/test_special_sections/test_special_sections_in_json.py" }, { "content": "# -*- coding: utf-8 -*-\nimport unittest\nfrom os import path\nimport os\nimport sys\nimport shutil\nimport json\nfrom subprocess import Popen, PIPE, call\nimport pprint\nimport codecs\nimport copy\n\nimport_path = path.abspath(__file__)\n\nwhile path.split(import_path)[1] != 'fiware_api_blueprint_renderer':\n\n import_path = path.dirname(import_path)\n\nsys.path.append(import_path)\n\n\nfrom tests.test_utils import *\n\n\n\nclass TestAdditionalSectionsInJSON(unittest.TestCase):\n __metaclass__ = TestCaseWithExamplesMetaclass\n\n @classmethod\n def setUpClass(cls):\n pathname_ = path.dirname(path.abspath(__file__))\n cls.apib_file = pathname_+\"/api_test.apib\"\n cls.tmp_result_files = \"/var/tmp/test-links-in-reference-160faf1aae1dd41c8f16746ea744f138\"\n\n if os.path.exists(cls.tmp_result_files):\n shutil.rmtree(cls.tmp_result_files)\n\n os.makedirs(cls.tmp_result_files)\n\n Popen([\"fabre\", \"-i\", cls.apib_file, \"-o\", \n cls.tmp_result_files, \"--no-clear-temp-dir\"], stdout=PIPE, stderr=PIPE).communicate()\n\n with codecs.open('/var/tmp/fiware_api_blueprint_renderer_tmp/api_test.json', 'r', encoding='UTF-8') as f:\n doc = f.read()\n doc.decode(encoding=\"UTF-8\")\n cls.out_json = json.loads(doc, encoding='UTF-8')\n\n\n data_test_path=os.path.dirname(path.abspath(__file__))+\"/additional_sections.json\"\n\n with open(data_test_path, 'r') as f: \n cls.additional_section_test = json.load(f)\n\n @classmethod\n def tearDownClass(cls):\n if os.path.exists(cls.tmp_result_files):\n shutil.rmtree(cls.tmp_result_files)\n\n to_delete = ['/var/tmp/fiware_api_blueprint_renderer_tmp/api_test.apib',\n '/var/tmp/fiware_api_blueprint_renderer_tmp/api_test.extras',\n '/var/tmp/fiware_api_blueprint_renderer_tmp/api_test.json']\n\n for filename in to_delete:\n if os.path.exists(filename):\n os.remove(filename)\n\n\n\n def test_additional_sections_in_json(self):\n \n _json_sections = self.out_json[\"api_metadata\"][\"subsections\"]\n\n _json_test = copy.deepcopy(self.additional_section_test)\n \n self.check_and_remove_section_from_json(_json_sections, _json_test)\n\n self.assertEqual(0,len( _json_test))\n\n\n def check_and_remove_section_from_json(self, subsections,expected_values):\n for subsection in subsections:\n self.assertEqual(expected_values[subsection[\"id\"]],\n subsection[\"body\"])\n if len(subsection[\"subsections\"]):\n self.check_and_remove_section_from_json(subsection[\"subsections\"], expected_values)\n del expected_values[subsection[\"id\"]]\n\n\n\n def generate_test_example_base(self):\n _json={}\n subsections = self.out_json[\"api_metadata\"][\"subsections\"]\n\n add_subsections_to_json(subsections,_json)\n\n data_test_path=os.path.dirname(path.abspath(__file__))+\"/additional_sections.json\"\n \n with open(data_test_path, \"w\") as outfile:\n json.dump(_json, outfile, indent=4)\n \n\n\ndef add_subsections_to_json(subsections,json_var):\n\n for subsection in subsections:\n \n json_var[subsection[\"id\"]]=subsection[\"body\"]\n if len(subsection[\"subsections\"]):\n add_subsections_to_json(subsection[\"subsections\"], json_var)\n\n\n\n\nsuite = 
unittest.TestLoader().loadTestsFromTestCase(TestAdditionalSectionsInJSON)\nunittest.TextTestRunner(verbosity=2).run(suite)", "id": "4144028", "language": "Python", "matching_score": 0.8519558906555176, "max_stars_count": 0, "path": "fiware_api_blueprint_renderer/tests/test_aditional_sections/test_aditional_sections.py" }, { "content": "from ..apib_extra_parse_utils import parse_to_markdown\n\ndef parse_meta_data(filepath):\n \"\"\"Parses API metadata and returns the result in a JSON object\n \n Arguments: \n filepath -- File with extra sections\n \"\"\"\n metadata = create_json_section(\"root\", \"\")\n \n with open(filepath, 'rU') as file_:\n last_position_read = parse_metadata_subsections(filepath, metadata)\n\n file_.seek(last_position_read)\n line = file_.readline()\n while(line):\n last_position_read = parse_metadata_subsections(filepath, metadata, last_position_read)\n file_.seek(last_position_read)\n line = file_.readline()\n\n return metadata\n\n\ndef create_json_section(section_markdown_title, section_body):\n \"\"\"Creates a JSON\n \n Arguments:\n section_markdown_title -- Markdown title of the section\n section_body -- body of the subsection\n \"\"\"\n section_title = section_markdown_title.lstrip('#').strip()\n\n section = {}\n section[\"id\"] = get_markdown_title_id( section_title )\n section[\"name\"] = section_title\n section[\"body\"] = parse_to_markdown(section_body)\n\n section[\"subsections\"] = []\n\n return section\n\n\ndef parse_metadata_subsections(filepath, parent_section_JSON, last_pos=0):\n \"\"\"Generates a JSON tree of nested metadata sections\n\n Arguments:\n filepath -- Name and path of the file to iterate over\n parent_section_JSON -- JSON object representing the current parent section\n last_pos -- Last byte position read in the file \n \"\"\"\n \n previous_pos = last_pos\n with open(filepath, 'rU') as file_descriptor:\n\n file_descriptor.seek(previous_pos)\n line = file_descriptor.readline()\n pos = file_descriptor.tell()\n\n # EOF case\n if line:\n if line.startswith('#'):\n\n section_name = line\n (body, previous_pos) = get_subsection_body(filepath, pos)\n\n section_JSON = create_json_section(section_name, body)\n parent_section_JSON['subsections'].append(section_JSON)\n\n section_level = get_heading_level(section_name)\n\n file_descriptor.seek(previous_pos)\n line = file_descriptor.readline()\n pos = file_descriptor.tell() \n next_section_level = get_heading_level(line)\n\n if section_level == next_section_level: # Section sibling\n previous_pos = parse_metadata_subsections(filepath, parent_section_JSON, last_pos=previous_pos) \n elif section_level < next_section_level: # Section child\n previous_pos = parse_metadata_subsections(filepath, section_JSON, last_pos=previous_pos) \n else: # Not related to current section\n return previous_pos\n\n file_descriptor.seek(previous_pos)\n next_line = file_descriptor.readline()\n pos = file_descriptor.tell()\n\n if next_line :\n next_section_level = get_heading_level(next_line)\n if section_level == next_section_level: # Section sibling\n previous_pos = parse_metadata_subsections(filepath, parent_section_JSON, last_pos=previous_pos)\n else: # Not related to current section\n pass \n\n return previous_pos\n\n\ndef get_subsection_body(filepath, last_position):\n \"\"\"Reads the given file until a Markdown header is found and returns the bytes read\n\n Arguments:\n filepath -- Path of the file to iterate over\n position -- Descriptor of the file being read\"\"\"\n\n with open(filepath, 'rU') as file_descriptor:\n body 
= ''\n previous_pos = last_position\n file_descriptor.seek(previous_pos)\n line = file_descriptor.readline()\n pos = file_descriptor.tell()\n\n while line and not line.startswith('#'):\n body += line\n\n previous_pos = pos\n line = file_descriptor.readline()\n pos = file_descriptor.tell()\n\n return (body, previous_pos)\n\n\ndef get_markdown_title_id(section_title):\n \"\"\"Returns the HTML equivalent id from a section title\n \n Arguments: \n section_title -- Section title\n \"\"\"\n return section_title.replace(\" \", \"_\").lower()\n\n\ndef get_heading_level(heading):\n \"\"\"Returns the level of a given Markdown heading\n \n Arguments:\n heading -- Markdown title \n \"\"\"\n i = 0\n while( i < len(heading) and heading[i] == '#' ):\n i += 1\n\n return i", "id": "4139280", "language": "Python", "matching_score": 2.6517515182495117, "max_stars_count": 1, "path": "fiware_api_blueprint_renderer/src/drafter_postprocessing/metadata.py" }, { "content": "#!/usr/bin/env python\n\nfrom collections import OrderedDict\nimport inspect\nimport json\nimport os\nimport re\nimport shutil\nimport io\nfrom subprocess import call, Popen, PIPE\nimport sys, getopt\nimport pkg_resources\nimport subprocess\n\nfrom jinja2 import Environment, FileSystemLoader\n\nfrom drafter_postprocessing.json_processing import postprocess_drafter_json\nfrom apib_extra_parse_utils import preprocess_apib_parameters_lines, start_apib_section, get_indentation\n\n\nclass MandatoryFieldException(Exception):\n '''raised when a mandatory field or section is missing'''\n pass\n\ndef print_api_spec_title_to_extra_file(input_file_path, extra_sections_file_path):\n \"\"\"Extracts the title of the API specification and writes it to the extra sections file.\n\n Arguments:\n input_file_path -- File with the API specification\n extra_sections_file_path -- File where we will write the extra sections\n \"\"\"\n with open(input_file_path, 'rU') as input_file_path, open(extra_sections_file_path, 'w') as extra_sections_file:\n line = input_file_path.readline()\n while (line != \"\" and not line.startswith(\"# \")):\n line = input_file_path.readline()\n \n extra_sections_file.write( line )\n\n\ndef separate_extra_sections_and_api_blueprint(input_file_path, extra_sections_file_path, API_blueprint_file_path):\n \"\"\"Divides a Fiware API specification into extra sections and its API blueprint.\n\n Arguments:\n input_file_path -- A Fiware API specification file.\n extra_sections_file_path -- Resulting file containing extra information about the API specification.\n API_blueprint_file_path -- Resulting file containing the API blueprint of the Fiware API.\n \"\"\"\n print_api_spec_title_to_extra_file(input_file_path, extra_sections_file_path) \n\n with open(input_file_path, 'rU') as input_file, open(extra_sections_file_path, 'a') as extra_sections_file, open(API_blueprint_file_path, 'w') as API_blueprint_file:\n \n line_counter = 0\n title_line_end = -1\n apib_line_start = -1\n\n metadata_section = True\n apib_part = False\n title_section = False\n parameters_section = False\n data_structures_section = 0\n\n for line in input_file:\n line_counter += 1\n\n copy = False\n\n if metadata_section and len(line.split(':')) == 1:\n metadata_section = False\n title_section = True\n \n if metadata_section:\n copy = False\n else:\n if title_section and line.startswith('##'):\n title_section = False\n\n if title_section:\n copy = False\n\n else:\n if not apib_part:\n apib_part = start_apib_section(line)\n if title_line_end < 0:\n title_line_end = line_counter\n \n 
if not apib_part:\n copy = True\n else:\n copy = False\n if apib_line_start < 0:\n if line.strip() == \"# REST API\":\n line = '\\n'\n apib_line_start = line_counter\n\n if copy:\n extra_sections_file.write(line)\n else:\n line = line.replace('\\t',' ')\n (line, parameters_section, data_structures_section) = preprocess_apib_parameters_lines(line, parameters_section, \n data_structures_section)\n API_blueprint_file.write(line)\n\n return (title_line_end, apib_line_start)\n\n\ndef convert_message_error_lines(drafter_output, title_line_end, apib_line_start):\n \"\"\"Convert the error lines to match the extended FIWARE APIB file format\n\n Arguments:\n drafter_output -- Text with drafter postprocessing output\n title_line_end -- Line where the specification title ends\n apib_line_start -- Line where the specification of the API starts\n \"\"\"\n\n line_error_regex = re.compile( \"line (\\d+),\" )\n\n line_error_matches = line_error_regex.findall(drafter_output)\n if line_error_matches:\n line_error_set = set(line_error_matches)\n for line_error in line_error_set:\n if line_error >= apib_line_start:\n line_error_substitute = int(line_error) - title_line_end + apib_line_start\n drafter_output = drafter_output.replace(\"line {},\".format(line_error), \"line {},\".format(line_error_substitute))\n\n return drafter_output\n\n\n\ndef parse_api_blueprint_with_drafter(API_blueprint_file_path, API_blueprint_JSON_file_path, title_line_end, apib_line_start):\n \"\"\"Parse the API Blueprint file with the API specification and save the output to a JSON file\n\n Arguments:\n API_blueprint_file_path -- An API Blueprint definition file \n API_blueprint_JSON_file_path -- Path to JSON file\n title_line_end -- Line where the specification title ends. Needed to reconvert error messages from drafter.\n apib_line_start -- Line where the specification of the API starts. 
Needed to reconvert error messages from drafter.\n \"\"\"\n\n command_call = [\"drafter\", API_blueprint_file_path, \"--output\", API_blueprint_JSON_file_path, \"--format\", \"json\", \"--use-line-num\"]\n [_, execution_error_output] = Popen(command_call, stderr=PIPE).communicate()\n\n print convert_message_error_lines(execution_error_output, title_line_end, apib_line_start)\n\n\ndef generate_metadata_dictionary(metadata_section):\n \"\"\"Generates a metadata section as a dictionary from a non-dictionary section\n \n Arguments:\n metadata_section -- Source metadata section\n \"\"\"\n metadata_section_dict = {}\n metadata_section_dict['id'] = metadata_section['id']\n metadata_section_dict['name'] = metadata_section['name']\n metadata_section_dict['body'] = metadata_section['body']\n metadata_section_dict['subsections'] = OrderedDict()\n\n for subsection in metadata_section['subsections']:\n metadata_section_dict['subsections'][subsection['name']] = generate_metadata_dictionary(subsection)\n\n return metadata_section_dict\n\n\ndef copy_static_files(template_dir_path, dst_dir_path):\n \"\"\"Copies the static files used by the resulting rendered site\n \n Arguments:\n template_dir_path -- path to the template directory\n dst_dir_path -- destination directory\n \"\"\"\n subdirectories = ['/css', '/js', '/img']\n\n for subdirectory in subdirectories:\n if os.path.exists(dst_dir_path + subdirectory):\n shutil.rmtree(dst_dir_path + subdirectory)\n shutil.copytree(template_dir_path + subdirectory, dst_dir_path + subdirectory, ignore=shutil.ignore_patterns('*.pyc', '*.py'))\n\n\ndef check_all_metadata_exists(context_file_path):\n with open(context_file_path, \"rU\") as contextFile:\n metadata = json.load(contextFile)\n\n metadata_list = {}\n for metadata_value in metadata['metadata']:\n metadata_list[metadata_value['name']] = metadata_value['value']\n \n forced_metadata = ['HOST','TITLE','DATE','VERSION','APIARY_PROJECT','SPEC_URL','GITHUB_SOURCE']\n\n for forced_metadatum in forced_metadata:\n if not metadata_list.has_key(forced_metadatum):\n err_msg = 'Metadata ' + forced_metadatum + ' not provided'\n raise MandatoryFieldException(err_msg)\n\n\ndef check_required_sections(context_file_path):\n with open(context_file_path, \"rU\") as contextFile:\n metadata = json.load(contextFile)\n\n required_sections=['Copyright', 'License']\n existing_sections =[]\n for subsection in metadata['api_metadata']['subsections'][0]['subsections']:\n existing_sections.append(subsection['name'])\n \n \n for required_section in required_sections:\n if required_section not in existing_sections:\n err_msg = 'Section ' + required_section + ' not provided' \n raise MandatoryFieldException(err_msg) \n\n\n\ndef render_api_blueprint(template_file_path, context_file_path, dst_dir_path):\n \"\"\"Renders an API Blueprint context file with a Jinja2 template.\n \n Arguments: \n template_file_path -- The Jinja2 template path \n context_file_path -- Path to the context file \n dst_dir_path -- Path to save the compiled site\n \"\"\"\n\n env = Environment(extensions=[\"jinja2.ext.do\",], loader=FileSystemLoader(os.path.dirname(template_file_path)))\n env.filters['sort_payload_parameters'] = sort_payload_parameters\n env.filters['contains_common_payload_definitions'] = contains_common_payload_definitions\n template = env.get_template(os.path.basename(template_file_path))\n output = \"\"\n with open(context_file_path, \"rU\") as contextFile:\n output = template.render(json.load(contextFile))\n\n rendered_HTML_filename = 
os.path.splitext(os.path.basename(context_file_path))[0]\n rendered_HTML_path = os.path.join(dst_dir_path, rendered_HTML_filename + \".html\")\n with open(rendered_HTML_path, 'w') as output_file:\n output_file.write(output.encode('utf-8'))\n copy_static_files(os.path.dirname(template_file_path), dst_dir_path)\n\n\ndef create_directory_if_not_exists(dir_path):\n \"\"\"Creates a directory with the given path if it doesn't exists yet\"\"\"\n\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n\n\ndef clear_directory(dir_path):\n \"\"\"Removes all the files on a directory given its path\"\"\"\n \n for file in os.listdir(dir_path):\n file_path = os.path.join(dir_path, file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n except Exception, e:\n print e\n\n\ndef compare_payload_parameter(paramA, paramB):\n \"\"\"Returns a boolean indicating whether paramA < paramB (alphabetically)\n\n Arguments:\n paramA - first operand of the comparison\n paramB - second operand of the comparison\"\"\"\n if( paramA['class'] == \"property\" and \n paramB['class'] == \"property\" \n ):\n if( paramA['content']['name']['literal'] < paramB['content']['name']['literal'] ):\n return -1\n else:\n return 1\n else:\n return 0\n\n\ndef sort_payload_parameters(parameters_list):\n \"\"\"Jinja2 custom filter for ordering a list of parameters\n\n Arguments:\n parameters_list - list of payload parameters given by Drafter\"\"\"\n return sorted(parameters_list, cmp=compare_payload_parameter)\n\n\ndef contains_common_payload_definitions(data_structures):\n \"\"\"Jinja2 custom filter for checking if a data structures list contains common payload definitions\n\n Arguments:\n data_structures - list of data structures\"\"\"\n for data_structure in data_structures.itervalues():\n if data_structure['is_common_payload']:\n return True\n return False\n\n\ndef render_api_specification(API_specification_path, template_path, dst_dir_path, clear_temporal_dir=True, cover=None):\n \"\"\"Renders an API specification using a template and saves it to destination directory.\n \n Arguments: \n API_specification_path -- Path to API Blueprint specification\n template_path -- The Jinja2 template path\n dst_dir_path -- Path to save the compiled site\n clear_temporal_dir -- Flag to clear temporary files generated by the script \n \"\"\"\n\n temp_dir_path = \"/var/tmp/fiware_api_blueprint_renderer_tmp\"\n\n API_specification_file_name = os.path.splitext(os.path.basename(API_specification_path))[0]\n\n\n API_extra_sections_file_path = os.path.join(temp_dir_path, API_specification_file_name + '.extras')\n API_blueprint_file_path = os.path.join(temp_dir_path + '/' + API_specification_file_name + '.apib')\n API_blueprint_JSON_file_path = os.path.join(temp_dir_path + '/' + API_specification_file_name + '.json')\n \n create_directory_if_not_exists(temp_dir_path)\n (title_line_end, apib_line_start) = separate_extra_sections_and_api_blueprint(API_specification_path, \n API_extra_sections_file_path, \n API_blueprint_file_path)\n\n parse_api_blueprint_with_drafter(API_blueprint_file_path, API_blueprint_JSON_file_path, title_line_end, apib_line_start)\n \n is_PDF = cover is not None\n postprocess_drafter_json(API_blueprint_JSON_file_path,API_blueprint_file_path,API_extra_sections_file_path, is_PDF)\n \n\n \n check_all_metadata_exists(API_blueprint_JSON_file_path)\n check_required_sections(API_blueprint_JSON_file_path)\n\n \n render_api_blueprint(template_path, API_blueprint_JSON_file_path, dst_dir_path)\n\n if is_PDF: #cover needed for pdf\n 
cover_json_path = os.path.join( dst_dir_path + '/' + 'cover' + '.json' )\n shutil.move(API_blueprint_JSON_file_path, cover_json_path)\n render_api_blueprint( cover, cover_json_path, dst_dir_path )\n shutil.move(cover_json_path, API_blueprint_JSON_file_path)\n return\n\n if clear_temporal_dir == True:\n clear_directory( temp_dir_path )\n\n\ndef print_package_dependencies():\n \"\"\"Print the dependencies of package Fabre\"\"\"\n \n print \"\\n# PIP dependencies\\n\"\n dependencies_matrix = [[\"Package\", \"Required version\", \"Installed version\"]]\n\n version_regex = re.compile(\"Version: (.*)\")\n for package in pkg_resources.get_distribution(\"fiware_api_blueprint_renderer\").requires():\n package_header = str(package).split('>=')\n package_name = package_header[0]\n package_required_version = \">= \" + package_header[1]\n package_installed_info = subprocess.check_output(['pip', 'show', package_name])\n package_installed_version = version_regex.search(package_installed_info).group(1) \n\n dependencies_matrix.append([package_name, package_required_version, package_installed_version])\n\n pretty_print_matrix(dependencies_matrix)\n\n print \"\\n\\n# System dependencies\\n\"\n system_dependencies = [('drafter', 'v0.1.9'), ('wkhtmltopdf', '0.12.2.1 (with patched qt)')]\n for (package_name, package_required_version) in system_dependencies:\n package_installed_version = subprocess.check_output([package_name, '--version'])\n\n print \"Name: \"\n print \"\\t%s\\n\" % package_name\n print \"Required version: \"\n print \"\\t%s\\n\" % package_required_version\n print \"Installed version (%s --version): \" % package_name\n for version_line in package_installed_version.split('\\n'):\n print \"\\t%s\" % version_line\n print \"\\n\"\n\n\ndef pretty_print_matrix(matrix):\n \"\"\"Pretty print the given matrix (as a table)\"\"\"\n\n # Retrieve the size of the matrix longest element\n longest_matrix_string_size = 0\n for row in matrix:\n longest_row_string_size = len(max(row, key=len))\n if longest_row_string_size > longest_matrix_string_size:\n longest_matrix_string_size = longest_row_string_size\n\n # Print the matrix as a table\n row_format = \"{:<%i}\" % (longest_matrix_string_size + 2)\n row_format = row_format * len(matrix[0])\n for row in matrix:\n print \"\\t\" + row_format.format(*row)\n\n\ndef main(argv=None): \n \n exit = False\n if argv == None:\n exit = True\n argv = sys.argv\n\n usage = \"Usage: \\n\\t\" + argv[0] + \" -i <api-spec-path> -o <dst-dir> [--pdf] [--no-clear-temp-dir] [--template]\"\n #version = \"fabre \" + pkg_resources.require(\"fiware_api_blueprint_renderer\")[0].version\n \n default_theme = os.path.dirname(__file__)+\"/../themes/default_theme/api-specification.tpl\"\n pdf_template_path= os.path.dirname(__file__)+\"/../themes/default_theme/api-specification.tpl\"\n cover_template_path= os.path.dirname(__file__)+\"/../themes/default_theme/cover.tpl\"\n pdf_toc_xsl = os.path.dirname(__file__)+\"/../themes/default_theme/xsl/toc.xsl\"\n template_path= default_theme\n clear_temporal_dir = True\n API_specification_path = None\n dst_dir_path = None\n temp_pdf_path = \"/var/tmp/fiware_api_blueprint_renderer_tmp_pdf/\"\n pdf = False\n\n try:\n opts, args = getopt.getopt(argv[1:],\"hvi:o:ct:\",[\"version\",\"ifile=\",\"odir=\",\"no-clear-temp-dir\",\"template=\",\"pdf\",\"version-dependencies\"])\n except getopt.GetoptError:\n print usage\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print usage\n sys.exit()\n elif opt in (\"-v\", \"--version\"):\n #print version\n print 
\"fabre \" + pkg_resources.require(\"fiware_api_blueprint_renderer\")[0].version\n sys.exit()\n elif opt == '--version-dependencies':\n #print version\n print \"fabre \" + pkg_resources.require(\"fiware_api_blueprint_renderer\")[0].version\n print_package_dependencies()\n sys.exit()\n elif opt in (\"-i\", \"--input\"):\n API_specification_path = arg\n elif opt in (\"-o\", \"--output\"):\n dst_dir_path = arg\n elif opt in (\"-t\", \"--template\"):\n template_path = arg\n elif opt in (\"-c\", \"--no-clear-temp-dir\"):\n clear_temporal_dir = False\n elif opt in (\"--pdf\"):\n pdf = True\n #if no template is specified, uses the default pdf template\n if not ('-t' in zip(*opts)[0] or '--template' in zip(*opts)[0]):\n template_path = pdf_template_path\n\n\n if API_specification_path is None:\n print \"API specification file must be specified\"\n print usage\n sys.exit(3)\n\n if dst_dir_path is None:\n print \"Destination directory must be specified\"\n print usage\n sys.exit(4)\n\n if pdf:\n create_directory_if_not_exists(temp_pdf_path)\n rendered_HTML_filename = os.path.splitext(os.path.basename(API_specification_path))[0]\n rendered_HTML_path = os.path.join(temp_pdf_path, rendered_HTML_filename + \".html\")\n rendered_HTML_cover = os.path.join(temp_pdf_path, \"cover\" + \".html\")\n\n if \".pdf\" not in dst_dir_path:\n create_directory_if_not_exists(dst_dir_path)\n dst_dir_path = os.path.join(dst_dir_path, rendered_HTML_filename + \".pdf\")\n try:\n render_api_specification(API_specification_path, template_path, temp_pdf_path, clear_temporal_dir, cover_template_path)\n except MandatoryFieldException, e:\n print e.message\n if exit:\n sys.exit(-1)\n else:\n raise e\n\n call( [\"wkhtmltopdf\", '-d', '125', '--page-size','A4', \"cover\", \"file://\"+rendered_HTML_cover, \n \"toc\", \"--xsl-style-sheet\", pdf_toc_xsl, \n \"page\", \"file://\"+rendered_HTML_path, \n '--footer-center', \"Page [page]\",'--footer-font-size', '8', '--footer-spacing', '3',\n '--run-script', \"setInterval(function(){if(document.readyState=='complete') window.status='done';},100)\", \n \"--window-status\", \"done\", dst_dir_path ])\n\n else:\n create_directory_if_not_exists( dst_dir_path )\n try:\n render_api_specification( API_specification_path, template_path, dst_dir_path, clear_temporal_dir, None)\n except MandatoryFieldException, e:\n print e.message\n if exit:\n sys.exit(-1)\n else:\n raise e\n \n if exit:\n sys.exit(0)\n else:\n return\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "10011405", "language": "Python", "matching_score": 1.6552292108535767, "max_stars_count": 0, "path": "fiware_api_blueprint_renderer/src/renderer.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# on_rtd is whether we are on readthedocs.org\nimport os\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\nif not on_rtd: # only import and set the theme if we're building docs locally\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# otherwise, readthedocs.org uses their theme by default, so no need to specify it\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'FIWARE-SDC'\n", "id": "3370846", "language": "Python", "matching_score": 0.1082673892378807, "max_stars_count": 0, "path": "conf.py" }, { "content": "#!/usr/bin/env python\n\n\nimport json\nimport re\nimport sys\nimport shutil\nfrom pprint import pprint\n\nimport 
markdown\nimport mdx_linkify\n\n\ndef parse_to_markdown(markdown_text):\n \"\"\"Parse Markdown text to HTML\n\n Arguments:\n markdown_text -- String to be parsed into HTML format\n \"\"\"\n\n extensions_list = ['linkify','markdown.extensions.tables','markdown.extensions.fenced_code']\n\n try:\n parsed_text = markdown.markdown(markdown_text.decode('utf-8'), extensions=extensions_list)\n\n except (UnicodeEncodeError, UnicodeDecodeError) as encoding_error:\n parsed_text = markdown.markdown(markdown_text, extensions=extensions_list)\n\n parsed_text = escape_code_sections(parsed_text)\n\n return parsed_text\n\n\ndef escape_code_sections(html_text):\n \"\"\"Parse HTML code sections to escape them\n\n Arguments:\n html_text -- String to find code sections\n \"\"\"\n\n code_regex = re.compile(r\"<code>(.*?)</code>\")\n\n code_matches = code_regex.findall(html_text)\n if code_matches:\n for code_match in code_matches:\n html_text = html_text.replace( code_match[0], code_match[0].replace(\"<\", \"&lt;\"))\n\n return html_text\n\n\n\ndef get_indentation(line):\n \"\"\"Returns the indentation (number of spaces and tabs at the begining) of a given line\"\"\"\n i = 0\n while (i < len(line) and (line[i] == ' ' or line[i] == '\\t')):\n i += 1\n return i\n\n\ndef get_parameter_value_list(file_descriptor, param_regex):\n\n\tmember_regex = re.compile(\"^[ \\t]*[+|-][ ]([^ +-]*)[ ]*-?(.*)$\")\n\n\tline = file_descriptor.readline()\n\t\n\tvalue_list = []\n\twhile (line and line.strip(' \\n') and not param_regex.match(line)):\n\n\t\tmember_match = member_regex.match(line)\n\t\tif member_match:\n\t\t\tvalue_list.append({\"name\":member_match.group(1), \"description\":member_match.group(2)})\n\t\t\n\t\t\n\t\tline = file_descriptor.readline()\n\n\t\twhile (line.strip(' \\n\\t') \n\t\t\tand not line.startswith('+')\n\t\t\tand not line.startswith('-')\n\t\t\tand not line.startswith('#')\n\t\t\tand not member_regex.match(line)\n\t\t\tand not param_regex.match(line)\n\t\t):\n\t\t\tvalue_list[-1][\"description\"] += line\n\t\t\tline = file_descriptor.readline()\n\n\n\treturn (line, value_list)\n\n\ndef get_parameters_with_values(file_descriptor, param_keyword_regex, param_regex, members_keyword_regex):\n\n\tmember_regex = re.compile(\"^[ \\t]*[+|-][ ]([^ +-]*)[ ]*-?(.*)$\")\n\n\tline = file_descriptor.readline()\n\t\n\tparameters_with_values = []\n\twhile (line):\n\n\t\tparam_match = param_regex.match(line)\n\n\t\tif param_match:\n\t\t\tparameter = param_match.group(1)\n\t\t\tline = file_descriptor.readline()\n\n\t\t\tif members_keyword_regex.match(line):\n\t\t\t\t(line, value_list) = get_parameter_value_list(file_descriptor, param_regex)\n\n\t\t\t\tif value_list:\n\t\t\t\t\tparameters_with_values.append({\"name\": parameter, \"values\": value_list})\n\n\t\telse:\n\t\t\tif line.startswith('+') or line.startswith('-') or line.startswith('#'):\n\t\t\t\tbreak\n\n\t\t\tline = file_descriptor.readline()\n\n\treturn (line, parameters_with_values)\n\n\ndef get_header_nested_parameter_values_description(file_descriptor, header_regex, param_keyword_regex, param_regex, members_keyword_regex):\n\n\tline = file_descriptor.readline()\n\t\t\n\tparameter_values = []\n\twhile (line and not header_regex.match(line)):\n\n\t\tparam_keyword_match = param_keyword_regex.match(line)\n\n\t\tif param_keyword_match:\n\t\t\t(line, parameter_values) = get_parameters_with_values(file_descriptor, param_keyword_regex, param_regex, members_keyword_regex)\n\t\t\n\t\t\tif parameter_values:\n\t\t\t\tbreak\n\t\telse:\n\t\t\tline = 
file_descriptor.readline()\n\n\treturn (line, parameter_values)\n\n\ndef get_nested_parameter_values_description(filename):\n\n\theader_regex = re.compile(\"^(#+)[ ]*(.*)$\")\n\tparam_keyword_regex = re.compile(\"^[+|-][ ]Parameters[ ]*$\")\n\tparam_regex = re.compile(\"^[ \\t]*[+|-][ ]([^ \\(\\)]*)[ ][^\\(\\)]*\\(.*\\).*$\")\n\tmembers_keyword_regex = re.compile(\"^([^+-]*)[+|-][ ]Members[ ]*$\")\n\n\tnested_description_list = []\n\twith open(filename, 'r') as read_file:\n\t\t\n\t\tline = read_file.readline()\n\t\twhile line:\n\n\t\t\theader_match = header_regex.match(line)\n\n\t\t\tif header_match:\n\t\t\t\tcurrent_parent = line.strip()\n\n\t\t\t\t(line, nested_description) = get_header_nested_parameter_values_description(read_file, header_regex, param_keyword_regex, param_regex, members_keyword_regex)\n\t\t\t\n\t\t\t\tif nested_description:\n\t\t\t\t\tnested_description_list.append({ \"parent\": current_parent, \"parameters\": nested_description })\n\t\t\telse: \n\t\t\t\tline = read_file.readline()\n\t\t\t\t\n\treturn nested_description_list\t\n\n\ndef parse_property_member_declaration(property_member_declaration_string):\n \"\"\" Utility to parse the declaration of a property member into custom JSON. Based on the MSON specification. \"\"\"\n \n\n # Store MSON reserved words for the parsing below.\n # We are interested in the type attribute reserved keywords in order to know whether \n # a property member is required or optional.\n reserved_keywords = {}\n reserved_keywords['type_attribute'] = ['required', 'optional', 'fixed', 'sample', 'default']\n\n if property_member_declaration_string == '': return {}\n\n # Parse the line in order to get the following fields:\n # - property_name: The name given to the property\n # - values: Possbile values the property can have\n # - type_definition_list: The list with the technical definition of the property. 
Since this\n # list is unordered, we will parse it later to find the needed keywords.\n # - description: The text provided to describe the context of the property.\n regex_string = \"^[ ]*[-|+][ ](?P<property_name>\\w+)[ ]*(?:[[: ](?P<values>[\\w, ]*)]?[ ]*\\((?P<type_definition_list>[\\w\\W ]+)\\))?[ ]*(?:[-](?P<property_description>[ \\w\\W]+))?\\Z\"\n declaration_regex = re.compile(regex_string)\n\n declaration_match = declaration_regex.match(property_member_declaration_string)\n declaration_dict = declaration_match.groupdict()\n \n property_declaration={}\n property_declaration['name'] = declaration_dict['property_name']\n property_declaration['description'] = declaration_dict['property_description']\n property_declaration['subproperties'] = []\n try:\n property_declaration['values'] = [e.strip(\" \") for e in declaration_dict['values'].split(',')]\n except AttributeError as error:\n property_declaration['values'] = []\n\n if property_declaration['values'] == ['']:\n property_declaration['values'] = []\n\n # Construct the type_definition field from the type_definition_list field retrieved in the\n # regular expression.\n property_declaration['required']=False # Default value for the required attribute\n for type_specification_attribute in declaration_dict['type_definition_list'].split(','):\n # If the current element is not in the type_attributes reserved keywords list, it is\n # the property type specification.\n if type_specification_attribute.strip() not in reserved_keywords['type_attribute']:\n property_declaration['type'] = type_specification_attribute.strip()\n else:\n if type_specification_attribute.strip() == 'required': property_declaration['required']=True\n\n return property_declaration\n\n\ndef escape_parenthesis_in_parameter_description(parameter_definition):\n \"\"\"Given an APIB parameter definition, escape the parenthesis in its description\n \n Arguments:\n line - string containing the parameter definition.\n \"\"\"\n\n\n parameter_definition_list = parameter_definition.split(' - ', 1)\n if len(parameter_definition_list) > 1:\n parameter_header = parameter_definition_list[0]\n parameter_body = parameter_definition_list[1]\n parameter_body = parse_to_markdown(parameter_body)+ '\\n'\n parameter_body = parameter_body.replace('<p>', \"\")\n parameter_body = parameter_body.replace('</p>', \"\")\n parameter_body = parameter_body.replace('(', \"&#40;\")\n parameter_body = parameter_body.replace(')', \"&#41;\")\n\n return parameter_header + ' - ' + parameter_body\n else:\n return parameter_definition\n\n\ndef preprocess_apib_parameters_lines(line, defining_parameters, defining_data_structure):\n \"\"\"Preprocess a given APIB line if it contains a parameter definition\n\n Arguments:\n line - line to be preprocessed\n defining_parameters - bool indicating whether we are in a parameters section (APIB) or not\n defining_data_structure - int indicating the level we are respecting to data strucutres section\n \"\"\"\n regex_parameter = r\"^[ \\t]*[+|-][ ]([^ +-]*)[ ]*-?(.*)$\"\n\n if defining_data_structure == 0:\n match_result = re.match(r\"^(#*)[ ]Data Structures[ ]*$\", line)\n if match_result:\n defining_data_structure = len(match_result.group(0))\n return (line, defining_parameters, defining_data_structure)\n\n if defining_data_structure > 0:\n if re.match(regex_parameter, line) or re.match(r\"^ *$\", line):\n line = escape_parenthesis_in_parameter_description(line)\n else:\n match_result = re.match(r\"^(#*)[ ].*$\", line)\n if match_result:\n if len(match_result.group(0)) <= 
defining_data_structure:\n # Stop searching for data structures\n defining_data_structure = -1\n\n return (line, defining_parameters, defining_data_structure)\n\n if not defining_parameters:\n if line == '+ Parameters\\n':\n defining_parameters = True\n else:\n if re.match(regex_parameter, line) or re.match(r\"^ *$\", line):\n line = escape_parenthesis_in_parameter_description(line)\n else:\n if (re.match(r\"^[ \\t]*[+|-][ ](Attributes)(.*)$\", line) or\n re.match(r\"^[ \\t]*[+|-][ ](Request)(.*)$\", line) or\n re.match(r\"^[ \\t]*[+|-][ ](Response)(.*)$\", line) or\n re.match(r\"^[ \\t]*#(.*)$\", line)\n ):\n defining_parameters = False\n\n return (line, defining_parameters, defining_data_structure)\n\n\ndef start_apib_section(line):\n \"\"\"Tells if the line indicates the beginning of the apib section.\n\n Arguments:\n line -- Last read line from the FIWARE extended APIB file.\n \"\"\"\n result = False\n\n group_regex = re.compile(\"^#+[ ]Group ([ \\w\\W\\-\\_]*)$\")\n resource_regex = re.compile(\"^#+[ ]([ \\w\\W\\-\\_]*) \\[([ \\w\\W\\-\\_]*)\\]$\")\n direct_URI_regex = re.compile(\"^#+[ ]([ ]*[/][ \\w\\W\\-\\_]*)$\")\n\n\n if (line.strip() == \"# REST API\" \n or line.strip() == \"## Data Structures\"\n or group_regex.match(line)\n or resource_regex.match(line)\n or direct_URI_regex.match(line)\n ):\n\n result = True\n \n return result\n", "id": "10705168", "language": "Python", "matching_score": 3.9496817588806152, "max_stars_count": 0, "path": "fiware_api_blueprint_renderer/src/apib_extra_parse_utils.py" }, { "content": "from collections import deque\n\nfrom ..apib_extra_parse_utils import parse_property_member_declaration\nfrom ..apib_extra_parse_utils import get_indentation\n\ndef parser_json_data_structures(json_content):\n \"\"\"Retrieves data structures definition from JSON file and writes them in an easier to access format\"\"\"\n \n if len(json_content['content']) > 0:\n json_content['data_structures'] = parse_defined_data_structures(json_content['content'][0])\n else:\n json_content['data_structures'] = {}\n\n\n # Add resource level defined data structures\n structures_from_resources = get_data_structures_from_resources(json_content)\n json_content['data_structures'].update(structures_from_resources)\n\n\ndef get_data_structures_from_resources(json_content):\n \"\"\"Retrieve data structures defined in named resources.\n\n Arguments:\n json_content -- JSON object where resources will be analysed\n \"\"\"\n\n data_structures = {}\n\n for resource_group in json_content[\"resourceGroups\"]:\n for resource in resource_group[\"resources\"]:\n\n if resource[\"name\"] == \"\": continue\n\n for content in resource[\"content\"]:\n if content[\"element\"] == \"dataStructure\":\n # Retrieve it if it is not a link to another data structure\n if content[\"typeDefinition\"][\"typeSpecification\"][\"name\"] == 'object': \n attributes = get_data_structure_properties_from_json(content[\"sections\"])\n data_structures[resource[\"name\"]] = {\"attributes\": attributes, \"is_common_payload\": False}\n\n\n return data_structures\n\n\ndef get_data_structure_properties_from_json(data_structure_content):\n \"\"\"Extract simpler representation of properties from drafter JSON representation.\n\n Arguments:\n data_structure_content -- JSON content section of \"dataStructures\" element or nested property\n \"\"\"\n attributes = []\n\n for membertype in data_structure_content:\n if \"content\" not in membertype: return attributes\n \n for property_ in membertype[\"content\"]:\n attribute = {}\n\n 
attribute['name'] = property_['content']['name']['literal']\n attribute['required'] = 'required' in property_['content']['valueDefinition']['typeDefinition']['attributes']\n attribute['type'] = \\\n property_['content']['valueDefinition']['typeDefinition']['typeSpecification']['name']\n attribute['description'] = property_['content']['description']\n try:\n values_string = property_['content']['valueDefinition']['values'][0]['literal']\n attribute['values'] = [e.strip(\" \") for e in values_string.split(',')]\n except IndexError as error:\n attribute['values'] = []\n attribute['subproperties'] = get_data_structure_properties_from_json(property_['content'][\"sections\"])\n\n attributes.append(attribute)\n\n return attributes\n\n\ndef parse_defined_data_structures(data):\n \"\"\"Retrieves data structures definition from JSON fragment and gives them back as Python dict\"\"\"\n data_structure_dict = {}\n\n try:\n if data[\"content\"][0][\"sections\"][0][\"class\"] != u'blockDescription':\n raise ValueError('Unexpected section received.')\n except:\n return data_structure_dict\n\n\n for content in data[\"content\"]:\n data_structure = {}\n data_structure_definition = []\n\n if content[\"sections\"]!=[]:\n data_structure_content = content[\"sections\"][0][\"content\"]\n parse_defined_data_structure_properties(data_structure_definition, deque(data_structure_content.split('\\n')))\n\n data_structure_name = content[\"name\"][\"literal\"]\n data_structure[\"attributes\"] = data_structure_definition\n data_structure[\"is_common_payload\"] = True\n data_structure_dict[data_structure_name] = data_structure\n\n return data_structure_dict\n\n\ndef parse_defined_data_structure_properties(properties_list, remaining_property_lines):\n \"\"\"Parses the properties definitions of a given data structure given its body\n\n Arguments:\n properties_list - List where we'll insert new properties to\n remaining_property_lines - Property definition lines pending to be processed\n \"\"\"\n last_member_indentation = -1\n\n while len(remaining_property_lines) > 0:\n property_member_declaration = remaining_property_lines[0]\n if property_member_declaration != '':\n # Retrieve the indentation of the current property definition.\n current_member_indentation = get_indentation(property_member_declaration)\n if last_member_indentation == -1:\n last_member_indentation = current_member_indentation\n \n # Process the new property as a child, parent or uncle of the last\n # one processed according to their relative line indentations.\n if current_member_indentation == last_member_indentation:\n parsed_attribute_definition = parse_property_member_declaration(property_member_declaration)\n remaining_property_lines.popleft()\n properties_list.append(parsed_attribute_definition)\n elif current_member_indentation > last_member_indentation:\n parse_defined_data_structure_properties(parsed_attribute_definition['subproperties'], remaining_property_lines)\n else:\n return\n else:\n remaining_property_lines.popleft()", "id": "1554528", "language": "Python", "matching_score": 3.1122007369995117, "max_stars_count": 0, "path": "fiware_api_blueprint_renderer/src/drafter_postprocessing/data_structures.py" }, { "content": "#!/usr/bin/env python\n\nimport json\nimport sys\nfrom os import path\nimport re\n\nfrom markdown.extensions.toc import slugify\n\nif __package__ is None:\n sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )\n from apib_extra_parse_utils import get_nested_parameter_values_description\n from 
apib_extra_parse_utils import parse_to_markdown\nelse:\n from ..apib_extra_parse_utils import get_nested_parameter_values_description\n from ..apib_extra_parse_utils import parse_to_markdown\n\nfrom data_structures import parser_json_data_structures\nfrom instantiate_body import instantiate_all_example_body\nfrom instantiate_uri import instantiate_request_uri_templates\nfrom metadata import parse_meta_data\nfrom order_uri import order_uri_template_of_json\n\n\ndef extract_markdown_header_dict(markdown_header):\n \"\"\"Returns a dict with the elements of a given Markdown header (for resources or actions)\"\"\"\n markdown_header = markdown_header.lstrip('#').strip()\n \n p = re.compile(\"(.*) \\[(\\w*) (.*)\\]\")\n \n header_dict = {}\n if p.match( markdown_header ):\n header_groups = p.match(markdown_header).groups()\n\n header_dict['name'] = header_groups[0]\n header_dict['method'] = header_groups[1]\n header_dict['uriTemplate'] = header_groups[2]\n else:\n p = re.compile(\"(.*) \\[(.*)\\]\")\n header_groups = p.match( markdown_header ).groups()\n\n header_dict['name'] = header_groups[0]\n header_dict['uriTemplate'] = header_groups[1]\n \n return header_dict\n\n\ndef add_nested_parameter_description_to_json(API_blueprint_file_path, json_content):\n \"\"\"Extracts all nested description for`parameter values and adds them to the JSON.\n\n Arguments:\n API_specification_path -- path to the specification file where all the links will be extracted from.\n json_content -- JSON object where all the links will be added.\n \"\"\"\n nested_descriptions_list = get_nested_parameter_values_description(API_blueprint_file_path)\n\n for nested_description in nested_descriptions_list:\n for parameter in nested_description[\"parameters\"]:\n for value in parameter[\"values\"]:\n\n add_description_to_json_parameter_value(json_content, \n nested_description[\"parent\"], \n parameter[\"name\"],\n value[\"name\"],\n value[\"description\"])\n\n\ndef add_description_to_json_parameter_value(json_content, resource_or_action_markdown_header, parameter_name, value_name, value_description):\n \"\"\"\"\"\"\n wanted_object = extract_markdown_header_dict( resource_or_action_markdown_header)\n\n found_object = None\n\n if 'method' in wanted_object:\n for resource_group in json_content['resourceGroups']:\n for resource in resource_group['resources']:\n for action in resource['actions']:\n if( action['name'] == wanted_object['name'] and action['method'] == wanted_object['method'] and action['attributes']['uriTemplate'] == wanted_object['uriTemplate'] ):\n found_object = action\n break\n else:\n for resource_group in json_content['resourceGroups']:\n for resource in resource_group['resources']:\n if resource['name'] == wanted_object['name'] and resource['uriTemplate'] == wanted_object['uriTemplate']:\n found_object = resource\n break\n\n if found_object != None:\n add_description_to_json_object_parameter_value(found_object, parameter_name, value_name, value_description)\n\n\ndef add_description_to_json_object_parameter_value(JSON_object, parameter_name, value_name, value_description):\n \"\"\"\"\"\"\n value_object = None\n\n for object_parameter in JSON_object['parameters']:\n if object_parameter['name'] == parameter_name:\n for parameter_value in object_parameter['values']:\n if parameter_value['value'] == value_name:\n value_object = parameter_value\n \n if value_object != None:\n value_object['description'] = value_description\n\n\ndef get_links_from_description(description):\n \"\"\"Find via regex all the links in a 
description string\"\"\"\n\n link_regex = re.compile( r\"\\[(?P<linkText>[^\\(\\)\\[\\]]*)\\]\\((?P<linkRef>[^\\(\\)\\[\\]]*)\\)\" )\n auto_link_regex = re.compile(r\"\\<(?P<linkRef>http[s]?://[^\\\"]*)\\>\")\n html_link_regex = re.compile(r\"\\<a href=\\\"(?P<linkRef>http[s]?://[^\\\"]*)\\\"\\>(?P<linkText>[^\\<]*)\\</a>\")\n\n links = []\n\n link_matches = link_regex.findall(description)\n if link_matches:\n for link_match in link_matches:\n link = {}\n link['title'] = link_match[0]\n link['url'] = link_match[1]\n\n links.append(link)\n else:\n link_matches = auto_link_regex.findall(description)\n if link_matches:\n for link_match in link_matches:\n link = {}\n link['title'] = link_match\n link['url'] = link_match\n\n links.append(link)\n else:\n link_matches = html_link_regex.findall(description)\n if link_matches:\n for link_match in link_matches:\n link = {}\n link['title'] = link_match[1]\n link['url'] = link_match[0]\n\n links.append(link)\n\n return links\n\n\ndef get_links_api_metadata(section):\n \"\"\"Recursively get links from the api_metadata json section.\"\"\"\n\n\n links = []\n links += get_links_from_description(section[\"body\"])\n\n for subsection in section[\"subsections\"]:\n links += get_links_api_metadata(subsection)\n\n return links\n\n\n\ndef parse_json_description(JSON_element, links):\n \"\"\"Search for a 'decription' key in the current object and parse ti as markdown\n\n Arguments:\n JSON_element -- JSON element to iterate and parse\n links - List of links gathered from the descriptions\n \"\"\"\n\n if type(JSON_element) is dict:\n for key in JSON_element:\n if key == \"description\":\n JSON_element[key] = parse_to_markdown(JSON_element[key])\n\n for link in get_links_from_description(JSON_element[key]):\n if link not in links:\n links.append(link)\n \n else:\n JSON_element[key] = parse_json_description(JSON_element[key], links)\n\n elif type(JSON_element) is list:\n for key in range(len(JSON_element)):\n JSON_element[key] = parse_json_description(JSON_element[key], links)\n\n return JSON_element\n\n\ndef add_metadata_to_json(metadata, json_content):\n \"\"\"Adds metadata values to a json object\n \n Arguments: \n metadata -- Metadata values in JSON format\n json_content -- JSON object\n \"\"\"\n json_content['api_metadata'] = {}\n for metadataKey in metadata:\n json_content['api_metadata'][metadataKey] = metadata[metadataKey]\n\n\ndef parse_json_descriptions_and_get_links(json_content):\n \"\"\"Gets the descriptions of resources and actions and parses them as markdown. Saves the result in the same JSON file.\n \n Arguments: \n json_content -- JSON object containing the parsed apib.\n \"\"\"\n links = []\n\n for metadatum in json_content['metadata']:\n if 'APIARY_PROJECT' == metadatum['name']:\n link = {\"title\": \"Apiary project\", \n \"url\": \"http://docs.{}.apiary.io/#reference\".format(metadatum['value'])}\n links.append(link)\n\n if 'GITHUB_SOURCE' == metadatum['name']:\n link = {\"title\": \"Github source\", \"url\":metadatum['value']}\n links.append(link)\n\n # Abstract\n for link in get_links_from_description(json_content[\"description\"]):\n if link not in links: links.append(link)\n\n # API Metadata\n for link in get_links_api_metadata(json_content[\"api_metadata\"]):\n if link not in links: links.append(link)\n\n json_content = parse_json_description(json_content, links)\n\n return links\n\n\n\n\ndef find_and_mark_empty_resources(json_content):\n \"\"\"Makes a resource able to be ignored by emprtying its title. 
\n\n When a resource has only one action and they share names, the APIB declared an action witohut parent resource.\n \"\"\"\n for resource_group in json_content[\"resourceGroups\"]:\n for resource in resource_group[\"resources\"]:\n if len(resource[\"actions\"]) == 1:\n if resource[\"actions\"][0][\"name\"] == resource[\"name\"]:\n resource[\"ignoreTOC\"] = True\n else:\n resource[\"ignoreTOC\"] = False\n\n\ndef render_description(json_content):\n \"\"\"Escaping ampersand symbol form URIs.\n\n Arguments:\n JSON_file_path -- path to the JSON file where the ampersand will be be escaped in URIs.\n \"\"\"\n json_content[\"description\"] = parse_to_markdown(json_content[\"description\"])\n\n\ndef escape_requests_responses_json(json_content):\n \"\"\"Identifies when the body of a request or response uses an XML like type and escapes the '<' for browser rendering.\n\n Arguments:\n json_content -- JSON content where requests and responses with XML like body will be escaped.\n \"\"\"\n for resource_group in json_content[\"resourceGroups\"]:\n for resource in resource_group[\"resources\"]:\n for action in resource[\"actions\"]:\n for example in action[\"examples\"]:\n\n for request in example[\"requests\"]:\n if request[\"body\"]:\n request[\"body\"] = request[\"body\"].replace(\"<\", \"&lt;\")\n if not \"sections\" in request[\"content\"][0]:\n request[\"content\"][0][\"content\"] = request[\"content\"][0][\"content\"].replace(\"<\", \"&lt;\")\n\n for response in example[\"responses\"]:\n if response[\"body\"]:\n response[\"body\"] = response[\"body\"].replace(\"<\", \"&lt;\")\n if not \"sections\" in response[\"content\"][0]:\n response[\"content\"][0][\"content\"] = response[\"content\"][0][\"content\"].replace(\"<\", \"&lt;\")\n\n\ndef escape_ampersand_uri_templates(json_content):\n \"\"\"Renders the description of the API spscification to display it properly.\n\n Arguments:\n json_content - json object containing the content to be replaced.\n \"\"\"\n if(isinstance(json_content, dict)):\n for key, value in json_content.iteritems():\n if isinstance(value, dict) or isinstance(value, list):\n escape_ampersand_uri_templates(value)\n elif key == 'uriTemplate':\n json_content[key] = json_content[key].replace('&', '&amp;')\n elif(isinstance(json_content,list)):\n for value in json_content:\n if isinstance(value, dict) or isinstance(value, list):\n escape_ampersand_uri_templates(value)\n\n\ndef generate_resources_and_action_ids(json_content):\n \"\"\"Generate an ID for every resource and action in the given JSON file\n\n Arguments:\n json_content - JSON object containing the API parsed definition\"\"\"\n for resource_group in json_content[\"resourceGroups\"]:\n for resource in resource_group[\"resources\"]:\n if len( resource[\"name\"] ) > 0:\n resource[\"id\"] = 'resource_' + slugify( resource[\"name\"], '-' )\n else:\n resource[\"id\"] = 'resource_' + slugify( resource[\"uriTemplate\"], '-' )\n\n for action in resource[\"actions\"]:\n if len( action[\"name\"] ) > 0:\n action[\"id\"] = 'action_' + slugify( action[\"name\"],'-' )\n else:\n if len( action[\"attributes\"][\"uriTemplate\"] ) > 0:\n action[\"id\"] = 'action_' + slugify( action[\"attributes\"][\"uriTemplate\"], '-' )\n else:\n if resource[\"ignoreTOC\"] == True:\n action[\"id\"] = 'action_' + slugify( resource[\"uriTemplate\"] + action[\"method\"], '-' )\n else:\n action[\"id\"] = 'action_' + slugify( resource[\"name\"] + action[\"method\"], '-' )\n\n\ndef remove_redundant_spaces(json_content):\n \"\"\"Remove redundant spaces from names of 
resources and actions\n\n Arguments:\n json_content - a JSON object containing the API parsed definition\"\"\"\n\n for resource_group in json_content[\"resourceGroups\"]:\n resource_group[\"name\"] = re.sub( \" +\", \" \", resource_group[\"name\"] )\n for resource in resource_group[\"resources\"]:\n resource[\"name\"] = re.sub( \" +\", \" \", resource[\"name\"] )\n for action in resource[\"actions\"]:\n action[\"name\"] = re.sub( \" +\", \" \", action[\"name\"] )\n\n\n\ndef postprocess_drafter_json(JSON_file_path, API_blueprint_file_path, API_extra_sections_file_path, is_PDF):\n \"\"\"Apply a set of modifications to a JSON file containing an API specification\"\"\" \n with open(JSON_file_path, 'rU') as json_file:\n json_content = json.load(json_file)\n \n add_metadata_to_json(parse_meta_data(API_extra_sections_file_path), json_content)\n add_nested_parameter_description_to_json(API_blueprint_file_path, json_content)\n links = parse_json_descriptions_and_get_links(json_content)\n json_content['reference_links'] = links\n instantiate_request_uri_templates(json_content)\n order_uri_template_of_json(json_content)####--##\n parser_json_data_structures(json_content)\n find_and_mark_empty_resources(json_content)\n render_description(json_content)\n escape_requests_responses_json(json_content)\n escape_ampersand_uri_templates(json_content)\n generate_resources_and_action_ids(json_content)\n remove_redundant_spaces(json_content)\n instantiate_all_example_body(json_content)##\n\n json_content['is_PDF'] = is_PDF\n\n with open(JSON_file_path, 'w') as json_file:\n json.dump(json_content, json_file, indent=4)\n", "id": "6561098", "language": "Python", "matching_score": 2.8801047801971436, "max_stars_count": 0, "path": "fiware_api_blueprint_renderer/src/drafter_postprocessing/json_processing.py" }, { "content": "# -*- coding: utf-8 -*-\n\nfrom pprint import pprint\nimport re\n\nimport markdown\nimport mdx_linkify\n\n\ndef parse_to_markdown(markdown_text):\n \"\"\"Parse Markdown text to HTML\n\n Arguments:\n markdown_text -- String to be parsed into HTML format\n \"\"\"\n\n extensions_list = ['linkify','markdown.extensions.tables','markdown.extensions.fenced_code']\n\n try:\n parsed_text = markdown.markdown(markdown_text.decode('utf-8'), extensions=extensions_list)\n\n except (UnicodeEncodeError, UnicodeDecodeError) as encoding_error:\n parsed_text = markdown.markdown(markdown_text, extensions=extensions_list)\n\n return parsed_text\n\n\ndef get_links_from_description(description):\n \"\"\"Find via regex all the links in a description string\"\"\"\n\n \n html_link_regex = re.compile(r\"\\<a href=\\\"(?P<linkRef>.*)\\\"\\>(?P<linkText>[^\\<]*)\\</a>\")\n\n links = []\n\n \n link_matches = html_link_regex.findall(description)\n if link_matches:\n for link_match in link_matches:\n link = {}\n link['title'] = link_match[1]\n link['url'] = link_match[0]\n\n links.append(link)\n\n return links\n\n\ntext = \"\"\" <http://autolink-in-abstract.com>.\n\nETC ETC ETC ETC ETC ETC ETC ETC ETC ETC http://link-with-quotes.com?id='&quotweird-id&quot' ETC ETC ETC\"\"\"\n\nprint parse_to_markdown(text)\n\n\npprint(get_links_from_description(parse_to_markdown(text)))\n", "id": "9072217", "language": "Python", "matching_score": 1.6050838232040405, "max_stars_count": 0, "path": "fiware_api_blueprint_renderer/tests/test_reference_links/notest_complex_link.py" } ]
2.726348
HoomCC
[ { "content": "# Copyright 2017 The Bazel Authors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Support for product types used by Apple bundling rules.\n\nThis file should be loaded by the top-level Apple platform .bzl files\n(ios.bzl, watchos.bzl, and so forth) and should export *only* the\n`apple_product_type` struct so that BUILD files can import it through there\nand access the constants in their own targets.\n\"\"\"\n\nload(\n \"@build_bazel_rules_apple//common:attrs.bzl\",\n \"attrs\",\n)\n\napple_product_type = struct(\n application = \"com.apple.product-type.application\",\n app_extension = \"com.apple.product-type.app-extension\",\n bundle = \"com.apple.product-type.bundle\",\n framework = \"com.apple.product-type.framework\",\n kernel_extension = \"com.apple.product-type.kernel-extension\",\n messages_application = \"com.apple.product-type.application.messages\",\n messages_extension = \"com.apple.product-type.app-extension.messages\",\n messages_sticker_pack_extension = (\n \"com.apple.product-type.app-extension.messages-sticker-pack\"\n ),\n spotlight_importer = \"com.apple.product-type.spotlight-importer\",\n static_framework = \"com.apple.product-type.framework.static\",\n tool = \"com.apple.product-type.tool\",\n ui_test_bundle = \"com.apple.product-type.bundle.ui-testing\",\n unit_test_bundle = \"com.apple.product-type.bundle.unit-test\",\n watch2_application = \"com.apple.product-type.application.watchapp2\",\n watch2_extension = \"com.apple.product-type.watchkit2-extension\",\n xpc_service = \"com.apple.product-type.xpc-service\",\n)\n\"\"\"\nProduct type identifiers used to describe various bundle types.\n\nThe \"product type\" is a concept used internally by Xcode (the strings themselves\nare visible inside the `.pbxproj` file) that describes properties of the bundle,\nsuch as its default extension.\n\nAdditionally, products like iMessage applications and sticker packs in iOS 10\nrequire a stub executable instead of a user-defined binary and additional values\ninjected into their `Info.plist` files. These behaviors are also captured in the\nproduct type identifier. The product types currently supported are:\n\n* `application`: A basic iOS, macOS, or tvOS application. This is the default\n product type for those targets; it can be overridden with a more specific\n product type if needed.\n* `app_extension`: A basic iOS, macOS, or tvOS application extension. This is\n the default product type for those targets; it can be overridden with a more\n specific product type if needed.\n* `bundle`: A loadable macOS bundle. This is the default product type for\n `macos_bundle` targets; it can be overridden with a more specific product type\n if needed.\n* `framework`: A basic dynamic framework. This is the default product type for\n those targets; it does not need to be set explicitly (and cannot be changed).\n* `kernel_extension`: A macOS kernel extension. 
This product type should be used\n with a `macos_bundle` target to create such a plug-in; the built bundle will\n have the extension `.kext`.\n* `messages_application`: An application that integrates with the Messages\n app (iOS 10 and above). This application must include an `ios_extension`\n with the `messages_extension` or `messages_sticker_pack_extension` product\n type (or both extensions). This product type does not contain a user-provided\n binary.\n* `messages_extension`: An extension that integrates custom code/behavior into\n a Messages application. This product type should contain a user-provided\n binary.\n* `messages_sticker_pack_extension`: An extension that defines custom sticker\n packs for the Messages app. This product type does not contain a\n user-provided binary.\n* `spotlight_importer`: A macOS Spotlight importer plug-in. This product type\n should be used with a `macos_bundle` target to create such a plug-in; the\n built bundle will have the extension `.mdimporter`.\n* `static_framework`: An iOS static framework, which is a `.framework` bundle\n that contains resources and headers but a static library instead of a dynamic\n library.\n* `tool`: A command-line tool. This is the default product type for\n `macos_command_line_application`; it does not need to be set explicitly (and\n cannot be changed).\n* `ui_test_bundle`: A UI testing bundle (.xctest). This is the default product\n type for those targets; it does not need to be set explicitly (and cannot be\n changed).\n* `unit_test_bundle`: A unit test bundle (.xctest). This is the default product\n type for those targets; it does not need to be set explicitly (and cannot be\n changed).\n* `watch2_application`: A watchOS 2+ application. This is the default product\n type for those targets; it does not need to be set explicitly (and cannot be\n changed).\n* `watch2_extension`: A watchOS 2+ application extension. This is the default\n product type for those targets; it does not need to be set explicitly (and\n cannot be changed).\n* `xpc_service`: A macOS XPC service. This product type should be used with a\n `macos_application` target to create such a service; the built bundle will\n have the extension `.xpc`.\n\"\"\"\n\ndef _describe_stub(\n xcenv_based_path,\n path_in_archive,\n additional_bundle_path = None):\n \"\"\"Returns a struct suitable for the `stub` field of a product type struct.\n\n Args:\n xcenv_based_path: The Xcode-environment-based path from which the stub\n binary should be copied (rooted at either `$(SDKROOT)` or\n `$(PLATFORM_DIR)`).\n path_in_archive: The path relative to the root of a top-level application\n archive where the stub should be copied as a support file.\n additional_bundle_path: A path relative to the bundle where the stub binary\n should be copied, *in addition to* the standard location of the\n executable.\n\n Returns:\n A struct suitable for the `stub` field of a product type struct.\n \"\"\"\n return struct(\n xcenv_based_path = xcenv_based_path,\n path_in_archive = path_in_archive,\n additional_bundle_path = additional_bundle_path,\n )\n\ndef _describe_product_type(\n bundle_extension,\n additional_infoplist_values = {},\n stub = None):\n \"\"\"Returns a new product type descriptor.\n\n Args:\n bundle_extension: The default extension for bundles with this product type,\n which will be used if not overridden on the target. 
The extension\n includes the leading dot.\n additional_infoplist_values: Any additional keys and values that should be\n added to the `Info.plist` for bundles with this product type.\n stub: A descriptor returned by `_stub_descriptor` that contains information\n about the stub binary for the bundle, if any.\n\n Returns:\n A new product type descriptor.\n \"\"\"\n return struct(\n bundle_extension = bundle_extension,\n additional_infoplist_values = additional_infoplist_values,\n stub = stub,\n )\n\n# Descriptors for the various product types.\n_PRODUCT_TYPE_DESCRIPTORS = {\n apple_product_type.application: _describe_product_type(\n bundle_extension = \".app\",\n ),\n apple_product_type.app_extension: _describe_product_type(\n bundle_extension = \".appex\",\n ),\n apple_product_type.bundle: _describe_product_type(\n bundle_extension = \".bundle\",\n ),\n apple_product_type.framework: _describe_product_type(\n bundle_extension = \".framework\",\n ),\n apple_product_type.kernel_extension: _describe_product_type(\n bundle_extension = \".kext\",\n ),\n apple_product_type.messages_application: _describe_product_type(\n bundle_extension = \".app\",\n additional_infoplist_values = {\"LSApplicationLaunchProhibited\": True},\n stub = _describe_stub(\n xcenv_based_path = (\"$(PLATFORM_DIR)/Library/Application Support/\" +\n \"MessagesApplicationStub/\" +\n \"MessagesApplicationStub\"),\n path_in_archive = (\"MessagesApplicationSupport/\" +\n \"MessagesApplicationSupportStub\"),\n ),\n ),\n apple_product_type.messages_extension: _describe_product_type(\n bundle_extension = \".appex\",\n ),\n apple_product_type.messages_sticker_pack_extension: _describe_product_type(\n bundle_extension = \".appex\",\n additional_infoplist_values = {\"LSApplicationIsStickerProvider\": \"YES\"},\n stub = _describe_stub(\n xcenv_based_path = (\"$(PLATFORM_DIR)/Library/Application Support/\" +\n \"MessagesApplicationExtensionStub/\" +\n \"MessagesApplicationExtensionStub\"),\n path_in_archive = (\"MessagesApplicationExtensionSupport/\" +\n \"MessagesApplicationExtensionSupportStub\"),\n ),\n ),\n apple_product_type.spotlight_importer: _describe_product_type(\n bundle_extension = \".mdimporter\",\n ),\n apple_product_type.static_framework: _describe_product_type(\n bundle_extension = \".framework\",\n ),\n apple_product_type.tool: _describe_product_type(\n bundle_extension = \"\",\n ),\n apple_product_type.ui_test_bundle: _describe_product_type(\n bundle_extension = \".xctest\",\n ),\n apple_product_type.unit_test_bundle: _describe_product_type(\n bundle_extension = \".xctest\",\n ),\n apple_product_type.watch2_application: _describe_product_type(\n bundle_extension = \".app\",\n stub = _describe_stub(\n xcenv_based_path = (\"__BAZEL_XCODE_SDKROOT__/Library/\" +\n \"Application Support/WatchKit/WK\"),\n path_in_archive = \"WatchKitSupport2/WK\",\n additional_bundle_path = \"_WatchKitStub/WK\",\n ),\n ),\n apple_product_type.watch2_extension: _describe_product_type(\n bundle_extension = \".appex\",\n ),\n apple_product_type.xpc_service: _describe_product_type(\n bundle_extension = \".xpc\",\n ),\n}\n\ndef _contains_stub_binary(ctx):\n \"\"\"Returns whether the current product type contains a stub binary.\n\n Args:\n ctx: The Skylark context.\n\n Returns:\n True if the current target contains a stub binary, False otherwise.\n \"\"\"\n product_type = _product_type(ctx)\n product_type_descriptor = _product_type_descriptor(product_type)\n if product_type_descriptor and product_type_descriptor.stub:\n return True\n return 
False\n\ndef _product_type(ctx):\n \"\"\"Returns the product type identifier for the current target.\n\n Args:\n ctx: The Skylark context.\n\n Returns:\n The product type identifier for the current target, or None if there is\n none.\n \"\"\"\n return attrs.get(ctx.attr, \"product_type\", default = attrs.private_fallback)\n\ndef _product_type_descriptor(product_type):\n \"\"\"Returns the descriptor for the given product type.\n\n The returned descriptor has the following fields:\n\n * `bundle_extension`: The default extension for bundles with this product\n type, including the leading dot.\n * `additional_infoplist_values`: A dictionary of keys and values that should\n be added to the `Info.plist` of a bundle with this product type.\n * `stub`: A descriptor for the stub binary required by this product type, if\n any (or `None` if this product type does not use a stub binary). This\n descriptor contains the following fields:\n\n * `xcenv_based_path`: The Xcode-environment-based path from which the stub\n binary should be copied.\n * `path_in_archive`: The path relative to the root of a top-level\n application archive where the stub should be copied as a support file.\n * `additional_bundle_path`: A path relative to the bundle where the stub\n binary should be copied, *in addition to* the standard location of the\n executable.\n\n Args:\n product_type: The product type.\n\n Returns:\n The product type descriptor.\n \"\"\"\n return _PRODUCT_TYPE_DESCRIPTORS.get(product_type)\n\n# Define the loadable module that lists the exported symbols in this file.\nproduct_support = struct(\n contains_stub_binary = _contains_stub_binary,\n product_type = _product_type,\n product_type_descriptor = _product_type_descriptor,\n)\n", "id": "3941336", "language": "Python", "matching_score": 2.0621025562286377, "max_stars_count": 1, "path": "apple/bundling/product_support.bzl" }, { "content": "# Copyright 2018 The Bazel Authors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Experimental implementation of macOS test bundle rules.\"\"\"\n\nload(\n \"@build_bazel_rules_apple//apple:providers.bzl\",\n \"MacosXcTestBundleInfo\",\n)\nload(\n \"@build_bazel_rules_apple//apple/internal/testing:apple_test_bundle_support.bzl\",\n \"apple_test_bundle_support\",\n)\n\ndef macos_test_bundle_impl(ctx):\n \"\"\"Experimental implementation of ios_application.\"\"\"\n providers = apple_test_bundle_support.apple_test_bundle_impl(ctx)\n return providers + [MacosXcTestBundleInfo()]\n", "id": "2254188", "language": "Python", "matching_score": 0.32437822222709656, "max_stars_count": 1, "path": "apple/internal/testing/macos_rules.bzl" }, { "content": "# Copyright 2017 The Bazel Authors. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Defines providers and related types used throughout the bundling rules.\n\nThese providers are part of the public API of the bundling rules. Other rules\nthat want to propagate information to the bundling rules or that want to\nconsume the bundling rules as their own inputs should use these to handle the\nrelevant information that they need.\n\"\"\"\n\nAppleBundleInfo = provider(\n doc = \"\"\"\nProvides information about an Apple bundle target.\n\nThis provider propagates general information about an Apple bundle that is not\nspecific to any particular bundle type.\n\"\"\",\n fields = {\n \"archive\": \"`File`. The archive that contains the built application.\",\n \"archive_root\": \"\"\"\n`string`. The file system path (relative to the workspace root)\nwhere the signed bundle was constructed (before archiving). Other rules\n*should not* depend on this field; it is intended to support IDEs that\nwant to read that path from the provider to avoid unzipping the output\narchive.\n\"\"\",\n \"binary\": \"\"\"\n`File`. The binary (executable, dynamic library, etc.) that was bundled. The\nphysical file is identical to the one inside the bundle except that it is\nalways unsigned, so note that it is _not_ a path to the binary inside your\noutput bundle. The primary purpose of this field is to provide a way to access\nthe binary directly at analysis time; for example, for code coverage.\n\"\"\",\n \"bundle_dir\": \"`File`. The directory that represents the bundle.\",\n \"bundle_id\": \"\"\"\n`string`. The bundle identifier (i.e., `CFBundleIdentifier` in\n`Info.plist`) of the bundle.\n\"\"\",\n \"bundle_name\": \"\"\"\n`string`. The name of the bundle, without the extension.\n\"\"\",\n \"bundle_extension\": \"\"\"\n`string`. The bundle extension.\n\"\"\",\n \"entitlements\": \"`File`. Entitlements file used to codesign, if any.\",\n \"extension_safe\": \"\"\"\nBoolean. True if the target propagating this provider was\ncompiled and linked with -application-extension, restricting it to\nextension-safe APIs only.\n\"\"\",\n \"infoplist\": \"\"\"\n`File`. The complete (binary-formatted) `Info.plist` file for the bundle.\n\"\"\",\n \"minimum_os_version\": \"\"\"\n`string`. The minimum OS version (as a dotted version\nnumber like \"9.0\") that this bundle was built to support.\n\"\"\",\n \"product_type\": \"\"\"\n`string`. The dot-separated product type identifier associated\nwith the bundle (for example, `com.apple.product-type.application`).\n\"\"\",\n \"propagated_framework_files\": \"\"\"\n`depset` of `File`s. Individual files that make up\nframework dependencies of the target but which are propagated to an\nembedding target instead of being bundled with the propagator. For\nexample, an `ios_extension` propagates its frameworks to be bundled with\nthe embedding `ios_application` rather than bundling the frameworks with\nthe extension itself. 
(This mainly supports `objc_framework`, which\npropagates its contents as individual files instead of a zipped framework;\nsee `propagated_framework_zips`.)\n\"\"\",\n \"propagated_framework_zips\": \"\"\"\n`depset` of `File`s. Files that are zipped\nframework dependencies of the target but which are propagated to an\nembedding target instead of being bundled with the propagator. For\nexample, an `ios_extension` propagates its frameworks to be bundled with\nthe embedding `ios_application` rather than bundling the frameworks with\nthe extension itself.\n\"\"\",\n \"root_merge_zips\": \"\"\"\n`list` of `File`s. A list of any `.zip` files that should be\nmerged into the root of the top-level bundle (such as `ios_application` or\n`tvos_application`) that embeds the target propagating this provider.\n\"\"\",\n \"uses_swift\": \"\"\"\nBoolean. True if Swift is used by the target propagating this\nprovider. This does not consider embedded bundles; for example, an\nObjective-C application containing a Swift extension would have this field\nset to true for the extension but false for the application.\n\"\"\",\n },\n)\n\nAppleBundleVersionInfo = provider(\n doc = \"Provides versioning information for an Apple bundle.\",\n fields = {\n \"version_file\": \"\"\"\nA `File` containing JSON-formatted text describing the version\nnumber information propagated by the target. It contains two keys:\n`build_version`, which corresponds to `CFBundleVersion`; and\n`short_version_string`, which corresponds to `CFBundleShortVersionString`.\n\"\"\",\n },\n)\n\nAppleExtraOutputsInfo = provider(\n doc = \"\"\"\nProvides information about extra outputs that should be produced from the build.\n\nThis provider propagates supplemental files that should be produced as outputs\neven if the bundle they are associated with is not a direct output of the rule.\nFor example, an application that contains an extension will build both targets\nbut only the application will be a rule output. However, if dSYM bundles are\nalso being generated, we do want to produce the dSYMs for *both* application and\nextension as outputs of the build, not just the dSYMs of the explicit target\nbeing built (the application).\n\"\"\",\n fields = {\n \"files\": \"\"\"\n`depset` of `File`s. These files will be propagated from embedded bundles (such\nas frameworks and extensions) to the top-level bundle (such as an application)\nto ensure that they are explicitly produced as outputs of the build.\n\"\"\",\n },\n)\n\nAppleResourceInfo = provider(\n doc = \"\"\"\nProvides information about resources from transitive dependencies.\n\nThe `AppleResourceInfo` provider should be propagated by rules that want to\npropagate resources--such as images, strings, Interface Builder files, and so\nforth--to a depending application or extension. For example, `swift_library`\ncan provide attributes like `bundles`, `resources`, and `structured_resources`\nthat allow users to associate resources with the code that uses them.\n\"\"\",\n fields = {\n \"resource_sets\": \"\"\"\n`list` of `struct`s. 
Each `struct` is one defined by\n`AppleResourceSet` and the full list describes the transitive resources\npropagated by this rule.\n\"\"\",\n \"owners\": \"\"\"\n`dict` of resource short paths to a `depset` of target labels in string form.\nUsed to account for multiple resource references to decide whether or not to deduplicate resources\nbetween frameworks and application bundles.\n\"\"\",\n },\n)\n\nIosApplicationBundleInfo = provider(\n doc = \"\"\"\nDenotes that a target is an iOS application.\n\nThis provider does not contain any fields of its own at this time but is used as\na \"marker\" to indicate that a target is specifically an iOS application bundle\n(and not some other Apple bundle). Rule authors who wish to require that a\ndependency is an iOS application should use this provider to describe that\nrequirement.\n\"\"\",\n)\n\nIosExtensionBundleInfo = provider(\n doc = \"\"\"\nDenotes that a target is an iOS application extension.\n\nThis provider does not contain any fields of its own at this time but is used as\na \"marker\" to indicate that a target is specifically an iOS application\nextension bundle (and not some other Apple bundle). Rule authors who wish to\nrequire that a dependency is an iOS application extension should use this\nprovider to describe that requirement.\n\"\"\",\n)\n\nIosFrameworkBundleInfo = provider(\n doc = \"\"\"\nDenotes that a target is an iOS dynamic framework.\n\nThis provider does not contain any fields of its own at this time but is used as\na \"marker\" to indicate that a target is specifically an iOS dynamic framework\nbundle (and not some other Apple bundle). Rule authors who wish to require that\na dependency is an iOS dynamic framework should use this provider to describe\nthat requirement.\n\"\"\",\n)\n\nIosStaticFrameworkBundleInfo = provider(\n doc = \"\"\"\nDenotes that a target is an iOS static framework.\n\nThis provider does not contain any fields of its own at this time but is used as\na \"marker\" to indicate that a target is specifically an iOS static framework\nbundle (and not some other Apple bundle). Rule authors who wish to require that\na dependency is an iOS static framework should use this provider to describe\nthat requirement.\n\"\"\",\n)\n\nIosXcTestBundleInfo = provider(\n doc = \"\"\"\nDenotes a target that is an iOS .xctest bundle.\n\nThis provider does not contain any fields of its own at this time but is used as\na \"marker\" to indicate that a target is specifically an iOS .xctest bundle (and\nnot some other Apple bundle). Rule authors who wish to require that a dependency\nis an iOS .xctest bundle should use this provider to describe that requirement.\n\"\"\",\n)\n\nMacosApplicationBundleInfo = provider(\n doc = \"\"\"\nDenotes that a target is a macOS application.\n\nThis provider does not contain any fields of its own at this time but is used as\na \"marker\" to indicate that a target is specifically a macOS application bundle\n(and not some other Apple bundle). Rule authors who wish to require that a\ndependency is a macOS application should use this provider to describe that\nrequirement.\n\"\"\",\n)\n\nMacosBundleBundleInfo = provider(\n doc = \"\"\"\nDenotes that a target is a macOS loadable bundle.\n\nThis provider does not contain any fields of its own at this time but is used as\na \"marker\" to indicate that a target is specifically a macOS loadable bundle\n(and not some other Apple bundle). 
Rule authors who wish to require that a\ndependency is a macOS loadable bundle should use this provider to describe that\nrequirement.\n\"\"\",\n)\n\nMacosExtensionBundleInfo = provider(\n doc = \"\"\"\nDenotes that a target is a macOS application extension.\n\nThis provider does not contain any fields of its own at this time but is used as\na \"marker\" to indicate that a target is specifically a macOS application\nextension bundle (and not some other Apple bundle). Rule authors who wish to\nrequire that a dependency is a macOS application extension should use this\nprovider to describe that requirement.\n\"\"\",\n)\n\nMacosXcTestBundleInfo = provider(\n doc = \"\"\"\nDenotes a target that is a macOS .xctest bundle.\n\nThis provider does not contain any fields of its own at this time but is used as\na \"marker\" to indicate that a target is specifically a macOS .xctest bundle\n(and not some other Apple bundle). Rule authors who wish to require that a\ndependency is a macOS .xctest bundle should use this provider to describe that\nrequirement.\n\"\"\",\n)\n\nSwiftInfo = provider(\n doc = \"\"\"\nProvides information about a Swift library.\n\nFields:\n direct_lib: `File`. The single static library that was produced by compiling\n the propagating target. (Contrast with `transitive_libs`.)\n direct_module: `File`. The single `.swiftmodule` file that was produced by\n compiling the propagating target. (Contrast with `transitive_modules`.)\n direct_doc: `File`. The single `.swiftdoc` file that was produced by\n compiling the propagating target. (Contrast with `transitive_docs`.)\n swift_version: `string`. The version of the Swift language that was used when\n compiling the propagating target; that is, the value passed via the\n `-swift-version` compiler flag. This will be `None` if the flag was not\n set.\n transitive_defines: `depset` of `string`s. The set of conditional compilation\n flags defined by the propagating target and all of its transitive\n dependencies.\n transitive_libs: `depset` of `File`s. The set of static library files output\n by the propagating target and all of its transitive dependencies.\n transitive_modules: `depset` of `File`s. The set of `.swiftmodule` files\n output by the propagating target and all of its transitive dependencies.\n transitive_docs: `depset` of `File`s. The set of `.swiftdoc` files\n output by the propagating target and all of its transitive dependencies.\n\"\"\",\n)\n\nTvosApplicationBundleInfo = provider(\n doc = \"\"\"\nDenotes that a target is a tvOS application.\n\nThis provider does not contain any fields of its own at this time but is used as\na \"marker\" to indicate that a target is specifically a tvOS application bundle\n(and not some other Apple bundle). Rule authors who wish to require that a\ndependency is a tvOS application should use this provider to describe that\nrequirement.\n\"\"\",\n)\n\nTvosExtensionBundleInfo = provider(\n doc = \"\"\"\nDenotes that a target is a tvOS application extension.\n\nThis provider does not contain any fields of its own at this time but is used as\na \"marker\" to indicate that a target is specifically a tvOS application\nextension bundle (and not some other Apple bundle). 
Rule authors who wish to\nrequire that a dependency is a tvOS application extension should use this\nprovider to describe that requirement.\n\"\"\",\n)\n\nWatchosApplicationBundleInfo = provider(\n doc = \"\"\"\nDenotes that a target is a watchOS application.\n\nThis provider does not contain any fields of its own at this time but is used as\na \"marker\" to indicate that a target is specifically a watchOS application\nbundle (and not some other Apple bundle). Rule authors who wish to require that\na dependency is a watchOS application should use this provider to describe that\nrequirement.\n\"\"\",\n)\n\nWatchosExtensionBundleInfo = provider(\n doc = \"\"\"\nDenotes that a target is a watchOS application extension.\n\nThis provider does not contain any fields of its own at this time but is used as\na \"marker\" to indicate that a target is specifically a watchOS application\nextension bundle (and not some other Apple bundle). Rule authors who wish to\nrequire that a dependency is a watchOS application extension should use this\nprovider to describe that requirement.\n\"\"\",\n)\n\ndef AppleResourceBundleTargetData(\n label,\n bundle_name,\n product_name):\n \"\"\"Returns a new resource bundle target info to use with an AppleResourceSet.\n\n Resource bundles are collected via aspect and not actually processed until\n reaching the bundling product. This encapsulates everything about a\n `objc_bundle_library` target that is needed for the final processing.\n\n Args:\n label: The `Label` of the resource bundle target.\n bundle_name: The bundle name to use for the target.\n product_name: The product name to use for the target.\n\n Returns:\n A struct containing the target info needed from a resource bundle.\n \"\"\"\n return struct(\n label = label,\n bundle_name = bundle_name,\n product_name = product_name,\n )\n\ndef AppleResourceSet(\n bundle_dir = None,\n infoplists = depset(),\n objc_bundle_imports = depset(),\n resource_bundle_target_data = None,\n resources = depset(),\n structured_resources = depset(),\n structured_resource_zips = depset(),\n swift_module = None):\n \"\"\"Returns a new resource set to be propagated via `AppleResourceInfo`.\n\n Args:\n bundle_dir: The path within the final bundle (relative to its resources\n root) where the resources should be stored. For example, a resource\n bundle rule would specify something of the form `\"Foo.bundle\"` here;\n library rules that propagate resources to the application itself\n should specify `None` (or omit it, as `None` is the default).\n infoplists: A `depset` of `File`s representing plists that should be\n merged to produce the `Info.plist` for the bundle.\n objc_bundle_imports: A `depset` of `File`s representing resources that\n came from an `objc_bundle` target and need to have their paths stripped\n of any segments before the `\"*.bundle\"` name.\n resource_bundle_target_data: If this resource set is from a bundle library,\n a AppleResourceBundleTargetData instance from that rule.\n resources: A `depset` of `File`s representing resources that should be\n processed (if they are a known type) or copied (if the type is not\n recognized) and placed in the bundle at the location specified by\n `bundle_dir`. 
The relative paths to these files are ignored, with the\n        exception that files contained in a directory named `\"*.lproj\"` will\n        be placed in a directory of the same name in the final bundle.\n      structured_resources: A `depset` of `File`s representing resources that\n        should be copied into the bundle without any processing at the location\n        specified by `bundle_dir`. The relative paths of these files are\n        preserved.\n      structured_resource_zips: A `depset` of `File`s representing ZIP archives\n        whose contents should be unzipped into the bundle without any processing\n        at the location specified by `bundle_dir`. The directory structure\n        within the archive is preserved.\n      swift_module: The name of the Swift module with which these resources are\n        associated. Some resource types, such as Interface Builder files or\n        Core Data models, require the Swift module to be specified during\n        compilation so that the classes they reference can be found at runtime.\n        If this value is `None`, then the resources are not associated with a\n        Swift module (for example, resources attached to Objective-C rules) and\n        the name of the main application/extension/framework will be passed to\n        the resource tool instead.\n    Returns:\n      A struct containing a set of resources that can be propagated by the\n      `AppleResourceInfo` provider.\n    \"\"\"\n    return struct(\n        bundle_dir = bundle_dir,\n        infoplists = infoplists,\n        objc_bundle_imports = objc_bundle_imports,\n        resource_bundle_target_data = resource_bundle_target_data,\n        resources = resources,\n        structured_resources = structured_resources,\n        structured_resource_zips = structured_resource_zips,\n        swift_module = swift_module,\n    )\n\ndef _apple_resource_set_utils_minimize(\n        resource_sets,\n        framework_resource_sets = [],\n        dedupe_unbundled = False,\n        whitelisted_mapping = None):\n    \"\"\"Minimizes and reduces a list of resource sets.\n\n    This both merges similar resource set elements and subtracts all resources\n    already defined in framework resource sets.\n\n    Two or more resource sets can be merged if their `bundle_dir` and\n    `swift_module` values are the same, which means that they can be passed to\n    the same resource processing tool invocation. The list returned by this\n    function represents the minimal possible list after merging such sets.\n\n    The main Apple bundler will minimize the list of transitive resource sets\n    before processing resources, but other rules that propagate resource sets are\n    advised to call this function as well after collecting their transitive\n    resources to avoid propagating a large number of minimizable sets to their\n    dependers.\n\n    In order to support smart deduplication, we also allow the whitelisted_mapping\n    argument, which accepts a dictionary of file paths of resources which should\n    not be deduplicated. If whitelisted_mapping is None, then the legacy\n    deduplication logic will be used, where the resources present in\n    framework_resource_sets will be deduplicated from the resource_sets. If\n    whitelisted_mapping is not None (e.g. could be an empty dictionary), then the\n    smart dedupe logic will apply, and framework_resource_sets will not be taken\n    into account.\n\n    Args:\n      resource_sets: The list of `AppleResourceSet` values that should be merged.\n      framework_resource_sets: The list of \"AppleResourceSet\" values which contain\n        resources already included in framework bundles. 
Resources present\n in these sets will not be included in the returned list.\n dedupe_unbundled: If false, resources that have no bundle directory will\n not be subtracted. False by default.\n whitelisted_mapping: A dictionary of file short paths where the keys are file\n paths of resources that should not be deduplicated.\n Returns:\n The minimal possible list after merging `AppleResourceSet` values with\n the same `bundle_dir` and `swift_module`.\n \"\"\"\n framework_minimized_dict = _apple_resource_set_dict(framework_resource_sets)\n if not dedupe_unbundled:\n framework_minimized_dict_without_unbundled = {}\n for (bundle_dir, swift_module), value in framework_minimized_dict.items():\n if bundle_dir:\n key = (bundle_dir, swift_module)\n framework_minimized_dict_without_unbundled[key] = value\n framework_minimized_dict = framework_minimized_dict_without_unbundled\n minimized_dict = _apple_resource_set_dict(\n resource_sets,\n framework_minimized_dict,\n whitelisted_mapping = whitelisted_mapping,\n )\n\n return [_dedupe_resource_set_files(rs) for rs in minimized_dict.values()]\n\ndef _apple_resource_set_dict(resource_sets, avoid_resource_dict = {}, whitelisted_mapping = None):\n \"\"\"Returns a minimal map of resource sets, omitting specified resources.\n\n Map keys are `(bundle_dir, swift_module)` of the resource set; multiple\n resource sets with the same key will be combined into a single resource\n set of that key.\n\n Any resources present under a given key in `avoid_resource_dict` will be\n omitted from that keyed resource set in the returned value.\n\n Args:\n resource_sets: The list of `AppleResourceSet` values for the map.\n avoid_resource_dict: A map of `AppleResourceSet` values already keyed by\n `(bundle_dir, swift_module)` that should be omitted from the output.\n whitelisted_mapping: A dictionary of file short paths where the keys are file\n paths of resources that should not be deduplicated.\n Returns:\n A minimal map from `(bundle_dir, swift_module)` to `AppleResourceSet`\n containing the resources in `resource_sets` minus the resources in\n `avoid_resource_dict`.\n \"\"\"\n minimized_dict = {}\n\n for current_set in resource_sets:\n key = (current_set.bundle_dir, current_set.swift_module)\n existing_set = minimized_dict.get(key)\n avoid_set = avoid_resource_dict.get(key)\n\n avoid_objc_bundle_imports = depset()\n avoid_resources = depset()\n avoid_structured_resources = depset()\n avoid_structured_resource_zips = depset()\n\n if avoid_set:\n avoid_objc_bundle_imports = avoid_set.objc_bundle_imports\n avoid_resources = avoid_set.resources\n avoid_structured_resources = avoid_set.structured_resources\n avoid_structured_resource_zips = avoid_set.structured_resource_zips\n\n resource_bundle_target_data = current_set.resource_bundle_target_data\n\n if existing_set:\n if existing_set.resource_bundle_target_data:\n if resource_bundle_target_data:\n existing_label = existing_set.resource_bundle_target_data.label\n if resource_bundle_target_data.label != existing_label:\n fail((\"Internal error: AppleResourceSets with different \" +\n \"resource_bundle_target_datas?! 
(%r: %r vs %r)\") %\n (\n current_set.bundle_dir,\n str(resource_bundle_target_data.label),\n str(existing_label),\n ))\n else:\n resource_bundle_target_data = existing_set.resource_bundle_target_data\n infoplists = existing_set.infoplists + current_set.infoplists\n objc_bundle_imports = (existing_set.objc_bundle_imports +\n current_set.objc_bundle_imports)\n resources = existing_set.resources + current_set.resources\n structured_resources = (existing_set.structured_resources +\n current_set.structured_resources)\n structured_resource_zips = (existing_set.structured_resource_zips +\n current_set.structured_resource_zips)\n else:\n infoplists = current_set.infoplists\n objc_bundle_imports = current_set.objc_bundle_imports\n resources = current_set.resources\n structured_resources = current_set.structured_resources\n structured_resource_zips = current_set.structured_resource_zips\n\n new_set = AppleResourceSet(\n bundle_dir = current_set.bundle_dir,\n infoplists = infoplists,\n objc_bundle_imports = _filter_files(\n objc_bundle_imports,\n avoid_objc_bundle_imports,\n whitelisted_mapping = whitelisted_mapping,\n ),\n resource_bundle_target_data = resource_bundle_target_data,\n resources = _filter_files(\n resources,\n avoid_resources,\n whitelisted_mapping = whitelisted_mapping,\n ),\n structured_resources = _filter_files(\n structured_resources,\n avoid_structured_resources,\n whitelisted_mapping = whitelisted_mapping,\n ),\n structured_resource_zips = _filter_files(\n structured_resource_zips,\n avoid_structured_resource_zips,\n whitelisted_mapping = whitelisted_mapping,\n ),\n swift_module = current_set.swift_module,\n )\n\n # If this is for the root bundle dir, it always gets saved, otherwise\n # it has to have some resource left. This prunes bundles that no longer\n # have resources which ensure the Info.plist won't get built for the now\n # deduped bundle.\n if not new_set.bundle_dir:\n minimized_dict[key] = new_set\n elif (new_set.objc_bundle_imports or new_set.resources or\n new_set.structured_resources or new_set.structured_resource_zips):\n minimized_dict[key] = new_set\n elif existing_set:\n fail(\"An empty set was already added? %s\" % new_set.bundle_dir)\n\n return minimized_dict\n\ndef _filter_files(files, avoid_files, whitelisted_mapping = None):\n \"\"\"Returns a depset containing files minus avoid_files.\"\"\"\n\n # If whitelisted_mapping is None, use the legacy deduplication logic. 
We\n # explicitly test for None as an empty dictionary is a valid\n # whitelisted_mapping value.\n if whitelisted_mapping == None:\n avoid_short_paths = {f.short_path: None for f in avoid_files.to_list()}\n return depset([f for f in files if f.short_path not in avoid_short_paths])\n\n return depset([f for f in files if f.short_path in whitelisted_mapping])\n\ndef _dedupe_files(files):\n \"\"\"Deduplicates files based on their short paths.\n\n Args:\n files: The set of `File`s that should be deduplicated based on their short\n paths.\n Returns:\n The `depset` of `File`s where duplicate short paths have been removed by\n arbitrarily removing all but one from the set.\n \"\"\"\n short_path_to_files_mapping = {}\n\n for f in files:\n short_path = f.short_path\n if short_path not in short_path_to_files_mapping:\n short_path_to_files_mapping[short_path] = f\n\n return depset(short_path_to_files_mapping.values())\n\ndef _dedupe_resource_set_files(resource_set):\n \"\"\"Deduplicates the files in a resource set based on their short paths.\n\n It is possible to have genrules that produce outputs that will be used later\n as resource inputs to other rules (and not just genrules, in fact, but any\n rule that produces an output file), and these rules register separate actions\n for each split configuration when a target is built for multiple\n architectures. If we don't deduplicate those files, the outputs of both sets\n of actions will be sent to the resource processor and it will attempt to put\n the compiled results in the same intermediate file location.\n\n Therefore, we deduplicate resources that have the same short path, which\n ensures (due to action pruning) that only one set of actions will be executed\n and only one output will be generated. This implies that the genrule must\n produce equivalent content for each configuration. This is likely OK, because\n if the output is actually architecture-dependent, then the actions need to\n produce those outputs with names that allow the bundler to distinguish them.\n\n Args:\n resource_set: The resource set whose `infoplists`, `resources`,\n `structured_resources`, and `structured_resource_zips` should be\n deduplicated.\n Returns:\n A new resource set with duplicate files removed.\n \"\"\"\n return AppleResourceSet(\n bundle_dir = resource_set.bundle_dir,\n infoplists = _dedupe_files(resource_set.infoplists),\n objc_bundle_imports = _dedupe_files(resource_set.objc_bundle_imports),\n resource_bundle_target_data = resource_set.resource_bundle_target_data,\n resources = _dedupe_files(resource_set.resources),\n structured_resources = _dedupe_files(resource_set.structured_resources),\n structured_resource_zips = _dedupe_files(\n resource_set.structured_resource_zips,\n ),\n swift_module = resource_set.swift_module,\n )\n\ndef _apple_resource_set_utils_prefix_bundle_dir(resource_set, prefix):\n \"\"\"Returns an equivalent resource set with a new path prepended to it.\n\n This function should be used by rules that allow nested bundles; for example,\n a resource bundle that contains other resource bundles must prepend its own\n `bundle_dir` to the `bundle_dir`s of its child bundles to ensure that the\n files are bundled in the correct location.\n\n For example, if `resource_set` has a `bundle_dir` of `\"Foo.bundle\"` and\n `prefix` is `\"Bar.bundle\"`, the returned resource set will have a\n `bundle_dir` equal to `\"Bar.bundle/Foo.bundle\"`. 
Likewise, if `resource_set`\n had a `bundle_dir` of `None`, then the new `bundle_dir` would be\n `\"Bar.bundle\"`.\n\n Args:\n resource_set: The `AppleResourceSet` whose `bundle_dir` should be prefixed.\n prefix: The path that should be prepended to the existing `bundle_dir`.\n Returns:\n A new `AppleResourceSet` whose `bundle_dir` has been prefixed with the\n given path.\n \"\"\"\n nested_dir = prefix\n if resource_set.bundle_dir:\n nested_dir += \"/\" + resource_set.bundle_dir\n\n return AppleResourceSet(\n bundle_dir = nested_dir,\n infoplists = resource_set.infoplists,\n objc_bundle_imports = resource_set.objc_bundle_imports,\n resource_bundle_target_data = resource_set.resource_bundle_target_data,\n resources = resource_set.resources,\n structured_resources = resource_set.structured_resources,\n structured_resource_zips = resource_set.structured_resource_zips,\n swift_module = resource_set.swift_module,\n )\n\n# Export the module containing helper functions for resource sets.\napple_resource_set_utils = struct(\n minimize = _apple_resource_set_utils_minimize,\n prefix_bundle_dir = _apple_resource_set_utils_prefix_bundle_dir,\n)\n", "id": "3610031", "language": "Python", "matching_score": 7.238765239715576, "max_stars_count": 0, "path": "apple/providers.bzl" }, { "content": "# Copyright 2017 The Bazel Authors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Core bundling support used by the Apple rules.\n\nThis file is only meant to be imported by the platform-specific top-level rules\n(ios.bzl, tvos.bzl, and so forth).\n\"\"\"\n\nload(\n \"@bazel_skylib//lib:paths.bzl\",\n \"paths\",\n)\nload(\n \"@build_bazel_rules_apple//apple:providers.bzl\",\n \"AppleBundleInfo\",\n \"AppleExtraOutputsInfo\",\n \"AppleResourceInfo\",\n \"AppleResourceSet\",\n \"apple_resource_set_utils\",\n)\nload(\n \"@build_bazel_rules_apple//apple:utils.bzl\",\n \"group_files_by_directory\",\n \"optionally_prefixed_path\",\n)\nload(\n \"@build_bazel_rules_apple//apple/bundling:binary_support.bzl\",\n \"binary_support\",\n)\nload(\n \"@build_bazel_rules_apple//apple/bundling:bitcode_actions.bzl\",\n \"bitcode_actions\",\n)\nload(\n \"@build_bazel_rules_apple//apple/bundling:bundling_support.bzl\",\n \"bundling_support\",\n)\nload(\n \"@build_bazel_rules_apple//apple/bundling:clang_support.bzl\",\n \"clang_support\",\n)\nload(\n \"@build_bazel_rules_apple//apple/bundling:codesigning_support.bzl\",\n \"codesigning_support\",\n)\nload(\n \"@build_bazel_rules_apple//apple/bundling:debug_symbol_actions.bzl\",\n \"debug_symbol_actions\",\n)\nload(\n \"@build_bazel_rules_apple//apple/bundling:entitlements.bzl\",\n \"AppleEntitlementsInfo\",\n)\nload(\n \"@build_bazel_rules_apple//apple/bundling:file_support.bzl\",\n \"file_support\",\n)\nload(\n \"@build_bazel_rules_apple//apple/bundling:platform_support.bzl\",\n \"platform_support\",\n)\nload(\n \"@build_bazel_rules_apple//apple/bundling:plist_actions.bzl\",\n \"plist_actions\",\n)\nload(\n 
\"@build_bazel_rules_apple//apple/bundling:product_actions.bzl\",\n \"product_actions\",\n)\nload(\n \"@build_bazel_rules_apple//apple/bundling:product_support.bzl\",\n \"product_support\",\n)\nload(\n \"@build_bazel_rules_apple//apple/bundling:resource_actions.bzl\",\n \"resource_actions\",\n)\nload(\n \"@build_bazel_rules_apple//apple/bundling:swift_actions.bzl\",\n \"swift_actions\",\n)\nload(\n \"@build_bazel_rules_apple//apple/bundling:swift_support.bzl\",\n \"swift_support\",\n)\nload(\n \"@build_bazel_rules_apple//common:attrs.bzl\",\n \"attrs\",\n)\nload(\n \"@build_bazel_rules_apple//common:define_utils.bzl\",\n \"define_utils\",\n)\nload(\n \"@build_bazel_rules_apple//common:providers.bzl\",\n \"providers\",\n)\nload(\n \"@build_bazel_rules_apple//apple/bundling:smart_dedupe.bzl\",\n \"smart_dedupe\",\n)\n\n# Directories inside .frameworks that should not be included in final\n# application/extension bundles.\n_FRAMEWORK_DIRS_TO_EXCLUDE = [\n \"Headers\",\n \"Modules\",\n \"PrivateHeaders\",\n]\n\n# Resource sets with None for their bundle_dir represent resources that belong\n# in the root of the bundle, as opposed to a .bundle subdirectory. This matches\n# the default value for AppleResourceSet's `bundle_dir` (in providers.bzl).\n_ROOT_BUNDLE_DIR = None\n\n# A private provider used by the bundler to propagate AppleResourceSet\n# information between frameworks and the bundles that depend on them. This is\n# used during resource de-duping.\n_ResourceBundleInfo = provider()\n\ndef _bundlable_files_for_control(bundlable_files):\n \"\"\"Converts a list of bundlable files to be used by bundler.py.\n\n Bundlable files are stored during the initial analysis with their `src` as\n the `File` artifact so that they can be passed as inputs to other actions.\n But when writing out the control file for the bundling script, we need to\n write out the path names. This function applies that simple conversion.\n\n Args:\n bundlable_files: A list of bundlable file values.\n\n Returns:\n A list representing the same bundlable files, but with the `File` objects\n replaced by their paths.\n \"\"\"\n return [\n bundling_support.bundlable_file(\n bf.src.path,\n bf.dest if bf.dest else \"\",\n bf.executable,\n )\n for bf in bundlable_files\n ]\n\ndef _convert_native_bundlable_file(bf, bundle_dir = _ROOT_BUNDLE_DIR):\n \"\"\"Transforms bundlable file values obtained from an `objc` provider.\n\n The native `objc` provider returns bundlable files as a struct with two keys:\n `file` to represent the file being bundled and `bundle_path` for the path\n inside the bundle where it should be placed. 
These rules use a different\n format, so this function converts the native format to the one we need.\n\n This list can also contain Skylark bundlable files (as returned by\n `bundling_support.bundlable_file`); they will be returned with the\n `bundle_dir` prepended to their destination.\n\n TODO(b/33618143): Remove this when the native bundlable file type is\n removed.\n\n Args:\n bf: A bundlable file, potentially from a native provider.\n bundle_dir: If provided, a directory path that will be prepended to the\n `bundle_path` of the bundlable file's destination path.\n\n Returns:\n A list of bundlable file values corresponding to the inputs, as returned by\n `bundling_support.bundlable_file`.\n \"\"\"\n if hasattr(bf, \"file\") and hasattr(bf, \"bundle_path\"):\n return bundling_support.bundlable_file(\n bf.file,\n optionally_prefixed_path(bf.bundle_path, bundle_dir),\n )\n else:\n return bundling_support.bundlable_file(\n bf.src,\n optionally_prefixed_path(bf.dest, bundle_dir),\n bf.executable,\n )\n\ndef _bundlable_dynamic_framework_files(ctx, files):\n \"\"\"Computes the set of bundlable files for framework dependencies.\n\n The `files` argument passed into this function is expected to be a set of\n `File`s from the `dynamic_framework_file` key of a dependency's `objc`\n provider. This function then returns a set of bundlable files that dictates\n where the framework files should go in the final application/extension\n bundle, excluding any files that don't need to be packaged with the final\n product (such as headers).\n\n Args:\n ctx: The Skylark context.\n files: A `depset` of `File`s inside .framework folders that should be merged\n into the bundle.\n\n Returns:\n A list of bundlable file structs corresponding to the files that should be\n copied into the bundle.\n \"\"\"\n bundle_files = []\n\n grouped_files = group_files_by_directory(files, [\"framework\"], \"deps\")\n for framework, framework_files in grouped_files.items():\n framework_name = paths.basename(framework)\n for f in framework_files:\n relative_path = paths.relativize(f.path, framework)\n first_segment = relative_path.partition(\"/\")[0]\n if first_segment not in _FRAMEWORK_DIRS_TO_EXCLUDE:\n bundle_files.append(bundling_support.contents_file(\n ctx,\n f,\n \"Frameworks/%s/%s\" % (framework_name, relative_path),\n ))\n\n return bundle_files\n\ndef _validate_attributes(ctx, bundle_id):\n \"\"\"Validates the target's attributes and fails the build if any are invalid.\n\n Args:\n ctx: The Skylark context.\n bundle_id: The bundle_id to use for this target.\n \"\"\"\n families = platform_support.families(ctx)\n allowed_families = ctx.attr._allowed_families\n for family in families:\n if family not in allowed_families:\n fail((\"One or more of the provided device families \\\"%s\\\" is not in the \" +\n \"list of allowed device families \\\"%s\\\"\") % (\n families,\n allowed_families,\n ))\n\n if (getattr(ctx.attr, \"extension_safe\", False) or\n getattr(ctx.attr, \"_extension_safe\", False)):\n for framework in getattr(ctx.attr, \"frameworks\", []):\n if not framework[AppleBundleInfo].extension_safe:\n print((\"The target %s is for an extension but its framework \" +\n \"dependency %s is not marked extension-safe. Specify \" +\n \"'extension_safe = 1' on the framework target. 
This \" +\n \"will soon cause a build failure.\") % (\n ctx.label,\n framework.label,\n ))\n\n if not ctx.attr.minimum_os_version:\n # TODO(b/38006810): Once the minimum OS command line flags are deprecated,\n # update this message to use the SDK version instead.\n minimum_os = platform_support.minimum_os(ctx)\n platform_type = platform_support.platform_type(ctx)\n print((\"The target %s does not specify its minimum OS version, so %s \" +\n \"from the --%s_minimum_os setting will be used. Please set one \" +\n \"for this target specifically by using the minimum_os_version \" +\n \"attribute (for example, 'minimum_os_version = \\\"9.0\\\"').\") %\n (ctx.label, minimum_os, platform_type))\n\n # bundle_id is optional for some rules, hence nil check.\n if bundle_id != None:\n bundling_support.validate_bundle_id(bundle_id)\n\ndef _invalid_top_level_directories_for_platform(platform_type):\n \"\"\"List of invalid top level directories for the given platform.\n\n Args:\n platform_type: String containing the platform type for the bundle being\n validated.\n\n Returns:\n A list of top level directory names that have to be avoided for the given\n platform.\n \"\"\"\n\n # As far as we know, there are no locations in macOS bundles that would break\n # codesigning.\n if platform_type == \"macos\":\n return []\n\n # Non macOS bundles can't have a top level Resources folder, as it breaks\n # codesigning for some reason. With this, we validate that there are no\n # Resources folder going to be created in the bundle, with a message that\n # better explains which files are incorrectly placed.\n return [\"Resources\"]\n\ndef _validate_files_to_bundle(\n invalid_top_level_dirs,\n platform_type,\n files_to_bundle):\n \"\"\"Validates that the files to bundle are not placed in invalid locations.\n\n codesign will complain when building a non macOS bundle that contains certain\n folders at the top level. We check if there are files that would break\n codesign, and fail early with a nicer message.\n\n Args:\n invalid_top_level_dirs: String list containing the top level\n directories that have to be avoided when bundling resources.\n platform_type: String containing the platform type for the bundle being\n validated.\n files_to_bundle: String list of paths of where resources are going to be\n placed in the final bundle.\n \"\"\"\n invalid_destinations = []\n for invalid_dir in invalid_top_level_dirs:\n for file_to_bundle in files_to_bundle:\n if file_to_bundle.startswith(invalid_dir + \"/\"):\n invalid_destinations.append(file_to_bundle)\n\n if invalid_destinations:\n fail((\"Error: The following files would be bundled in invalid \" +\n \"locations.\\n\\n%s\\n\\nFor %s bundles, the following top level \" +\n \"directories are invalid: %s\") %\n (\n \"\\n\".join(sorted(invalid_destinations)),\n platform_type,\n \", \".join(invalid_top_level_dirs),\n ))\n\ndef _dedupe_bundle_merge_files(bundlable_files):\n \"\"\"Deduplicates bundle files by destination.\n\n No two resources should be destined for the same location within the\n bundle unless they come from the same root-relative source. 
This removes\n duplicates but fails if two different source files are to end up at the same\n bundle path.\n\n Args:\n bundlable_files: The list of bundlable files to deduplicate.\n\n Returns:\n A list of bundle files with duplicates purged.\n \"\"\"\n deduped_bundlable_files = []\n path_to_files = {}\n for bf in bundlable_files:\n this_file = bf.src\n\n if not bf.contents_only:\n other_bf = path_to_files.get(bf.dest)\n if other_bf:\n if other_bf.src.short_path != this_file.short_path:\n fail((\"Multiple files would be placed at \\\"%s\\\" in the bundle, \" +\n \"which is not allowed: [%s,%s]\") % (\n bf.dest,\n this_file.short_path,\n other_bf.src.short_path,\n ))\n else:\n deduped_bundlable_files.append(bf)\n path_to_files[bf.dest] = bf\n else:\n deduped_bundlable_files.append(bf)\n\n return deduped_bundlable_files\n\ndef _safe_files(ctx, name):\n \"\"\"Safely returns files from an attribute, or the empty set.\n\n Args:\n ctx: The Skylark context.\n name: The attribute name.\n\n Returns:\n The `depset` of `File`s if the attribute exists, or an empty set otherwise.\n \"\"\"\n return depset(getattr(ctx.files, name, []))\n\ndef _is_ipa(ctx):\n \"\"\"Returns a value indicating whether the target is an IPA.\n\n This function returns True for \"releasable\" artifacts that are archived as\n IPAs, such as iOS and tvOS applications. It returns False for \"intermediate\"\n bundles, like iOS extensions or watchOS applications (which must be embedded\n in an iOS application).\n\n Args:\n ctx: The Skylark context.\n\n Returns:\n True if the target is archived as an IPA, or False if it is archived as a\n ZIP.\n \"\"\"\n return ctx.outputs.archive.basename.endswith(\".ipa\")\n\ndef _create_unprocessed_archive(\n ctx,\n bundle_name,\n bundle_path_in_archive,\n bundle_merge_files,\n bundle_merge_zips,\n root_merge_zips,\n mnemonic,\n progress_description):\n \"\"\"Creates an archive containing the not-yet-signed bundle.\n\n This function registers an action that uses the underlying bundler.py tool to\n build an archive with the bundle contents, before the post-processing script\n is run (if present) and before it is signed. 
This is done because creating\n a ZIP in this way turns out to be much faster than performing a large number\n of small file copies (for targets with many resources).\n\n Args:\n ctx: The Skylark context.\n bundle_name: The name of the bundle.\n bundle_path_in_archive: The path to the bundle within the archive.\n bundle_merge_files: A list of bundlable file values that represent files\n that should be copied to specific locations in the bundle.\n bundle_merge_zips: A list of bundlable file values that represent ZIP\n archives that should be expanded into specific locations in the bundle.\n root_merge_zips: A list of bundlable file values that represent ZIP\n archives that should be expanded into specific locations relative to\n the root of the archive.\n mnemonic: The mnemonic to use for the bundling action.\n progress_description: The message that should be shown as the progress\n description for the bundling action.\n\n Returns:\n A `File` representing the unprocessed archive.\n \"\"\"\n unprocessed_archive = file_support.intermediate(\n ctx,\n \"%{name}.unprocessed.zip\",\n )\n\n control = struct(\n bundle_merge_files = _bundlable_files_for_control(bundle_merge_files),\n bundle_merge_zips = _bundlable_files_for_control(bundle_merge_zips),\n bundle_path = bundle_path_in_archive,\n output = unprocessed_archive.path,\n root_merge_zips = _bundlable_files_for_control(root_merge_zips),\n )\n control_file = file_support.intermediate(ctx, \"%{name}.bundler-control\")\n ctx.actions.write(\n output = control_file,\n content = control.to_json(),\n )\n\n bundler_inputs = (\n list(bundling_support.bundlable_file_sources(\n bundle_merge_files + bundle_merge_zips + root_merge_zips,\n )) + [control_file]\n )\n\n ctx.actions.run(\n inputs = bundler_inputs,\n outputs = [unprocessed_archive],\n executable = ctx.executable._bundletool,\n arguments = [control_file.path],\n mnemonic = mnemonic,\n progress_message = \"Bundling %s: %s\" % (progress_description, bundle_name),\n )\n return unprocessed_archive\n\ndef _process_and_sign_archive(\n ctx,\n bundle_name,\n bundle_path_in_archive,\n output_archive,\n unprocessed_archive,\n mnemonic,\n progress_description):\n \"\"\"Post-processes and signs an archived bundle.\n\n Args:\n ctx: The Skylark context.\n bundle_name: The name of the bundle.\n bundle_path_in_archive: The path to the bundle inside the archive.\n output_archive: The `File` representing the processed and signed archive.\n unprocessed_archive: The `File` representing the archive containing the\n bundle that has not yet been processed or signed.\n mnemonic: The mnemonic to use for the bundling action.\n progress_description: The message that should be shown as the progress\n description for the bundling action.\n\n Returns:\n Tuple containing:\n 1. The path to the directory that represents the root of the expanded\n processed and signed files (before zipping). This is useful to external\n tools that want to access the directory directly instead of unzipping\n the final archive again.\n 2. 
The entitlements file used to sign, potentially None.\n \"\"\"\n script_inputs = [unprocessed_archive]\n\n entitlements = None\n\n # Use the entitlements from the internal provider if it's present (to support\n # rules that manipulate them before passing them to the bundler); otherwise,\n # use the file that was provided instead.\n if getattr(ctx.attr, \"entitlements\", None):\n if AppleEntitlementsInfo in ctx.attr.entitlements:\n entitlements = (\n ctx.attr.entitlements[AppleEntitlementsInfo].final_entitlements\n )\n else:\n entitlements = ctx.file.entitlements\n\n if entitlements:\n script_inputs.append(entitlements)\n\n provisioning_profile = getattr(ctx.file, \"provisioning_profile\", None)\n if provisioning_profile:\n script_inputs.append(provisioning_profile)\n\n signing_command_lines = \"\"\n if not ctx.attr._skip_signing:\n frameworks = bundling_support.path_in_contents_dir(ctx, \"Frameworks/\")\n paths_to_sign = [\n codesigning_support.path_to_sign(\n \"$WORK_DIR/\" + bundle_path_in_archive + \"/\" + frameworks,\n optional = True,\n glob = \"*\",\n ),\n ]\n is_device = platform_support.is_device_build(ctx)\n if is_device or codesigning_support.should_sign_simulator_bundles(ctx):\n paths_to_sign.append(\n codesigning_support.path_to_sign(\n \"$WORK_DIR/\" + bundle_path_in_archive,\n ),\n )\n signing_command_lines = codesigning_support.signing_command_lines(\n ctx,\n paths_to_sign,\n entitlements,\n )\n\n ipa_post_processor = ctx.executable.ipa_post_processor\n ipa_post_processor_path = \"\"\n if ipa_post_processor:\n ipa_post_processor_path = ipa_post_processor.path\n script_inputs.append(ipa_post_processor)\n\n # The directory where the archive contents will be collected. This path is\n # also passed out via the AppleBundleInfo provider so that external tools can\n # access the bundle layout directly, saving them an extra unzipping step.\n work_dir = paths.replace_extension(output_archive.path, \".archive-root\")\n\n # Only compress the IPA for optimized (release) builds. For debug builds,\n # zip without compression, which will speed up the build.\n should_compress = (ctx.var[\"COMPILATION_MODE\"] == \"opt\")\n\n process_and_sign_script = file_support.intermediate(\n ctx,\n \"%{name}.process-and-sign.sh\",\n )\n ctx.actions.expand_template(\n template = ctx.file._process_and_sign_template,\n output = process_and_sign_script,\n is_executable = True,\n substitutions = {\n \"%output_path%\": output_archive.path,\n \"%ipa_post_processor%\": ipa_post_processor_path or \"\",\n \"%signing_command_lines%\": signing_command_lines,\n \"%should_compress%\": \"1\" if should_compress else \"\",\n \"%unprocessed_archive_path%\": unprocessed_archive.path,\n \"%work_dir%\": work_dir,\n },\n )\n\n platform_support.xcode_env_action(\n ctx,\n inputs = script_inputs,\n outputs = [output_archive],\n executable = process_and_sign_script,\n mnemonic = mnemonic + \"ProcessAndSign\",\n progress_message = \"Processing and signing %s: %s\" % (\n progress_description,\n bundle_name,\n ),\n )\n return (work_dir, entitlements)\n\ndef _experimental_create_and_sign_bundle(\n ctx,\n bundle_dir,\n bundle_name,\n bundle_merge_files,\n bundle_merge_zips,\n mnemonic,\n progress_description):\n \"\"\"Bundles and signs the current target.\n\n THIS IS CURRENTLY EXPERIMENTAL. 
It can be enabled by building with the\n `apple.experimental_bundling` define set to `bundle_and_archive`\n or `bundle_only` but it should not be used for production builds yet.\n\n Args:\n ctx: The Skylark context.\n bundle_dir: The directory of the bundle.\n bundle_name: The name of the bundle.\n bundle_merge_files: A list of bundlable file values that represent files\n that should be copied to specific locations in the bundle.\n bundle_merge_zips: A list of bundlable file values that represent ZIP\n archives that should be expanded into specific locations in the bundle.\n mnemonic: The mnemonic to use for the bundling action.\n progress_description: The message that should be shown as the progress\n description for the bundling action.\n \"\"\"\n control_file = file_support.intermediate(\n ctx,\n \"%{name}.experimental-bundler-control\",\n )\n bundler_inputs = (\n list(bundling_support.bundlable_file_sources(bundle_merge_files +\n bundle_merge_zips)) + [control_file]\n )\n\n entitlements = attrs.get(ctx.file, \"entitlements\")\n if entitlements:\n bundler_inputs.append(entitlements)\n\n signing_command_lines = \"\"\n if not ctx.attr._skip_signing:\n frameworks = bundling_support.path_in_contents_dir(ctx, \"Frameworks/\")\n paths_to_sign = [\n codesigning_support.path_to_sign(\n \"$WORK_DIR/\" + bundle_dir.basename + \"/\" + frameworks,\n optional = True,\n glob = \"*\",\n ),\n ]\n is_device = platform_support.is_device_build(ctx)\n if is_device or codesigning_support.should_sign_simulator_bundles(ctx):\n paths_to_sign.append(\n codesigning_support.path_to_sign(\"$WORK_DIR/\" + bundle_dir.basename),\n )\n signing_command_lines = codesigning_support.signing_command_lines(\n ctx,\n paths_to_sign,\n entitlements,\n )\n\n # TODO(allevato): Add a `bundle_post_processor` attribute.\n\n control = struct(\n bundle_merge_files = _bundlable_files_for_control(bundle_merge_files),\n bundle_merge_zips = _bundlable_files_for_control(bundle_merge_zips),\n code_signing_commands = signing_command_lines,\n output = bundle_dir.path,\n )\n ctx.actions.write(\n output = control_file,\n content = control.to_json(),\n )\n\n platform_support.xcode_env_action(\n ctx,\n inputs = bundler_inputs,\n outputs = [bundle_dir],\n executable = ctx.executable._bundletool_experimental,\n arguments = [control_file.path],\n mnemonic = mnemonic,\n progress_message = \"Bundling and signing %s: %s\" % (\n progress_description,\n bundle_name,\n ),\n )\n\ndef _run(\n ctx,\n mnemonic,\n progress_description,\n bundle_id,\n binary_artifact,\n additional_bundlable_files = depset(),\n additional_resource_sets = [],\n avoid_propagated_framework_files = None,\n embedded_bundles = [],\n framework_files = depset(),\n is_dynamic_framework = False,\n deps_objc_providers = [],\n suppress_bundle_infoplist = False,\n version_keys_required = True,\n extra_runfiles = [],\n resource_dep_bundle_attributes = [\"frameworks\"],\n debug_outputs = None,\n resource_info_providers = []):\n \"\"\"Implements the core bundling logic for an Apple bundle archive.\n\n Args:\n ctx: The Skylark context. Required.\n mnemonic: The mnemonic to use for the final bundling action. Required.\n progress_description: The human-readable description of the bundle being\n created in the progress message. For example, in the progress message\n \"Bundling iOS application: <name>\", the string passed into this\n argument would be \"iOS application\". Required.\n bundle_id: Bundle identifier to set to the bundle. Required.\n binary_artifact: The binary artifact to bundle. 
Required.\n      additional_bundlable_files: An optional list of additional bundlable files\n        that should be copied into the final bundle at locations denoted by\n        their bundle path.\n      additional_resource_sets: An optional list of `AppleResourceSet` values that\n        represent resources not included by dependencies that should also be\n        processed among the other resources in the target (for example, app\n        icons, launch images, launch storyboards, and settings bundle files).\n      avoid_propagated_framework_files: An optional list of framework files to be\n        avoided when bundling the target. This exists to allow test bundles to\n        deduplicate framework files which have already been bundled in the test\n        host.\n      embedded_bundles: A list of values (as returned by\n        `bundling_support.embedded_bundle`) that denote bundles such as\n        extensions or frameworks that should be included in the bundle being\n        built.\n      framework_files: An optional set of bundlable files that should be copied\n        into the framework that this rule produces. If any files are present,\n        this is implicitly noted to be a framework bundle, and additional\n        provider keys (such as framework search paths) will be propagated\n        appropriately.\n      is_dynamic_framework: If True, create this bundle as a dynamic framework.\n      deps_objc_providers: objc providers containing information about the\n        dependencies of the binary target.\n      suppress_bundle_infoplist: If True, ensure the Info.plist is not created for\n        the main bundle.\n      version_keys_required: If True, the merged Info.plist file is required\n        to have entries for CFBundleShortVersionString and CFBundleVersion.\n        NOTE: Almost every type of bundle for Apple's platforms should get\n        version numbers; so this is only needed to opt out of the requirement\n        for the exceptional cases (like unit test bundles).\n      extra_runfiles: List of additional files to be marked as required for\n        running this target.\n      resource_dep_bundle_attributes: List of attributes that reference bundles\n        which contain resources that need to be deduplicated from the current\n        bundle.\n      debug_outputs: dSYM bundle binary provider.\n      resource_info_providers: List of providers with transitive resource sets.\n\n    Returns:\n      A tuple containing two values:\n      1. A list of modern providers that should be propagated by the calling rule.\n      2. A dictionary of legacy providers that should be propagated by the calling\n         rule.\n    \"\"\"\n    _validate_attributes(ctx, bundle_id)\n    if suppress_bundle_infoplist:\n        if bundle_id:\n            fail(\"Internal Error: Suppressing bundle Info.plist, but got a \" +\n                 \"bundle_id?\")\n        if version_keys_required:\n            fail(\"Internal Error: Suppressing bundle Info.plist, but expected \" +\n                 \"version keys in one?\")\n\n    # A list of files that should be used as the outputs of the rule invoking this\n    # instance of the bundler, but which should not necessarily be propagated to\n    # bundles that embed this bundle. For example, the application/extension\n    # bundle itself is a main output but not an extra output because we don't\n    # want extensions to be treated as separate outputs from the application that\n    # depends on them.\n    main_outputs = []\n\n    # A list of extra outputs that should be returned by the calling rule and also\n    # propagated by bundles that depend on the invoking target. 
For example, the\n    # dSYM bundles of extensions should be propagated up to depending applications\n    # so that they are generated when the application is built.\n    extra_outputs = []\n\n    # A list of output files included in the local_outputs output group. These\n    # must be requested explicitly by including \"local_outputs\" in the\n    # --output_groups flag of the build command line.\n    local_outputs = []\n\n    # The name of the target is used as the name of the executable in the binary,\n    # which we also need to write into the Info.plist file over whatever the user\n    # already has there.\n    bundle_name = bundling_support.bundle_name(ctx)\n\n    # bundle_merge_files collects the files (or directories of files) from\n    # providers and actions that should be copied into the bundle by the final\n    # packaging action.\n    bundle_merge_files = [\n        _convert_native_bundlable_file(bf)\n        for bf in additional_bundlable_files\n    ]\n\n    # bundle_merge_zips collects ZIP files from providers and actions that should\n    # be expanded into the bundle by the final packaging action.\n    bundle_merge_zips = []\n\n    # Collects the ZIP files that should be merged into the root of the archive.\n    # Note that this only applies if an IPA is being built; it is\n    # ignored for ZIP archives created from non-app artifacts like extensions.\n    root_merge_zips = []\n\n    # Collects ZIP files representing frameworks that should be propagated to the\n    # bundle inside which the current bundle is embedded.\n    propagated_framework_zips = []\n\n    # Keeps track of whether this is a device build or a simulator build.\n    is_device = platform_support.is_device_build(ctx)\n\n    # If this is a device build for which code signing is required, copy the\n    # provisioning profile into the bundle with the expected name.\n    provisioning_profile = getattr(ctx.file, \"provisioning_profile\", None)\n    if (is_device and provisioning_profile and not ctx.attr._skip_signing):\n        bundle_merge_files.append(bundling_support.contents_file(\n            ctx,\n            provisioning_profile,\n            codesigning_support.embedded_provisioning_profile_name(ctx),\n        ))\n\n    # The path to the .app bundle inside the IPA archive.\n    bundle_path_in_archive = (ctx.attr._path_in_archive_format %\n                              bundling_support.bundle_name_with_extension(ctx))\n\n    # Start by collecting resources for the bundle being built. The empty string\n    # for the bundle path indicates that resources should appear at the top level\n    # of the bundle.\n    target_infoplists = list(_safe_files(ctx, \"infoplists\"))\n\n    resource_sets = list(additional_resource_sets)\n\n    dep_bundle_resource_sets = []\n    owners_mappings = []\n    avoided_owners_mappings = []\n\n    # Collect dependencies' framework zips to be bundled and/or propagated.\n    for framework in attrs.get(ctx.attr, \"frameworks\", []):\n        propagated_framework_zips.append(framework[AppleBundleInfo].archive)\n\n    # Collect resource sets from dependencies.\n    if attrs.get(ctx.attr, \"exclude_resources\"):\n        resource_sets.append(AppleResourceSet(infoplists = target_infoplists))\n    else:\n        for dep_bundle_attribute in resource_dep_bundle_attributes:\n            for dep_bundle in attrs.get_as_list(ctx.attr, dep_bundle_attribute, []):\n                if dep_bundle and _ResourceBundleInfo in dep_bundle:\n                    avoided_owners_mappings.append(dep_bundle[_ResourceBundleInfo].owners)\n                    dep_bundle_resource_sets.extend(\n                        dep_bundle[_ResourceBundleInfo].resource_sets,\n                    )\n\n        # If no resource_info_providers were passed to _run, retrieve the\n        # resources from the binary target. 
Otherwise, assume there is no\n # binary target.\n if not resource_info_providers:\n provider = binary_support.get_binary_provider(\n ctx.attr.deps,\n AppleResourceInfo,\n )\n if provider:\n resource_info_providers = [provider]\n\n # Add the transitive resource sets, except for those that have already\n # been included by a framework dependency.\n for provider in resource_info_providers:\n owners_mappings.append(provider.owners)\n for rs in provider.resource_sets:\n resource_sets.append(rs)\n\n # Finally, add any extra resources specific to the target being built\n # itself.\n target_resources = _safe_files(ctx, \"strings\")\n resource_sets.append(AppleResourceSet(\n infoplists = target_infoplists,\n resources = target_resources,\n ))\n\n top_level_resources = []\n for top_level_resource_set in additional_resource_sets:\n for field in [\"infoplists\", \"objc_bundle_imports\", \"resources\", \"structured_resources\"]:\n resources = getattr(top_level_resource_set, field)\n if resources:\n top_level_resources.extend(resources.to_list())\n\n for field in [\"strings\", \"infoplists\"]:\n top_level_resources.extend(getattr(ctx.files, field, []))\n\n owners_mappings.append(smart_dedupe.create_owners_mapping(\n top_level_resources,\n owner = str(ctx.label),\n ))\n\n owners_mapping = smart_dedupe.merge_owners_mappings(\n owners_mappings,\n default_owner = str(ctx.label),\n )\n avoided_owners_mapping = smart_dedupe.merge_owners_mappings(\n avoided_owners_mappings,\n validate_all_files_owned = True,\n )\n\n smart_dedupe_enabled = define_utils.bool_value(\n ctx,\n \"apple.experimental.smart_dedupe\",\n True,\n )\n whitelisted_mapping = None\n if smart_dedupe_enabled:\n whitelisted_mapping = smart_dedupe.subtract_owners_mappings(\n owners_mapping,\n avoided_owners_mapping,\n )\n\n smart_dedupe_debug_enabled = define_utils.bool_value(\n ctx,\n \"apple.experimental.smart_dedupe_debug\",\n False,\n )\n if smart_dedupe_debug_enabled:\n debug_file = ctx.actions.declare_file(\"%s-smart_dedupe_debug.txt\" % ctx.label.name)\n extra_outputs.append(debug_file)\n smart_dedupe.write_debug_file(\n owners_mapping,\n avoided_owners_mapping,\n ctx.actions,\n debug_file,\n )\n\n # Iterate over each set of resources and register the actions. 
This\n # ensures that each bundle among the dependencies has its resources\n # processed independently.\n dedupe_unbundled = getattr(ctx.attr, \"dedupe_unbundled_resources\", False)\n resource_sets = apple_resource_set_utils.minimize(\n resource_sets,\n dep_bundle_resource_sets,\n dedupe_unbundled,\n whitelisted_mapping = whitelisted_mapping,\n )\n process_results = resource_actions.process_resource_sets(\n ctx,\n bundle_id,\n resource_sets,\n )\n\n bundle_merge_files.extend(process_results.bundle_merge_files)\n bundle_merge_zips.extend(process_results.bundle_merge_zips)\n\n platform_type = platform_support.platform_type(ctx)\n invalid_top_level_dirs = _invalid_top_level_directories_for_platform(\n platform_type,\n )\n if invalid_top_level_dirs:\n # Avoid processing the files if invalid_top_level_dirs is empty.\n _validate_files_to_bundle(\n invalid_top_level_dirs,\n platform_type,\n [x.dest for x in bundle_merge_files if hasattr(x, \"dest\")],\n )\n\n if suppress_bundle_infoplist:\n # Remove any value (default is so pop won't fail if there was no entry for\n # the root).\n process_results.bundle_infoplists.pop(_ROOT_BUNDLE_DIR, None)\n elif _ROOT_BUNDLE_DIR not in process_results.bundle_infoplists:\n process_results.bundle_infoplists[_ROOT_BUNDLE_DIR] = []\n\n # Merge the Info.plists into binary format and collect the resulting PkgInfo\n # file as well. Keep track of the Info.plist for the main bundle while we do\n # this so that it can be propagated out (for situations where this bundle is a\n # child of another bundle and bundle ID consistency is checked).\n main_infoplist = None\n for bundle_dir, infoplists in process_results.bundle_infoplists.items():\n merge_infoplist_args = {\n \"input_plists\": list(infoplists),\n }\n\n bundles_to_datas = process_results.bundle_dir_to_resource_bundle_target_datas\n bundle_target_data = bundles_to_datas.get(bundle_dir)\n merge_infoplist_args[\"resource_bundle_target_data\"] = bundle_target_data\n\n if not bundle_dir:\n # Extra work/options only when doing the main bundle and not some\n # resource bundle being include in the product.\n child_infoplists = [\n eb.target[AppleBundleInfo].infoplist\n for eb in embedded_bundles\n if eb.verify_has_child_plist\n ]\n child_required_values = [\n (\n eb.target[AppleBundleInfo].infoplist,\n [[eb.parent_bundle_id_reference, bundle_id]],\n )\n for eb in embedded_bundles\n if eb.parent_bundle_id_reference\n ]\n merge_infoplist_args[\"child_plists\"] = child_infoplists\n merge_infoplist_args[\"child_required_values\"] = child_required_values\n merge_infoplist_args[\"bundle_id\"] = bundle_id\n merge_infoplist_args[\"extract_from_ctxt\"] = True\n merge_infoplist_args[\"include_xcode_env\"] = True\n merge_infoplist_args[\"version_keys_required\"] = version_keys_required\n\n plist_results = plist_actions.merge_infoplists(\n ctx,\n bundle_dir,\n **merge_infoplist_args\n )\n\n if not bundle_dir:\n main_infoplist = plist_results.output_plist\n\n # The files below need to be merged with specific names in the final\n # bundle.\n if bundle_dir:\n file_creator = bundling_support.resource_file\n else:\n file_creator = bundling_support.contents_file\n bundle_merge_files.append(file_creator(\n ctx,\n plist_results.output_plist,\n optionally_prefixed_path(\"Info.plist\", bundle_dir),\n ))\n if plist_results.pkginfo:\n bundle_merge_files.append(file_creator(\n ctx,\n plist_results.pkginfo,\n optionally_prefixed_path(\"PkgInfo\", bundle_dir),\n ))\n\n # Some application/extension types require stub executables, so collect that\n # 
information if necessary.\n product_type = product_support.product_type(ctx)\n product_type_descriptor = product_support.product_type_descriptor(\n product_type,\n )\n has_built_binary = False\n if product_type_descriptor and product_type_descriptor.stub:\n stub_descriptor = product_type_descriptor.stub\n bundle_merge_files.append(bundling_support.binary_file(\n ctx,\n binary_artifact,\n bundle_name,\n executable = True,\n ))\n if stub_descriptor.additional_bundle_path:\n # TODO(b/34684393): Figure out if macOS ever uses stub binaries for any\n # product types, and if so, is this the right place for them?\n bundle_merge_files.append(bundling_support.contents_file(\n ctx,\n binary_artifact,\n stub_descriptor.additional_bundle_path,\n executable = True,\n ))\n\n # TODO(b/34047985): This should be conditioned on a flag, not just\n # compilation mode.\n if ctx.var[\"COMPILATION_MODE\"] == \"opt\":\n support_zip = product_actions.create_stub_zip_for_archive_merging(\n ctx,\n binary_artifact,\n stub_descriptor,\n )\n root_merge_zips.append(bundling_support.bundlable_file(support_zip, \".\"))\n elif hasattr(ctx.attr, \"deps\"):\n if not ctx.attr.deps:\n fail(\"Library dependencies must be provided for this product type.\")\n if not binary_artifact:\n fail(\"A binary artifact must be specified for this product type.\")\n has_built_binary = True\n\n bundle_merge_files.append(bundling_support.binary_file(\n ctx,\n binary_artifact,\n bundle_name,\n executable = True,\n ))\n\n # Compute the Swift libraries that are used by the target currently being\n # built.\n uses_swift = swift_support.uses_swift(ctx.attr.deps)\n if uses_swift:\n swift_zip = swift_actions.zip_swift_dylibs(ctx, binary_artifact)\n\n if ctx.attr._bundles_frameworks:\n bundle_merge_zips.append(bundling_support.contents_file(\n ctx,\n swift_zip,\n \"Frameworks\",\n ))\n else:\n propagated_framework_zips.append(swift_zip)\n\n platform = platform_support.platform(ctx)\n root_merge_zips.append(bundling_support.bundlable_file(\n swift_zip,\n \"SwiftSupport/%s\" % platform.name_in_plist.lower(),\n ))\n\n # Add Clang runtime inputs when needed.\n if has_built_binary and clang_support.should_package_clang_runtime(ctx):\n clang_rt_zip = file_support.intermediate(ctx, \"%{name}.clang_rt_libs.zip\")\n clang_support.register_runtime_lib_actions(\n ctx,\n binary_artifact,\n clang_rt_zip,\n )\n bundle_merge_zips.append(\n bundling_support.contents_file(ctx, clang_rt_zip, \"Frameworks\"),\n )\n\n # If debug_outputs is not provided to _run, get them from the binary\n # target.\n if not debug_outputs:\n debug_outputs = binary_support.get_binary_provider(\n ctx.attr.deps,\n apple_common.AppleDebugOutputs,\n )\n\n # Include bitcode symbol maps when needed.\n if has_built_binary and debug_outputs:\n bitcode_maps_zip = bitcode_actions.zip_bitcode_symbols_maps(\n ctx,\n binary_artifact,\n debug_outputs,\n )\n if bitcode_maps_zip:\n root_merge_zips.append(bundling_support.bundlable_file(\n bitcode_maps_zip,\n \"BCSymbolMaps\",\n ))\n\n # Include any embedded bundles.\n propagated_framework_files = depset()\n for eb in embedded_bundles:\n apple_bundle = eb.target[AppleBundleInfo]\n\n propagated_framework_files = depset(\n transitive = [propagated_framework_files, apple_bundle.propagated_framework_files],\n )\n if ctx.attr._bundles_frameworks:\n if apple_bundle.propagated_framework_zips:\n bundle_merge_zips.extend([\n bundling_support.contents_file(ctx, f, \"Frameworks\")\n for f in apple_bundle.propagated_framework_zips\n ])\n if 
apple_bundle.propagated_framework_files:\n bundle_merge_files.extend(_bundlable_dynamic_framework_files(\n ctx,\n apple_bundle.propagated_framework_files,\n ))\n else:\n # TODO(kaipi): This is needed to avoid propagating watchOS frameworks into\n # the iOS bundle. But we should differentiate the _bundles_frameworks into\n # 2 dimensions: whether to propagate and whether to bundle. For instance,\n # we want ios_application to bundle and propagate frameworks (for\n # ios_unit_test to deduplicate), but watchos_application to only bundle\n # and not propagate.\n propagated_framework_zips += list(apple_bundle.propagated_framework_zips)\n\n bundle_merge_zips.append(bundling_support.contents_file(\n ctx,\n apple_bundle.archive,\n eb.path,\n ))\n root_merge_zips.extend(list(apple_bundle.root_merge_zips))\n\n # Merge in any prebuilt frameworks (i.e., objc_framework dependencies).\n for objc in deps_objc_providers:\n files = objc.dynamic_framework_file\n propagated_framework_files = depset(\n transitive = [propagated_framework_files, files],\n )\n if ctx.attr._bundles_frameworks:\n # Deduplicate framework files which have already been packaged in the\n # container bundle. This enables test bundles from bundling frameworks\n # which have already been bundled in the test host.\n if avoid_propagated_framework_files:\n paths = [x.short_path for x in avoid_propagated_framework_files]\n files = depset([x for x in files if x.short_path not in paths])\n bundle_merge_files.extend(_bundlable_dynamic_framework_files(ctx, files))\n\n bundle_merge_files = _dedupe_bundle_merge_files(bundle_merge_files)\n\n # Perform the final bundling tasks.\n root_merge_zips_to_archive = root_merge_zips if _is_ipa(ctx) else []\n\n experimental_bundling = ctx.var.get(\n \"apple.experimental_bundling\",\n \"off\",\n ).lower()\n if experimental_bundling not in (\"bundle_and_archive\", \"bundle_only\", \"off\"):\n fail(\"Valid values for --define=apple.experimental_bundling\" +\n \"are: bundle_and_archive, bundle_only, off.\")\n\n # Only use experimental bundling for main app's bundle.\n if bundling_support.bundle_name_with_extension(ctx).endswith(\".app\"):\n experimental_bundling = \"off\"\n if experimental_bundling in (\"bundle_and_archive\", \"bundle_only\"):\n out_bundle = ctx.experimental_new_directory(\n bundling_support.bundle_name_with_extension(ctx),\n )\n main_outputs.append(out_bundle)\n _experimental_create_and_sign_bundle(\n ctx,\n out_bundle,\n bundle_name,\n bundle_merge_files,\n bundle_merge_zips,\n mnemonic,\n progress_description,\n )\n\n work_dir = None\n entitlements = None\n bundle_dir = None\n main_outputs.append(ctx.outputs.archive)\n if experimental_bundling in (\"bundle_and_archive\", \"off\"):\n unprocessed_archive = _create_unprocessed_archive(\n ctx,\n bundle_name,\n bundle_path_in_archive,\n bundle_merge_files,\n bundle_merge_zips,\n root_merge_zips_to_archive,\n mnemonic,\n progress_description,\n )\n work_dir, entitlements = _process_and_sign_archive(\n ctx,\n bundle_name,\n bundle_path_in_archive,\n ctx.outputs.archive,\n unprocessed_archive,\n mnemonic,\n progress_description,\n )\n else:\n # Create a dummy archive for the bundle_only case, because we have to create\n # something.\n ctx.actions.write(\n output = ctx.outputs.archive,\n content = \"This is a dummy archive.\",\n )\n\n additional_providers = []\n legacy_providers = {}\n\n if has_built_binary and debug_outputs:\n # TODO(b/110264170): Propagate the provider that makes the dSYM bundle\n # available as opposed to AppleDebugOutputs which 
propagates the standalone\n # binaries.\n additional_providers.append(debug_outputs)\n\n # Create a .dSYM bundle with the expected name next to the archive in the\n # output directory.\n if ctx.fragments.objc.generate_dsym:\n bundle_extension = bundling_support.bundle_extension(ctx)\n symbol_bundle = debug_symbol_actions.create_symbol_bundle(\n ctx,\n debug_outputs,\n bundle_name,\n bundle_extension,\n )\n extra_outputs.extend(symbol_bundle)\n\n if ctx.fragments.objc.generate_linkmap:\n linkmaps = debug_symbol_actions.collect_linkmaps(\n ctx,\n debug_outputs,\n bundle_name,\n )\n extra_outputs.extend(linkmaps)\n\n if framework_files and is_dynamic_framework:\n framework_dir, bundled_framework_files = (\n _copy_framework_files(ctx, framework_files)\n )\n\n # TODO(cparsons): These will no longer be necessary once apple_binary\n # uses the values in the dynamic framework provider.\n legacy_objc_provider = apple_common.new_objc_provider(\n dynamic_framework_dir = depset([framework_dir]),\n dynamic_framework_file = bundled_framework_files,\n providers = deps_objc_providers,\n )\n\n framework_provider = apple_common.new_dynamic_framework_provider(\n objc = legacy_objc_provider,\n binary = binary_artifact,\n framework_files = bundled_framework_files,\n framework_dirs = depset([framework_dir]),\n )\n additional_providers.extend([framework_provider])\n\n extension_safe = (getattr(ctx.attr, \"extension_safe\", False) or\n getattr(ctx.attr, \"_extension_safe\", False))\n apple_bundle_info_args = {\n \"archive\": ctx.outputs.archive,\n \"binary\": binary_artifact,\n \"bundle_dir\": bundle_dir,\n \"bundle_extension\": bundling_support.bundle_extension(ctx),\n \"bundle_id\": bundle_id,\n \"bundle_name\": bundle_name,\n \"entitlements\": entitlements,\n \"extension_safe\": extension_safe,\n \"infoplist\": main_infoplist,\n \"minimum_os_version\": platform_support.minimum_os(ctx),\n \"product_type\": product_type,\n \"propagated_framework_files\": propagated_framework_files,\n \"propagated_framework_zips\": depset(propagated_framework_zips),\n \"root_merge_zips\": root_merge_zips if not _is_ipa(ctx) else [],\n \"uses_swift\": uses_swift,\n }\n if work_dir:\n apple_bundle_info_args[\"archive_root\"] = work_dir\n legacy_providers[\"apple_bundle\"] = struct(**apple_bundle_info_args)\n\n # Collect extra outputs from embedded bundles so that they also get included\n # as outputs of the rule.\n transitive_extra_outputs = depset(direct = extra_outputs)\n propagate_embedded_extra_outputs = define_utils.bool_value(\n ctx,\n \"apple.propagate_embedded_extra_outputs\",\n False,\n )\n if propagate_embedded_extra_outputs:\n embedded_bundle_targets = [eb.target for eb in embedded_bundles]\n for extra in providers.find_all(\n embedded_bundle_targets,\n AppleExtraOutputsInfo,\n ):\n transitive_extra_outputs = depset(\n transitive = [transitive_extra_outputs, extra.files],\n )\n\n additional_providers.extend([\n AppleBundleInfo(**apple_bundle_info_args),\n AppleExtraOutputsInfo(files = transitive_extra_outputs),\n DefaultInfo(\n files = depset(\n direct = main_outputs,\n transitive = [transitive_extra_outputs],\n ),\n runfiles = ctx.runfiles(\n files = [ctx.outputs.archive] + extra_runfiles,\n ),\n ),\n OutputGroupInfo(local_outputs = depset(local_outputs)),\n # Propagate the resource sets contained by this bundle along with the ones\n # contained in the frameworks dependencies, so that higher level bundles\n # can also skip the bundling of those resources.\n _ResourceBundleInfo(\n owners = owners_mapping,\n resource_sets 
= resource_sets + dep_bundle_resource_sets,\n ),\n ])\n\n return (additional_providers, legacy_providers)\n\ndef _copy_framework_files(ctx, framework_files):\n \"\"\"Copies the files in `framework_files` to the right place in the framework.\n\n Args:\n ctx: The Skylark context.\n framework_files: A list of files to copy into the framework.\n\n Returns:\n A two-element tuple: the framework directory path, and a set containing the\n output files in their final locations.\n \"\"\"\n bundle_name = bundling_support.bundle_name(ctx)\n framework_dir_name = \"_frameworks/\" + bundle_name + \".framework/\"\n bundled_framework_files = []\n for framework_file in framework_files:\n output_file = ctx.actions.declare_file(\n framework_dir_name + framework_file.dest,\n )\n ctx.actions.run_shell(\n outputs = [output_file],\n inputs = [framework_file.src],\n mnemonic = \"Cp\",\n arguments = [\n output_file.dirname,\n framework_file.src.path,\n output_file.path,\n ],\n command = 'mkdir -p \"$1\" && cp \"$2\" \"$3\"',\n progress_message = (\n \"Copying \" + framework_file.src.path + \" to \" + output_file.path\n ),\n )\n bundled_framework_files.append(output_file)\n return (\n ctx.outputs.archive.dirname + \"/\" + framework_dir_name,\n depset(bundled_framework_files),\n )\n\n# Define the loadable module that lists the exported symbols in this file.\nbundler = struct(\n run = _run,\n)\n", "id": "7596436", "language": "Python", "matching_score": 5.5775251388549805, "max_stars_count": 1, "path": "apple/bundling/bundler.bzl" }, { "content": "# Copyright 2018 The Bazel Authors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Implementation of the resource propagation aspect.\"\"\"\n\nload(\n \"@bazel_skylib//lib:paths.bzl\",\n \"paths\",\n)\nload(\n \"@bazel_skylib//lib:partial.bzl\",\n \"partial\",\n)\nload(\n \"@build_bazel_rules_apple//common:define_utils.bzl\",\n \"define_utils\",\n)\nload(\n \"@build_bazel_rules_apple//common:path_utils.bzl\",\n \"path_utils\",\n)\nload(\n \"@build_bazel_rules_apple//apple/internal:experimental.bzl\",\n \"is_experimental_bundling_enabled\",\n)\nload(\n \"@build_bazel_rules_apple//apple/internal:resources.bzl\",\n \"NewAppleResourceInfo\",\n \"resources\",\n)\nload(\n \"@build_bazel_rules_swift//swift:swift.bzl\",\n \"SwiftInfo\",\n)\n\n# List of native resource attributes to use to collect by default. This list should dissapear in the\n# long term; objc_library will remove the resource specific attributes and the native rules (that\n# have these attributes) will dissapear. 
The new resource rules will either have specific attributes\n# or use data, but in any of those cases, this list won't be used as if there are specific\n# attributes, we will not merge them to split them again.\n_NATIVE_RESOURCE_ATTRS = [\n \"asset_catalogs\",\n \"datamodels\",\n \"resources\",\n \"storyboards\",\n \"strings\",\n \"xibs\",\n]\n\ndef _structured_resources_parent_dir(resource, parent_dir):\n \"\"\"Returns the package relative path for the parent directory of a resource.\n\n Args:\n resource: The resource for which to calculate the package relative path.\n\n Returns:\n The package relative path to the parent directory of the resource.\n \"\"\"\n package_relative = path_utils.owner_relative_path(resource)\n path = paths.dirname(package_relative).rstrip(\"/\")\n return paths.join(parent_dir or \"\", path or \"\") or None\n\ndef _bundle_relative_parent_dir(resource, extension):\n \"\"\"Returns the bundle relative path to the resource rooted at the bundle.\n\n Looks for the first instance of a folder with the suffix specified by `extension`, and then\n returns the directory path to the file within the bundle. For example, for a resource with path\n my/package/Contents.bundle/directory/foo.txt and `extension` equal to `\"bundle\"`, it would\n return Contents.bundle/directory.\n\n Args:\n resource: The resource for which to calculate the bundle relative path.\n extension: The bundle extension to use when finding the relative path.\n\n Returns:\n The bundle relative path, rooted at the outermost bundle.\n \"\"\"\n bundle_path = path_utils.farthest_directory_matching(resource.short_path, extension)\n bundle_relative_path = paths.relativize(resource.short_path, bundle_path)\n\n parent_dir = paths.basename(bundle_path)\n bundle_relative_dir = paths.dirname(bundle_relative_path).strip(\"/\")\n if bundle_relative_dir:\n parent_dir = paths.join(parent_dir, bundle_relative_dir)\n return parent_dir\n\ndef _apple_resource_aspect_impl(target, ctx):\n \"\"\"Implementation of the resource propation aspect.\"\"\"\n\n # Kill switch to disable the aspect unless explicitly required.\n if not is_experimental_bundling_enabled(ctx):\n return []\n\n # If the target already propagates a NewAppleResourceInfo, do nothing.\n if NewAppleResourceInfo in target:\n return []\n\n providers = []\n\n bucketize_args = {}\n collect_args = {}\n\n # Owner to attach to the resources as they're being bucketed.\n owner = None\n if ctx.rule.kind == \"objc_bundle\":\n bucketize_args[\"parent_dir_param\"] = partial.make(\n _bundle_relative_parent_dir,\n extension = \"bundle\",\n )\n collect_args[\"res_attrs\"] = [\"bundle_imports\"]\n\n elif ctx.rule.kind == \"objc_bundle_library\":\n parent_dir_param = \"%s.bundle\" % ctx.label.name\n bucketize_args[\"parent_dir_param\"] = parent_dir_param\n collect_args[\"res_attrs\"] = _NATIVE_RESOURCE_ATTRS\n\n # Collect the specified infoplists that should be merged together. 
The replacement for\n # objc_bundle_library should handle it within its implementation.\n plists = resources.collect(ctx.rule.attr, res_attrs = [\"infoplist\", \"infoplists\"])\n plist_provider = resources.bucketize_typed(\n plists,\n bucket_type = \"infoplists\",\n parent_dir_param = parent_dir_param,\n )\n providers.append(plist_provider)\n\n # Nest bundles added through the bundles attribute in objc_bundle_library.\n if ctx.rule.attr.bundles:\n bundle_merged_provider = resources.merge_providers(\n [x[NewAppleResourceInfo] for x in ctx.rule.attr.bundles],\n )\n\n providers.append(resources.nest_bundles(bundle_merged_provider, parent_dir_param))\n\n elif ctx.rule.kind == \"objc_library\":\n collect_args[\"res_attrs\"] = _NATIVE_RESOURCE_ATTRS\n\n # Only set objc_library targets as owners if they have srcs, non_arc_srcs or deps. This\n # treats objc_library targets without sources as resource aggregators.\n if ctx.rule.attr.srcs or ctx.rule.attr.non_arc_srcs or ctx.rule.attr.deps:\n owner = str(ctx.label)\n\n # Collect objc_library's bundles dependencies and propagate them.\n providers.extend([\n x[NewAppleResourceInfo]\n for x in ctx.rule.attr.bundles\n ])\n\n elif ctx.rule.kind == \"swift_library\":\n bucketize_args[\"swift_module\"] = target[SwiftInfo].module_name\n collect_args[\"res_attrs\"] = [\"resources\"]\n owner = str(ctx.label)\n\n elif ctx.rule.kind == \"apple_binary\" or ctx.rule.kind == \"apple_stub_binary\":\n # Set the binary targets as the default_owner to avoid losing ownership information when\n # aggregating dependencies resources that have an owners on one branch, and that don't have\n # an owner on another branch. When rules_apple stops using apple_binary intermediaries this\n # should be removed as there would not be an intermediate aggregator.\n owner = str(ctx.label)\n\n elif apple_common.Objc in target:\n # TODO(kaipi): Clean up usages of the ObjcProvider as means to propagate resources, then\n # remove this case.\n if hasattr(target[apple_common.Objc], \"merge_zip\"):\n merge_zips = target[apple_common.Objc].merge_zip.to_list()\n merge_zips_provider = resources.bucketize_typed(\n merge_zips,\n bucket_type = \"resource_zips\",\n )\n providers.append(merge_zips_provider)\n\n # Collect all resource files related to this target.\n files = resources.collect(ctx.rule.attr, **collect_args)\n if files:\n providers.append(\n resources.bucketize(files, owner = owner, **bucketize_args),\n )\n\n # If the target has structured_resources, we need to process them with a different\n # parent_dir_param\n if hasattr(ctx.rule.attr, \"structured_resources\"):\n if ctx.rule.attr.structured_resources:\n # TODO(kaipi): Validate that structured_resources doesn't have processable resources,\n # e.g. we shouldn't accept xib files that should be compiled before bundling.\n structured_files = resources.collect(\n ctx.rule.attr,\n res_attrs = [\"structured_resources\"],\n )\n\n if ctx.rule.kind == \"objc_bundle_library\":\n # TODO(kaipi): Once we remove the native objc_bundle_library, there won't be a need\n # for repeating the bundle name here.\n structured_parent_dir = \"%s.bundle\" % ctx.label.name\n else:\n structured_parent_dir = None\n\n # Avoid processing PNG files that are referenced through the structured_resources\n # attribute. 
This is mostly for legacy reasons and should get cleaned up in the future.\n providers.append(\n resources.bucketize(\n structured_files,\n owner = owner,\n parent_dir_param = partial.make(\n _structured_resources_parent_dir,\n parent_dir = structured_parent_dir,\n ),\n avoid_buckets = [\"pngs\"],\n ),\n )\n\n # Get the providers from dependencies.\n # TODO(kaipi): Add data here once we propagate resources through that attribute.\n for attr in [\"deps\"]:\n if hasattr(ctx.rule.attr, attr):\n providers.extend([\n x[NewAppleResourceInfo]\n for x in getattr(ctx.rule.attr, attr)\n if NewAppleResourceInfo in x\n ])\n\n if providers:\n # If any providers were collected, merge them.\n return [resources.merge_providers(providers, default_owner = owner)]\n return []\n\napple_resource_aspect = aspect(\n implementation = _apple_resource_aspect_impl,\n # TODO(kaipi): The aspect should also propagate through the data attribute.\n attr_aspects = [\"bundles\", \"deps\"],\n doc = \"\"\"Aspect that collects and propagates resource information to be bundled by a top-level\nbundling rule.\"\"\",\n)\n", "id": "3087522", "language": "Python", "matching_score": 2.145277738571167, "max_stars_count": 0, "path": "apple/internal/aspects/resource_aspect.bzl" }, { "content": "# Copyright 2018 The Bazel Authors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Implementation of the aspect that propagates framework import files.\"\"\"\n\nAppleFrameworkImportInfo = provider(\n doc = \"Provider that propagates information about framework import targets.\",\n fields = {\n \"framework_imports\": \"\"\"\nDepset of Files that represent framework imports that need to be bundled in the top level\napplication bundle under the Frameworks directory.\n\"\"\",\n },\n)\n\ndef _framework_import_aspect_impl(target, ctx):\n \"\"\"Implementation of the framework import propagation aspect.\"\"\"\n if AppleFrameworkImportInfo in target:\n return []\n\n transitive_sets = []\n for attribute in [\"deps\", \"frameworks\"]:\n if not hasattr(ctx.rule.attr, attribute):\n continue\n for dep_target in getattr(ctx.rule.attr, attribute):\n if AppleFrameworkImportInfo in dep_target:\n transitive_sets.append(dep_target[AppleFrameworkImportInfo].framework_imports)\n\n if (ctx.rule.kind == \"objc_framework\" and\n ctx.rule.attr.is_dynamic and\n ctx.rule.attr.framework_imports):\n framework_imports = []\n for file_target in ctx.rule.attr.framework_imports:\n for file in file_target.files.to_list():\n file_short_path = file.short_path\n if file_short_path.endswith(\".h\"):\n continue\n if file_short_path.endswith(\".modulemap\"):\n continue\n if \"Headers/\" in file_short_path:\n # This matches /Headers/ and /PrivateHeaders/\n continue\n if \"/Modules/\" in file_short_path:\n continue\n framework_imports.append(file)\n\n if framework_imports:\n transitive_sets.append(depset(framework_imports))\n\n if not transitive_sets:\n return []\n\n return [AppleFrameworkImportInfo(framework_imports = depset(transitive = 
transitive_sets))]\n\nframework_import_aspect = aspect(\n implementation = _framework_import_aspect_impl,\n attr_aspects = [\"deps\", \"frameworks\"],\n doc = \"\"\"\nAspect that collects all files from framework import targets (e.g. objc_framework) so that they can\nbe packaged within the top-level application bundle.\n\"\"\",\n)\n", "id": "4321603", "language": "Python", "matching_score": 1.05355703830719, "max_stars_count": 0, "path": "apple/internal/aspects/framework_import_aspect.bzl" }, { "content": "# Copyright 2018 The Bazel Authors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Partial implementation for embedding provisioning profiles.\"\"\"\n\nload(\n \"@bazel_skylib//lib:partial.bzl\",\n \"partial\",\n)\nload(\n \"@build_bazel_rules_apple//apple/internal:processor.bzl\",\n \"processor\",\n)\n\ndef _framework_headers_partial_impl(ctx, hdrs):\n \"\"\"Implementation for the framework headers partial.\"\"\"\n _ignore = [ctx]\n\n return struct(\n bundle_files = [\n (processor.location.bundle, \"Headers\", depset(hdrs)),\n ],\n )\n\ndef framework_headers_partial(hdrs):\n \"\"\"Constructor for the framework headers partial.\n\n This partial bundles the headers for dynamic frameworks.\n\n Args:\n hdrs: The list of headers to bundle.\n\n Returns:\n A partial that returns the bundle location of the framework header artifacts.\n \"\"\"\n return partial.make(\n _framework_headers_partial_impl,\n hdrs = hdrs,\n )\n", "id": "2485321", "language": "Python", "matching_score": 1.395937442779541, "max_stars_count": 1, "path": "apple/internal/partials/framework_headers.bzl" }, { "content": "# Copyright 2018 The Bazel Authors. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Helper methods for implementing the test bundles.\"\"\"\n\nload(\n \"@build_bazel_rules_apple//apple:providers.bzl\",\n \"AppleBundleInfo\",\n \"AppleExtraOutputsInfo\",\n)\nload(\n \"@build_bazel_rules_apple//apple/bundling:product_support.bzl\",\n \"apple_product_type\",\n)\nload(\n \"@build_bazel_rules_apple//apple/internal:outputs.bzl\",\n \"outputs\",\n)\nload(\n \"@build_bazel_rules_apple//apple/internal:partials.bzl\",\n \"partials\",\n)\nload(\n \"@build_bazel_rules_apple//apple/internal:processor.bzl\",\n \"processor\",\n)\n\n# Default test bundle ID for tests that don't have a test host or were not given\n# a bundle ID.\n_DEFAULT_TEST_BUNDLE_ID = \"com.bazelbuild.rulesapple.Tests\"\n\ndef _computed_test_bundle_id(test_host_bundle_id):\n \"\"\"Compute a test bundle ID from the test host, or a default if not given.\"\"\"\n if test_host_bundle_id:\n bundle_id = test_host_bundle_id + \"Tests\"\n else:\n bundle_id = _DEFAULT_TEST_BUNDLE_ID\n\n return bundle_id\n\ndef _test_host_bundle_id(test_host):\n \"\"\"Return the bundle ID for the given test host, or None if none was given.\"\"\"\n if not test_host:\n return None\n test_host_bundle_info = test_host[AppleBundleInfo]\n return test_host_bundle_info.bundle_id\n\ndef _apple_test_bundle_impl(ctx):\n \"\"\"Experimental implementation of Apple test bundles.\"\"\"\n test_host_bundle_id = _test_host_bundle_id(ctx.attr.test_host)\n if ctx.attr.bundle_id:\n bundle_id = ctx.attr.bundle_id\n else:\n bundle_id = _computed_test_bundle_id(test_host_bundle_id)\n\n if bundle_id == test_host_bundle_id:\n fail(\"The test bundle's identifier of '\" + bundle_id + \"' can't be the \" +\n \"same as the test host's bundle identifier. 
Please change one of \" +\n \"them.\")\n\n # TODO(kaipi): Replace the debug_outputs_provider with the provider returned from the linking\n # action, when available.\n # TODO(kaipi): Extract this into a common location to be reused and refactored later when we\n # add linking support directly into the rule.\n binary_target = ctx.attr.deps[0]\n binary_artifact = binary_target[apple_common.AppleLoadableBundleBinary].binary\n\n test_host_list = []\n if ctx.attr.test_host and ctx.attr.product_type == apple_product_type.unit_test_bundle:\n test_host_list.append(ctx.attr.test_host)\n\n processor_partials = [\n partials.apple_bundle_info_partial(bundle_id = bundle_id),\n partials.binary_partial(binary_artifact = binary_artifact),\n partials.clang_rt_dylibs_partial(binary_artifact = binary_artifact),\n partials.debug_symbols_partial(\n debug_outputs_provider = binary_target[apple_common.AppleDebugOutputs],\n ),\n partials.framework_import_partial(\n targets = ctx.attr.deps,\n targets_to_avoid = test_host_list,\n ),\n partials.resources_partial(\n bundle_id = bundle_id,\n plist_attrs = [\"infoplists\"],\n targets_to_avoid = test_host_list,\n version_keys_required = False,\n ),\n partials.swift_dylibs_partial(\n binary_artifact = binary_artifact,\n bundle_dylibs = True,\n ),\n ]\n\n processor_result = processor.process(ctx, processor_partials)\n\n # TODO(kaipi): Remove this filtering when apple_*_test is merged with the bundle and binary\n # rules. The processor outputs has all the extra outputs like dSYM files that we want to\n # propagate, but it also includes the archive artifact. Because this target is an intermediate\n # and hidden target, we don't want to expose this artifact directly as an output, as the\n # apple_*_test rules will copy and rename this archive with the correct name.\n filtered_outputs = [\n x\n for x in processor_result.output_files.to_list()\n if x != outputs.archive(ctx)\n ]\n\n return processor_result.providers + [\n # TODO(kaipi): Remove this provider when apple_*_test is merged with the bundle and binary\n # rules.\n AppleExtraOutputsInfo(files = depset(filtered_outputs)),\n ]\n\napple_test_bundle_support = struct(\n apple_test_bundle_impl = _apple_test_bundle_impl,\n)\n", "id": "2658808", "language": "Python", "matching_score": 3.1989452838897705, "max_stars_count": 1, "path": "apple/internal/testing/apple_test_bundle_support.bzl" }, { "content": "# Copyright 2018 The Bazel Authors. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Partial implementation for debug symbol file processing.\"\"\"\n\nload(\n \"@bazel_skylib//lib:partial.bzl\",\n \"partial\",\n)\nload(\n \"@bazel_skylib//lib:paths.bzl\",\n \"paths\",\n)\nload(\n \"@build_bazel_rules_apple//apple/bundling:bundling_support.bzl\",\n \"bundling_support\",\n)\nload(\n \"@build_bazel_rules_apple//apple/bundling:file_actions.bzl\",\n \"file_actions\",\n)\n\n# TODO(b/110264170): Expose this provider so that IDEs can use it to reference the dSYM bundles\n# contained in the dsym_bundles field.\n_AppleDebugInfo = provider(\n doc = \"\"\"\nPrivate provider to propagate transitive debug symbol information.\n\"\"\",\n fields = {\n \"dsym_bundles\": \"\"\"\nPaths to dSYM bundles that this target provides. This includes the paths to dSYM bundles generated\nfor dependencies of this target (e.g. frameworks and extensions).\n\"\"\",\n \"files\": \"\"\"\nDepset of `File` references to all debug symbol files. This may include dSYM and linkmap files, if\nrequested through --apple_generate_dsym and/or --objc_generate_linkmap.\n\"\"\",\n },\n)\n\ndef _collect_linkmaps(ctx, debug_provider, bundle_name):\n \"\"\"Collects the available linkmaps from the binary.\n\n Args:\n ctx: The target's rule context.\n debug_provider: The AppleDebugOutput provider for the binary target.\n bundle_name: The name of the output bundle.\n\n Returns:\n A list of linkmap files, one per linked architecture.\n \"\"\"\n outputs = []\n\n # TODO(b/36174487): Iterate over .items() once the Map/dict problem is fixed.\n for arch in debug_provider.outputs_map:\n arch_outputs = debug_provider.outputs_map[arch]\n linkmap = arch_outputs[\"linkmap\"]\n output_linkmap = ctx.actions.declare_file(\n \"%s_%s.linkmap\" % (bundle_name, arch),\n )\n outputs.append(output_linkmap)\n file_actions.symlink(ctx, linkmap, output_linkmap)\n\n return outputs\n\n# TODO(kaipi): Investigate moving this actions into a tool so that we can use\n# tree artifacts instead, which should simplify parts of this file.\ndef _bundle_dsym_files(ctx, debug_provider, bundle_name, bundle_extension = \"\"):\n \"\"\"Recreates the .dSYM bundle from the AppleDebugOutputs provider.\n\n The generated bundle will have the same name as the bundle being built (including its\n extension), but with the \".dSYM\" extension appended to it.\n\n If the target being built does not have a binary or if the build it not generating debug\n symbols (`--apple_generate_dsym` is not provided), then this function is a no-op that returns\n an empty list.\n\n Args:\n ctx: The target's rule context.\n debug_provider: The AppleDebugOutput provider for the binary target.\n bundle_name: The name of the output bundle.\n bundle_extension: The extension for the bundle.\n\n Returns:\n A list of files that comprise the .dSYM bundle, which should be returned as additional\n outputs from the target.\n \"\"\"\n bundle_name_with_extension = bundle_name + bundle_extension\n dsym_bundle_name = 
bundle_name_with_extension + \".dSYM\"\n\n outputs = []\n\n # TODO(b/36174487): Iterate over .items() once the Map/dict problem is fixed.\n for arch in debug_provider.outputs_map:\n arch_outputs = debug_provider.outputs_map[arch]\n dsym_binary = arch_outputs[\"dsym_binary\"]\n output_binary = ctx.actions.declare_file(\n \"%s/Contents/Resources/DWARF/%s_%s\" % (\n dsym_bundle_name,\n bundle_name,\n arch,\n ),\n )\n outputs.append(output_binary)\n file_actions.symlink(ctx, dsym_binary, output_binary)\n\n # If we found any outputs, create the Info.plist for the bundle as well; otherwise, we just\n # return the empty list. The plist generated by dsymutil only varies based on the bundle name,\n # so we regenerate it here rather than propagate the other one from the apple_binary. (See\n # https://github.com/llvm-mirror/llvm/blob/master/tools/dsymutil/dsymutil.cpp)\n if outputs:\n dsym_plist = ctx.actions.declare_file(\n \"%s/Contents/Info.plist\" % dsym_bundle_name,\n )\n outputs.append(dsym_plist)\n ctx.actions.expand_template(\n output = dsym_plist,\n template = ctx.file._dsym_info_plist_template,\n substitutions = {\n \"%bundle_name_with_extension%\": bundle_name_with_extension,\n },\n )\n\n return outputs\n\ndef _debug_symbols_partial_impl(ctx, debug_dependencies = [], debug_outputs_provider = None):\n \"\"\"Implementation for the debug symbols processing partial.\"\"\"\n deps_providers = [\n x[_AppleDebugInfo]\n for x in debug_dependencies\n if _AppleDebugInfo in x\n ]\n\n dsym_bundles = depset(transitive = [x.dsym_bundles for x in deps_providers])\n output_files = depset(transitive = [x.files for x in deps_providers])\n output_providers = []\n\n if debug_outputs_provider:\n output_providers.append(debug_outputs_provider)\n\n bundle_name = bundling_support.bundle_name(ctx)\n\n if ctx.fragments.objc.generate_dsym:\n bundle_extension = bundling_support.bundle_extension(ctx)\n dsym_files = _bundle_dsym_files(\n ctx,\n debug_outputs_provider,\n bundle_name,\n bundle_extension,\n )\n output_files = depset(\n dsym_files,\n transitive = [output_files],\n )\n\n absolute_dsym_bundle_path = paths.join(\n ctx.bin_dir.path,\n ctx.label.package,\n bundle_name + bundle_extension + \".dSYM\",\n )\n dsym_bundles = depset(\n [absolute_dsym_bundle_path],\n transitive = [dsym_bundles],\n )\n\n if ctx.fragments.objc.generate_linkmap:\n linkmaps = _collect_linkmaps(ctx, debug_outputs_provider, bundle_name)\n output_files = depset(\n linkmaps,\n transitive = [output_files],\n )\n\n output_providers.append(\n _AppleDebugInfo(\n dsym_bundles = dsym_bundles,\n files = output_files,\n ),\n )\n\n return struct(\n output_files = output_files,\n providers = output_providers,\n )\n\ndef debug_symbols_partial(debug_dependencies = [], debug_outputs_provider = None):\n \"\"\"Constructor for the debug symbols processing partial.\n\n This partial collects all of the transitive debug files information. The output of this partial\n are the debug output files for the target being processed _plus_ all of the dependencies debug\n symbol files. This includes dSYM bundles and linkmaps. 
With this, for example, by building an\n ios_application target with --apple_generate_dsym, this partial will return the dSYM bundle of\n the ios_application itself plus the dSYM bundles of any ios_framework and ios_extension\n dependencies there may be, which will force bazel to present these files in the output files\n section of a successful build.\n\n Args:\n debug_dependencies: List of targets from which to collect the transitive dependency debug\n information to propagate them upstream.\n debug_outputs_provider: The AppleDebugOutputs provider containing the references to the debug\n outputs of this target's binary.\n\n Returns:\n A partial that returns the debug output files, if any were requested.\n \"\"\"\n return partial.make(\n _debug_symbols_partial_impl,\n debug_dependencies = debug_dependencies,\n debug_outputs_provider = debug_outputs_provider,\n )\n", "id": "4895833", "language": "Python", "matching_score": 2.3951869010925293, "max_stars_count": 1, "path": "apple/internal/partials/debug_symbols.bzl" }, { "content": "# Copyright 2018 The Bazel Authors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"File references to important output files from the rule.\n\nThese file references can be used across the bundling logic, but there must be only 1 action\nregistered to generate these files.\n\"\"\"\n\nload(\n \"@bazel_skylib//lib:paths.bzl\",\n \"paths\",\n)\nload(\n \"@build_bazel_rules_apple//apple/bundling:bundling_support.bzl\",\n \"bundling_support\",\n)\nload(\n \"@build_bazel_rules_apple//apple/internal:intermediates.bzl\",\n \"intermediates\",\n)\n\ndef _archive(ctx):\n \"\"\"Returns a file reference for this target's archive.\"\"\"\n\n # TODO(kaipi): Look into removing this rule implicit output and just return it using\n # DefaultInfo.\n return ctx.outputs.archive\n\ndef _archive_root_path(ctx):\n \"\"\"Returns the path to a directory reference for this target's archive root.\"\"\"\n\n # TODO(b/65366691): Migrate this to an actual tree artifact.\n archive_root_name = paths.replace_extension(_archive(ctx).path, \"_archive-root\")\n return archive_root_name\n\ndef _binary(ctx):\n \"\"\"Returns a file reference for the binary that will be packaged into this target's archive. \"\"\"\n return intermediates.file(\n ctx.actions,\n ctx.label.name,\n bundling_support.bundle_name(ctx),\n )\n\ndef _infoplist(ctx):\n \"\"\"Returns a file reference for this target's Info.plist file.\"\"\"\n return intermediates.file(ctx.actions, ctx.label.name, \"Info.plist\")\n\noutputs = struct(\n archive = _archive,\n archive_root_path = _archive_root_path,\n binary = _binary,\n infoplist = _infoplist,\n)\n", "id": "10747809", "language": "Python", "matching_score": 1.1172302961349487, "max_stars_count": 1, "path": "apple/internal/outputs.bzl" } ]
2.10369
QSD-for-WaSH
[ { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\nQSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems\n\nThis module is developed by:\n <NAME> <<EMAIL>>\n\nThis module is under the University of Illinois/NCSA Open Source License.\nPlease refer to https://github.com/QSD-Group/QSDsan/blob/main/LICENSE.txt\nfor license details.\n'''\n\n# %%\n\nfrom .. import SanUnit\nfrom ..utils import ospath, load_data, data_path\n\n__all__ = ('Excretion',)\n\nexcretion_path = ospath.join(data_path, 'sanunit_data/_excretion.tsv')\n\n\n# %%\n\nclass Excretion(SanUnit):\n '''\n Estimation of N, P, K, and COD in urine and feces based on dietary intake\n for one person based on `Trimmer et al. <https://doi.org/10.1021/acs.est.0c03296>`_\n\n Parameters\n ----------\n waste_ratio : float\n A ratio in [0, 1] to indicate the amount of intake calories and nutrients\n (N, P, K) that is wasted.\n\n Examples\n --------\n `bwaise systems <https://github.com/QSD-Group/EXPOsan/blob/main/exposan/bwaise/systems.py>`_\n\n References\n ----------\n [1] Trimmer et al., Navigating Multidimensional Social–Ecological System\n Trade-Offs across Sanitation Alternatives in an Urban Informal Settlement.\n Environ. Sci. Technol. 2020, 54 (19), 12641–12653.\n https://doi.org/10.1021/acs.est.0c03296.\n '''\n\n _N_ins = 0\n _N_outs = 2\n\n def __init__(self, ID='', ins=None, outs=(), thermo=None, init_with='WasteStream',\n waste_ratio=0, **kwargs):\n SanUnit.__init__(self, ID, ins, outs, thermo, init_with)\n self.waste_ratio = waste_ratio\n\n data = load_data(path=excretion_path)\n for para in data.index:\n value = float(data.loc[para]['expected'])\n setattr(self, '_'+para, value)\n del data\n\n for attr, value in kwargs.items():\n setattr(self, attr, value)\n\n def _run(self):\n ur, fec = self.outs\n ur.empty()\n fec.empty()\n\n not_wasted = 1 - self.waste_ratio\n factor = 24 * 1e3 # from g per person per day to kg per hour\n\n ur_N = (self.p_veg+self.p_anim)/factor*self.N_prot \\\n * self.N_exc*self.N_ur*not_wasted\n ur.imass['NH3'] = ur_N * self.N_ur_NH3\n ur.imass['NonNH3'] = ur_N - ur.imass['NH3']\n\n ur.imass['P'] = (self.p_veg*self.P_prot_v+self.p_anim*self.P_prot_a)/factor \\\n * self.P_exc*self.P_ur*not_wasted\n\n e_cal = self.e_cal / 24 * not_wasted\n ur.imass['K'] = e_cal/1e3 * self.K_cal/1e3 * self.K_exc*self.K_ur\n ur.imass['Mg'] = self.Mg_ur / factor\n ur.imass['Ca'] = self.Ca_ur / factor\n\n ur_exc = self.ur_exc / factor\n ur.imass['H2O'] = self.ur_moi * ur_exc\n ur.imass['OtherSS'] = ur_exc - ur.F_mass\n\n fec_exc = self.fec_exc / factor\n fec_N = (1-self.N_ur)/self.N_ur * ur_N\n fec.imass['NH3'] = fec_N * self.N_fec_NH3\n fec.imass['NonNH3'] = fec_N - fec.imass['NH3']\n fec.imass['P'] = (1-self.P_ur)/self.P_ur * ur.imass['P']\n fec.imass['K'] = (1-self.K_ur)/self.K_ur * ur.imass['K']\n fec.imass['Mg'] = self.Mg_fec / factor\n fec.imass['Ca'] = self.Ca_fec / factor\n fec.imass['H2O'] = self.fec_moi * fec_exc\n fec.imass['OtherSS'] = fec_exc - fec.F_mass\n\n # 14 kJ/g COD, the average lower heating value of excreta\n tot_COD = e_cal*self.e_exc*4.184/14/1e3 # in kg COD/hr\n ur._COD = tot_COD*(1-self.e_fec) / (ur.F_vol/1e3) # in mg/L\n fec._COD = tot_COD*self.e_fec / (fec.F_vol/1e3) # in mg/L\n\n @property\n def e_cal(self):\n '''[float] Caloric intake, [kcal/cap/d].'''\n return self._e_cal\n @e_cal.setter\n def e_cal(self, i):\n self._e_cal = i\n\n @property\n def p_veg(self):\n '''[float] Vegetal protein intake, [g/cap/d].'''\n return self._p_veg\n @p_veg.setter\n def 
p_veg(self, i):\n self._p_veg = i\n\n @property\n def p_anim(self):\n '''[float] Animal protein intake, [g/cap/d].'''\n return self._p_anim\n @p_anim.setter\n def p_anim(self, i):\n self._p_anim = i\n\n @property\n def N_prot(self):\n '''[float] Nitrogen content in protein, [wt%].'''\n return self._N_prot\n @N_prot.setter\n def N_prot(self, i):\n self._N_prot = i\n\n @property\n def P_prot_v(self):\n '''[float] Phosphorus content in vegetal protein, [wt%].'''\n return self._P_prot_v\n @P_prot_v.setter\n def P_prot_v(self, i):\n self._P_prot_v = i\n\n @property\n def P_prot_a(self):\n '''[float] Phosphorus content in animal protein, [wt%].'''\n return self._P_prot_a\n @P_prot_a.setter\n def P_prot_a(self, i):\n self._P_prot_a = i\n\n @property\n def K_cal(self):\n '''[float] Potassium intake relative to caloric intake, [g K/1000 kcal].'''\n return self._K_cal\n @K_cal.setter\n def K_cal(self, i):\n self._K_cal = i\n\n @property\n def N_exc(self):\n '''[float] Nitrogen excretion factor, [% of intake].'''\n return self._N_exc\n @N_exc.setter\n def N_exc(self, i):\n self._N_exc = i\n\n @property\n def P_exc(self):\n '''[float] Phosphorus excretion factor, [% of intake].'''\n return self._P_exc\n @P_exc.setter\n def P_exc(self, i):\n self._P_exc = i\n\n @property\n def K_exc(self):\n '''[float] Potassium excretion factor, [% of intake].'''\n return self._K_exc\n @K_exc.setter\n def K_exc(self, i):\n self._K_exc = i\n\n @property\n def e_exc(self):\n '''[float] Energy excretion factor, [% of intake].'''\n return self._e_exc\n @e_exc.setter\n def e_exc(self, i):\n self._e_exc = i\n\n @property\n def N_ur(self):\n '''[float] Nitrogen recovered in urine, [wt%].'''\n return self._N_ur\n @N_ur.setter\n def N_ur(self, i):\n self._N_ur = i\n\n @property\n def P_ur(self):\n '''[float] Phosphorus recovered in urine, [wt%].'''\n return self._P_ur\n @P_ur.setter\n def P_ur(self, i):\n self._P_ur = i\n\n @property\n def K_ur(self):\n '''[float] Potassium recovered in urine, [wt%].'''\n return self._K_ur\n @K_ur.setter\n def K_ur(self, i):\n self._K_ur = i\n\n @property\n def e_fec(self):\n '''[float] Percent of excreted energy in feces, [%].'''\n return self._e_fec\n @e_fec.setter\n def e_fec(self, i):\n self._e_fec = i\n\n @property\n def N_ur_NH3(self):\n '''[float] Reduced inorganic nitrogen in urine, modeled as NH3, [% of total urine N].'''\n return self._N_ur_NH3\n @N_ur_NH3.setter\n def N_ur_NH3(self, i):\n self._N_ur_NH3 = i\n\n @property\n def N_fec_NH3(self):\n '''[float] Reduced inorganic nitrogen in feces, modeled as NH3, [% of total feces N].'''\n return self._N_fec_NH3\n @N_fec_NH3.setter\n def N_fec_NH3(self, i):\n self._N_fec_NH3 = i\n\n @property\n def ur_exc(self):\n '''[float] Urine generated per day, [g/cap/d].'''\n return self._ur_exc\n @ur_exc.setter\n def ur_exc(self, i):\n self._ur_exc = i\n\n @property\n def fec_exc(self):\n '''[float] Feces generated per day, [g/cap/d].'''\n return self._fec_exc\n @fec_exc.setter\n def fec_exc(self, i):\n self._fec_exc = i\n\n @property\n def ur_moi(self):\n '''[float] Moisture (water) content of urine, [wt%].'''\n return self._ur_moi\n @ur_moi.setter\n def ur_moi(self, i):\n self._ur_moi = i\n\n @property\n def fec_moi(self):\n '''[float] Moisture (water) content of feces, [wt%].'''\n return self._fec_moi\n @fec_moi.setter\n def fec_moi(self, i):\n self._fec_moi = i\n\n @property\n def Mg_ur(self):\n '''[float] Magnesium excreted in urine, [g Mg/cap/d].'''\n return self._Mg_ur\n @Mg_ur.setter\n def Mg_ur(self, i):\n self._Mg_ur = i\n\n @property\n 
def Mg_fec(self):\n '''[float] Magnesium excreted in feces, [g Mg/cap/d].'''\n return self._Mg_fec\n @Mg_fec.setter\n def Mg_fec(self, i):\n self._Mg_fec = i\n\n @property\n def Ca_ur(self):\n '''[float] Calcium excreted in urine, [g Ca/cap/d].'''\n return self._Ca_ur\n @Ca_ur.setter\n def Ca_ur(self, i):\n self._Ca_ur = i\n\n @property\n def Ca_fec(self):\n '''[float] Calcium excreted in feces, [g Ca/cap/d].'''\n return self._Ca_fec\n @Ca_fec.setter\n def Ca_fec(self, i):\n self._Ca_fec = i\n\n @property\n def waste_ratio(self):\n '''\n [float] The amount of intake calories and nutrients\n (N, P, K) that is wasted.\n\n .. note::\n Not considered for Mg and Ca.\n '''\n return self._waste_ratio\n @waste_ratio.setter\n def waste_ratio(self, i):\n self._waste_ratio = i", "id": "12419026", "language": "Python", "matching_score": 3.996177911758423, "max_stars_count": 2, "path": "qsdsan/sanunits/_excretion.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\nQSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems\n\nThis module is developed by:\n <NAME> <<EMAIL>>\n\nThis module is under the University of Illinois/NCSA Open Source License.\nPlease refer to https://github.com/QSD-Group/QSDsan/blob/main/LICENSE.txt\nfor license details.\n'''\n\n\n# %%\n\nfrom warnings import warn\nfrom .. import SanUnit\n\n__all__ = ('CropApplication',)\n\nclass CropApplication(SanUnit):\n '''\n Recovery nutrients in the recycled excreta (energy not recovered) based on\n `Trimmer et al. <https://doi.org/10.1021/acs.est.0c03296>`_\n\n Parameters\n ----------\n if_material_loss : bool or dict\n If material loss occurs during application.\n loss_ratio : float or dict\n Fractions of material losses during application (if `if_materiloass` is True).\n\n Examples\n --------\n `bwaise systems <https://github.com/QSD-Group/EXPOsan/blob/main/exposan/bwaise/systems.py>`_\n\n References\n ----------\n [1] Trimmer et al., Navigating Multidimensional Social–Ecological System\n Trade-Offs across Sanitation Alternatives in an Urban Informal Settlement.\n Environ. Sci. Technol. 2020, 54 (19), 12641–12653.\n https://doi.org/10.1021/acs.est.0c03296.\n '''\n\n def __init__(self, ID='', ins=None, outs=(), thermo=None, init_with='WasteStream',\n if_material_loss=True, loss_ratio=0.02):\n\n SanUnit.__init__(self, ID, ins, outs, thermo, init_with)\n self.if_material_loss = if_material_loss\n self.loss_ratio = loss_ratio\n\n _N_ins = 1\n _N_outs = 2\n\n def _run(self):\n applied, loss = self.outs\n applied.copy_like(self.ins[0])\n loss.empty()\n if self.if_material_loss:\n if self._loss_ratio_type == 'float':\n loss.copy_like(applied)\n applied.mass *= 1 - self.loss_ratio\n loss.mass = self.ins[0].mass - applied.mass\n else:\n for cmp, ratio in self.loss_ratio.items():\n applied.imass[cmp] *= 1 - ratio\n loss.imass[cmp] = self.ins[0].imass[cmp] - applied.imass[cmp]\n\n\n @property\n def loss_ratio(self):\n '''\n [float] or [dict] Fractions of material losses during application.\n If a single number is provided, then it is assumed that losses of\n all Components in the WasteStream are the same.\n\n .. 
note::\n\n Set state variable values (e.g., COD) will be retained if the loss\n ratio is a single number (treated like the loss stream is split\n from the original stream), but not when the ratio is a dict.\n\n '''\n return self._loss_ratio\n @loss_ratio.setter\n def loss_ratio(self, i):\n if not self.if_material_loss:\n msg = f'`if_material_loss` is False, the set value {i} is ignored.'\n warn(msg, source=self)\n else:\n try:\n self._loss_ratio = float(i)\n self._loss_ratio_type = 'float'\n except TypeError:\n if isinstance(i, dict):\n self._loss_ratio = i\n self._loss_ratio_type = 'dict'\n else:\n raise TypeError(f'Only float or dict allowed, not {type(i).__name__}.')", "id": "1685512", "language": "Python", "matching_score": 0.5100886225700378, "max_stars_count": 2, "path": "qsdsan/sanunits/_crop_application.py" }, { "content": "# -*- coding: utf-8 -*-\n'''\nQSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems\n\nThis module is developed by:\n <NAME> <<EMAIL>>\n\nPart of this module is based on the Thermosteam package:\nhttps://github.com/BioSTEAMDevelopmentGroup/thermosteam\n\nThis module is under the University of Illinois/NCSA Open Source License.\nPlease refer to https://github.com/QSD-Group/QSDsan/blob/main/LICENSE.txt\nfor license details.\n'''\n\nfrom thermosteam.utils import chemicals_user\nfrom thermosteam import settings\nfrom qsdsan import Component, Components, Process, Processes, CompiledProcesses\nimport numpy as np\nfrom qsdsan.utils import ospath, data_path, save_pickle, load_pickled_cmps\nfrom scipy.optimize import fsolve, least_squares as lq, Bounds\nfrom math import log10\nfrom warnings import warn\n\n__all__ = ('load_adm1_cmps', 'ADM1')\n\n_path = ospath.join(data_path, 'process_data/_adm1.tsv')\n_path_cmps = ospath.join(data_path, '_adm1_cmps.pckl')\n_load_components = settings.get_default_chemicals\n\n# =============================================================================\n# ADM1-specific components\n# =============================================================================\ndef create_adm1_cmps(pickle=False):\n cmps_all = Components.load_default()\n\n # varies\n X_c = cmps_all.X_OHO.copy('X_c')\n X_c.description = 'Composite'\n X_c.i_C = 0.02786 * 12\n X_c.i_N = 0.0376\n\n\n X_ch = Component.from_chemical('X_ch', chemical='glycogen', Tc=1011.4, # glucan\n description='Carbohydrates',\n measured_as='COD',\n particle_size='Particulate',\n degradability='Slowly',\n organic=True)\n\n # varies\n X_pr = cmps_all.X_B_Subst.copy('X_pr')\n X_pr.i_N = 0.007 * 14\n\n X_li = Component.from_chemical('X_li', chemical='tripalmitin',\n description='Lipids',\n measured_as='COD',\n particle_size='Particulate',\n degradability='Slowly',\n organic=True)\n\n # both varies\n X_I = cmps_all.X_U_Inf.copy('X_I')\n S_I = cmps_all.S_U_Inf.copy('S_I')\n X_I.i_N = S_I.i_N = 0.06\n\n S_su = Component.from_chemical('S_su', chemical='glucose',\n description='Monosaccharides',\n measured_as='COD',\n particle_size='Soluble',\n degradability='Readily',\n organic=True)\n\n # varies\n S_aa = cmps_all.S_F.copy('S_aa')\n S_aa.i_N = 0.007 * 14\n S_aa.i_P = 0\n S_aa.i_C = 0.313\n\n S_fa = Component.from_chemical('S_fa', chemical='palmitate',\n description='Total long-chain fatty acids',\n measured_as='COD',\n particle_size='Soluble',\n degradability='Readily',\n organic=True)\n\n S_va = Component.from_chemical('S_va', chemical='valerate',\n description='Total valerate',\n measured_as='COD',\n particle_size='Soluble',\n degradability='Readily',\n 
organic=True)\n\n S_bu = Component.from_chemical('S_bu', chemical='butyrate',\n description='Total butyrate',\n measured_as='COD',\n particle_size='Soluble',\n degradability='Readily',\n organic=True)\n\n S_pro = cmps_all.S_Prop.copy('S_pro')\n S_ac = cmps_all.S_Ac.copy('S_ac')\n S_h2 = cmps_all.S_H2.copy('S_h2')\n S_ch4 = cmps_all.S_CH4.copy('S_ch4')\n\n S_IC = Component.from_chemical('S_IC', chemical='CO2',\n measured_as='C',\n description='Inorganic carbon',\n particle_size='Dissolved gas',\n degradability='Undegradable',\n organic=False)\n\n S_IN = Component.from_chemical('S_IN', chemical='NH3',\n measured_as='N',\n description='Inorganic nitrogen',\n particle_size='Soluble',\n degradability='Undegradable',\n organic=False)\n\n X_su = cmps_all.X_FO.copy('X_su')\n X_su.description = 'Biomass uptaking sugars'\n\n X_aa = cmps_all.X_FO.copy('X_aa')\n X_aa.description = 'Biomass uptaking amino acids'\n\n X_fa = cmps_all.X_FO.copy('X_fa')\n X_fa.description = 'Biomass uptaking long chain fatty acids'\n\n X_c4 = cmps_all.X_FO.copy('X_c4')\n X_c4.description = 'Biomass uptaking c4 fatty acids'\n\n X_pro = cmps_all.X_PRO.copy('X_pro')\n X_ac = cmps_all.X_ACO.copy('X_ac')\n X_h2 = cmps_all.X_HMO.copy('X_h2')\n\n for bio in (X_su, X_aa, X_fa, X_c4, X_pro, X_ac, X_h2): bio.formula = 'C5H7O2N'\n\n S_cat = cmps_all.S_CAT.copy('S_cat')\n S_an = cmps_all.S_AN.copy('S_an')\n S_cat.i_mass = S_an.i_mass = 1\n\n cmps_adm1 = Components([S_su, S_aa, S_fa, S_va, S_bu, S_pro, S_ac, S_h2,\n S_ch4, S_IC, S_IN, S_I, X_c, X_ch, X_pr, X_li,\n X_su, X_aa, X_fa, X_c4, X_pro, X_ac, X_h2, X_I,\n S_cat, S_an, cmps_all.H2O])\n cmps_adm1.default_compile()\n\n if pickle:\n save_pickle(cmps_adm1, _path_cmps)\n return cmps_adm1\n\n# cmps = create_adm1_cmps(False)\n# create_adm1_cmps(True)\n\ndef load_adm1_cmps(pickle=None):\n return load_pickled_cmps(create_adm1_cmps, _path_cmps, pickle)\n\n\n#%%\n# =============================================================================\n# kinetic rate functions\n# =============================================================================\n\ndef non_compet_inhibit(Si, Ki):\n return Ki/(Ki+Si)\n\ndef substr_inhibit(Si, Ki):\n return Si/(Ki+Si)\n\ndef mass2mol_conversion(cmps):\n '''conversion factor from kg[measured_as]/m3 to mol[component]/L'''\n return cmps.i_mass / cmps.chem_MW\n\ndef T_correction_factor(T1, T2, theta):\n return np.exp(theta * (T2-T1))\n\n# def T_correction_factor(T1, T2, delta_H, R):\n# return np.exp(delta_H/R * (1/T1 - 1/T2))\n\ndef calc_Kas(pKas, T_base, T_op, theta):\n pKas = np.asarray(pKas)\n return 10**(-pKas) * T_correction_factor(T_base, T_op, theta)\n\ndef acid_base_rxn(mols, weak_acids_tot, Kas):\n h, oh, nh4, nh3, co2, hco3, hac, ac, hpr, pr, hbu, bu, hva, va = mols\n S_cat, S_an, S_IN, S_IC, S_ac, S_pro, S_bu, S_va = weak_acids_tot # in M\n Kw, Ka_nh, Ka_co2, Ka_ac, Ka_pr, Ka_bu, Ka_va = Kas\n rhs = np.zeros_like(mols)\n rhs[0] = h + nh4 + S_cat - oh - hco3 - ac - pr - bu - va - S_an\n rhs[1] = S_IN - nh3 - nh4\n rhs[2] = S_IC - co2 - hco3\n rhs[3] = S_ac - ac - hac\n rhs[4] = S_pro - pr - hpr\n rhs[5] = S_bu - bu - hbu\n rhs[6] = S_va - va - hva\n rhs[7] = h*oh - Kw\n rhs[8] = h*nh3 - Ka_nh*nh4\n rhs[9] = h*hco3 - Ka_co2*co2\n rhs[10] = h*ac - Ka_ac*hac\n rhs[11] = h*pr - Ka_pr*hpr\n rhs[12] = h*bu - Ka_bu*hbu\n rhs[13] = h*va - Ka_va*hva\n return rhs\n\n# 7 acid-base pairs\njac0 = np.zeros((14, 14))\njac0[0,[0,2]] = 1\njac0[0,[1,5,7,9,11,13]] = -1\njac0[1,2:4] = -1\njac0[2,4:6] = -1\njac0[3,6:8] = -1\njac0[4,8:10] = -1\njac0[5,10:12] = 
-1\njac0[6,12:] = -1\n\ndef jacobian(mols, weak_acids_tot, Kas):\n h, oh = mols[:2]\n Kw, Ka_nh, Ka_co2, Ka_ac, Ka_pr, Ka_bu, Ka_va = Kas\n fprime = jac0.copy()\n fprime[7,[0,1]] = [oh, h]\n fprime[8,[2,3]] = [-Ka_nh, h]\n fprime[9,[4,5]] = [-Ka_co2, h]\n fprime[10,[6,7]] = [-Ka_ac, h]\n fprime[11,[8,9]] = [-Ka_pr, h]\n fprime[12,[10,11]] = [-Ka_bu, h]\n fprime[13,[12,13]] = [-Ka_va, h]\n return fprime\n\ndef pH_inhibit(pH, ul, ll, lower_only=True):\n if lower_only:\n # if pH >= ul: return 1\n # else: return exp(-3 * ((pH-ul)/(ul-ll))**2)\n low_by = np.minimum(pH-ul, 0)\n return np.exp(-3 * (low_by/(ul-ll))**2)\n else:\n return (1+2*10**(0.5*(ll-ul)))/(1+10**(pH-ul)+10**(ll-pH))\n\nR = 8.3144598e-2 # Universal gas constant, [bar/M/K]\n\nrhos = np.zeros(22) # 22 kinetic processes\nbounds = Bounds(0, np.inf)\n\ndef rhos_adm1(state_arr, params):\n ks = params['rate_constants']\n Ks = params['half_sat_coeffs']\n cmps = params['components']\n # n = len(cmps)\n pH_ULs = params['pH_ULs']\n pH_LLs = params['pH_LLs']\n KS_IN = params['KS_IN']\n KI_nh3 = params['KI_nh3']\n KIs_h2 = params['KIs_h2']\n KHb = params['K_H_base']\n Kab = params['Ka_base']\n KH_theta = params['K_H_theta']\n Ka_theta = params['Ka_theta']\n kLa = params['kLa']\n T_base = params['T_base']\n ab_0 = params['root'].data.copy()\n # f = params['f_abrxn']\n # f_prime = params['f_prime']\n # Cs_ids = cmps.indices(['X_c', 'X_ch', 'X_pr', 'X_li', 'X_su', 'X_aa',\n # 'X_fa', 'X_c4', 'X_c4', 'X_pro', 'X_ac', 'X_h2',\n # 'X_su', 'X_aa', 'X_fa', 'X_c4', 'X_pro', 'X_ac', 'X_h2'])\n # Cs = state_arr[Cs_ids]\n Cs = np.empty(19)\n Cs[:8] = state_arr[12:20]\n Cs[8:12] = state_arr[19:23]\n Cs[12:] = state_arr[16:23]\n # substrates_ids = cmps.indices(['S_su', 'S_aa', 'S_fa', 'S_va',\n # 'S_bu', 'S_pro', 'S_ac', 'S_h2'])\n # substrates = state_arr[substrates_ids]\n substrates = state_arr[:8]\n # S_va, S_bu, S_h2, S_IN = state_arr[cmps.indices(['S_va', 'S_bu', 'S_h2', 'S_IN'])]\n S_va, S_bu, S_h2, S_ch4, S_IC, S_IN = state_arr[[3,4,7,8,9,10]]\n unit_conversion = mass2mol_conversion(cmps)\n cmps_in_M = state_arr[:27] * unit_conversion\n weak_acids = cmps_in_M[[24, 25, 10, 9, 6, 5, 4, 3]]\n\n T_op = state_arr[-1]\n biogas_S = state_arr[7:10]\n biogas_M = state_arr[27:30]\n biogas_p = R * T_op * biogas_M\n Kas = Kab * T_correction_factor(T_base, T_op, Ka_theta)\n KH = KHb * T_correction_factor(T_base, T_op, KH_theta) / unit_conversion[7:10]\n\n rhos[:-3] = ks * Cs\n rhos[4:12] *= substr_inhibit(substrates, Ks)\n if S_va > 0: rhos[7] *= 1/(1+S_bu/S_va)\n if S_bu > 0: rhos[8] *= 1/(1+S_va/S_bu)\n ub = np.ones(14)\n ub[[2,4,6,8,10,12]] = ub[[3,5,7,9,11,13]] = weak_acids[2:]\n ab_0 = np.minimum(ab_0, ub)\n # ab_root = fsolve(acid_base_rxn, ab_0, args=(weak_acids, Kas,), fprime=jacobian)\n try: result = lq(acid_base_rxn, ab_0, args=(weak_acids, Kas), bounds=(0, ub), ftol=1e-15, xtol=1e-15, gtol=1e-15)\n except: breakpoint()\n ab_root = result.x\n # if any(ab_root < 0): breakpoint()\n params['root'].data[:] = ab_root\n\n try: pH = - log10(ab_root[0])\n except: breakpoint()\n rhos[4:12] *= pH_inhibit(pH, pH_ULs, pH_LLs) * substr_inhibit(S_IN, KS_IN)\n rhos[6:10] *= non_compet_inhibit(S_h2, KIs_h2)\n rhos[10] *= non_compet_inhibit(S_IN, KI_nh3)\n rhos[-3:] = kLa * (biogas_S - KH * biogas_p)\n\n return rhos\n\n# =============================================================================\n# ADM1 class\n# =============================================================================\nclass TempState:\n\n def __init__(self, length):\n self.data = 
np.zeros(length)\n\n\n@chemicals_user\nclass ADM1(CompiledProcesses):\n\n _stoichio_params = ('f_ch_xc', 'f_pr_xc', 'f_li_xc', 'f_xI_xc',\n 'f_fa_li', 'f_bu_su', 'f_pro_su', 'f_ac_su',\n 'f_va_aa', 'f_bu_aa', 'f_pro_aa', 'f_ac_aa',\n 'f_ac_fa', 'f_pro_va', 'f_ac_va', 'f_ac_bu', 'f_ac_pro',\n 'Y_su', 'Y_aa', 'Y_fa', 'Y_c4', 'Y_pro', 'Y_ac', 'Y_h2')\n _kinetic_params = ('rate_constants', 'half_sat_coeffs', 'pH_ULs', 'pH_LLs',\n 'KS_IN', 'KI_nh3', 'KIs_h2',\n 'Ka_base', 'Ka_theta', 'K_H_base', 'K_H_theta', 'kLa',\n 'T_base', 'components', 'root')\n _acid_base_pairs = (('H+', 'OH-'), ('NH4+', 'NH3'), ('CO2', 'HCO3-'),\n ('HAc', 'Ac-'), ('HPr', 'Pr-'),\n ('HBu', 'Bu-'), ('HVa', 'Va-'))\n _biogas_IDs = ('S_h2', 'S_ch4', 'S_IC')\n\n def __new__(cls, components=None, path=None, N_xc=2.69e-3, N_I=4.29e-3, N_aa=7e-3,\n f_ch_xc=0.2, f_pr_xc=0.2, f_li_xc=0.3, f_xI_xc=0.2,\n f_fa_li=0.95, f_bu_su=0.13, f_pro_su=0.27, f_ac_su=0.41,\n f_va_aa=0.23, f_bu_aa=0.26, f_pro_aa=0.05, f_ac_aa=0.4,\n f_ac_fa=0.7, f_pro_va=0.54, f_ac_va=0.31, f_ac_bu=0.8, f_ac_pro=0.57,\n Y_su=0.1, Y_aa=0.08, Y_fa=0.06, Y_c4=0.06, Y_pro=0.04, Y_ac=0.05, Y_h2=0.06,\n q_dis=0.5, q_ch_hyd=10, q_pr_hyd=10, q_li_hyd=10,\n k_su=30, k_aa=50, k_fa=6, k_c4=20, k_pro=13, k_ac=8, k_h2=35,\n K_su=0.5, K_aa=0.3, K_fa=0.4, K_c4=0.3, K_pro=0.3, K_ac=0.15, K_h2=2.5e-5,\n b_su=0.02, b_aa=0.02, b_fa=0.02, b_c4=0.02, b_pro=0.02, b_ac=0.02, b_h2=0.02,\n KI_h2_fa=5e-6, KI_h2_c4=1e-5, KI_h2_pro=3.5e-6, KI_nh3=1.8e-3, KS_IN=1e-4,\n pH_limits_aa=(4,5.5), pH_limits_ac=(6,7), pH_limits_h2=(5,6),\n T_base=298.15, pKa_base=[14, 9.25, 6.35, 4.76, 4.88, 4.82, 4.86],\n Ka_theta=[0.076, 0.070, 0.010, 0, 0, 0, 0],\n kLa=200, K_H_base=[7.8e-4, 1.4e-3, 3.5e-2],\n K_H_theta=[-5.66e-3, -1.929e-2, -2.629e-2],\n **kwargs):\n\n cmps = _load_components(components)\n cmps.X_c.i_N = N_xc * 14\n cmps.X_I.i_N = N_I * 14\n cmps.S_aa.i_N = cmps.X_pr.i_N = N_aa * 14\n\n if not path: path = _path\n self = Processes.load_from_file(path,\n components=cmps,\n conserved_for=('COD', 'C', 'N'),\n parameters=cls._stoichio_params,\n compile=False)\n\n gas_transfer = []\n for i in cls._biogas_IDs:\n new_p = Process('%s_transfer' % i.lstrip('S_'),\n reaction={i:-1},\n ref_component=i,\n conserved_for=(),\n parameters=())\n gas_transfer.append(new_p)\n self.extend(gas_transfer)\n self.compile()\n\n stoichio_vals = (f_ch_xc, f_pr_xc, f_li_xc, f_xI_xc,\n f_fa_li, f_bu_su, f_pro_su, f_ac_su,\n f_va_aa, f_bu_aa, f_pro_aa, f_ac_aa,\n f_ac_fa, f_pro_va, f_ac_va, f_ac_bu, f_ac_pro,\n Y_su, Y_aa, Y_fa, Y_c4, Y_pro, Y_ac, Y_h2)\n pH_ULs = np.array([pH_limits_aa[0]]*6 + [pH_limits_ac[0], pH_limits_h2[0]])\n pH_LLs = np.array([pH_limits_aa[1]]*6 + [pH_limits_ac[1], pH_limits_h2[1]])\n ks = np.array((q_dis, q_ch_hyd, q_pr_hyd, q_li_hyd,\n k_su, k_aa, k_fa, k_c4, k_c4, k_pro, k_ac, k_h2,\n b_su, b_aa, b_fa, b_c4, b_pro, b_ac, b_h2))\n Ks = np.array((K_su, K_aa, K_fa, K_c4, K_c4, K_pro, K_ac, K_h2))\n KIs_h2 = np.array((KI_h2_fa, KI_h2_c4, KI_h2_c4, KI_h2_pro))\n K_H_base = np.array(K_H_base)\n K_H_theta = np.array(K_H_theta)\n Ka_base = np.array([10**(-pKa) for pKa in pKa_base])\n Ka_theta = np.array(Ka_theta)\n root = TempState(len(cls._acid_base_pairs) * 2)\n dct = self.__dict__\n dct.update(kwargs)\n\n self.set_rate_function(rhos_adm1)\n dct['_parameters'] = dict(zip(cls._stoichio_params, stoichio_vals))\n self.rate_function._params = dict(zip(cls._kinetic_params,\n [ks, Ks, pH_ULs, pH_LLs, KS_IN*14,\n KI_nh3*14, KIs_h2, Ka_base, Ka_theta,\n K_H_base, K_H_theta, kLa,\n T_base, self._components, 
root]))\n\n dct.update({\n '_stoichio_params':cls._stoichio_params,\n '_kinetic_params':cls._kinetic_params,\n '_acid_base_pairs':cls._acid_base_pairs,\n '_biogas_IDs':cls._biogas_IDs\n })\n\n return self\n\n def set_pKas(self, pKas):\n if len(pKas) != 7:\n raise ValueError(f'pKas must be an array of 7 elements, one for each '\n f'acid-base pair, not {len(pKas)} elements.')\n dct = self.rate_function._params\n dct['Ka_base'] = np.array([10**(-pKa) for pKa in pKas])\n\n def _find_index(self, process):\n isa = isinstance\n if isa(process, int): return process\n elif isa(process, str): return self.index(process)\n elif isa(process, Process): return self.index(process.ID)\n else: raise TypeError(f'must input an int or str or :class:`Process`, '\n f'not {type(process)}')\n\n def set_rate_constant(self, k, process):\n i = self._find_index(process)\n self.rate_function._params['ks'][i] = k\n\n def set_half_sat_K(self, K, process):\n i = self._find_index(process)\n self.rate_function._params['Ks'][i-4] = K\n\n def set_pH_inhibit_bounds(self, process, lower=None, upper=None):\n i = self._find_index(process)\n dct = self.rate_function._params\n if lower is None: lower = dct['pH_LLs'][i-4]\n else: dct['pH_LLs'][i-4] = lower\n if upper is None: upper = dct['pH_ULs'][i-4]\n else: dct['pH_ULs'][i-4] = upper\n if lower >= upper:\n raise ValueError(f'lower limit for pH inhibition of {process} must '\n f'be lower than the upper limit, not {[lower, upper]}')\n\n def set_h2_inhibit_K(self, KI, process):\n i = self._find_index(process)\n self.rate_function._params['KIs_h2'][i-6] = KI\n\n def set_KS_IN(self, K):\n '''Set inhibition coefficient for inorganic nitrogen as a secondary\n substrate [M nitrogen].'''\n self.rate_function._params['KS_IN'] = K * 14\n\n def set_KI_nh3(self, K):\n '''Set inhibition coefficient for ammonia-nitrogen [M nitrogen].'''\n self.rate_function._params['KI_nh3'] = K * 14\n\n def set_parameters(self, **parameters):\n '''Set values to stoichiometric parameters.'''\n non_stoichio = {}\n for k,v in parameters.items():\n if k in self._stoichio_params:\n if v >= 0 : self._parameters[k] = v\n else: raise ValueError(f'{k} must >= 0, not {v}')\n else: non_stoichio[k] = v\n if non_stoichio:\n warn(f'ignored value setting for non-stoichiometric parameters {non_stoichio}')\n self.check_stoichiometric_parameters()\n if self._stoichio_lambdified is not None:\n self.__dict__['_stoichio_lambdified'] = None\n\n def check_stoichiometric_parameters(self):\n stoichio = self.parameters\n subst = ('xc', 'li', 'su', 'aa', 'fa', 'va', 'bu', 'pro')\n for s in subst:\n f_tot = sum([stoichio[k] for k in self._stoichio_params[:17] \\\n if k.endswith(s)])\n if f_tot > 1:\n raise ValueError(f\"the sum of 'f_()_{s}' values mustn't exceed 1\")\n", "id": "10348488", "language": "Python", "matching_score": 6.082770824432373, "max_stars_count": 2, "path": "qsdsan/processes/_adm1.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\nQSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems\n\nThis module is developed by:\n <NAME> <<EMAIL>>\n <NAME> <<EMAIL>>\n\nPart of this module is based on the Thermosteam package:\nhttps://github.com/BioSTEAMDevelopmentGroup/thermosteam\n\nThis module is under the University of Illinois/NCSA Open Source License.\nPlease refer to https://github.com/QSD-Group/QSDsan/blob/main/LICENSE.txt\nfor license details.\n'''\n\nimport numpy as np\nimport pandas as pd\nimport thermosteam as tmo\nfrom . 
import _component, Chemical, Chemicals, CompiledChemicals, Component\nfrom .utils import add_V_from_rho, load_data\n\n__all__ = ('Components', 'CompiledComponents')\n\nsetattr = object.__setattr__\n\nutils = tmo.utils\n_component_properties = _component._component_properties\n_num_component_properties = _component._num_component_properties\n_key_component_properties = _component._key_component_properties\n# _TMH = tmo.base.thermo_model_handle.ThermoModelHandle\n_PH = tmo.base.phase_handle.PhaseHandle\nDomainError = tmo.exceptions.DomainError\n\n\n# %%\n\nclass UndefinedComponent(AttributeError):\n '''AttributeError regarding undefined :class:`Component` objects.'''\n def __init__(self, ID):\n super().__init__(repr(ID))\n\ndef must_compile(*args, **kwargs): # pragma: no cover\n raise TypeError('Method valid only for CompiledChemicals, '\n 'run <Components>.compile() to compile first.')\n\n\n# =============================================================================\n# Define the Components class\n# =============================================================================\n\nclass Components(Chemicals):\n '''\n A subclass of :class:`thermosteam.Chemicals`, contains :class:`Component`\n objects as attributes.\n\n Examples\n --------\n `Component <https://qsdsan.readthedocs.io/en/latest/tutorials/2_Component.html>`_\n\n See Also\n --------\n `thermosteam.Chemicals <https://thermosteam.readthedocs.io/en/latest/Chemicals.html>`_\n\n '''\n\n def __new__(cls, components, cache=False):\n self = super(Chemicals, cls).__new__(cls)\n isa = isinstance\n setfield = setattr\n IDs = set()\n # CASs = set()\n for i in components:\n if isa(i, Component):\n ID = i.ID\n if ID in IDs:\n raise ValueError(f'More than one `Component` has the ID {ID}.')\n # CAS = i.CAS\n # if CAS in CASs: continue\n # CASs.add(CAS)\n setfield(self, i.ID, i)\n elif isa(i, Chemical):\n raise TypeError(f'{i} is a `thermosteam.Chemical` object, '\n 'use `Component.from_chemical` to define a `Component` object.')\n else:\n raise TypeError(f'Only `Component` objects can be included, not a `{type(i).__name__}` object.')\n\n return self\n\n\n def __setattr__(self, ID, component):\n raise TypeError('Cannot set attribute; use `<Components>.append/extend` instead.')\n\n\n def __setitem__(self, ID, component):\n raise TypeError('Cannot set item; use `<Components>.append/extend` instead.')\n\n\n def __getitem__(self, key):\n '''Return a :class:`Component` object or a list of :class:`Component` objects.'''\n dct = self.__dict__\n try:\n if isinstance(key, str):\n return dct[key]\n else:\n return [dct[i] for i in key]\n except KeyError as key_error:\n raise UndefinedComponent(key_error.args[0])\n\n\n def __contains__(self, component):\n if isinstance(component, str):\n return component in self.__dict__\n elif isinstance(component, Component):\n return component in self.__dict__.values()\n else: # pragma: no cover\n return False\n\n\n def __repr__(self):\n return f\"Components([{', '.join(self.__dict__)}])\"\n\n\n def copy(self):\n '''Return a copy.'''\n copy = object.__new__(Components)\n for cmp in self: setattr(copy, cmp.ID, cmp)\n return copy\n\n\n def append(self, component):\n '''Append a Component'''\n if not isinstance(component, Component):\n if isinstance(component, Chemical):\n raise TypeError(f'{component} is a `Chemical` object, '\n 'use `Component.from_chemical` to define a `Component` object.')\n else:\n raise TypeError(\"only `Component` objects can be appended, \"\n f\"not `{type(component).__name__}` object.\")\n ID = 
component.ID\n if ID in self.__dict__:\n raise ValueError(f\"{ID} already defined in this `Components` object.\")\n setattr(self, ID, component)\n\n\n def extend(self, components):\n '''Extend with more :class:`Component` objects.'''\n if isinstance(components, Components):\n self.__dict__.update(components.__dict__)\n else:\n for component in components: self.append(component)\n\n\n def compile(self, skip_checks=False):\n '''Cast as a :class:`CompiledComponents` object.'''\n components = tuple(self)\n setattr(self, '__class__', CompiledComponents)\n try: self._compile(components, skip_checks)\n except Exception as error:\n setattr(self, '__class__', Components)\n setattr(self, '__dict__', {i.ID: i for i in components})\n raise error\n\n kwarray = array = index = indices = must_compile\n\n _default_data = None\n\n\n def default_compile(self, lock_state_at='l',\n soluble_ref='Urea', gas_ref='CO2', particulate_ref='Stearin'):\n '''\n Auto-fill of the missing properties of the components and compile,\n boiling point (Tb) and molar volume (V) will be copied from the reference component,\n the remaining missing properties will be copied from those of water.\n\n Parameters\n ----------\n lock_state_at : str\n Lock the state of components at a certain phase,\n can be 'g', 'l', 's', or left as empty to avoid locking state.\n Components that have already been locked will not be affected.\n soluble_ref : obj or str\n Reference component (or chemical ID) for those with `particle_size` == 'Soluble'.\n gas_ref : obj or str\n Reference component (or chemical ID) for those with `particle_size` == 'Dissolved gas'.\n particulate_ref : obj or str\n Reference component (or chemical ID) for those with `particle_size` == 'Particulate'.\n\n Examples\n --------\n >>> from qsdsan import Component, Components\n >>> X = Component('X', phase='s', measured_as='COD', i_COD=0, description='Biomass',\n ... organic=True, particle_size='Particulate', degradability='Readily')\n >>> X_inert = Component('X_inert', phase='s', description='Inert biomass', i_COD=0,\n ... organic=True, particle_size='Particulate', degradability='Undegradable')\n >>> Substrate = Component('Substrate', phase='s', measured_as='COD', i_mass=18.3/300,\n ... 
organic=True, particle_size='Particulate', degradability='Readily')\n >>> cmps = Components([X, X_inert, Substrate])\n >>> cmps.default_compile()\n >>> cmps\n CompiledComponents([X, X_inert, Substrate])\n '''\n isa = isinstance\n get = getattr\n if isa(soluble_ref, str):\n sol = Chemical(soluble_ref)\n if soluble_ref.lower() == 'urea': sol.at_state('l')\n if isa(gas_ref, str):\n gas = Chemical(gas_ref)\n if gas_ref.lower() == 'co2': gas.at_state('g')\n if isa(particulate_ref, str):\n par = Chemical(particulate_ref)\n if particulate_ref.lower() == 'stearin':\n # 0.8559 at 90 °C https://pubchem.ncbi.nlm.nih.gov/compound/Tristearin#section=Density\n # avg ~0.9 http://www.dgfett.de/material/physikalische_eigenschaften.pdf\n add_V_from_rho(par, 0.9, 'g/ml', 's')\n par.at_state('s')\n\n TPkwargs = dict(T=298.15, P=101325)\n def get_constant_V_model(ref_cmp, phase=''):\n if ref.locked_state: return ref.V(**TPkwargs)\n else: return ref.V(phase, **TPkwargs)\n\n water = Chemical('Water')\n for cmp in self:\n particle_size = cmp.particle_size\n ref = sol if particle_size=='Soluble' \\\n else gas if particle_size=='Dissolved gas' else par\n if lock_state_at:\n try: cmp.at_state(lock_state_at)\n except TypeError: pass\n cmp.Tb = cmp.Tb or ref.Tb\n\n # If don't have model for molar volume, set those to default\n COPY_V = False\n cmp_V = cmp.V if cmp.locked_state else cmp.V.l\n try: cmp_V(**TPkwargs)\n except: COPY_V = True\n if COPY_V:\n locked_state = cmp.locked_state\n if locked_state:\n V_const = get_constant_V_model(ref, locked_state)*(cmp.chem_MW/ref.MW)\n cmp.V.add_model(V_const)\n else:\n for phase in ('g', 'l', 's'): # iterate through phases\n backup_ref = gas if phase=='g' else sol if phase=='l' else par\n try: V_const = get_constant_V_model(ref, phase)\n except: V_const = get_constant_V_model(backup_ref, phase)\n V_const *= (cmp.chem_MW/ref.MW)\n get(cmp.V, phase).add_model(V_const)\n\n if not cmp.Hvap.valid_methods():\n try:\n ref.Hvap(cmp.Tb)\n cmp.copy_models_from(ref, names=('Hvap', ))\n except RuntimeError: # Hvap model cannot be extrapolated to Tb\n cmp.copy_models_from(water, names=('Hvap', ))\n\n # Copy all remaining properties from water\n cmp.copy_models_from(water)\n self.compile()\n\n\n @classmethod\n def load_from_file(cls, path_or_df, index_col=None,\n use_default_data=False, store_data=False):\n '''\n Create and return a :class:`Components` objects based on properties\n defined in a datasheet.\n\n Parameters\n ----------\n path_or_df : str or :class:`pandas.DataFrame`\n File path, the file should end with \".cvs\", \".xls\", or \"xlsx\".\n index_col : None or int\n Index column of the :class:`pandas.DataFrame`.\n use_default_data : bool\n Whether to use the cached default components.\n store_data : bool\n Whether to store this as the default components.\n\n Returns\n -------\n A :class:`Components` object that contains all created Component objects.\n\n\n .. 
note::\n\n The :class:`Components` object needs to be compiled before it is used in simulation.\n\n '''\n if use_default_data and cls._default_data is not None:\n data = cls._default_data\n elif isinstance(path_or_df, str):\n data = load_data(path_or_df, index_col=index_col)\n else:\n data = path_or_df\n\n new = cls(())\n\n for i, cmp in data.iterrows():\n if pd.isna(cmp.measured_as):\n cmp.measured_as = None\n try:\n component = Component(ID = cmp.ID,\n search_ID = str(cmp.CAS),\n measured_as = cmp.measured_as)\n except LookupError:\n try:\n component = Component(ID = cmp.ID,\n search_ID = 'PubChem='+str(int(cmp.PubChem)),\n measured_as = cmp.measured_as)\n except:\n if not pd.isna(cmp.formula):\n component = Component(ID = cmp.ID,\n formula = cmp.formula,\n measured_as = cmp.measured_as)\n else:\n component = Component(ID = cmp.ID,\n measured_as = cmp.measured_as)\n for j in _component_properties:\n field = '_' + j\n try:\n if pd.isna(cmp[j]): setattr(component, j, None)\n else: setattr(component, field, cmp[j])\n except KeyError:\n continue\n new.append(component)\n\n if store_data:\n cls._default_data = data\n return new\n\n\n @classmethod\n def load_default(cls, use_default_data=True, store_data=False, default_compile=True):\n '''\n Create and return a :class:`Components` or :class:`CompiledComponents`\n object containing all default :class:`Component` objects based on\n `Reiger et al. <https://iwaponline.com/ebooks/book/630/Guidelines-for-Using-Activated-Sludge-Models>`_\n\n Parameters\n ----------\n use_default_data : bool, optional\n Whether to use default cache data. The default is True.\n store_data : bool, optional\n Whether to store the default data as cache. The default is True.\n default_compile : bool, optional\n Whether to compile the default :class:`Components`. The default is True.\n\n Returns\n -------\n A :class:`Components` or :class:`CompiledComponents` object with\n default :class:`Component` objects.\n\n\n .. 
note::\n\n [1] Component-specific properties are defined in ./data/component.cvs.\n\n [2] When `default_compile` is True, all essential chemical-specific properties\n (except molar volume model and normal boiling temperature) that are missing will\n be defaulted to those of water.\n\n [3] When `default_compile` is True, missing molar volume models will be defaulted\n according to particle sizes: particulate or colloidal -> copy from NaCl,\n soluble -> copy from urea, dissolved gas -> copy from CO2.\n\n [4] When `default_compile` is True, missing normal boiling temperature will be\n defaulted according to particle sizes: particulate or colloidal -> copy from NaCl,\n soluble -> copy from urea, dissolved gas -> copy from CO2.\n\n\n See Also\n --------\n :func:`~.Components.default_compile`\n\n References\n ----------\n [1] <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.\n Guidelines for Using Activated Sludge Models; IWA Publishing, 2012.\n https://doi.org/10.2166/9781780401164.\n '''\n import os\n path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/_components.tsv')\n del os\n new = cls.load_from_file(path, index_col=None,\n use_default_data=use_default_data, store_data=store_data)\n\n H2O = Component.from_chemical('H2O', Chemical('H2O'),\n i_charge=0, f_BOD5_COD=0, f_uBOD_COD=0,\n f_Vmass_Totmass=0, description=\"Water\",\n particle_size='Soluble',\n degradability='Undegradable', organic=False)\n new.append(H2O)\n\n if default_compile:\n new.default_compile(lock_state_at='', particulate_ref='NaCl')\n new.compile()\n # Add aliases\n new.set_alias('H2O', 'Water')\n # Pre-define groups\n new.define_group('substrates',\n ('S_CH3OH', 'S_Ac', 'S_Prop', 'S_F', 'C_B_Subst', 'X_B_Subst'))\n new.define_group('biomass',\n ('X_OHO', 'X_AOO', 'X_NOO', 'X_AMO', 'X_PAO',\n 'X_MEOLO', 'X_FO', 'X_ACO', 'X_HMO', 'X_PRO'))\n new.define_group('S_VFA', ('S_Ac', 'S_Prop'))\n new.define_group('X_Stor', ('X_OHO_PHA', 'X_GAO_PHA', 'X_PAO_PHA',\n 'X_GAO_Gly', 'X_PAO_Gly'),)\n new.define_group('X_ANO', ('X_AOO', 'X_NOO'))\n new.define_group('X_Bio', ('X_OHO', 'X_AOO', 'X_NOO', 'X_AMO', 'X_PAO',\n 'X_MEOLO', 'X_ACO', 'X_HMO', 'X_PRO', 'X_FO'))\n new.define_group('S_NOx', ('S_NO2', 'S_NO3'))\n new.define_group('X_PAO_PP', ('X_PAO_PP_Lo', 'X_PAO_PP_Hi'))\n new.define_group('TKN', [i.ID for i in new if i.ID not in ('S_N2','S_NO2','S_NO3')])\n return new\n\n\n @staticmethod\n def append_combustion_components(components, alt_IDs={},\n try_default_compile=True,\n **default_compile_kwargs):\n '''\n Return a new :class:`~.Components` object with the given components\n and those needed for combustion reactions (complete oxidation with O2),\n namely O2, CO2 (for C), H2O (for H), N2 (for N), P4O10 (for P), and SO2 (for S).\n\n If the combustion components are already in the given collection,\n they will NOT be overwritten.\n\n Parameters\n ----------\n components : Iterable(obj)\n The original components to be appended.\n alt_IDs : dict\n Alternative IDs for the combustion components to be added as aliases,\n e.g., if \"S_O2\" is used instead of \"O2\", then pass {'O2': 'S_O2'}.\n default_compile : bool\n Whether to try default compile when some components\n are missing key properties for compiling.\n default_compile_kwargs : dict\n Keyword arguments to pass to `default_compile` if needed.\n\n See Also\n --------\n :func:`default_compile`\n\n Examples\n --------\n >>> from qsdsan import Components\n >>> cmps = Components.load_default()\n >>> cmps\n CompiledComponents([S_H2, S_CH4, S_CH3OH, S_Ac, 
S_Prop, S_F, S_U_Inf, S_U_E, C_B_Subst, C_B_BAP, C_B_UAP, C_U_Inf, X_B_Subst, X_OHO_PHA, X_GAO_PHA, X_PAO_PHA, X_GAO_Gly, X_PAO_Gly, X_OHO, X_AOO, X_NOO, X_AMO, X_PAO, X_MEOLO, X_FO, X_ACO, X_HMO, X_PRO, X_U_Inf, X_U_OHO_E, X_U_PAO_E, X_Ig_ISS, X_MgCO3, X_CaCO3, X_MAP, X_HAP, X_HDP, X_FePO4, X_AlPO4, X_AlOH, X_FeOH, X_PAO_PP_Lo, X_PAO_PP_Hi, S_NH4, S_NO2, S_NO3, S_PO4, S_K, S_Ca, S_Mg, S_CO3, S_N2, S_O2, S_CAT, S_AN, H2O])\n >>> CH4 = cmps.S_CH4.copy('CH4', phase='g')\n >>> cmps = Components.append_combustion_components([*cmps, CH4], alt_IDs=dict(O2='S_O2'))\n >>> cmps\n CompiledComponents([S_H2, S_CH4, S_CH3OH, S_Ac, S_Prop, S_F, S_U_Inf, S_U_E, C_B_Subst, C_B_BAP, C_B_UAP, C_U_Inf, X_B_Subst, X_OHO_PHA, X_GAO_PHA, X_PAO_PHA, X_GAO_Gly, X_PAO_Gly, X_OHO, X_AOO, X_NOO, X_AMO, X_PAO, X_MEOLO, X_FO, X_ACO, X_HMO, X_PRO, X_U_Inf, X_U_OHO_E, X_U_PAO_E, X_Ig_ISS, X_MgCO3, X_CaCO3, X_MAP, X_HAP, X_HDP, X_FePO4, X_AlPO4, X_AlOH, X_FeOH, X_PAO_PP_Lo, X_PAO_PP_Hi, S_NH4, S_NO2, S_NO3, S_PO4, S_K, S_Ca, S_Mg, S_CO3, S_N2, S_O2, S_CAT, S_AN, H2O, CH4, CO2, N2, P4O10, SO2])\n >>> cmps.O2 is cmps.S_O2\n True\n '''\n cmps = components if isinstance(components, (Components, CompiledComponents)) \\\n else Components(components)\n comb_cmps = ['O2', 'CO2', 'H2O', 'N2', 'P4O10', 'SO2']\n aliases = dict(H2O='Water')\n aliases.update(alt_IDs)\n get = getattr\n for k, v in alt_IDs.items():\n try:\n get(cmps, v)\n aliases[k] = comb_cmps.pop(comb_cmps.index(k))\n except AttributeError:\n pass\n for ID in comb_cmps:\n try: get(cmps, ID)\n except AttributeError:\n phase = 'g' if ID in ('O2', 'CO2', 'N2', 'SO2') else 's' if ID=='P4O10' else ''\n ps = 'Dissolved gas' if phase == 'g' else 'Particulate' if phase=='s' else 'Soluble'\n cmp = Component(ID, phase=phase, organic=False, particle_size=ps,\n degradability='Undegradable')\n cmps.append(cmp)\n add_V_from_rho(cmps.P4O10, rho=2.39, rho_unit='g/mL') # http://www.chemspider.com/Chemical-Structure.14128.html\n try:\n cmps.compile()\n except RuntimeError: # cannot compile due to missing properties\n cmps.default_compile(**default_compile_kwargs)\n for k, v in aliases.items():\n cmps.set_alias(k, v)\n return cmps\n\n\n @classmethod\n def from_chemicals(cls, chemicals, **data):\n '''\n Return a new :class:`Components` from a :class:`thermosteam.Chemicals`\n or :class:`thermosteam.CompiledChemicals` object.\n\n Parameters\n ----------\n chemicals: thermosteam.Chemicals\n The :class:`thermosteam.Chemicals` object as the basis\n for the new :class:`~.Components` object.\n :class:`Component` objects will have the same ID as the corresponding\n :class:`thermosteam.Chemical` object in the :class:`thermosteam.Chemicals`\n object.\n data : dict\n A nested dict with keys being the new components and values being the inner dict,\n keys and values of the inner dict are the attribute names and values, respectively.\n\n Examples\n --------\n >>> import qsdsan as qs\n >>> chems = qs.Chemicals((qs.Chemical('Water'), qs.Chemical('Ethanol')))\n >>> data = {'Water': {'particle_size': 'Soluble',\n ... 'degradability': 'Undegradable',\n ... 'organic': False},\n ... 'Ethanol': {'particle_size': 'Soluble',\n ... 'degradability': 'Readily',\n ... 
'organic': False}}\n >>> cmps = qs.Components.from_chemicals(chems, **data)\n >>> cmps\n Components([Water, Ethanol])\n '''\n cmps = cls.__new__(cls, ())\n for i in chemicals:\n val_dct = data.get(i.ID)\n cmp = Component.from_chemical(i.ID, i)\n if val_dct:\n for k, v in val_dct.items():\n setattr(cmp, k, v)\n cmps.append(cmp)\n\n return cmps\n\n\n# %%\n\n# =============================================================================\n# Define the CompiledComponents class\n# =============================================================================\n\nchemical_data_array = tmo._chemicals.chemical_data_array\n\ndef component_data_array(components, attr):\n data = chemical_data_array(components, attr)\n return data\n\n\nclass CompiledComponents(CompiledChemicals):\n '''\n A subclass of :class:`thermosteam.CompiledChemicals`, contains `Component` objects as attributes.\n\n Examples\n --------\n `Component <https://qsdsan.readthedocs.io/en/latest/tutorials/2_Component.html>`_\n\n See Also\n --------\n `thermosteam.CompiledChemicals <https://thermosteam.readthedocs.io/en/latest/Chemicals.html>`_\n\n '''\n\n _cache = {}\n\n def __new__(cls, components, cache=None):\n isa = isinstance\n components = tuple([cmp if isa(cmp, Component) else Component(cmp, cache)\n for cmp in components])\n cache = cls._cache\n if components in cache:\n self = cache[components]\n else:\n self = object.__new__(cls)\n setfield = setattr\n for cmp in components:\n setfield(self, cmp.ID, cmp)\n self._compile(components)\n cache[components] = self\n return self\n\n def __reduce__(self):\n return CompiledComponents, (self.tuple, )\n\n\n def __contains__(self, component):\n if isinstance(component, str):\n return component in self.__dict__\n elif isinstance(component, Component):\n return component in self.tuple\n else: # pragma: no cover\n return False\n\n\n def __repr__(self):\n return f\"CompiledComponents([{', '.join(self.IDs)}])\"\n\n\n def refresh_constants(self):\n '''\n Refresh constant arrays of :class:`Components` objects,\n including all chemical and component-specific properties.\n '''\n super().refresh_constants()\n dct = self.__dict__\n components = self.tuple\n for i in _num_component_properties:\n dct[i] = component_data_array(components, i)\n\n def compile(self, skip_checks=False):\n '''Do nothing, :class:`CompiledComponents` have already been compiled.'''\n pass\n\n\n def _compile(self, components, skip_checks=False):\n dct = self.__dict__\n tuple_ = tuple # this speeds up the code\n components = tuple_(dct.values())\n CompiledChemicals._compile(self, components, skip_checks)\n for component in components:\n missing_properties = component.get_missing_properties(_key_component_properties)\n if not missing_properties: continue\n missing = utils.repr_listed_values(missing_properties)\n raise RuntimeError(f'{component} is missing key component-related properties ({missing}).')\n\n for i in _num_component_properties:\n dct[i] = component_data_array(components, i)\n\n dct['g'] = np.asarray([1 if cmp.particle_size == 'Dissolved gas' else 0 for cmp in components])\n s = dct['s'] = np.asarray([1 if cmp.particle_size == 'Soluble' else 0 for cmp in components])\n c = dct['c'] = np.asarray([1 if cmp.particle_size == 'Colloidal' else 0 for cmp in components])\n dct['x'] = np.asarray([1 if cmp.particle_size == 'Particulate' else 0 for cmp in components])\n b = dct['b'] = np.asarray([1 if cmp.degradability != 'Undegradable' else 0 for cmp in components])\n dct['rb'] = np.asarray([1 if cmp.degradability == 'Readily' 
else 0 for cmp in components])\n org = dct['org'] = np.asarray([int(cmp.organic) for cmp in components])\n inorg = dct['inorg'] = np.ones_like(org) - org\n ID_arr = dct['_ID_arr'] = np.asarray([i.ID for i in components])\n dct['chem_MW'] = np.asarray([i.chem_MW for i in components])\n\n # Inorganic degradable non-gas, incorrect\n inorg_b = inorg * b * (s+c)\n if inorg_b.sum() > 0:\n bad_IDs = ID_arr[np.where(inorg_b==1)[0]]\n raise ValueError(f'Components {bad_IDs} are inorganic, degradable, and not gas, '\n 'which is not correct.')\n\n\n def subgroup(self, IDs):\n '''Create a new subgroup of :class:`Component` objects.'''\n components = self[IDs]\n new = Components(components)\n new.compile()\n for i in new.IDs:\n for j in self.get_aliases(i):\n try: new.set_alias(i, j)\n except: pass\n return new\n\n\n def index(self, ID):\n '''Return index of specified component.'''\n try: return self._index[ID]\n except KeyError:\n raise UndefinedComponent(ID)\n\n\n def indices(self, IDs):\n '''Return indices of multiple components.'''\n try:\n dct = self._index\n return [dct[i] for i in IDs]\n except KeyError as key_error:\n raise UndefinedComponent(key_error.args[0])\n\n\n def copy(self):\n '''Return a copy.'''\n copy = Components(self)\n copy.compile()\n return copy\n\n\n def get_IDs_from_array(self, array):\n '''\n Get the IDs of a group of components based on the 1/0 or True/False array.\n\n Parameters\n ----------\n array : Iterable(1/0)\n 1D collection of 1/0 or True/False with the same length\n as the IDs.\n\n Examples\n --------\n >>> from qsdsan import Components\n >>> cmps = Components.load_default()\n >>> cmps.get_IDs_from_array(cmps.g)\n ('S_H2', 'S_CH4', 'S_N2', 'S_O2')\n '''\n return tuple(self._ID_arr[np.asarray(array).astype(bool)])\n\n\n def get_array_from_IDs(self, IDs):\n '''\n Generate a ``numpy`` array in the same shape as ``CompiledComponents.IDs``,\n where the values would be 1 for components whose IDs are in the given ID iterable\n and 0 for components not in the given ID iterable.\n\n Parameters\n ----------\n IDs : Iterable(str)\n IDs of select components within this ``~.CompiledComponents``.\n\n Examples\n --------\n >>> from qsdsan import Components\n >>> cmps = Components.load_default()\n >>> IDs = ('S_H2', 'S_CH4', 'S_N2', 'S_O2')\n >>> cmps.get_array_from_IDs(IDs)\n array([1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0])\n '''\n arr = np.zeros_like(self._ID_arr, dtype='int')\n arr[self.indices(IDs)] = 1\n return arr\n\n @property\n def gases(self):\n '''[list] Gas components.'''\n return self[self.get_IDs_from_array(self.g)]\n\n @property\n def solids(self):\n '''[list] Solids (particulate) components.'''\n return self[self.get_IDs_from_array(self.x)]\n\n @property\n def inorganics(self):\n '''[list] Inorganic components.'''\n return self[self.get_IDs_from_array(self.inorg)]\n\n @property\n def inorganic_solids(self):\n '''[list] Inorganic solids (particulate & inorganic, all undegradable) components.'''\n return self[self.get_IDs_from_array(self.x*self.inorg)]\n\n @property\n def organic_solids(self):\n '''[list] Organic solids (particulate & organic) components.'''\n return self[self.get_IDs_from_array(self.x*self.org)]\n\n @property\n def substrates(self):\n '''\n [list] Substrate components.\n '''\n try: return self.__dict__['substrates']\n except:\n raise ValueError('The `substrates` group is not set, '\n 'use `define_group` to define the 
`substrates` group.')\n @substrates.setter\n def substrates(self, i):\n raise RuntimeError('Use `define_group` to define the `substrates` group.')\n\n @property\n def biomass(self):\n '''\n [list] Biomass components.\n '''\n try: return self.__dict__['biomass']\n except:\n raise ValueError('The `biomass` group is not set, '\n 'use `define_group` to define the `biomass` group.')\n @biomass.setter\n def biomass(self, i):\n raise RuntimeError('Use `define_group` to define the `biomass` group.')", "id": "1624450", "language": "Python", "matching_score": 3.0578088760375977, "max_stars_count": 2, "path": "qsdsan/_components.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\nQSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems\n\nThis module is developed by:\n <NAME> <<EMAIL>>\n <NAME> <<EMAIL>>\n\nThis module is under the University of Illinois/NCSA Open Source License.\nPlease refer to https://github.com/QSD-Group/QSDsan/blob/main/LICENSE.txt\nfor license details.\n'''\n\nimport numpy as np\nfrom chemicals.elements import molecular_weight\nfrom thermosteam.reaction import (\n Reaction as Rxn,\n ParallelReaction as PRxn\n )\nfrom . import auom\n\n__all__ = (\n 'cod_test_stoichiometry',\n 'compute_stream_COD',\n 'electron_acceptor_cod',\n 'get_bmp_stoichiometry',\n 'get_cod_stoichiometry',\n 'get_digestion_rxns',\n )\n\n\n#%%\n\ndichromate_oxidizing_elements = ('C', 'H', 'O', 'N', 'S', 'P')\ndichromate_oxidizing_elements_set = frozenset(dichromate_oxidizing_elements)\n\nget_CHONSP = lambda atoms: \\\n [atoms.get(atom) or 0. for atom in dichromate_oxidizing_elements]\n\ndef cod_test_stoichiometry(atoms, charge=0, MW=None, missing_handling='elemental'):\n r'''\n Return a dictionary of stoichiometric coefficients of the oxidation reaction\n by dichromate, given a dictionary of a molecule's constituent atoms and their\n counts, as well as the number of negative charge, if any.\n\n This function is based on the oxidation of organic materials by dichromate in\n an acid solution, as in a typical COD test of water or wastewater samples.\n Only C, H, O, N, S, P are considered active in the reaction.\n\n Parameters\n ----------\n atoms : dict[str, int or float]\n Dictionary of atoms and their counts, [-].\n charge : int or float\n Charge of the molecule.\n MW : float, optional\n Molecular weight of chemical, used only if `missing_handling` is 'Ash',\n [g/mol]\n missing_handling : str, optional\n How to handle compounds which do not appear in the stoichiometric\n reaction below. If 'elemental', return those atoms in the monatomic\n state; if 'ash', converts all missing atoms to 'Ash' in the output at\n a `MW` of 1 g/mol, [-]\n\n Returns\n -------\n stoichiometry : dict[str, float]\n Stoichiometric coefficients of the redox reaction. May include the following\n keys for complete oxidation: 'H2O', 'CO2', 'NH4+', 'SO4-2', 'PO4-3';\n if `missing_handling` is 'elemental' can include the other elements;\n if `missing_handling` is 'ash', Ash will be present in the output\n if the compounds whose reactions are not included here.\n 'Cr2O7-2' is always present, with negative values indicating dichromate is\n required/consumed. [-]\n\n .. note::\n\n The stoichiometry is given by:\n\n .. math::\n C_n H_a O_b N_c S_d P_e^{f-} + xCr_2O_7^{2-} + (8x+c-2d-3e+f)H^{+}\n -> nCO_2 + 2xCr^{3+} + cNH_4^{+} + dSO_4^{2-} + ePO_4^{3-} + (b+7x-2n-4d-4e)H_2O\n .. 
math::\n x = \\frac{4n+a-2b-3c+6d+5e+f}{6}\n\n Also included in the results is the moles of Cr2O7-2 required per mole of\n the mixture of the molecule.\n\n All products are in aqueous solution.\n\n Atoms not in ['C', 'H', 'N', 'O', 'S', 'P'] are returned as pure species;\n i.e. sodium hydroxide produces water and pure Na.\n\n Examples\n --------\n >>> # Acetate in COD test:\n >>> cod_test_stoichiometry({'C': 2, 'H':3, 'O':2}, -1)\n {'Cr2O7-2': -1.3333333333333333,\n 'H+': -11.666666666666666,\n 'Cr+3': 2.6666666666666665,\n 'CO2': 2,\n 'H2O': 7.333333333333332}\n '''\n products = {}\n nC, nH, nO, nN, nS, nP = get_CHONSP(atoms)\n ne = - charge\n\n if nC <= 0 or nH <= 0:\n if not (len(atoms) == 1 and nH == 2): # H2\n return {'Cr2O7-2': 0.}\n\n nCO2 = nC\n nNH4 = nN\n nSO4 = nS\n nPO4 = nP\n nCr2O7 = -(4*nC + nH - 2*nO - 3*nN + 6*nS + 5*nP + ne)/6\n nCr = -2*nCr2O7\n nH2O = nO - 7*nCr2O7 - 2*nC - 4*nS - 4*nP\n n_proton = 8*nCr2O7 - nN + 2*nS + 3*nP - ne\n\n if nCr2O7 != 0.0:\n products['Cr2O7-2'] = nCr2O7\n if n_proton != 0.0:\n products['H+'] = n_proton\n if nCr != 0.0:\n products['Cr+3'] = nCr\n if nCO2 != 0.0:\n products['CO2'] = nCO2\n if nSO4 != 0.0:\n products['SO4-2'] = nSO4\n if nNH4 != 0.0:\n products['NH4+'] = nNH4\n if nPO4 != 0.0:\n products['PO4-3'] = nPO4\n if nH2O != 0.0:\n products['H2O'] = nH2O\n\n missing_handling = missing_handling.lower()\n if missing_handling == 'elemental':\n for atom, value in atoms.items():\n if atom not in dichromate_oxidizing_elements_set:\n products[atom] = value\n elif missing_handling == 'ash':\n cod_atoms = {i: atoms.get(i, 0) for i in dichromate_oxidizing_elements}\n MW = MW or molecular_weight(atoms)\n Ash = MW - molecular_weight(cod_atoms)\n if Ash/MW > 0.0001:\n products['Ash'] = Ash\n else:\n raise ValueError(\"Allowed values for `missing_handling` are 'elemental' and 'ash'.\")\n return products\n\n\ndef electron_acceptor_cod(atoms, charge=0):\n if atoms == {'O':2}:\n return -1\n elif atoms == {'N':2}:\n return -1.5\n elif atoms == {'N':1, 'O':2} and charge == -1:\n return -1.5\n elif atoms == {'N':1, 'O':3} and charge == -1:\n return -2\n\n\ndef get_cod_stoichiometry(component, aqueous=False, **replace):\n r'''\n Get the molar stoichiometry for the theoretical\n chemical oxygen demand (COD) of a given component.\n\n COD stoichiometry is consistent with :func:`qsdsan.utils.cod_test_stoichiometry`\n other than the oxidant is O2 rather than Cr2O7-2,\n\n\n For organic components, elements other than \"C\", \"H\", \"O\", \"N\", \"S\", and \"P\" will\n be turned into \"Ash\" with a molecular weight of 1 g/mol.\n\n For inorganic components, all dict values will be 0.\n\n If `aqueous` == False, the stoichiometry is given by:\n\n .. math::\n C_nH_aO_bN_cS_dP_e + \\frac{2n+0.5a-b-1.5c+3d+2.5e}{2}O_2\n -> nCO_2 + \\frac{a-3c-2d}{2}H_2O + cNH_3 + dH_2SO_4 + \\frac{e}{4}P_4O_{10}\n\n otherwise:\n\n .. 
math::\n C_nH_aO_bN_cS_dP_e + \\frac{2n+0.5a-b-1.5c+3d+2.5e}{2}O_2 + (c-2d-3e)H^+\n -> nCO_2 + \\frac{a-3c-2d-3e}{2}H_2O + cNH_4^+ + dSO_4^{2-} + ePO_4^{3-}\n\n Parameters\n ----------\n component : obj\n The component whose COD will be calculated.\n aqueous : bool\n Whether the reaction will happen in aqueous phase.\n replace : dict\n Alternative IDs of the reactant/product components,\n e.g., if S_O2 is the ID of dissolved oxygen instead of O2,\n then can pass replace={'O2': 'S_O2'}.\n\n Examples\n --------\n >>> from qsdsan import Component\n >>> from qsdsan.utils import get_cod_stoichiometry\n >>> Glucose = Component('Glucose', organic=True, particle_size='Soluble',\n ... degradability='Readily')\n >>> get_cod_stoichiometry(Glucose)\n {'Glucose': -1.0,\n 'O2': -6.0,\n 'CO2': 6,\n 'H2O': 6.0,\n 'NH3': 0.0,\n 'H2SO4': 0.0,\n 'P4O10': 0.0}\n '''\n cmp_ID = component.ID\n atoms = component.atoms\n\n keys = (cmp_ID, 'O2', 'CO2', 'H2O', 'NH3', 'H2SO4', 'P4O10') if not aqueous \\\n else (cmp_ID, 'O2', 'H+', 'CO2', 'H2O', 'NH4+', 'SO42-', 'PO43-')\n dct = dict.fromkeys(keys, 0.)\n\n if atoms and component.organic:\n nC, nH, nO, nN, nS, nP = get_CHONSP(atoms)\n\n dct[cmp_ID] = -1.\n dct['O2'] = -(nC+0.25*nH-0.5*nO-0.75*nN+1.5*nS+1.25*nP)\n dct['CO2'] = nC\n if not aqueous:\n dct['H2O'] = 0.5*nH-1.5*nN-nS\n dct['NH3'] = nN\n dct['H2SO4'] = nS\n dct['P4O10'] = 0.25*nP\n else:\n dct['H+'] = -(nN-2*nS-3*nP)\n dct['H2O'] = 0.5*nH-1.5*nN-nS-1.5*nP\n dct['NH4+'] = nN\n dct['SO42-'] = nS\n dct['PO43-'] = nP\n\n cod_atoms = {i: atoms.get(i, 0) for i in dichromate_oxidizing_elements}\n MW = component.MW or molecular_weight(atoms)\n Ash = MW - molecular_weight(cod_atoms)\n if Ash/MW > 0.0001:\n dct['Ash'] = Ash\n\n for old_ID, new_ID in replace.items():\n dct[new_ID] = dct.pop(old_ID)\n\n return dct\n\n\ndef compute_stream_COD(stream, units='mg/L'):\n r'''\n Compute the chemical oxygen demand (COD) of a given stream\n by summing the COD of each component in the stream using:\n\n .. math::\n COD [\\frac{kg}{m^3}] = mol_{component} [\\frac{kmol}{m^3}] * \\frac{g O_2}{mol\\ component}\n '''\n try:\n COD = stream.COD\n except AttributeError: # not a WasteStream\n cmps = stream.components\n mol = stream.mol\n iCOD = np.array([-get_cod_stoichiometry(i)['O2'] for i in cmps])\n COD = (mol*iCOD).sum()*molecular_weight({'O': 2}) / stream.F_vol\n return auom('mg/L').convert(COD, units)\n\n\ndef get_bmp_stoichiometry(component, **replace):\n r'''\n Compute the theoretical biochemical methane potential (BMP) in\n mol :math:`CH_4`/mol chemical of a given component as in:\n\n .. math::\n C_vH_wO_xN_yS_z + \\frac{4v-w-2x+3y+2z}{2}H2O ->\n \\frac{4v+w-2x-3y-2z}{8}CH4 + \\frac{(4v-w+2x+3y+2z)}{8}CO2 + yNH_3 + zH_2S\n\n For organic components, elements other than \"C\", \"H\", \"O\", \"N\", and \"S\" will\n be turned into \"Ash\" with a molecular weight of 1 g/mol.\n\n For inorganic components, all dict values will be 0.\n\n Parameters\n ----------\n component : obj\n The component whose COD will be calculated.\n replace : dict\n Alternative IDs of the reactant/product components,\n e.g., if S_O2 is the ID of dissolved oxygen instead of O2,\n then can pass replace={'O2': 'S_O2'}.\n\n Examples\n --------\n >>> from qsdsan import Component\n >>> from qsdsan.utils import get_bmp_stoichiometry\n >>> Glucose = Component('Glucose', organic=True, particle_size='Soluble',\n ... 
degradability='Readily')\n >>> get_bmp_stoichiometry(Glucose)\n {'Glucose': -1.0, 'H2O': -0.0, 'CH4': 3.0, 'CO2': 3.0, 'NH3': 0.0, 'H2S': 0.0}\n '''\n cmp_ID = component.ID\n atoms = component.atoms\n keys = (cmp_ID, 'H2O', 'CH4', 'CO2', 'NH3', 'H2S')\n dct = dict.fromkeys(keys, 0.)\n\n if atoms and component.organic and component.formula != 'CH4':\n nC, nH, nO, nN, nS, nP = get_CHONSP(atoms)\n dct[cmp_ID] = -1.\n dct['H2O'] = -(nC-0.25*nH-0.5*nO+0.75*nN+0.5*nS)\n dct['CH4'] = 0.5*nC+0.125*nH-0.25*nO-0.375*nN-0.25*nS\n dct['CO2'] = 0.5*nC-0.125*nH+0.25*nO+0.375*nN+0.25*nS\n dct['NH3'] = nN\n dct['H2S'] = nS\n bmp_atoms = {i: atoms.get(i, 0) for i in dichromate_oxidizing_elements}\n MW = component.MW or molecular_weight(atoms)\n Ash = MW - molecular_weight(bmp_atoms)\n if Ash/MW > 0.0001:\n dct['Ash'] = Ash\n\n for old_ID, new_ID in replace.items():\n dct[new_ID] = dct.pop(old_ID)\n\n return dct\n\n\ndef get_digestion_rxns(components, X_biogas, X_growth, biomass_ID, biodegradability=1.):\n '''\n Generate anaerobic digestion (AD) and biomass growth reactions\n for a given set of components.\n\n AD stoichiometry is based on :func:`qsdsan.utils.get_bmp_stoichiometry`\n and biodegradabilities of the components as indicated in `biodegradability`.\n\n Biomass growth is purely based on mass balance, thus can potentially result\n in loss of atom balance.\n\n No reactions will be generated for inorganic components.\n\n Parameters\n ----------\n components : Iterable(obj)\n Set of components.\n X_biogas : float\n Fraction of the organic components that is used for AD.\n X_growth : float\n Fraction of the organic components that is used for biomass growth.\n biomass_ID : str\n ID of the biomass (should be included in the `components`).\n biodegradability : float or dict\n Biodegradabilities of the components.\n When given as a float, all organic components will be assumed to have the\n same biodegradability;\n when given as a dict, the keys should be the IDs of components and\n values the corresponding biodegradabilities,\n components without corresponding biodegradabilities will be assumed unbiodegradable.\n\n Examples\n --------\n >>> from qsdsan import Component, Components, set_thermo\n >>> from qsdsan.utils import load_example_cmps, get_digestion_rxns\n >>> example_cmps = load_example_cmps()\n >>> NH3 = Component('NH3', phase='g', organic=False, particle_size='Dissolved gas',\n ... degradability='Undegradable')\n >>> H2S = Component('H2S', phase='g', organic=False, particle_size='Dissolved gas',\n ... degradability='Undegradable')\n >>> P4O10 = Component('P4O10', phase='s',\n ... organic=False, particle_size='Particulate',\n ... degradability='Undegradable')\n >>> Biomass = Component('Biomass', phase='s', formula='CH1.8O0.5N0.2',\n ... organic=True, particle_size='Particulate',\n ... degradability='Slowly')\n >>> Ash = Component('Ash', phase='s', MW=1,\n ... organic=False, particle_size='Particulate',\n ... degradability='Undegradable')\n >>> for i in (P4O10, Biomass, Ash):\n ... i.copy_models_from(example_cmps.NaCl, ('V',))\n >>> for i in (Biomass, Ash): i.default() # doctest:+ELLIPSIS\n {...\n >>> cmps = Components([*example_cmps, NH3, H2S, P4O10, Biomass, Ash])\n >>> cmps.compile()\n >>> set_thermo(cmps)\n >>> cmps\n CompiledComponents([H2O, CO2, N2O, NaCl, H2SO4, CH4, Methanol, Ethanol, NH3, H2S, P4O10, Biomass, Ash])\n >>> rxns = get_digestion_rxns(cmps, X_biogas=0.9, X_growth=0.07,\n ... 
biomass_ID='Biomass', biodegradability=0.87)\n >>> rxns # doctest: +SKIP\n ParallelReaction (by mol):\n index stoichiometry reactant X[%]\n [0] Methanol -> 0.5 H2O + 0.25 CO2 + 0.75 CH4 Methanol 78.30\n [1] Ethanol -> 0.5 CO2 + 1.5 CH4 Ethanol 78.30\n [2] Methanol -> 1.3 Biomass Methanol 6.09\n [3] Ethanol -> 1.87 Biomass Ethanol 6.09\n '''\n biomass = getattr(components, biomass_ID)\n biomass_MW = biomass.MW or molecular_weight(biomass.atoms)\n BD = dict.fromkeys(components.IDs, biodegradability) if isinstance(biodegradability, float) \\\n else biodegradability\n\n if X_biogas+X_growth > 1:\n raise ValueError('Sum of `X_biogas`/`X_decomp` and `X_biogas` is '\n f'{X_biogas+X_growth}, larger than 100%.')\n\n biogas_rxns, growth_rxns = [], []\n for i in components:\n ID = i.ID\n\n if ID == biomass_ID:\n continue\n\n X = BD.get(i.ID)\n if not X:\n continue # assume no entry means not biodegradable\n\n biogas_stoyk = get_bmp_stoichiometry(i)\n if not biogas_stoyk.get(i.ID): # no conversion of this chemical\n continue\n\n iX_biogas = X * X_biogas # the amount of component used for biogas production\n iX_growth = X * X_growth # the amount of component used for cell growth\n\n if iX_biogas:\n biogas_rxn = Rxn(reaction=biogas_stoyk, reactant=ID, X=iX_biogas,\n check_atomic_balance=True)\n biogas_rxns.append(biogas_rxn)\n\n if iX_growth:\n growth_rxn = Rxn(f'{i.ID} -> {i.MW/biomass_MW}{biomass_ID}',\n reactant=i.ID, X=iX_growth,\n check_atomic_balance=False)\n growth_rxns.append(growth_rxn)\n\n if len(biogas_rxns)+len(growth_rxns)>1:\n return PRxn(biogas_rxns+growth_rxns)\n\n return []", "id": "10552464", "language": "Python", "matching_score": 2.122488021850586, "max_stars_count": 2, "path": "qsdsan/utils/cod.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\nQSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems\n\nThis module is developed by:\n <NAME> <<EMAIL>>\n\nThis module is under the University of Illinois/NCSA Open Source License.\nPlease refer to https://github.com/QSD-Group/QSDsan/blob/main/LICENSE.txt\nfor license details.\n'''\n\n\nfrom math import ceil, pi\nimport numpy as np\nfrom . import Decay\nfrom .. import SanUnit, Construction, WasteStream\nfrom ..sanunits import HXutility, WWTpump, CSTR\nfrom ..utils import ospath, load_data, data_path, auom, calculate_excavation_volume\n__all__ = (\n 'AnaerobicBaffledReactor',\n 'AnaerobicDigestion',\n 'SludgeDigester',\n 'AnaerobicCSTR'\n )\n\n\n# %%\n\nabr_path = ospath.join(data_path, 'sanunit_data/_anaerobic_baffled_reactor.tsv')\n\nclass AnaerobicBaffledReactor(SanUnit, Decay):\n '''\n Anaerobic baffled reactor with the production of biogas based on\n `Trimmer et al. 
<https://doi.org/10.1021/acs.est.0c03296>`_\n\n To enable life cycle assessment, the following impact items should be pre-constructed:\n `Concrete`, `Gravel`, `Excavation`.\n\n Parameters\n ----------\n ins : Iterable\n Waste for treatment.\n outs : Iterable\n Treated waste, biogas, fugitive CH4, and fugitive N2O.\n degraded_components : tuple\n IDs of components that will degrade (at the same removal as `COD_removal`).\n if_capture_biogas : bool\n If produced biogas will be captured, otherwise it will be treated\n as fugitive CH4.\n if_N2O_emission : bool\n If considering fugitive N2O generated from the degraded N.\n\n Examples\n --------\n `bwaise systems <https://github.com/QSD-Group/EXPOsan/blob/main/exposan/bwaise/systems.py>`_\n\n References\n ----------\n [1] Trimmer et al., Navigating Multidimensional Social–Ecological System\n Trade-Offs across Sanitation Alternatives in an Urban Informal Settlement.\n Environ. Sci. Technol. 2020, 54 (19), 12641–12653.\n https://doi.org/10.1021/acs.est.0c03296.\n\n See Also\n --------\n :ref:`qsdsan.sanunits.Decay <sanunits_Decay>`\n '''\n\n gravel_density = 1600\n\n def __init__(self, ID='', ins=None, outs=(), thermo=None, init_with='WasteStream',\n degraded_components=('OtherSS',), if_capture_biogas=True,\n if_N2O_emission=False, **kwargs):\n\n SanUnit.__init__(self, ID, ins, outs, thermo, init_with, F_BM_default=1)\n self.degraded_components = tuple(degraded_components)\n self.if_capture_biogas = if_capture_biogas\n self.if_N2O_emission = if_N2O_emission\n\n self.construction = (\n Construction('concrete', linked_unit=self, item='Concrete', quantity_unit='m3'),\n Construction('gravel', linked_unit=self, item='Gravel', quantity_unit='kg'),\n Construction('excavation', linked_unit=self, item='Excavation', quantity_unit='m3'),\n )\n\n data = load_data(path=abr_path)\n for para in data.index:\n value = float(data.loc[para]['expected'])\n setattr(self, '_'+para, value)\n del data\n\n for attr, value in kwargs.items():\n setattr(self, attr, value)\n\n _N_ins = 1\n _N_outs = 4\n\n def _run(self):\n waste = self.ins[0]\n treated, biogas, CH4, N2O = self.outs\n treated.copy_like(self.ins[0])\n biogas.phase = CH4.phase = N2O.phase = 'g'\n\n # COD removal\n _COD = waste._COD or waste.COD\n COD_deg = _COD*waste.F_vol/1e3*self.COD_removal # kg/hr\n treated._COD *= (1-self.COD_removal)\n treated.imass[self.degraded_components] *= (1-self.COD_removal)\n\n CH4_prcd = COD_deg*self.MCF_decay*self.max_CH4_emission\n if self.if_capture_biogas:\n biogas.imass['CH4'] = CH4_prcd\n CH4.empty()\n else:\n CH4.imass['CH4'] = CH4_prcd\n biogas.empty()\n\n N_tot = waste.TN/1e3 * waste.F_vol\n N_loss_tot = N_tot * self.N_removal\n NH3_rmd, NonNH3_rmd = \\\n self.allocate_N_removal(N_loss_tot, waste.imass['NH3'])\n treated.imass ['NH3'] = waste.imass['NH3'] - NH3_rmd\n treated.imass['NonNH3'] = waste.imass['NonNH3'] - NonNH3_rmd\n\n if self.if_N2O_emission:\n N2O.imass['N2O'] = N_loss_tot*self.N_max_decay*self.N2O_EF_decay*44/28\n else:\n N2O.empty()\n\n _units = {\n 'Residence time': 'd',\n 'Reactor length': 'm',\n 'Reactor width': 'm',\n 'Reactor height': 'm',\n 'Single reactor volume': 'm3'\n }\n\n def _design(self):\n design = self.design_results\n design['Residence time'] = self.tau\n design['Reactor number'] = N = self.N_reactor\n design['Baffle number'] = N_b = self.N_baffle\n design['Reactor length'] = L = self.reactor_L\n design['Reactor width'] = W = self.reactor_W\n design['Reactor height'] = H = self.reactor_H\n design['Single reactor volume'] = V = L*W*H\n\n 
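        # Editor-added explanatory comments (not part of the original module):
        # the material take-offs in the statements that follow come directly from
        # the reactor geometry computed above (N reactors, N_b baffles, L x W x H,
        # single-reactor volume V = L*W*H).
        #   - Concrete volume: surface area of floor/roof (2*L*W), side walls (2*L*H),
        #     and end walls plus baffles ((2+N_b)*W*H), times the concrete wall
        #     thickness, per reactor, multiplied by the `add_concrete` factor.
        #   - Gravel mass: one chamber's volume, V/(N_b+1), per reactor, times
        #     `gravel_density` [kg/m3].
        #   - Excavation volume: the total reactor volume, N*V.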
constr = self.construction\n concrete = N*self.concrete_thickness*(2*L*W+2*L*H+(2+N_b)*W*H)*self.add_concrete\n constr[0].quantity = concrete\n constr[1].quantity = N*V/(N_b+1) * self.gravel_density\n constr[2].quantity = N * V # excavation\n\n self.add_construction()\n\n\n @property\n def tau(self):\n '''[float] Residence time, [d].'''\n return self._tau\n @tau.setter\n def tau(self, i):\n self._tau = i\n\n @property\n def COD_removal(self):\n '''[float] Fraction of COD removed during treatment.'''\n return self._COD_removal\n @COD_removal.setter\n def COD_removal(self, i):\n self._COD_removal = i\n\n @property\n def N_removal(self):\n '''[float] Fraction of N removed during treatment.'''\n return self._N_removal\n @N_removal.setter\n def N_removal(self, i):\n self._N_removal = i\n\n @property\n def N_reactor(self):\n '''[int] Number of reactors, float will be converted to the smallest integer.'''\n return self._N_reactor\n @N_reactor.setter\n def N_reactor(self, i):\n self._N_reactor = ceil(i)\n\n @property\n def reactor_L(self):\n '''[float] Reactor length, [m].'''\n return self._reactor_L\n @reactor_L.setter\n def reactor_L(self, i):\n self._reactor_L = i\n\n @property\n def reactor_W(self):\n '''[float] Reactor width, [m].'''\n return self._reactor_W\n @reactor_W.setter\n def reactor_W(self, i):\n self._reactor_W = i\n\n @property\n def reactor_H(self):\n '''[float] Reactor height, [m].'''\n return self._reactor_H\n @reactor_H.setter\n def reactor_H(self, i):\n self._reactor_H = i\n\n @property\n def N_baffle(self):\n '''[int] Number of reactors, float will be converted to the smallest integer.'''\n return self._N_baffle\n @N_baffle.setter\n def N_baffle(self, i):\n self._N_baffle = ceil(i)\n\n @property\n def add_concrete(self):\n '''\n [float] Additional concrete as a fraction of the reactor concrete usage\n to account for receiving basin and biogas tank.\n '''\n return self._add_concrete\n @add_concrete.setter\n def add_concrete(self, i):\n self._add_concrete = i\n\n @property\n def concrete_thickness(self):\n '''[float] Thickness of the concrete wall.'''\n return self._concrete_thickness\n @concrete_thickness.setter\n def concrete_thickness(self, i):\n self._concrete_thickness = i\n\n\n# %%\n\nad_path = ospath.join(data_path, 'sanunit_data/_anaerobic_digestion.tsv')\n\nclass AnaerobicDigestion(SanUnit, Decay):\n '''\n Anaerobic digestion of wastes with the production of biogas based on\n `Trimmer et al. 
<https://doi.org/10.1021/acs.est.0c03296>`_\n\n To enable life cycle assessment, the following impact items should be pre-constructed:\n `Concrete`, `Excavation`.\n\n Cost is calculated by the unit cost of the impact items and their quantities.\n\n Parameters\n ----------\n ins : Iterable\n Waste for treatment.\n outs : Iterable\n Treated waste, captured biogas, fugitive CH4, and fugitive N2O.\n flow_rate : float\n Total flow rate through the reactor (for sizing purpose), [m3/d].\n If not provided, will use F_vol_in.\n degraded_components : tuple\n IDs of components that will degrade (at the same removal as `COD_removal`).\n if_capture_biogas : bool\n If produced biogas will be captured, otherwise it will be treated\n as fugitive CH4.\n if_N2O_emission : bool\n If consider N2O emission from N degradation in the process.\n\n Examples\n --------\n `bwaise systems <https://github.com/QSD-Group/EXPOsan/blob/main/exposan/bwaise/systems.py>`_\n\n References\n ----------\n [1] Trimmer et al., Navigating Multidimensional Social–Ecological System\n Trade-Offs across Sanitation Alternatives in an Urban Informal Settlement.\n Environ. Sci. Technol. 2020, 54 (19), 12641–12653.\n https://doi.org/10.1021/acs.est.0c03296.\n\n See Also\n --------\n :ref:`qsdsan.sanunits.Decay <sanunits_Decay>`\n '''\n\n def __init__(self, ID='', ins=None, outs=(), thermo=None, init_with='WasteStream',\n flow_rate=None, degraded_components=('OtherSS',),\n if_capture_biogas=True, if_N2O_emission=False,\n **kwargs):\n SanUnit.__init__(self, ID, ins, outs, thermo, init_with)\n self._flow_rate = flow_rate\n self.degraded_components = tuple(degraded_components)\n self.if_capture_biogas = if_capture_biogas\n self.if_N2O_emission = if_N2O_emission\n\n self.construction = (\n Construction('concrete', linked_unit=self, item='Concrete', quantity_unit='m3'),\n Construction('excavation', linked_unit=self, item='Excavation', quantity_unit='m3'),\n )\n\n data = load_data(path=ad_path)\n for para in data.index:\n value = float(data.loc[para]['expected'])\n setattr(self, '_'+para, value)\n del data\n\n for attr, value in kwargs.items():\n setattr(self, attr, value)\n\n\n _N_ins = 1\n _N_outs = 4\n\n\n def _run(self):\n waste = self.ins[0]\n treated, biogas, CH4, N2O = self.outs\n treated.copy_like(self.ins[0])\n biogas.phase = CH4.phase = N2O.phase = 'g'\n\n # COD removal\n _COD = waste._COD or waste.COD\n COD_deg = _COD*treated.F_vol/1e3*self.COD_removal # kg/hr\n treated._COD *= (1-self.COD_removal)\n treated.imass[self.degraded_components] *= (1-self.COD_removal)\n\n CH4_prcd = COD_deg*self.MCF_decay*self.max_CH4_emission\n if self.if_capture_biogas:\n biogas.imass['CH4'] = CH4_prcd\n CH4.empty()\n else:\n CH4.imass['CH4'] = CH4_prcd\n biogas.empty()\n\n if self.if_N2O_emission:\n N_loss = self.first_order_decay(k=self.decay_k_N,\n t=self.tau/365,\n max_decay=self.N_max_decay)\n N_loss_tot = N_loss*waste.TN/1e3*waste.F_vol\n NH3_rmd, NonNH3_rmd = \\\n self.allocate_N_removal(N_loss_tot, waste.imass['NH3'])\n treated.imass['NH3'] = waste.imass['NH3'] - NH3_rmd\n treated.imass['NonNH3'] = waste.imass['NonNH3'] - NonNH3_rmd\n N2O.imass['N2O'] = N_loss_tot*self.N2O_EF_decay*44/28\n else:\n N2O.empty()\n\n _units = {\n 'Volumetric flow rate': 'm3/hr',\n 'Residence time': 'd',\n 'Single reactor volume': 'm3',\n 'Reactor diameter': 'm',\n 'Reactor height': 'm'\n }\n\n def _design(self):\n design = self.design_results\n design['Volumetric flow rate'] = Q = self.flow_rate\n design['Residence time'] = tau = self.tau\n design['Reactor number'] = 
N = self.N_reactor\n V_tot = Q * tau*24\n\n # One extra as a backup\n design['Single reactor volume'] = V_single = V_tot/(1-self.headspace_frac)/(N-1)\n\n # Rx modeled as a cylinder\n design['Reactor diameter'] = D = (4*V_single*self.aspect_ratio/pi)**(1/3)\n design['Reactor height'] = H = self.aspect_ratio * D\n\n constr = self.construction\n concrete = N*self.concrete_thickness*(2*pi/4*(D**2)+pi*D*H)\n constr[0].quantity = concrete\n constr[1].quantity = V_tot # excavation\n\n self.add_construction()\n\n\n @property\n def flow_rate(self):\n '''\n [float] Total flow rate through the reactor (for sizing purpose), [m3/d].\n If not provided, will calculate based on F_vol_in.\n '''\n return self._flow_rate if self._flow_rate else self.F_vol_in*24\n @flow_rate.setter\n def flow_rate(self, i):\n self._flow_rate = i\n\n @property\n def tau(self):\n '''[float] Residence time, [d].'''\n return self._tau\n @tau.setter\n def tau(self, i):\n self._tau = i\n\n @property\n def COD_removal(self):\n '''[float] Fraction of COD removed during treatment.'''\n return self._COD_removal\n @COD_removal.setter\n def COD_removal(self, i):\n self._COD_removal = i\n\n @property\n def N_reactor(self):\n '''[int] Number of reactors, float will be converted to the smallest integer.'''\n return self._N_reactor\n @N_reactor.setter\n def N_reactor(self, i):\n self._N_reactor = ceil(i)\n\n @property\n def aspect_ratio(self):\n '''[float] Diameter-to-height ratio of the reactor.'''\n return self._aspect_ratio\n @aspect_ratio.setter\n def aspect_ratio(self, i):\n self._aspect_ratio = i\n\n @property\n def headspace_frac(self):\n '''[float] Fraction of the reactor volume for headspace gas.'''\n return self._headspace_frac\n @headspace_frac.setter\n def headspace_frac(self, i):\n self._headspace_frac = i\n\n @property\n def concrete_thickness(self):\n '''[float] Thickness of the concrete wall.'''\n return self._concrete_thickness\n @concrete_thickness.setter\n def concrete_thickness(self, i):\n self._concrete_thickness = i\n\n\n# %%\n\nF_BM_pump = 1.18*(1+0.007/100) # 0.007 is for miscellaneous costs\ndefault_F_BM = {\n 'Pump': F_BM_pump,\n 'Pump building': F_BM_pump,\n }\ndefault_equipment_lifetime = {\n 'Pump': 15,\n 'Pump pipe stainless steel': 15,\n 'Pump stainless steel': 15,\n }\n\nclass SludgeDigester(SanUnit):\n '''\n A conventional digester for anaerobic digestion of sludge as in\n `Shoener et al. 
<https://doi.org/10.1039/C5EE03715H>`_.\n\n Note that the `CompiledComponents` object set in system simulation must\n have defined `active_biomass`.\n\n Parameters\n ----------\n ins : Iterable\n Sludge for digestion.\n outs : Iterable\n Digested sludge, generated biogas.\n HRT : float\n Hydraulic retention time, [d].\n SRT : float\n Solids retention time, [d].\n T : float\n Temperature within the digester, [K].\n Y : float\n Biomass yield, [mg VSS/mg BOD].\n b : float\n Endogenous decay coefficient, [1/d].\n organics_conversion : float\n Conversion of the organics (i.e., COD) of the sludge in fraction (i.e., 0.7 for 70%).\n COD_factor : float\n Biomass-to-COD conversion factor, [g COD/g VSS].\n methane_yield : float\n Methane yield from the digested organics, [m3/kg].\n methane_fraction : float\n Fraction of methane in the biogas, the rest is assumed to be CO2.\n depth : float\n Side depth of the digester, [m].\n heat_transfer_coeff : dict\n Heat transfer coefficients for heat loss calculation, [W/m2/°C],\n keys should contain \"wall\", \"floor\", and \"ceiling\".\n wall_concrete_unit_cost : float\n Unit cost of the wall concrete, [UDS/ft3].\n slab_concrete_unit_cost : float\n Unit cost of the slab concrete, [UDS/ft3].\n excavation_unit_cost : float\n Unit cost of the excavation activity, [UDS/ft3].\n\n References\n ----------\n [1] <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.\n Design of Anaerobic Membrane Bioreactors for the Valorization\n of Dilute Organic Carbon Waste Streams.\n Energy Environ. Sci. 2016, 9 (3), 1102–1112.\n https://doi.org/10.1039/C5EE03715H.\n\n '''\n _N_outs = 2\n \n # All in K\n _T_air = 17 + 273.15\n _T_earth = 10 + 273.15\n\n # All in ft\n _freeboard = 3\n _t_wall = 6/12\n _t_slab = 8/12\n\n # Pump building, all in ft\n _L_PB = 50\n _W_PB = 30\n _D_PB = 10\n\n # Excavation\n _excav_slope = 1.5 # horizontal/vertical\n _constr_access = 3 # ft\n\n auxiliary_unit_names = ('heat_exchanger',)\n\n def __init__(self, ID='', ins=None, outs=(), thermo=None, init_with='WasteStream',\n HRT=20, SRT=20, T=35+273.15, Y=0.08, b=0.03,\n organics_conversion=0.7, COD_factor=1.42,\n methane_yield=0.4, methane_fraction=0.65,\n depth=10,\n heat_transfer_coeff=dict(wall=0.7, floor=1.7, ceiling=0.95),\n wall_concrete_unit_cost=24, # from $650/yd3\n slab_concrete_unit_cost=13, # from $350/yd3\n excavation_unit_cost=0.3, # from $8/yd3\n F_BM=default_F_BM, lifetime=default_equipment_lifetime,\n F_BM_default=1, **kwargs):\n SanUnit.__init__(self, ID, ins, outs, thermo, init_with, F_BM_default=1)\n self.HRT = HRT\n self.SRT = SRT\n self.T = T\n self.Y = Y\n self.b = b\n self.organics_conversion = organics_conversion\n self.COD_factor = COD_factor\n self.methane_yield = methane_yield\n self.methane_fraction = methane_fraction\n self.depth = depth\n self.heat_transfer_coeff = heat_transfer_coeff\n self.heat_exchanger = hx = HXutility(None, None, None, T=T)\n self.heat_utilities = hx.heat_utilities\n self.wall_concrete_unit_cost = wall_concrete_unit_cost\n self.slab_concrete_unit_cost = slab_concrete_unit_cost\n self.excavation_unit_cost = excavation_unit_cost\n self.F_BM.update(F_BM)\n self._default_equipment_lifetime.update(lifetime)\n self.sludge_pump = WWTpump(\n ID=f'{ID}_sludge', ins=self.ins[0].proxy(), pump_type='',\n Q_mgd=None, add_inputs=(1,), capacity_factor=1.,\n include_pump_cost=True, include_building_cost=False,\n include_OM_cost=False)\n\n\n def _run(self):\n sludge, = self.ins\n digested, biogas = self.outs\n digested.T = biogas.T = self.T\n biogas.phase = 'g'\n\n # 
Biogas production estimation based on Example 13-5 of Metcalf & Eddy, 5th edn.\n Y, b, SRT = self.Y, self.b, self.SRT\n organics_conversion, COD_factor = self.organics_conversion, self.COD_factor\n methane_yield, methane_fraction = self.methane_yield, self.methane_fraction\n biomass_COD = sludge.imass['active_biomass'].sum()*1e3*24*1.42 # [g/d], 1.42 converts VSS to COD\n\n digested.mass = sludge.mass\n digested.imass['active_biomass'] = 0 # biomass-derived COD calculated separately\n substrate_COD = digested.COD*24*digested.F_vol # [g/d]\n\n tot_COD = biomass_COD + substrate_COD # [g/d]\n\n digestion_yield = Y*tot_COD*organics_conversion/(1+b*SRT) # [g/d]\n methane_vol = methane_yield*tot_COD*organics_conversion - COD_factor*digestion_yield\n\n # Update stream flows\n digested.imass['substrates'] *= (1-organics_conversion)\n digested.imass['active_biomass'] = \\\n sludge.imass['active_biomass']*(1-organics_conversion)\n\n biogas.empty()\n biogas.ivol['CH4'] = methane_vol\n biogas.ivol['CO2'] = methane_vol/methane_fraction*(1-methane_fraction)\n\n\n _units = {\n 'HRT': 'd',\n 'SRT': 'd',\n 'Volume': 'm3',\n 'Surface area': 'm2',\n 'Diameter': 'm',\n 'Wall concrete': 'ft3',\n 'Slab concrete': 'ft3',\n 'Excavation': 'ft3',\n 'Pump pipe stainless steel': 'kg',\n 'Pump stainless steel': 'kg',\n }\n def _design(self):\n design = self.design_results\n sludge, = self.ins\n Q = sludge.F_vol * 24 # from m3/hr to m3/d\n\n # Dimensions\n design['SRT'] = self.SRT\n HRT = design['HRT'] = self.HRT\n V = design['Volume'] = Q * HRT # m3\n depth = design['depth'] = self.depth # m\n A = design['Surface area'] = V / depth # m2\n dia = design['Diameter']= (A*4/pi) ** 0.5 # m\n\n # Calculate needed heating\n T = self.T\n sludge_T = sludge.T\n sludge_H_in = sludge.H\n sludge.T = T\n sludge_H_at_T = sludge.H\n sludge.T = sludge_T\n duty = sludge_H_at_T - sludge_H_in\n\n # Heat loss\n coeff = self.heat_transfer_coeff\n A_wall = pi * dia * depth\n wall_loss = coeff['wall'] * A_wall * (T-self.T_air) # [W]\n floor_loss = coeff['floor'] * A * (T-self.T_earth) # [W]\n ceiling_loss = coeff['ceiling'] * A * (T-self.T_air) # [W]\n duty += (wall_loss+floor_loss+ceiling_loss)*60*60/1e3 # kJ/hr\n self.heat_exchanger.simulate_as_auxiliary_exchanger(duty, sludge)\n\n # Concrete usage\n ft_2_m = auom('ft').conversion_factor('m')\n design['Wall concrete'] = self.t_wall * pi*(dia*ft_2_m)*(depth*ft_2_m+self.freeboard)\n design['Slab concrete'] = 2 * self.t_slab * A*(ft_2_m**2) # floor and ceiling\n\n # Excavation\n design['Excavation'] = calculate_excavation_volume(\n self.L_PB, self.W_PB, self.D_PB, self.excav_slope, self.constr_access)\n\n # Pump\n sludge_pump = self.sludge_pump\n sludge_pump.simulate()\n design.update(sludge_pump.design_results)\n\n def _cost(self):\n D, C = self.design_results, self.baseline_purchase_costs\n # F_BM, lifetime = self.F_BM, self._default_equipment_lifetime\n C['Wall concrete'] = D['Wall concrete'] * self.wall_concrete_unit_cost\n C['Slab concrete'] = D['Slab concrete'] * self.slab_concrete_unit_cost\n C['Excavation'] = D['Excavation'] * self.excavation_unit_cost\n sludge_pump = self.sludge_pump\n C.update(sludge_pump.baseline_purchase_costs)\n self.power_utility.rate = sludge_pump.power_utility.rate\n\n\n @property\n def T_air(self):\n '''[float] Temperature of the air, [K].'''\n return self._T_air\n @T_air.setter\n def T_air(self, i):\n self._T_air = i\n\n @property\n def T_earth(self):\n '''[float] Temperature of the air, [K].'''\n return self._T_earth\n @T_earth.setter\n def T_earth(self, 
i):\n self._T_earth = i\n\n @property\n def freeboard(self):\n '''[float] Freeboard added to the depth of the reactor tank, [ft].'''\n return self._freeboard\n @freeboard.setter\n def freeboard(self, i):\n self._freeboard = i\n\n @property\n def t_wall(self):\n '''[float] Concrete wall thickness, [ft].'''\n return self._t_wall\n @t_wall.setter\n def t_wall(self, i):\n self._t_wall = i\n\n @property\n def t_slab(self):\n '''\n [float] Concrete slab thickness, [ft],\n default to be 2 in thicker than the wall thickness.\n '''\n return self._t_slab or self.t_wall+2/12\n @t_slab.setter\n def t_slab(self, i):\n self._t_slab = i\n\n @property\n def L_PB(self):\n '''[float] Length of the pump building, [ft].'''\n return self._L_PB\n @L_PB.setter\n def L_PB(self, i):\n self._L_PB = i\n\n @property\n def W_PB(self):\n '''[float] Width of the pump building, [ft].'''\n return self._W_PB\n @W_PB.setter\n def W_PB(self, i):\n self._W_PB = i\n\n @property\n def D_PB(self):\n '''[float] Depth of the pump building, [ft].'''\n return self._D_PB\n @D_PB.setter\n def D_PB(self, i):\n self._D_PB = i\n\n @property\n def excav_slope(self):\n '''[float] Slope for excavation (horizontal/vertical).'''\n return self._excav_slope\n @excav_slope.setter\n def excav_slope(self, i):\n self._excav_slope = i\n\n @property\n def constr_access(self):\n '''[float] Extra room for construction access, [ft].'''\n return self._constr_access\n @constr_access.setter\n def constr_access(self, i):\n self._constr_access = i\n\n# %%\n\nclass AnaerobicCSTR(CSTR):\n \n _N_ins = 1\n _N_outs = 2\n _ins_size_is_fixed = True\n _outs_size_is_fixed = True\n _R = 8.3144598e-2 # Universal gas constant, [bar/M/K]\n \n def __init__(self, ID='', ins=None, outs=(), thermo=None,\n init_with='WasteStream', V_liq=3400, V_gas=300, model=None, \n T=308.15, headspace_P=1.013, external_P=1.013, \n pipe_resistance=5.0e4, fixed_headspace_P=False,\n isdynamic=True, **kwargs):\n \n super().__init__(ID=ID, ins=ins, outs=outs, thermo=thermo,\n init_with=init_with, V_max=V_liq, aeration=None,\n DO_ID=None, suspended_growth_model=None,\n isdynamic=isdynamic, **kwargs)\n self.V_gas = V_gas\n self.T = T\n # self._S_gas = None\n self._q_gas = 0\n self._n_gas = None\n self._gas_cmp_idx = None\n self._state_keys = None\n self._S_vapor = None\n self.model = model\n self._biogas = WasteStream(phase='g')\n self.headspace_P = headspace_P\n self.external_P = external_P\n self.pipe_resistance = pipe_resistance\n self.fixed_headspace_P = fixed_headspace_P\n \n def ideal_gas_law(self, p=None, S=None):\n # p in bar, S in M\n if p: return p/self._R/self.T\n elif S: return S*self._R*self.T\n \n def p_vapor(self, convert_to_bar=True):\n p = self.components.H2O.Psat(self.T)\n if convert_to_bar:\n return p*auom('Pa').conversion_factor('bar')\n else: return p\n \n @property\n def DO_ID(self):\n '''Not applicable.'''\n return None\n @DO_ID.setter\n def DO_ID(self, doid):\n '''Does nothing.'''\n pass\n \n @property\n def aeration(self):\n '''Not applicable'''\n return None\n @aeration.setter\n def aeration(self, ae):\n '''Does nothing.'''\n pass\n \n V_liq = property(CSTR.V_max.fget)\n @V_liq.setter\n def V_liq(self, V):\n CSTR.V_max.fset(self, V)\n \n model = property(CSTR.suspended_growth_model.fget)\n @model.setter\n def model(self, model):\n CSTR.suspended_growth_model.fset(self, model)\n if model is not None:\n #!!! 
how to make unit conversion generalizable to all models?\n self._S_vapor = self.ideal_gas_law(p=self.p_vapor())\n self._n_gas = len(model._biogas_IDs)\n self._state_keys = list(self.components.IDs) \\\n + [ID+'_gas' for ID in self.model._biogas_IDs] \\\n + ['Q', 'T_op']\n self._gas_cmp_idx = self.components.indices(self.model._biogas_IDs)\n self._state_header = self._state_keys\n \n @property\n def split(self):\n '''Not applicable.'''\n return None\n @split.setter\n def split(self, split):\n '''Does nothing.'''\n pass\n \n @property\n def headspace_P(self):\n '''Headspace total pressure [bar].'''\n return self._P_gas\n @headspace_P.setter\n def headspace_P(self, P):\n self._P_gas = P\n \n @property\n def external_P(self):\n '''External (atmospheric) pressure [bar].'''\n return self._P_atm\n @external_P.setter\n def external_P(self, P):\n self._P_atm = P\n \n @property\n def pipe_resistance(self):\n '''Gas pipe resistance coefficient [m3/d/bar].'''\n return self._k_p\n @pipe_resistance.setter\n def pipe_resistance(self, k):\n self._k_p = k\n\n @property\n def fixed_headspace_P(self):\n '''Headspace total pressure [bar].'''\n return self._fixed_P_gas\n @fixed_headspace_P.setter\n def fixed_headspace_P(self, b):\n self._fixed_P_gas = bool(b)\n \n @property\n def state(self):\n '''The state of the anaerobic CSTR, including component concentrations [kg/m3],\n biogas concentrations in the headspace [M biogas], liquid flow rate [m^3/d],\n and temperature [K].'''\n if self._state is None: return None\n else:\n return dict(zip(self._state_keys, self._state))\n\n @state.setter\n def state(self, arr):\n arr = np.asarray(arr)\n n_state = len(self._state_keys)\n if arr.shape != (n_state, ):\n raise ValueError(f'state must be a 1D array of length {n_state}')\n self._state = arr\n\n def _run(self):\n '''Only to converge volumetric flows.'''\n inf, = self.ins\n gas, liquid = self.outs\n liquid.copy_like(inf)\n gas.copy_like(self._biogas)\n if self._fixed_P_gas: \n gas.P = self.headspace_P * auom('bar').conversion_factor('Pa')\n gas.T = self.T\n \n def _init_state(self):\n inf, = self._ins\n Q = inf.get_total_flow('m3/d')\n #!!! 
how to make unit conversion generalizable to all models?\n if self._concs is not None: Cs = self._concs * 1e-3 # mg/L to kg/m3\n else: Cs = inf.conc * 1e-3 # mg/L to kg/m3\n self._state = np.append(Cs, [0]*self._n_gas + [Q, self.T]).astype('float64')\n self._dstate = self._state * 0.\n\n def _update_state(self):\n arr = self._state\n gas, liquid = self._outs\n y = arr.copy()\n i_mass = self.components.i_mass\n chem_MW = self.components.chem_MW\n n_cmps = len(self.components)\n if liquid.state is None:\n liquid.state = np.append(y[:n_cmps]*1e3, y[-2])\n else:\n liquid.state[:n_cmps] = y[:n_cmps]*1e3 # kg/m3 to mg/L\n liquid.state[-1] = y[-2]\n if gas.state is None:\n gas.state = np.zeros(n_cmps+1)\n gas.state[self._gas_cmp_idx] = y[n_cmps:(n_cmps + self._n_gas)]\n gas.state[self.components.index('H2O')] = self._S_vapor\n gas.state[-1] = self._q_gas\n gas.state[:n_cmps] = gas.state[:n_cmps] * chem_MW / i_mass # i.e., M biogas to g (measured_unit) / L\n\n def _update_dstate(self):\n arr = self._dstate\n gas, liquid = self._outs\n dy = arr.copy()\n n_cmps = len(self.components)\n if liquid.dstate is None:\n liquid.dstate = np.append(dy[:n_cmps]*1e3, dy[-2])\n else:\n liquid.dstate[:n_cmps] = dy[:n_cmps]*1e3\n liquid.dstate[-1] = dy[-2]\n if gas.dstate is None:\n # contains no info on dstate\n gas.dstate = np.zeros(n_cmps+1)\n\n \n def f_q_gas_fixed_P_headspace(self, rhoTs, S_gas, T):\n cmps = self.components\n gas_mass2mol_conversion = (cmps.i_mass / cmps.chem_MW)[self._gas_cmp_idx]\n self._q_gas = self._R*T/(self._P_gas-self.p_vapor(convert_to_bar=True))\\\n *self.V_liq*sum(rhoTs*gas_mass2mol_conversion)\n return self._q_gas\n\n def f_q_gas_var_P_headspace(self, rhoTs, S_gas, T):\n p_gas = S_gas * self._R * T\n self._P_gas = P = sum(p_gas) + self.p_vapor(convert_to_bar=True)\n self._q_gas = self._k_p * (P - self._P_atm) * P/self._P_atm # converted to gas flowrate at atm pressure\n return self._q_gas\n\n @property\n def ODE(self):\n if self._ODE is None:\n self._compile_ODE()\n return self._ODE\n \n def _compile_ODE(self):\n if self._model is None:\n CSTR._compile_ODE(self)\n else:\n cmps = self.components\n _dstate = self._dstate\n _update_dstate = self._update_dstate\n _f_rhos = self.model.rate_function\n _f_param = self.model.params_eval\n _M_stoichio = self.model.stoichio_eval\n n_cmps = len(cmps)\n n_gas = self._n_gas\n V_liq = self.V_liq\n V_gas = self.V_gas\n gas_mass2mol_conversion = (cmps.i_mass / cmps.chem_MW)[self._gas_cmp_idx]\n if self._fixed_P_gas:\n f_qgas = self.f_q_gas_fixed_P_headspace\n else:\n f_qgas = self.f_q_gas_var_P_headspace\n def dy_dt(t, QC_ins, QC, dQC_ins):\n S_liq = QC[:n_cmps]\n S_gas = QC[n_cmps: (n_cmps+n_gas)]\n Q, T = QC[-2:]\n S_in = QC_ins[0,:-1] * 1e-3 # mg/L to kg/m3\n Q_in = QC_ins[0,-1]\n _f_param(QC)\n M_stoichio = _M_stoichio()\n rhos = _f_rhos(QC)\n # liquid-phase mass balance: advection plus stoichiometry-weighted reaction rates\n _dstate[:n_cmps] = (Q_in*S_in - Q*S_liq)/V_liq + np.dot(M_stoichio.T, rhos)\n q_gas = f_qgas(rhos[-3:], S_gas, T)\n _dstate[n_cmps: (n_cmps+n_gas)] = - q_gas*S_gas/V_gas \\\n + rhos[-3:] * V_liq/V_gas * gas_mass2mol_conversion\n _dstate[-2] = dQC_ins[0,-1]\n _dstate[-1] = 0\n # !!! 
currently no info on dT/dt\n _update_dstate()\n self._ODE = dy_dt", "id": "2548614", "language": "Python", "matching_score": 6.592216968536377, "max_stars_count": 2, "path": "qsdsan/sanunits/_anaerobic_reactors.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\nQSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems\n\nThis module is developed by:\n <NAME> <<EMAIL>>\n\nThis module is under the University of Illinois/NCSA Open Source License.\nPlease refer to https://github.com/QSD-Group/QSDsan/blob/main/LICENSE.txt\nfor license details.\n'''\n\n\n# %%\n\nfrom warnings import warn\nfrom math import ceil\nfrom . import Decay\nfrom .. import SanUnit, Construction\nfrom ..utils import ospath, load_data, data_path\n\n__all__ = ('Lagoon',)\n\nanaerobic_path = ospath.join(data_path, 'sanunit_data/_anaerobic_lagoon.tsv')\nfacultative_path = ospath.join(data_path, 'sanunit_data/_facultative_lagoon.tsv')\n\nclass Lagoon(SanUnit, Decay):\n '''\n Anaerobic and facultative lagoon treatment based on\n `Trimmer et al. <https://doi.org/10.1021/acs.est.0c03296>`_\n\n To enable life cycle assessment, the following impact items should be pre-constructed:\n `Plastic`, `Excavation`.\n\n Parameters\n ----------\n ins : WasteStream\n Waste for treatment.\n outs : WasteStream\n Treated waste, fugitive CH4, and fugitive N2O.\n design_type : str\n Can be \"anaerobic\" or \"facultative\".\n flow_rate : float\n Total flow rate through the lagoon (to calculate retention time), [m3/d].\n If not provided, will use F_vol_in.\n degraded_components : tuple\n IDs of components that will degrade (at the same removal as `COD_removal`).\n if_N2O_emission : bool\n If consider N2O emission from N degradation in the process.\n\n Examples\n --------\n `bwaise systems <https://github.com/QSD-Group/EXPOsan/blob/main/exposan/bwaise/systems.py>`_\n\n References\n ----------\n [1] <NAME> al., Navigating Multidimensional Social–Ecological System\n Trade-Offs across Sanitation Alternatives in an Urban Informal Settlement.\n Environ. Sci. Technol. 
2020, 54 (19), 12641–12653.\n https://doi.org/10.1021/acs.est.0c03296.\n\n See Also\n --------\n :ref:`qsdsan.sanunits.Decay <sanunits_Decay>`\n '''\n\n def __init__(self, ID='', ins=None, outs=(), thermo=None, init_with='WasteStream',\n design_type='anaerobic', flow_rate=None, degraded_components=('OtherSS',),\n if_N2O_emission=False, **kwargs):\n\n SanUnit.__init__(self, ID, ins, outs, thermo, init_with)\n self._tau = None\n self._P_removal = 0.\n self._anaerobic_defaults = load_data(path=anaerobic_path)\n self._facultative_defaults = load_data(path=facultative_path)\n self._design_type = None\n self.design_type = design_type\n self._flow_rate = flow_rate\n self.degraded_components = tuple(degraded_components)\n self.if_N2O_emission = if_N2O_emission\n\n self.construction = (\n Construction('liner', linked_unit=self, item='Plastic', quantity_unit='kg'),\n Construction('excavation', linked_unit=self, item='Excavation', quantity_unit='m3'),\n )\n\n for attr, value in kwargs.items():\n setattr(self, attr, value)\n\n _N_ins = 1\n _N_outs = 3\n\n def _run(self):\n waste = self.ins[0]\n treated, CH4, N2O = self.outs\n CH4.phase = N2O.phase = 'g'\n\n treated.copy_like(waste)\n removed_frac = self.COD_removal*self.COD_decay\n treated.imass[self.degraded_components] *= 1 - self.COD_removal\n\n _COD = waste._COD or waste.COD\n CH4.imass['CH4'] = removed_frac*_COD*waste.F_vol/1e3 * \\\n self.MCF_decay*self.max_CH4_emission\n\n if self.if_N2O_emission:\n N_loss = self.first_order_decay(k=self.decay_k_N,\n t=self.tau/365,\n max_decay=self.N_max_decay)\n N_loss_tot = N_loss*waste.TN/1e3*waste.F_vol\n NH3_rmd, NonNH3_rmd = \\\n self.allocate_N_removal(N_loss_tot, waste.imass['NH3'])\n treated.imass ['NH3'] -= NH3_rmd\n treated.imass['NonNH3'] -= NonNH3_rmd\n N2O.imass['N2O'] = N_loss_tot*self.N2O_EF_decay*44/28\n else:\n N2O.empty()\n\n treated.imass['P'] *= 1 - self.P_removal\n treated._COD = _COD*waste.F_vol*(1-self.COD_removal)/treated.F_vol\n\n _units = {\n 'Single lagoon volume': 'm3',\n 'Lagoon length': 'm',\n 'Lagoon width': 'm',\n 'Lagoon depth': 'm'\n }\n\n def _design(self):\n design = self.design_results\n design['Lagoon number'] = N = self.N_lagoon\n design['Single lagoon volume'] = V = self.lagoon_V\n design['Lagoon length'] = L = self.lagoon_L\n design['Lagoon width'] = W = self.lagoon_W\n design['Lagoon depth'] = depth = V / (L*W)\n\n liner = (L*W + 2*depth*(L+W)) * N * self.liner_unit_mass\n constr = self.construction\n constr[0].quantity = liner\n constr[1].quantity = N * V # excavation\n\n self.add_construction(add_cost=False)\n\n @property\n def design_type(self):\n '''[str] Lagoon type, can be either \"anaerobic\" or \"facultative\".'''\n return self._design_type\n @design_type.setter\n def design_type(self, i):\n if i == self._design_type: pass\n else:\n if i == 'anaerobic':\n data = self._anaerobic_defaults\n self.line = 'Anaerobic lagoon'\n elif i == 'facultative':\n data = self._facultative_defaults\n self.line = 'Facultative lagoon'\n else:\n raise ValueError('`design_type` can only be \"anaerobic\" or \"facultative\", '\n f'not {i}.')\n for para in data.index:\n value = float(data.loc[para]['expected'])\n setattr(self, para, value)\n self._design_type = i\n\n @property\n def COD_removal(self):\n '''[float] Fraction of COD removed during treatment.'''\n return self._COD_removal\n @COD_removal.setter\n def COD_removal(self, i):\n self._COD_removal = i\n\n @property\n def COD_decay(self):\n '''[float] Fraction of removed COD that decays.'''\n return self._COD_decay\n 
@COD_decay.setter\n def COD_decay(self, i):\n self._COD_decay = i\n\n @property\n def P_removal(self):\n '''[float] Fraction of P removed during treatment.'''\n return self._P_removal\n @P_removal.setter\n def P_removal(self, i):\n self._P_removal = i\n\n @property\n def N_lagoon(self):\n '''[int] Number of lagoons, float will be converted to the smallest integer.'''\n return self._N_lagoon\n @N_lagoon.setter\n def N_lagoon(self, i):\n self._N_lagoon = ceil(i)\n\n @property\n def flow_rate(self):\n '''\n [float] Total flow rate through the lagoon (to calculate retention time), [m3/d].\n If not provided, will calculate based on F_vol_in.\n '''\n return self._flow_rate if self._flow_rate else self.F_vol_in*24\n @flow_rate.setter\n def flow_rate(self, i):\n self._flow_rate = i\n\n @property\n def tau(self):\n '''[float] Residence time, [d].'''\n if self._lagoon_V:\n return self._lagoon_V*self.N_lagoon/self.flow_rate\n else:\n return self._tau\n @tau.setter\n def tau(self, i):\n if self._lagoon_V:\n msg = f'Residence time set, the original lagoon volume of {self._lagoon_V} m3 is ignored.'\n warn(msg, source=self)\n self._lagoon_V = None\n self._tau = i\n\n @property\n def lagoon_V(self):\n '''[float] Volume of the lagoon, [m3].'''\n if self._tau:\n return self._tau*(self.F_vol_in)*24/self.N_lagoon\n else:\n return self._lagoon_V\n @lagoon_V.setter\n def lagoon_V(self, i):\n if self._tau:\n msg = f'Lagoon volume set, the original residence time of {self._tau} d is ignored.'\n warn(msg, source=self)\n self._tau = None\n self._lagoon_V = i\n\n @property\n def lagoon_L(self):\n '''[float] Length of the lagoon, [m].'''\n return self._lagoon_L\n @lagoon_L.setter\n def lagoon_L(self, i):\n self._lagoon_L = i\n\n @property\n def lagoon_W(self):\n '''[float] Width of the lagoon, [m].'''\n return self._lagoon_W\n @lagoon_W.setter\n def lagoon_W(self, i):\n self._lagoon_W = i\n\n @property\n def liner_unit_mass(self):\n '''[float] Unit mass of the lagoon liner, [kg/m2].'''\n return self._liner_unit_mass\n @liner_unit_mass.setter\n def liner_unit_mass(self, i):\n self._liner_unit_mass = i", "id": "5340584", "language": "Python", "matching_score": 6.068627834320068, "max_stars_count": 2, "path": "qsdsan/sanunits/_lagoon.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\nQSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems\n\nThis module is developed by:\n <NAME> <<EMAIL>>\n\nThis module is under the University of Illinois/NCSA Open Source License.\nPlease refer to https://github.com/QSD-Group/QSDsan/blob/main/LICENSE.txt\nfor license details.\n'''\n\n\n# %%\n\nfrom math import ceil, pi, cos\nfrom . import Decay, SludgeSeparator\nfrom .. import Construction\nfrom ..utils import ospath, load_data, data_path\n\n__all__ = ('Sedimentation',)\n\nsedmentation_path = ospath.join(data_path, 'sanunit_data/_sedimentation_tank.tsv')\n\n\nclass Sedimentation(SludgeSeparator, Decay):\n '''\n Sedimentation of wastes into liquid and solid phases based on\n `Trimmer et al. 
<https://doi.org/10.1021/acs.est.0c03296>`_\n\n To enable life cycle assessment, the following impact items should be pre-constructed:\n `Concrete`, `Steel`.\n\n Parameters\n ----------\n ins : WasteStream\n Waste for treatment.\n outs : WasteStream\n Liquid, settled solids, fugitive CH4, and fugitive N2O.\n degraded_components : tuple\n IDs of components that will degrade (simulated by first-order decay).\n if_N2O_emission : bool\n If consider N2O emission from N degradation in the process.\n\n Examples\n --------\n `bwaise systems <https://github.com/QSD-Group/EXPOsan/blob/main/exposan/bwaise/systems.py>`_\n\n References\n ----------\n [1] Trimmer et al., Navigating Multidimensional Social–Ecological System\n Trade-Offs across Sanitation Alternatives in an Urban Informal Settlement.\n Environ. Sci. Technol. 2020, 54 (19), 12641–12653.\n https://doi.org/10.1021/acs.est.0c03296.\n\n See Also\n --------\n :ref:`qsdsan.sanunits.Decay <sanunits_Decay>`\n '''\n\n def __init__(self, ID='', ins=None, outs=(),thermo=None, init_with='WasteStream',\n split=None, settled_frac=None,\n degraded_components=('OtherSS',), if_N2O_emission=False, **kwargs):\n\n SludgeSeparator.__init__(self, ID, ins, outs, thermo, init_with,\n split, settled_frac, F_BM_default=1)\n self.degraded_components = tuple(degraded_components)\n self.if_N2O_emission = if_N2O_emission\n\n self.construction = (\n Construction('concrete', linked_unit=self, item='Concrete', quantity_unit='m3'),\n Construction('steel', linked_unit=self, item='Steel', quantity_unit='kg'),\n )\n\n data = load_data(path=sedmentation_path)\n for para in data.index:\n value = float(data.loc[para]['expected'])\n setattr(self, '_'+para, value)\n del data\n\n for attr, value in kwargs.items():\n setattr(self, attr, value)\n\n _N_ins = 1\n _N_outs = 4\n\n def _run(self):\n waste = self.ins[0]\n liq, sol, CH4, N2O = self.outs\n CH4.phase = N2O.phase = 'g'\n\n # Retention in the settled solids\n SludgeSeparator._run(self)\n\n # COD degradation in settled solids\n COD_loss = self.first_order_decay(k=self.decay_k_COD,\n t=self.tau/365,\n max_decay=self.COD_max_decay)\n\n _COD = sol._COD or sol.COD\n tot_COD_kg = _COD * sol.F_vol / 1e3\n sol.imass[self.degraded_components] *= 1 - COD_loss\n\n # Adjust total mass of of the settled solids by changing water content\n liq, sol = self._adjust_solid_water(waste, liq, sol)\n\n COD_loss_kg = tot_COD_kg * COD_loss\n CH4.imass['CH4'] = COD_loss_kg * self.max_CH4_emission * self.MCF_decay\n sol._COD = tot_COD_kg*(1-COD_loss)/sol.F_vol*1e3\n\n # N degradation\n if self.if_N2O_emission:\n N_loss = self.first_order_decay(k=self.decay_k_N,\n t=self.tau/365,\n max_decay=self.N_max_decay)\n N_loss_tot = N_loss*sol.TN/1e3*sol.F_vol\n NH3_rmd, NonNH3_rmd = \\\n self.allocate_N_removal(N_loss_tot, sol.imass['NH3'])\n sol.imass ['NH3'] -= NH3_rmd\n sol.imass['NonNH3'] -= NonNH3_rmd\n N2O.imass['N2O'] = N_loss_tot*self.N2O_EF_decay*44/28\n else:\n N2O.empty()\n\n _units = {\n 'Single tank volume': 'm3',\n 'Single tank height': 'm',\n 'Single tank width': 'm',\n 'Single tank length': 'm',\n 'Single roof area': 'm2'\n }\n\n def _design(self):\n design = self.design_results\n # `tau` not used, might be that working volume fraction not known\n design['Tank number'] = N = self.N_tank\n design['Single tank volume'] = V_single = self.tank_V\n L2W = self.tank_L_to_W\n W2H = self.tank_W_to_H\n design['Single tank height'] = H = (V_single/(L2W*(W2H**2)))**(1/3)\n design['Single tank width'] = W = H * W2H\n design['Single tank length'] = L = W * 
L2W\n design['Single roof area'] = N*L*W/(cos(self.roof_slope/180*pi))\n side_area = N*2*(L*H + W*H)\n\n # Concrete\n thick = self.concrete_thickness\n side_concrete = N*thick*(L*W+2*W*H+2*L*H)\n column_concrete = N*(thick**2)*H*self.column_per_side*2\n\n constr = self.construction\n constr[0].quantity = side_concrete + column_concrete\n constr[1].quantity = (design['Single roof area']+side_area) * self.roof_unit_mass # steel\n\n self.add_construction()\n\n @property\n def tau(self):\n '''[float] Residence time, [d].'''\n return self._tau\n @tau.setter\n def tau(self, i):\n self._tau = i\n\n @property\n def tank_V(self):\n '''[float] Volume of the sedimentation tank.'''\n return self._tank_V\n @tank_V.setter\n def tank_V(self, i):\n self._tank_V = i\n\n @property\n def tank_L_to_W(self):\n '''[float] Length-to-width ratio of the sedimentation tank.'''\n return self._tank_L_to_W\n @tank_L_to_W.setter\n def tank_L_to_W(self, i):\n self._tank_L_to_W = i\n\n @property\n def tank_W_to_H(self):\n '''[float] Width-to-height ratio of the sedimentation tank.'''\n return self._tank_W_to_H\n @tank_W_to_H.setter\n def tank_W_to_H(self, i):\n self._tank_W_to_H = i\n\n @property\n def N_tank(self):\n '''[int] Number of sedimentation tanks, float will be converted to the smallest integer.'''\n return self._N_tank\n @N_tank.setter\n def N_tank(self, i):\n self._N_tank = ceil(i)\n\n @property\n def column_per_side(self):\n '''[int] Number of columns per side of sedimentation tanks, float will be converted to the smallest integer.'''\n return self._column_per_side\n @column_per_side.setter\n def column_per_side(self, i):\n self._column_per_side = ceil(i)\n\n @property\n def concrete_thickness(self):\n '''[float] Thickness of the concrete wall.'''\n return self._concrete_thickness\n @concrete_thickness.setter\n def concrete_thickness(self, i):\n self._concrete_thickness = i\n\n @property\n def roof_slope(self):\n '''[float] Slope of the tank roof, [°].'''\n return self._roof_slope\n @roof_slope.setter\n def roof_slope(self, i):\n self._roof_slope = i\n\n @property\n def roof_unit_mass(self):\n '''[float] Unit mass of the tank roof, [kg/m2].'''\n return self._roof_unit_mass\n @roof_unit_mass.setter\n def roof_unit_mass(self, i):\n self._roof_unit_mass = i", "id": "7280437", "language": "Python", "matching_score": 1.661344051361084, "max_stars_count": 2, "path": "qsdsan/sanunits/_sedimentation.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\nQSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems\n\nThis module is developed by:\n <NAME> <<EMAIL>>\n\nThis module is under the University of Illinois/NCSA Open Source License.\nPlease refer to https://github.com/QSD-Group/QSDsan/blob/main/LICENSE.txt\nfor license details.\n'''\n\n\n# %%\n\nimport pandas as pd\nfrom thermosteam.utils import registered\nfrom . 
import currency, ImpactItem\nfrom .utils import (\n auom, copy_attr,\n format_number as f_num,\n register_with_prefix,\n )\n\n__all__ = ('Construction',)\n\n\n@registered(ticket_name='Constr')\nclass Construction:\n '''\n Construction activity for cost and environmental impact calculations.\n\n Parameters\n ----------\n ID : str\n ID of this construction activity,\n a default ID will be given if not provided.\n If this construction activity is linked to a unit,\n then the actual ID will be {unit.ID}_{ID}.\n linked_unit : obj\n Unit that this construction activity is linked to, can be left as None.\n item : :class:`ImpactItem`\n Impact item associated with this construction activity.\n quantity : float\n Quantity of the impact item involved in this construction activity.\n lifetime : float\n Lifetime of the constructed item.\n lifetime_unit : str\n Unit of the lifetime.\n\n Examples\n --------\n >>> import qsdsan as qs\n >>> # Make impact indicators\n >>> GWP = qs.ImpactIndicator('GlobalWarming', alias='GWP', unit='kg CO2-eq')\n >>> FEC = qs.ImpactIndicator('FossilEnergyConsumption', alias='FEC', unit='MJ')\n >>> # Make impact item\n >>> Steel = qs.ImpactItem('Steel', 'kg', GWP=2.55, FEC=0.5)\n >>> # Make a construction activity that uses 100 g of steel\n >>> steel_100_g = qs.Construction('steel_100_g', item=Steel, quantity=100,\n ... quantity_unit='g')\n >>> steel_100_g.show()\n Construction : steel_100_g\n Impact item : Steel\n Lifetime : None yr\n Quantity : 0.1 kg\n Total cost : None USD\n Total impacts:\n Impacts\n GlobalWarming (kg CO2-eq) 0.255\n FossilEnergyConsumption (MJ) 0.05\n >>> # Registry management (construction activities will be auto-registered)\n >>> steel_100_g.deregister()\n The construction activity \"steel_100_g\" has been removed from the registry.\n >>> steel_100_g.register()\n The construction activity \"steel_100_g\" has been added to the registry.\n >>> Construction.clear_registry()\n All construction activities have been removed from registry.\n '''\n\n __slots__ = ('_ID', '_linked_unit', '_item', '_quantity', '_lifetime')\n\n def __init__(self, ID='', linked_unit=None, item=None, quantity=0., quantity_unit='',\n lifetime=None, lifetime_unit='yr'):\n self._linked_unit = linked_unit\n prefix = self.linked_unit.ID if self.linked_unit else ''\n register_with_prefix(self, prefix, ID)\n self.item = item\n self._update_quantity(quantity, quantity_unit)\n self._lifetime = None\n if lifetime:\n self._lifetime = auom(lifetime_unit).convert(lifetime, 'yr')\n\n def _update_quantity(self, quantity=0., quantity_unit=''):\n if not quantity_unit or quantity_unit == self.item.functional_unit:\n self._quantity = float(quantity)\n else:\n converted = auom(quantity_unit).convert(float(quantity), self.item.functional_unit)\n self._quantity = converted\n\n def __repr__(self):\n return f'<Construction: {self.ID}>'\n\n def show(self):\n '''Show basic information about this :class:`Construction` object.'''\n item = self.item\n impacts = self.impacts\n info = f'Construction : {self.ID}'\n info += f'\\nImpact item : {item.ID}'\n info += f'\\nLifetime : {f_num(self.lifetime)} yr'\n info += f'\\nQuantity : {f_num(self.quantity)} {item.functional_unit}'\n info += f'\\nTotal cost : {f_num(self.cost)} {currency}'\n info += '\\nTotal impacts:'\n print(info)\n if len(impacts) == 0:\n print(' None')\n else:\n index = pd.Index((i.ID+' ('+i.unit+')' for i in self.indicators))\n df = pd.DataFrame({\n 'Impacts': tuple(self.impacts.values())\n },\n index=index)\n # print(' 
'*15+df.to_string().replace('\n', '\n'+' '*15))\n print(df.to_string())\n\n _ipython_display_ = show\n\n def copy(self, new_ID='', skip_item=True, **kwargs):\n new = Construction.__new__(Construction)\n new.__init__(new_ID, **kwargs)\n if skip_item:\n new = copy_attr(new, self, skip=('_ID', '_item'))\n new.item = self.item\n else:\n new = copy_attr(new, self, skip=('_ID',))\n return new\n\n __copy__ = copy\n\n def register(self, print_msg=True):\n '''Add this construction activity to the registry.'''\n self.registry.register_safely(self.ID, self)\n if print_msg:\n print(f'The construction activity \"{self.ID}\" has been added to the registry.')\n\n def deregister(self, print_msg=True):\n '''Remove this construction activity from the registry.'''\n self.registry.discard(self.ID)\n if print_msg:\n print(f'The construction activity \"{self.ID}\" has been removed from the registry.')\n\n @classmethod\n def clear_registry(cls, print_msg=True):\n '''Remove all existing construction activities from the registry.'''\n cls.registry.clear()\n if print_msg:\n print('All construction activities have been removed from registry.')\n\n @property\n def linked_unit(self):\n '''\n :class:`~.SanUnit` The unit that this construction activity belongs to.\n\n .. note::\n\n This property will be updated upon initialization of the unit.\n '''\n return self._linked_unit\n\n @property\n def lifetime(self):\n '''[float] Lifetime of this construction activity.'''\n return self._lifetime\n @lifetime.setter\n def lifetime(self, lifetime, unit='yr'):\n if lifetime is None:\n self._lifetime = lifetime\n else:\n self._lifetime = auom(unit).convert(lifetime, 'yr')\n\n @property\n def item(self):\n '''[:class:`ImpactItem`] The impact item associated with this construction activity.'''\n return self._item\n @item.setter\n def item(self, i):\n if not i:\n i = None\n elif isinstance(i, str):\n i = ImpactItem.get_item(i) or ImpactItem(i) # add a filler to enable simulation without LCA\n elif not isinstance(i, ImpactItem):\n raise TypeError('Only `ImpactItem` or the ID of `ImpactItem` can be set, '\n f'not {type(i).__name__}.')\n self._item = i\n\n @property\n def indicators(self):\n '''[tuple] Impact indicators associated with the construction item.'''\n return self.item.indicators\n\n @property\n def quantity(self):\n '''[float] Quantity of this construction item.'''\n return self._quantity\n @quantity.setter\n def quantity(self, quantity, unit=''):\n self._update_quantity(quantity, unit)\n\n @property\n def price(self):\n '''[float] Unit price of the item.'''\n return self.item.price\n\n @property\n def cost(self):\n '''[float] Total cost of this construction item.'''\n return self.quantity*self.price\n\n @property\n def impacts(self):\n '''[dict] Total impacts of this construction activity over its lifetime.'''\n impacts = {}\n for indicator, CF in self.item.CFs.items():\n impacts[indicator] = self.quantity*CF\n return impacts", "id": "1682185", "language": "Python", "matching_score": 3.403799295425415, "max_stars_count": 2, "path": "qsdsan/_construction.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n'''\nQSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems\n\nThis module is developed by:\n <NAME> <<EMAIL>>\n\nThis module is under the University of Illinois/NCSA Open Source License.\nPlease refer to https://github.com/QSD-Group/QSDsan/blob/main/LICENSE.txt\nfor license details.\n'''\n\nfrom datetime import timedelta\nfrom biosteam.utils import TicToc\n\n__all__ = (\n    
'copy_attr',\n 'ords',\n 'clear_lca_registries',\n 'register_with_prefix',\n 'time_printer',\n )\n\n\ndef copy_attr(new, original, skip=(), same=(), slots=None):\n '''\n Set the attributes of a new object based on an original one:\n\n - If one attribute is in `skip`, it will not be copied to the new object.\n - If one attribute is in `same`, the attribute of the new object will be \\\n the same as the original object.\n - For remaining attributes, if it has :func:`copy`, then the attribute \\\n of the new object will be set as the copy of the original one; otherwise, \\\n it will be the same as the original one.\n\n Parameters\n ----------\n new : obj\n The new object.\n origin : obj\n The original object.\n skip : Iterable\n Attributes that will not be copied.\n same : Iterable\n Attributes that will be the same for the original one and the copy.\n slots : Iterable[str]\n All fields of the original object, will be set to `original.__slots__` if not provided.\n '''\n slots = slots or original.__slots__\n for slot in slots:\n if slot in skip:\n continue\n else:\n value = getattr(original, slot)\n if slot in same:\n setattr(new, slot, value)\n return new\n else:\n if hasattr(value, 'copy'):\n new_value = value.copy()\n else:\n new_value = value\n setattr(new, slot, new_value)\n return new\n\n\ndef ords(string):\n '''\n Return the sum of Unicode of a string, more for fun.\n\n Examples\n --------\n >>> from qsdsan.utils import ords\n >>> ords('QSDsan')\n 554\n '''\n string = str(string)\n added = sum(ord(i) for i in string)\n return added\n\n\ndef clear_lca_registries(print_msg=False):\n '''\n Clear registries related to LCA, including instances of\n :class:`~.ImpactIndicator`, :class:`~.ImpactItem`, :class:`~.Construction`,\n and :class:`~.Transportation`\n\n Parameters\n ----------\n print_msg : bool\n Whether to print registry clear notice.\n\n Examples\n --------\n >>> from qsdsan.utils import clear_lca_registries\n >>> clear_lca_registries(True)\n All impact indicators have been removed from registry.\n All impact items have been removed from registry.\n All construction activities have been removed from registry.\n All transportation activities have been removed from registry.\n '''\n # Only import when this function is called to avoid circular import during package initialization\n from qsdsan import ImpactIndicator, ImpactItem, Construction, Transportation\n for lca_cls in (ImpactIndicator, ImpactItem, Construction, Transportation):\n lca_cls.clear_registry(print_msg)\n\n\ndef register_with_prefix(obj, prefix, ID):\n '''\n Register the object with a prefix (and a \"_\" between the prefix and the ID).\n\n Parameters\n ----------\n obj : obj\n The object to be registered, must has the `registry` attribute.\n prefix : str\n Prefix of the ID.\n ID : str\n The original ID.\n '''\n registry = obj.registry\n if ID == '' or None:\n data = registry.data\n ID = obj._take_ticket()\n full_ID = prefix+'_'+ID if prefix else ID\n while full_ID in data:\n ID = obj._take_ticket()\n full_ID = prefix+'_'+ID if prefix else ID\n registry.register(full_ID, obj)\n else:\n full_ID = prefix+'_'+ID if prefix else ID\n registry.register_safely(full_ID, obj)\n\n\ndef time_printer(func):\n '''\n Allow functions to print execution time with a `print_time` kwarg.\n\n Examples\n --------\n >>> from qsdsan.utils import time_printer\n >>> @time_printer\n ... def foo(a=1, **kwargs):\n ... 
return a\n >>> # This will print run time\n >>> print(foo(a=5))\n function `foo`\n Total time: 0:00:00.\n 5\n >>> # This will NOT print run time\n >>> print(foo(a=5, print_time=False))\n 5\n '''\n def inner(*args, **kwargs):\n print_time = kwargs.get('print_time')\n if print_time is not False:\n timer = TicToc()\n timer.tic()\n output = func(*args, **kwargs)\n if print_time is not False:\n time = str(timedelta(seconds=round(timer.elapsed_time)))\n name = str(func).split(' ')[1]\n print(f'function `{name}`')\n print(f'Total time: {time}.')\n return output\n inner.__doc__ = func.__doc__\n return inner", "id": "5035304", "language": "Python", "matching_score": 2.1743383407592773, "max_stars_count": 2, "path": "qsdsan/utils/misc.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\nQSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems\n\nThis module is developed by:\n <NAME> <<EMAIL>>\n\nThis module is under the University of Illinois/NCSA Open Source License.\nPlease refer to https://github.com/QSD-Group/QSDsan/blob/main/LICENSE.txt\nfor license details.\n'''\n\n\n# %%\n\nimport sys\nimport numpy as np\nimport pandas as pd\nfrom math import ceil\nfrom collections.abc import Iterable\nfrom warnings import warn\nfrom . import ImpactIndicator, ImpactItem, Stream, SanStream, SanUnit\nfrom .utils import (\n auom,\n format_number as f_num\n )\n\n__all__ = ('LCA',)\n\n\nclass LCA:\n '''\n For life cycle assessment (LCA) of a System.\n\n Parameters\n ----------\n system : :class:`biosteam.System`\n System for which this LCA is conducted for.\n lifetime : int\n Lifetime of the LCA.\n lifetime_unit : str\n Unit of lifetime.\n indicators : Iterable(obj)\n `ImpactIndicator` objects or their IDs/aliases.\n uptime_ratio : float\n Fraction of time that the system is operating.\n annualize_construction : bool\n Used in the case that the lifetime of this LCA (e.g., 10 years)\n is not divisible by the lifetime of certain equipment (e.g., 8 years).\n If True, then the impacts from construction will be annualized using\n the lifetime of the equipment;\n if False, then the total number of the equipment needed throughout this\n LCA will be calculated using `ceil(LCA lifetime/equipment lifetime`.\n item_quantities : kwargs, :class:`ImpactItem` or str = float/callable or (float/callable, unit)\n Other :class:`ImpactItem` objects (e.g., electricity) and their quantities.\n Note that callable functions are used so that quantity of items can be updated.\n\n\n Examples\n --------\n A system should be constructed prior to LCA, here we import a pre-constructed one.\n\n >>> import qsdsan as qs\n >>> from qsdsan.utils import load_example_cmps, load_example_sys\n >>> cmps = load_example_cmps()\n >>> sys = load_example_sys(cmps)\n >>> sys.diagram() # doctest: +SKIP\n >>> sys.simulate()\n >>> sys.show()\n System: sys\n ins...\n [0] salt_water\n phase: 'l', T: 298.15 K, P: 101325 Pa\n flow (kmol/hr): H2O 111\n NaCl 0.856\n [1] methanol\n phase: 'l', T: 298.15 K, P: 101325 Pa\n flow (kmol/hr): Methanol 0.624\n [2] ethanol\n phase: 'l', T: 298.15 K, P: 101325 Pa\n flow (kmol/hr): Ethanol 0.217\n outs...\n [0] alcohols\n phase: 'l', T: 298.15 K, P: 101325 Pa\n flow (kmol/hr): Methanol 0.624\n Ethanol 0.217\n [1] waste_brine\n phase: 'l', T: 350 K, P: 101325 Pa\n flow (kmol/hr): H2O 88.8\n NaCl 0.684\n\n And we also need to specify the impact indicators that we are interested in.\n\n >>> GWP = qs.ImpactIndicator('GlobalWarming', alias='GWP', unit='kg CO2-eq')\n >>> FEC = 
qs.ImpactIndicator('FossilEnergyConsumption', alias='FEC', unit='MJ')\n\n There are four different types of impacts in `QSDsan`:\n construction, transportation, stream, and others.\n\n Note that it is best to add the impact items when developing the unit module,\n (i.e., typically in the `_design` function, but can also in `_run` or `_cost`)\n but for illustrative purpose, we add it after the system is constructed.\n\n Construction is mainly used for impacts that only occur once per lifetime\n of the equipment or the unit.\n\n For example, assume we want to consider the amount of stainless steel\n and concrete used in constructing the MixTank M1.\n\n >>> # Make the impact item, numbers are made up\n >>> SS = qs.ImpactItem('SS', functional_unit='kg', GWP=3, FEC=50)\n >>> Concrete = qs.ImpactItem('Concrete', functional_unit='kg', GWP=4, FEC=30)\n >>> # Specify the amount of stainless steel and concrete used in the unit\n >>> SS_constr_M1 = qs.Construction(item=SS, quantity=100)\n >>> Concrete_constr_M1 = qs.Construction(item=Concrete, quantity=50)\n >>> # Retrieve the unit from the registry\n >>> flowsheet = qs.Flowsheet.flowsheet.default\n >>> M1 = flowsheet.unit.M1\n >>> # Add the construction activity\n >>> M1.construction = (SS_constr_M1, Concrete_constr_M1)\n\n Transportation activity can be added in a similar manner, assuming that\n stainless steel and concrete are delivered by truck from 500 km away.\n\n The interval set below is assuming a system lifetime of 10 year\n and this delivery is only needed once for the entire lifetime.\n\n >>> lifetime = 10\n >>> Trucking = qs.ImpactItem('Trucking', functional_unit='kg*km',\n ... GWP=0.5, FEC=1.5)\n >>> total_quantity = SS_constr_M1.quantity + Concrete_constr_M1.quantity\n >>> Trans_M1 = qs.Transportation(item=Trucking, load_type='mass',\n ... load=total_quantity, distance=500,\n ... 
interval=lifetime, interval_unit='yr')\n >>> M1.transportation = Trans_M1\n\n We can als consider the impacts associated with chemicals and emissions.\n For example, assume the acquisition of methanol, ethanol and disposal of\n the waste brine all have impacts, but the generated alcohols can be treated\n as a product therefore have credits with\n\n >>> # Retrieve streams\n >>> methanol = flowsheet.stream.methanol\n >>> ethanol = flowsheet.stream.ethanol\n >>> alcohols = flowsheet.stream.alcohols\n >>> waste_brine = flowsheet.stream.waste_brine\n >>> # Create `StreamImpactItem` and link to the streams\n >>> methanol_item = qs.StreamImpactItem(linked_stream=methanol, GWP=2, FEC=13)\n >>> ethanol_item = qs.StreamImpactItem(linked_stream=ethanol, GWP=2.1, FEC=25)\n >>> alcohols_item = qs.StreamImpactItem(linked_stream=alcohols, GWP=-0.2, FEC=-5)\n >>> brine_item = qs.StreamImpactItem(linked_stream=waste_brine, GWP=2, FEC=3)\n\n Finally, there might be other impacts we want to include in the LCA,\n for example, the electricity needed to operate the system.\n\n We can use add those additional items when creating the `LCA` object.\n\n >>> # Get the electricity usage of the system throughout the lifetime,\n >>> # note that the default power utility unit is hr\n >>> total_power = sys.power_utility.rate*24*365*lifetime\n >>> # Create an impact item for the electricity\n >>> e_item = qs.ImpactItem('e_item', 'kWh', GWP=1.1, FEC=24)\n >>> # Create the LCA object\n >>> lca = qs.LCA(system=sys, lifetime=10, e_item=total_power)\n\n Now we can look at the total impacts associate with this system.\n\n >>> lca.show() # doctest: +ELLIPSIS\n LCA: sys (lifetime 10 yr)\n ...\n >>> # Retrieve impacts associated with a specific indicator\n >>> lca.get_total_impacts()[GWP.ID] # doctest: +ELLIPSIS\n 349737807.9765445...\n >>> # Or breakdowns of the different category\n >>> lca.get_impact_table('Construction') # doctest: +SKIP\n >>> # Below is for testing purpose, you do not need it\n >>> lca.get_impact_table('Construction').to_dict() # doctest: +ELLIPSIS\n {'Quantity': ...\n >>> lca.get_impact_table('Transportation').to_dict() # doctest: +ELLIPSIS\n {'Quantity': ...\n >>> lca.get_impact_table('Stream').to_dict() # doctest: +ELLIPSIS\n {'Mass [kg]': ...\n >>> lca.get_impact_table('Construction').to_dict() # doctest: +ELLIPSIS\n {'Quantity': ...\n\n You can also allocate the impact based on mass, energy, value, or a ratio you like\n\n >>> lca.get_allocated_impacts(sys.products, allocate_by='mass')['waste_brine']['FossilEnergyConsumption'] # doctest: +ELLIPSIS\n 46018518.870...\n >>> lca.get_allocated_impacts(sys.products, allocate_by='energy')['alcohols']['GlobalWarming'] # doctest: +ELLIPSIS\n 11063009.556...\n >>> alcohols.price = 5\n >>> waste_brine.price = 1\n >>> GWP_alcohols = lca.get_allocated_impacts(sys.products, allocate_by='value')['alcohols']['GlobalWarming']\n >>> GWP_brine = lca.get_allocated_impacts(sys.products, allocate_by='value')['waste_brine']['GlobalWarming']\n >>> GWP_alcohols + GWP_brine # doctest: +ELLIPSIS\n 5469807.9765...\n >>> lca.get_total_impacts(exclude=sys.products)['GlobalWarming'] # doctest: +ELLIPSIS\n 5469807.9765...\n\n See Also\n --------\n `SanUnit and System <https://qsdsan.readthedocs.io/en/latest/tutorials/SanUnit_and_System.html>`_\n\n `TEA and LCA <https://qsdsan.readthedocs.io/en/latest/tutorials/TEA_and_LCA.html>`_\n '''\n\n __slots__ = ('_system', '_lifetime', '_uptime_ratio',\n '_construction_units', '_transportation_units',\n '_lca_streams', '_indicators',\n 
'_other_items', '_other_items_f', 'annualize_construction')\n\n\n def __init__(self, system, lifetime, lifetime_unit='yr',\n indicators=(), uptime_ratio=1, annualize_construction=False,\n **item_quantities):\n system.simulate()\n self._construction_units = set()\n self._transportation_units = set()\n self._lca_streams = set()\n self._update_system(system)\n self._update_lifetime(lifetime, lifetime_unit)\n self.indicators = indicators\n self.uptime_ratio = uptime_ratio\n self.annualize_construction = annualize_construction\n self._other_items = {}\n self._other_items_f = {}\n for item, val in item_quantities.items():\n try:\n f_quantity, unit = val # unit provided for the quantity\n except Exception as e:\n if 'unpack' in str(sys.exc_info()[1]):\n f_quantity = val\n unit = ''\n else:\n raise e\n self.add_other_item(item, f_quantity, unit)\n\n\n def _update_system(self, system):\n for u in system.units:\n if not isinstance (u, SanUnit):\n continue\n if u.construction:\n self._construction_units.add(u)\n if u.transportation:\n self._transportation_units.add(u)\n self._construction_units = sorted(self._construction_units,\n key=lambda u: u.ID)\n self._transportation_units = sorted(self._transportation_units,\n key=lambda u: u.ID)\n for s in (i for i in system.feeds+system.products):\n if not hasattr(s, 'stream_impact_item'):\n continue\n if s.stream_impact_item:\n self._lca_streams.add(s)\n self._lca_streams = sorted(self._lca_streams, key=lambda s: s.ID)\n self._system = system\n try: # for older versions of biosteam without the `_LCA` attribute\n system._LCA = self\n except AttributeError:\n pass\n\n\n def _update_lifetime(self, lifetime=0., unit='yr'):\n if not unit or unit == 'yr':\n self._lifetime = int(lifetime)\n else:\n converted = auom(unit).convert(int(lifetime), 'yr')\n self._lifetime = converted\n\n\n def add_other_item(self, item, f_quantity, unit=''):\n '''Add other :class:`ImpactItem` in LCA.'''\n if isinstance(item, str):\n item = ImpactItem.get_item(item)\n fu = item.functional_unit\n if not callable(f_quantity):\n f = lambda: f_quantity\n else:\n f = f_quantity\n quantity = f()\n if unit and unit != fu:\n try:\n quantity = auom(unit).convert(quantity, fu)\n except:\n raise ValueError(f'Conversion of the given unit {unit} to '\n f'item functional unit {fu} is not supported.')\n self._other_items_f[item.ID] = {'item':item, 'f_quantity':f, 'unit':unit}\n self.other_items[item.ID] = {'item':item, 'quantity':quantity}\n\n\n def refresh_other_items(self):\n '''Refresh quantities of other items using the given functions.'''\n for item_ID, record in self._other_items_f.items():\n item, f_quantity, unit = record.values()\n self.other_items[item_ID]['quantity'] = f_quantity()\n\n\n def __repr__(self):\n return f'<LCA: {self.system}>'\n\n def show(self, lifetime_unit='yr'):\n '''Show basic information of this :class:`LCA` object.'''\n lifetime = auom('yr').convert(self.lifetime, lifetime_unit)\n info = f'LCA: {self.system} (lifetime {f_num(lifetime)} {lifetime_unit})'\n info += '\\nImpacts:'\n print(info)\n if len(self.indicators) == 0:\n print(' None')\n else:\n index = pd.Index((i.ID+' ('+i.unit+')' for i in self.indicators))\n df = pd.DataFrame({\n 'Construction': tuple(self.total_construction_impacts.values()),\n 'Transportation': tuple(self.total_transportation_impacts.values()),\n 'Stream': tuple(self.total_stream_impacts.values()),\n 'Others': tuple(self.total_other_impacts.values()),\n 'Total': tuple(self.total_impacts.values())\n },\n index=index)\n # print(' 
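# A simplified mirror of the **item_quantities handling in __init__ above:
# each value may be a bare quantity or a (quantity, unit) pair, and the
# quantity itself may be a callable so that `refresh_other_items` can
# re-evaluate it later. This sketch catches the unpacking error by exception
# type rather than by message, a simplification of the original.
def normalize_other_item(val):
    try:
        f_quantity, unit = val               # (quantity, unit) pair
    except (TypeError, ValueError):
        f_quantity, unit = val, ''           # bare quantity, no unit given
    f = f_quantity if callable(f_quantity) else (lambda: f_quantity)
    return f, unit

electricity = lambda: 876000.                # e.g., lifetime kWh, assumed number
f, unit = normalize_other_item((electricity, 'kWh'))
print(f(), unit)                             # 876000.0 kWh
f, unit = normalize_other_item(5000.)
print(f(), unit)                             # 5000.0 with an empty unit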
'*9+df.to_string().replace('\\n', '\\n'+' '*9))\n print(df.to_string())\n\n _ipython_display_ = show\n\n\n def get_construction_impacts(self, units=None, time=None, time_unit='hr'):\n '''\n Return all construction-related impacts for the given unit,\n normalized to a certain time frame.\n '''\n units = self.construction_units if units is None else units\n annualize = self.annualize_construction\n if not isinstance(units, Iterable) or isinstance(units, str):\n units = (units,)\n if time is None:\n time = self.lifetime_hr\n else:\n time = auom(time_unit).convert(float(time), 'hr')\n impacts = dict.fromkeys((i.ID for i in self.indicators), 0.)\n for i in units:\n if not isinstance(i, SanUnit):\n continue\n for j in i.construction:\n impact = j.impacts\n if j.lifetime is not None: # this equipment has a lifetime\n constr_lifetime = auom('yr').convert(j.lifetime, 'hr')\n ratio = ceil(time/constr_lifetime) if not annualize else time/constr_lifetime\n else: # equipment doesn't have a lifetime\n if i.lifetime and not isinstance(i.lifetime, dict): # unit has a uniform lifetime\n constr_lifetime = auom('yr').convert(i.lifetime, 'hr')\n ratio = ceil(time/constr_lifetime) if not annualize else time/constr_lifetime\n else: # no lifetime, assume just need one\n ratio = 1.\n for m, n in impact.items():\n if m not in impacts.keys():\n continue\n impacts[m] += n*ratio\n return impacts\n\n def get_transportation_impacts(self, units=None, time=None, time_unit='hr'):\n '''\n Return all transportation-related impacts for the given unit,\n normalized to a certain time frame.\n '''\n units = self.transportation_units if units is None else units\n if not isinstance(units, Iterable):\n units = (units,)\n if not time:\n time = self.lifetime_hr\n else:\n time = auom(time_unit).convert(float(time), 'hr')\n impacts = dict.fromkeys((i.ID for i in self.indicators), 0.)\n for i in units:\n if not isinstance(i, SanUnit):\n continue\n for j in i.transportation:\n impact = j.impacts\n for m, n in impact.items():\n if m not in impacts.keys():\n continue\n impacts[m] += n*time/j.interval\n return impacts\n\n\n def get_stream_impacts(self, stream_items=None, exclude=None,\n kind='all', time=None, time_unit='hr'):\n '''\n Return all stream-related impacts for the given streams,\n normalized to a certain time frame.\n '''\n isa = isinstance\n if stream_items == None:\n stream_items = self.stream_inventory\n if not isa(stream_items, Iterable):\n stream_items = (stream_items,)\n if not isa(exclude, Iterable):\n exclude = (exclude,)\n impacts = dict.fromkeys((i.ID for i in self.indicators), 0.)\n if not time:\n time = self.lifetime_hr\n else:\n time = auom(time_unit).convert(float(time), 'hr')\n for j in stream_items:\n # In case that ws instead of the item is given\n if isa(j, Stream):\n if not isa(j, SanStream):\n continue\n ws = j\n if j.stream_impact_item:\n j = ws.stream_impact_item\n else: continue\n else:\n ws = j.linked_stream\n\n if ws in exclude: continue\n\n for m, n in j.CFs.items():\n if kind in ('all', 'total', 'net'):\n pass\n elif kind in ('direct', 'direct_emission'):\n n = max(n, 0)\n elif kind == 'offset':\n n = min(n, 0)\n else:\n raise ValueError('kind can only be \"all\", \"direct_emission\", or \"offset\", '\n f'not \"{kind}\".')\n if m not in impacts.keys():\n continue\n impacts[m] += n*time*ws.F_mass\n return impacts\n\n def get_other_impacts(self, time=None, time_unit='hr'):\n '''\n Return all additional impacts from \"other\" :class:`ImpactItems` objects,\n based on defined quantity.\n '''\n 
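# A minimal mirror of the replacement scaling used in
# `get_construction_impacts` above: equipment with a finite lifetime is
# purchased a whole number of times over the evaluated time frame unless
# construction is annualized, in which case the fractional ratio is used.
from math import ceil

def replacement_ratio(time_hr, constr_lifetime_hr, annualize=False):
    return time_hr/constr_lifetime_hr if annualize else ceil(time_hr/constr_lifetime_hr)

hr = lambda yr: yr*365*24
print(replacement_ratio(hr(10), hr(4)))                  # 3 purchases over 10 yr
print(replacement_ratio(hr(10), hr(4), annualize=True))  # 2.5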
self.refresh_other_items()\n impacts = dict.fromkeys((i.ID for i in self.indicators), 0.)\n other_dct = self.other_items\n if not time:\n time = self.lifetime_hr\n else:\n time = auom(time_unit).convert(float(time), 'hr')\n factor = time / self.lifetime_hr\n for i in other_dct.keys():\n item = ImpactItem.get_item(i)\n for m, n in item.CFs.items():\n if m not in impacts.keys():\n continue\n impacts[m] += n*other_dct[i]['quantity']*factor\n return impacts\n\n def get_total_impacts(self, exclude=None, time=None, time_unit='hr'):\n '''Return total impacts, normalized to a certain time frame.'''\n impacts = dict.fromkeys((i.ID for i in self.indicators), 0.)\n constr = self.get_construction_impacts(self.construction_units, time=time, time_unit=time_unit)\n trans = self.get_transportation_impacts(self.transportation_units, time=time, time_unit=time_unit)\n ws_impacts = self.get_stream_impacts(stream_items=self.stream_inventory,\n exclude=exclude, time=time, time_unit=time_unit)\n other = self.get_other_impacts(time=time, time_unit=time_unit)\n\n for i in (constr, trans, ws_impacts, other):\n for m, n in i.items():\n if m not in impacts.keys():\n continue\n impacts[m] += n\n return impacts\n\n def get_allocated_impacts(self, streams=(), allocate_by='mass'):\n '''\n Allocate total impacts to one or multiple streams.\n\n Note that original impacts assigned to the streams will be excluded,\n i.e., the total impact for allocation will be calculated using\n `LCA.get_total_impacts(exclude=streams)`.\n\n Parameters\n ----------\n streams : Iterable(obj)\n One or a Iterable of streams. Note that impacts of these streams will be\n excluded in calculating the total impacts.\n allocate_by : str, Iterable, or function to generate an Iterable\n If provided as a str, can be \"mass\" (`F_mass`), \"energy\" (`HHV`),\n or 'value' (`F_mass`*`price`) to allocate the impacts accordingly.\n If provided as an Iterable (no need to normalize so that sum of the Iterable is 1),\n will allocate impacts according to the Iterable.\n If provided as a function, will call the function to generate an\n Iterable to allocate the impacts accordingly.\n\n .. 
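# A standalone mirror of the per-indicator stream contribution in
# `get_stream_impacts` above: characterization factor * mass flow * time,
# where kind='direct_emission' keeps only positive factors and kind='offset'
# keeps only negative (credit) factors.
def stream_impact(CF, F_mass, time_hr, kind='all'):
    if kind in ('direct', 'direct_emission'):
        CF = max(CF, 0)
    elif kind == 'offset':
        CF = min(CF, 0)
    elif kind not in ('all', 'total', 'net'):
        raise ValueError(f'kind "{kind}" is not supported')
    return CF * F_mass * time_hr

print(stream_impact(-0.2, 1000., 8760., kind='offset'))           # credit only, -1752000.0
print(stream_impact(-0.2, 1000., 8760., kind='direct_emission'))  # 0, no direct burden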
note::\n\n Energy of the stream will be calculated as the sum of HHVs of all components\n in the stream.\n\n '''\n if not isinstance(streams, Iterable):\n streams = (streams,)\n impact_dct = self.get_total_impacts(exclude=streams)\n impact_vals = np.array([i for i in impact_dct.values()])\n allocated = {}\n if len(streams) == 1:\n if not isinstance(streams[0], SanStream):\n return None\n return impact_dct\n if allocate_by == 'mass':\n ratios = np.array([i.F_mass for i in streams])\n elif allocate_by == 'energy':\n ratios = np.array([i.HHV for i in streams])\n elif allocate_by == 'value':\n ratios = np.array([i.F_mass*i.price for i in streams])\n elif iter(allocate_by):\n ratios = allocate_by\n elif callable(allocate_by):\n ratios = allocate_by()\n else:\n raise ValueError('allocate_by can only be \"mass\", \"energy\", \"value\", '\n 'an Iterable (with the same length as `streams`), '\n 'or a function to generate an Iterable.')\n if ratios.sum() == 0:\n raise ValueError('Calculated allocation ratios are all zero, cannot allocate.')\n ratios = ratios/ratios.sum()\n for n, s in enumerate(streams):\n if not isinstance(s, SanStream):\n continue\n if not s in self.system.streams:\n raise ValueError(f'`WasteStream` {s} not in the system.')\n allocated[s.ID] = dict(zip(impact_dct.keys(), ratios[n]*impact_vals))\n return allocated\n\n\n def get_unit_impacts(self, units, time=None, time_unit='hr',\n exclude=None):\n '''Return total impacts with certain units, normalized to a certain time frame. '''\n if not isinstance(units, Iterable):\n units = (units,)\n constr = self.get_construction_impacts(units, time, time_unit)\n trans = self.get_transportation_impacts(units, time, time_unit)\n stream_items = set(i for i in\n sum((tuple(unit.ins+unit.outs) for unit in units), ())\n if i.impact_item)\n\n s = self.get_stream_impacts(stream_items=stream_items, exclude=exclude,\n time=time, time_unit=time_unit)\n other = self.get_other_impacts()\n tot = constr.copy()\n for m in tot.keys():\n tot[m] += trans[m] + s[m] + other[m]\n return tot\n\n def _append_cat_sum(self, cat_table, cat, tot):\n num = len(cat_table)\n cat_table.loc[num] = '' # initiate a blank spot for value to be added later\n\n for i in self.indicators:\n cat_table[f'{i.ID} [{i.unit}]'][num] = tot[i.ID]\n cat_table[f'Category {i.ID} Ratio'][num] = 1\n\n if cat in ('construction', 'transportation'):\n cat_table.rename(index={num: ('Sum', 'All')}, inplace=True)\n cat_table.index = \\\n pd.MultiIndex.from_tuples(cat_table.index,\n names=[cat.capitalize(), 'SanUnit'])\n else:\n cat_table.rename(index={num: 'Sum'}, inplace=True)\n\n return cat_table\n\n def get_impact_table(self, category, time=None, time_unit='hr'):\n '''\n Return a :class:`pandas.DataFrame` table for the given impact category,\n normalized to a certain time frame.\n '''\n if not time:\n time = self.lifetime_hr\n else:\n time = auom(time_unit).convert(float(time), 'hr')\n\n cat = category.lower()\n tot_f = getattr(self, f'get_{cat}_impacts')\n kwargs = {'time': time, 'time_unit': time_unit} if cat != 'other' else {}\n tot = tot_f(**kwargs)\n time_ratio = time/self.lifetime_hr\n\n if cat in ('construction', 'transportation'):\n units = sorted(getattr(self, f'_{cat}_units'),\n key=(lambda su: su.ID))\n items = sorted(set(i.item for i in getattr(self, f'{cat}_inventory')),\n key=(lambda item: item.ID))\n if len(items) == 0:\n return f'No {cat}-related impacts.'\n\n # Note that item_dct = dict.fromkeys([item.ID for item in items], []) won't work\n item_dct = dict.fromkeys([item.ID for 
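# A standalone sketch of the ratio splitting at the end of
# `get_allocated_impacts` above, for a user-provided Iterable of weights
# (they need not sum to 1). The totals below are assumed placeholders.
import numpy as np

def allocate(impact_dct, stream_IDs, weights):
    vals = np.array(list(impact_dct.values()))
    ratios = np.asarray(weights, dtype=float)
    if ratios.sum() == 0:
        raise ValueError('Allocation weights are all zero, cannot allocate.')
    ratios = ratios/ratios.sum()
    return {ID: dict(zip(impact_dct.keys(), r*vals))
            for ID, r in zip(stream_IDs, ratios)}

totals = {'GlobalWarming': 5.47e6, 'FossilEnergyConsumption': 9.2e7}   # assumed
print(allocate(totals, ('alcohols', 'waste_brine'), (3, 1)))           # 75%/25% split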
item in items])\n for item_ID in item_dct.keys():\n item_dct[item_ID] = dict(SanUnit=[], Quantity=[])\n for su in units:\n if not isinstance(su, SanUnit):\n continue\n for i in getattr(su, cat):\n item_dct[i.item.ID]['SanUnit'].append(su.ID)\n if cat == 'transportation':\n item_dct[i.item.ID]['Quantity'].append(i.quantity*time/i.interval)\n else: # construction\n lifetime = i.lifetime or su.lifetime or self.lifetime\n if isinstance(lifetime, dict): # in the case the the equipment is not in the unit lifetime dict\n lifetime = lifetime.get(i.item.ID) or self.lifetime\n constr_ratio = self.lifetime/lifetime if self.annualize_construction else ceil(self.lifetime/lifetime)\n item_dct[i.item.ID]['Quantity'].append(i.quantity*constr_ratio)\n\n dfs = []\n for item in items:\n dct = item_dct[item.ID]\n dct['SanUnit'].append('Total')\n dct['Quantity'] = np.append(dct['Quantity'], sum(dct['Quantity']))\n dct['Item Ratio'] = dct['Quantity']/dct['Quantity'].sum()*2\n for i in self.indicators:\n if i.ID in item.CFs:\n dct[f'{i.ID} [{i.unit}]'] = impact = dct['Quantity']*item.CFs[i.ID]\n dct[f'Category {i.ID} Ratio'] = impact/(tot[i.ID]*time_ratio)\n else:\n dct[f'{i.ID} [{i.unit}]'] = dct[f'Category {i.ID} Ratio'] = 0\n df = pd.DataFrame.from_dict(dct)\n index0 = f'{item.ID} [{item.functional_unit}]'\n df.set_index([pd.MultiIndex.from_arrays(\n [(index0,)*len(dct['SanUnit'])], names=(category,)),\n 'SanUnit'],\n inplace=True)\n dfs.append(df)\n\n table = pd.concat(dfs)\n return self._append_cat_sum(table, cat, tot)\n\n ind_head = sum(([f'{i.ID} [{i.unit}]',\n f'Category {i.ID} Ratio'] for i in self.indicators), [])\n\n if cat in ('stream', 'streams'):\n headings = ['Stream', 'Mass [kg]', *ind_head]\n item_dct = dict.fromkeys(headings)\n for key in item_dct.keys():\n item_dct[key] = []\n for ws_item in self.stream_inventory:\n ws = ws_item.linked_stream\n item_dct['Stream'].append(ws.ID)\n mass = ws.F_mass * time\n item_dct['Mass [kg]'].append(mass)\n for ind in self.indicators:\n if ind.ID in ws_item.CFs.keys():\n impact = ws_item.CFs[ind.ID]*mass\n item_dct[f'{ind.ID} [{ind.unit}]'].append(impact)\n item_dct[f'Category {ind.ID} Ratio'].append(impact/(tot[ind.ID]*time_ratio))\n else:\n item_dct[f'{ind.ID} [{ind.unit}]'].append(0)\n item_dct[f'Category {ind.ID} Ratio'].append(0)\n table = pd.DataFrame.from_dict(item_dct)\n table.set_index(['Stream'], inplace=True)\n return self._append_cat_sum(table, cat, tot)\n\n elif cat == 'other':\n headings = ['Other', 'Quantity', *ind_head]\n item_dct = dict.fromkeys(headings)\n for key in item_dct.keys():\n item_dct[key] = []\n for other_ID in self.other_items.keys():\n other = self.other_items[other_ID]['item']\n item_dct['Other'].append(f'{other_ID} [{other.functional_unit}]')\n quantity = self.other_items[other_ID]['quantity'] * time_ratio\n item_dct['Quantity'].append(quantity)\n for ind in self.indicators:\n if ind.ID in other.CFs.keys():\n impact = other.CFs[ind.ID]*quantity\n item_dct[f'{ind.ID} [{ind.unit}]'].append(impact)\n item_dct[f'Category {ind.ID} Ratio'].append(impact/(tot[ind.ID]*time_ratio))\n else:\n item_dct[f'{ind.ID} [{ind.unit}]'].append(0)\n item_dct[f'Category {ind.ID} Ratio'].append(0)\n\n table = pd.DataFrame.from_dict(item_dct)\n table.set_index(['Other'], inplace=True)\n return self._append_cat_sum(table, cat, tot)\n\n raise ValueError(\n 'category can only be \"Construction\", \"Transportation\", \"Stream\", or \"Other\", ' \\\n f'not \"{category}\".')\n\n\n def save_report(self, file=None, sheet_name='LCA',\n time=None, 
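# A minimal sketch of how one stream row of `get_impact_table` above is
# filled in: the mass over the evaluated time frame, the impact (CF * mass),
# and the "Category ... Ratio" column, which is that impact divided by the
# category total scaled to the same time frame. The numbers are illustrative.
def stream_table_row(CF, F_mass, time_hr, category_total, time_ratio=1.):
    mass = F_mass * time_hr
    impact = CF * mass
    return {'Mass [kg]': mass,
            'Impact': impact,
            'Category Ratio': impact/(category_total*time_ratio)}

print(stream_table_row(CF=2., F_mass=100., time_hr=87600., category_total=3.5e8))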
time_unit='hr',\n n_row=0, row_space=2):\n '''Save all LCA tables as an Excel file.'''\n if not file:\n file = f'{self.system.ID}_lca.xlsx'\n tables = [self.get_impact_table(cat, time, time_unit)\n for cat in ('Construction', 'Transportation',\n 'Stream', 'Other')]\n with pd.ExcelWriter(file) as writer:\n for table in tables:\n table.to_excel(writer, sheet_name=sheet_name, startrow=n_row)\n n_row += table.shape[0] + row_space + len(table.columns.names) # extra lines for the heading\n\n\n @property\n def system(self):\n '''[biosteam.System] The system linked to this LCA.'''\n return self._system\n @system.setter\n def system(self, i):\n self._update_system(i)\n\n @property\n def lifetime(self):\n '''[int] Lifetime of the system, [yr].'''\n return self._lifetime\n @lifetime.setter\n def lifetime(self, lifetime, unit='yr'):\n self._update_lifetime(lifetime, unit)\n\n @property\n def lifetime_hr(self):\n '''[float] Lifetime of the system in hours, [hr].'''\n return self._lifetime*365*24*self.uptime_ratio\n\n @property\n def uptime_ratio(self):\n '''[float] Fraction of time that the system is operating.'''\n return self._uptime_ratio\n @uptime_ratio.setter\n def uptime_ratio(self, i):\n if 0 <=i<= 1:\n self._uptime_ratio = float(i)\n else:\n raise ValueError('uptime_ratio must be in [0,1].')\n\n @property\n def indicators(self):\n '''\n [list] All impact indicators associated with this LCA object.\n If not `ImpactIndicator` has been added, then will be defaulted to\n sum of the `ImpactIndicator` objects added to the system associated\n with this LCA (e.g., associated with construction, streams, etc.\n\n '''\n if self._indicators:\n return self._indicators\n\n if not self.construction_inventory:\n constr = set()\n else:\n constr = set(sum((i.indicators for i in self.construction_inventory\n if i is not None), ()))\n if not self.transportation_inventory:\n trans = set()\n else:\n trans = set(sum((i.indicators for i in self.transportation_inventory\n if i is not None), ()))\n if not self.stream_inventory:\n ws = set()\n else:\n ws = set(sum((i.indicators for i in self.stream_inventory\n if i is not None), ()))\n if not self.other_items:\n other = set()\n else:\n other = set(sum((ImpactItem.get_item(i).indicators\n for i in self.other_items.keys()), ()))\n tot = constr.union(trans, ws, other)\n if len(tot) == 0:\n warn('No `ImpactIndicator` has been added.')\n return list(tot)\n @indicators.setter\n def indicators(self, i):\n if not (isinstance(i, Iterable) and not isinstance(i, str)):\n i = (i,)\n inds = []\n for ind in i:\n if isinstance(ind, str):\n ind = ImpactIndicator.get_indicator(ind)\n if not isinstance(ind, ImpactIndicator):\n raise TypeError(f'{ind} is not an `ImpactIndicator` or ID/alias of an `ImpactIndicator`.')\n inds.append(ind)\n self._indicators = inds\n\n @property\n def construction_units(self):\n '''[set] All units in the linked system with construction activity.'''\n return self._construction_units\n\n @property\n def construction_inventory(self):\n '''[tuple] All construction activities.'''\n return sum((i.construction for i in self.construction_units), ())\n\n @property\n def total_construction_impacts(self):\n '''[dict] Total impacts associated with construction activities.'''\n return self.get_construction_impacts(self.construction_units)\n\n @property\n def transportation_units(self):\n '''[set] All units in the linked system with transportation activity.'''\n return self._transportation_units\n\n @property\n def transportation_inventory(self):\n '''[tuple] All 
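# A mirror of the `lifetime_hr` property above (operating hours over the
# lifetime, discounted by the uptime ratio), plus an assumed usage of
# `save_report`; the `lca` object is the one from the class docstring and
# the file name is arbitrary, so those lines are left as comments.
def lifetime_hr(lifetime_yr, uptime_ratio=1.):
    return lifetime_yr*365*24*uptime_ratio

print(lifetime_hr(10, uptime_ratio=0.9))   # 78840.0 hr

# lca.save_report()                                   # defaults to '<system ID>_lca.xlsx'
# lca.save_report(file='sys_lca.xlsx', row_space=2)   # stack the four tables, 2 blank rows apart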
transportation activities.'''\n return sum((i.transportation for i in self.transportation_units), ())\n\n @property\n def total_transportation_impacts(self):\n '''[dict] Total impacts associated with transportation activities.'''\n return self.get_transportation_impacts(self.transportation_units)\n\n @property\n def lca_streams(self):\n '''[set] All streams in the linked system with impacts.'''\n return self._lca_streams\n\n @property\n def stream_inventory(self):\n '''[tuple] All chemical inputs, fugitive gases, waste emissions, and products.'''\n return tuple(i.stream_impact_item for i in self.lca_streams)\n\n @property\n def total_stream_impacts(self):\n '''[dict] Total impacts associated with `WasteStreams` (e.g., chemicals, emissions).'''\n return self.get_stream_impacts(stream_items=self.stream_inventory)\n\n @property\n def other_items (self):\n '''[dict] Other impact items (e.g., electricity) and their quantities.'''\n return self._other_items\n @other_items.setter\n def other_items(self, item, f_quantity, unit=''):\n self.add_other_item(item, f_quantity, unit)\n\n @property\n def total_other_impacts(self):\n '''[dict] Total impacts associated with other ImpactItems (e.g., electricity).'''\n return self.get_other_impacts()\n\n @property\n def total_impacts(self):\n '''[dict] Total impacts of the entire system (construction, transportation, and wastestream).'''\n return self.get_total_impacts()", "id": "5610433", "language": "Python", "matching_score": 3.566070795059204, "max_stars_count": 2, "path": "qsdsan/_lca.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\nQSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems\n\nThis module is developed by:\n <NAME> <<EMAIL>>\n\nThis module is under the University of Illinois/NCSA Open Source License.\nPlease refer to https://github.com/QSD-Group/QSDsan/blob/main/LICENSE.txt\nfor license details.\n'''\n\n# %%\n\nfrom warnings import warn\nfrom biosteam.utils import MissingStream\nfrom . import Stream\n\n__all__ = ('SanStream', 'MissingSanStream')\n\nsetattr = object.__setattr__\n\nclass SanStream(Stream):\n '''\n A subclass of :class:`thermosteam.Stream` with additional attributes\n for environmental impacts.\n\n .. 
note::\n\n Parameters below only include the ones additional to those of :class:`thermosteam.Stream`.\n\n\n Parameters\n ----------\n stream_impact_item : :class:`StreamImpactItem`\n The :class:`StreamImpactItem` this stream is linked to.\n\n Examples\n --------\n `WasteStream <https://qsdsan.readthedocs.io/en/latest/tutorials/WasteStream.html>`__\n\n See Also\n --------\n `thermosteam.Stream <https://thermosteam.readthedocs.io/en/latest/Stream.html>`_\n\n '''\n\n __slots__ = (*Stream.__slots__, '_impact_item', '_stream_impact_item')\n ticket_name = 'ss'\n\n def __init__(self, ID='', flow=(), phase='l', T=298.15, P=101325.,\n units='kg/hr', price=0., thermo=None, stream_impact_item=None,\n **component_flows):\n if 'impact_item' in component_flows.keys():\n raise ValueError('The keyword `impact_item` is deprecated, '\n 'please use `stream_impact_item` instead.')\n super().__init__(ID=ID, flow=flow, phase=phase, T=T, P=P,\n units=units, price=price, thermo=thermo,\n **component_flows)\n if stream_impact_item:\n stream_impact_item._linked_stream = self\n self._stream_impact_item = stream_impact_item\n self._impact_item = self._stream_impact_item\n\n def copy(self, new_ID='', copy_price=False, copy_impact_item=False):\n '''\n Copy the information of another stream.\n\n There are three functions related to copying: ``copy``, ``copy_like``, and ``copy_flow``,\n and they have slight differences in using.\n\n Both ``copy`` and ``copy_like`` makes the new stream the same as the original one\n (other than that the new stream does not have the cost),\n but when using ``copy``, you do now need to pre-create the new stream,\n (i.e., you can just do ``new_stream = original_stream.copy('new_ID')``),\n but to use ``copy_like``, you need to firstly create the new stream, then\n ``new_stream.copy_like(original_stream)``.\n\n For ``copy_flow``, it is similar to ``copy_like`` in that you need to firstly\n creating the new stream, but unlike ``copy_flow`` that copies properties\n such as temperature, pressure, ``copy_flow`` just copies the mass flow information,\n but you can choose which component to copy.\n\n Parameters\n ----------\n new_ID : str\n ID of the new stream, a default ID will be assigned if not provided.\n copy_price : bool\n If True, price of the new stream will be set to be the same as\n the original stream.\n copy_impact_item : bool\n If True and the original stream has an :class:`~.StreamImpactItem`,\n then a new :class:`~.StreamImpactItem` will be created for the new stream\n and the new impact item will be linked to the original impact item.\n '''\n\n new = super().copy(ID=new_ID)\n if copy_price:\n new.price = self.price\n if hasattr(self, '_stream_impact_item'):\n if self.stream_impact_item is not None:\n self.stream_impact_item.copy(stream=new)\n else:\n new._stream_impact_item = None\n return new\n\n __copy__ = copy\n\n\n def copy_like(self, other, copy_price=False, copy_impact_item=False):\n '''\n Copy the information of another stream without creating a new stream.\n\n Parameters\n ----------\n other : obj\n The stream where mass flows and stream properties will be copied from.\n copy_price : bool\n If True, price of the new stream will be set to be the same as\n the original stream.\n copy_impact_item : bool\n If True and the original stream has an :class:`~.StreamImpactItem`,\n then a new :class:`~.StreamImpactItem` will be created for the new stream\n and the new impact item will be linked to the original impact item.\n\n See Also\n --------\n :func:`copy` for the differences 
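# A hedged usage sketch contrasting the three copying helpers described in
# the docstring above; the component set and stream contents mirror the
# `flow_proxy`/`proxy` doctests later in this module and are illustrative.
import qsdsan as qs
from qsdsan.utils import load_example_cmps

qs.set_thermo(load_example_cmps())
original = qs.SanStream('original', Water=100, NaCl=1, price=3.18)

new1 = original.copy('new1')               # no need to pre-create the new stream
new2 = qs.SanStream('new2')
new2.copy_like(original)                   # pre-create, then copy flows and conditions
new3 = qs.SanStream('new3')
new3.copy_flow(original, IDs=('Water',))   # only the selected mass flows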
between ``copy``, ``copy_like``, and ``copy_flow``.\n '''\n\n Stream.copy_like(self, other)\n if not isinstance(other, SanStream):\n return\n if copy_price:\n other.price = self.price\n if copy_impact_item:\n if hasattr(other, '_stream_impact_item'):\n if other.stream_impact_item is not None:\n self.stream_impact_item.copy(stream=self)\n\n\n def copy_flow(self, other, IDs=..., *, remove=False, exclude=False):\n '''\n Copy only the mass flow of another stream without creating a new stream.\n\n Parameters\n ----------\n other : obj\n The stream where mass flows will be copied from.\n IDs=... : Iterable(str), defaults to all components.\n IDs of the components to be copied from.\n remove=False: bool, optional\n If True, copied components will be removed from the original stream.\n exclude=False: bool, optional\n If True, exclude designated components when copying.\n\n\n See Also\n --------\n :func:`copy` for the differences between ``copy``, ``copy_like``, and ``copy_flow``.\n '''\n Stream.copy_flow(self, other=other, IDs=IDs, remove=remove, exclude=exclude)\n\n if not isinstance(other, SanStream):\n return\n\n self._stream_impact_item = None\n\n\n def flow_proxy(self, ID=None):\n '''\n Return a new stream that shares flow data with this one.\n\n Parameters\n ----------\n ID : str\n ID of the new proxy stream.\n\n Examples\n --------\n >>> from qsdsan import set_thermo, SanStream\n >>> from qsdsan.utils import load_example_cmps\n >>> cmps = load_example_cmps()\n >>> set_thermo(cmps)\n >>> ss1 = SanStream('ss1', Water=100, NaCl=1, price=3.18)\n >>> ss2 = ss1.flow_proxy('ss2')\n >>> ss2.mol is ss1.mol\n True\n >>> ss2.thermal_condition is ss1.thermal_condition\n False\n '''\n new = Stream.flow_proxy(self, ID=ID)\n new._stream_impact_item = None\n return new\n\n\n def proxy(self, ID=None):\n '''\n Return a new stream that shares all data with this one.\n\n Note that unlike other properties, the `price` and `stream_impact_item`\n of the two streams are not connected,\n i.e., the price of the new stream will be the same as the\n original one upon creation, but then they can be different.\n\n Parameters\n ----------\n ID : str\n ID of the new proxy stream.\n\n Examples\n --------\n >>> from qsdsan import set_thermo, SanStream\n >>> from qsdsan.utils import load_example_cmps\n >>> cmps = load_example_cmps()\n >>> set_thermo(cmps)\n >>> ss1 = SanStream('ss1', Water=100, NaCl=1, price=3.18)\n >>> ss2 = ss1.proxy('ss2')\n >>> ss2.mol is ss1.mol\n True\n >>> ss2.thermal_condition is ss1.thermal_condition\n True\n >>> ss2.price = 5.2335\n >>> ss1.price\n 3.18\n '''\n new = Stream.proxy(self, ID=ID)\n new._stream_impact_item = None\n return new\n\n\n @staticmethod\n def degassing(original_stream, receiving_stream=None, gas_IDs=()):\n '''\n Remove all the gas components from the original stream,\n if `receiving_stream` is given, then the gas components will be transferred\n to the receiving stream.\n\n If `gas_IDs` is not provided, then the gas components will be those\n either have `locked_state` == \"g\" or `particle_size` == \"Dissolved gas\".\n\n Parameters\n ----------\n original_stream : None or obj\n The stream where the gas components will be removed.\n receiving_stream : None or obj\n The stream to receive the gas components.\n gas_IDs : Iterable(str)\n IDs of the gas components to be removed, will be set according\n to the component properties if not provided.\n '''\n if not gas_IDs:\n gas_IDs = original_stream.gases if isinstance(original_stream, SanStream) \\\n else [i.ID for i in 
original_stream.components if i.locked_state=='g']\n if receiving_stream:\n receiving_stream.imass[gas_IDs] += original_stream.imass[gas_IDs]\n original_stream.imass[gas_IDs] = 0\n\n\n @staticmethod\n def filtering(original_stream, receiving_stream=None, solid_IDs=()):\n '''\n Remove all the solid components from the original stream,\n if `receiving_stream` is given, then the solid components will be transferred\n to the receiving stream.\n\n If `solid_IDs` is not provided, then the gas components will be those\n either have `locked_state` == \"g\" or `particle_size` == \"Particulate\".\n\n Parameters\n ----------\n original_stream : None or obj\n The stream where the gas components will be removed.\n receiving_stream : None or obj\n The stream to receive the gas components.\n solid_IDs : Iterable(str)\n IDs of the solid components to be removed, will be set according\n to the component properties if not provided.\n '''\n if not solid_IDs:\n solid_IDs = original_stream.solids if isinstance(original_stream, SanStream) \\\n else [i.ID for i in original_stream.components if i.locked_state=='s']\n if receiving_stream:\n receiving_stream.imass[solid_IDs] += original_stream.imass[solid_IDs]\n original_stream.imass[solid_IDs] = 0\n\n\n @staticmethod\n def from_stream(cls, stream, ID='', **kwargs):\n '''\n Cast a :class:`thermosteam.Stream` or :class:`biosteam.utils.MissingStream`\n to :class:`SanStream` or :class:`MissingSanStream`.\n\n Parameters\n ----------\n cls : obj\n class of the stream to be created.\n stream : :class:`thermosteam.Stream`\n The original stream.\n ID : str\n If not provided, will use default ID.\n kwargs\n Additional properties of the new stream.\n\n Examples\n --------\n >>> import qsdsan as qs\n >>> cmps = qs.Components.load_default()\n >>> qs.set_thermo(cmps)\n >>> s = qs.Stream('s', H2O=100, price=5)\n >>> s.show()\n Stream: s\n phase: 'l', T: 298.15 K, P: 101325 Pa\n flow (kmol/hr): H2O 100\n >>> s.price\n 5.0\n >>> ss = qs.SanStream.from_stream(qs.SanStream, s, ID='ss', T=350, price=10)\n >>> ss.show()\n SanStream: ss\n phase: 'l', T: 350 K, P: 101325 Pa\n flow (kmol/hr): H2O 100\n >>> ss.price\n 10.0\n '''\n # Missing stream, note that if to make updates here,\n # it's likely that `WasteStream.from_stream` should be updated as well.\n if isinstance(stream, MissingStream):\n new = MissingSanStream.__new__(MissingSanStream)\n new.__init__(stream._source, stream._sink)\n return new\n # An actual stream\n if not isinstance(stream, cls):\n if not ID:\n stream.registry.discard(stream)\n # stream.registry.untrack((stream,))\n new = cls.__new__(cls)\n new_ID = ID if ID else stream.ID\n if new_ID[0]=='s' and new_ID[1:].isnumeric(): # old ID is default\n new_ID = ''\n new.__init__(ID=new_ID)\n\n source = new._source = stream._source\n if source:\n source._outs[source._outs.index(stream)] = new\n\n sink = new._sink = stream._sink\n if sink:\n sink._ins[sink._ins.index(stream)] = new\n\n new._thermo = stream._thermo\n new._imol = stream._imol.copy()\n new._thermal_condition = stream._thermal_condition.copy()\n new.reset_cache()\n new.price = 0\n new.stream_impact_item = None\n\n for attr, val in kwargs.items():\n setattr(new, attr, val)\n\n stream._sink = stream._source = None\n return new\n else:\n return stream\n\n def mix_from(self, others, **kwargs):\n '''\n Update this stream to be a mixture of other streams,\n initial content of this stream will be ignored.\n\n Parameters\n ----------\n others : Iterable(obj)\n Can contain :class:`thermosteam.Stream`, 
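# A hedged usage sketch of the `degassing` and `filtering` static helpers
# above, passing explicit component ID lists (the component choices are
# illustrative). Note that when no IDs are given, `filtering` selects
# particulate/solid components (locked_state 's') per the implementation,
# even though its docstring wording mentions gas components.
import qsdsan as qs

cmps = qs.Components.load_default()
qs.set_thermo(cmps)
mixed = qs.WasteStream('ws_mixed', H2O=1000, S_O2=1, X_B_Subst=5, units='kg/hr')
gas = qs.WasteStream('ws_gas')
solids = qs.WasteStream('ws_solids')

qs.SanStream.degassing(mixed, receiving_stream=gas, gas_IDs=('S_O2',))
qs.SanStream.filtering(mixed, receiving_stream=solids, solid_IDs=('X_B_Subst',))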
:class:`SanStream`,\n or :class:`~.WasteStream`\n\n .. note::\n\n Price and impact item are not included.\n\n\n Examples\n --------\n >>> import qsdsan as qs\n >>> cmps = qs.Components.load_default()\n >>> qs.set_thermo(cmps)\n >>> s1 = qs.Stream('s1', H2O=100, price=5, units='kg/hr')\n >>> s2 = qs.SanStream('s2', S_O2=100, units='kg/hr')\n >>> s3 = qs.SanStream('s3')\n >>> s3.mix_from((s1, s2))\n >>> s3.show()\n SanStream: s3\n phase: 'l', T: 298.15 K, P: 101325 Pa\n flow (kmol/hr): S_O2 3.13\n H2O 5.55\n '''\n\n others = [s for s in others if not 'Missing' in type(s).__name__]\n Stream.mix_from(self, others, **kwargs)\n if not hasattr(self, '_stream_impact_item'):\n self._stream_impact_item = None\n\n\n @property\n def stream_impact_item(self):\n '''[:class:`StreamImpactItem`] The :class:`StreamImpactItem` this stream is linked to.'''\n return self._stream_impact_item\n @stream_impact_item.setter\n def stream_impact_item(self, i):\n self._stream_impact_item = i\n if i:\n i.linked_stream = self\n\n @property\n def impact_item(self):\n '''Same as `stream_impact_item`, has been deprecated.'''\n warn('The property `impact_item` has been changed to `stream_impact_item`, '\n 'please use `stream_impact_item` instead.')\n return self.stream_impact_item\n\n @property\n def components(self):\n return self.chemicals\n\n\n# %%\n\nclass MissingSanStream(MissingStream):\n '''\n A subclass of :class:`biosteam.MissingStream`, create a special object\n that acts as a dummy until replaced by an actual :class:`SanStream`.\n\n .. note::\n\n Users usually do not need to interact with this class.\n\n '''\n line = 'SanStream'\n\n def materialize_connection(self, ID=''):\n '''\n Disconnect this missing stream from any unit operations and\n replace it with a material stream.\n '''\n source = self._source\n sink = self._sink\n if not (source or sink):\n raise RuntimeError(\"either a source or a sink is required to \"\n \"materialize connection\")\n material_stream = SanStream(ID, thermo=(source or sink).thermo)\n if source: source._outs.replace(self, material_stream)\n if sink: sink._ins.replace(self, material_stream)\n return material_stream\n\n @property\n def stream_impact_item(self):\n return None\n\n @property\n def impact_item(self):\n '''Same as `stream_impact_item`, has been deprecated.'''\n warn('The property `impact_item` has been changed to `stream_impact_item`, '\n 'please use `stream_impact_item` instead.')\n return self.stream_impact_item\n\n def __repr__(self):\n return '<MissingSanStream>'\n\n def __str__(self):\n return 'missing sanstream'", "id": "4038884", "language": "Python", "matching_score": 1.3345017433166504, "max_stars_count": 2, "path": "qsdsan/_sanstream.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\nQSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems\n\nThis module is developed by:\n <NAME> <<EMAIL>>\n <NAME> <<EMAIL>>\n\nThis module is under the University of Illinois/NCSA Open Source License.\nPlease refer to https://github.com/QSD-Group/QSDsan/blob/main/LICENSE.txt\nfor license details.\n'''\n\n# Check system environment, Python 3.7 and below will have issues unpickling saved results\nimport sys\npy_version = sys.version.split('.')\n_PY_MAJOR, _PY_MINOR = int(py_version[0]), int(py_version[1])\n\nif (_PY_MAJOR, _PY_MINOR) <= (3, 7): # pragma: no cover\n from warnings import warn\n if (_PY_MAJOR, _PY_MINOR) >= (3, 5):\n try: import pickle5 as _pk\n except ModuleNotFoundError:\n warn(f'Python version 
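# A hedged sketch of linking an impact item to a stream through the
# `stream_impact_item` property above; `StreamImpactItem(linked_stream=...)`
# mirrors the LCA docstring earlier in this record. Kept as comments because
# it assumes a thermo set and a 'GWP' indicator have already been defined.

# ws = qs.SanStream('ws_link', H2O=100, units='kg/hr')
# item = qs.StreamImpactItem(linked_stream=ws, GWP=2)   # linked at creation
# ws.stream_impact_item = item                          # or link via the setter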
{_PY_MAJOR}.{_PY_MINOR} does not support Pickle Protocol 5, '\n 'installing `pickle5` by running `pip install pickle5` in your '\n 'command/Anaconda prompt or terminal can reduce the loading time.\\n'\n 'For further information, check https://pypi.org/project/pickle5/.')\n _pk = None\n else:\n warn(f'Python version {_PY_MAJOR}.{_PY_MINOR} does not support Pickle Protocol 5, '\n 'and will have slower speed in when loading the default processes.')\n _pk = None\n del warn\nelse:\n import pickle as _pk\n\n\nimport pkg_resources\ntry:\n __version__ = pkg_resources.get_distribution('qsdsan').version\nexcept pkg_resources.DistributionNotFound: # pragma: no cover\n __version__ = None\n\ndel sys, py_version, pkg_resources\n\n\nimport thermosteam as tmo\nimport biosteam as bst\nChemical = tmo.Chemical\nChemicals = tmo.Chemicals\nCompiledChemicals = tmo.CompiledChemicals\nStream = tmo.Stream\nMultiStream = tmo.MultiStream\nset_thermo = tmo.settings.set_thermo\nget_components = tmo.settings.get_chemicals\nget_thermo = tmo.settings.get_thermo\n\nPowerUtility = bst.PowerUtility\nUnit = bst.Unit\nSystem = bst.System\nScope = bst.utils.Scope\nModel = bst.Model\nMetric = bst.Metric\nParameter = bst.Parameter\nFlowsheet = bst.Flowsheet\nmain_flowsheet = bst.main_flowsheet\nCEPCI = bst.CE # Chemical Engineering Plant Cost Index\nCEPCI_by_year = bst.units.design_tools.CEPCI_by_year\ndel tmo, bst\n\ncurrency = 'USD'\n\nfrom . import utils\nfrom ._component import *\nfrom ._components import *\nfrom ._sanstream import *\nfrom ._waste_stream import *\nfrom ._process import *\nfrom ._impact_indicator import *\nfrom ._impact_item import *\nfrom ._construction import *\nfrom ._equipment import *\nfrom ._transportation import *\nfrom ._sanunit import *\nfrom ._simple_tea import *\nfrom ._lca import *\n\n\nfrom . 
import (\n _component,\n _components,\n _sanstream,\n _waste_stream,\n _process,\n _impact_indicator,\n _impact_item,\n _construction,\n _equipment,\n _transportation,\n _sanunit,\n _simple_tea,\n _lca,\n processes,\n equipments,\n sanunits,\n stats,\n )\n\nutils._secondary_importing()\nfor _slot in utils.doc_examples.__all__:\n setattr(utils, _slot, getattr(utils.doc_examples, _slot))\n\n# Add the `pump` decorator to the util module\nfrom .sanunits import wwtpump\nutils.__all__ = (*utils.__all__, 'wwtpump')\nsetattr(utils, 'wwtpump', wwtpump)\n\n\n__all__ = (\n *_component.__all__,\n *_components.__all__,\n *_sanstream.__all__,\n *_waste_stream.__all__,\n *_process.__all__,\n *_impact_indicator.__all__,\n *_impact_item.__all__,\n *_construction.__all__,\n *_transportation.__all__,\n *_equipment.__all__,\n *_sanunit.__all__,\n *_simple_tea.__all__,\n *_lca.__all__,\n )", "id": "1102593", "language": "Python", "matching_score": 1.5189661979675293, "max_stars_count": 2, "path": "qsdsan/__init__.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\nQSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems\n\nThis module is developed by:\n <NAME> <<EMAIL>>\n\nThis module is under the University of Illinois/NCSA Open Source License.\nPlease refer to https://github.com/QSD-Group/QSDsan/blob/main/LICENSE.txt\nfor license details.\n'''\n\nimport numpy as np\nimport biosteam as bst\nimport qsdsan as qs\nfrom numpy.testing import assert_allclose\n\n__all__ = (\n 'test_HXprocess',\n 'test_HXutility',\n 'test_MixTank',\n 'test_Pump',\n 'test_Splitter',\n 'test_StorageTank',\n )\n\n\nbst.default_utilities()\nchems = bst.Chemicals(('Methanol', 'Ethanol'))\n\nws_data = {\n 'particle_size': 'Soluble',\n 'degradability': 'Readily',\n 'organic': True\n }\ncmps = qs.Components((qs.Component('Methanol', search_ID='Methanol', **ws_data),\n qs.Component('Ethanol', search_ID='Ethanol', **ws_data)))\n\n\ndef create_streams(num):\n bst.settings.set_thermo(chems)\n bst_s = []\n for n in range(num):\n s = bst.Stream(Methanol=100*(n+1), Ethanol=100*(n+1), units='kg/hr')\n bst_s.append(s)\n\n qs.set_thermo(cmps)\n qs_ws = []\n for n in range(num):\n ws = qs.WasteStream(Methanol=100*(n+1), Ethanol=100*(n+1), units='kg/hr')\n qs_ws.append(ws)\n\n return bst_s, qs_ws\n\n\ndef check_results(bst_unit, qs_unit):\n bst_unit.simulate()\n qs_unit.simulate()\n\n bst_s = bst_unit.ins + qs_unit.outs\n qs_ws = qs_unit.ins + qs_unit.outs\n for n, s in enumerate(bst_s):\n assert_allclose(np.abs(s.mol-qs_ws[n].mol).sum(), 0, atol=1e-6)\n\n assert_allclose(bst_unit.installed_cost, qs_unit.installed_cost, atol=1e-6)\n assert_allclose(bst_unit.utility_cost, qs_unit.utility_cost, atol=1e-6)\n assert_allclose(bst_unit.power_utility.rate, qs_unit.power_utility.rate, atol=1e-6)\n\n\n\n# %%\n\n# =============================================================================\n# Testing functions\n# =============================================================================\n\ndef test_HXprocess():\n bst_s, qs_ws = create_streams(2)\n bst_s[0].T = qs_ws[0].T = 400\n\n bst.settings.set_thermo(chems)\n bst_unit = bst.units.HXprocess(ins=bst_s, phase0='l', phase1='l')\n\n qs.set_thermo(cmps)\n qs_unit = qs.sanunits.HXprocess(ins=qs_ws, phase0='l', phase1='l')\n\n check_results(bst_unit, qs_unit)\n\n\ndef test_HXutility():\n bst_s, qs_ws = create_streams(1)\n bst.settings.set_thermo(chems)\n bst_unit = bst.units.HXutility(ins=bst_s, T=400, rigorous=False) #!!! 
Try True\n\n qs.set_thermo(cmps)\n qs_unit = qs.sanunits.HXutility(ins=qs_ws, T=400, rigorous=False) #!!! Try True\n\n check_results(bst_unit, qs_unit)\n\n\ndef test_MixTank():\n bst_s, qs_ws = create_streams(2)\n bst.settings.set_thermo(chems)\n bst_unit = bst.units.MixTank(ins=bst_s)\n\n qs.set_thermo(cmps)\n qs_unit = qs.sanunits.MixTank(ins=qs_ws)\n\n check_results(bst_unit, qs_unit)\n\n\ndef test_Pump():\n bst_s, qs_ws = create_streams(1)\n bst.settings.set_thermo(chems)\n bst_unit = bst.units.Pump(ins=bst_s)\n\n qs.set_thermo(cmps)\n qs_unit = qs.sanunits.Pump(ins=qs_ws)\n\n check_results(bst_unit, qs_unit)\n\n\ndef test_Splitter():\n bst_s, qs_ws = create_streams(1)\n bst.settings.set_thermo(chems)\n bst_unit = bst.units.Splitter(ins=bst_s, split=0.1)\n\n qs.set_thermo(cmps)\n qs_unit = qs.sanunits.Splitter(ins=qs_ws, split=0.1)\n\n check_results(bst_unit, qs_unit)\n\n\ndef test_StorageTank():\n bst_s, qs_ws = create_streams(1)\n bst.settings.set_thermo(chems)\n bst_unit = bst.units.StorageTank(ins=bst_s)\n\n qs.set_thermo(cmps)\n qs_unit = qs.sanunits.StorageTank(ins=qs_ws)\n\n check_results(bst_unit, qs_unit)\n\n\nif __name__ == '__main__':\n test_HXprocess()\n test_HXutility()\n test_MixTank()\n test_Pump()\n test_Splitter()\n test_StorageTank()", "id": "9789995", "language": "Python", "matching_score": 2.0831727981567383, "max_stars_count": 2, "path": "tests/test_bst_units_.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\nQSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems\n\nThis module is developed by:\n <NAME> <<EMAIL>>\n <NAME> <<EMAIL>>\n\nThis module is under the University of Illinois/NCSA Open Source License.\nPlease refer to https://github.com/QSD-Group/QSDsan/blob/master/LICENSE.txt\nfor license details.\n'''\n\n__all__ = ('test_waste_stream',)\n\ndef test_waste_stream():\n import pytest, numpy as np\n from numpy.testing import assert_allclose\n from math import isclose\n from qsdsan import set_thermo, Components, WasteStream\n\n components = Components.load_default()\n set_thermo(components)\n\n ws1 = WasteStream.codstates_inf_model('ws1', 1e5)\n ws2 = WasteStream.codstates_inf_model('ws2', 1e5*24/1e3, units=('m3/d', 'g/m3'))\n assert isclose(ws1.COD, 430, rel_tol=1e-2)\n assert isclose(ws1.TKN, 40, rel_tol=1e-2)\n assert isclose(ws1.TP, 10, rel_tol=1e-2)\n assert isclose(ws1.F_vol, ws2.F_vol)\n\n ws3 = WasteStream(S_Ac=5, H2O=1000, units='kg/hr')\n ws4 = WasteStream(X_NOO=10, H2O=1000, units='kg/hr')\n ws5 = WasteStream()\n ws5.mix_from((ws3, ws4))\n assert_allclose(ws5.F_mass, 2015.0)\n # TODO: After updating the default component properties,\n # add in tests here to make sure COD, etc. 
are calculated correctly\n assert_allclose(ws5.COD, 7414.267796, rtol=1e-2)\n\n # Make sure below attributes are calculated based on flow info, cannot be set\n with pytest.raises(AttributeError):\n ws5.COD = 5\n\n # Concentration calclation\n ws6 = WasteStream(X_CaCO3=1, H2O=1000, units='kg/hr')\n assert_allclose(np.abs(ws6.conc.value-ws6.mass/ws6.F_vol*1e3).sum(), 0, atol=1e-6)\n ws6.imass['X_B_Subst', 'X_GAO_PHA'] = (100, 1)\n ws7 = WasteStream(X_CaCO3=1, X_B_Subst=100, X_GAO_PHA=1, H2O=1000, units='kg/hr')\n assert_allclose(np.abs(ws6.conc.value-ws7.mass/ws7.F_vol*1e3).sum(), 0, atol=1e-6)\n ws6.mass[:] = 1e-3\n ws6.imass['H2O'] = 1e3\n diff = ws6.conc.value - np.ones_like(ws6.conc.value)\n diff[components.index('H2O')] = 0\n assert_allclose(np.max(np.abs(diff)), 0, atol=1e-2)\n\n\nif __name__ == '__main__':\n test_waste_stream()", "id": "12002592", "language": "Python", "matching_score": 1.0421104431152344, "max_stars_count": 2, "path": "tests/test_waste_stream_.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\nQSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems\n\nThis module is developed by:\n <NAME> <<EMAIL>>\n <NAME> <<EMAIL>>\n\nThis module is under the University of Illinois/NCSA Open Source License.\nPlease refer to https://github.com/QSD-Group/QSDsan/blob/main/LICENSE.txt\nfor license details.\n'''\n\n# Add a trailing \"_\" to differentiate the module from\n# the functions within the module\n\nfrom . import (\n # QSDsan modules, alphabetically\n test_bst_units_,\n test_component_,\n test_dyn_sys_,\n test_process_,\n test_sanunit_,\n test_waste_stream_,\n # EXPOsan systems\n test_exposan_,\n )\n\n\nfrom .test_bst_units_ import *\nfrom .test_component_ import *\nfrom .test_dyn_sys_ import *\nfrom .test_process_ import *\nfrom .test_sanunit_ import *\nfrom .test_waste_stream_ import *\n\nfrom .test_exposan_ import *\n\n\n__all__ = (\n *test_bst_units_.__all__,\n *test_component_.__all__,\n *test_dyn_sys_.__all__,\n *test_process_.__all__,\n *test_sanunit_.__all__,\n *test_waste_stream_.__all__,\n\n *test_exposan_.__all__,\n )", "id": "2581847", "language": "Python", "matching_score": 0.10416092723608017, "max_stars_count": 2, "path": "tests/__init__.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\nQSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems\n\nThis module is developed by:\n <NAME> <<EMAIL>>\n <NAME> <<EMAIL>>\n\nPart of this module is based on the biosteam package:\nhttps://github.com/BioSTEAMDevelopmentGroup/biosteam\n\nThis module is under the University of Illinois/NCSA Open Source License.\nPlease refer to https://github.com/QSD-Group/QSDsan/blob/main/LICENSE.txt\nfor license details.\n'''\n\nimport numpy as np\nfrom math import pi, ceil\nfrom biosteam.units import Pump\nfrom biosteam.units.design_tools.mechanical import (\n brake_efficiency as brake_eff,\n motor_efficiency as motor_eff\n )\nfrom .. 
import SanUnit\nfrom ..utils import auom, select_pipe, format_str\n\n__all__ = ('Pump', 'HydraulicDelay', 'WWTpump', 'wwtpump')\n\n\nclass Pump(SanUnit, Pump):\n '''\n Similar to the :class:`biosteam.units.Pump`,\n but can be initialized with :class:`qsdsan.SanStream` and :class:`qsdsan.WasteStream`,\n and allows dynamic simulation.\n\n See Also\n --------\n `biosteam.units.Pump <https://biosteam.readthedocs.io/en/latest/units/Pump.html>`_\n '''\n def __init__(self, ID='', ins=None, outs=(), thermo=None, *,\n P=None, pump_type='Default', material='Cast iron',\n dP_design=405300, ignore_NPSH=True,\n init_with='Stream', F_BM_default=None, isdynamic=False):\n SanUnit.__init__(self, ID, ins, outs, thermo,\n init_with=init_with, F_BM_default=F_BM_default,\n isdynamic=isdynamic)\n self.P = P\n self.pump_type = pump_type\n self.material = material\n self.dP_design = dP_design\n self.ignore_NPSH = ignore_NPSH\n\n @property\n def state(self):\n '''The state of the Pump, including component concentrations [mg/L] and flow rate [m^3/d].'''\n if self._state is None: return None\n else:\n return dict(zip(list(self.components.IDs) + ['Q'], self._state))\n\n def _init_state(self):\n self._state = self._ins_QC[0]\n self._dstate = self._state * 0.\n\n def _update_state(self):\n '''updates conditions of output stream based on conditions of the Mixer'''\n self._outs[0].state = self._state\n\n def _update_dstate(self):\n '''updates rates of change of output stream from rates of change of the Mixer'''\n self._outs[0].dstate = self._dstate\n\n @property\n def AE(self):\n if self._AE is None:\n self._compile_AE()\n return self._AE\n\n def _compile_AE(self):\n _state = self._state\n _dstate = self._dstate\n _update_state = self._update_state\n _update_dstate = self._update_dstate\n def yt(t, QC_ins, dQC_ins):\n _state[:] = QC_ins[0]\n _dstate[:] = dQC_ins[0]\n _update_state()\n _update_dstate()\n self._AE = yt\n\n\n# %%\n\nclass HydraulicDelay(Pump):\n '''\n A fake unit for implementing hydraulic delay by a first-order reaction\n (i.e., a low-pass filter) with a specified time constant [d].\n\n See Also\n --------\n `Benchmark Simulation Model No.1 implemented in MATLAB & Simulink <https://www.cs.mcgill.ca/~hv/articles/WWTP/sim_manual.pdf>`\n '''\n def __init__(self, ID='', ins=None, outs=(), thermo=None, t_delay=1e-4, *,\n init_with='WasteStream', F_BM_default=None, isdynamic=True):\n SanUnit.__init__(self, ID, ins, outs, thermo,\n init_with=init_with, F_BM_default=F_BM_default,\n isdynamic=isdynamic)\n self.t_delay = t_delay\n self._concs = None\n\n def set_init_conc(self, **kwargs):\n '''set the initial concentrations [mg/L].'''\n Cs = np.zeros(len(self.components))\n cmpx = self.components.index\n for k, v in kwargs.items(): Cs[cmpx(k)] = v\n self._concs = Cs\n\n def _init_state(self):\n '''initialize state by specifying or calculating component concentrations\n based on influents. 
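# A hedged usage sketch of the `HydraulicDelay` unit defined here: in steady
# state it passes the influent through unchanged, while its ODE smooths
# dynamic inputs with time constant `t_delay` [d]. The stream and component
# names below are assumptions, so the lines are left as comments.

# HD1 = HydraulicDelay('HD1', ins=upstream_ws, t_delay=1e-4)
# HD1.set_init_conc(S_S=5, X_BH=10)   # initial concentrations, [mg/L]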
Total flow rate is always initialized as the sum of\n influent wastestream flows.'''\n self._state = self._ins_QC[0]\n self._dstate = self._state * 0\n if self._concs is not None:\n self._state[:-1] = self._concs\n\n def _run(self):\n s_in, = self.ins\n s_out, = self.outs\n s_out.copy_like(s_in)\n\n @property\n def ODE(self):\n if self._ODE is None:\n self._compile_ODE()\n return self._ODE\n\n def _compile_ODE(self):\n T = self.t_delay\n _dstate = self._dstate\n _update_dstate = self._update_dstate\n def dy_dt(t, QC_ins, QC, dQC_ins):\n Q_in = QC_ins[0,-1]\n Q = QC[-1]\n C_in = QC_ins[0,:-1]\n C = QC[:-1]\n if dQC_ins[0,-1] == 0:\n _dstate[-1] = 0\n _dstate[:-1] = (Q_in*C_in - Q*C)/(Q*T)\n else:\n _dstate[-1] = (Q_in - Q)/T\n _dstate[:-1] = Q_in/Q*(C_in - C)/T\n _update_dstate()\n self._ODE = dy_dt\n\n def _design(self):\n pass\n\n def _cost(self):\n pass\n\n\n# %%\n\n_hp_to_kW = auom('hp').conversion_factor('kW')\n_lb_to_kg = auom('lb').conversion_factor('kg')\n_ft_to_m = auom('ft').conversion_factor('m')\n_ft3_to_gal = auom('ft3').conversion_factor('gallon')\n_m3_to_gal = auom('m3').conversion_factor('gallon')\nF_BM_pump = 1.18*(1+0.007/100) # 0.007 is for miscellaneous costs\ndefault_F_BM = {\n 'Pump': F_BM_pump,\n 'Pump building': F_BM_pump,\n }\ndefault_equipment_lifetime = {\n 'Pump': 15,\n 'Pump pipe stainless steel': 15,\n 'Pump stainless steel': 15,\n 'Pump chemical storage HDPE': 30,\n }\n\nclass WWTpump(SanUnit):\n '''\n Generic class for pumps used in wastewater treatment, [1]_\n all pumps are assumed be made of stainless steel.\n\n This class is intended to be used as a part of other units\n (e.g., :class:`~.AnMBR`), but it can be used as a standalone unit.\n\n Note that pump building concrete usage and excavation is not included here\n as pumps are often housed together with the reactors.\n\n Parameters\n ----------\n prefix : str\n If provided, all keys in design and cost dicts will be prefixed with\n the provided string.\n pump_type : str\n The type of the pump that determines the design algorithms to use.\n The following types are valid:\n\n - \"permeate_cross-flow\"\n - \"retentate_CSTR\"\n - \"retentate_AF\"\n - \"recirculation_CSTR\"\n - \"recirculation_AF\"\n - \"lift\"\n - \"sludge\"\n - \"chemical\"\n - \"\" (i.e., empty)\n\n When left as empty, the generic algorithm will be used and the following\n values should be included in `add_inputs` (in this order):\n\n - N_pump: number of pumps\n - L_s: pipe length of the suction side, [ft]\n - L_d: pipe length of the discharge side, [ft]\n - H_ts: total static head, [ft]\n - H_p: pressure head, [ft]\n\n Q_mgd : float\n Volumetric flow rate in million gallon per day, [mgd].\n Will use total volumetric flow through the unit if not provided.\n add_inputs : Iterable\n Additional inputs that will be passed to the corresponding design algorithm.\n Check the documentation of for the corresponding pump type\n for the design algorithm of the specific input requirements.\n capacity_factor : float\n A safety factor to handle peak flows.\n include_pump_cost : bool\n Whether to include pump cost.\n include_building_cost : bool\n Whether to include the cost of the pump building.\n include_OM_cost : bool\n Whether to include the operating and maintenance cost of the pump.\n F_BM : dict\n Bare module factors of the individual equipment.\n lifetime : dict\n Lifetime of the individual equipment.\n kwargs : dict\n Other attributes to be set.\n\n References\n ----------\n .. 
[1] Shoener et al., Design of Anaerobic Membrane Bioreactors for the\n Valorization of Dilute Organic Carbon Waste Streams.\n Energy Environ. Sci. 2016, 9 (3), 1102–1112.\n https://doi.org/10.1039/C5EE03715H.\n\n See Also\n --------\n :class:`~.AnMBR`\n '''\n _N_ins = 1\n _N_outs = 1\n\n _N_pump = 1\n _H_ts = 0. # total static head\n _H_p = 0. # total pressure head\n _H_sf = 0. # suction friction\n _H_df = 0. # discharge friction\n _v = 3 # fluid velocity, [ft/s]\n _C = 110 # Hazen-Williams coefficient for stainless steel (SS)\n\n # Pump SS (for pumps within 300-1000 gpm)\n # http://www.godwinpumps.com/images/uploads/ProductCatalog_Nov_2011_spread2.pdf\n # assume 50% of the product weight is SS\n _SS_per_pump = 725 * 0.5\n _building_unit_cost = 90 # [$/ft2]\n\n _units = {\n 'Pump pipe stainless steel': 'kg',\n 'Pump stainless steel': 'kg',\n 'Pump chemical storage HDPE': 'm3',\n }\n\n _valid_pump_types = (\n 'permeate_cross-flow',\n 'retentate_CSTR',\n 'retentate_AF',\n 'recirculation_CSTR',\n 'recirculation_AF',\n 'lift',\n 'sludge',\n 'chemical',\n '',\n )\n\n def __init__(self, ID='', ins=None, outs=(), thermo=None,\n init_with='WasteStream',\n prefix='', pump_type='', Q_mgd=None, add_inputs=(),\n capacity_factor=1.,\n include_pump_cost=True, include_building_cost=False,\n include_OM_cost=False,\n F_BM=default_F_BM,\n lifetime=default_equipment_lifetime,\n **kwargs):\n SanUnit.__init__(self, ID, ins, outs, thermo, init_with=init_with)\n self.pump_type = pump_type\n self.Q_mgd = Q_mgd\n try: iter(add_inputs)\n except: add_inputs = (add_inputs,)\n self.add_inputs = add_inputs\n self.capacity_factor = capacity_factor\n self.include_pump_cost = include_pump_cost\n self.include_building_cost = include_building_cost\n self.include_OM_cost = include_OM_cost\n self.F_BM.update(F_BM)\n self._default_equipment_lifetime.update(lifetime)\n\n self.prefix = prefix\n if prefix:\n self._units = {prefix+' '+[k][0].lower()+k[1:]:v for k, v in self._units.items()}\n self.F_BM = {prefix+' '+[k][0].lower()+k[1:]:v for k, v in self.F_BM.items()}\n self._default_equipment_lifetime = \\\n {prefix+' '+[k][0].lower()+k[1:]:v for k, v in self._default_equipment_lifetime.items()}\n\n for attr, val in kwargs.items(): setattr(self, attr, val)\n\n def _run(self):\n self.outs[0].copy_like(self.ins[0])\n\n def _format_key_start_with_prefix(self, start):\n return start.upper() if not self.prefix else self.prefix+' '+start.lower()\n\n def _design(self):\n pump_type = format_str(self.pump_type)\n if not pump_type:\n pipe, pumps = self._design_generic(self.Q_mgd, *self.add_inputs)\n hdpe = 0.\n else:\n design_func = getattr(self, f'design_{pump_type}')\n pipe, pumps, hdpe = design_func()\n\n D = self.design_results\n start = self._format_key_start_with_prefix('P')\n D[f'{start}ump pipe stainless steel'] = pipe\n D[f'{start}ump stainless steel'] = pumps\n if hdpe:\n D[f'{start}ump chemical storage HDPE'] = hdpe\n self._units[f'{start}ump chemical storage HDPE'] = 'm3'\n else:\n try:\n self._units.pop(f'{start}ump chemical storage HDPE')\n except KeyError:\n pass\n try:\n self.design_results.pop(f'{start}ump chemical storage HDPE')\n except KeyError:\n pass\n\n\n def _cost(self):\n C = self.baseline_purchase_costs\n C.clear()\n add_OPEX = self.add_OPEX\n add_OPEX.clear()\n Q_mgd, capacity_factor = self.Q_mgd, self.capacity_factor\n\n start = self._format_key_start_with_prefix('P')\n C[f'{start}ump'] = C[f'{start}ump building'] = 0.\n add_OPEX[f'{start}ump operating'] = add_OPEX[f'{start}ump maintenance'] = 0.\n # Pump\n if 
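# A hedged sketch of using `WWTpump` as a standalone unit with the generic
# (empty) pump_type, in which case `add_inputs` supplies
# (N_pump, L_s, L_d, H_ts, H_p) as listed in the docstring above. The
# component set, influent, and pipe/head numbers are assumptions for
# illustration; in practice the class is usually embedded in other units.
import qsdsan as qs

cmps = qs.Components.load_default()
qs.set_thermo(cmps)
influent = qs.WasteStream('pump_in', H2O=50000, units='kg/hr')
P1 = qs.sanunits.WWTpump(
    'P1', ins=influent,
    pump_type='',                        # use the generic design algorithm
    Q_mgd=None,                          # default to the unit's own throughput
    add_inputs=(1, 50., 50., 10., 0.),   # N_pump, L_s [ft], L_d [ft], H_ts [ft], H_p [ft]
    )
P1.simulate()
# P1.results()    # tabulated design (stainless steel mass) and cost results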
self.include_pump_cost:\n C[f'{start}ump'] = 2.065e5 + 7.721*1e4*Q_mgd # fitted curve\n\n # Operations and maintenance\n if self.include_OM_cost:\n FPC = capacity_factor * Q_mgd # firm pumping capacity\n O = M = 0. # USD/yr\n if 0 < FPC <= 7:\n O = 440*25*FPC**0.1285\n M = 360*25*FPC**0.1478\n elif 7 < FPC <= 41:\n O = 294.4*25*FPC**0.3335\n M = 255.2*25*FPC**0.3247\n elif 41 < FPC <= 80:\n O = 40.5*25*FPC**0.8661\n M = 85.7*25*FPC**0.6456\n else:\n O = 21.3*25*FPC**1.012\n M = 30.6*25*FPC**0.8806\n self.add_OPEX = {\n f'{start}ump operating': O/365/24,\n f'{start}ump maintenance': M/365/24,\n }\n\n # Pump building\n if self.include_building_cost:\n # Design capacity of intermediate pumps, gpm,\n GPM = capacity_factor * Q_mgd * 1e6 / 24 / 60\n if GPM == 0:\n N = 0\n else:\n N = 1 # number of buildings\n GPMi = GPM\n while GPMi > 80000:\n N += 1\n GPMi = GPM / N\n PBA = N * (0.0284*GPM+640) # pump building area, [ft2]\n C[f'{start}ump building'] = PBA * self.building_unit_cost\n\n self.power_utility.consumption = self.BHP/self.motor_efficiency * _hp_to_kW\n\n\n # Generic algorithms that will be called by all design functions\n def _design_generic(self, Q_mgd, N_pump=None, L_s=0., L_d=0., H_ts=0., H_p=0.):\n self.Q_mgd = Q_mgd\n self._H_ts = H_ts or self.H_ts\n self._H_p = H_p or self.H_p\n N_pump = N_pump or self.N_pump\n\n v, C, Q_cfs = self.v, self.C, self.Q_cfs # [ft/s], -, [ft3/s]\n\n ### Suction side ###\n # Suction pipe (permeate header) dimensions\n OD_s, t_s, ID_s = select_pipe(Q_cfs/N_pump, v) # [in]\n\n # Suction friction head, [ft]\n self._H_sf = 3.02 * L_s * (v**1.85) * (C**(-1.85)) * ((ID_s/12)**(-1.17))\n\n ### Discharge side ###\n # Discharge pipe (permeate collector) dimensions\n OD_d, t_d, ID_d = select_pipe(Q_cfs, v)\n\n # Discharge friction head, [ft]\n self._H_df = 3.02 * L_d * (v**1.85) * (C**(-1.85)) * ((ID_d/12)**(-1.17))\n\n ### Material usage ###\n # Pipe SS, assume stainless steel, density = 0.29 lbs/in3\n # SS volume for suction, [in3]\n self._N_pump = N_pump\n V_s = N_pump * pi/4*((OD_s)**2-(ID_s)**2) * (L_s*12)\n # SS volume for discharge, [in3]\n V_d = pi/4*((OD_d)**2-(ID_d)**2) * (L_d*12)\n\n # Total SS mass, [kg]\n M_SS_pipe = 0.29 * (V_s+V_d) * _lb_to_kg\n M_SS_pump = N_pump * self.SS_per_pump\n return M_SS_pipe, M_SS_pump\n\n\n def design_permeate_cross_flow(self, Q_mgd=None, N_pump=None, D=None,\n TMP=None, include_aerobic_filter=False,\n **kwargs):\n '''\n Design pump for the permeate stream of cross-flow membrane configuration.\n\n Parameters defined through the `add_inputs` argument upon initialization of\n this unit (Q_mgd listed separately) will be used if not provided\n when calling this function.\n\n Parameters\n ----------\n Q_mgd : float\n Volumetric flow rate in million gallon per day, [mgd].\n N_pump : int\n Number of the pumps.\n D : float\n Depth of the reactor, [ft].\n TMP : float\n Transmembrane pressure, [psi].\n include_aerobic_filter : bool\n Whether aerobic filter is included in the reactor design,\n additional head will be added if the filter is included.\n kwargs : dict\n Additional attribute values to set (e.g., `L_s`, `H_ts`),\n this will overwrite the default values.\n '''\n add_inputs = self.add_inputs\n Q_mgd = Q_mgd or self.Q_mgd\n N_pump = N_pump or add_inputs[0]\n D = D or add_inputs[1]\n TMP = TMP or add_inputs[2]\n include_aerobic_filter = include_aerobic_filter or add_inputs[3]\n\n H_ts_PERM = D if include_aerobic_filter else 0\n\n val_dct = dict(\n L_s=20, # based on a 30-module unit with a total length of 6 m, [ft]\n 
L_d=10*N_pump, # based on a 30-module unit with a total width of 1.6 m and extra space, [ft]\n H_ts=H_ts_PERM, # H_ds_PERM (D_tank) - H_ss_PERM (0 or D_tank)\n H_p=TMP*2.31 # TMP in water head, [ft], comment below on 2.31\n )\n val_dct.update(kwargs)\n M_SS_IR_pipe, M_SS_IR_pump = self._design_generic(\n Q_mgd=Q_mgd, N_pump=N_pump, **val_dct)\n\n # # factor = 2.31 calculated by\n # factor = auom('psi').conversion_factor('Pa') # Pa is kg/m/s2, now in [Pa]\n # factor /= 9.81 # divided by the standard gravity in m/s2, now in [kg/m2]\n # factor /= 1e3 # divided by water's density in kg/m3, now in [m]\n # factor *= auom('m').conversion_factor('ft') # m to ft\n\n return M_SS_IR_pipe, M_SS_IR_pump, 0\n\n\n def design_retentate_CSTR(self, Q_mgd=None, N_pump=None, **kwargs):\n '''\n Design pump for the retent stream of CSTR reactors.\n\n Parameters defined through the `add_inputs` argument upon initialization of\n this unit (Q_mgd listed separately) will be used if not provided\n when calling this function.\n\n Parameters\n ----------\n Q_mgd : float\n Volumetric flow rate in million gallon per day, [mgd].\n N_pump : int\n Number of the pumps.\n kwargs : dict\n Additional attribute values to set (e.g., `L_s`, `H_ts`),\n this will overwrite the default values.\n '''\n Q_mgd = Q_mgd or self.Q_mgd\n N_pump = N_pump or self.add_inputs[0]\n\n val_dct = dict(\n L_s=100, # pipe length per module\n L_d=30, # pipe length per module (same as the discharge side of lift pump)\n H_ts=0., # H_ds_IR (D_tank) - H_ss_IR (D_tank)\n H_p=0. # no pressure\n )\n val_dct.update(kwargs)\n\n M_SS_IR_pipe, M_SS_IR_pump = self._design_generic(\n Q_mgd=Q_mgd, N_pump=N_pump, **val_dct)\n\n return M_SS_IR_pipe, M_SS_IR_pump, 0\n\n\n def design_retentate_AF(self, Q_mgd=None, N_pump=None, D=None, **kwargs):\n '''\n Design pump for the retentate stream of AF reactors.\n\n Parameters defined through the `add_inputs` argument upon initialization of\n this unit (Q_mgd listed separately) will be used if not provided\n when calling this function.\n\n Parameters\n ----------\n Q_mgd : float\n Volumetric flow rate in million gallon per day, [mgd].\n N_pump : int\n Number of the pumps.\n D : float\n Depth of the reactor, [ft].\n kwargs : dict\n Additional attribute values to set (e.g., `L_s`, `H_ts`),\n this will overwrite the default values.\n '''\n add_inputs = self.add_inputs\n Q_mgd = Q_mgd or self.Q_mgd\n N_pump = N_pump or add_inputs[0]\n D = D or add_inputs[1]\n\n val_dct = dict(\n L_s=100, # assumed pipe length per filter, [ft]\n L_d=30, # same as discharge side of lift pumping, [ft]\n H_ts=0., # H_ds_IR (D) - H_ss_IR (D)\n H_p=0. 
# no pressure\n            )\n        val_dct.update(kwargs)\n\n        M_SS_IR_pipe, M_SS_IR_pump = self._design_generic(\n            Q_mgd=Q_mgd, N_pump=N_pump, **val_dct)\n\n        return M_SS_IR_pipe, M_SS_IR_pump, 0\n\n\n    def design_recirculation_CSTR(self, Q_mgd=None, N_pump=None, L=None, **kwargs):\n        '''\n        Design pump for the recirculation stream of CSTR reactors.\n\n        Parameters defined through the `add_inputs` argument upon initialization of\n        this unit (Q_mgd listed separately) will be used if not provided\n        when calling this function.\n\n        Parameters\n        ----------\n        Q_mgd : float\n            Volumetric flow rate in million gallon per day, [mgd].\n        N_pump : int\n            Number of the pumps.\n        L : float\n            Length of the reactor, [ft].\n        kwargs : dict\n            Additional attribute values to set (e.g., `L_s`, `H_ts`),\n            this will overwrite the default values.\n        '''\n        Q_mgd = Q_mgd or self.Q_mgd\n        L = L or self.add_inputs[0]\n\n        val_dct = dict(\n            L_s=0., # ignore suction side\n            L_d=L, # pipe length per train\n            H_ts=5., # H_ds_IR (5) - H_ss_IR (0)\n            H_p=0. # no pressure\n            )\n        val_dct.update(kwargs)\n\n        M_SS_IR_pipe, M_SS_IR_pump = self._design_generic(\n            Q_mgd=Q_mgd, N_pump=N_pump, **val_dct)\n\n        return M_SS_IR_pipe, M_SS_IR_pump, 0\n\n\n    def design_recirculation_AF(self, Q_mgd=None, N_pump=None, d=None,\n                                D=None, **kwargs):\n        '''\n        Design pump for the recirculation stream of AF reactors.\n\n        Parameters defined through the `add_inputs` argument upon initialization of\n        this unit (Q_mgd listed separately) will be used if not provided\n        when calling this function.\n\n        Parameters\n        ----------\n        Q_mgd : float\n            Volumetric flow rate in million gallon per day, [mgd].\n        N_pump : int\n            Number of the pumps.\n        d : float\n            Diameter (or width) of the reactor, [ft].\n        D : float\n            Depth of the reactor, [ft].\n        kwargs : dict\n            Additional attribute values to set (e.g., `L_s`, `H_ts`),\n            this will overwrite the default values.\n        '''\n        add_inputs = self.add_inputs\n        Q_mgd = Q_mgd or self.Q_mgd\n        N_pump = N_pump or add_inputs[0]\n        d = d or add_inputs[1]\n        D = D or add_inputs[2]\n\n        val_dct = dict(\n            L_s=d+D, # pipe length per filter, [ft]\n            L_d=30, # same as discharge side of lift pumping, [ft]\n            H_ts=0., # H_ds_IR (D) - H_ss_IR (D)\n            H_p=0. # no pressure\n            )\n        val_dct.update(kwargs)\n\n        M_SS_IR_pipe, M_SS_IR_pump = self._design_generic(\n            Q_mgd=Q_mgd, N_pump=N_pump, **val_dct)\n\n        return M_SS_IR_pipe, M_SS_IR_pump, 0\n\n\n    def design_lift(self, Q_mgd=None, N_pump=None, D=None, **kwargs):\n        '''\n        Design pump for the filter tank to lift streams.\n\n        Parameters defined through the `add_inputs` argument upon initialization of\n        this unit (Q_mgd listed separately) will be used if not provided\n        when calling this function.\n\n        Parameters\n        ----------\n        Q_mgd : float\n            Volumetric flow rate in million gallon per day, [mgd].\n        N_pump : int\n            Number of the pumps.\n        D : float\n            Depth of the filter tank, [ft].\n        kwargs : dict\n            Additional attribute values to set (e.g., `L_s`, `H_ts`),\n            this will overwrite the default values.\n        '''\n        add_inputs = self.add_inputs\n        Q_mgd = Q_mgd or self.Q_mgd\n        N_pump = N_pump or add_inputs[0]\n        D = D or add_inputs[1]\n\n        val_dct = dict(\n            L_s=150, # length of suction pipe per filter, [ft]\n            L_d=30, # pipe length per filter, [ft]\n            H_ts=D, # H_ds_LIFT (D) - H_ss_LIFT (0)\n            H_p=0. 
# no pressure\n            )\n        val_dct.update(kwargs)\n\n        M_SS_IR_pipe, M_SS_IR_pump = self._design_generic(\n            Q_mgd=Q_mgd, N_pump=N_pump, **val_dct)\n\n        return M_SS_IR_pipe, M_SS_IR_pump, 0\n\n\n    def design_sludge(self, Q_mgd=None, N_pump=None, **kwargs):\n        '''\n        Design pump for handling waste sludge.\n\n        Parameters\n        ----------\n        Q_mgd : float\n            Volumetric flow rate in million gallon per day, [mgd].\n        N_pump : int\n            Number of the pumps.\n        kwargs : dict\n            Additional attribute values to set (e.g., `L_s`, `H_ts`),\n            this will overwrite the default values.\n        '''\n        Q_mgd = Q_mgd or self.Q_mgd\n        N_pump = N_pump or 1\n\n        val_dct = dict(\n            L_s=50, # length of suction pipe, [ft]\n            L_d=50, # length of discharge pipe, [ft]\n            H_ts=0., # H_ds_LIFT (D) - H_ss_LIFT (0)\n            H_p=0. # no pressure\n            )\n        val_dct.update(kwargs)\n\n        M_SS_IR_pipe, M_SS_IR_pump = self._design_generic(\n            Q_mgd=Q_mgd, N_pump=N_pump, **val_dct)\n\n        return M_SS_IR_pipe, M_SS_IR_pump, 0\n\n\n    def design_chemical(self, Q_mgd=None, N_pump=None, **kwargs):\n        '''\n        Design pump for membrane cleaning chemicals (NaOCl and citric acid);\n        storage containers are included and assumed to be cubic in shape\n        and made of HDPE.\n\n        Parameters defined through the `add_inputs` argument upon initialization of\n        this unit (Q_mgd listed separately) will be used if not provided\n        when calling this function.\n\n        Parameters\n        ----------\n        Q_mgd : float\n            Volumetric flow rate in million gallon per day, [mgd].\n        N_pump : int\n            Number of the pumps.\n        kwargs : dict\n            Additional attribute values to set (e.g., `L_s`, `H_ts`),\n            this will overwrite the default values.\n        '''\n        if not Q_mgd:\n            V_CHEM = self.ins[0].F_vol * 24 * 7 * 2 # for two weeks of storage, [m3]\n            Q_CHEM_mgd = self.Q_mgd\n        else:\n            V_CHEM = (Q_mgd*1e6/_m3_to_gal) * 7 * 2\n            Q_CHEM_mgd = Q_mgd\n        N_pump = N_pump or 1\n\n        # HDPE volume, [m3], 0.003 [m] is the thickness of the container\n        V_HDPE = 0.003 * (V_CHEM**(1/3))**2*6\n        # # Mass of HDPE, [m3], 950 is the density of the HDPE in [kg/m3]\n        # M_HDPE = 950 * V_HDPE\n\n        H_ss_CHEM = V_CHEM**(1/3) / _ft_to_m\n        # 9'-7\" is the water level in membrane trains\n        # 18\" is the distance from C/L of the pump to the ground\n        H_ds_CHEM = 9 + 7/12 - 18/12\n        H_ts_CHEM = H_ds_CHEM - H_ss_CHEM\n\n        val_dct = dict(\n            L_s=0., # no suction pipe\n            L_d=30.,\n            H_ts=H_ts_CHEM,\n            H_p=0. 
# no pressure\n            )\n        val_dct.update(kwargs)\n\n        M_SS_CHEM_pipe, M_SS_CHEM_pump = self._design_generic(\n            Q_mgd=Q_CHEM_mgd, N_pump=N_pump, **val_dct)\n\n        return M_SS_CHEM_pipe, M_SS_CHEM_pump, V_HDPE\n\n\n    @property\n    def pump_type(self):\n        '''\n        [str] The type of the pump that determines the design algorithms to use.\n        Use `valid_pump_types` to see acceptable pump types.\n        '''\n        return self._pump_type\n    @pump_type.setter\n    def pump_type(self, i):\n        i_lower = i.lower()\n        i_lower = i_lower.replace('cstr', 'CSTR')\n        i_lower = i_lower.replace('af', 'AF')\n        if i_lower not in self.valid_pump_types:\n            raise ValueError(f'The given `pump_type` \"{i}\" is not valid, '\n                             'check `valid_pump_types` for acceptable pump types.')\n        self._pump_type = i_lower\n\n    @property\n    def valid_pump_types(self):\n        '''[tuple] Acceptable pump types.'''\n        return self._valid_pump_types\n\n    @property\n    def Q_mgd(self):\n        '''\n        [float] Volumetric flow rate in million gallon per day, [mgd].\n        Will use total volumetric flow through the unit if not provided.\n        '''\n        if self._Q_mgd:\n            return self._Q_mgd\n        return self.F_vol_in*_m3_to_gal*24/1e6\n    @Q_mgd.setter\n    def Q_mgd(self, i):\n        self._Q_mgd = i\n\n    @property\n    def Q_gpm(self):\n        '''[float] Volumetric flow rate in gallon per minute, [gpm].'''\n        return self.Q_mgd*1e6/24/60\n\n    @property\n    def Q_cmd(self):\n        '''\n        [float] Volumetric flow rate in cubic meter per day, [cmd].\n        '''\n        return self.Q_mgd *1e6/_m3_to_gal # [m3/day]\n\n    @property\n    def Q_cfs(self):\n        '''[float] Volumetric flow rate in cubic feet per second, [cfs].'''\n        return self.Q_mgd*1e6/24/60/60/_ft3_to_gal\n\n    @property\n    def capacity_factor(self):\n        '''[float] A safety factor to handle peak flow.'''\n        return self._capacity_factor\n    @capacity_factor.setter\n    def capacity_factor(self, i):\n        self._capacity_factor = i\n\n    @property\n    def N_pump(self):\n        '''[int] Number of pumps.'''\n        return self._N_pump\n    @N_pump.setter\n    def N_pump(self, i):\n        self._N_pump = ceil(i)\n\n    @property\n    def v(self):\n        '''[float] Fluid velocity, [ft/s].'''\n        return self._v\n    @v.setter\n    def v(self, i):\n        self._v = i\n\n    @property\n    def C(self):\n        '''[float] Hazen-Williams coefficient to calculate fluid friction.'''\n        return self._C\n    @C.setter\n    def C(self, i):\n        self._C = i\n\n    @property\n    def SS_per_pump(self):\n        '''[float] Quantity of stainless steel per pump, [kg/ea].'''\n        return self._SS_per_pump\n    @SS_per_pump.setter\n    def SS_per_pump(self, i):\n        self._SS_per_pump = i\n\n    @property\n    def H_sf(self):\n        '''[float] Suction friction head, [ft].'''\n        return self._H_sf\n\n    @property\n    def H_df(self):\n        '''[float] Discharge friction head, [ft].'''\n        return self._H_df\n\n    @property\n    def H_ts(self):\n        '''[float] Total static head, [ft].'''\n        return self._H_ts\n\n    @property\n    def H_p(self):\n        '''[float] Pressure head, [ft].'''\n        return self._H_p\n\n    @property\n    def TDH(self):\n        '''[float] Total dynamic head, [ft].'''\n        return self.H_ts+self.H_sf+self.H_df+self.H_p\n\n    @property\n    def BHP(self):\n        '''[float] Brake horsepower, [hp].'''\n        return (self.TDH*self.Q_gpm)/3960/self.brake_efficiency\n\n    @property\n    def brake_efficiency(self):\n        '''[float] Brake efficiency.'''\n        return brake_eff(self.Q_gpm)\n\n    @property\n    def motor_efficiency(self):\n        '''[float] Motor efficiency.'''\n        return motor_eff(self.BHP)\n\n    @property\n    def building_unit_cost(self):\n        '''[float] Unit cost of the pump building, [USD/ft2].'''\n        return self._building_unit_cost if self.include_building_cost else 0.\n    @building_unit_cost.setter\n    def building_unit_cost(self, i):\n        
self._building_unit_cost = i\n\n\n# %%\n\n# =============================================================================\n# Decorator\n# =============================================================================\n\ndef wwtpump(ID, ins=(), prefix='', pump_type='', Q_mgd=None, add_inputs=(),\n capacity_factor=1., include_pump_cost=True, include_building_cost=False,\n include_OM_cost=False, F_BM=F_BM_pump, lifetime=default_equipment_lifetime,\n **kwargs):\n '''\n Handy decorator to add a :class:`~.WWTpump` as an attribute of\n a :class:`qsdsan.SanUnit`.\n\n Refer to class:`WWTpump` for the parameters needed for using this decorator.\n\n See Also\n --------\n :class:`~.WWTpump`\n\n :class:`~.AnMBR`\n\n References\n ----------\n [1] Shoener et al., Design of Anaerobic Membrane Bioreactors for the\n Valorization of Dilute Organic Carbon Waste Streams.\n Energy Environ. Sci. 2016, 9 (3), 1102–1112.\n https://doi.org/10.1039/C5EE03715H.\n '''\n return lambda cls: add_pump(cls, ID, ins, prefix, pump_type, Q_mgd, add_inputs,\n capacity_factor, include_pump_cost, include_building_cost,\n include_OM_cost, F_BM, lifetime, **kwargs)\n\n\ndef add_pump(cls, ID, ins, prefix, pump_type, Q_mgd, add_inputs,\n capacity_factor, include_pump_cost, include_building_cost,\n include_OM_cost, F_BM, lifetime, **kwargs):\n if getattr(cls, 'system', None): \n if not main_f is cls.system.flowsheet:\n main_f.set_flowsheet(cls.system.flowsheet)\n pump = WWTpump(\n ID, ins=ins,\n prefix=prefix, pump_type=pump_type, Q_mgd=Q_mgd, add_inputs=add_inputs,\n capacity_factor=capacity_factor,\n include_pump_cost=include_pump_cost,\n include_building_cost=include_building_cost,\n include_OM_cost=include_OM_cost,\n F_BM=F_BM, lifetime=lifetime, **kwargs)\n setattr(cls, f'{ID}_pump', pump)\n return cls", "id": "10443624", "language": "Python", "matching_score": 3.2341387271881104, "max_stars_count": 2, "path": "qsdsan/sanunits/_pumping.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\nQSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems\n\nThis module is developed by:\n <NAME> <<EMAIL>>\n\nThis module is under the University of Illinois/NCSA Open Source License.\nPlease refer to https://github.com/QSD-Group/QSDsan/blob/main/LICENSE.txt\nfor license details.\n'''\n\nfrom math import ceil\nfrom .. import Equipment\nfrom ..utils import select_pipe, calculate_pipe_material\n\n__all__ = ('Blower', 'GasPiping',)\n\n\n# %%\n\nclass Blower(Equipment):\n '''\n Design and cost blowers based on `Shoener et al. 
<https://doi.org/10.1039/C5EE03715H>`_.\n\n Note that blower building concrete usage and excavation is not included here\n as blowers are often housed together with the reactors.\n\n Refer to :class:`~.sanunits.AnMBR` or :class:`~.sanunits.ActivatedSludgeProcess`\n for examples.\n\n Parameters\n ----------\n N_reactor : int\n Number of the reactors where the gas sparging modules will be installed.\n gas_demand_per_reactor : float\n Gas demand per reactor, [cfm] (cubic ft per minute).\n TDH : float\n Total dynamic head for the blower, [psi].\n eff_blower : float\n Efficiency of the blower in fraction (i.e., 0.7 for 70%).\n eff_motor : float\n Efficiency of the motor in fraction (i.e., 0.7 for 70%).\n AFF : float\n Air flow fraction.\n The default value is calculated as STE/6\n (STE stands for standard oxygen transfer efficiency, and default STE is 20).\n If using different STE value, AFF should be 1 if STE/6<1\n and 3.33 if STE/6>1.\n building_unit_cost : float\n Unit cost of the blower building, [USD/ft2].\n\n References\n ----------\n [1] Shoener, <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.\n Design of Anaerobic Membrane Bioreactors for the Valorization\n of Dilute Organic Carbon Waste Streams.\n Energy Environ. Sci. 2016, 9 (3), 1102–1112.\n https://doi.org/10.1039/C5EE03715H.\n\n See Also\n --------\n :class:`~.sanunits.AnMBR`\n\n :class:`~.sanunits.ActivatedSludgeProcess`\n\n '''\n\n def __init__(self, ID=None, linked_unit=None,\n F_BM={\n 'Blowers': 2.22,\n 'Blower piping': 1,\n 'Blower building': 1.11,\n },\n lifetime=15, lifetime_unit='yr',\n units={\n 'Total gas flow': 'CFM',\n 'Blower capacity': 'CFM',\n 'Number of blowers': '',\n 'Total blower power': 'kW',\n },\n N_reactor=2, gas_demand_per_reactor=1,\n TDH=6, eff_blower=0.7, eff_motor=0.7, AFF=3.33,\n building_unit_cost=90,\n ):\n Equipment.__init__(self=self, ID=ID, linked_unit=linked_unit, F_BM=F_BM,\n lifetime=lifetime, lifetime_unit=lifetime_unit,\n units=units)\n self.N_reactor = N_reactor\n self.gas_demand_per_reactor = gas_demand_per_reactor\n self.TDH = TDH\n self.eff_blower = eff_blower\n self.eff_motor = eff_motor\n self.AFF = AFF\n self.building_unit_cost = building_unit_cost\n\n\n def _design(self):\n N_reactor, gas_demand_per_reactor = \\\n self.N_reactor, self.gas_demand_per_reactor\n gas_tot = N_reactor * gas_demand_per_reactor\n TDH, eff_blower, eff_motor = self.TDH, self.eff_blower, self.eff_motor\n\n # Calculate brake horsepower, 14.7 is atmospheric pressure in psi\n BHP = (gas_tot*0.23)*(((14.7+TDH)/14.7)**0.283-1)/eff_blower\n # 0.746 is horsepower to kW\n D = {'Total blower power': BHP*0.746/eff_motor}\n\n # Calculate the number of blowers\n TCFM = ceil(gas_demand_per_reactor) # total cubic ft per min\n N = 1\n if TCFM <= 30000:\n CFMB = TCFM / N # cubic ft per min per blower\n while CFMB > 7500:\n N += 1\n CFMB = TCFM / N\n elif 30000 < TCFM <= 72000:\n CFMB = TCFM / N\n while CFMB > 18000:\n N += 1\n CFMB = TCFM / N\n else:\n CFMB = TCFM / N\n while CFMB > 100000:\n N += 1\n CFMB = TCFM / N\n\n D['Total gas flow'] = TCFM\n D['Blower capacity'] = CFMB\n D['Number of blowers'] = N\n return D\n\n\n def _cost(self):\n D = self.design_results\n TCFM, CFMB = D['Total gas flow'], D['Blower capacity']\n N_reactor, AFF = self.N_reactor, self.AFF\n # Air pipes\n # Note that the original codes use CFMD instead of TCFM for air pipes,\n # but based on the coding they are equivalent\n # (i.e., just used an alternative name)\n if TCFM <= 1000:\n piping = 617.2 * AFF * (TCFM**0.2553)\n elif 1000 < TCFM <= 10000:\n piping 
= 1.43 * AFF * (TCFM**1.1337)\n else:\n piping = 28.59 * AFF * (TCFM**0.8085)\n C = {'Blower air piping': piping}\n\n # Blowers\n if TCFM <= 30000:\n ratio = 0.7 * (CFMB**0.6169)\n blowers = 58000*ratio / 100\n elif 30000 < TCFM <= 72000:\n ratio = 0.377 * (CFMB**0.5928)\n blowers = 218000*ratio / 100\n else:\n ratio = 0.964 * (CFMB**0.4286)\n blowers = 480000*ratio / 100\n C['Blowers'] = blowers * N_reactor\n\n # Blower building\n area = 128 * (TCFM**0.256) # building area, [ft2]\n C['Blower building'] = area * self.building_unit_cost\n\n return C\n\n\n# %%\n\nclass GasPiping(Equipment):\n '''\n Design and cost reactor gas header pipes and manifold based on\n `Shoener et al. <https://doi.org/10.1039/C5EE03715H>`_.\n\n The gas pipes will be layed along the length of the reactor with\n manifold along the width of th reactor\n (i.e., gas will be pumped from the manifold to the header then into the reactor).\n\n Refer to :class:`~.sanunits.ActivatedSludgeProcess` for usage.\n\n Parameters\n ----------\n N_reactor : int\n Number of the reactor where the gas sparging modules will be installed.\n N_pipe_per_reactor : int\n Number of the pipes per reactor.\n gas_demand_per_reactor : float\n Gas demand per reactor, [cfm] (cubic ft per minute).\n v_header : float\n Velocity of gas in the header pipe\n (layed along the length of the reactor), [ft/s].\n v_manifold : float\n Velocity of gas in the manifold pipe\n (layed along the width of the reactor), [ft/s].\n L_reactor : float\n Length of the reactor, [ft].\n L_extra : float\n Extra length to be included in piping for each of the reactor, [ft].\n W_reactor : float\n Width of the reactor, [ft].\n W_extra : float\n Extra width to be included in piping for each of the reactor, [ft].\n pipe_density : float\n Density of the pipe, [kg/ft3].\n pipe_unit_cost : float\n Unit cost of the pipe, [USD/kg].\n TDH : float\n Total dynamic head for the blower, [psi].\n eff_blower : float\n Efficiency of the blower in fraction (i.e., 0.7 for 70%).\n eff_motor : float\n Efficiency of the motor in fraction (i.e., 0.7 for 70%).\n\n References\n ----------\n [1] <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.\n Design of Anaerobic Membrane Bioreactors for the Valorization\n of Dilute Organic Carbon Waste Streams.\n Energy Environ. Sci. 
2016, 9 (3), 1102–1112.\n https://doi.org/10.1039/C5EE03715H.\n\n See Also\n --------\n :class:`~.sanunits.ActivatedSludgeProcess`\n '''\n\n def __init__(self, ID=None, linked_unit=None,\n F_BM=1., lifetime=15, lifetime_unit='yr',\n units={'Gas pipe material': 'kg',},\n N_reactor=2, N_pipe_per_reactor=1,\n gas_demand_per_reactor=1, v_header=70, v_manifold=70,\n L_reactor=12, L_extra=0, W_reactor=21, W_extra=2,\n pipe_density=227.3, # from 0.29 lb/in3\n pipe_unit_cost=0,):\n Equipment.__init__(self=self, ID=ID, linked_unit=linked_unit, F_BM=F_BM,\n lifetime=lifetime, lifetime_unit=lifetime_unit,\n units=units)\n self.N_reactor = N_reactor\n self.N_pipe_per_reactor = N_pipe_per_reactor\n self.gas_demand_per_reactor = gas_demand_per_reactor\n self.v_header = v_header\n self.v_manifold = v_manifold\n self.L_reactor = L_reactor\n self.L_extra = L_extra\n self.W_reactor = W_reactor\n self.W_extra = W_extra\n self.pipe_density = pipe_density\n self.pipe_unit_cost = pipe_unit_cost\n\n\n def _design(self):\n # Gas piping\n N_reactor, N_pipe_per_reactor, gas_demand_per_reactor, pipe_density = \\\n self.N_reactor, self.N_pipe_per_reactor, self.gas_demand_per_reactor, self.pipe_density\n L_reactor, L_extra, W_reactor, W_extra = \\\n self.L_reactor, self.L_extra, self.W_reactor, self.W_extra\n # Gas header\n L_gh = L_reactor * N_pipe_per_reactor + L_extra\n gas_demand_per_pipe = gas_demand_per_reactor / N_pipe_per_reactor\n OD_gh, t_gh, ID_gh = select_pipe(gas_demand_per_pipe, self.v_header)\n M_gh = N_reactor * \\\n calculate_pipe_material(OD_gh, t_gh, ID_gh, L_gh, pipe_density)\n # Gas supply manifold, used more conservative assumption than the ref\n L_gsm = W_reactor*N_reactor + W_extra\n gas_tot = N_reactor * gas_demand_per_reactor\n OD_gsm, t_gsm, ID_gsm = select_pipe(gas_tot, self.v_manifold)\n M_gsm = calculate_pipe_material(OD_gsm, t_gsm, ID_gsm, L_gsm, pipe_density) # kg\n return {'Gas pipe material': M_gh+M_gsm}\n\n\n def _cost(self):\n return self.pipe_unit_cost*self.design_results['Gas pipe material']", "id": "3028531", "language": "Python", "matching_score": 1.2667450904846191, "max_stars_count": 2, "path": "qsdsan/equipments/_aeration.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\nQSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems\n\nThis module is developed by:\n <NAME> <<EMAIL>>\n\nThis module is under the University of Illinois/NCSA Open Source License.\nPlease refer to https://github.com/QSD-Group/QSDsan/blob/main/LICENSE.txt\nfor license details.\n'''\n\nfrom .. 
import SanUnit\nfrom ..utils import auom\n\n__all__ = ('Screening',)\n\n\nclass Screening(SanUnit):\n '''\n A non-reactive unit used to estimate the operating cost of screening.\n\n Note that only costs from electricity and screened out solids disposal are considered\n (i.e., no equipment cost).\n\n Parameters\n ----------\n solids_yield : float\n Amount of solids that is screened out, [ft3/hr/MGD].\n compaction : float\n Fraction of the solids that can be compacted\n (i.e., volume after compaction = original volume * (1-compaction)).\n disposal_cost : float\n Cost of compacted solids disposal, [$/ft3].\n power_demand : float\n Power usage for screening, [kW/MGD].\n '''\n def __init__(self, ID='', ins=None, outs=(), thermo=None, init_with='WasteStream',\n solids_yield=2, compaction=0.75,\n disposal_cost=225/20/27, # converted from $225/20 yd3 container\n power_demand=1*0.7457): # converted from 1 hp\n SanUnit.__init__(self, ID, ins, outs, thermo, init_with)\n self.solids_yield = solids_yield\n self.compaction = compaction\n self.disposal_cost = disposal_cost\n self.power_demand = power_demand\n\n\n def _cost(self):\n Q_mgd = self.Q_mgd\n solids = self.solids_yield*Q_mgd*(1-self.compaction)\n self.add_OPEX = {'Solids disposal cost': solids*self.disposal_cost}\n self.power_utility.consumption = Q_mgd*self.power_demand\n\n\n @property\n def Q_mgd(self):\n '''\n [float] Influent volumetric flow rate in million gallon per day, [mgd].\n '''\n return auom('m3').convert(self.ins[0].F_vol, 'gallon')*24/1e6", "id": "2639444", "language": "Python", "matching_score": 0.2721482217311859, "max_stars_count": 2, "path": "qsdsan/sanunits/_screening.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\nQSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems\n\nThis module is developed by:\n <NAME> <<EMAIL>>\n <NAME> <<EMAIL>>\n <NAME> <<EMAIL>>\n\nThis module is under the University of Illinois/NCSA Open Source License.\nPlease refer to https://github.com/QSD-Group/QSDsan/blob/main/LICENSE.txt\nfor license details.\n'''\n\n\nfrom .. 
import Equipment\n\n__all__ = ('Column',)\n\n\nclass Column(Equipment):\n '''\n Columns to be used in an electrochemical cell.\n Refer to the example in :class:`~.sanunits.ElectroChemCell` for how to use this class.\n\n Parameters\n ----------\n N : int\n Number of units of the given column.\n material: str\n Material of the column.\n unit_cost: float\n Unit cost of the column per m2, will use default cost (if available)\n if not provided.\n surface_area : float\n Surface area of the column in m2.\n\n See Also\n --------\n :class:`~.sanunits.ElectroChemCell`\n\n '''\n\n def __init__(self, ID='', linked_unit=None,\n units={\n 'Number of columns': '',\n 'Material of the column': '',\n 'Surface area of columns': 'm2',\n },\n F_BM=1., lifetime=10000, lifetime_unit='hr', N=0,\n material='resin', unit_cost=0.1, surface_area=1):\n Equipment.__init__(self=self, ID=ID, linked_unit=linked_unit, units=units,\n F_BM=F_BM, lifetime=lifetime, lifetime_unit=lifetime_unit)\n self.N = N\n self.unit_cost = unit_cost\n self.material = material\n self.surface_area = surface_area\n\n\n def _design(self):\n design = {\n 'Number of columns': self.N,\n 'Material of the column': self.material,\n 'Surface area of columns': self.surface_area\n }\n return design\n\n\n def _cost(self):\n return self.unit_cost*self.N*self.surface_area\n\n\n @property\n def N(self):\n '''[str] Number of units of the columns.'''\n return self._N\n @N.setter\n def N(self, i):\n self._N = int(i)", "id": "5089209", "language": "Python", "matching_score": 2.855046510696411, "max_stars_count": 2, "path": "qsdsan/equipments/_column.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\nQSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems\n\nThis module is developed by:\n <NAME> <<EMAIL>>\n <NAME> <<EMAIL>>\n <NAME> <<EMAIL>>\n\nThis module is under the University of Illinois/NCSA Open Source License.\nPlease refer to https://github.com/QSD-Group/QSDsan/blob/main/LICENSE.txt\nfor license details.\n'''\n\n\nfrom .. 
import Equipment\n\n__all__ = ('Electrode',)\n\n\nclass Electrode(Equipment):\n '''\n Electrodes to be used in an electrochemical cell.\n Refer to the example in :class:`~.sanunits.ElectroChemCell` for how to use this class.\n\n Parameters\n ----------\n N : int\n Number of units of the given electrode.\n electrode_type : str\n Type of the electrode, can only be \"anode\" or \"cathode\".\n material: str\n Material of the electrode.\n unit_cost: float\n Unit cost of the electrode, will use default cost (if available)\n if not provided.\n surface_area : float\n Surface area of the electrode in m2.\n\n See Also\n --------\n :class:`~.sanunits.ElectroChemCell`\n\n '''\n\n _default_unit_cost = {'graphite': 50}\n\n def __init__(self, ID='', linked_unit=None, F_BM=1.,\n lifetime=10000, lifetime_unit='hr', N=0,\n electrode_type='anode',\n material='graphite', unit_cost=0.1, surface_area=1):\n Equipment.__init__(self=self, ID=ID, linked_unit=linked_unit,\n F_BM=F_BM, lifetime=lifetime, lifetime_unit=lifetime_unit)\n self.N = N\n self.electrode_type = electrode_type\n self.unit_cost = unit_cost\n self.material = material\n self.surface_area = surface_area\n\n\n def _design(self):\n design = {\n f'Number of {self.electrode_type}': self.N,\n f'Material of {self.electrode_type}': self.material,\n f'Surface area of {self.electrode_type}': self.surface_area\n }\n units = self._units = dict.fromkeys(design.keys())\n units[f'Surface area of {self.electrode_type}'] = 'm2'\n return design\n\n\n def _cost(self):\n return self.unit_cost*self.N\n\n\n @property\n def N(self):\n '''[str] Number of units of the electrode.'''\n return self._N\n @N.setter\n def N(self, i):\n self._N = int(i)\n\n @property\n def electrode_type(self):\n '''[str] Type of the electrode, either \"anode\" or \"cathode\".'''\n return self._electrode_type\n @electrode_type.setter\n def electrode_type(self, i):\n if i.lower() in ('anode', 'cathode'):\n self._electrode_type = i\n else:\n raise ValueError(f'Electrode can only be \"anode\" or \"cathode\", not \"{i}\".')\n\n @property\n def unit_cost(self):\n '''[float] Cost of one electrode.'''\n if self._unit_cost:\n return self._unit_cost\n cost = self._default_unit_cost.get(self.material)\n return cost or 0.\n @unit_cost.setter\n def unit_cost(self, i):\n self._unit_cost = i\n\n @property\n def material(self):\n '''[str] Material of the electrode.'''\n return self._material\n @material.setter\n def material(self, i):\n self._material = i.lower()", "id": "6339738", "language": "Python", "matching_score": 2.395321846008301, "max_stars_count": 2, "path": "qsdsan/equipments/_electrode.py" } ]
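# The WWTpump class in qsdsan/sanunits/_pumping.py above sizes pumps from a Hazen-Williams
# friction-head estimate plus a brake-horsepower calculation (see _design_generic and the
# TDH/BHP properties). The minimal standalone sketch below reproduces only that arithmetic
# so the sizing logic can be sanity-checked without QSDsan; all numeric inputs (flow, pipe
# inner diameter, static head) are made-up illustration values, and the 0.8/0.9 efficiencies
# stand in for the brake_eff/motor_eff helper functions referenced by the class but not
# shown in this module.

def friction_head(L_ft, v_fps, C, ID_in):
    '''Hazen-Williams friction head [ft], as used in WWTpump._design_generic.'''
    return 3.02 * L_ft * v_fps**1.85 * C**(-1.85) * (ID_in/12)**(-1.17)

def brake_horsepower(TDH_ft, Q_gpm, brake_eff):
    '''Brake horsepower [hp] = TDH * Q / 3960 / brake efficiency.'''
    return TDH_ft * Q_gpm / 3960 / brake_eff

Q_mgd = 0.5                                                # assumed flow, [mgd]
Q_gpm = Q_mgd * 1e6 / 24 / 60                              # same conversion as the Q_gpm property
H_sf = friction_head(L_ft=20, v_fps=3, C=110, ID_in=4)     # suction side, assumed 4-in ID pipe
H_df = friction_head(L_ft=30, v_fps=3, C=110, ID_in=4)     # discharge side
TDH = 10. + H_sf + H_df                                    # assumed static head + friction, no pressure head, [ft]
BHP = brake_horsepower(TDH, Q_gpm, brake_eff=0.8)          # 0.8 is an assumed brake efficiency
power_kW = BHP / 0.9 * 0.7457                              # assumed motor efficiency 0.9; 1 hp = 0.7457 kW
print(f'TDH = {TDH:.1f} ft, BHP = {BHP:.2f} hp, power = {power_kW:.2f} kW')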
2.174338
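# A similar minimal sketch for the blower sizing in qsdsan/equipments/_aeration.py
# (Blower._design above): an adiabatic-compression brake-horsepower estimate, followed by
# splitting the air flow across blowers until each stays under the capacity cap for its
# flow range. TDH and the efficiencies below mirror the class defaults; the gas demand is
# an assumed illustration value, not taken from the source.
from math import ceil

N_reactor, gas_demand_per_reactor = 2, 5000   # [cfm] per reactor (assumed)
TDH, eff_blower, eff_motor = 6, 0.7, 0.7      # [psi] and fractions (class defaults)

gas_tot = N_reactor * gas_demand_per_reactor
BHP = gas_tot*0.23 * (((14.7 + TDH)/14.7)**0.283 - 1) / eff_blower   # 14.7 psi = 1 atm
power_kW = BHP * 0.746 / eff_motor

TCFM = ceil(gas_demand_per_reactor)   # total gas flow, computed per reactor as in _design
N_blowers, cap = 1, 7500              # 7,500 cfm/blower cap applies when TCFM <= 30,000
while TCFM / N_blowers > cap:
    N_blowers += 1
print(f'{N_blowers} blower(s), {power_kW:.1f} kW total')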
XuCcc
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/04/14 21:37\n# @Author : Xu\n# @Site : https://xuccc.github.io/\n\nimport sys\nfrom loguru import logger\n\n\nclass Log(object):\n app = logger.bind(name='app')\n plugin = logger.bind(name='plugin')\n\n @classmethod\n def config(cls, debug):\n level = 'DEBUG' if debug else 'INFO'\n logger.remove(0)\n logger.add(sys.stdout,\n level=level)\n logger.add('app.log',\n filter=lambda record: record['extra'].get('name') == 'app',\n level=level)\n logger.add('plugin.log',\n filter=lambda record: record['extra'].get('name') == 'plugin',\n level=level)\n", "id": "2611485", "language": "Python", "matching_score": 0.8945224285125732, "max_stars_count": 24, "path": "core/log.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/04/14 21:35\n# @Author : Xu\n# @Site : https://xuccc.github.io/\n\nfrom colorama import Fore, Style\n\nfrom core.log import Log\nfrom core.item import ItemStream\nfrom core.piper.basePiper import Piper\n\n\nclass LogPiper(Piper):\n name = 'logPiper'\n\n def __init__(self, debug: bool):\n self._log = Log.app\n self._debug = debug\n\n def process(self, item: ItemStream):\n msg = ''\n if (item.payload is not None) & hasattr(item, 'func'):\n if item.func.status:\n msg = f'{item.payload.challenge} {item.payload.name}@{Fore.GREEN}{item.ip}{Fore.RESET} run success, '\n else:\n reason = item.func.message if self._debug else item.func.message[:255]\n msg = f'{item.payload.challenge} {item.payload.name}@{Fore.RED}{item.ip}{Fore.RESET} run fail, ' \\\n f'{Style.DIM}{reason}'\n\n if hasattr(item, 'flag'):\n if item.flag.status:\n msg += f'submit success {Fore.GREEN}{item.flag.value}'\n else:\n reason = item.flag.message if self._debug else item.flag.message[:64]\n msg += f'submit fail {Fore.RED}{item.flag.value}, {Fore.RESET} {Style.DIM}{reason}'\n\n self._log.info(msg)\n", "id": "2789103", "language": "Python", "matching_score": 2.330639600753784, "max_stars_count": 24, "path": "core/piper/logPiper.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/04/12 20:16\n# @Author : Xu\n# @Site : https://xuccc.github.io/\n\nimport abc\nfrom core.item import ItemStream\n\n\nclass Piper(object):\n name = 'basePiper'\n\n @abc.abstractmethod\n def process(self, item: ItemStream):\n pass\n", "id": "4872236", "language": "Python", "matching_score": 1.3996174335479736, "max_stars_count": 24, "path": "core/piper/basePiper.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/04/12 20:15\n# @Author : Xu\n# @Site : https://xuccc.github.io/\n\nfrom .basePiper import Piper\nfrom .dbPiper import DbPiper\nfrom .flagPiper import FlagPiper\nfrom .funcPiper import FuncHandler\nfrom .logPiper import LogPiper\n\n__all__ = [\n 'Piper',\n 'DbPiper',\n 'FlagPiper',\n 'FuncHandler',\n 'LogPiper'\n]\n", "id": "5956929", "language": "Python", "matching_score": 1.6217812299728394, "max_stars_count": 24, "path": "core/piper/__init__.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/04/12 20:33\n# @Author : Xu\n# @Site : https://xuccc.github.io/\n\nimport re\n\nfrom core.config import AttackParser\nfrom core.data import Status\nfrom core.item import ItemStream\nfrom core.piper.basePiper import Piper\n\n\nclass FuncHandler(Piper):\n name = 'funcPiper'\n\n def __init__(self, config: AttackParser):\n self._config = config\n self.regx = re.compile(config.regx)\n\n def find_flag(self, string):\n m = self.regx.search(string)\n if 
not m:\n return False, ''\n return True, m.group(0)\n\n def process(self, item: ItemStream):\n if not item.has_func():\n return\n try:\n result, msg = item.func.run(item.ip)\n except Exception as e:\n item.func.result = (Status.ERROR, str(e))\n else:\n if not item.payload.flag:\n item.func.result = (Status.SUCCESS, msg) if result else (Status.FAIL, msg)\n return\n if not result:\n item.func.result = (Status.FAIL, msg)\n return\n result, flag = self.find_flag(msg)\n if not result:\n item.func.result = (Status.FAIL, msg)\n return\n item.func.result = (Status.SUCCESS, msg)\n item.set_flag(flag)\n", "id": "1992836", "language": "Python", "matching_score": 2.7819154262542725, "max_stars_count": 24, "path": "core/piper/funcPiper.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/04/12 18:19\n# @Author : Xu\n# @Site : https://xuccc.github.io/\n\nfrom typing import Callable, Tuple\n\nfrom core.data import Status, Payload\n\n\nclass Item(object):\n def __init__(self, ):\n self._status = Status.TBD\n self.message = ''\n\n @property\n def status(self) -> bool:\n return Status.SUCCESS == self._status\n\n @property\n def result(self):\n return self._status, self.message\n\n @result.setter\n def result(self, result: Tuple):\n self._status, self.message = result\n\n\nclass FuncItem(Item):\n def __init__(self, func: Callable[[str], str]):\n super(FuncItem, self).__init__()\n self.func = func\n\n def run(self, ip) -> (bool, str):\n return self.func(ip)\n\n\nclass FlagItem(Item):\n def __init__(self, value):\n super(FlagItem, self).__init__()\n self.value = value\n\n\nclass ItemStream(Item):\n flag: FlagItem\n func: FuncItem\n\n def __init__(self, r: int, ip='127.0.0.1', payload: Payload = None, challenge: str = None):\n super(ItemStream, self).__init__()\n self.round = r\n self.ip = ip\n self.payload = payload\n if payload is not None:\n self.set_func(payload.func)\n self.challenge = payload.challenge\n else:\n self.challenge = challenge or ''\n\n def set_flag(self, value):\n self.flag = FlagItem(value)\n\n def set_func(self, func: Callable[[str], str]):\n self.func = FuncItem(func)\n\n def has_flag(self):\n return hasattr(self, 'flag')\n\n def has_func(self):\n return hasattr(self, 'func')\n", "id": "3306830", "language": "Python", "matching_score": 1.5272504091262817, "max_stars_count": 24, "path": "core/item.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/04/12 18:16\n# @Author : Xu\n# @Site : https://xuccc.github.io/\n\nimport os\nfrom enum import IntEnum\n\nfrom core.utils import load_py_script\n\n\nclass Status(IntEnum):\n SUCCESS = 1\n FAIL = 2\n ERROR = 9\n TBD = 0\n\n\nclass Payload(object):\n def __init__(self, filename, cls):\n self.name = filename\n self.challenge = cls.challenge\n self.func = cls.run\n self.flag = cls.flag\n\n @staticmethod\n def load(path):\n filename = os.path.basename(path)\n payload = load_py_script('awd.core.payload', path)\n return Payload(filename, payload.Payload)\n\n def __str__(self):\n return f'{self.name} {self.challenge}'\n", "id": "11050709", "language": "Python", "matching_score": 0.9523637294769287, "max_stars_count": 24, "path": "core/data.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/04/12 17:12\n# @Author : Xu\n# @Site : https://xuccc.github.io/\n\n\nclass AwdError(Exception):\n pass\n\n\nclass ConfigSyntaxError(AwdError):\n pass\n\n\nclass ConfigFileError(AwdError):\n pass\n\n\nclass ServiceInitError(AwdError):\n pass\n\n\nclass 
PayloadTempError(AwdError):\n pass\n", "id": "7888669", "language": "Python", "matching_score": 0.7271517515182495, "max_stars_count": 24, "path": "core/exception.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/04/12 17:11\n# @Author : Xu\n# @Site : https://xuccc.github.io/\n\nimport os\n\npath = os.path.dirname(__file__)\nROOT_PATH = os.path.dirname(path)\n", "id": "708120", "language": "Python", "matching_score": 0.8885229229927063, "max_stars_count": 24, "path": "core/__init__.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/04/15 21:39\n# @Author : Xu\n# @Site : https://xuccc.github.io/\n\nimport os\nimport glob\nimport queue\nfrom watchdog.observers import Observer\nfrom watchdog.events import FileSystemEventHandler, FileSystemEvent\n\nfrom core.service import BaseService\nfrom core.config import AttackParser\nfrom core.data import Payload\n\n\nclass PayloadMonitor(BaseService, Observer):\n serviceName = 'PayloadFileMonitor'\n payloadQueue = queue.Queue()\n payloadDict = {}\n\n @classmethod\n def load_payload(cls, path) -> Payload:\n try:\n pd = Payload.load(path)\n except AttributeError as e:\n PayloadMonitor.log.warning(f'{path} missing attribute: {str(e).split(\" \")[-1]}')\n except Exception as e:\n PayloadMonitor.log.warning(f'load {path} error: {e}')\n else:\n PayloadMonitor.payloadDict.update({pd.name: pd})\n PayloadMonitor.payloadQueue.put(pd)\n return pd\n return\n\n @property\n def status(self):\n return ';'.join([str(p) for p in self.payloadDict.values()])\n\n class PayloadEventHandler(FileSystemEventHandler):\n # save file modify time to avoid run modify event twice: https://github.com/gorakhargosh/watchdog/issues/93\n mtime_cache = set()\n\n def on_created(self, event: FileSystemEvent):\n if event.is_directory or not event.src_path.endswith('py'):\n return\n PayloadMonitor.log.info(f'find new payload: {event.src_path}')\n\n def on_modified(self, event: FileSystemEvent):\n if event.is_directory or not event.src_path.endswith('py'):\n return\n\n mtime = os.stat(event.src_path).st_mtime\n if mtime in self.mtime_cache:\n return\n self.mtime_cache.add(mtime)\n if PayloadMonitor.load_payload(event.src_path):\n PayloadMonitor.log.info('update payload: ' + event.src_path)\n\n def on_deleted(self, event: FileSystemEvent):\n if event.is_directory or not event.src_path.endswith('py'):\n return\n filename = os.path.basename(event.src_path)\n if filename in PayloadMonitor.payloadDict:\n PayloadMonitor.payloadDict.pop(filename)\n PayloadMonitor.log.debug(f'remove payload: {event.src_path}')\n\n def __init__(self, config: AttackParser):\n self.dir = config.dir\n self.log.info('payload file monitor dir: ' + self.dir)\n super(PayloadMonitor, self).__init__(1)\n\n self.schedule(PayloadMonitor.PayloadEventHandler(), self.dir, True)\n\n @classmethod\n def get(cls, block=True) -> Payload:\n try:\n return cls.payloadQueue.get(block, timeout=3)\n except queue.Empty:\n return\n\n @classmethod\n def clear(cls):\n PayloadMonitor.PayloadEventHandler.mtime_cache.clear()\n while not cls.payloadQueue.empty():\n cls.payloadQueue.get_nowait()\n for _, p in cls.payloadDict.items():\n cls.payloadQueue.put(p)\n\n def loads(self):\n for abs_path in glob.glob(f'{self.dir}/*.py'):\n if self.load_payload(abs_path):\n self.log.success('find payload: ' + abs_path)\n\n def __str__(self):\n return f'{self.serviceName} payload dir: {self.dir}'\n", "id": "2706835", "language": "Python", "matching_score": 2.223357677459717, "max_stars_count": 24, 
"path": "core/service/monitor.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/04/14 22:10\n# @Author : Xu\n# @Site : https://xuccc.github.io/\n\nimport queue\nimport time\nfrom concurrent.futures import ThreadPoolExecutor, Future\nfrom threading import Thread\nfrom typing import Dict\n\nfrom core.config import AppConfig\nfrom core.item import ItemStream\nfrom core.piper import *\n\n\nclass Pipeline(object):\n name = 'PipelineListener'\n queue = queue.Queue()\n _pipers: Dict[str, Piper] = {}\n _tasks: Dict[ItemStream, Future] = {}\n\n def __init__(self, config: AppConfig):\n self.ips = config.challenges.ips\n self.challenges = config.challenges\n\n self._config = config\n self._pool = ThreadPoolExecutor(config.attack.thread)\n\n def build(self):\n self.add(\n FuncHandler(self._config.attack)\n ).add(\n FlagPiper(self._config.platform)\n ).add(\n LogPiper(self._config.debug)\n ).add(\n DbPiper()\n )\n\n @classmethod\n def add(cls, piper: Piper):\n cls._pipers.update({\n piper.name: piper\n })\n return cls\n\n @classmethod\n def delete(cls, name):\n cls._pipers.pop(name)\n\n @classmethod\n def get(cls, name):\n return cls._pipers.get(name)\n\n @classmethod\n def send(cls, item: ItemStream):\n cls.queue.put(item)\n\n @classmethod\n def clear(cls):\n for item in list(cls._tasks.keys()):\n if cls._tasks.get(item).cancel() or cls._tasks.get(item).done():\n cls._tasks.pop(item)\n while not cls.queue.empty():\n cls.queue.get_nowait()\n\n @classmethod\n def cancel(cls, payload: str):\n c = 0\n for item in list(cls._tasks.keys()):\n if item.payload.name == payload:\n if cls._tasks.get(item).cancel():\n cls._tasks.pop(item)\n c += 1\n return c\n\n def do(self, item: ItemStream) -> ItemStream:\n for piper in self._pipers.values():\n piper.process(item)\n return item\n\n def run(self):\n while True:\n try:\n item: ItemStream = self.queue.get(False, timeout=3)\n except queue.Empty:\n continue\n future = self._pool.submit(self.do, item)\n self._tasks[item] = future\n\n if self._config.platform.interval:\n time.sleep(self._config.platform.interval / 1000)\n\n def start(self):\n thread = Thread(\n target=self.run,\n name=self.name,\n daemon=True\n )\n thread.start()\n\n @property\n def progress(self):\n r = 0\n d = 0\n a = 0\n for i in self._tasks.values():\n a += 1\n if i.running():\n r += 1\n elif i.done():\n d += 1\n return r, d, a\n", "id": "1645922", "language": "Python", "matching_score": 2.6416969299316406, "max_stars_count": 24, "path": "core/piper/pipeline.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/04/15 22:55\n# @Author : Xu\n# @Site : https://xuccc.github.io/\n\nimport time\nimport schedule\nimport signal\nfrom threading import Thread\nfrom colorama import Fore\n\nfrom typing import Dict\nfrom core.log import Log\nfrom core.db import init_database\nfrom core.item import ItemStream\nfrom core.config import AppConfig\nfrom core.piper import FlagPiper\nfrom core.piper.pipeline import Pipeline\nfrom core.service import BaseService\nfrom core.service.monitor import PayloadMonitor\n\n\ndef schedule_run():\n while True:\n schedule.run_pending()\n time.sleep(1)\n\n\nclass AwdEngine(object):\n def __init__(self, path):\n AppConfig().load(path)\n self._log = Log.app\n self._config = AppConfig()\n Log.config(self._config.debug)\n\n self.pipeline = Pipeline(self._config)\n self.services: Dict[str, BaseService] = {}\n\n @property\n def is_begin(self) -> bool:\n return True if self._config.time.round > 0 else False\n\n @property\n def 
payload_monitor(self) -> PayloadMonitor:\n return self.services.get(PayloadMonitor.serviceName)\n\n def init(self):\n init_database(self._config.db)\n self.pipeline.build()\n\n self.services.update({\n PayloadMonitor.serviceName: PayloadMonitor(self._config.attack),\n })\n self.payload_monitor.loads()\n\n def check(self):\n # check flag submit piper\n piper: FlagPiper = self.pipeline.get(FlagPiper.name)\n r, msg = piper.submit_flag('flag')\n if not r:\n self._log.error('submit test flag error: ' + msg)\n elif r and not self._config.platform.success_text:\n self._log.warning('[platform.success_text] is empty when submit flag successful')\n\n def refresh(self):\n for name, service in self.services.items():\n self._log.info(f'[{name}]: {service.status}')\n # do some jobs every round\n self._log.debug('clear queue in pipeline')\n self.pipeline.clear()\n for name, service in self.services.items():\n service.clear()\n self._log.debug(f'clear {name} cache')\n\n def load(self):\n self.pipeline.start()\n self._log.info(f'{self.pipeline.name} is running')\n for name, service in self.services.items():\n service.start()\n self._log.info(f'{name} is running')\n\n schedule.every(self._config.time.interval).minutes.do(self.refresh).tag('refresh')\n Thread(target=schedule_run, daemon=True).start()\n\n def run(self):\n def stop(signum, frame):\n self._log.warning('exit')\n\n import os\n\n os._exit(1)\n\n signal.signal(signal.SIGINT, stop)\n signal.signal(signal.SIGTERM, stop)\n\n while True:\n payload = self.payload_monitor.get(False)\n if payload is None:\n continue\n c = self.pipeline.cancel(payload.name)\n r, d, a = self.pipeline.progress\n self._log.info(f'progress: {Fore.BLUE}{d}{Fore.RESET}/{a} '\n f'running: {Fore.YELLOW}{r}{Fore.RESET} '\n f'cancel: {Fore.RED}{c}{Fore.RESET}')\n for challenge, ips in self._config.challenges:\n if challenge != payload.challenge:\n continue\n for ip in ips:\n self.pipeline.send(ItemStream(self._config.time.round, ip, payload=payload))\n", "id": "3728885", "language": "Python", "matching_score": 3.367201805114746, "max_stars_count": 24, "path": "core/engine.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/04/21 22:28\n# @Author : Xu\n# @Site : https://xuccc.github.io/\n\nfrom colorama import Fore\nfrom core.engine import AwdEngine\n\n__version__ = '0.1.0'\n__banner__ = f'''\n********************************************************\n* _ _ ___ ______ \n* / \\ _ _| |_ ___ / \\ \\ / / _ \\ \n* / _ \\| | | | __/ _ \\ / _ \\ \\ /\\ / /| | | |\n* / ___ \\ |_| | || (_) | / ___ \\ V V / | |_| |\n* /_/ \\_\\__,_|\\__\\___/ /_/ \\_\\_/\\_/ |____/ \n*\n* Version: {Fore.YELLOW}{__version__}{Fore.RESET} by Xu\n* {Fore.LIGHTGREEN_EX}~~~~~~Good Luck~~~~~~{Fore.RESET}\n********************************************************\n'''\n\nif __name__ == '__main__':\n print(__banner__)\n c = AwdEngine('config.template')\n c.init()\n c.load()\n c.run()\n", "id": "8002603", "language": "Python", "matching_score": 0.09262621402740479, "max_stars_count": 24, "path": "awd.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/04/17 21:15\n# @Author : Xu\n# @Site : https://xuccc.github.io/\n\nimport os\nfrom pony.orm import *\n\nfrom core import ROOT_PATH\nfrom core.log import Log\n\ndb = Database()\n\n\nclass FuncInfo(db.Entity):\n id = PrimaryKey(int, auto=True)\n round = Optional(int)\n message = Optional(str, nullable=True)\n status = Optional(bool)\n name = Required(str)\n ip = Required(str)\n flag = 
Optional('FlagInfo')\n\n\nclass FlagInfo(db.Entity):\n id = PrimaryKey(int, auto=True)\n round = Required(int)\n value = Required(str, unique=True)\n challenge = Optional(str)\n status = Optional(bool)\n message = Optional(str)\n func = Optional(FuncInfo)\n\n\ndef init_database(path):\n dbPath = os.path.join(ROOT_PATH, path)\n if os.path.exists(path):\n db.bind(provider='sqlite', filename=dbPath)\n db.generate_mapping()\n Log.app.success('load database: ' + dbPath)\n else:\n db.bind(provider='sqlite', filename=dbPath, create_db=True)\n db.generate_mapping(create_tables=True)\n Log.app.info('create database: ' + dbPath)\n", "id": "12310981", "language": "Python", "matching_score": 2.6708624362945557, "max_stars_count": 24, "path": "core/db.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/04/13 22:45\n# @Author : Xu\n# @Site : https://xuccc.github.io/\n\nfrom core.db import db_session, FuncInfo, FlagInfo\nfrom core.item import ItemStream\nfrom core.piper.basePiper import Piper\n\n\nclass DbPiper(Piper):\n name = 'databasePiper'\n\n def process(self, item: ItemStream):\n with db_session:\n if item.has_func():\n funcinfo = FuncInfo(\n round=item.round,\n message='' if item.func.status else item.func.message,\n status=item.func.status,\n name=item.payload.name,\n ip=item.ip,\n )\n if item.has_flag():\n flaginfo = FlagInfo(\n round=item.round,\n value=item.flag.value,\n status=item.flag.status,\n message='' if item.flag.status else item.flag.message,\n challenge=item.challenge\n )\n if item.has_func() & item.has_flag():\n funcinfo.flag = flaginfo\n", "id": "4958678", "language": "Python", "matching_score": 1.8010913133621216, "max_stars_count": 24, "path": "core/piper/dbPiper.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/04/15 22:18\n# @Author : Xu\n# @Site : https://xuccc.github.io/\n\nimport pytest\nfrom unittest.mock import MagicMock\n\nfrom core.item import ItemStream\nfrom core.data import Status\nfrom core.piper.pipeline import Pipeline\nfrom core.piper import FlagPiper, DbPiper\n\n\[email protected]()\ndef pipeline(config):\n p = Pipeline(config)\n p.build()\n p.delete(DbPiper.name)\n return p\n\n\ndef test_item_process_good_func_find_flag_submit_fail(pipeline, find_flag_payload):\n item = pipeline.do(ItemStream(1, payload=find_flag_payload))\n assert item.has_func()\n assert item.has_flag()\n assert item.func.status\n assert not item.flag.status\n\n\ndef test_item_process_good_func_find_no_flag(pipeline, find_flag_payload):\n find_flag_payload.func = MagicMock(return_value=(True, 'not flag'))\n item = pipeline.do(ItemStream(1, payload=find_flag_payload))\n assert item.has_func()\n assert not item.func.status\n assert item.func._status == Status.FAIL\n assert item.func.message == 'not flag'\n assert not item.has_flag()\n\n\ndef test_item_process_bad_func(pipeline, find_flag_payload):\n find_flag_payload.func = MagicMock(side_effect=KeyError)\n item = pipeline.do(ItemStream(1, payload=find_flag_payload))\n assert item.has_func()\n assert item.func._status == Status.ERROR\n assert not item.func.status\n assert not item.has_flag()\n\n\ndef test_item_process_good_func_submit_success(config, pipeline, find_flag_payload):\n config.platform.success_text = ['submit ok']\n FlagPiper._parse_shell_output = MagicMock(return_value=(True, \"{'result':'submit ok'}\"))\n item = pipeline.do(ItemStream(1, payload=find_flag_payload))\n assert item.has_flag()\n assert item.flag.status\n\n\ndef test_item_process_only_run(pipeline, 
only_run_payload):\n item = pipeline.do(ItemStream(1, payload=only_run_payload))\n assert item.has_func()\n assert item.func.status\n assert not hasattr(item, 'flag')\n", "id": "2052086", "language": "Python", "matching_score": 2.334303617477417, "max_stars_count": 24, "path": "tests/test_pipeline.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/04/12 22:44\n# @Author : Xu\n# @Site : https://xuccc.github.io/\n\nimport requests\nimport subprocess\nimport traceback\n\nfrom core.data import Status\nfrom core.item import ItemStream\nfrom core.piper.basePiper import Piper\nfrom core.config import PlatformParser\n\n\nclass FlagPiper(Piper):\n name = 'flagPiper'\n\n def __init__(self, config: PlatformParser):\n self._config = config\n\n # TODO: retry when timeout/50x\n def submit_flag(self, flag) -> (bool, str):\n if self._config.isCurl:\n r, msg = self._parse_shell_output(self._config.curl.format(flag=flag))\n else:\n r, msg = self._parse_request_output(flag)\n if not r:\n return False, msg\n\n if not self._config.success_text:\n return True, msg\n for text in self._config.success_text:\n if text in msg:\n return True, msg\n return False, msg\n\n def _parse_shell_output(self, cmd: str) -> (bool, str):\n with subprocess.Popen(cmd.split(' '), stdout=subprocess.PIPE, stderr=subprocess.PIPE) as p:\n try:\n exitcode = p.wait(self._config.timeout)\n stdout, stderr = p.communicate()\n except:\n p.kill()\n return False, f'Timeout: {cmd}'\n else:\n if exitcode != 0:\n return False, stderr.decode('utf-8')\n return True, stdout.decode('utf-8')\n\n def _parse_request_output(self, flag) -> (bool, str):\n try:\n rep: requests.Response = self._config.py(flag)\n except:\n return False, traceback.format_exc()\n else:\n if not isinstance(rep, requests.Response):\n return False, \\\n f\"[platform.python] {self._config.data.get('python')} return value is not requests.Response\"\n if rep.status_code != 200:\n return False, rep.reason\n return True, rep.text\n\n def process(self, item: ItemStream):\n if not item.has_flag():\n return\n\n try:\n r, msg = self.submit_flag(item.flag.value)\n except:\n item.flag.result = (Status.ERROR, traceback.format_exc())\n else:\n item.flag.result = (Status.FAIL, msg) if not r else (Status.SUCCESS, msg)\n", "id": "2080637", "language": "Python", "matching_score": 2.5171778202056885, "max_stars_count": 24, "path": "core/piper/flagPiper.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/04/12 17:12\n# @Author : Xu\n# @Site : https://xuccc.github.io/\n\nimport requests\nfrom typing import List, Dict, Callable\nimport yaml\nimport re\nimport os\nimport time\n\nfrom core.utils import load_py_script, SingletonType\nfrom core.exception import ConfigSyntaxError, ConfigFileError\n\n\nclass ParserUtil(object):\n @staticmethod\n def ip(ips: str, include: str = '', exclude: str = '') -> List[str]:\n a, b, c, start, end = re.split(r'[.~]', ips)\n result = ['.'.join((a, b, c, str(i))) for i in range(int(start), int(end) + 1)]\n result += [i for i in include.split(',') if i]\n result = [i for i in result if i not in exclude.split(',')]\n return result\n\n\nclass BaseParser(object):\n def __init__(self, data: dict):\n self.data = data\n\n\nclass TimeParser(BaseParser):\n def __init__(self, data):\n super(TimeParser, self).__init__(data)\n self.date = time.strftime('%Y-%m-%d', time.localtime())\n self.start = TimeParser.format_time(data.get('start', '00:00'))\n self.interval: int = data['interval']\n\n @staticmethod\n def 
format_time(t) -> str:\n try:\n return time.strftime('%H:%M', time.strptime(t, '%H:%M'))\n except ValueError as e:\n raise ConfigSyntaxError(f'time format error: {e}')\n\n @property\n def round(self):\n localtime = time.localtime()\n if time.strftime('%H:%M', localtime) < self.start:\n return 0\n else:\n h, m = map(lambda x: int(x), self.start.split(':'))\n return ((localtime.tm_hour - h) * 60 + (localtime.tm_min - m)) // self.interval\n\n @property\n def next_round_time(self):\n r = self.round\n seconds = ((r + 1) * self.interval) * 60\n start = time.mktime(time.strptime(f'{self.date} {self.start}', '%Y-%m-%d %H:%M'))\n return time.strftime('%H:%M', time.localtime(start + seconds))\n\n\nclass PlatformParser(BaseParser):\n def __init__(self, data):\n super(PlatformParser, self).__init__(data)\n self.isCurl = True\n if 'curl' in data and 'python' in data:\n raise ConfigSyntaxError('[platform.curl] or [platform.python] not both')\n\n if 'curl' in data:\n self.curl: str = data.get('curl')\n if '{flag}' not in self.curl:\n raise ConfigSyntaxError(\"[platform.curl] missing formatter: {flag}\")\n elif 'python' in data:\n self.isCurl = False\n try:\n self.py: Callable[[str], requests.Response] = load_py_script('awd.core.submit',\n data.get('python')).submit\n except AttributeError:\n raise ConfigFileError(f'[platform.python] miss function: submit(flag)')\n except SyntaxError as e:\n raise ConfigSyntaxError(f'[platform.python] file syntax error: {e}')\n\n self.timeout: int = data.get('timeout', 3)\n self.success_text = data.get('success_text', [])\n self.interval: int = data.get('interval', 0)\n\n\nclass ChallengeParser(BaseParser):\n def __init__(self, data):\n super(ChallengeParser, self).__init__(data)\n self.challenges: Dict[str, List[str]] = {}\n self.ips = set()\n\n if (raw := data.get('raw')) is not None:\n self.challenges = raw\n else:\n for challenge, ip_data in data.items():\n self.challenges[challenge] = ParserUtil.ip(ip_data['ips'],\n ip_data.get('include', ''), ip_data.get('exclude', ''))\n\n for ips in self.challenges.values():\n for ip in ips:\n self.ips.add(ip)\n\n def __iter__(self):\n return iter(self.challenges.items())\n\n\nclass AttackParser(BaseParser):\n def __init__(self, data):\n super(AttackParser, self).__init__(data)\n self.dir = data.get('dir', 'payloads')\n if not os.path.exists(self.dir):\n raise ConfigFileError(f'payload dir:{self.dir} not find')\n self.thread: int = data.get('thread', 8)\n self.regx = data['regx']\n\n\nclass PluginParser(BaseParser):\n def __init__(self, data):\n super(PluginParser, self).__init__(data)\n self.plugins = data\n\n\nclass AppConfig(metaclass=SingletonType):\n data: dict\n db: str\n debug: bool\n time: TimeParser\n platform: PlatformParser\n challenges: ChallengeParser\n plugins: PluginParser\n attack: AttackParser\n\n def load(self, config: str):\n if not os.path.exists(config):\n raise ConfigFileError(f'config file {config} not find')\n with open(config, 'r') as f:\n self.data = yaml.safe_load(f.read())\n\n self.db = self.data.get('db', 'awd.db')\n self.debug = self.data.get('debug', False)\n self.time = TimeParser(self.data.get('time'))\n self.platform = PlatformParser(self.data.get('platform'))\n self.challenges = ChallengeParser(self.data.get('challenge'))\n self.plugins = PluginParser(self.data.get('plugin', {}))\n self.attack = AttackParser(self.data.get('attack', {}))\n", "id": "8433023", "language": "Python", "matching_score": 3.133668899536133, "max_stars_count": 24, "path": "core/config.py" }, { "content": "#!/usr/bin/env 
python\n# -*- coding: utf-8 -*-\n# @Time : 2020/04/12 17:29\n# @Author : Xu\n# @Site : https://xuccc.github.io/\n\nimport pytest\nfrom unittest import mock\nfrom core.config import ParserUtil, ChallengeParser\n\n\[email protected]('ips,include,exclude,result',\n [\n ('127.0.0.1~3', '127.0.0.128', '127.0.0.2',\n ['127.0.0.1', '127.0.0.3', '127.0.0.128']),\n ('127.0.0.1~3', '127.0.0.128', '',\n ['127.0.0.1', '127.0.0.2', '127.0.0.3', '127.0.0.128']),\n ('127.0.0.1~3', '', '127.0.0.2',\n ['127.0.0.1', '127.0.0.3']),\n ('127.0.0.1~3', '', '',\n ['127.0.0.1', '127.0.0.2', '127.0.0.3'])\n ])\ndef test_ip(ips, include, exclude, result):\n assert ParserUtil.ip(ips, include, exclude) == result\n\n\ndef test_ip_parse():\n c = ChallengeParser({\n 'easyWeb': {\n 'ips': '172.18.0.1~5',\n 'include': '172.18.0.8',\n 'exclude': '172.18.0.4',\n },\n 'hardWeb': {\n 'ips': '172.18.1.1~5',\n 'include': '172.18.1.8',\n 'exclude': '172.18.1.4',\n }\n })\n assert c.ips == {'172.18.0.1', '172.18.0.2', '172.18.0.3', '172.18.0.5', '172.18.0.8',\n '172.18.1.1', '172.18.1.2', '172.18.1.3', '172.18.1.5', '172.18.1.8'}\n\n\ndef test_config_ip_parse(config):\n assert config.challenges.ips == {'172.18.0.1',\n '172.18.0.2',\n '172.18.1.1',\n '172.18.1.2'\n }\n\n\ndef test_next_round_time_calc(config):\n with mock.patch('core.config.TimeParser.round', new_callable=mock.PropertyMock) as m:\n m.return_value = 3\n assert config.time.next_round_time == '08:20'\n", "id": "1724053", "language": "Python", "matching_score": 1.7971349954605103, "max_stars_count": 24, "path": "tests/test_config.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/04/12 17:23\n# @Author : Xu\n# @Site : https://xuccc.github.io/\n\nimport pytest\n\nfrom core.config import AppConfig\nfrom core.data import Payload\n\nYAML = \"\"\"\n# db: awd.db\ntime:\n start: '8:00'\n interval: 5\nplatform:\n curl: curl http://127.0.0.1:8000/submit?flag={flag}&token=fc067281e151a0b929f5056f22298490\n # python: submit.py\n# timeout: 3\n# success_text: ''\nattack:\n regx: \\w{32}\n dir: %s\n# thread: 8\nchallenge:\n raw:\n easyWeb:\n - 172.18.0.1\n - 172.18.0.2\n hardWeb:\n - 172.18.1.1\n - 172.18.1.2\n\"\"\"\n\nFIND_FLAG_PAYLOAD = \"\"\"\nimport uuid\nimport random\nimport time\n\n\nclass Payload(object):\n challenge = 'easyWeb'\n flag = True\n\n @staticmethod\n def run(ip):\n time.sleep(random.randint(1, 3))\n return True, 'flag is here: ' + uuid.uuid4().hex\n\"\"\"\n\nONLY_RUN_PAYLOAD = \"\"\"\nimport uuid\nimport random\nimport time\n\n\nclass Payload(object):\n challenge = 'hardWeb'\n flag = False\n\n @staticmethod\n def run(ip):\n time.sleep(random.randint(1, 3))\n return True, 'attack success'\n\n\n\"\"\"\n\n\[email protected]()\ndef config(tmpdir):\n c = tmpdir.join('config.yml')\n c.write(YAML % tmpdir)\n AppConfig().load(c)\n return AppConfig()\n\n\[email protected]()\ndef find_flag_payload(tmpdir, config):\n p = tmpdir.join('find_flag.py')\n p.write(FIND_FLAG_PAYLOAD)\n return Payload.load(p)\n\n\[email protected]()\ndef only_run_payload(tmpdir, config):\n p = tmpdir.join('only_run.py')\n p.write(ONLY_RUN_PAYLOAD)\n return Payload.load(p)\n", "id": "11118461", "language": "Python", "matching_score": 0.13189922273159027, "max_stars_count": 24, "path": "tests/conftest.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/04/19 21:32\n# @Author : Xu\n# @Site : https://xuccc.github.io/\n\nimport abc\nfrom threading import Thread\n\nfrom core.log import Log\nfrom core.utils import SingletonType\n\n\nclass 
BaseService(Thread, metaclass=SingletonType):\n serviceName = 'BaseService'\n log = Log.app\n\n @property\n @abc.abstractmethod\n def status(self) -> str:\n return ''\n\n @abc.abstractmethod\n def clear(self):\n pass\n", "id": "3199546", "language": "Python", "matching_score": 2.142409563064575, "max_stars_count": 24, "path": "core/service/__init__.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/04/21 22:06\n# @Author : Xu\n# @Site : https://xuccc.github.io/\n\nimport os\nimport threading\nimport importlib.util\n\n\ndef load_py_script(module: str, path):\n if not os.path.exists(path):\n raise ValueError(f'{path} is not exists')\n spec = importlib.util.spec_from_file_location(module, path)\n m = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(m)\n return m\n\n\nclass SingletonType(type):\n _instance_lock = threading.Lock()\n\n def __call__(cls, *args, **kwargs):\n if not hasattr(cls, \"_instance\"):\n with SingletonType._instance_lock:\n if not hasattr(cls, \"_instance\"):\n cls._instance = super(SingletonType, cls).__call__(*args, **kwargs)\n return cls._instance\n", "id": "5389515", "language": "Python", "matching_score": 1.3097822666168213, "max_stars_count": 24, "path": "core/utils.py" } ]
1.799113
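The core/utils.py entry in the row above records a double-checked-locking singleton metaclass that core/config.py's AppConfig relies on (the conftest.py fixture calls AppConfig().load(c) and then returns a bare AppConfig(), which must hand back the same loaded object). The minimal sketch below is not part of the dataset row: it re-declares the recorded metaclass and pairs it with a hypothetical DemoConfig class purely to show the shared-instance behaviour.

import threading


class SingletonType(type):
    # As recorded in core/utils.py: double-checked locking so that concurrent
    # first constructions still produce exactly one instance per class.
    _instance_lock = threading.Lock()

    def __call__(cls, *args, **kwargs):
        if not hasattr(cls, "_instance"):
            with SingletonType._instance_lock:
                if not hasattr(cls, "_instance"):
                    cls._instance = super(SingletonType, cls).__call__(*args, **kwargs)
        return cls._instance


class DemoConfig(metaclass=SingletonType):
    # Hypothetical stand-in for AppConfig, used only for this demonstration.
    def __init__(self):
        self.data = {}


a = DemoConfig()
a.data["db"] = "awd.db"
b = DemoConfig()  # no new object is created; the first instance is returned
assert a is b and b.data["db"] == "awd.db"

This shared-instance property is what lets the test fixture load the YAML once and have every later AppConfig() call see the parsed configuration.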
rbarillec
[ { "content": "def Euler0001():\n max = 1000\n sum = 0\n for i in range(1, max):\n if i%3 == 0 or i%5 == 0:\n sum += i\n\n print(sum)\n\nEuler0001()", "id": "2737", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "Euler0001.py" }, { "content": "def Euler0002():\n max = 4e6\n i = 0\n sum = 0\n \n fib1 = 0\n fib2 = 1\n \n while fib2 < max:\n print(fib2)\n if (fib2 %2 == 0):\n sum += fib2\n fib1, fib2 = fib2, fib2 + fib1\n print(\"The sum is\", sum)\n\nEuler0002()\n", "id": "11911386", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "Euler0002.py" }, { "content": "# Project Euler - Problem 4\n# -----------------------------\n# A palindromic number reads the same both ways. The largest palindrome made \n# from the product of two 2-digit numbers is 9009 = 91 × 99.\n# Find the largest palindrome made from the product of two 3-digit numbers.\n\ndef IsPalyndrom(intValue):\n value = str(intValue)\n return value == value[::-1]\n\ndef Palyndrome(numberOfDigits):\n min = 10 ** (numberOfDigits-1)\n max = (10 ** (numberOfDigits)) - 1\n \n values = range(max, min-1, -1)\n largestPalyndrome = 0\n\n for i in values:\n for j in range(i, min-1, -1):\n product = i*j\n if IsPalyndrom(product) and (product > largestPalyndrome):\n largestPalyndrome = product\n\n if largestPalyndrome > 0:\n print (\"Largest palyndrom of 2 \", numberOfDigits, \"digit numbers is \", largestPalyndrome)\n else:\n print (\"No palyndrom of 2 \", numberOfDigits, \"digit numbers could be found\")\n\nPalyndrome(3)", "id": "6108112", "language": "Python", "matching_score": 0.20182672142982483, "max_stars_count": 0, "path": "Euler0004.py" }, { "content": "def IsDivisibleBy(value, factors):\n for factor in factors:\n if value % factor == 0:\n return True\n return False\n\ndef Euler0003(value):\n factors = []\n factorProduct = 1\n candidate = 1\n \n while (factorProduct < value) and (candidate / value < value):\n candidate += 1\n if (value % candidate == 0) and (not IsDivisibleBy(candidate, factors)):\n factors.append(candidate)\n factorProduct *= candidate\n print(candidate)\n\n print(\"The largest prime factor of \", value, \" is \", factors[-1]) \n\nEuler0003(600851475143)\n", "id": "7498146", "language": "Python", "matching_score": 1.738115668296814, "max_stars_count": 0, "path": "Euler0003.py" }, { "content": "# Project Euler - Problem 4\n# -----------------------------\n# 2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.\n# What is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?\n\nimport math\n\ndef IsDivisibleByAll(value, factors):\n for factor in factors[::-1]:\n if value % factor != 0:\n return False\n return True\n\ndef IsDivisibleByAny(value, factors):\n for factor in factors:\n if value % factor == 0:\n return True\n return False\n\ndef GetPrimeFactors(number):\n primeFactors = [2]\n for i in range(3, number+1):\n if not IsDivisibleByAny(i, primeFactors):\n primeFactors.append(i)\n return primeFactors\n \ndef Euler(number):\n factors = range(2, number+1)\n candidate = number\n\n print(\"Smallest number evenly divisible by all numbers up to \", number, \"is\", candidate)\n\nEuler(20)\n", "id": "2455259", "language": "Python", "matching_score": 1.289299726486206, "max_stars_count": 0, "path": "Euler0005.py" } ]
0.201827
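Euler0005.py in the row above defines IsDivisibleByAll and GetPrimeFactors but its Euler(number) function never calls them; it sets candidate = number and prints that, so it does not actually compute the problem's answer. The sketch below is not taken from the repository: it shows one conventional way to finish the task (the smallest number evenly divisible by 1..n is the least common multiple of that range); the function name smallest_evenly_divisible is an illustrative choice, not something defined in the repo.

import math
from functools import reduce


def smallest_evenly_divisible(n):
    # lcm(a, b) == a * b // gcd(a, b); folding lcm over 1..n gives the
    # smallest number divisible by every value in that range.
    return reduce(lambda a, b: a * b // math.gcd(a, b), range(1, n + 1), 1)


print(smallest_evenly_divisible(10))  # 2520, the value quoted in the file's docstring
print(smallest_evenly_divisible(20))  # 232792560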
IMTMarburg
[ { "content": "from .star import STAR\nfrom .subread import Subread, Subjunc\nfrom .bowtie import Bowtie\nfrom .salmon import Salmon\nfrom .bwa import BWA\nfrom .bbmap import BBMap, ExtendCigarBBMap\n\nall = [Bowtie, Subread, STAR, Salmon, Subjunc, BWA, BBMap, ExtendCigarBBMap]\n", "id": "1191018", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "src/mbf_externals/aligners/__init__.py" }, { "content": "from . import fixtures\nall = [fixtures]\n", "id": "10804679", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "src/mbf_genomes/testing/__init__.py" }, { "content": "import sys\nimport io\nimport time\nimport os\nimport signal\nfrom .util import (\n log_info,\n log_error,\n log_warning,\n log_debug,\n log_job_trace,\n shorten_job_id,\n)\nimport select\nimport termios\nimport tty\nimport threading\nfrom .util import console\nimport rich.status\nfrom .parallel import async_raise\nfrom collections import namedtuple\n\nStatusReport = namedtuple(\n \"StatusReport\", [\"running\", \"waiting\", \"done\", \"total\", \"failed\"]\n)\n\n\nclass ConsoleInteractive:\n def _set_terminal_raw(self):\n \"\"\"Set almost all raw settings on the terminal, except for the output meddling\n - if we did that we get weird newlines from rich\"\"\"\n try:\n self.old_settings = termios.tcgetattr(sys.stdin)\n fd = sys.stdin.fileno()\n when = termios.TCSAFLUSH\n tty.setraw(fd)\n mode = termios.tcgetattr(sys.stdin.fileno())\n mode[1] = mode[1] | termios.OPOST # termios.tcgetattr(fd)\n termios.tcsetattr(sys.stdin.fileno(), when, mode)\n except io.UnsupportedOperation: # happens in tests that set it to console mode\n pass\n\n def _end_terminal_raw(self):\n if hasattr(self, \"old_settings\"):\n try:\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, self.old_settings)\n except io.UnsupportedOperation: # see _set_terminal_raw\n pass\n\n def start(self, runner):\n self.runner = runner\n self.last_report_status_args = StatusReport(0, 0, 0, len(runner.jobs), 0)\n self.breaker = os.pipe()\n self.thread = threading.Thread(target=self.loop)\n self._set_terminal_raw()\n self.stopped = False\n self.leave_thread = False\n self.thread.start()\n log_info(\"Type 'help<enter>' to receive a list of valid commands\")\n self._cmd = \"\"\n self.status = rich.status.Status(\"\", console=console)\n self.status.start()\n self.report_status(self.last_report_status_args)\n\n def stop(self):\n \"\"\"Called from the runner\"\"\"\n log_info(\"Leaving interactive mode\")\n self.stopped = True\n if hasattr(self, \"thread\"):\n self.leave_thread = True\n #async_raise(self.thread.ident, KeyboardInterrupt)\n os.write(self.breaker[1], b\"x\")\n self._end_terminal_raw()\n log_job_trace(\"Terminating interactive thread\")\n self.thread.join()\n log_job_trace(\"Terminated interactive thread\")\n self.status.stop()\n del self.runner\n log_info(\"Left interactive mode\")\n\n @property\n def cmd(self):\n return self._cmd\n\n @cmd.setter\n def cmd(self, value):\n self._cmd = value\n self.report_status(self.last_report_status_args)\n\n def loop(self):\n log_info(\"Entering interactive loop\")\n while True:\n try:\n if self.leave_thread:\n break\n try:\n input = select.select([sys.stdin, self.breaker[0]], [], [], 10)[0]\n except io.UnsupportedOperation as e:\n if \"redirected stdin is pseudofile\" in str(\n e\n ): # running under pytest - no interactivity, but suport requesting it?\n input = False\n else:\n raise\n if input:\n if self.breaker[0] in input:\n break\n else: # must have been stdin.\n value = 
sys.stdin.read(1)\n # log_info(f\"received {repr(value)}\")\n if value == \"\\x03\": # ctrl-c:\n self.cmd = \"\"\n elif value == \"\\x1a\": # ctrl-z\n os.kill(os.getpid(), signal.SIGTSTP)\n elif ord(\"0\") <= ord(value) <= ord(\"z\") or value == \" \":\n self.cmd += value\n elif value == \"\\x7f\": # backspace\n self.cmd = self.cmd[:-1]\n elif value == \"\\n\" or value == \"\\r\":\n try:\n if self.cmd:\n command = self.cmd\n args = \"\"\n if \" \" in command:\n command = command[: command.find(\" \")]\n args = self.cmd[len(command) + 1 :].strip()\n self.cmd = \"\"\n if hasattr(self, \"_cmd_\" + command):\n getattr(self, \"_cmd_\" + command)(args)\n else:\n print(\"No such command\")\n else:\n self._cmd_default()\n self.report_status(self.last_report_status_args)\n except Exception as e:\n log_error(\n f\"An exception occured during command: {e} {type(e)}\"\n )\n self.cmd = \"\"\n continue\n\n except KeyboardInterrupt:\n break\n # log_job_trace(\"Leaving interactive loop\")\n\n def report_status(self, report):\n self.last_report_status_args = report\n # msg = f\"[dim]Running/Waiting Done/Total[/dim] {report.running} / {report.waiting} {report.done} / {report.total}.\" # In flight: {len(self.runner.jobs_in_flight)} \"\n msg = f\"[dim]T:[/dim]{report.total} D:{report.done} R:{report.running} W:{report.waiting} F:{report.failed}\"\n if self.cmd:\n msg += f\" Cmd: {self.cmd}\"\n else:\n if self.stopped:\n msg += \" Exiting...\"\n else:\n # msg += \"Type help<enter> for commands\"\n pass\n self.status.update(status=msg + \"\\r\\n\")\n\n def _cmd_help(self, _args):\n \"\"\"print help\"\"\"\n print(\"Help for interactive mode\")\n print(\"You have the following commands available\")\n print(\"\\t- <enter> - Show currently running jobs\")\n for x in dir(self):\n if x.startswith(\"_cmd\"):\n cmd = x[5:]\n if cmd:\n print(f\"\\t {cmd} - {getattr(self, x).__doc__}\")\n # print(\"\\t- help - this command\")\n # print(\"\\t- abort - kill current jobs and exit asap\")\n # print(\"\\t- stop - Wait for the currently running jobs to finish, then exit\")\n # print(\"\\t- reboot - After the pipegraph has ended, restart the current python script\")\n # print(\"\\t- restart - After the currently running jobs have ended, restart the current python script\")\n\n def _cmd_default(self):\n \"\"\"print the currently running jobs (mapped to enter)\"\"\"\n t = time.time()\n to_sort = []\n for job_id in self.runner.jobs_in_flight:\n try:\n rt = t - self.runner.jobs[job_id].start_time\n to_sort.append((rt, job_id))\n except KeyError:\n pass\n to_sort.sort()\n print(\" | \".join((\"Job_no\", \"Runtime\", \"Cores\", \"Job_id\")))\n print(\" | \".join((\"------\", \"-------\", \"-----\", \"------\")))\n for rt, job_id in to_sort:\n job = self.runner.jobs[job_id]\n job_no = job.job_number\n if job.waiting:\n rt = \"waiting\"\n else:\n rt = f\"{rt:>6.2f}s\"\n display_job_id = shorten_job_id(job_id)\n cores = job.actual_cores_needed if job.actual_cores_needed != -1 else \"?\"\n print(f\"{job_no:>6} | {rt} | {cores} | {display_job_id}\")\n print(\"\")\n\n def _cmd_abort(self, _args):\n \"\"\"Kill current jobs and exit (safely) asap\"\"\"\n log_info(\"Run aborted by command. 
Safely shutting down\")\n self.runner.abort()\n self.stopped = True\n\n def _cmd_die(self, _args):\n \"\"\"kill the current process without saving history\"\"\"\n log_error(\"Sic semper running processes\")\n os.kill(os.getpid(), signal.SIGTERM)\n sys.exit(1)\n\n def _cmd_stop(self, _args):\n \"\"\"Exit after current jobs finished\"\"\"\n if not self.stopped:\n log_info(\"Run stopped by command\")\n waiting_for = []\n for job_id in self.runner.jobs_in_flight:\n try:\n if not getattr(self.runner.jobs[job_id], \"waiting\", False):\n waiting_for.append(job_id)\n except KeyError:\n pass\n\n log_info(f\"Having to wait for jobs: {sorted(waiting_for)}\")\n self.runner.stop()\n self.stopped = True\n\n def _cmd_again(self, _args):\n \"\"\"Restart the current python program after all jobs have completed\"\"\"\n log_info(\"Again command issued\")\n self.runner.job_graph.restart_afterwards()\n\n def _cmd_stop_and_again(self, _args):\n \"Stop after current jobs, then restart the current python program\"\n # log_info(\"Stop_and_again command issued\")\n # self.runner.stop()\n # self.stopped = True\n # self.runner.job_graph.restart_afterwards()\n self._cmd_stop(_args)\n self._cmd_again(_args)\n\n def _cmd_kill(self, args):\n \"\"\"kill a running job (by id)\"\"\"\n try:\n job_no = int(args)\n except ValueError:\n print(f\"Could not understand job number {repr(args)}- must be an integer\")\n return\n for job in self.runner.jobs.values():\n if job.job_number == job_no:\n break\n else:\n print(\"Could not find job number\")\n return\n if not job.job_id in self.runner.jobs_in_flight:\n print(\"Job not currently in flight - can't kill it\")\n return\n if not job.resources.is_external():\n print(\"Job is not running in an external process - can't kill\")\n return\n print(\"ok, killing job\", job.job_id)\n log_info(f\"Command kill {job.job_id} \")\n job.kill_if_running()\n", "id": "9487615", "language": "Python", "matching_score": 4.8339691162109375, "max_stars_count": 0, "path": "src/pypipegraph2/interactive.py" }, { "content": "from . import exceptions\nimport sys\nimport os\nimport queue\nimport time\nimport networkx\nfrom .util import escape_logging\nfrom .enums import (\n JobKind,\n ValidationState,\n ProcessingStatus,\n JobOutcome,\n RunMode,\n ShouldRun,\n Resources,\n)\nfrom .exceptions import _RunAgain\nfrom .parallel import CoreLock, async_raise\nfrom threading import Thread\nfrom . import ppg_traceback\nimport threading\nfrom rich.console import Console\nfrom .interactive import ConsoleInteractive, StatusReport\nfrom .util import log_info, log_error, log_warning, log_debug, log_trace, log_job_trace\nimport copy\nfrom .job_status import JobStatus\nfrom collections import deque\n\nljt = log_job_trace\n\n\nExitNow = \"___!!!ExitNow!!___\"\n#class ExitNow:\n# \"\"\"Token for leave-this-thread-now-signal\"\"\"\n\n# pass\n\n\nclass Runner:\n \"\"\"Run a given JobGraph\"\"\"\n\n def __init__(\n self,\n job_graph,\n history,\n event_timeout,\n focus_on_these_jobs,\n jobs_already_run_previously,\n dump_graphml,\n run_id,\n jobs_do_dump_subgraph_debug,\n ):\n from . 
import _with_changed_global_pipegraph\n\n log_trace(\"Runner.__init__\")\n self.event_timeout = event_timeout\n with _with_changed_global_pipegraph(JobCollector(job_graph.run_mode)):\n self.job_graph = job_graph\n self.jobs = job_graph.jobs.copy()\n self.job_inputs = copy.deepcopy(\n job_graph.job_inputs\n ) # job_graph.job_inputs.copy()\n self.outputs_to_job_ids = job_graph.outputs_to_job_ids.copy()\n self.next_job_number = self.job_graph.next_job_number\n self.core_lock = CoreLock(job_graph.cores)\n self.fail_counter = 0\n self.job_states = (\n {}\n ) # get's partially filled by modify_dag, and then later in this function\n self.run_id = (\n run_id # to allow jobgenerating jobs to run just once per graph.run()\n )\n\n # flat_before = networkx.readwrite.json_graph.node_link_data(\n # job_graph.job_dag\n # )\n if not networkx.algorithms.is_directed_acyclic_graph(\n self.job_graph.job_dag\n ): # pragma: no cover - defensive\n error_fn = self.job_graph.log_dir / \"debug_edges_with_cycles.txt\"\n networkx.write_edgelist(self.job_graph.job_dag, error_fn)\n cycles = list(networkx.simple_cycles(self.job_graph.job_dag))\n raise exceptions.NotADag(\n f\"Not a directed *acyclic* graph. See {error_fn}. Cycles between {cycles}\"\n )\n assert len(self.jobs) == len(job_graph.job_dag)\n\n log_job_trace(f\"Focus on these jobs: {focus_on_these_jobs}\")\n log_job_trace(f\"jobs_already_run_previously: {jobs_already_run_previously}\")\n self.dag, self.pruned = self.modify_dag(\n job_graph,\n focus_on_these_jobs,\n jobs_already_run_previously,\n history,\n dump_graphml,\n )\n # flat_after = networkx.readwrite.json_graph.node_link_data(job_graph.job_dag)\n # import json\n\n # assert flat_before == flat_after\n import json\n\n job_numbers = set()\n for job_id, job in self.jobs.items():\n # log_job_trace(f\"{job_id} {type(self.jobs[job_id])}\")\n if job.job_number in job_numbers:\n raise ValueError(\"Duplicate job_number\", job.job_number, job_id,job)\n job_numbers.add(job.job_number)\n assert len(job_numbers) == len(self.jobs)\n if len(self.jobs) - len(self.pruned) != len(self.dag):\n raise NotImplementedError(\n f\"Mismatch between len(self.jobs) {len(self.jobs)} - prune_counter {prune_counter} and len(self.dag) {len(self.dag)}\"\n )\n\n log_job_trace(\n \"dag \"\n + escape_logging(\n json.dumps(\n networkx.readwrite.json_graph.node_link_data(self.dag), indent=2\n )\n ),\n )\n\n if not networkx.algorithms.is_directed_acyclic_graph(\n self.dag\n ): # pragma: no cover - defensive\n error_fn = self.job_graph.log_dir / \"debug_edges_with_cycles.txt\"\n networkx.write_edgelist(self.dag, error_fn)\n cycles = list(networkx.simple_cycles(self.dag))\n raise exceptions.NotADag(\n f\"Not a directed *acyclic* graph after modification. See {error_fn}. 
Cycles between {cycles}\"\n )\n\n for topo_order_number, job_id in enumerate(networkx.algorithms.dag.topological_sort(\n self.dag\n )): # must be topological so we can do upstreams whilst building\n historical_input, historical_output = history.get(\n job_id, ({}, {})\n ) # todo: support renaming jobs.\n s = JobStatus(job_id, self, historical_input, historical_output, topo_order_number)\n log_trace(\n f\"Loaded history for {job_id} in: {len(s.historical_input)}, out: {len(s.historical_output)}\"\n )\n self.job_states[job_id] = s\n s.initialize() # so that output_needed can access the history\n self.event_lock = threading.Lock()\n self.jobs_to_run_que = queue.PriorityQueue()\n self.threads = []\n self.jobs_that_need_propagation = deque()\n if jobs_do_dump_subgraph_debug:\n j1 = self.jobs[list(jobs_do_dump_subgraph_debug)[0]]\n j1.dump_subgraph_for_debug(jobs_do_dump_subgraph_debug, self.jobs, self.dag)\n\n\n def _apply_pruning(self, dag, focus_on_these_jobs, jobs_already_run_previously):\n def _recurse_pruning(job_id, reason):\n \"\"\"This goes forward/downstream\"\"\"\n pruned.add(job_id)\n if not hasattr(self.jobs[job_id], \"prune_reason\"):\n self.jobs[job_id].prune_reason = reason\n for downstream_job_id in dag.successors(job_id):\n _recurse_pruning(downstream_job_id, reason)\n\n def _recurse_unpruning(job_id):\n \"\"\"This goes upstream\"\"\"\n try:\n pruned.remove(job_id)\n del self.jobs[job_id].prune_reason\n except (KeyError, AttributeError):\n pass\n for downstream_job_id in dag.predecessors(job_id):\n _recurse_unpruning(downstream_job_id)\n\n if jobs_already_run_previously:\n new_jobs = set(dag.nodes).difference(jobs_already_run_previously)\n ljt(f\"new jobs {new_jobs}\")\n else:\n new_jobs = set()\n\n if focus_on_these_jobs: # which is only set in the first run...\n # prune all jobs,\n # then unprune this one and it's predecessors\n pruned = set(dag.nodes) # prune all...\n for job_id in set((x.job_id for x in focus_on_these_jobs)).union(new_jobs):\n _recurse_unpruning(job_id)\n else:\n # apply regular pruning\n if jobs_already_run_previously:\n pruned = jobs_already_run_previously\n else:\n pruned = set()\n for job_id in new_jobs:\n _recurse_unpruning(job_id)\n for job_id in self.jobs:\n if self.jobs[job_id]._pruned:\n _recurse_pruning(job_id, job_id)\n for job_id in pruned:\n log_job_trace(f\"pruned {job_id}\")\n try:\n dag.remove_node(job_id)\n except networkx.exception.NetworkXError: # happens with cleanup nodes that we omitted\n pass\n # del self.jobs[job_id]\n return pruned\n\n def _add_cleanup(self, dag, job):\n downstreams = [\n x\n for x in dag.neighbors(job.job_id)\n if self.jobs[x].job_kind is not JobKind.Cleanup\n ] # depending on other cleanups makes littlesense\n if not downstreams:\n # if the job has no downstreams\n # it won't run.\n log_debug(f\"{job.job_id} had no downstreams - not adding a cleanup\")\n return\n\n cleanup_job = job.cleanup_job_class(job)\n cleanup_job.job_number = self.next_job_number\n self.next_job_number += 1\n self.jobs[cleanup_job.job_id] = cleanup_job\n dag.add_node(cleanup_job.job_id)\n log_debug(f\"creating cleanup {cleanup_job.job_id}\")\n for o in cleanup_job.outputs:\n log_trace(f\"Storing cleanup oututs_to_job_ids {o} = {cleanup_job.job_id}\")\n self.outputs_to_job_ids[o] = cleanup_job.job_id\n log_trace(f\"{job.job_id} cleanup adding\")\n for downstream_job_id in downstreams:\n log_trace(f\"add downstream edge: {downstream_job_id}, {cleanup_job.job_id}\")\n\n dag.add_edge(downstream_job_id, cleanup_job.job_id)\n 
self.job_inputs[cleanup_job.job_id].update(\n self.jobs[downstream_job_id].outputs\n )\n return cleanup_job\n\n def _modify_dag_for_conditional_job(self, dag, job, history):\n \"\"\"A a conditional job is one that only runs if it's downstreams need it.\n Examples are DataLoadingJobs and TempFileGeneratingJobs.\n\n We prune them if they have no downstreams,\n and we add cleanups\n\n \"\"\"\n # upstreams = dag.predecessors(job.job_id)\n # todo: should just prune instead?\n # but we need to prune before this, and missing downstreams\n # might be the result of pruning...\n downstreams = list(dag.successors(job.job_id))\n if not downstreams:\n log_job_trace(f\"ommiting conditional job because of no output {job.job_id}\")\n dag.remove_node(job.job_id)\n del self.jobs[job.job_id]\n # mark it as skipped\n historical_input, historical_output = history.get(\n job.job_id, ({}, {})\n ) # todo: support renaming jobs.\n self.job_states[job.job_id] = JobStatus(\n job.job_id, self, historical_input, historical_output, \n topo_order_number=-1\n )\n # no need to do the downstream calls - this is just an ignored job\n self.job_states[job.job_id].proc_state = ProcessingStatus.Done\n self.job_states[job.job_id].outcome = JobOutcome.Skipped\n return 0\n elif job.cleanup_job_class:\n cleanup_job = self._add_cleanup(dag, job)\n return 1\n\n def modify_dag( # noqa: C901\n self,\n job_graph,\n focus_on_these_jobs,\n jobs_already_run_previously,\n history,\n dump_graphml,\n ):\n \"\"\"Modify the DAG to be executed\n by\n - splitting conditional jobs (DataLoading, TempFile)\n into one virtual job per downstream that is dependend\n on the downstreams hull (see below)\n - adding CleanupJobs, (e.g. for TempFileGeneratingJobs)\n - pruning\n - focusing on selected jobs (i.e. prune everything outside of their connected component)\n - removing jobs we ran in the last run-through\n\n \"\"\"\n # import json\n\n dag = job_graph.job_dag.copy()\n if dump_graphml:\n for node in dag.nodes():\n dag.nodes[node][\"label\"] = node\n dag.nodes[node][\"shape\"] = self.jobs[node].__class__.__name__\n networkx.readwrite.graphml.write_graphml(\n dag,\n self.job_graph.log_dir / \"graph_pre_mod.graphml\",\n named_key_ids=True,\n )\n\n pruned = self._apply_pruning(\n dag, focus_on_these_jobs, jobs_already_run_previously\n )\n\n known_job_ids = list(networkx.algorithms.dag.topological_sort(dag))\n for job_id in reversed(known_job_ids): # todo: do we need reversed\n job = self.jobs[job_id]\n if job.job_kind in (JobKind.Temp, JobKind.Loading):\n self._modify_dag_for_conditional_job(dag, job, history)\n elif job.cleanup_job_class:\n log_error(\n f\"Unconditionaly, but cleanup? {job}, {job.cleanup_job_class}\"\n )\n raise NotImplementedError(\n \"Currently only 'conditional' jobs support cleanup jobs.\"\n ) # probably easy to fix though, just call _add_cleanup_job on it?\n\n else:\n log_trace(f\"no modify dag for {job.job_id}\")\n if dump_graphml:\n for node in dag.nodes():\n dag.nodes[node][\"label\"] = node\n dag.nodes[node][\"shape\"] = self.jobs[node].__class__.__name__\n networkx.readwrite.graphml.write_graphml(\n dag,\n self.job_graph.log_dir / \"graph_post_mod.graphml\",\n named_key_ids=True,\n )\n\n return dag, pruned\n\n def run(self, last_job_states, print_failures): # noqa:C901\n \"\"\"Actually run the current DAG\"\"\"\n from . 
import global_pipegraph\n\n job_count = len(global_pipegraph.jobs) # track if new jobs are being created\n\n log_trace(\"Runner.__run__\")\n\n self.pid = (\n os.getpid()\n ) # so we can detect if we return inside a forked process and exit (safety net)\n self.start_time = time.time()\n self.aborted = False\n self.stopped = False\n self.print_failures = print_failures\n self.output_hashes = {}\n self.new_history = {} # what are the job outputs this time.\n self.last_job_states = last_job_states\n\n self.events = queue.Queue()\n\n todo = len(self.dag)\n log_job_trace(\"here we go\")\n for job_id in self.dag.nodes: # those are without the pruned nodes\n no_inputs = not self.job_inputs[job_id]\n # output_needed = self.jobs[job_id].output_needed(self)\n failed_last_time = self._job_failed_last_time(job_id)\n if no_inputs: # could be an initial job\n log_job_trace(\n f\"{job_id} no inputs. failed_last_time: {failed_last_time}\"\n )\n if failed_last_time:\n log_job_trace(f\"{job_id} Failing because of failure last time (1)\")\n self.job_states[job_id].failed(self.job_states[job_id].error, True)\n todo -= 1 # no need to send a message for this\n else:\n self.jobs_that_need_propagation.append(job_id)\n elif failed_last_time:\n log_job_trace(f\"{job_id} Failing because of failure last time (2)\")\n self.job_states[job_id].failed(self.job_states[job_id].error, True)\n todo -= 1 # no need to send a message for this\n log_job_trace(\n f\"Finished initial pass, jobs_that_need_propagation now filled {len(self.jobs_that_need_propagation)}\"\n )\n\n self.jobs_in_flight = []\n self.jobs_all_cores_in_flight = 0\n self._start_job_executing_threads()\n\n self.jobs_done = 0\n try:\n self._interactive_start()\n # self._interactive_report()\n while todo:\n while self.jobs_that_need_propagation:\n log_job_trace(f\"jtnp: {len(self.jobs_that_need_propagation)}\")\n # log_job_trace(f\"jtnp: {self.jobs_that_need_propagation}\")\n check_job_id = self.jobs_that_need_propagation.popleft()\n check_state = self.job_states[check_job_id]\n check_job = self.jobs[check_job_id]\n should_run_before = check_state.should_run\n validation_before = check_state.validation_state\n new = check_state.update()\n if new is None:\n raise ValueError(\"none return\")\n log_job_trace(f\"New for checking {new}\")\n self.jobs_that_need_propagation.extend(new)\n log_job_trace(\n f\"{check_job_id}: State: {check_state.proc_state} {check_state.should_run}\"\n )\n if check_state.proc_state is ProcessingStatus.ReadyToRun:\n # this job has transitioned into ReadyToRun .\n if check_state.should_run is ShouldRun.No:\n check_state.skipped()\n self._push_event(\n \"JobSkipped\", (check_job_id,)\n ) # for accounting\n # tell teh upstreams - we won't receive the other one.\n for upstream_job_id in check_state.upstreams:\n self.jobs_that_need_propagation.append(upstream_job_id)\n elif check_state.should_run is ShouldRun.Yes:\n check_state.proc_state = ProcessingStatus.Schedulded\n if (\n check_state.outcome is JobOutcome.Failed\n ): # internal failuer\n self._push_event(\n \"JobFailed\",\n (\n check_job_id,\n exceptions.JobEvaluationFailed(\n \"output_needed raised an exception\",\n check_state.error,\n ),\n ),\n ) # which will in turn upstream fail all downstreams\n else:\n #log_job_trace(\n #f\"{check_job_id} priority {check_job.que_priority}\"\n #)\n self.jobs_to_run_que.put(\n (check_job.que_priority, check_job_id)\n )\n elif check_state.proc_state is ProcessingStatus.Schedulded:\n pass\n elif (check_state.validation_state != validation_before) or (\n 
check_state.should_run != should_run_before\n ):\n log_job_trace(\n f\"validation changed. Tell upstreams {check_job_id}\"\n )\n # we have just changed the validation state\n # and the upstreams might care about that to decide\n # whether they need to run\n for upstream_job_id in check_state.upstreams:\n if (\n self.jobs[upstream_job_id].is_conditional()\n and not self.job_states[\n upstream_job_id\n ].should_run.is_decided()\n ):\n self.jobs_that_need_propagation.append(upstream_job_id)\n\n try:\n ev = self.events.get(timeout=self.event_timeout)\n if ev[0] == \"AbortRun\":\n log_info(\"Aborting/Stopping run as requested\")\n todo = 0\n # self.stopped = True no need to tell the threads, we KeyboardInterrupt them.\n break\n except queue.Empty:\n # long time, no event.\n if not self.jobs_in_flight:\n log_error(\n f\"Coding error lead to que empty with no jobs in flight? todo: {todo}, {len(self.jobs_that_need_propagation)}\"\n )\n # ok, a coding error has lead to us not finishing\n # the todo graph.\n for job_id in self.job_states:\n if self.job_states[job_id].proc_state != ProcessingStatus.Done:\n log_warning(\n f\"{job_id}, {self.job_states[job_id].proc_state} {self.jobs[job_id].depth}\"\n )\n raise exceptions.RunFailedInternally\n continue\n\n log_job_trace(\n f\"<-handle {ev[0]} {escape_logging(ev[1][0])}, todo: {todo}\"\n )\n d = self._handle_event(ev)\n todo += d\n self.jobs_done -= d\n self._interactive_report()\n log_trace(f\"<-done - todo: {todo}\")\n\n if not self.aborted:\n while self.jobs_in_flight and not self.aborted:\n try:\n ev = self.events.get(0.1)\n except queue.Empty: # pragma: no cover\n break\n else:\n # log_trace(f\"<-handle {ev[0]} {escape_logging(ev[1][0])}\")\n self._handle_event(ev)\n # once more for good measure...\n while True:\n try:\n ev = self.events.get_nowait()\n except queue.Empty:\n break\n else:\n # log_trace(f\"<-handle {ev[0]} {escape_logging(ev[1][0])}\")\n self._handle_event(ev)\n\n if (\n self.aborted\n ): # it might have gotten set by an 'abort' following a stop in the meantim!\n # log_job_tarce(f\"No of threads when aborting {len(self.threads)}\")\n for t in self.threads:\n log_job_trace(\n f\"Asking thread {t.ident} to terminate at next Python call {time.time() - self.abort_time}\"\n )\n try:\n async_raise(t.ident, KeyboardInterrupt)\n except ValueError:\n pass\n\n finally:\n # log_job_trace(\"Joining threads\")\n for t in self.threads:\n self.jobs_to_run_que.put((0, ExitNow))\n for t in self.threads:\n t.join()\n # log_job_trace(\"Joined threads\")\n # now capture straglers\n # todo: replace this with something guranteed to work.\n while True:\n try:\n ev = self.events.get_nowait()\n except queue.Empty:\n break\n else:\n log_trace(f\"<-handle {ev[0]} {escape_logging(ev[1][0])}\")\n self._handle_event(ev)\n\n if hasattr(self, \"_status\"):\n self._status.stop()\n log_info(\"interactive stop\")\n self._interactive_stop()\n\n for job_id in self.pruned:\n ljt(f\"Logging as pruned {job_id}\")\n assert not job_id in self.job_states\n self.job_states[job_id] = JobStatus(job_id, self, None, None, topo_order_number=-1)\n self.job_states[job_id].was_pruned()\n\n if len(global_pipegraph.jobs) != job_count and not self.aborted:\n log_info(\n f\"created new jobs. 
_RunAgain issued {len(global_pipegraph.jobs)} != {job_count}\"\n )\n for job_id in global_pipegraph.jobs:\n if job_id not in self.jobs:\n log_job_trace(f\"new job {job_id}\")\n raise _RunAgain(self.job_states)\n log_trace(\"Left runner.run()\")\n\n return self.job_states\n\n def _interactive_start(self):\n \"\"\"Activate the interactive thread\"\"\"\n if self.job_graph.run_mode is RunMode.CONSOLE:\n self.interactive = ConsoleInteractive()\n self.last_status_time = time.time()\n self.interactive.start(self)\n\n def _interactive_stop(self):\n \"\"\"Stop the interactive thread (if present)\"\"\"\n if hasattr(self, \"interactive\"):\n self.interactive.stop()\n\n def _interactive_report(self):\n if hasattr(self, \"interactive\"):\n t = time.time()\n if (\n t - self.last_status_time >= 0.5\n ): # don't update more than every half second.\n waiting = len(\n [\n x\n for x in self.jobs_in_flight\n if getattr(self.jobs[x], \"waiting\", False)\n ]\n )\n self.interactive.report_status(\n StatusReport(\n len(self.jobs_in_flight) - waiting,\n waiting,\n self.jobs_done,\n len(self.dag),\n self.fail_counter,\n )\n )\n self.last_status_time = t\n\n def abort(self):\n \"\"\"Kill all running jobs and leave runner.\n Called from the interactive interface\n \"\"\"\n self.abort_time = time.time()\n self.aborted = True\n self._push_event(\"AbortRun\", (False,))\n\n def stop(self):\n \"\"\"Leave runner after current jobs\n Called from the interactive interface\n\n \"\"\"\n self.stopped = True\n self.abort_time = time.time()\n self._push_event(\"AbortRun\", (False,))\n\n def _handle_event(self, event):\n \"\"\"A job came back\"\"\"\n todo = 0\n log_job_trace(f\"received event {escape_logging(event)}\")\n if event[0] == \"JobSuccess\":\n self._handle_job_success(*event[1])\n todo -= 1\n elif event[0] == \"JobSkipped\":\n # self._handle_job_skipped(*event[1])\n todo -= 1\n elif event[0] == \"JobFailed\":\n self.fail_counter += 1\n self._handle_job_failed(*event[1])\n todo -= 1\n elif event[0] == \"JobUpstreamFailed\":\n todo -= 1\n elif event[0] == \"AbortRun\":\n todo = 0\n else: # pragma: no cover # defensive\n raise NotImplementedError(event[0])\n return todo\n\n # def _handle_job_skipped(self, job_id):\n # self.job_states[job_id].skipped()\n\n def _handle_job_success(self, job_id, job_outputs):\n \"\"\"A job was done correctly. Record it's outputs,\n decide on downstreams\"\"\"\n job = self.jobs[job_id]\n job_state = self.job_states[job_id]\n msg = f\"Done in {job_state.run_time:.2f}s {job_id}\"\n if job.run_time >= self.job_graph.report_done_filter:\n if job.job_kind in (\n JobKind.Temp,\n JobKind.Output,\n JobKind.JobGenerating,\n JobKind.Loading,\n ):\n log_info(msg)\n else:\n log_debug(msg)\n pass\n else:\n # this appears to be a dramatic slowdown. (factor 2!\n # log_debug(f\"Done in {job_state.run_time:.2}s {job_id}\")\n log_job_trace(f\"{job_id} success\")\n pass\n # record our success\n # or failure if thue job did not do what it said on the tin.\n # log_trace(f\"\\t{escape_logging(str(job_outputs)[:500])}...\")\n if set(job_outputs.keys()) != set(job.outputs):\n log_trace(\n f\"\\t{job_id} returned the wrong set of outputs. \"\n f\"Should be {escape_logging(str(set(job.outputs)))}, was {escape_logging(str(set(job_outputs.keys())))}\"\n )\n job_state.failed(\n exceptions.JobContractError(\n f\"\\t{job_id} returned the wrong set of outputs. 
\"\n f\"Should be {escape_logging(str(set(job.outputs)))}, was {escape_logging(str(set(job_outputs.keys())))}\"\n )\n )\n log_error(job_state.error)\n else:\n for name, hash in job_outputs.items():\n log_trace(f\"\\tCapturing hash for {name} {escape_logging(hash)}\")\n self.output_hashes[name] = hash\n job_state.succeeded(job_outputs)\n\n def _handle_job_failed(self, job_id, error):\n \"\"\"A job did not succeed (wrong output, no output, exception...0, - log the error, fail all downstreams\"\"\"\n log_job_trace(f\"{job_id} failed\")\n job = self.jobs[job_id]\n job_state = self.job_states[job_id]\n job_state.failed(error)\n\n # log_error(f\"Failed {job_id}\")\n if self.print_failures:\n log = log_error\n else:\n log = log_job_trace\n if not self._job_failed_last_time(job_id):\n try:\n # mock failure in case of abort/stop\n if isinstance(job_state.error.args[0], exceptions.JobCanceled):\n if self.aborted or self.stopped:\n return\n else:\n raise NotImplementedError(\n \"JobCanceled outside of stopped/aborted state?!\"\n )\n # log error to file. Todo: move to job_state\n if hasattr(job_state.error.args[1], \"stacks\"):\n stacks = job_state.error.args[1]\n else:\n stacks = None\n if self.job_graph.error_dir is not None:\n error_file = (\n self.job_graph.error_dir\n / self.job_graph.time_str\n / (str(job.job_number) + \"_exception.txt\")\n )\n with open(error_file, \"w\") as ef:\n ef.write(f\"JobId: {job_id}\\n\")\n ef.write(f\"Class: {job.__class__.__name__}\\n\")\n ef.write(\"Input jobs:\\n\")\n for parent_id in sorted(self.dag.predecessors(job_id)):\n ef.write(\n f\"\\t{parent_id} ({self.jobs[parent_id].__class__.__name__})\\n\"\n )\n ef.write(\"\\n\\n\")\n if stacks is not None:\n ef.write(\n stacks._format_rich_traceback_fallback(\n include_locals=True, include_formating=False\n )\n )\n else:\n ef.write(str(job_state.error))\n ef.write(\"no stack available\")\n if hasattr(job, \"stdout\"):\n ef.write(\"\\n\\n\")\n ef.write(\"job stdout:\\n\")\n ef.write(str(job.stdout))\n else:\n ef.write(\"\\n\\nstdout: not available\\n\")\n if hasattr(job, \"stderr\"):\n ef.write(\"\\n\\n\")\n ef.write(\"job stderr:\\n\")\n ef.write(str(job.stderr))\n else:\n ef.write(\"\\n\\nstderr: not available\\n\")\n ef.flush()\n\n log(\n f\"Failed after {job_state.run_time:.2}s: {job_id}. Exception (incl. locals, stdout and stderr) logged to {error_file}\"\n )\n else:\n log(f\"Failed job: {job_id}\")\n if stacks is not None:\n log(escape_logging(stacks._format_rich_traceback_fallback(False)))\n else:\n log(job_state.error)\n log(\"no stack available\")\n\n except Exception as e:\n log_error(\n f\"An exception ocurred reporting on a job failure for {job_id}: {e}. 
The original job failure has been swallowed.\"\n )\n else:\n raise ValueError(\"Did not expect this\")\n\n def _job_failed_last_time(self, job_id) -> bool:\n \"\"\"Did this job fail last time?\"\"\"\n res = (\n self.last_job_states\n and job_id in self.last_job_states\n and self.last_job_states[job_id].outcome == JobOutcome.Failed\n )\n log_trace(f\"_job_failed_last_time: {job_id}: {res}\")\n return res\n\n def _push_event(self, event, args, indent=0):\n \"\"\"Push an event to be handled by the control thread\"\"\"\n with self.event_lock:\n log_trace(\"\\t\" * indent + f\"->push {event} {args[0]}\")\n self.events.put((event, args))\n\n def _start_job_executing_threads(self):\n \"\"\"Fire up the default number of threads\"\"\"\n for ii in range(self.job_graph.cores):\n self._start_another_thread()\n\n def _start_another_thread(self):\n \"\"\"Fire up another thread (if all current threads are blocked with multi core threads.\n\n This prevents stalling, since it will ensure that there's a thread around\n to do the SingleCore jobs.\n\n Note that we don't fire up threads without limit - at one point, you can still\n stall the graph\n \"\"\"\n if self.stopped or self.aborted:\n return\n t = Thread(target=self._executing_thread)\n self.threads.append(t)\n t.start()\n\n def _executing_thread(self):\n \"\"\"The inner function of the threads actually executing the jobs\"\"\"\n cwd = (\n os.getcwd()\n ) # so we can detect if the job cahnges the cwd (don't do that!)\n job_id = None\n try:\n while not self.stopped:\n _que_priority, job_id = self.jobs_to_run_que.get()\n self.jobs_in_flight.append(job_id)\n #log_job_trace(f\"Executing thread, got {job_id}\")\n if job_id is ExitNow:\n break\n job = self.jobs[job_id]\n job.waiting = True\n job.actual_cores_needed = -1\n job_state = self.job_states[job_id]\n self._interactive_report()\n event = None\n try:\n job.start_time = (\n time.time()\n ) # assign it just in case anything fails before acquiring the lock\n job.stop_time = float(\"nan\")\n job.run_time = float(\"nan\")\n\n c = job.resources.to_number(self.core_lock.max_cores)\n job.actual_cores_needed = c\n log_trace(\n f\"{job_id} cores: {c}, max: {self.core_lock.max_cores}, jobs_in_flight: {len(self.jobs_in_flight)}, all_cores_in_flight: {self.jobs_all_cores_in_flight}, threads: {len(self.threads)}\"\n )\n if c > 1:\n # we could stall all SingleCores/RunsHere by having all_cores blocking all but one thread (which executes another all_core).\n # if we detect that situation, we spawn another one.\n self.jobs_all_cores_in_flight += 1\n if (\n self.jobs_all_cores_in_flight >= len(self.threads)\n and len(self.threads)\n <= self.job_graph.cores\n * 5 # at one point, we either have to let threads die again, or live with\n # the wasted time b y stalling.\n ):\n log_trace(\n \"All threads blocked by Multi core jobs - starting another one\"\n )\n self._start_another_thread()\n\n log_trace(f\"wait for {job_id}\")\n if c == 0:\n log_error(f\"Cores was 0! 
{job.job_id} {job.resources}\")\n with self.core_lock.using(c):\n if self.stopped or self.aborted:\n # log_job_trace(f\"aborted waiting {job_id} -> skip\")\n event = (\"JobSkipped\", (job_id,)) # for accounting\n # self._push_event(\"JobFailed\", (job_id, exceptions.JobError(exceptions.JobCanceled(), None)))\n continue # -> while not stopped -> break\n job.start_time = time.time() # the *actual* start time\n job.waiting = False\n self._interactive_report()\n log_trace(f\"Go {job_id}\")\n log_trace(f\"\\tExecuting {job_id}\")\n\n try:\n outputs = job.run(self, job_state.historical_output)\n finally:\n # we still check the cwd, even if the job failed!\n if os.getcwd() != cwd:\n os.chdir(\n cwd\n ) # restore and hope we can recover enough to actually print the exception, I suppose.\n log_error(\n f\"{job_id} changed current_working_directory. Since ppg2 is multithreaded, you must not do this in jobs that RunHere\"\n )\n raise exceptions.JobContractError(\n f\"{job_id} changed current_working_directory. Since ppg2 is multithreaded, you must not do this in jobs that RunHere\"\n )\n # log_job_trace(f\"pushing success {job_id}\")\n event = (\"JobSuccess\", (job_id, outputs))\n except SystemExit as e: # pragma: no cover - happens in spawned process, and we don't get coverage logging for it thanks to os._exit\n log_trace(\n \"SystemExit in spawned process -> converting to hard exit\"\n )\n if os.getpid() != self.pid:\n os._exit(e.args[0])\n except Exception as e:\n if isinstance(e, KeyboardInterrupt): # happens on abort\n\n raise\n elif isinstance(e, exceptions.JobError):\n pass # take it at face value\n else:\n exception_type, exception_value, tb = sys.exc_info()\n captured_tb = ppg_traceback.Trace(\n exception_type, exception_value, tb\n )\n e = exceptions.JobError(\n e,\n captured_tb,\n )\n event = (\"JobFailed\", (job_id, e))\n finally:\n job.stop_time = time.time()\n job.run_time = job.stop_time - job.start_time\n self.job_states[job_id].run_time = job.run_time\n log_trace(f\"end {job_id}\")\n self.jobs_in_flight.remove(job_id)\n if c > 1:\n self.jobs_all_cores_in_flight -= 1\n if event is not None:\n self._push_event(*event)\n # log_trace(f\"Leaving thread for {job_id}\")\n except (KeyboardInterrupt, SystemExit): # happens on abort\n log_trace(f\"Keyboard Interrupt received {time.time() - self.abort_time}\")\n pass\n except Exception as e:\n log_error(\n f\"Captured exception outside of loop - should not happen {type(e)} {str(e)}. Check error log\"\n )\n # log_job_trace(f\"left thread {len(self.threads)} {job_id}\")\n\n\nclass JobCollector:\n \"\"\"only in place during the dag modification step of Runner.__init__,\n so that the jobs that are only created during run (cleanup, )\n do not end up in the actual graph.\n \"\"\"\n\n def __init__(self, run_mode):\n self.clear()\n self.run_mode = run_mode\n\n def add(self, job):\n self.jobs[job] = job\n\n def clear(self):\n self.jobs = {}\n self.edges = set()\n", "id": "5614985", "language": "Python", "matching_score": 7.048020362854004, "max_stars_count": 0, "path": "src/pypipegraph2/runner.py" }, { "content": "from typing import Optional, Union, Dict\nimport gzip\nimport threading\nimport logging\nimport shutil\nimport collections\nimport os\nimport sys\nimport pickle\nimport signal\nimport networkx\nimport subprocess\nimport time\nimport datetime\nfrom pathlib import Path\nfrom loguru import logger\n\nfrom . 
import exceptions\nfrom .runner import Runner, JobOutcome\nfrom .util import CPUs, console\nfrom .enums import RunMode\nfrom .exceptions import JobsFailed, _RunAgain\nfrom .util import log_info, log_error, log_warning, log_debug, log_trace\nfrom . import util\nfrom rich.logging import RichHandler\nfrom rich.console import Console\n\n\nlogger.level(\"JT\", no=6, color=\"<yellow>\", icon=\"🐍\")\nlogger.level(\"INFO\", color=\"\")\n# if \"pytest\" in sys.modules: # pragma: no branch\n# log_out = sys.stderr\n# else: # pragma: no cover\n# log_out = RichHandler(markup=True, console=console)\n# logger.add(\n# sink=log_out,\n# format=\"{elapsed} {message}\",\n# level=logging.INFO,\n# )\n\ntime_format = \"%Y-%m-%d_%H-%M-%S\"\n\nstart_cwd = Path(\".\").absolute()\n\n\nclass ALL_CORES:\n pass\n\n\nclass PyPipeGraph:\n history_dir: Optional[Path]\n log_dir: Optional[Path]\n log_level: int\n running: bool\n\n def __init__(\n self,\n cores: Union[int, ALL_CORES],\n log_dir: Optional[Path],\n error_dir: Optional[Path],\n history_dir: Path,\n run_dir: Path,\n cache_dir: Path,\n log_level: int,\n run_mode: RunMode,\n paths: Optional[Dict[str, Union[Path, str]]] = None,\n allow_short_filenames=False,\n log_retention=None,\n prevent_absolute_paths=True,\n report_done_filter=1,\n ):\n\n if cores is ALL_CORES:\n self.cores = CPUs()\n else:\n self.cores = int(cores)\n if log_dir:\n self.log_dir = Path(log_dir)\n else:\n self.log_dir = None\n if error_dir:\n self.error_dir = error_dir\n else:\n self.error_dir = None\n self.history_dir = Path(history_dir)\n self.run_dir = Path(run_dir)\n self.log_level = log_level\n self.log_retention = log_retention\n # self.paths = {k: Path(v) for (k, v) in paths} if paths else {}\n self.run_mode = run_mode\n self.jobs = {} # the job objects, by id\n self.job_dag = (\n networkx.DiGraph()\n ) # a graph. Nodes: job_ids, edges -> must be done before\n self.job_inputs = collections.defaultdict(\n set\n ) # necessary inputs (ie. outputs of other jobs)\n self.outputs_to_job_ids = (\n {}\n ) # so we can find the job that generates an output: todo: should be outputs_to_job_id or?\n self.run_id = 0\n self.allow_short_filenames = allow_short_filenames\n if cache_dir:\n self.cache_dir = Path(cache_dir)\n self.cache_dir.mkdir(exist_ok=True, parents=True)\n else:\n self.cache_dir = None\n self.cache_folder = self.cache_dir # todo: change all occurances?\n self.running = False\n self.prevent_absolute_paths = prevent_absolute_paths\n self._debug_allow_ctrl_c = False # see examples/abort_when_stalled.py\n self.next_job_number = 0\n self.next_job_number_lock = threading.Lock()\n self._path_cache = {}\n self.report_done_filter = report_done_filter\n self.func_cache = {}\n self.dir_absolute = Path(\".\").absolute()\n self._jobs_do_dump_subgraph_debug = False\n\n def run(\n self,\n print_failures: bool = True,\n raise_on_job_error=True,\n event_timeout=5,\n dump_graphml=False,\n ) -> Dict[str, JobOutcome]:\n \"\"\"Run the complete pypipegraph\"\"\"\n try:\n return self._run(\n print_failures,\n raise_on_job_error,\n event_timeout,\n None,\n dump_graphml=dump_graphml,\n )\n except JobsFailed as e: # shorten the traceback considerably!\n raise JobsFailed(e.args[0], exceptions=e.exceptions)\n\n def _run(\n self,\n print_failures: bool = True,\n raise_on_job_error=True,\n event_timeout=5,\n focus_on_these_jobs=None,\n dump_graphml=False,\n ) -> Dict[str, JobOutcome]:\n \"\"\"Run the jobgraph - possibly focusing on a subset of jobs (ie. 
ignoring\n anything that's not necessary to calculate them - activated by calling a Job\n \"\"\"\n ts = str(\n time.time()\n ) # include subsecond in log names - usefull for the testing, I suppose.\n ts = ts[ts.rfind(\".\") :]\n self.time_str = datetime.datetime.now().strftime(time_format) + ts\n if not networkx.algorithms.is_directed_acyclic_graph(self.job_dag):\n print(networkx.readwrite.json_graph.node_link_data(self.job_dag))\n raise exceptions.NotADag()\n else:\n # print(networkx.readwrite.json_graph.node_link_data(self.job_dag))\n pass\n start_time = time.time()\n self._resolve_dependency_callbacks()\n self.running = True # must happen after dependency callbacks\n if self.error_dir:\n self._cleanup_errors()\n (self.error_dir / self.time_str).mkdir(exist_ok=True, parents=True)\n self._link_errors()\n log_id = None\n if self.log_dir:\n self._cleanup_logs()\n self.log_dir.mkdir(exist_ok=True, parents=True)\n fn = Path(sys.argv[0]).name\n self.log_file = self.log_dir / f\"{fn}-{self.time_str}.log\"\n logger.remove() # no default logging\n logger.add(\n open(self.log_file, \"w\"), level=min(self.log_level, logging.DEBUG)\n )\n if False:\n logger.add(\n RichHandler(\n markup=False,\n console=Console(\n file=open(self.log_dir / f\"{fn}-{self.time_str}.log\", \"w\"),\n width=120, #\n ),\n ),\n level=self.log_level,\n )\n # if \"pytest\" in sys.modules: # pragma: no branch\n log_id = logger.add(\n sink=sys.stdout,\n level=logging.INFO\n if not util.do_jobtrace_log\n else 6, # don't spam stdout\n format=(\n \"\\r <blue>{elapsed}s</blue> <bold>|</bold> <level>{message}</level>\"\n if not util.do_jobtrace_log\n else \"<blue>{elapsed}s</blue> | <level>{level.icon}</level> <bold>|</bold>{file:8.8}:{line:4} <level>{message}</level>\"\n ),\n ) # pragma: no cover\n\n self._link_logs()\n\n log_info(\n f\"Run is go {threading.get_ident()} pid: {os.getpid()}, run_id {self.run_id}, log_level = {self.log_level}\"\n )\n self.history_dir.mkdir(exist_ok=True, parents=True)\n self.run_dir.mkdir(exist_ok=True, parents=True)\n self.do_raise = []\n self._restart_afterwards = False\n ok = False\n try:\n result = None\n self._install_signals()\n history = self._load_history()\n max_runs = 5\n jobs_already_run = set()\n final_result = {}\n aborted = False\n while True:\n max_runs -= 1\n if max_runs == 0: # pragma: no cover\n raise ValueError(\n \"Maximum graph-generating-jobs recursion depth exceeded\"\n )\n do_break = False\n job_count = len(self.job_dag)\n try:\n self.runner = Runner(\n self,\n history,\n event_timeout,\n focus_on_these_jobs,\n jobs_already_run,\n dump_graphml,\n self.run_id,\n self._jobs_do_dump_subgraph_debug,\n )\n result = self.runner.run(result, print_failures=print_failures)\n aborted = self.runner.aborted\n del self.runner\n self.run_id += 1\n do_break = True\n except _RunAgain as e:\n log_info(\"Jobs created - running again\")\n result = e.args[0]\n self._update_history(result, history)\n self._log_runtimes(result, start_time)\n # assert len(result) == job_count # does not account for cleanup jobs...\n # leave out the cleanup jobs added virtually by the run\n jobs_already_run.update((k for k in result.keys() if k in self.jobs))\n for k, v in result.items():\n if (\n not k in final_result\n or final_result[k].outcome != JobOutcome.Failed\n ):\n final_result[k] = v\n # final_result.update(result)\n if do_break:\n break\n # final_result.update(result)\n del result\n for job_id, job_state in final_result.items():\n if job_state.outcome == JobOutcome.Failed:\n 
self.do_raise.append(job_state.error)\n self.last_run_result = final_result\n if raise_on_job_error and self.do_raise and not self._restart_afterwards:\n raise exceptions.JobsFailed(\"At least one job failed\", self.do_raise)\n if aborted:\n raise KeyboardInterrupt(\"Run aborted\")\n ok = True\n return final_result\n finally:\n if ok:\n log_info(\"Run is done\")\n else:\n log_info(\"Run is done - with failure\")\n log_info(\"\")\n if print_failures:\n self._print_failures()\n self._restore_signals()\n if log_id is not None:\n logger.remove(log_id)\n self.running = False\n if (\n self._restart_afterwards\n ): # pragma: no cover - todo: test with interactive\n log_info(\"Restart again issued - restarting via subprocess.check_call\")\n subprocess.check_call([sys.executable] + sys.argv, cwd=start_cwd)\n\n def run_for_these(self, jobs):\n \"\"\"Run graph for just these jobs (and their upstreams), ignoring everything else\"\"\"\n if not isinstance(jobs, list):\n jobs = [jobs]\n return self._run(\n print_failures=True, raise_on_job_error=True, focus_on_these_jobs=jobs\n )\n\n def _cleanup_logs(self):\n \"\"\"Clean up old logs and drop a 'latest' symlink\"\"\"\n if not self.log_dir or self.log_retention is None: # pragma: no cover\n return\n fn = Path(sys.argv[0]).name\n pattern = f\"{fn}-*.log\"\n files = sorted(self.log_dir.glob(pattern))\n if len(files) > self.log_retention:\n remove = files[: -self.log_retention]\n for f in remove:\n os.unlink(f)\n\n def _link_latest(self, dir, pattern, latest_name, target_is_directory):\n link_name = dir / latest_name\n if link_name.exists() or link_name.is_symlink():\n # print(\"unlinking\", link_name)\n link_name.unlink()\n # else:\n # print(\"not found\", link_name)\n\n files = sorted(dir.glob(pattern))\n if files:\n link_name.symlink_to(\n files[-1].name, target_is_directory=target_is_directory\n )\n\n def _link_logs(self):\n fn = Path(sys.argv[0]).name\n self._link_latest(self.log_dir, f\"{fn}-*.log\", \"latest\", False)\n\n def _cleanup_errors(self):\n \"\"\"Cleanup old errors and drop a 'latest' symlink\"\"\"\n if not self.error_dir or self.log_retention is None: # pragma: no cover\n return\n err_dirs = sorted(\n [\n x\n for x in (self.error_dir / self.time_str).parent.glob(\"*\")\n if x.is_dir() and not x.is_symlink()\n ]\n )\n if len(err_dirs) > self.log_retention:\n remove = err_dirs[: -self.log_retention]\n for f in remove:\n shutil.rmtree(f)\n\n def _link_errors(self):\n self._link_latest(self.error_dir, f\"*\", \"latest\", True)\n\n def _update_history(self, job_results, history):\n \"\"\"Merge history from previous and this run\"\"\"\n # we must keep the history of jobs unseen in this run.\n # to to allow partial runs\n org_history = history.copy()\n new_history = (\n history # .copy() don't copy. we reuse this in the subsequent runs\n )\n new_history.update(\n {\n job_id: (jr.updated_input, jr.updated_output,)\n for job_id, jr in job_results.items()\n if jr.outcome is not JobOutcome.Pruned\n }\n )\n done = False\n while not done:\n try:\n if new_history != org_history:\n self._save_history(new_history)\n else:\n log_info(\"Skipped saving history - unchanged\")\n done = True\n except KeyboardInterrupt as e:\n self.do_raise.append(e)\n pass\n\n def _log_runtimes(self, job_results, run_start_time):\n \"\"\"Log the runtimes to a file (ever growing. 
But only runtimes over a threshold)\"\"\"\n if self.log_dir:\n rt_file = self.log_dir / \"runtimes.tsv\"\n lines = []\n if not rt_file.exists():\n lines.append(\"jobid\\trun_start_time\\truntime_s\")\n for job_id, job_result in job_results.items(): # pragma: no branch\n if job_result.outcome is JobOutcome.Success:\n if job_result.run_time >= 1:\n lines.append(\n f\"{job_id}\\t{int(run_start_time)}\\t{job_result.run_time:.2f}\"\n )\n with open(rt_file, \"a+\") as op:\n op.write(\"\\n\".join(lines))\n\n def get_history_filename(self):\n \"\"\"where do we store the graph's history?\"\"\"\n # we by default share the history file\n # if it's the same history dir, it's the same project\n # and you'd retrigger the calculations too often otherwise\n return self.history_dir / \"ppg_history.gz\" # don't end on .py\n\n def _load_history(self):\n log_trace(\"_load_history\")\n fn = self.get_history_filename()\n history = {}\n self.invariant_loading_issues = set()\n if fn.exists():\n log_trace(\"Historical existed\")\n try:\n with gzip.GzipFile(fn, \"rb\") as op:\n try:\n counter = 0\n while True:\n try:\n # log_trace(f\"History read {counter}\")\n counter += 1\n job_id = None\n job_id = pickle.load(op)\n # log_trace(f\"read job_id {job_id}\")\n inputs_and_outputs = pickle.load(op)\n history[job_id] = inputs_and_outputs\n except (TypeError, pickle.UnpicklingError) as e:\n # log_trace(f\"unpickleing error {e}\")\n if job_id is None:\n raise exceptions.JobsFailed(\n \"Could not depickle job id - history file is borked beyond automatic recovery\",\n [],\n )\n else:\n msg = (\n f\"Could not depickle invariant for {job_id} - \"\n \"check code for depickling bugs. \"\n \"Job will rerun, probably until the (de)pickling bug is fixed.\"\n f\"\\n Exception: {e}\"\n )\n self.do_raise.append(ValueError(msg))\n self.invariant_loading_issues.add(job_id)\n # use pickle tools to read the pickles op codes until\n # the end of the current pickle, hopefully allowing decoding of the next one\n # of course if the actual on disk file is messed up beyond this,\n # we're done for.\n import pickletools\n\n try:\n list(pickletools.genops(op))\n except Exception as e:\n raise exceptions.JobsFailed(\n \"Could not depickle invariants - \"\n f\"depickling of {job_id} failed, could not skip to next pickled dataset\"\n f\" Exception was {e}\",\n [],\n )\n\n except EOFError:\n pass\n except exceptions.JobsFailed:\n raise\n # except Exception as e: coverage indicates this never runs.\n # raise exceptions.FatalGraphException( # that's pretty terminal\n # \"Could not load history data\", e, fn.absolute()\n # )\n\n log_info(f\"Loaded {len(history)} history entries\")\n return history\n\n def _save_history(self, historical):\n log_trace(\"_save_history\")\n fn = self.get_history_filename()\n if Path(fn).exists():\n fn.rename(fn.with_suffix(fn.suffix + \".backup\"))\n raise_keyboard_interrupt = False\n raise_run_failed_internally = False\n with gzip.GzipFile(fn, \"wb\") as op:\n # robust history saving.\n # for KeyboardInterrupt, write again\n # for other exceptions: skip job\n for job_id, input_and_output_hashes in historical.items():\n try_again = True\n while try_again:\n try_again = False\n try:\n a = pickle.dumps(\n job_id, pickle.HIGHEST_PROTOCOL\n ) + pickle.dumps(\n input_and_output_hashes, pickle.HIGHEST_PROTOCOL\n )\n op.write(a)\n except KeyboardInterrupt:\n try_again = True\n raise_keyboard_interrupt = True\n except Exception as e:\n log_error(f\"Could not pickle state for {job_id} - {e}\")\n raise_run_failed_internally = 
(job_id, e)\n if raise_run_failed_internally:\n job_id, exc = raise_run_failed_internally\n raise exceptions.RunFailedInternally(\n f\"Pickling of {job_id} inputs/outputs failed.\", exc\n )\n if raise_keyboard_interrupt:\n log_error(\"Keyboard interrupt\")\n raise KeyboardInterrupt()\n\n def _resolve_dependency_callbacks(self):\n \"\"\"jobs may depend on functions that return their actual dependencies.\n This resolves them\n \"\"\"\n # we need this copy,\n # for the callbacks may create jobs\n # so we can't simply iterate over the jobs.values()\n with_callback = [j for j in self.jobs.values() if j.dependency_callbacks]\n # log_info(f\"with callbacks {[j.job_id for j in with_callback]}\")\n if not with_callback:\n return\n for j in with_callback:\n dc = j.dependency_callbacks\n j.dependency_callbacks = (\n []\n ) # must reset before run, might add new ones, right?\n for c in dc:\n # log_info(f\"{j.job_id}, {c}\")\n j.depends_on(c())\n self._resolve_dependency_callbacks() # nested?\n\n def _print_failures(self):\n log_trace(\"print_failures\")\n # TODO - actually, we kind of already do that inline, don't we.\n\n def _install_signals(self):\n \"\"\"make sure we don't crash just because the user logged of.\n Also blocks CTRl-c in console, and transaltes into save shutdown otherwise.\n \"\"\"\n log_trace(\"_install_signals\")\n\n def hup(*args, **kwargs): # pragma: no cover\n log_warning(\"user logged off - continuing run\")\n\n def sigint(*args, **kwargs):\n if self.run_mode is (RunMode.CONSOLE):\n if self._debug_allow_ctrl_c == \"abort\":\n log_info(\"CTRL-C from debug - calling interactive abort\")\n self.runner.interactive._cmd_abort(\n None\n ) # for testing the abort facility.\n elif self._debug_allow_ctrl_c == \"stop\":\n log_info(\"CTRL-C from debug - calling interactive stop\")\n self.runner.interactive._cmd_default()\n self.runner.interactive._cmd_stop(\n None\n ) # for testing the abort facility.\n elif self._debug_allow_ctrl_c == \"stop&abort\":\n log_info(\"CTRL-C from debug - calling interactive stop\")\n self.runner.interactive._cmd_stop(\n None\n ) # for testing the abort facility.\n self._debug_allow_ctrl_c = \"abort\"\n\n else:\n log_info(\"CTRL-C has been disabled. Type 'abort<CR>' to abort\")\n # TODO remove\n else: # pragma: no cover - todo: interactive\n log_info(\"CTRL-C received. 
Killing all running jobs.\")\n if hasattr(self, \"runner\"):\n print(\"calling abort\")\n self.runner.abort()\n\n if self.run_mode is (RunMode.CONSOLE):\n self._old_signal_hup = signal.signal(signal.SIGHUP, hup)\n # if self.run_mode in (RunMode.CONSOLE, RunMode.NOTEBOOK):\n # we always steal ctrl c\n self._old_signal_int = signal.signal(signal.SIGINT, sigint)\n\n def _restore_signals(self):\n \"\"\"Restore signals to pre-run values\"\"\"\n log_trace(\"_restore_signals\")\n if hasattr(self, \"_old_signal_hup\"): # pragma: no branch\n signal.signal(signal.SIGHUP, self._old_signal_hup)\n if hasattr(self, \"_old_signal_int\"): # pragma: no branch\n signal.signal(signal.SIGINT, self._old_signal_int)\n\n def add(self, job):\n \"\"\"Add a job.\n Automatically called when a Job() is created\n \"\"\"\n\n for output in job.outputs:\n if output in self.outputs_to_job_ids:\n # already being done somewhere else\n if self.outputs_to_job_ids[output] == job.job_id:\n # but it is in essence the same same job\n pass # we replace the job, keeping upstreams/downstream edges\n else:\n # if self.run_mode != RunMode.NOTEBOOK: todo: accept in notebooks by removing the other jobs and warning.\n raise exceptions.JobOutputConflict(\n job, self.jobs[self.outputs_to_job_ids[output]]\n )\n self.outputs_to_job_ids[\n output\n ] = job.job_id # todo: seperate this into two dicts?\n # we use job numbers during run\n # to keep output files unique etc.\n if job.job_id in self.jobs and self.jobs[job.job_id] is not job:\n if self.run_mode.is_strict():\n raise ValueError(\n \"Added new job in place of old not supported in run_mode == strict\"\n f\"new job: {job} id: {id(job)}\",\n f\"old job: {self.jobs[job.job_id]} id: {id(self.jobs[job.job_id])}\",\n )\n if not self.running: # one core., no locking\n job.job_number = self.next_job_number\n self.next_job_number += 1\n else: # multiple jobGeneratingJobs might be creating jobs at the same time.\n with self.next_job_number_lock:\n job.job_number = self.next_job_number\n self.next_job_number += 1\n self.jobs[job.job_id] = job\n self.job_dag.add_node(job.job_id)\n # assert len(self.jobs) == len(self.job_dag) - we verify this when running\n\n def add_edge(self, upstream_job, downstream_job):\n \"\"\"Declare a dependency between jobs\n\n Implementation note:\n While we connect the DAG based on the Jobs,\n we calculate invalidation based on the job_inputs (see Job.depends_on),\n allowing a MultiFileGeneratingJob to (optionally) only invalidate some of it's downstreams.\n \"\"\"\n if not upstream_job.job_id in self.jobs:\n raise KeyError(f\"{upstream_job} not in this graph. Call job.readd() first\")\n if not downstream_job.job_id in self.jobs:\n raise KeyError(\n f\"{downstream_job} not in this graph. 
Call job.readd() first\"\n )\n\n self.job_dag.add_edge(upstream_job.job_id, downstream_job.job_id)\n\n def has_edge(self, upstream_job, downstream_job):\n \"\"\"Does this edge already exist?\"\"\"\n if not isinstance(upstream_job, str):\n upstream_job_id = upstream_job.job_id\n else:\n upstream_job_id = upstream_job\n if not isinstance(downstream_job, str):\n downstream_job_id = downstream_job.job_id\n else:\n downstream_job_id = downstream_job\n return self.job_dag.has_edge(upstream_job_id, downstream_job_id)\n\n def restart_afterwards(self):\n \"\"\"Restart the whole python program afterwards?\n Used by the interactive console\n \"\"\"\n self._restart_afterwards = True # pragma: no cover - todo: interactive\n\n def dump_subgraph_for_debug(self, jobs):\n \"\"\"Write a subgraph_debug.py\n with a faked-out version of this graph.\n See Job.dump_subgraph_for_debug for details\"\"\"\n if jobs:\n jall = list(jobs)\n else:\n jall = list(self.jobs.keys())\n j1 = self.jobs[jall[0]]\n j1.dump_subgraph_for_debug(jall)\n\n def dump_subgraph_for_debug_at_run(self, jobs):\n \"\"\"Write a subgraph_debug.py\n with a faked-out version of this graph.\n See Job.dump_subgraph_for_debug for details.\n\n This version dumps *after* the cleanup jobs\n and so on have been assembled (ie. when run is called)\n\n \"\"\"\n if jobs:\n jall = list(jobs)\n else:\n jall = list(self.jobs.keys())\n j1 = self.jobs[jall[0]]\n # j1.dump_subgraph_for_debug(jall)\n self._jobs_do_dump_subgraph_debug = jall\n", "id": "4522541", "language": "Python", "matching_score": 3.6405766010284424, "max_stars_count": 0, "path": "src/pypipegraph2/graph.py" }, { "content": "# check if we can stop whilst stalling\n# only one job should finish.\nimport pypipegraph2 as ppg\nimport time\nfrom pathlib import Path\nimport logging\nimport os\nimport shutil\nimport psutil\nimport subprocess\n\np = Path(\"run/stall\")\nif p.exists():\n shutil.rmtree(p)\np.mkdir(exist_ok=True, parents=True)\nos.chdir(p)\n\ntime_before_abort = 1\ntimeout = 10\njobcount = 5\nppg.new(log_level=logging.INFO, cores=5)\n#tell the ppg to accept the SIGINT we're actually sending\nppg.global_pipegraph._debug_allow_ctrl_c = 'stop&abort'\n\n\ndef all_cores(ii):\n def inner(of, ii=ii):\n of.write_text(str(time.time()))\n proc = psutil.Process()\n parent = proc.parent()\n if ii == 0: # only the first guy kills us\n time.sleep(time_before_abort)\n subprocess.check_call([\"kill\", \"--signal\", \"SIGINT\", str(parent.pid)])\n time.sleep(1)\n subprocess.check_call([\"kill\", \"--signal\", \"SIGINT\", str(parent.pid)])\n\n time.sleep(timeout)\n\n return ppg.FileGeneratingJob(\n f\"all_cores{ii}\",\n inner,\n resources=ppg.Resources.AllCores,\n depend_on_function=False,\n )\n\n\nac = [all_cores(ii) for ii in range(jobcount)]\n\nstart = time.time()\ntry:\n ppg.run()\nexcept KeyboardInterrupt:\n print(\"Received expected Keyboard interrupt\")\n stop = time.time()\n print(f\"stop took {stop-start-time_before_abort-1:.2f} seconds.\\n We expected less than{timeout}.\\n\")\n\n", "id": "3974828", "language": "Python", "matching_score": 6.688746452331543, "max_stars_count": 0, "path": "examples/stop_then_abort_when_stalled.py" }, { "content": "# check if we can stop whilst stalling\n# only one job should finish.\nimport pypipegraph2 as ppg\nimport time\nfrom pathlib import Path\nimport logging\nimport os\nimport shutil\nimport psutil\nimport subprocess\n\np = Path(\"run/stall\")\nif p.exists():\n shutil.rmtree(p)\np.mkdir(exist_ok=True, parents=True)\nos.chdir(p)\n\ntime_before_abort = 
1\ntimeout = 1\njobcount = 5\nppg.new(log_level=logging.INFO, cores=5)\n#tell the ppg to accept the SIGINT we're actually sending\nppg.global_pipegraph._debug_allow_ctrl_c = 'stop'\n\n\ndef all_cores(ii):\n if ii == 3:\n return ppg.MultiFileGeneratingJob(['A','B','C'], lambda ofs: [of.write_text(of.name) for of in ofs], resources=ppg.Resources.AllCores)\n def inner(of, ii=ii):\n of.write_text(str(time.time()))\n proc = psutil.Process()\n parent = proc.parent()\n if ii == 0: # only the first guy kills us\n time.sleep(time_before_abort)\n subprocess.check_call([\"kill\", \"--signal\", \"SIGINT\", str(parent.pid)])\n\n time.sleep(timeout)\n\n return ppg.FileGeneratingJob(\n f\"all_cores{ii}\",\n inner,\n resources=ppg.Resources.AllCores,\n depend_on_function=False,\n )\n\n\nac = [all_cores(ii) for ii in range(jobcount)]\n\nstart = time.time()\ntry:\n ppg.run()\nexcept KeyboardInterrupt:\n print(\"Received expected Keyboard interrupt\")\n stop = time.time()\n print(f\"stop took {stop-start-time_before_abort:.2f} seconds.\\n We expected {timeout}.\\nNot multiples.\")\n\n", "id": "11846984", "language": "Python", "matching_score": 6.28682279586792, "max_stars_count": 0, "path": "examples/stop_when_stalled.py" }, { "content": "# check if we can abort long running & stalled jobs\nimport pypipegraph2 as ppg\nimport time\nfrom pathlib import Path\nimport logging\nimport os\nimport shutil\nimport psutil\nimport subprocess\n\np = Path(\"run/stall\")\nif p.exists():\n shutil.rmtree(p)\np.mkdir(exist_ok=True, parents=True)\nos.chdir(p)\n\ntime_before_abort = 1\ntimeout = 60\njobcount = 5\nppg.new(log_level=logging.INFO, cores=5)\n#tell the ppg to accept the SIGINT we're actually sending\nppg.global_pipegraph._debug_allow_ctrl_c = 'abort'\n\n\ndef all_cores(ii):\n def inner(of, ii=ii):\n of.write_text(str(time.time()))\n proc = psutil.Process()\n parent = proc.parent()\n if ii == 0: # only the first guy kills us\n time.sleep(time_before_abort)\n for tt in range(timeout):\n subprocess.check_call([\"kill\", \"--signal\", \"SIGINT\", str(parent.pid)])\n time.sleep(1)\n\n return ppg.FileGeneratingJob(\n f\"all_cores{ii}\",\n inner,\n resources=ppg.Resources.AllCores,\n depend_on_function=False,\n )\n\n\nac = [all_cores(ii) for ii in range(jobcount)]\n\nstart = time.time()\ntry:\n ppg.run()\nexcept KeyboardInterrupt:\n print(\"Received expected Keyboard interrupt\")\n stop = time.time()\n print(f\"Abort took {stop-start-time_before_abort:.2f} seconds.\\n If jobs were completed, you'd have observed something closer to {timeout}\")\n\n", "id": "8173728", "language": "Python", "matching_score": 3.695880889892578, "max_stars_count": 0, "path": "examples/abort_when_stalled.py" }, { "content": "# check if 'stalling' occurs - ie.\n# will having to many all_cores jobs ready to run\n# prevent all single cores to run from in parallel\nimport pypipegraph2 as ppg\nimport time\nfrom pathlib import Path\nimport logging\nimport os\nimport shutil\n\np = Path(\"run/stall\")\nif p.exists():\n shutil.rmtree(p)\np.mkdir(exist_ok=True, parents=True)\nos.chdir(p)\n\ntimeout = 2\njobcount = 5\nppg.new(log_level=logging.INFO, cores=5)\n\n\ndef all_cores(ii):\n def inner(of):\n of.write_text(str(time.time()))\n time.sleep(timeout)\n\n return ppg.FileGeneratingJob(\n f\"all_cores{ii}\",\n inner,\n resources=ppg.Resources.AllCores,\n depend_on_function=False,\n )\n\n\ndef single_cores(ii):\n def inner(of):\n of.write_text(str(time.time()))\n time.sleep(timeout)\n\n return ppg.FileGeneratingJob(\n f\"single{ii}\",\n inner,\n 
resources=ppg.Resources.SingleCore,\n depend_on_function=False,\n )\n\n\nsc = [single_cores(ii) for ii in range(jobcount)]\nac = [all_cores(ii) for ii in range(jobcount)]\n\nstart = time.time()\nppg.run()\nstop = time.time()\n\nprint(\n f\"took {stop-start:.2f}. If this is close to {timeout * jobcount}, all is well. If it's more, we have an ordering problem\"\n)\n", "id": "5321455", "language": "Python", "matching_score": 4.640326976776123, "max_stars_count": 0, "path": "examples/stall.py" }, { "content": "# check if 'stalling' occurs - ie.\n# will having to many all_cores jobs ready to run\n# prevent all single cores to run from in parallel\nimport pypipegraph2 as ppg\nimport subprocess\nimport time\nfrom pathlib import Path\nimport logging\nimport os\nimport shutil\n\np = Path(\"run/call_externals\")\nif p.exists():\n shutil.rmtree(p)\np.mkdir(exist_ok=True, parents=True)\nos.chdir(p)\n\ntimeout = 1\njobcount = 10\nppg.new(log_level=logging.INFO, cores=5)\n\n\ndef single_cores(ii):\n def inner(of):\n pid = os.fork()\n if pid == 0:\n of.write_bytes(subprocess.check_output(f\"sleep {timeout} && date\", shell=True))\n sys.stdout.close()\n sys.stderr.close()\n time.sleep(1)\n else:\n os.waitpid(pid, 0)\n\n #time.sleep(timeout)\n\n return ppg.FileGeneratingJob(\n f\"single{ii}\",\n inner,\n resources=ppg.Resources.SingleCore,\n depend_on_function=False,\n )\n\nsc = [single_cores(ii) for ii in range(jobcount)]\n\nstart = time.time()\nppg.run()\nstop = time.time()\n\nprint(\n f\"took {stop-start:.2f}.\"\n)\n", "id": "714288", "language": "Python", "matching_score": 0.7022129893302917, "max_stars_count": 0, "path": "examples/call_externals.py" }, { "content": "\"\"\"Parlallel computation / threading support helpers\"\"\"\nfrom threading import Lock, Condition\nfrom .util import log_info\nimport ctypes\nfrom .util import log_info\n\nclass _CoreLockContextManager:\n def __init__(self, core_lock, cores):\n self.core_lock = core_lock\n self.cores = cores\n\n def __enter__(self):\n self.core_lock._acquire(self.cores)\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.core_lock._release(self.cores)\n\n\nclass CoreLock:\n \"\"\"Allow threads to request n 'cores',\n if they're available, let it proceed.\n If they're not available, block.\n If they exceed the maxmimum number available: raise.\n\n Essentially, this is a Semaphore with multi-resource-one-step-acquisition.\n\n \"\"\"\n\n def __init__(self, max_cores):\n self.max_cores = int(max_cores)\n self.remaining = max_cores\n self.lock = Lock()\n self.condition = Condition()\n self.terminated = False\n\n def using(self, cores):\n return _CoreLockContextManager(self, cores)\n\n def _acquire(self, count):\n # logger.info(f\" {_thread.get_ident()} - acquire({count}) called\")\n if count > self.max_cores:\n raise ValueError(f\"Count {count} > max_cores {self.max_cores}\")\n if count == 0:\n raise ValueError(\"Count == 0\")\n while True:\n with self.lock:\n if self.remaining >= count:\n self.remaining -= count\n # logger.info(f\"{_thread.get_ident()}, had available\")\n return\n\n # not enough remaining. 
try again once somebody releases something\n with self.condition:\n self.condition.wait()\n # logger.info(f\"{_thread.get_ident()} condition triggered\")\n\n def _release(self, count):\n # logger.info(f\"{_thread.get_ident()} release({count}) called\")\n if count == 0: # pragma: no cover\n raise ValueError(\"Count == 0\")\n with self.lock:\n self.remaining += count\n if self.remaining > self.max_cores: # pragma: no cover\n raise ValueError(\"Remaining exceeded max_cores\")\n\n # logger.info(f\"{_thread.get_ident()} remaning: {self.remaining}\")\n with self.condition:\n self.condition.notify_all()\n\n\ndef async_raise(target_tid, exception):\n \"\"\"Raises an asynchronous exception in another thread.\n Read http://docs.python.org/c-api/init.html#PyThreadState_SetAsyncExc\n for further enlightenments.\n :param target_tid: target thread identifier\n :param exception: Exception class to be raised in that thread\n \"\"\"\n # Ensuring and releasing GIL are useless since we're not in C\n # gil_state = ctypes.pythonapi.PyGILState_Ensure()\n ret = ctypes.pythonapi.PyThreadState_SetAsyncExc(\n ctypes.c_long(target_tid), ctypes.py_object(exception)\n )\n # ctypes.pythonapi.PyGILState_Release(gil_state)\n if ret == 0:\n raise ValueError(\"Invalid thread ID {}\".format(target_tid))\n elif ret > 1:\n ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(target_tid), None)\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")\n", "id": "5435124", "language": "Python", "matching_score": 2.399689197540283, "max_stars_count": 0, "path": "src/pypipegraph2/parallel.py" }, { "content": "import pytest\nimport threading\nfrom pypipegraph2.parallel import CoreLock\n\n\nclass TestCoreLock:\n def test_single_thread(self):\n mylock = CoreLock(1)\n with pytest.raises(ValueError):\n mylock._acquire(2)\n mylock._acquire(1)\n mylock._release(1)\n\n def test_multithreading_simple(self):\n mylock = CoreLock(1)\n counter = []\n threads = []\n\n def inner(c):\n mylock._acquire(1)\n counter.append(c)\n mylock._release(1)\n\n for i in range(5):\n t = threading.Thread(target=inner, args=(i,))\n threads.append(t)\n t.start()\n for t in threads:\n t.join()\n assert len(counter) == 5\n assert set(counter) == set([0, 1, 2, 3, 4])\n\n def test_multithreading_two_by_two(self):\n mylock = CoreLock(3)\n counter = []\n threads = []\n\n def inner(c):\n with mylock.using(2):\n counter.append(c)\n\n for i in range(5):\n t = threading.Thread(target=inner, args=(i,))\n threads.append(t)\n t.start()\n for t in threads:\n t.join()\n assert len(counter) == 5\n assert set(counter) == set([0, 1, 2, 3, 4])\n\n def test_multithreading_complex(self):\n mylock = CoreLock(8)\n counter = []\n threads = []\n\n def inner(c):\n with mylock.using(c % 8 + 1):\n counter.append(c)\n\n for i in range(8 * 4 + 1):\n t = threading.Thread(target=inner, args=(i,))\n threads.append(t)\n t.start()\n for t in threads:\n t.join()\n assert len(counter) == 33\n assert set(counter) == set(range(33))\n\n def test_multithreading_dieing(self):\n mylock = CoreLock(1)\n counter = []\n threads = []\n\n def inner(c):\n with mylock.using(1):\n if c == 1:\n raise ValueError()\n counter.append(c)\n\n for i in range(5):\n t = threading.Thread(target=inner, args=(i,))\n threads.append(t)\n t.start()\n for t in threads:\n t.join()\n assert len(counter) == 4\n assert set(counter) == set([0, 2, 3, 4])\n\n def test_aquire_0_raise(self):\n mylock = CoreLock(1)\n with pytest.raises(ValueError):\n mylock._acquire(0)\n with pytest.raises(ValueError):\n with mylock.using(0):\n 
pass\n", "id": "10594042", "language": "Python", "matching_score": 0.11112276464700699, "max_stars_count": 0, "path": "tests/test_parallel.py" }, { "content": "from pathlib import Path\nfrom xxhash import xxh3_128\n\n\ndef hash_file(path: Path):\n \"\"\"delegate to a fast and somewhat collision resistant hash function\"\"\"\n # I profiled a bunch of hash functions\n # and xx3 and spooky were the fastest 128bit hashers\n # (we want 128 bit to prevent collisions).\n # single core, spooky seemed a bit faster\n # but the xxhash implementation releases the gil\n # when passed more than 100kb (otherwise it's a\n # faster *not* to acquire the lock!)\n hasher = xxh3_128()\n # if known_st_size is None:\n # known_st_size = path.stat().st_size\n # we are not acquiring the core lock here.\n # why? because this is essentially always\n # limited by the read-bandwidth, not the\n # cpu.\n # (even on a very fast Samsung EVO equivalent SSD\n # (about 3gb/s), doing it from memory is 4 times faster)\n # so we shouldn't be stalling everything else much\n # (except for memory bandwidth. oh well, at least\n # it should not go into swap with the tiny buffer we use here)\n with open(path, \"rb\") as op:\n block = op.read(1024 * 512)\n while block:\n hasher.update(block)\n block = op.read(1024 * 512)\n stat = path.stat()\n\n return {\n \"hash\": hasher.hexdigest(),\n \"mtime\": int(stat.st_mtime),\n \"size\": stat.st_size,\n }\n\ndef hash_bytes(input: bytes):\n hasher = xxh3_128()\n hasher.update(input)\n return hasher.hexdigest()\n\ndef hash_str(input: str):\n return hash_bytes(input.encode('utf-8'))\n", "id": "12206715", "language": "Python", "matching_score": 0.4825769364833832, "max_stars_count": 0, "path": "src/pypipegraph2/hashers.py" }, { "content": "import pandas as pd\nimport numpy as np\n\n\nclass RegionAsIs:\n \"\"\"Take the regions as they are.\n Explodes if they are not all the same size!\n \"\"\"\n\n def __init__(self):\n self.name = \"RegionAsIs\"\n\n def calc(self, gr):\n \"\"\"Must return a pandas dataframe with chr, start, stop, flip\"\"\"\n starts = gr.df[\"start\"].astype(int)\n stops = gr.df[\"stop\"].astype(int)\n chrs = gr.df[\"chr\"]\n if len((stops - starts).unique()) > 1:\n raise ValueError(\n \"Not all input regions were the same size - can't use RegionAsIS\"\n )\n\n return pd.DataFrame(\n {\"chr\": chrs, \"start\": starts, \"stop\": stops, \"flip\": False}\n )\n\n def get_dependencies(self, gr):\n return [gr.load()]\n\n\nclass RegionFromCenter:\n \"\"\"Take the regions as they are, cookie cut into center +- xbp\n No region get's flipped.\n \"\"\"\n\n def __init__(self, total_size):\n self.total_size = total_size\n self.name = \"Region_From_Center_%i\" % self.total_size\n\n def calc(self, gr):\n \"\"\"Must return a pandas dataframe with chr, start, stop, flip\"\"\"\n starts = gr.df[\"start\"]\n stops = gr.df[\"stop\"]\n chrs = gr.df[\"chr\"]\n centers = ((stops - starts) / 2.0 + starts).astype(int)\n left = centers - self.total_size / 2\n right = centers + self.total_size / 2\n return pd.DataFrame({\"chr\": chrs, \"start\": left, \"stop\": right, \"flip\": False})\n\n def get_dependencies(self, gr):\n return [gr.load()]\n\n\nclass RegionFromSummit:\n \"\"\"Take the regions from their summits as defined by @summit_annotator (mbf_genomics.regions.annotators.Summit*,\n basically an annotator defining an offset to start),\n then +- 0.5 * total_size\n No region get's flipped.\n \"\"\"\n\n def __init__(self, total_size, summit_annotator):\n self.name = \"RegionFromSummit_%i_%s\" % (\n 
total_size,\n summit_annotator.column_names[0],\n )\n self.total_size = total_size\n self.summit_annotator = summit_annotator\n\n def calc(self, gr):\n \"\"\"Must return a pandas dataframe with chr, start, stop, flip\"\"\"\n starts = gr.df[\"start\"]\n summit = gr.df[self.summit_annotator.column_name]\n chrs = gr.df[\"chr\"]\n centers = starts + summit\n left = centers - self.total_size / 2\n right = centers + self.total_size / 2\n if len(set(right - left)) > 1:\n raise ValueError(\"not all regions were created with the same size\")\n return pd.DataFrame({\"chr\": chrs, \"start\": left, \"stop\": right, \"flip\": False})\n\n def get_dependencies(self, gr):\n return [gr.add_annotator(self.summit_annotator), gr.load()]\n\n\nclass RegionFromCenterFlipByNextGene:\n \"\"\"Todo\"\"\"\n\n def __init__(self, total_size):\n raise NotImplementedError()\n\n\nclass RegionSample(object):\n \"\"\"Subsample regions (without replacement).\n (ie. turn this into fewer regions)\n\n Uses a a nested RegionStrategy for initial conversion,\n then keeps only randomly choosen ones\n\n \"\"\"\n\n def __init__(self, inner_region_strategy, ratio_or_count, seed=500):\n \"\"\"\n @inner_region_strategy: another region strategy to preprocess the regions first\n\n @ratio_or_count either a number 0.0 .. 1.0, in which case we subsample\n to total count * @ratio_or_count, or a hard number to sample to\n\n @seed: initial seed for random number generator - to make heatmaps repeatable\n \"\"\"\n self.name = \"Region_Sample_%f_%i_%s\" % (\n ratio_or_count,\n seed,\n inner_region_strategy.name,\n )\n self.inner_region_strategy = inner_region_strategy\n self.ratio_or_count = ratio_or_count\n self.seed = seed\n\n def calc(self, gr):\n res = self.inner_region_strategy.calc(gr)\n np.random.seed(self.seed)\n if self.ratio_or_count < 1:\n count = int(len(res) * self.ratio_or_count)\n else:\n count = self.ratio_or_count\n return res.sample(n=count)\n\n def get_dependencies(self, gr):\n return self.inner_region_strategy.get_dependencies(gr)\n\n # todo: handle changing seed by rebuilding the heatmap.\n", "id": "7991690", "language": "Python", "matching_score": 1.2203288078308105, "max_stars_count": 0, "path": "src/mbf_heatmap/chipseq/regions.py" }, { "content": "from pathlib import Path\n\n\ndef open_file(fileNameOrHandle, mode=\"rb\"):\n \"\"\"Transparently open compressed or uncompressed files\"\"\"\n if hasattr(fileNameOrHandle, \"read\"):\n return fileNameOrHandle\n elif isinstance(fileNameOrHandle, Path):\n fileNameOrHandle = str(fileNameOrHandle)\n if fileNameOrHandle.endswith(\".gz\"):\n import gzip\n\n return gzip.GzipFile(fileNameOrHandle, mode)\n elif fileNameOrHandle.endswith(\".bz2\"):\n import bz2\n\n return bz2.BZ2File(fileNameOrHandle, mode)\n else:\n return open(fileNameOrHandle, mode)\n\n\ndef chunkify(handle, separator, block_size=None):\n \"\"\"take a file handle and split it at separator, reading in efficently in 50 mb blocks or so\"\"\"\n if block_size is None:\n block_size = 50 * 1024 * 1024\n chunk = handle.read(block_size)\n chunk = chunk.split(separator)\n while True:\n for k in chunk[:-1]:\n yield k\n next = handle.read(block_size)\n if next:\n chunk = chunk[-1] + next\n chunk = chunk.split(separator)\n else:\n yield chunk[-1]\n break\n\n\ndef pathify(output_filename, default, create_parents=True):\n if output_filename is None:\n res = Path(default)\n else:\n res = Path(output_filename)\n\n if create_parents:\n res.parent.mkdir(exist_ok=True)\n return res\n\n", "id": "8712465", "language": "Python", 
"matching_score": 0.15614654123783112, "max_stars_count": 0, "path": "src/mbf_fileformats/util.py" }, { "content": "import hashlib\nimport collections\nimport itertools\nimport time\nimport pathlib\nimport subprocess\n\ncmd = [\"python\", \"setup.py\", \"docs\"]\nhashes = collections.defaultdict(lambda: \"\")\n\n\ndef get_hash(fn, second=False):\n try:\n with open(fn, \"rb\") as d:\n return hashlib.md5(d.read()).hexdigest()\n except FileNotFoundError:\n if not second:\n time.sleep(1)\n return get_hash(fn, True)\n else:\n raise\n\n\nwhile True:\n rebuild = False\n files = [\"docs/conf.py\", \"docs/_static/my-styles.css\"]\n for fn in itertools.chain(\n files,\n pathlib.Path(\".\").glob(\"**/*.md\"),\n pathlib.Path(\".\").glob(\"**/*.rst\"),\n pathlib.Path(\"./src\").glob(\"**/*.py\"),\n ):\n fn = str(fn)\n new_hash = get_hash(fn)\n if hashes[fn] != new_hash:\n rebuild = True\n hashes[fn] = new_hash\n if rebuild:\n subprocess.check_call(cmd)\n", "id": "4527705", "language": "Python", "matching_score": 4.009187698364258, "max_stars_count": 3, "path": "autobuild_docs.py" }, { "content": "# run tests whenever a file changes\nimport hashlib\nimport collections\nimport itertools\nimport time\nimport pathlib\nimport subprocess\n\ncmd = [\"pytest\"]\nhashes = collections.defaultdict(lambda: \"\")\n\n\ndef get_hash(fn, second=False):\n try:\n with open(fn, \"rb\") as d:\n return hashlib.md5(d.read()).hexdigest()\n except FileNotFoundError:\n if not second:\n time.sleep(1)\n return get_hash(fn, True)\n else:\n raise\n\n\nknown_files = None\ni = 1\nwhile True:\n rebuild = False\n if not known_files or i % 10 == 0:\n known_files = list(\n itertools.chain(\n pathlib.Path(\"./src\").glob(\"**/*.py\"),\n pathlib.Path(\"./src\").glob(\"**/*.pyx\"),\n pathlib.Path(\"./tests\").glob(\"**/*.py\"),\n )\n )\n i += 1\n for fn in known_files:\n fn = str(fn)\n new_hash = get_hash(fn)\n if hashes[fn] != new_hash:\n rebuild = True\n hashes[fn] = new_hash\n if rebuild:\n try:\n subprocess.check_call(cmd)\n except subprocess.CalledProcessError:\n continue\n time.sleep(0.5)\n", "id": "10207948", "language": "Python", "matching_score": 0.9585182070732117, "max_stars_count": 0, "path": "autotest.py" }, { "content": "import os\nimport sys\nimport subprocess\nfrom pathlib import Path\n\nvenv_path = Path(sys.prefix)\nnotebook_path = venv_path.parent\nis_windows = sys.platform.lower() in (\"win32\", \"windows\")\nif is_windows:\n jupyter_cmd = (\n venv_path / \"Scripts\" / \"jupyter.exe\"\n ) # should use the venv's jupyter, right\nelse:\n jupyter_cmd = venv_path / \"bin\" / \"jupyter\" # should use the venv's jupyter, right\n\n\ndef disable_use_redirect_file():\n jupyter_config = Path(\"~/.jupyter/jupyter_notebook_config.py\").expanduser()\n if not jupyter_config.exists():\n subprocess.check_call([jupyter_cmd, \"notebook\", \"--generate-config\"])\n fix = \"\\nc.NotebookApp.use_redirect_file = False\\n\"\n if not fix in jupyter_config.read_text():\n with open(jupyter_config, \"a+\") as op:\n op.write(fix)\n print(\"Disabled file based jupyter redirect\")\n\n\ndef place_shortcut_on_desktop():\n if is_windows:\n cmd = \"PowerShell -NoProfile -Command \\\"Write-Host([Environment]::GetFolderPath('Desktop'))\\\"\"\n desktop_folder = Path(subprocess.check_output(cmd, shell=True).decode(\"utf-8\").strip())\n else:\n desktop_folder = Path(\"~/Desktop\").expanduser()\n target = desktop_folder / (\"jupyter notebook \" + notebook_path.name + \".py\")\n target.write_text(\n f\"\"\"#!/usr/bin/env python3\nimport 
subprocess\nsubprocess.call([r\"{jupyter_cmd}\", 'notebook'], cwd=r\"{notebook_path}\")\n\"\"\"\n )\n if is_windows:\n os.chmod(target, 0o755)\n print(\"Placed jupyter shortcut on desktop\")\n\n\ndef main():\n if is_windows:\n disable_use_redirect_file()\n place_shortcut_on_desktop()\n", "id": "9322320", "language": "Python", "matching_score": 1.251543641090393, "max_stars_count": 0, "path": "src/marburg_biobank/jupyter_venv_on_desktop.py" }, { "content": "import subprocess\nsubprocess.check_call(\"python setup.py build_ext -i\", shell=True)\n", "id": "11981308", "language": "Python", "matching_score": 0.9420549273490906, "max_stars_count": 0, "path": "make.py" }, { "content": "\n# This setup.py will compile and install the subsets extension\n# Use:\n# setup.py install\n#\n# In Python 2.2 the extension is copied into\n# <pythonhome>/lib/site-packages\n# in earlier versions it may be put directly in the python directory.\n\nfrom distutils.core import setup\nfrom distutils.extension import Extension\nfrom Cython.Distutils import build_ext\nimport Cython.Compiler.Options\nCython.Compiler.Options.embed_pos_in_docstring = True\nimport numpy\ntry:\n numpy_include = numpy.get_include()\nexcept AttributeError:\n numpy_include = numpy.get_numpy_include()\n\nprint numpy_include\n\n\nsetup(name = \"marsnpdiff\",\n version = \"1.0\",\n maintainer = \"<NAME>\",\n maintainer_email = \"<EMAIL>\",\n description = \"\",\n cmdclass = {'build_ext': build_ext},\n\n ext_modules = [\n \n Extension(\"_marsnpdiff\", [\"_marsnpdiff.pyx\"]),\n ]\n)\n# end of file: setup.py\n\n", "id": "5392396", "language": "Python", "matching_score": 2.149412155151367, "max_stars_count": 0, "path": "setup.py" }, { "content": "#!/usr/bin/python\n\nimport sys\nfrom distutils.core import setup\n\n\ndef main():\n if not (3,) > sys.version_info >= (2, 7):\n sys.stderr.write('This backport is for Python 2.7 only.\\n')\n sys.exit(1)\n\n setup(\n name='functools32',\n version='3.2.3-2',\n description='Backport of the functools module from Python 3.2.3 for use on 2.7 and PyPy.',\n long_description=\"\"\"\nThis is a backport of the functools standard library module from\nPython 3.2.3 for use on Python 2.7 and PyPy. 
It includes\nnew features `lru_cache` (Least-recently-used cache decorator).\"\"\",\n license='PSF license',\n\n maintainer='<NAME>',\n maintainer_email='<EMAIL>',\n url='https://github.com/MiCHiLU/python-functools32',\n\n packages=['functools32'],\n )\n\n\nif __name__ == '__main__':\n main()\n", "id": "2310552", "language": "Python", "matching_score": 0.4550032615661621, "max_stars_count": 34, "path": "src/marburg_biobank/functools32/setup.py" }, { "content": "# -*- coding: future_fstrings -*-\nimport requests\nimport tempfile\nimport re\nimport os\nimport subprocess\nimport packaging.version\nimport pkg_resources\nfrom pathlib import Path\nfrom .util import (\n combine_volumes,\n find_storage_path_from_other_machine,\n dict_to_toml,\n clone_repo,\n re_github,\n)\nimport tomlkit\n\n\nclass DockFill_Python:\n def __init__(self, anysnake):\n self.anysnake = anysnake\n self.python_version = self.anysnake.python_version\n\n self.paths = self.anysnake.paths\n\n self.paths.update(\n {\n \"storage_python\": find_storage_path_from_other_machine(\n self.anysnake, Path(\"python\") / self.python_version\n ),\n \"docker_storage_python\": \"/anysnake/python\",\n # \"docker_code\": \"/project/code\",\n \"log_python\": self.paths[\"log_storage\"]\n / f\"anysnake.python.{self.python_version}.log\",\n }\n )\n self.volumes = {\n anysnake.paths[\"docker_storage_python\"]: anysnake.paths[\"storage_python\"]\n }\n\n def get_additional_docker_build_cmds(self):\n if self.python_version.startswith(\"2\"):\n # python beyond these versions needs libssl 1.1\n # the older ones need libssl1.0\n # on older debians/ubuntus that would be libssl-dev\n # but on 18.04+ it's libssl1.0-dev\n # and we're not anticipating building on something older\n return \"\\nRUN apt-get install -y libssl1.0-dev\\n\"\n else:\n return \"\"\n\n def pprint(self):\n print(f\" Python version={self.python_version}\")\n\n def ensure(self):\n\n python_version = self.anysnake.python_version\n\n return self.anysnake.build(\n target_dir=self.paths[\"storage_python\"],\n target_dir_inside_docker=self.paths[\"docker_storage_python\"],\n relative_check_filename=\"bin/virtualenv\",\n log_name=\"log_python\",\n additional_volumes={},\n version_check=self.check_python_version_exists,\n root=True,\n build_cmds=f\"\"\"\n#/bin/bash\ncd ~/\ngit clone http://github.com/pyenv/pyenv.git\ncd pyenv/plugins/python-build\n./install.sh\n\nexport MAKE_OPTS=-j{self.anysnake.cores}\nexport CONFIGURE_OPTS=\"--enable-shared --enable-optimizations\"\n\nexport PYTHON_CFLAGS=\"-fno-semantic-interposition\"\nexport PYTHON_CONFIGURE_OPTS=\"--enable-shared --enable-optimizations\"\n\npython-build {python_version} {self.paths['docker_storage_python']}\n#curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py\n#{self.paths['docker_storage_python']}/bin/python get-pip.py\n{self.paths['docker_storage_python']}/bin/pip install -U pip virtualenv\nchown {os.getuid()}:{os.getgid()} {self.paths['docker_storage_python']} -R\necho \"done\"\n\"\"\",\n )\n\n def check_python_version_exists(self):\n version = self.python_version\n r = requests.get(\"https://www.python.org/doc/versions/\").text\n if not (\n f'release/{version}/\"' in r or f'release/{version}\"'\n ): # some have / some don't\n raise ValueError(\n f\"Unknown python version {version} - check https://www.python.org/doc/versions/\"\n )\n\n def freeze(self):\n return {\"base\": {\"python\": self.python_version}}\n\n\ndef safe_name(name):\n return pkg_resources.safe_name(name).lower()\n\n\nclass _Dockfill_Venv_Base:\n def 
create_venv(self):\n additional_cmd = \"\"\n if self.python_version[0] == \"2\":\n additional_cmd = f\"{self.target_path_inside_docker}/bin/pip install pyopenssl ndg-httpsclient pyasn1\"\n return self.anysnake.build(\n target_dir=self.target_path,\n target_dir_inside_docker=self.target_path_inside_docker,\n relative_check_filename=Path(\"bin\") / \"activate.fish\",\n log_name=f\"log_{self.name}_venv\",\n additional_volumes=self.dockfill_python.volumes,\n build_cmds=f\"\"\"\n{self.paths['docker_storage_python']}/bin/virtualenv -p {self.paths['docker_storage_python']}/bin/python {self.target_path_inside_docker}\n{additional_cmd}\necho \"done\"\n\"\"\",\n )\n\n\nclass Dockfill_PythonPoetry(_Dockfill_Venv_Base):\n def __init__(self, anysnake, dockfill_python):\n self.anysnake = anysnake\n self.paths = self.anysnake.paths\n self.python_version = self.anysnake.python_version\n self.dockfill_python = dockfill_python\n self.name = \"python_poetry\"\n self.paths.update(\n {\n \"poetry_venv\": (\n self.paths[\"storage\"] / \"poetry_venv\" / self.python_version\n ),\n \"docker_poetry_venv\": \"/anysnake/poetry_venv\",\n \"log_python_poetry_venv\": self.paths[\"log_storage\"]\n / f\"anysnake.poetry_venv.{self.python_version}.log\",\n }\n )\n self.target_path = self.paths[\"poetry_venv\"]\n self.target_path_inside_docker = self.paths[\"docker_poetry_venv\"]\n self.volumes = {}\n\n def pprint(self):\n pass\n\n def ensure(self):\n res = self.create_venv()\n res |= self.install_poetry()\n return res\n\n def install_poetry(self):\n poetry_bin = Path(self.target_path / \"bin\" / \"poetry\")\n if not poetry_bin.exists():\n print(\"install poetry\")\n volumes_ro = self.dockfill_python.volumes.copy()\n volumes_rw = {self.target_path_inside_docker: self.target_path}\n env = {}\n paths = [self.target_path_inside_docker + \"/bin\"]\n\n env[\"EXTPATH\"] = \":\".join(paths)\n cmd = \"/anysnake/poetry_venv/bin/pip install poetry\"\n if self.python_version[0] == \"2\":\n cmd += f\" pyopenssl ndg-httpsclient pyasn1\"\n return_code, logs = self.anysnake._run_docker(\n f\"\"\"\n #!/bin/bash\n export PATH=$PATH:$EXTPATH\n {cmd}\n echo \"done\"\n \n \"\"\",\n {\n \"volumes\": combine_volumes(ro=volumes_ro, rw=volumes_rw),\n \"environment\": env,\n },\n f\"log_python_poetry_venv\",\n append_to_log=True,\n )\n return True # please run post_build_cmd\n return False\n\n\nclass _DockerFillVenv(_Dockfill_Venv_Base):\n def __init__(self):\n self.paths.update(\n {\n f\"log_{self.name}_venv\": (\n self.log_path / f\"anysnake.{self.name}_venv.log\"\n ),\n f\"log_{self.name}_venv_poetry\": (\n self.log_path / f\"anysnake.{self.name}_venv_poetry.log\"\n ),\n f\"log_{self.name}_venv_poetry_cmd\": (\n self.log_path / f\"anysnake.{self.name}_venv_poetry_cmd.log\"\n ),\n }\n )\n self.poetry_path = self.clone_path / f\"poetry_{self.anysnake.python_version}\"\n self.poetry_path.mkdir(exist_ok=True, parents=True)\n self.poetry_path_inside_docker = str(\n Path(self.clone_path_inside_docker) / f\"poetry_{self.anysnake.python_version}\"\n )\n\n def ensure(self):\n res = self.create_venv()\n res |= self.fill_venv()\n return res\n\n def fill_venv(self, rebuild=False):\n code_packages = {\n k: v\n for (k, v) in self.packages.items()\n if v.startswith(\"@git+\")\n or v.startswith(\"@hg+\")\n or v.startswith(\"@\")\n and re.match(re_github, v[1:]) # github\n }\n code_names = set(code_packages.keys())\n any_cloned = self.clone_code_packages(code_packages)\n if rebuild or any_cloned:\n # force rebuild\n if Path(self.poetry_path / 
\"pyproject.toml\").exists():\n Path(self.poetry_path / \"pyproject.toml\").unlink()\n packages_missing = set([safe_name(x) for x in self.packages]) - set(\n [\n safe_name(x)\n for x in self.find_installed_packages(\n self.anysnake.major_python_version\n )\n ]\n )\n\n return self.install_with_poetry(self.packages, code_packages, packages_missing)\n\n def clone_code_packages(self, code_packages):\n result = set()\n for name, url_spec in code_packages.items():\n log_key = f\"log_{self.name}_venv_{name}\"\n self.paths[log_key + \"_clone\"] = self.log_path / (\n f\"anysnake.{self.name}_venv_{name}.pip.log\"\n )\n target_path = self.clone_path / name\n with open(str(self.paths[log_key + \"_clone\"]), \"wb\") as log_file:\n if not target_path.exists():\n result.add(name)\n url = url_spec\n clone_repo(url, name, target_path, log_file)\n\n return result\n\n def find_installed_packages(self, major_python_version):\n return list(self.find_installed_package_versions(major_python_version).keys())\n\n def find_extras(self, editable_package):\n import configparser\n\n fn = self.clone_path / editable_package / \"setup.cfg\"\n if fn.exists():\n c = configparser.ConfigParser()\n c.read(str(fn))\n try:\n return list(set(c[\"options.extras_require\"].keys()) - set([\"doc\"]))\n except KeyError:\n pass\n return []\n\n def find_installed_package_versions(self, major_python_version):\n venv_dir = (\n self.target_path\n / \"lib\"\n / (\"python\" + major_python_version)\n / \"site-packages\"\n )\n result = {}\n for p in venv_dir.glob(\"*\"):\n if p.name.endswith(\".dist-info\"):\n name = p.name[: p.name.rfind(\"-\", 0, -5)]\n version = p.name[p.name.rfind(\"-\", 0, -5) + 1 : -1 * len(\".dist-info\")]\n result[safe_name(name)] = version\n elif p.name.endswith(\".egg-link\"):\n name = p.name[: -1 * len(\".egg-link\")]\n version = \"unknown\"\n result[safe_name(name)] = version\n elif p.name.endswith(\".so\"):\n name = p.name[: p.name.find(\".\")]\n version = \"unknown\"\n result[safe_name(name)] = version\n\n return result\n\n def install_with_poetry(self, packages, editable_packages, packages_missing):\n \"\"\"packages are parse_requirements results with method == 'pip'\n we now use poetry for this\n \n \"\"\"\n toml = f\"\"\"\n[tool.poetry]\n name = \"{self.anysnake.project_name}\"\n version = \"0.1.0\"\n description = \"\"\n authors = []\n\n[build-system\"]\n requires = [\"poetry>=0.12\"]\n build-backend = \"poetry.masonry.api\"\n\n[tool.poetry.dependencies]\n python = \"{self.anysnake.python_version}\"\n\"\"\"\n for k, v in sorted(packages.items()):\n if k not in editable_packages and safe_name(k) not in editable_packages:\n toml += f'\\t{k} = \"{v}\"\\n'\n else:\n extras = [f'\"{x}\"' for x in self.find_extras(k)]\n toml += f'\\t{k} = {{path = \"{self.clone_path_inside_docker}/{k}\", extras = [{\", \".join(extras)}], develop = true}}\\n'\n new_toml = toml\n pyproject_toml = Path(self.poetry_path / \"pyproject.toml\")\n pyproject_toml.parent.mkdir(exist_ok=True)\n if pyproject_toml.exists():\n old_toml = pyproject_toml.read_text()\n else:\n old_toml = \"\"\n if new_toml != old_toml or packages_missing:\n print(f\"poetry for {self.name} (slow, stand by)\")\n import difflib\n for row in (difflib.context_diff(old_toml.split(\"\\n\") ,new_toml.split(\"\\n\"))):\n print(row)\n pyproject_toml.write_text(new_toml)\n cmd = [\n f\"source {self.target_path_inside_docker}/bin/activate\",\n f\"cd {self.poetry_path_inside_docker} && {self.paths['docker_poetry_venv']}/bin/poetry update --verbose\",\n ]\n cmd = 
\"\\n\".join(cmd)\n # poetry cache needs to be writable, and we're not mounting .cache\n # by default in .ensure dockers.\n pypoetry_cache_path = Path(\"~\").expanduser() / \".cache\" / \"pypoetry\"\n pypoetry_cache_path.mkdir(parents=True, exist_ok=True)\n volumes_ro = self.dockfill_python.volumes.copy()\n volumes_rw = {\n self.target_path_inside_docker: self.target_path,\n self.clone_path_inside_docker: self.clone_path,\n self.paths[\"docker_poetry_venv\"]: self.paths[\"poetry_venv\"],\n pypoetry_cache_path: pypoetry_cache_path\n }\n env = {}\n paths = [self.target_path_inside_docker + \"/bin\"]\n if self.anysnake.dockfill_rust is not None: # if we have a rust, use it\n volumes_ro.update(self.anysnake.dockfill_rust.volumes)\n volumes_rw.update(self.anysnake.dockfill_rust.rw_volumes)\n paths.append(self.anysnake.dockfill_rust.shell_path)\n env.update(self.anysnake.dockfill_rust.env)\n from .cli import home_files\n\n home_inside_docker = self.anysnake.paths['home_inside_docker']\n for h in home_files:\n p = Path(\"~\").expanduser() / h\n if p.exists():\n volumes_ro[str(Path(home_inside_docker) / h)] = p\n env[\"EXTPATH\"] = \":\".join(paths)\n # /anysnake/code_venv/bin /anysnake/cargo/bin /anysnake/code_venv/bin /anysnake/storage_venv/bin /anysnake/R/bin /usr/local/sbin /usr/local/bin /usr/sbin /usr/bin /sbin /bin /machine/opt/infrastructure/client /machine/opt/infrastructure/repos/FloatingFileSystemClient\n return_code, logs = self.anysnake._run_docker(\n f\"\"\"\n #!/bin/bash\n export PATH=$PATH:$EXTPATH\n echo \"Path: $PATH\"\n {cmd}\n echo \"done\"\n \n \"\"\",\n {\n \"volumes\": combine_volumes(ro=volumes_ro, rw=volumes_rw),\n \"environment\": env,\n },\n f\"log_{self.name}_venv_poetry\",\n )\n installed_now = self.find_installed_packages(\n self.anysnake.major_python_version\n )\n still_missing = set([safe_name(k) for k in packages.keys()]).difference(\n [safe_name(k) for k in installed_now]\n )\n if still_missing:\n msg = f\"Installation of packages failed: {still_missing}\\n\"\n elif (isinstance(return_code, int) and (return_code != 0)) or (\n not isinstance(return_code, int) and (return_code[\"StatusCode\"] != 0)\n ):\n msg = f\"Installation of packages failed: return code was not 0 (was {return_code})\\n\"\n else:\n msg = \"\"\n if msg:\n print(self.paths[f\"log_{self.name}_venv_poetry\"].read_text())\n raise ValueError(\n msg\n + \"Check log in \"\n + str(self.paths[f\"log_{self.name}_venv_poetry\"])\n )\n return True\n else:\n return False # everything ok\n\n\nclass DockFill_GlobalVenv(_DockerFillVenv):\n def __init__(self, anysnake, dockfill_python):\n self.anysnake = anysnake\n self.paths = self.anysnake.paths\n self.python_version = self.anysnake.python_version\n self.name = \"storage\"\n self.paths.update(\n {\n \"storage_venv\": (self.paths[\"storage\"] / \"venv\" / self.python_version),\n \"docker_storage_venv\": \"/anysnake/storage_venv\",\n \"storage_clones\": self.paths[\"storage\"] / \"code\",\n \"docker_storage_clones\": \"/anysnake/storage_venv_clones\",\n }\n )\n self.target_path = self.paths[\"storage_venv\"]\n self.target_path_inside_docker = self.paths[\"docker_storage_venv\"]\n self.clone_path = self.paths[\"storage_clones\"]\n self.clone_path_inside_docker = self.paths[\"docker_storage_clones\"]\n self.log_path = self.paths[\"log_storage\"]\n\n self.dockfill_python = dockfill_python\n self.volumes = {\n anysnake.paths[\"docker_storage_venv\"]: self.paths[\"storage_venv\"],\n anysnake.paths[\"docker_storage_clones\"]: self.paths[\"storage_clones\"],\n }\n 
self.packages = self.anysnake.global_python_packages\n self.shell_path = str(Path(self.paths[\"docker_storage_venv\"]) / \"bin\")\n super().__init__()\n\n def pprint(self):\n print(\" Global python packages\")\n for entry in self.anysnake.global_python_packages.items():\n print(f\" {entry}\")\n\n def freeze(self):\n \"\"\"Return a toml string with all the installed versions\"\"\"\n result = {}\n for k, v in self.find_installed_package_versions(\n self.anysnake.major_python_version\n ).items():\n result[k] = f\"{v}\"\n return {\"global_python\": result}\n\n\nclass DockFill_CodeVenv(_DockerFillVenv):\n def __init__(self, anysnake, dockfill_python, dockfill_global_venv):\n self.anysnake = anysnake\n self.dockfill_global_venv = dockfill_global_venv\n self.paths = self.anysnake.paths\n self.name = \"code\"\n self.log_path = self.paths[\"log_code\"]\n self.python_version = self.anysnake.python_version\n self.paths.update(\n {\n \"code_venv\": self.paths[\"code\"] / \"venv\" / self.python_version,\n \"docker_code_venv\": \"/anysnake/code_venv\",\n \"code_clones\": self.paths[\"code\"],\n \"docker_code_clones\": self.paths[\"docker_code\"],\n }\n )\n self.target_path = self.paths[\"code_venv\"]\n self.target_path_inside_docker = self.paths[\"docker_code_venv\"]\n self.clone_path = self.paths[\"code_clones\"]\n self.clone_path_inside_docker = self.paths[\"docker_code_clones\"]\n self.dockfill_python = dockfill_python\n self.volumes = {anysnake.paths[f\"docker_code_venv\"]: self.paths[\"code_venv\"]}\n self.rw_volumes = {anysnake.paths[f\"docker_code\"]: self.paths[\"code\"]}\n self.packages = self.anysnake.local_python_packages\n self.shell_path = str(Path(self.paths[\"docker_code_venv\"]) / \"bin\")\n super().__init__()\n\n def ensure(self):\n super().ensure()\n self.copy_bins_from_global()\n self.fill_sitecustomize()\n return False\n\n def copy_bins_from_global(self):\n source_dir = self.paths[\"storage_venv\"] / \"bin\"\n target_dir = self.paths[\"code_venv\"] / \"bin\"\n for input_fn in source_dir.glob(\"*\"):\n if not input_fn.is_dir() and not input_fn.is_symlink():\n output_fn = target_dir / input_fn.name\n if not output_fn.exists():\n input = input_fn.read_bytes()\n if input.startswith(b\"#\"):\n n_pos = input.find(b\"\\n\")\n first_line = input[:n_pos]\n if (\n first_line\n == f\"#!{self.paths['docker_storage_venv']}/bin/python\".encode(\n \"utf-8\"\n )\n ):\n output = (\n f\"#!{self.paths['docker_code_venv']}/bin/python\".encode(\n \"utf-8\"\n )\n + input[n_pos:]\n )\n output_fn.write_bytes(output)\n else:\n output_fn.write_bytes(input)\n output_fn.chmod(input_fn.stat().st_mode)\n pth_path = (\n self.paths[\"code_venv\"]\n / \"lib\"\n / (\"python\" + self.anysnake.major_python_version)\n / \"site-packages\"\n / \"anysnake.pth\"\n )\n if not pth_path.exists():\n pth_path.write_text(\n str(\n self.paths[\"docker_storage_venv\"]\n / \"lib\"\n / (\"python\" + self.anysnake.major_python_version)\n / \"site-packages\"\n )\n + \"\\n\"\n )\n\n def pprint(self):\n print(\" Local python packages\")\n for entry in self.anysnake.local_python_packages.items():\n print(f\" {entry}\")\n\n def fill_sitecustomize(self):\n lib_code = (\n Path(self.paths[\"docker_code_venv\"])\n / \"lib\"\n / (\"python\" + self.anysnake.major_python_version)\n )\n lib_storage = (\n Path(self.paths[\"docker_storage_venv\"])\n / \"lib\"\n / (\"python\" + self.anysnake.major_python_version)\n )\n if \"docker_storage_rpy2\" in self.paths:\n lib_rpy2 = (\n Path(self.paths[\"docker_storage_rpy2\"])\n / \"lib\"\n / (\"python\" + 
self.anysnake.major_python_version)\n )\n rpy2_venv_str = f\"'{lib_rpy2}/site-packages',\"\n else:\n rpy2_venv_str = \"\"\n sc_file = str(\n self.paths[\"code_venv\"]\n / \"lib\"\n / (\"python\" + self.anysnake.major_python_version)\n / \"site-packages\"\n / \"sitecustomize.py\"\n )\n\n tf = open(sc_file, \"w\")\n tf.write(\n f\"\"\"\nimport sys\nfor x in [\n {rpy2_venv_str}\n '{lib_storage}/site-packages',\n '{lib_code}/site-packages',\n '{lib_code}',\n ]:\n if x in sys.path:\n sys.path.remove(x)\n sys.path.insert(0, x)\n\"\"\"\n )\n tf.flush()\n\n def rebuild(self):\n self.fill_venv(rebuild=True)\n\n def fill_venv(self, rebuild=False):\n super().fill_venv(rebuild=rebuild)\n return False\n\n def freeze(self):\n \"\"\"Return a toml string with all the installed versions\"\"\"\n result = {}\n for k, v in self.find_installed_package_versions(\n self.anysnake.major_python_version\n ).items():\n result[k] = f\"{v}\"\n return {\"python\": result}\n", "id": "9845752", "language": "Python", "matching_score": 3.734790802001953, "max_stars_count": 0, "path": "src/mbf_anysnake/dockfill_python.py" }, { "content": "# -*- coding: future_fstrings -*-\n\nimport re\nfrom pathlib import Path\nimport requests\nfrom .util import combine_volumes, find_storage_path_from_other_machine\n\n\nclass DockFill_R:\n def __init__(self, anysnake):\n self.anysnake = anysnake\n self.paths = self.anysnake.paths\n self.R_version = self.anysnake.R_version\n self.cran_mirror = self.anysnake.cran_mirror\n\n self.paths.update(\n {\n \"storage_r\": find_storage_path_from_other_machine(\n self.anysnake, Path(\"R\") / self.R_version\n ),\n \"docker_storage_r\": \"/anysnake/R\",\n \"log_r\": self.paths[\"log_storage\"]\n / f\"anysnake.R.{self.R_version}.log\",\n }\n )\n self.volumes = {\n self.paths[\"docker_storage_r\"]: self.paths[\"storage_r\"]\n }\n self.shell_path = str(Path(self.paths[\"docker_storage_r\"]) / \"bin\")\n\n def pprint(self):\n print(f\" R version={self.R_version}\")\n\n def check_r_version_exists(self):\n if not re.match(r\"\\d+\\.\\d+\\.\\d\", self.R_version):\n raise ValueError(\n \"Incomplete R version specified - bust look like e.g 3.5.3\"\n )\n url = self.cran_mirror + \"src/base/R-\" + self.R_version[0]\n r = requests.get(url).text\n if not f\"R-{self.R_version}.tar.gz\" in r:\n raise ValueError(\n (f\"Unknown R version {self.R_version} - check {url} for list\")\n )\n\n def ensure(self):\n # todo: switch to cdn by default / config in file\n r_url = (\n self.anysnake.cran_mirror\n + \"src/base/R-\"\n + self.anysnake.R_version[0]\n + \"/R-\"\n + self.anysnake.R_version\n + \".tar.gz\"\n )\n return self.anysnake.build(\n target_dir=self.paths[\"storage_r\"],\n target_dir_inside_docker=self.paths[\"docker_storage_r\"],\n relative_check_filename=\"bin/R\",\n log_name=f\"log_r\",\n additional_volumes={},\n version_check=self.check_r_version_exists(),\n build_cmds=f\"\"\"\ncd ~\nwget {r_url} -O R.tar.gz\ntar xf R.tar.gz\ncd R-{self.anysnake.R_version}\n./configure --prefix={self.paths['docker_storage_r']} --enable-R-shlib --with-blas --with-lapack --with-x=no\nmake -j {self.anysnake.cores}\nmake install\n\necho \"done\"\n\"\"\",\n )\n\n\nclass DockFill_Rpy2:\n def __init__(self, anysnake, dockfill_py, dockfill_r):\n self.anysnake = anysnake\n self.paths = self.anysnake.paths\n self.python_version = self.anysnake.python_version\n self.R_version = self.anysnake.R_version\n self.rpy2_version = self.anysnake.rpy2_version\n self.dockfill_python = dockfill_py\n self.dockfill_r = dockfill_r\n\n self.paths.update(\n {\n 
\"storage_rpy2\": (\n find_storage_path_from_other_machine(\n self.anysnake,\n Path(\"rpy2\") / f\"py={self.python_version}_r={self.R_version}_rpy2={self.rpy2_version}\",\n )\n ),\n \"docker_storage_rpy2\": \"/anysnake/rpy2\",\n \"log_rpy2\": self.paths[\"log_storage\"]\n / f\"anysnake.rpy2.{self.python_version}-{self.R_version}.log\",\n }\n )\n self.volumes = {\n self.paths[\"docker_storage_rpy2\"]: self.paths[\"storage_rpy2\"]\n }\n self.env = {'LD_LIBRARY_PATH': \"/anysnake/R/lib/R/lib\"}\n\n def pprint(self):\n pass\n\n def ensure(self):\n # TODO: This will probably need fine tuning for combining older Rs and the\n # latest rpy2 version that supported them\n return self.anysnake.build(\n target_dir=self.paths[\"storage_rpy2\"],\n target_dir_inside_docker=self.paths[\"docker_storage_rpy2\"],\n relative_check_filename=f\"lib/python{self.anysnake.major_python_version}/site-packages/rpy2/__init__.py\",\n log_name=f\"log_rpy2\",\n additional_volumes=combine_volumes(\n ro=[self.dockfill_python.volumes, self.dockfill_r.volumes]\n ),\n build_cmds=f\"\"\"\n\nexport R_HOME={self.paths['docker_storage_r']}\nexport PATH={self.paths['docker_storage_r']}/bin:$PATH\n# use the (hopefully more robust) API mode rpy2\nexport RPY2_CFFI_MODE=API\n{self.paths['docker_storage_python']}/bin/virtualenv -p {self.paths['docker_storage_python']}/bin/python {self.paths['docker_storage_rpy2']}\nmkdir /tmp/rpy2\ncd /tmp/rpy2\n{self.paths['docker_storage_rpy2']}/bin/pip3 download rpy2=={self.rpy2_version}\n#this might not be enough later on, if rpy2 gains a version that is\n# dependend on something we don't get as a wheel\n{self.paths['docker_storage_rpy2']}/bin/pip3 install *.whl\ntar xf rpy2-*.tar.gz\nrm rpy2-*.tar.gz\nmv rpy2* rpy2\ncd rpy2\n{self.paths['docker_storage_rpy2']}/bin/pip install .\n\n{self.paths['docker_storage_rpy2']}/bin/pip install tzlocal\ntouch {self.paths['docker_storage_rpy2']}/done\nchown 1001 {self.paths['docker_storage_rpy2']} -R\necho \"done\"\n\"\"\",\n )\n", "id": "10812513", "language": "Python", "matching_score": 1.2582378387451172, "max_stars_count": 0, "path": "src/mbf_anysnake/dockfill_r.py" }, { "content": "import sys\nimport pytest\nimport os\nimport shutil\nfrom loguru import logger\n\nfrom pathlib import Path\nimport pypipegraph2 as ppg2\nimport pypipegraph2.ppg1_compatibility\n\nif \"pytest\" not in sys.modules:\n raise ValueError(\"fixtures can only be used together with pytest\")\n\n\[email protected](tryfirst=True, hookwrapper=True)\ndef pytest_runtest_makereport(item, call):\n # execute all other hooks to obtain the report object\n outcome = yield\n rep = outcome.get_result()\n\n # set a report attribute for each phase of a call, which can\n # be \"setup\", \"call\", \"teardown\"\n\n setattr(item, \"rep_\" + rep.when, rep)\n\n\[email protected]\ndef new_pipegraph(request):\n import sys\n\n if request.cls is None:\n target_path = Path(request.fspath).parent / \"run\" / (\".\" + request.node.name)\n else:\n target_path = (\n Path(request.fspath).parent\n / \"run\"\n / (request.cls.__name__ + \".\" + request.node.name)\n )\n target_path = target_path.absolute()\n old_dir = Path(os.getcwd()).absolute()\n if old_dir == target_path:\n pass\n else:\n if target_path.exists(): # pragma: no cover\n shutil.rmtree(target_path)\n\n try:\n first = [False]\n\n def np(**kwargs):\n if not first[0]:\n Path(target_path).mkdir(parents=True, exist_ok=True)\n os.chdir(target_path)\n Path(\"cache\").mkdir()\n Path(\"results\").mkdir()\n Path(\"out\").mkdir()\n\n first[0] = True\n if not 
\"log_level\" in kwargs:\n kwargs[\"log_level\"] = 40\n if not \"cores\" in kwargs:\n kwargs[\"cores\"] = 1\n if not \"allow_short_filenames\" in kwargs:\n kwargs[\"allow_short_filenames\"] = True\n if not \"prevent_absolute_paths\" in kwargs:\n kwargs[\"prevent_absolute_paths\"] = False\n if not \"run_mode\" in kwargs:\n kwargs[\"run_mode\"] = ppg2.RunMode.NONINTERACTIVE\n\n g = ppg2.new(\n # log_level=5,\n **kwargs,\n )\n g.new = np\n g.new_pipegraph = g.new # ppg1 test case compatibility\n g.result_dir = Path(\"results\") # ppg test case compatibility\n if ppg2.ppg1_compatibility.patched:\n g.rc = ppg2.ppg1_compatibility.FakeRC()\n return g\n\n def finalize():\n if hasattr(request.node, \"rep_setup\"):\n\n if request.node.rep_setup.passed and (\n request.node.rep_call.passed\n or request.node.rep_call.outcome == \"skipped\"\n ):\n try:\n # if not hasattr(ppg2.util.global_pipegraph, \"test_keep_output\"):\n if \"--profile\" not in sys.argv:\n shutil.rmtree(target_path)\n except OSError: # pragma: no cover\n pass\n\n request.addfinalizer(finalize)\n yield np()\n\n finally:\n os.chdir(old_dir)\n\n\[email protected]\ndef no_pipegraph(request):\n \"\"\"No pipegraph, but seperate directory per test\"\"\"\n if request.cls is None:\n target_path = Path(request.fspath).parent / \"run\" / (\".\" + request.node.name)\n else:\n target_path = (\n Path(request.fspath).parent\n / \"run\"\n / (request.cls.__name__ + \".\" + request.node.name)\n )\n if target_path.exists(): # pragma: no cover\n shutil.rmtree(target_path)\n target_path = target_path.absolute()\n target_path.mkdir()\n old_dir = Path(os.getcwd()).absolute()\n os.chdir(target_path)\n try:\n\n def np():\n ppg2.global_pipegraph = None\n return None\n\n def finalize():\n if hasattr(request.node, \"rep_setup\"):\n\n if request.node.rep_setup.passed and (\n request.node.rep_call.passed\n or request.node.rep_call.outcome == \"skipped\"\n ):\n try:\n shutil.rmtree(target_path)\n except OSError: # pragma: no cover\n pass\n\n request.addfinalizer(finalize)\n ppg2.global_pipegraph = None\n yield np()\n\n finally:\n os.chdir(old_dir)\n\n\[email protected]\ndef both_ppg_and_no_ppg(request):\n \"\"\"Create both an inside and an outside ppg test case.\n don't forgot to add this to your conftest.py\n\n Use togother with run_ppg and force_load\n\n ```\n def pytest_generate_tests(metafunc):\n if \"both_ppg_and_no_ppg\" in metafunc.fixturenames:\n metafunc.parametrize(\"both_ppg_and_no_ppg\", [True, False], indirect=True)\n ```\n \"\"\"\n\n if request.param:\n if request.cls is None:\n target_path = (\n Path(request.fspath).parent\n / \"run\"\n / (\".\" + request.node.name + str(request.param))\n )\n else:\n target_path = (\n Path(request.fspath).parent\n / \"run\"\n / (request.cls.__name__ + \".\" + request.node.name)\n )\n if target_path.exists(): # pragma: no cover\n shutil.rmtree(target_path)\n target_path = target_path.absolute()\n old_dir = Path(os.getcwd()).absolute()\n try:\n first = [False]\n\n def np(quiet=True, **kwargs):\n if not first[0]:\n Path(target_path).mkdir(parents=True, exist_ok=True)\n os.chdir(target_path)\n Path(\"cache\").mkdir()\n Path(\"results\").mkdir()\n Path(\"out\").mkdir()\n\n first[0] = True\n if not \"log_level\" in kwargs:\n kwargs[\"log_level\"] = 40\n if not \"cores\" in kwargs:\n kwargs[\"cores\"] = 1\n if not \"allow_short_filenames\" in kwargs:\n kwargs[\"allow_short_filenames\"] = True\n if not \"prevent_absolute_paths\" in kwargs:\n kwargs[\"prevent_absolute_paths\"] = False\n if not \"run_mode\" in kwargs:\n 
kwargs[\"run_mode\"] = ppg2.RunMode.NONINTERACTIVE\n\n g = ppg2.new(**kwargs)\n g.new = np\n g.new_pipegraph = np # ppg1 test case compatibility\n g.result_dir = Path(\"results\") # ppg test case compatibility\n return g\n\n def finalize():\n if hasattr(request.node, \"rep_setup\"):\n\n if request.node.rep_setup.passed and (\n hasattr(request.node, \"rep_call\")\n and (\n request.node.rep_call.passed\n or request.node.rep_call.outcome == \"skipped\"\n )\n ):\n try:\n shutil.rmtree(target_path)\n except OSError: # pragma: no cover\n pass\n\n request.addfinalizer(finalize)\n yield np()\n\n finally:\n os.chdir(old_dir)\n else:\n if request.cls is None:\n target_path = (\n Path(request.fspath).parent\n / \"run\"\n / (\".\" + request.node.name + str(request.param))\n )\n else:\n target_path = (\n Path(request.fspath).parent\n / \"run\"\n / (request.cls.__name__ + \".\" + request.node.name)\n )\n if target_path.exists(): # pragma: no cover\n shutil.rmtree(target_path)\n target_path = target_path.absolute()\n target_path.mkdir()\n old_dir = Path(os.getcwd()).absolute()\n os.chdir(target_path)\n try:\n\n def np():\n ppg2.global_pipegraph = None\n\n class Dummy:\n pass\n\n d = Dummy\n d.new = lambda: None\n d.new_pipegraph = lambda: None # ppg test case compatibility\n d.result_dir = Path(\"results\") # ppg test case compatibility\n ppg2.change_global_pipegraph(None)\n return d\n\n def finalize():\n if hasattr(request.node, \"rep_setup\"):\n\n if request.node.rep_setup.passed and (\n request.node.rep_call.passed\n or request.node.rep_call.outcome == \"skipped\"\n ):\n try:\n shutil.rmtree(target_path)\n except OSError: # pragma: no cover\n pass\n\n request.addfinalizer(finalize)\n ppg2.change_global_pipegraph(None)\n print('gloabl', ppg2.global_pipegraph)\n yield np()\n\n finally:\n os.chdir(old_dir)\n\n\[email protected]\ndef job_trace_log():\n def fmt(record):\n lvl = str(record[\"level\"].name).ljust(8)\n m = record[\"module\"] + \":\"\n func = f\"{m:12}{record['line']:4}\"\n func = func.ljust(12 + 4)\n out = f\"{record['level'].icon} {lvl} | {record['elapsed']} | {func} | {record['message']}\\n\"\n if record[\"level\"].name == \"ERROR\":\n out = f\"<blue>{out}</blue>\"\n return out\n\n # logger.remove()\n # handler_id = logger.add(sys.stderr, format=fmt, level=6)\n ppg2.util.do_jobtrace_log = True\n yield\n # logger.remove(handler_id)\n", "id": "3988308", "language": "Python", "matching_score": 4.740372180938721, "max_stars_count": 0, "path": "src/pypipegraph2/testing/fixtures.py" }, { "content": "# -*-tcoding: utf-8 -*-\n\"\"\"\n Dummy conftest.py for pypipegraph2.\n\n If you don't know what this is for, just leave it empty.\n Read more about conftest.py under:\n https://pytest.org/latest/plugins.html\n\"\"\"\n\n# import pytest\nimport pytest\nfrom pathlib import Path\nimport shutil\nimport os\nimport pypipegraph2 as ppg2\nimport pypipegraph2.testing.fixtures\nimport sys\nimport plotnine # so it's available in the plot tests - saves about 10% of runtime\nfrom loguru import logger\n\n# support code to remove test created files\n# only if the test succeedd\n# ppg2.util._running_inside_test = True\nif \"pytest\" not in sys.modules:\n raise ValueError(\"fixtures can only be used together with pytest\")\n\n\[email protected](tryfirst=True, hookwrapper=True)\ndef pytest_runtest_makereport(item, call):\n # execute all other hooks to obtain the report object\n outcome = yield\n rep = outcome.get_result()\n\n # set a report attribute for each phase of a call, which can\n # be \"setup\", \"call\", 
\"teardown\"\n\n setattr(item, \"rep_\" + rep.when, rep)\n\n\nppg2_per_test = ppg2.testing.fixtures.new_pipegraph\n\n\[email protected]\ndef dir_per_test(request):\n \"\"\"No pipegraph, but seperate directory per test\"\"\"\n if request.cls is None:\n target_path = Path(request.fspath).parent / \"run\" / (\".\" + request.node.name)\n else:\n target_path = (\n Path(request.fspath).parent\n / \"run\"\n / (request.cls.__name__ + \".\" + request.node.name)\n )\n if target_path.exists(): # pragma: no cover\n shutil.rmtree(target_path)\n target_path = target_path.absolute()\n target_path.mkdir()\n old_dir = Path(os.getcwd()).absolute()\n os.chdir(target_path)\n try:\n\n def np():\n ppg2.util.global_pipegraph = None\n return None\n\n def finalize():\n if hasattr(request.node, \"rep_setup\"):\n\n if request.node.rep_setup.passed and (\n request.node.rep_call.passed\n or request.node.rep_call.outcome == \"skipped\"\n ):\n try:\n shutil.rmtree(target_path)\n except OSError: # pragma: no cover\n pass\n\n request.addfinalizer(finalize)\n ppg2.util.global_pipegraph = None\n yield np()\n\n finally:\n os.chdir(old_dir)\n\n\[email protected]\ndef create_out_dir(request):\n Path(\"out\").mkdir(exist_ok=True)\n yield\n\n\nfrom pypipegraph2.testing.fixtures import job_trace_log\n\n\ntrace_log = job_trace_log\n\n\[email protected]\ndef ppg1_compatibility_test(request):\n import sys\n import pypipegraph as ppg\n\n ppg2.replace_ppg1()\n\n if request.cls is None:\n target_path = Path(request.fspath).parent / \"run\" / (\".\" + request.node.name)\n else:\n target_path = (\n Path(request.fspath).parent\n / \"run\"\n / (request.cls.__name__ + \".\" + request.node.name)\n )\n target_path = target_path.absolute()\n old_dir = Path(os.getcwd()).absolute()\n if old_dir == target_path:\n pass\n else:\n if target_path.exists(): # pragma: no cover\n shutil.rmtree(target_path)\n\n try:\n first = [False]\n\n def np(quiet=True, **kwargs):\n if not first[0]:\n Path(target_path).mkdir(parents=True, exist_ok=True)\n os.chdir(target_path)\n Path(\"logs\").mkdir()\n Path(\"cache\").mkdir()\n Path(\"results\").mkdir()\n Path(\"out\").mkdir()\n import logging\n\n h = logging.getLogger(\"pypipegraph\")\n h.setLevel(logging.WARNING)\n first[0] = True\n\n if not \"resource_coordinator\" in kwargs:\n kwargs[\"resource_coordinator\"] = ppg.resource_coordinators.LocalSystem(\n 1, interactive=False\n )\n if not \"dump_graph\" in kwargs:\n kwargs[\"dump_graph\"] = False\n if not \"quiet\" in kwargs:\n kwargs[\"quiet\"] = quiet\n ppg.new_pipegraph(**kwargs)\n ppg.util.global_pipegraph.result_dir = Path(\"results\")\n g = ppg.util.global_pipegraph\n g.new_pipegraph = np\n return g\n\n def finalize():\n if hasattr(request.node, \"rep_setup\"):\n\n if request.node.rep_setup.passed and (\n request.node.rep_call.passed\n or request.node.rep_call.outcome == \"skipped\"\n ):\n try:\n if not hasattr(ppg.util.global_pipegraph, \"test_keep_output\"):\n if \"--profile\" not in sys.argv:\n shutil.rmtree(target_path)\n except OSError: # pragma: no cover\n pass\n\n request.addfinalizer(finalize)\n yield np()\n\n finally:\n os.chdir(old_dir)\n ppg2.unreplace_ppg1()\n", "id": "11845107", "language": "Python", "matching_score": 2.9107308387756348, "max_stars_count": 0, "path": "tests/conftest.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n Read more about conftest.py under:\n https://pytest.org/latest/plugins.html\n\"\"\"\n\nimport sys\nimport pytest # noqa:F401\nfrom pathlib import Path\nimport pypipegraph2 # noqa:F401\n\n# 
pypipegraph2.replace_ppg1()\n\nfrom pypipegraph.testing.fixtures import ( # noqa:F401\n new_pipegraph,\n both_ppg_and_no_ppg,\n no_pipegraph,\n pytest_runtest_makereport,\n) # noqa:F401\nfrom pypipegraph2.testing.fixtures import job_trace_log # noqa:F401\nfrom mbf_qualitycontrol.testing.fixtures import ( # noqa:F401\n new_pipegraph_no_qc,\n both_ppg_and_no_ppg_no_qc,\n)\nfrom mbf_genomics.testing.fixtures import clear_annotators # noqa:F401\n\n# from mbf_externals.testing.fixtures import local_store, global_store # noqa:F401\nroot = Path(__file__).parent.parent\nsys.path.append(str(root / \"src\"))\n\nfrom plotnine.tests.conftest import ( # noqa:F401\n _setup,\n _teardown, # noqa:F401\n pytest_assertrepr_compare,\n)\n\n_setup()\n\n\ndef pytest_generate_tests(metafunc):\n if \"both_ppg_and_no_ppg\" in metafunc.fixturenames:\n metafunc.parametrize(\"both_ppg_and_no_ppg\", [True, False], indirect=True)\n", "id": "5662339", "language": "Python", "matching_score": 3.768977642059326, "max_stars_count": 0, "path": "tests/conftest.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n Dummy conftest.py for mbf_align.\n\n Read more about conftest.py under:\n https://pytest.org/latest/plugins.html\n\"\"\"\n\n# import pytest\nimport sys\nimport subprocess\nimport pathlib\nfrom pypipegraph.testing.fixtures import ( # noqa:F401\n new_pipegraph,\n pytest_runtest_makereport,\n)\nfrom mbf_externals.testing.fixtures import local_store # noqa:F401\nfrom mbf_qualitycontrol.testing.fixtures import new_pipegraph_no_qc # noqa:F401\n\nroot = pathlib.Path(__file__).parent.parent\nlocal_store_path = root / \"tests\" / \"run\" / \"local_store\"\nlocal_store_path.mkdir(exist_ok=True, parents=True)\nlocal_store = local_store(local_store_path)\nprint(\"root\", root)\nsys.path.append(str(root / \"src\"))\nprint(\"the path is\", sys.path)\nsubprocess.check_call([\"python3\", \"setup.py\", \"build_ext\", \"-i\"], cwd=root)\n", "id": "4417952", "language": "Python", "matching_score": 3.785463571548462, "max_stars_count": 0, "path": "tests/conftest.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n Read more about conftest.py under:\n https://pytest.org/latest/plugins.html\n\"\"\"\n\nimport sys\n\n# import pytest\nfrom pathlib import Path\nfrom pypipegraph.testing.fixtures import ( # noqa: F401\n new_pipegraph, # noqa: F401\n pytest_runtest_makereport, # noqa: F401\n) # noqa: F401\nfrom mbf_externals.testing.fixtures import local_store # noqa:F401\n\nroot = Path(__file__).parent.parent\nsys.path.append(str(root / \"src\"))\n\nlocal_store_path = root / \"tests\" / \"run\" / \"local_store\"\nlocal_store_path.mkdir(exist_ok=True, parents=True)\nlocal_store = local_store(local_store_path)\n\nfrom mbf_genomes.testing.fixtures import mock_download, shared_prebuild # noqa: F401\n", "id": "3162891", "language": "Python", "matching_score": 1.6301814317703247, "max_stars_count": 0, "path": "tests/conftest.py" }, { "content": "import pytest\nfrom pathlib import Path\n\n\ndef _mock_get_page(url):\n import hashlib\n import mbf_externals\n\n p = (\n Path(__file__).parent\n / \".testing_download_cache\"\n / hashlib.md5(url.encode(\"utf-8\")).hexdigest()\n )\n p.parent.mkdir(exist_ok=True)\n if not p.exists():\n p.write_text(mbf_externals.util.get_page(url))\n return p.read_text()\n\n\ndef _mock_download_file_and_gunzip(url, filename):\n import shutil\n import hashlib\n import mbf_externals\n\n p = (\n Path(__file__).parent\n / \".testing_download_cache\"\n / 
hashlib.md5(url.encode(\"utf-8\")).hexdigest()\n )\n p.parent.mkdir(exist_ok=True)\n if not p.exists():\n mbf_externals.util.download_file_and_gunzip(url, p)\n return shutil.copyfile(p, filename)\n\n\[email protected]\ndef mock_download():\n import mbf_genomes\n\n org_get_page = mbf_genomes.ensembl.get_page\n org_download_file_and_gunzip = mbf_genomes.ensembl.download_file_and_gunzip\n mbf_genomes.ensembl.get_page = _mock_get_page\n mbf_genomes.ensembl.download_file_and_gunzip = _mock_download_file_and_gunzip\n yield\n mbf_genomes.ensembl.get_page = org_get_page\n mbf_genomes.ensembl.download_file_and_gunzip = org_download_file_and_gunzip\n\n\nfirst_shared_prebuild = True\n\n\[email protected]()\ndef shared_prebuild():\n global first_shared_prebuild\n p = Path(\"../prebuild\")\n if first_shared_prebuild:\n if p.exists():\n import shutil\n\n shutil.rmtree(p)\n p.mkdir()\n first_shared_prebuild = False\n from mbf_externals import PrebuildManager\n\n return PrebuildManager(p)\n\n\nall = [shared_prebuild, mock_download]\n", "id": "2956293", "language": "Python", "matching_score": 1.902274250984192, "max_stars_count": 0, "path": "src/mbf_genomes/testing/fixtures.py" }, { "content": "import pytest\nfrom mbf_genomes import EnsemblGenome\nfrom mbf_externals import PrebuildManager\nfrom mbf_externals.aligners.subread import Subread\nimport pypipegraph as ppg\nfrom pypipegraph.util import checksum_file\nfrom unittest.mock import patch\n\n\[email protected](\"new_pipegraph\")\nclass TestEnsembl:\n def test_download(self, new_pipegraph, mock_download, shared_prebuild):\n species = \"Ashbya_gossypii\" # the smallest eukaryotic species at the time of writing this at 2.8 mb\n g = EnsemblGenome(species, \"41\", prebuild_manager=shared_prebuild)\n\n def shorten_genome_fasta(output_path):\n with open(g.find_file(\"genome.fasta\")) as op:\n head = op.read(1024 * 100)\n (output_path / \"test.fasta\").write_text(head)\n\n test_fasta_job = g.prebuild_manager.prebuild(\n f\"ensembl/{g.species}_{g.revision}/test_fasta\",\n \"1\",\n [],\n [\"test.fasta\"],\n shorten_genome_fasta,\n )\n test_fasta_job.depends_on(g.download_genome())\n g._prebuilds.append(test_fasta_job)\n\n subread = Subread(version=\"1.6.3\")\n index = g.build_index(subread, \"test.fasta\")\n subread_old = Subread(version=\"1.4.3-p1\")\n index_old = g.build_index(subread_old, \"test.fasta\")\n\n new_pipegraph.run()\n # note that these are not the checksums from CHECKSUMS files (those are fore\n # the gziped variants, we keep them ungziped and let the filesystem handle\n # the gzip, since we can't rely on the downstream reading gzip...\n assert (\n checksum_file(g.find_file(\"genome.fasta\"))\n == \"584a734589964a654c7c1dc23b0167ab\"\n )\n assert (\n checksum_file(g.find_file(\"cdna.fasta\"))\n == \"3fc1f19ab829573169cb2488abe39211\"\n )\n assert (\n checksum_file(g.find_file(\"genes.gtf\"))\n == \"8bdeec9b3db5278668dbff8b34e9d93b\"\n )\n assert (\n checksum_file(g.find_file(\"genes.gtf\"))\n == \"8bdeec9b3db5278668dbff8b34e9d93b\"\n )\n assert (\n checksum_file(g.find_file(\"pep.fasta\"))\n == \"9580fd44832d419c38469d657f6e2484\"\n )\n with pytest.raises(OSError):\n g.find_file(\"no such file\")\n assert index.name_file(\"subread_index.reads\").exists()\n assert index.name_file(\"subread_index.files\").exists()\n assert index.name_file(\"subread_index.00.b.array\").exists()\n assert index_old.name_file(\"subread_index.reads\").exists()\n assert index_old.name_file(\"subread_index.files\").exists()\n assert 
index_old.name_file(\"subread_index.00.b.array\").exists()\n assert index.name_file(\"subread_index.reads\") != index_old.name_file(\n \"subread_index.reads\"\n )\n assert g.find_file(\"test.fasta.md5sum\").exists()\n with pytest.raises(OSError):\n assert g.find_file(\"test.fasta.md5sum.nosuchfile\").exists()\n assert g.find_prebuild(\"test.fasta\") is test_fasta_job\n with pytest.raises(OSError):\n assert g.find_prebuild(\"test.fasta.md5sum.nosuchfile\").exists()\n assert g.find_file(\"genome.fasta.fai\").exists()\n assert g.find_file(\"cdna.fasta.fai\").exists()\n\n new_pipegraph.new_pipegraph()\n pb = PrebuildManager(shared_prebuild.prebuilt_path)\n g = EnsemblGenome(species, \"41\", prebuild_manager=pb)\n test_fasta_job = g.prebuild_manager.prebuild(\n f\"ensembl/{g.species}_{g.revision}/test_fasta\",\n \"1\",\n [],\n [\"test.fasta\"],\n shorten_genome_fasta,\n )\n g._prebuilds.append(test_fasta_job)\n\n subread_intermediate = Subread(version=\"1.5.0\")\n index_intermediate = g.build_index(subread_intermediate, \"test.fasta\")\n assert index_intermediate.name_file(\n \"subread_index.reads\"\n ) == index_old.name_file(\"subread_index.reads\")\n index_genome = g.build_index(subread_intermediate)\n assert \"/genome/\" in str(index_genome.filenames[0])\n\n assert g.get_chromosome_lengths() == {\n \"IV\": 1_467_287,\n \"MT\": 23564,\n \"V\": 1_519_140,\n \"III\": 907_494,\n \"II\": 870_771,\n \"VII\": 1_800_949,\n \"I\": 693_414,\n \"VI\": 1_836_693,\n }\n\n assert g.get_genome_sequence(\"VI\", 20, 30) == \"ACCGCTGAGA\"\n assert (\n g.get_cdna_sequence(\"EFAGOT00000000349\")\n == \"GCTCGCGTGGCGTAATGGCAACGCGTCTGACTTCTAATCAGAAGATTGTGGGTTCGACCC\"\n \"CCACCGTGAGTG\"\n )\n assert (\n g.get_protein_sequence(\"AAS53315\")\n == \"MFSTRICSLLARPFMVPIVPRFGSALLQKPLNGVVVPQFTRGFKVRTSVKKFCAHCYIVR\"\n \"RKGRVYVYCKSNNKHKQRQG\"\n )\n assert (\n g.genetic_code.translate_dna(g.get_cds_sequence(\"AAS53315\"))\n == \"MFSTRICSLLARPFMVPIVPRFGSALLQKPLNGVVVPQFTRGFKVRTSVKKFCAHCYIVR\"\n \"RKGRVYVYCKSNNKHKQRQG\"\n )\n\n assert (\n g.genetic_code.translate_dna(\n g.get_cds_sequence(\"AAS53315\", g.df_proteins.loc[\"AAS53315\"])\n )\n == \"MFSTRICSLLARPFMVPIVPRFGSALLQKPLNGVVVPQFTRGFKVRTSVKKFCAHCYIVR\"\n \"RKGRVYVYCKSNNKHKQRQG\"\n )\n with pytest.raises(ValueError):\n g.get_cds_sequence(\"AAS53315\", g.df_proteins.loc[\"AAS53316\"])\n\n assert (\n g.df_genes_meta.loc[\"AGOS_ADL186C\"][\"description\"]\n == \"Restriction of telomere capping protein 1 [Source:UniProtKB/Swiss-Prot;Acc:Q75AV6]\"\n )\n\n def test_download_jobs_called_init(\n self, new_pipegraph, mock_download, shared_prebuild\n ):\n species = \"Ashbya_gossypii\" # the smallest eukaryotic species at the time of writing this at 2.8 mb\n g = EnsemblGenome(species, \"41\", prebuild_manager=shared_prebuild)\n g.find_prebuild(\"genome.fasta\") # this is the actual test.\n\n def test_species_formating(self, shared_prebuild):\n species = \"ashbya_gossypii\" # the smallest eukaryotic species at the time of writing this at 2.8 mb\n with pytest.raises(ValueError):\n EnsemblGenome(species, \"41\", prebuild_manager=shared_prebuild)\n\n def test_unknown_species_raises(self, mock_download, shared_prebuild):\n species = \"Unknown_unknown\" # the smallest eukaryotic species at the time of writing this at 2.8 mb\n EnsemblGenome(species, \"41\", prebuild_manager=shared_prebuild).download_genome()\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n\n def test_all_genes(self, mock_download, shared_prebuild):\n g = EnsemblGenome(\"Ustilago_maydis\", 33, 
shared_prebuild)\n ppg.run_pipegraph()\n\n df = g.df_genes\n df2 = g.df_genes # caching\n assert len(df) == 6910 + 4 # from the a2 locus\n assert df is df2\n assert df.loc[\"UMAG_12015\"].strand == 1\n assert df.loc[\"UMAG_12015\"].tss == 4370\n assert df.loc[\"UMAG_12015\"].tes == 5366\n assert df.loc[\"UMAG_12015\"].transcript_stable_ids == (\"KIS66832\",)\n\n assert df.loc[\"UMAG_00663\"].strand == -1\n assert df.loc[\"UMAG_00663\"].tss == 1_947_590\n assert df.loc[\"UMAG_00663\"].tes == 1_945_040\n assert df.loc[\"UMAG_00663\"].transcript_stable_ids == (\"KIS72250\",)\n\n assert df.loc[\"UMAG_03168\"].transcript_stable_ids == (\"KIS68597\", \"KIS68596\")\n assert df[\"chr\"].dtype.name == \"category\"\n assert df[\"biotype\"].dtype.name == \"category\"\n\n def test_all_transcripts(self, mock_download, shared_prebuild):\n g = EnsemblGenome(\"Ustilago_maydis\", 33, shared_prebuild)\n ppg.run_pipegraph()\n\n df = g.df_transcripts\n assert \"gene_stable_id\" in df.columns\n assert len(df) == 6928 + 4 # from the a2 locus\n assert df[\"chr\"].dtype.name == \"category\"\n assert df[\"biotype\"].dtype.name == \"category\"\n assert df.loc[\"KIS71021\"].chr == \"2\"\n assert df.loc[\"KIS71021\"].strand == 1\n assert df.loc[\"KIS71021\"].start == 354_742\n assert df.loc[\"KIS71021\"].stop == 356_690\n assert df.loc[\"KIS71021\"].gene_stable_id == \"UMAG_12118\"\n assert df.loc[\"KIS71021\"].biotype == \"protein_coding\"\n assert df.loc[\"KIS71021\"].exons == ((354_742, 354_936), (355_222, 356_690))\n assert df.loc[\"KIS71021\"].exon_stable_ids == (\"KIS71021-1\", \"KIS71021-2\")\n\n def test_df_exons(self, mock_download, shared_prebuild):\n g = EnsemblGenome(\"Ustilago_maydis\", 33, shared_prebuild)\n ppg.run_pipegraph()\n ppg.util.global_pipegraph.test_keep_output = True\n ppg.util.global_pipegraph.dump_runtimes(\"logs/runtimes.txt\")\n exon_count = sum([len(x) for x in g.df_transcripts[\"exons\"]])\n df_exons = g.df_exons\n assert len(df_exons) > 0\n assert len(g.df_exons) == exon_count\n assert hasattr(type(g).df_exons, \"__call__\")\n\n def test_all_proteins(self, mock_download, shared_prebuild):\n g = EnsemblGenome(\"Ustilago_maydis\", 33, shared_prebuild)\n ppg.run_pipegraph()\n\n df = g.df_proteins\n assert df.strand.isin([1, -1]).all()\n\n def test_transcript_ids(self, mock_download, shared_prebuild):\n # test that ustilago has at least one gene with multilpe transcripts\n g = EnsemblGenome(\"Ustilago_maydis\", 33, shared_prebuild)\n ppg.run_pipegraph()\n\n df = g.df_genes\n assert (df.transcript_stable_ids.apply(lambda x: len(x)) > 1).any()\n\n def test_multiple_exon_transcripts(self, mock_download, shared_prebuild):\n # test that ustilago has at least one transcript with multiple exons\n g = EnsemblGenome(\"Ustilago_maydis\", 33, shared_prebuild)\n ppg.run_pipegraph()\n\n df = g.df_transcripts\n print(df.exons)\n assert (df.exons.apply(lambda x: len(x)) > 1).any()\n\n def test_get_additional_gene_gtfs(self, mock_download, shared_prebuild):\n g = EnsemblGenome(\"Ashbya_gossypii\", 33, shared_prebuild)\n assert len(g.get_additional_gene_gtfs()) == 0\n\n g = EnsemblGenome(\"Homo_sapiens\", 74, shared_prebuild)\n assert \"ribosomal_genes_grch37\" in g.get_additional_gene_gtfs()[0].name\n assert g.get_additional_gene_gtfs()[0].exists()\n g = EnsemblGenome(\"Homo_sapiens\", 75, shared_prebuild)\n assert \"ribosomal_genes_grch38\" in g.get_additional_gene_gtfs()[0].name\n assert g.get_additional_gene_gtfs()[0].exists()\n g = EnsemblGenome(\"Mus_musculus\", 68, shared_prebuild)\n assert 
\"ribosomal_genes_mm10\" in g.get_additional_gene_gtfs()[0].name\n assert g.get_additional_gene_gtfs()[0].exists()\n g = EnsemblGenome(\"Mus_musculus\", 67, shared_prebuild)\n assert len(g.get_additional_gene_gtfs()) == 0\n\n def test_get_additional_gene_gtfs_land_in_df_genes(\n self, mock_download, shared_prebuild\n ):\n g = EnsemblGenome(\"Ustilago_maydis\", 33, shared_prebuild)\n assert len(g.get_additional_gene_gtfs()) == 1\n ppg.run_pipegraph()\n\n print(g.df_genes.chr.unique())\n assert \"A2_pra2\" in g.df_genes.index\n assert \"A2_pra2.1\" in g.df_transcripts.index\n\n def test_genes_iterator(self, mock_download, shared_prebuild):\n g = EnsemblGenome(\"Ashbya_gossypii\", 41, shared_prebuild)\n ppg.run_pipegraph()\n genes = list(g.genes.values())\n assert len(genes) == len(g.df_genes)\n assert set([x.gene_stable_id for x in genes]) == set(g.df_genes.index)\n\n def test_transcript_iterator(self, mock_download, shared_prebuild):\n g = EnsemblGenome(\"Ashbya_gossypii\", 41, shared_prebuild)\n ppg.run_pipegraph()\n transcripts = list(g.transcripts.values())\n assert len(transcripts) == len(g.df_transcripts)\n assert set([x.transcript_stable_id for x in transcripts]) == set(\n g.df_transcripts.index\n )\n\n def test_outside_of_ppg_after_download(self, mock_download, shared_prebuild):\n species = \"Ashbya_gossypii\" # the smallest eukaryotic species at the time of writing this at 2.8 mb\n g = EnsemblGenome(species, \"41\", prebuild_manager=shared_prebuild)\n ppg.run_pipegraph()\n len_genes = len(g.df_genes)\n len_transcripts = len(g.df_transcripts)\n len_proteins = len(g.df_proteins)\n assert len_genes > 0\n assert len_transcripts > 0\n assert len_proteins > 0\n ppg.util.global_pipegraph = None\n g = EnsemblGenome(species, \"41\", prebuild_manager=shared_prebuild)\n assert len_genes == len(g.df_genes)\n assert len_transcripts == len(g.df_transcripts)\n assert len_proteins == len(g.df_proteins)\n\n def test_get_true_chromosomes(self, mock_download, shared_prebuild):\n # we need something with contigs and chromosomes\n g = EnsemblGenome(\"Ustilago_maydis\", 33, shared_prebuild)\n ppg.run_pipegraph()\n should = [\n \"1\",\n \"2\",\n \"3\",\n \"4\",\n \"5\",\n \"6\",\n \"7\",\n \"8\",\n \"9\",\n \"10\",\n \"11\",\n \"12\",\n \"13\",\n \"14\",\n \"15\",\n \"16\",\n \"17\",\n \"18\",\n \"19\",\n \"20\",\n \"21\",\n \"22\",\n \"23\",\n \"U37796.1\", # it is a true chromosome if there's chromosome: in the fasta description\n ]\n all_contigs = should + [\n \"um_scaf_contig_1.256\",\n \"um_scaf_contig_1.265\",\n \"um_scaf_contig_1.271\",\n \"um_scaf_contig_1.264\",\n ]\n assert set(g.get_true_chromosomes()) == set(should)\n assert set(g.get_chromosome_lengths()) == set(should + all_contigs)\n\n def test_get_true_chromosomes_genome_without_chromosomes(\n self, mock_download, shared_prebuild\n ):\n # we need something with contigs and chromosomes\n g = EnsemblGenome(\"Giardia_lamblia\", 43, shared_prebuild)\n ppg.run_pipegraph()\n should = [\n \"ctg02_1\",\n \"ctg02_2\",\n \"ctg02_3\",\n \"ctg02_4\",\n \"ctg02_5\",\n \"ctg02_6\",\n \"ctg02_7\",\n \"ctg02_8\",\n \"ctg02_9\",\n \"ctg02_10\",\n \"ctg02_11\",\n \"ctg02_12\",\n \"ctg02_13\",\n \"ctg02_14\",\n \"ctg02_15\",\n \"ctg02_16\",\n \"ctg02_17\",\n \"ctg02_18\",\n \"ctg02_19\",\n \"ctg02_20\",\n \"ctg02_21\",\n \"ctg02_22\",\n \"ctg02_23\",\n \"ctg02_24\",\n \"ctg02_25\",\n \"ctg02_26\",\n \"ctg02_27\",\n \"ctg02_28\",\n \"ctg02_29\",\n \"ctg02_30\",\n \"ctg02_31\",\n \"ctg02_32\",\n \"ctg02_33\",\n \"ctg02_34\",\n \"ctg02_35\",\n \"ctg02_36\",\n 
\"ctg02_37\",\n \"ctg02_38\",\n \"ctg02_39\",\n \"ctg02_40\",\n \"ctg02_41\",\n \"ctg02_42\",\n \"ctg02_43\",\n \"ctg02_44\",\n \"ctg02_45\",\n \"ctg02_46\",\n \"ctg02_47\",\n \"ctg02_48\",\n \"ctg02_49\",\n \"ctg02_50\",\n \"ctg02_51\",\n \"ctg02_52\",\n \"ctg02_53\",\n \"ctg02_54\",\n \"ctg02_55\",\n \"ctg02_56\",\n \"ctg02_57\",\n \"ctg02_58\",\n \"ctg02_59\",\n \"ctg02_60\",\n \"ctg02_61\",\n \"ctg02_62\",\n \"ctg02_63\",\n \"ctg02_64\",\n \"ctg02_65\",\n \"ctg02_66\",\n \"ctg02_67\",\n \"ctg02_68\",\n \"ctg02_69\",\n \"ctg02_70\",\n \"ctg02_71\",\n \"ctg02_72\",\n \"ctg02_73\",\n \"ctg02_74\",\n \"ctg02_75\",\n \"ctg02_76\",\n \"ctg02_77\",\n \"ctg02_78\",\n \"ctg02_79\",\n \"ctg02_80\",\n \"ctg02_81\",\n \"ctg02_82\",\n \"ctg02_83\",\n \"ctg02_84\",\n \"ctg02_85\",\n \"ctg02_86\",\n \"ctg02_87\",\n \"ctg02_88\",\n \"ctg02_89\",\n \"ctg02_90\",\n \"ctg02_91\",\n \"ctg02_92\",\n \"ctg02_93\",\n \"ctg02_94\",\n \"ctg02_95\",\n \"ctg02_96\",\n \"ctg02_97\",\n \"ctg02_99\",\n \"ctg02_98\",\n \"ctg02_100\",\n \"ctg02_101\",\n \"ctg02_102\",\n \"ctg02_103\",\n \"ctg02_104\",\n \"ctg02_105\",\n \"ctg02_106\",\n \"ctg02_107\",\n \"ctg02_108\",\n \"ctg02_109\",\n \"ctg02_110\",\n \"ctg02_111\",\n \"ctg02_112\",\n \"ctg02_113\",\n \"ctg02_114\",\n \"ctg02_115\",\n \"ctg02_116\",\n \"ctg02_117\",\n \"ctg02_118\",\n \"ctg02_120\",\n \"ctg02_119\",\n \"ctg02_121\",\n \"ctg02_122\",\n \"ctg02_123\",\n \"ctg02_124\",\n \"ctg02_125\",\n \"ctg02_127\",\n \"ctg02_126\",\n \"ctg02_129\",\n \"ctg02_128\",\n \"ctg02_130\",\n \"ctg02_131\",\n \"ctg02_132\",\n \"ctg02_133\",\n \"ctg02_134\",\n \"ctg02_135\",\n \"ctg02_136\",\n \"ctg02_137\",\n \"ctg02_138\",\n \"ctg02_139\",\n \"ctg02_140\",\n \"ctg02_141\",\n \"ctg02_142\",\n \"ctg02_144\",\n \"ctg02_143\",\n \"ctg02_145\",\n \"ctg02_146\",\n \"ctg02_147\",\n \"ctg02_148\",\n \"ctg02_149\",\n \"ctg02_151\",\n \"ctg02_150\",\n \"ctg02_152\",\n \"ctg02_153\",\n \"ctg02_155\",\n \"ctg02_154\",\n \"ctg02_157\",\n \"ctg02_156\",\n \"ctg02_158\",\n \"ctg02_159\",\n \"ctg02_160\",\n \"ctg02_161\",\n \"ctg02_162\",\n \"ctg02_163\",\n \"ctg02_164\",\n \"ctg02_165\",\n \"ctg02_167\",\n \"ctg02_168\",\n \"ctg02_166\",\n \"ctg02_169\",\n \"ctg02_170\",\n \"ctg02_171\",\n \"ctg02_172\",\n \"ctg02_174\",\n \"ctg02_176\",\n \"ctg02_175\",\n \"ctg02_178\",\n \"ctg02_177\",\n \"ctg02_179\",\n \"ctg02_180\",\n \"ctg02_181\",\n \"ctg02_183\",\n \"ctg02_182\",\n \"ctg02_184\",\n \"ctg02_185\",\n \"ctg02_186\",\n \"ctg02_187\",\n \"ctg02_188\",\n \"ctg02_189\",\n \"ctg02_190\",\n \"ctg02_191\",\n \"ctg02_193\",\n \"ctg02_192\",\n \"ctg02_194\",\n \"ctg02_196\",\n \"ctg02_195\",\n \"ctg02_197\",\n \"ctg02_198\",\n \"ctg02_200\",\n \"ctg02_199\",\n \"ctg02_201\",\n \"ctg02_202\",\n \"ctg02_203\",\n \"ctg02_204\",\n \"ctg02_206\",\n \"ctg02_208\",\n \"ctg02_207\",\n \"ctg02_205\",\n \"ctg02_209\",\n \"ctg02_210\",\n \"ctg02_211\",\n \"ctg02_212\",\n \"ctg02_214\",\n \"ctg02_213\",\n \"ctg02_215\",\n \"ctg02_216\",\n \"ctg02_220\",\n \"ctg02_218\",\n \"ctg02_221\",\n \"ctg02_217\",\n \"ctg02_219\",\n \"ctg02_223\",\n \"ctg02_222\",\n \"ctg02_224\",\n \"ctg02_225\",\n \"ctg02_226\",\n \"ctg02_227\",\n \"ctg02_228\",\n \"ctg02_229\",\n \"ctg02_231\",\n \"ctg02_230\",\n \"ctg02_232\",\n \"ctg02_234\",\n \"ctg02_235\",\n \"ctg02_233\",\n \"ctg02_237\",\n \"ctg02_238\",\n \"ctg02_236\",\n \"ctg02_239\",\n \"ctg02_241\",\n \"ctg02_240\",\n \"ctg02_242\",\n \"ctg02_245\",\n \"ctg02_243\",\n \"ctg02_246\",\n \"ctg02_244\",\n \"ctg02_247\",\n \"ctg02_249\",\n \"ctg02_250\",\n 
\"ctg02_248\",\n \"ctg02_252\",\n \"ctg02_251\",\n \"ctg02_253\",\n \"ctg02_254\",\n \"ctg02_255\",\n \"ctg02_256\",\n \"ctg02_257\",\n \"ctg02_258\",\n \"ctg02_259\",\n \"ctg02_260\",\n \"ctg02_173\",\n \"ctg02_261\",\n \"ctg02_262\",\n \"ctg02_263\",\n \"ctg02_266\",\n \"ctg02_265\",\n \"ctg02_268\",\n \"ctg02_264\",\n \"ctg02_267\",\n \"ctg02_269\",\n \"ctg02_271\",\n \"ctg02_270\",\n \"ctg02_273\",\n \"ctg02_272\",\n \"ctg02_274\",\n \"ctg02_275\",\n \"ctg02_276\",\n \"ctg02_277\",\n \"ctg02_278\",\n \"ctg02_279\",\n \"ctg02_280\",\n \"ctg02_282\",\n \"ctg02_281\",\n \"ctg02_283\",\n \"ctg02_284\",\n \"ctg02_285\",\n \"ctg02_286\",\n \"ctg02_287\",\n \"ctg02_289\",\n \"ctg02_288\",\n \"ctg02_290\",\n \"ctg02_291\",\n \"ctg02_292\",\n \"ctg02_293\",\n \"ctg02_294\",\n \"ctg02_295\",\n \"ctg02_296\",\n \"ctg02_297\",\n \"ctg02_298\",\n \"ctg02_299\",\n \"ctg02_300\",\n \"ctg02_301\",\n \"ctg02_302\",\n \"ctg02_303\",\n \"ctg02_304\",\n \"ctg02_305\",\n \"ctg02_306\",\n ]\n\n assert set(g.get_true_chromosomes()) == set(should)\n assert set(g.get_true_chromosomes()) == set(g.get_chromosome_lengths())\n\n def test_newest_gene_ids(self, new_pipegraph, mock_download, shared_prebuild):\n # the smallest eukaryotic species at the time of writing this at 2.8 mb\n g = EnsemblGenome(\"Ustilago_maydis\", 33, shared_prebuild)\n ppg.run_pipegraph()\n assert g.newest_stable_ids_for(\"UM05644\") == set([\"UMAG_05644\"])\n assert g.newest_stable_ids_for(\"UMAG_05629\") == set([\"UMAG_05629\"])\n assert g.newest_stable_ids_for(\"UM06501P0\") == set([])\n assert g.newest_stable_ids_for(\"UM04933T0\") == set([])\n with pytest.raises(KeyError):\n g.newest_stable_ids_for(\"no_such_gene\")\n\n def test_get_external_dbs(self, new_pipegraph, mock_download, shared_prebuild):\n g = EnsemblGenome(\"Ustilago_maydis\", 33, shared_prebuild)\n ppg.run_pipegraph()\n assert g.get_external_dbs() == [\n \"BRENDA\",\n \"BioCyc\",\n \"ChEMBL\",\n \"ENA_FEATURE_GENE\",\n \"ENA_FEATURE_PROTEIN\",\n \"ENA_FEATURE_TRANSCRIPT\",\n \"ENA_GENE\",\n \"Ensembl_Fungi\",\n \"GO\",\n \"GOA\",\n \"IntAct\",\n \"IntEnz\",\n \"Interpro\",\n \"KEGG\",\n \"KEGG_Enzyme\",\n \"MEROPS\",\n \"MINT\",\n \"MetaCyc\",\n \"NCBI_TAXONOMY\",\n \"PHI\",\n \"PHIE\",\n \"PHIP\",\n \"PRIDE\",\n \"PUBMED\",\n \"PeroxiBase\",\n \"Reactome\",\n \"SWISS_MODEL\",\n \"UniParc\",\n \"UniPathway\",\n \"Uniprot/SPTREMBL\",\n \"Uniprot/SWISSPROT\",\n \"protein_id\",\n ]\n\n def test_external_db_mapping(self, new_pipegraph, mock_download, shared_prebuild):\n # the smallest eukaryotic species at the time of writing this at 2.8 mb\n g = EnsemblGenome(\"Ustilago_maydis\", 33, shared_prebuild)\n ppg.run_pipegraph()\n goa = g.get_external_db_to_gene_id_mapping(\"GOA\")\n assert goa[\"A0A0D1CJ64\"] == set([\"UMAG_05734\"])\n with pytest.raises(KeyError):\n g.get_external_db_to_gene_id_mapping(\"GOAnosuchthing\")\n\n def test_external_db_mapping_transcript(\n self, new_pipegraph, mock_download, shared_prebuild\n ):\n # the smallest eukaryotic species at the time of writing this at 2.8 mb\n g = EnsemblGenome(\"Ustilago_maydis\", 33, shared_prebuild)\n ppg.run_pipegraph()\n ena = g.get_external_db_to_gene_id_mapping(\"ENA_FEATURE_TRANSCRIPT\")\n assert ena[\"CM003155.1:CDS:9690..12623\"] == set([\"UMAG_05624\"])\n\n def test_external_db_mapping_translation(\n self, new_pipegraph, mock_download, shared_prebuild\n ):\n # the smallest eukaryotic species at the time of writing this at 2.8 mb\n g = EnsemblGenome(\"Ustilago_maydis\", 33, shared_prebuild)\n ppg.run_pipegraph()\n up 
= g.get_external_db_to_gene_id_mapping(\"Uniprot/SWISSPROT\")\n assert up[\"P30598\"] == set([\"UMAG_10718\"])\n upp = g.get_external_db_to_translation_id_mapping(\"Uniprot/SWISSPROT\")\n assert upp[\"P30598\"] == set([\"KIS66849.N\"])\n\n @patch(\"pypipegraph.util.checksum_file\", return_value=\"5\")\n def test_get_canonical_ids(self, new_pipegraph, mock_download, shared_prebuild):\n g = EnsemblGenome(\"Homo_sapiens\", 96, shared_prebuild)\n g._pb_find_server().callback()\n g._pb_download_sql_table(\"gene\").callback()\n g._pb_download_sql_table(\"alt_allele\").callback()\n g._pb_download_sql_table(\"seq_region\").callback()\n g._pb_download_sql_table(\"seq_region_attrib\").callback()\n g._pb_download_sql_table(\"attrib_type\").callback()\n g._pb_download_gtf().callback()\n g._pb_download_sql_table_definitions().callback()\n g.job_genes().callback()\n g.job_transcripts().callback()\n assert g.name_to_canonical_id(\"DSEL\") == \"ENSG00000171451\"\n assert g.name_to_canonical_id(\"THEMIS\") == \"ENSG00000172673\"\n # test the breakage\n with pytest.raises(ValueError):\n g.name_to_canonical_id(\"SOD2\")\n with pytest.raises(ValueError):\n g.name_to_canonical_id(\"IGF2\")\n with pytest.raises(ValueError):\n g.name_to_canonical_id(\"ABCF2\")\n with pytest.raises(ValueError):\n g.name_to_canonical_id(\"TBCE\")\n\n assert g.name_to_canonical_id(\"SOD2\", True) == \"ENSG00000112096\"\n assert g.name_to_canonical_id(\"IGF2\", True) == \"ENSG00000167244\"\n assert g.name_to_canonical_id(\"ABCF2\", True) == \"ENSG00000033050\"\n assert g.name_to_canonical_id(\"TBCE\", True) == \"ENSG00000284770\"\n # assert g.name_to_canonical_id('HLA-DRB3') == 'ENSG00000230463'\n\n def test_same_same(self, new_pipegraph):\n g = EnsemblGenome(\"Ustilago_maydis\", 33)\n g2 = EnsemblGenome(\"Ustilago_maydis\", 33)\n assert g is g2\n new_pipegraph.new_pipegraph()\n g3 = EnsemblGenome(\"Ustilago_maydis\", 33)\n assert g is not g3\n\n def test_additional_fasta(self, mock_download, shared_prebuild):\n g = EnsemblGenome(\"Ustilago_maydis\", 33, shared_prebuild)\n ppg.run_pipegraph()\n seq = g.get_genome_sequence(\"U37796.1\", 0, 100)\n assert (\n seq\n == (\n \"taatcgtgaattgagctaggggcgccaagttacgtggcaaaagcgggctgactggcggcgaagatgtgt\"\n \"tggtctgcacctgagttcacgaacctgagac\"\n ).upper()\n )\n\n def test_transcript_sequence(self, shared_prebuild):\n g = EnsemblGenome(\"Ustilago_maydis\", 33, shared_prebuild)\n ppg.run_pipegraph()\n\n assert g.transcripts[\"KIS71708\"].mrna == (\n \"ATGAACGTCAAGCTTGCGCCGCCCGACGAGATGAACGGCGAGATCATTGCCATCCTCATT\"\n \"CTCTTCAACTTTCATTGCCGCGTTCAACTTAATGACGAGGCACAAGCCGCTCGCAAAAAG\"\n \"TTGCTCCTCTTCCTCATGGACAAAATTTACCAGACACGCGCACCTGCGCCTTCGTATGCA\"\n \"GCATTTGCAGACGAGCTCGGGGCCGCGATGGAAGGAGACGAGGACAATCACCGTATCATG\"\n \"ACCGACTACCTCGAGACCATGCTCGATCTGCTCCATGTTCCCGATGGTTTGACCAAGCTG\"\n \"TTCAACGAAAAGCTGAACAGAATCCTGCCTAGCTACGAGCCCATGGGATTGCTCAATGCC\"\n \"ACCGACATCTTCTTCGAGCGAAGGTCCTTCTTCGGCCTCTTTTTCCGTCGCATCAAGTTG\"\n \"ATCTTTGACAGTCTGGATTTGCAAACAAGGGATCACTTGACAATAGCTGCGCGCGCATGG\"\n \"AAAGAGGGACAGGCTTTCGACTTGAATGACAGCGAGCTCTCCGGTATCGACTCGGCGCAT\"\n \"TTGCTCGATGCAAGGCTTGGCGCGTTCCGAGACTACCAGCTTGGCTTACTCCGCGGCGAC\"\n \"TATACCATGGCAAAGGACAACATGGAAAGGTTCTTTGACTTTTACGCTCCTGGCGCTGAC\"\n \"CGCGAGCTTCATCAACACACGCTTCTTCATCTAGCCGCTTTCCATGTCAGGACTGAGAGC\"\n \"TTCTGCGCAGCCAAAGCAGCCTTGGACGAGGCTATAAGCCTTGCGCGGTCGGCAAACGAC\"\n \"AGCGAGTGCATCTCTGCCTGCGAGAGCCTCATGCAATACATCCAAGGTGTGGGCACTAGC\"\n \"ACCCTCGCTTCTGTTCCGGGAGCAGCCAATGTCTCGACGAACGAGAGGCAACGCCGACCT\"\n 
\"ATCTATGACGCGGTGTGGCAGACCCGTTGCAGCCTAGCTAAGGGCCGCTCAGCAATCGAG\"\n \"ACACTTCAAGACTTGGAGGATTGTGCTGCACCTTCTCAGCCTTCTAGGGACTCGTTGGCG\"\n \"GCCAGCGAGGCCTCCTTGCAGCTCATTCAGGATGCCAAACGACGTCTCGGCAGGGATACA\"\n \"TTTCCAAGCGACGGTGAAGTCGCGCGTCTTTGGGATACCTTGGGCCAGCCAGCGCTTGCT\"\n \"GACGTCTATCGGAATCGGAACATTGCAGGTGCAAATGGTCGAGCACGCTCAGCTTTGCAA\"\n \"GAAGAAAGCCGAATCGATTGCATCTGTCACAAAGCCAAAACG\"\n \"CTTGCACGTGCGGGAGAGTACGAAGCGGCGCTCAGCTTGCTGGTCTCACCGGCCACTTTC\"\n \"GAAGCGATCTCATTCTGCGAGTACACGATCTGGCACAGGGCAATCGCTGAGGTCCTGCGC\"\n \"TTACGAGCAACGCGCAGACAAGATGTAGCGACCTTGCAATTGTTGGCAGAGAGCCTGCCG\"\n \"GGTTCCGACCAGGCTCATGTCGATCGCGACGTTGAAGATGCTGTCGATTCGCCGAGCGCT\"\n \"TTGGTCGAGCTCGCGTTGCGCTGCCTCAAGTCCGGAAAATCGAGTGCGACTGAGAAGAAT\"\n \"TTCGGATTGCGATTCAAAGAGTACAGCCGACTTACAGCAGAGCAAGTGGCAGAAGCGCTG\"\n \"CTGAACAAAGCTGCGATAAGGATGAAACGTGGTCGACCGATCCTCGCACTGATGCCTACA\"\n \"CTGGCAAGTCTATCGATTGCCAAGGACATGGAGTGCAATCGGTTGATCCTCAATGCAAGA\"\n \"GTTCAGCTAGCTGAGGCATTGGGGCTTCAGCTCAAGATGCCAGATGGAGCACGGCTGCTC\"\n \"TTGGAATCGGATCTGCCCAACTGCCTGTCGAGTGATGATGTCGAGCTGAGAGCGCGCGCG\"\n \"AAGTGGACGTATGCACGGATGCTGCTCTCATGCTCGGACAAGCAAGAGCGCGAAGATCTG\"\n \"ACCAAGGTGCTCTACTGGTTGCGAGAAGCCGAAAGAG\"\n \"ACGCACAACAGGCCGAATGCCTCGAGCTTCACACGCAGATTCTCTACTATATGTTGCGGC\"\n \"TGCACCACCACCTGGGCGATGACAGAGAAACAATCTCTGTGACAGCTCGTTTAGATACAG\"\n \"TAGAGCGCGCTTGGACTCGCTTGGATGCTTCGCAGGATCAAGCCCATCTGCAACAGGTTC\"\n \"GCCAAATCCTAGATATCGTTGTCTCTGTCGCAGGTTATGTGGCTAGCGGAGAAGCTGCGA\"\n \"ACAAGCGTCTTGAAATGGTCTAG\"\n )\n\n assert (\n g.transcripts[\"KIS71709\"].mrna == \"ATGGCATTTTCAGAAGATACCAAG\"\n \"GAGCGAATCATTAAGGCGGTCGATGTCTCCAAGACTTTGTTGCATTACGGCTG\"\n \"GGTGCCTTTCGTTCTTTACATCGGCTTCACCCGAAGCACGCCCCAGCCTAGCTTGATCAA\"\n \"GCTCATCAGTCCTCTCGCATGA\"\n )\n assert g.transcripts[\"KIS71709\"].mrna == g.transcripts[\"KIS71709\"].cdna\n", "id": "8302438", "language": "Python", "matching_score": 5.272249698638916, "max_stars_count": 0, "path": "tests/test_ensembl.py" }, { "content": "import re\nfrom pathlib import Path\nimport pandas as pd\nimport mbf_externals\nfrom mbf_externals.util import (\n download_file_and_gunzip,\n # download_file_and_gzip,\n download_file,\n lazy_property,\n get_page,\n lazy_method,\n)\nfrom .base import (\n GenomeBase,\n include_in_downloads,\n class_with_downloads,\n MsgPackProperty,\n msgpack_unpacking_class,\n)\nimport pypipegraph as ppg\nfrom .common import EukaryoticCode\nimport mbf_pandas_msgpack as pandas_msgpack\nimport pkg_resources\n\n\ndef download_gunzip_and_attach(url, unzipped_filename, files_to_attach):\n import shutil\n import gzip\n import tempfile\n\n tf = tempfile.NamedTemporaryFile(suffix=\".gz\")\n download_file(url, tf)\n tf.flush()\n\n attach = b\"\"\n for f in files_to_attach:\n attach += Path(f).read_bytes()\n\n with gzip.GzipFile(tf.name, \"rb\") as gz_in:\n with open(unzipped_filename, \"wb\") as op:\n shutil.copyfileobj(gz_in, op)\n op.write(attach)\n\n\n_ensembl_genome_cache = {}\n\n\ndef EnsemblGenome(species, revision, prebuild_manager=None):\n if prebuild_manager is None: # pragma: no cover\n prebuild_manager = mbf_externals.get_global_manager()\n if ppg.util.global_pipegraph is not None:\n if not hasattr(ppg.util.global_pipegraph, \"_ensembl_genome_dedup\"):\n ppg.util.global_pipegraph._ensembl_genome_dedup = {}\n cache = ppg.util.global_pipegraph._ensembl_genome_dedup\n else:\n cache = _ensembl_genome_cache\n if (species, revision) in cache:\n res = cache[species, revision]\n if res.prebuild_manager != prebuild_manager: # pragma: no cover\n raise ValueError(\n \"Changing prebuild manager within one pipegraph is 
not supported\"\n )\n return res\n else:\n res = _EnsemblGenome(species, revision, prebuild_manager)\n cache[species, revision] = res\n return res\n\n\n@msgpack_unpacking_class\n@class_with_downloads\nclass _EnsemblGenome(GenomeBase):\n def __init__(self, species, revision, prebuild_manager):\n super().__init__()\n self.prebuild_manager = prebuild_manager\n\n self.species = species\n if not re.match(r\"^[A-Z][a-z]+_[a-z]+$\", species):\n raise ValueError(\"Species must be capitalized like 'Homo_sapiens\")\n self.revision = str(int(revision))\n self.name = f\"{self.species}_{self.revision}\"\n if ppg.inside_ppg():\n ppg.util.assert_uniqueness_of_object(self)\n self.genetic_code = EukaryoticCode\n self.download_genome()\n self._seq_region_is_canonical = {}\n self._canonical_cache = {}\n\n def __repr__(self):\n return f\"EnsemblGenome({self.species}, {self.revision})\"\n\n @include_in_downloads\n def _pb_find_server(self):\n ensembl_urls = [\n \"ftp://ftp.ensembl.org/pub/release-%i/\",\n \"ftp://ftp.ensemblgenomes.org/pub/release-%i/fungi/\",\n \"ftp://ftp.ensemblgenomes.org/pub/release-%i/metazoa/\",\n \"ftp://ftp.ensemblgenomes.org/pub/release-%i/plants/\",\n \"ftp://ftp.ensemblgenomes.org/pub/release-%i/protists/\",\n # \"http://ftp.ensemblgenomes.org/pub/release-%i/bacteria/\", # bacteria are complicated / subdivided?\n ]\n\n def find_ensembl_server(output_path):\n for proto_url in ensembl_urls:\n url = proto_url % (int(self.revision),) + \"fasta/\"\n r = get_page(url)\n if self.species.lower() in r:\n (output_path / \"url.txt\").write_text(\n proto_url % (int(self.revision),)\n )\n return\n raise ValueError(\"Could not find this species on any ensembl server\")\n\n server_job = self.prebuild_manager.prebuild(\n f\"ensembl/{self.species}_{self.revision}/server\",\n # we don't use the version for this, since we need it for building\n # various aligner versioned indices\n \"1\",\n [],\n [\"url.txt\"],\n find_ensembl_server,\n minimum_acceptable_version=\"1\",\n maximum_acceptable_version=\"1\",\n )\n self.server_job = server_job\n return server_job\n\n @include_in_downloads\n def _pb_download_gtf(self):\n return self._pb_download_and_gunzip(\n \"gtf\",\n \"gtf/\" + self.species.lower() + \"/\",\n (fr\"{self.species}\\..+\\.{self.revision}.gtf.gz\",),\n \"genes.gtf\",\n ) # can't have this unziped star wants it unziped\n\n def get_additional_gene_gtfs(self):\n data_path = Path(pkg_resources.resource_filename('mbf_genomes', 'data/'))\n if self.species == \"Homo_sapiens\":\n if int(self.revision) <= 74:\n return [\n data_path / \"ribosomal_genes_grch37.gtf.gz.full.gtf.gz\"\n ]\n else:\n return [\n data_path\n / \"ribosomal_genes_grch38.gtf.gz.full.gtf.gz\"\n ]\n elif self.species == \"Mus_musculus\":\n if int(self.revision) > 67:\n return [\n data_path\n / \"ribosomal_genes_mm10.gtf.gz.full.gtf.gz\"\n ]\n elif self.species == \"Ustilago_maydis\":\n return [\n data_path\n / \"ustilago_maydis_a2_locus.gff\"\n ]\n return []\n\n def get_additional_fastas(self):\n \"\"\"Add additional fasta files to the genome.\n\n They are considered true chromosomes if 'chromosome: something'\n is in the fasta description line.\n \"\"\"\n\n if self.species == \"Ustilago_maydis\":\n return [\n Path(__file__).parent.parent.parent\n / \"data\"\n / \"ustilago_maydis_a2_locus.fasta\"\n ]\n return None\n\n @property\n def gene_gtf_dependencies(self):\n return [self._pb_download_gtf()]\n\n @include_in_downloads\n def _pb_download_genome_fasta(self):\n\n additional_fastas = self.get_additional_fastas()\n if 
additional_fastas:\n return self._pb_download(\n pb_name=\"dna\",\n url=\"fasta/\" + self.species.lower() + \"/dna/\",\n regexps=(\n fr\"{self.species}\\..+\\.dna.primary_assembly.fa.gz\",\n fr\"{self.species}\\..+\\.dna.toplevel.fa.gz\",\n ),\n output_filename=\"genome.fasta\",\n download_func=lambda url, unzipped_filename: download_gunzip_and_attach(\n url, unzipped_filename, additional_fastas\n ),\n additional_input_files=additional_fastas,\n )\n\n else:\n return self._pb_download_and_gunzip(\n \"dna\",\n \"fasta/\" + self.species.lower() + \"/dna/\",\n (\n fr\"{self.species}\\..+\\.dna.primary_assembly.fa.gz\",\n fr\"{self.species}\\..+\\.dna.toplevel.fa.gz\",\n ),\n \"genome.fasta\",\n )\n\n @include_in_downloads\n def _pb_extract_keys_from_genome(self):\n output_filename = \"references.txt\"\n\n def extract(output_path):\n from .common import iter_fasta\n\n fn = self.find_file(\"genome.fasta\")\n keys = []\n for key, seq in iter_fasta(fn):\n keys.append(key)\n (output_path / output_filename).write_bytes(b\"\\n\".join(keys))\n\n job = self.prebuild_manager.prebuild(\n f\"ensembl/{self.species}_{self.revision}/chromosomes_and_contigs\",\n \"1\",\n [],\n [output_filename],\n extract,\n ).depends_on(self._pb_download_genome_fasta())\n return job\n\n @include_in_downloads\n def _pb_download_cdna_fasta(self):\n return self._pb_download_and_gunzip(\n \"cdna\",\n \"fasta/\" + self.species.lower() + \"/cdna/\",\n (fr\"{self.species}\\..+\\.cdna.all.fa.gz\",),\n \"cdna.fasta\",\n )\n\n @include_in_downloads\n def _pb_download_protein_fasta(self):\n return self._pb_download_and_gunzip(\n \"pep\",\n f\"fasta/{self.species.lower()}/pep/\",\n (fr\"{self.species}\\..+\\.pep.all.fa.gz\",),\n \"pep.fasta\",\n )\n\n @include_in_downloads\n def _pb_download_sql_table_definitions(self):\n return self._pb_download_straight(\n \"sql/core/sql_def\",\n \"mysql/\",\n (fr\"{self.species.lower()}_core_.+\",),\n \"core.sql.gz\",\n lambda match: f\"{match.strip()}/{match.strip()}.sql.gz\",\n )\n\n def _pb_download_sql_table(self, table_name):\n \"\"\"Helper to download sql tables as mysql dumps\"\"\"\n job = self._pb_download_straight(\n f\"sql/core/{table_name}\",\n \"mysql/\",\n (fr\"{self.species.lower()}_core_.+\",),\n f\"{table_name}.txt.gz\",\n lambda match: f\"{match.strip()}/{table_name.strip()}.txt.gz\",\n ).depends_on(self._pb_download_sql_table_definitions())\n job.table_name = table_name\n return job\n\n @include_in_downloads\n @lazy_method\n def _pb_download_sql_tables(self):\n tables = [\n (\"gene\"), # for description\n (\"transcript\"), # for external name lookup transcript -> gene\n (\"translation\"), # for external name lookup translation -> gene\n (\"stable_id_event\"), # for stable_id changes\n (\"external_db\"), # for external name lookup\n (\"object_xref\"), # for external name lookup\n (\"xref\"), # for external name lookup\n (\"alt_allele\"), # for finding 'canonical' ids\n (\"seq_region\"), # for finding 'canonical' ids\n (\"seq_region_attrib\"), # for finding 'canonical' ids\n (\"attrib_type\"), # for finding 'canonical' ids\n ]\n return [self._pb_download_sql_table(x) for x in tables]\n\n def _pb_download(\n self,\n pb_name,\n url,\n regexps,\n output_filename,\n download_func,\n match_transformer=lambda x: x,\n additional_input_files=[],\n ):\n \"\"\"regexps may be multiple - then the first one matching is used\"\"\"\n\n def do_download(output_path):\n real_url = self.base_url + url\n raw = get_page(real_url)\n if not raw: # pragma: no cover\n raise ValueError(\"Retrieving url failed: 
%s\" % real_url)\n for aregexps in regexps:\n matches = re.findall(aregexps, raw)\n if len(matches) == 1:\n Path(str(output_path / output_filename) + \".url\").write_text(\n (real_url + matches[0])\n )\n download_func(\n real_url + match_transformer(matches[0]),\n output_path / output_filename,\n )\n break\n else:\n raise ValueError( # pragma: no cover - defensive\n \"Found either too few or too many for every regexps. \\nRaw was %s\"\n % (raw,)\n )\n\n if Path(output_filename).suffix == \".fasta\":\n import pysam\n\n pysam.faidx(str((output_path / output_filename).absolute()))\n\n job = self.prebuild_manager.prebuild(\n f\"ensembl/{self.species}_{self.revision}/{pb_name}\",\n \"1\",\n [] + additional_input_files,\n [output_filename],\n do_download,\n )\n job.depends_on(self._pb_find_server())\n return job\n\n def _pb_download_straight(\n self,\n pb_name,\n url,\n regexps,\n output_filename,\n match_transformer=lambda x: x, # pragma: no cover\n ):\n def df(url, filename):\n with open(filename, \"wb\") as op:\n download_file(url, op)\n\n return self._pb_download(\n pb_name, url, regexps, output_filename, df, match_transformer\n )\n\n def _pb_download_and_gunzip(self, pb_name, url, regexps, output_filename):\n return self._pb_download(\n pb_name, url, regexps, output_filename, download_file_and_gunzip\n )\n\n # def _pb_download_and_gzip(self, pb_name, url, regexps, output_filename):\n # return self._pb_download(\n # pb_name, url, regexps, output_filename, download_file_and_gzip\n # )\n\n @lazy_property\n def base_url(self):\n return self.server_job.find_file(\"url.txt\").read_text()\n\n def _msg_pack_job(\n self, property_name, filename, callback_function, files_to_invariant_on\n ):\n def dump(output_filename):\n df = callback_function(self)\n pandas_msgpack.to_msgpack(output_filename / filename, df)\n\n j = self.prebuild_manager.prebuild(\n f\"ensembl/{self.species}_{self.revision}/{property_name}\",\n # we don't use the version for this, since we need it for building\n # various aligner versioned indices\n \"4\",\n files_to_invariant_on,\n [filename],\n dump,\n )\n j.depends_on_func(property_name, callback_function)\n self._prebuilds.append(j)\n return j\n\n @lazy_method\n def get_true_chromosomes(self):\n \"\"\"Get the names of 'true' chromosomes, ie. 
no scaffolds/contigs\n in genomes that have chromosomes, otherwise all\"\"\"\n fn = self.find_file(\"references.txt\")\n keys = Path(fn).read_text().split(\"\\n\")\n chroms = [x for x in keys if \"chromosome:\" in x]\n if not chroms:\n chroms = keys\n return [x[: x.find(\" \")] for x in chroms]\n\n def _load_from_sql(\n self, table_name, columns=None, check_for_columns=None, **kwargs\n ):\n table_columns = self._get_sql_table_column_names(table_name)\n for c in columns:\n if not c in table_columns:\n raise ValueError(c, \"available\", table_columns)\n\n df = pd.read_csv(\n self.find_file(f\"{table_name}.txt.gz\"),\n sep=\"\\t\",\n header=None,\n names=table_columns,\n usecols=columns,\n na_values=\"\\\\N\",\n lineterminator=\"\\n\",\n escapechar=\"\\\\\",\n **kwargs,\n )\n if check_for_columns:\n for c in check_for_columns:\n if not c in table_columns: # pragma: no cover\n raise KeyError(c, \"availabel\", table_columns)\n return df\n\n def _prepare_df_genes_meta(self):\n \"\"\"Meta data for genes.\n Currently contains:\n 'description'\n \"\"\"\n try:\n df = self._load_from_sql(\n \"gene\", [\"stable_id\", \"description\"], [\"stable_id\"]\n )\n except KeyError: # pragma: no cover\n raise ValueError(\n \"No stable_id column found - \"\n \"old ensembl, split into seperate table, add support code?\"\n )\n res = df.set_index(\"stable_id\")\n res.index.name = \"gene_stable_id\"\n return res\n\n df_genes_meta = MsgPackProperty(\n lambda self: [\n x for x in self._pb_download_sql_tables() if x.table_name == \"gene\"\n ]\n )\n\n def _get_sql_table_column_names(self, sql_table_name):\n \"\"\"Read the sql definition and extract column names\"\"\"\n import gzip\n\n with gzip.GzipFile(self.find_file(\"core.sql.gz\")) as op:\n raw = op.read().decode(\"utf-8\")\n if '\\n-- Table structure' in raw:\n parts = raw.split(\"\\n-- Table structure\")[1:]\n else:\n parts = raw.split(\"\\n\\n\")\n for p in parts:\n if 'CREATE TABLE' not in p:\n continue\n p = p[p.find(\"CREATE TABLE\") :]\n if \" PRIMARY\" in p:\n p = p[: p.find(\" PRIMARY\")]\n elif \" UNIQUE\" in p:\n p = p[: p.find(\" UNIQUE\")]\n elif \" KEY\" in p:\n p = p[: p.find(\" KEY\")]\n\n else: # pragma: no cover\n raise ValueError(p)\n names = re.findall(\"`([^`]+)`\", p)\n table_name, *columns = names\n if table_name == sql_table_name:\n return columns\n raise KeyError(f\"{sql_table_name} not in core.sql.gz\") # pragma: no cover\n\n def _prepare_lookup_stable_id_events(self):\n \"\"\"Lookup old_stable_id -> new_stable_id\"\"\"\n df = self._load_from_sql(\"stable_id_event\", [\"old_stable_id\", \"new_stable_id\"])\n lookup = {}\n olds = [str(x) for x in df[\"old_stable_id\"].values]\n news = [str(x) for x in df[\"new_stable_id\"].values]\n for old in olds:\n lookup[old] = set()\n for old, new in zip(olds, news):\n lookup[old].add(new)\n return pd.DataFrame(\n {\"old\": list(lookup.keys()), \"new\": [list(x) for x in lookup.values()]}\n ).set_index(\"old\")\n\n lookup_stable_id_events = MsgPackProperty(\n lambda self: [\n x\n for x in self._pb_download_sql_tables()\n if x.table_name == \"stable_id_event\"\n ]\n )\n\n def newest_stable_ids_for(self, stable_id):\n \"\"\"Get the most up to date and current stable_ids for genes, transcripts, proteins).\n Plurarl for gene might have split, or have been deleted.\n returns a set of new ids.\n \"\"\"\n try:\n valid_ids = set(self.df_genes.index)\n valid_ids.update(self.df_transcripts.index)\n valid_ids.update(self.df_proteins.index)\n res = set(self.lookup_stable_id_events.loc[stable_id][\"new\"])\n res = 
set(\n [x for x in res if x in valid_ids]\n ) # filter those that are no longer in the database - no matter that they were m apped somewhere else in between\n return res\n except KeyError as e:\n # see if it's a current id where we're simply lacking the stable_id_event for some reason\n if stable_id in valid_ids:\n return set([stable_id])\n else:\n raise e\n\n def get_external_dbs(self):\n \"\"\"Return the names of all external dbs that actually have xrefs\"\"\"\n df_external_db = self._load_from_sql(\n \"external_db\", [\"external_db_id\", \"db_name\"]\n )\n with_data = set(\n self._load_from_sql(\"xref\", [\"external_db_id\"])[\"external_db_id\"].unique()\n )\n return sorted(\n df_external_db[\"db_name\"][df_external_db[\"external_db_id\"].isin(with_data)]\n )\n\n def get_external_db_to_gene_id_mapping(self, external_db_name):\n \"\"\"Return a dict external id -> set(stable_id, ...)\n for a given external db - e.g. EntrezGene, list\n with get_external_dbs()\n \"\"\"\n df_external_db = self._load_from_sql(\n \"external_db\", [\"external_db_id\", \"db_name\"]\n ).set_index(\"db_name\")\n external_db_id = df_external_db.at[external_db_name, \"external_db_id\"]\n xref = self._load_from_sql(\n \"xref\", [\"dbprimary_acc\", \"external_db_id\", \"xref_id\"]\n ).set_index(\"xref_id\")\n xref = xref[xref.external_db_id == external_db_id]\n object_xref = self._load_from_sql(\n \"object_xref\", [\"ensembl_object_type\", \"xref_id\", \"ensembl_id\"]\n )\n object_xref = object_xref[object_xref.xref_id.isin(set(xref.index))]\n # object_xref = object_xref[object_xref[\"ensembl_object_type\"] == \"Gene\"]\n result = {}\n transcripts = None\n translations = None\n genes = self._load_from_sql(\"gene\", [\"gene_id\", \"stable_id\"]).set_index(\n \"gene_id\"\n )\n for row in object_xref.itertuples(index=False):\n if row.ensembl_object_type == \"Gene\":\n gene_id = row.ensembl_id\n elif row.ensembl_object_type == \"Transcript\":\n if transcripts is None:\n transcripts = self._load_from_sql(\n \"transcript\", [\"transcript_id\", \"gene_id\"]\n ).set_index(\"transcript_id\")\n\n gene_id = transcripts.at[row.ensembl_id, \"gene_id\"]\n elif row.ensembl_object_type == \"Translation\":\n if translations is None:\n translations = self._load_from_sql(\n \"translation\", [\"translation_id\", \"transcript_id\"]\n ).set_index(\"translation_id\")\n if transcripts is None:\n transcripts = self._load_from_sql(\n \"transcript\", [\"transcript_id\", \"gene_id\"]\n ).set_index(\"transcript_id\")\n\n transcript_id = translations.at[row.ensembl_id, \"transcript_id\"]\n gene_id = transcripts.at[transcript_id, \"gene_id\"]\n else:\n print(row)\n raise ValueError(\"Mapped to neiter a transcript, nor a gene\")\n\n gene_stable_id = genes.at[gene_id, \"stable_id\"]\n\n db_primary = xref.at[row.xref_id, \"dbprimary_acc\"]\n if not db_primary in result:\n result[db_primary] = set()\n result[db_primary].add(gene_stable_id)\n return result\n\n def get_external_db_to_translation_id_mapping(self, external_db_name):\n \"\"\"Return a dict external id -> set(translation_stable_id, ...)\n for a given external db - e.g. 
Uniprot/SWISSPROT\n see get_external_dbs() for a list\n \"\"\"\n df_external_db = self._load_from_sql(\n \"external_db\", [\"external_db_id\", \"db_name\"]\n ).set_index(\"db_name\")\n external_db_id = df_external_db.at[external_db_name, \"external_db_id\"]\n xref = self._load_from_sql(\n \"xref\", [\"dbprimary_acc\", \"external_db_id\", \"xref_id\"]\n ).set_index(\"xref_id\")\n xref = xref[xref.external_db_id == external_db_id]\n object_xref = self._load_from_sql(\n \"object_xref\", [\"ensembl_object_type\", \"xref_id\", \"ensembl_id\"]\n )\n object_xref = object_xref[object_xref.xref_id.isin(set(xref.index))]\n # object_xref = object_xref[object_xref[\"ensembl_object_type\"] == \"Gene\"]\n result = {}\n translations = self._load_from_sql(\n \"translation\", [\"translation_id\", \"stable_id\", \"version\"]\n ).set_index(\"translation_id\")\n\n for row in object_xref.itertuples(index=False):\n if row.ensembl_object_type == \"Translation\":\n translation_stable_id = (\n translations.at[row.ensembl_id, \"stable_id\"]\n + \".\"\n + str(translations.at[row.ensembl_id, \"version\"])\n )\n else:\n print(row)\n raise ValueError(\"not at translation mapping\")\n\n db_primary = xref.at[row.xref_id, \"dbprimary_acc\"]\n if not db_primary in result:\n result[db_primary] = set()\n result[db_primary].add(translation_stable_id)\n return result\n\n @lazy_property\n def allele_groups(self):\n df = self._load_from_sql(\"alt_allele\", [\"alt_allele_group_id\", \"gene_id\"])\n gene_df = self._load_from_sql(\"gene\", [\"gene_id\", \"stable_id\"]).rename(\n columns={\"gene_stable_id\": \"stable_id\"}\n )\n df = df.join(gene_df.set_index(\"gene_id\"), \"gene_id\")\n return df.set_index(\"stable_id\")\n\n def name_to_canonical_id(self, name, break_ties_by_number_of_transcripts=False):\n \"\"\"Given a gene name, lookup up it's stable ids, and return the\n one that's on the primary assembly from the allele group\"\"\"\n key = name, break_ties_by_number_of_transcripts\n if not key in self._canonical_cache:\n r = self._name_to_canonical_id(name, break_ties_by_number_of_transcripts)\n self._canonical_cache[key] = r\n else:\n r = self._canonical_cache[name, break_ties_by_number_of_transcripts]\n return r\n\n def _name_to_canonical_id(self, name, break_ties_by_number_of_transcripts=False):\n name_candidates = set(\n [x for x in self.name_to_gene_ids(name) if not x.startswith(\"LRG\")]\n )\n if not name_candidates: # pragma: no cover\n raise KeyError(\"No gene named %s\" % name)\n ag = self.allele_groups\n ag_ids = [\n x\n for x in ag.alt_allele_group_id.reindex(name_candidates).unique()\n if not pd.isnull(x)\n ]\n ag_candidates = set(ag.index[ag.alt_allele_group_id.isin(ag_ids)])\n if len(ag_ids) == 1 and name_candidates.issubset(ag_candidates):\n # the easy case, everything matches\n on_primary = [\n x\n for x in ag_candidates\n if x\n in self.df_genes.index # for there is no entry in genes.gtf if it's not on a not 'non_ref' chromosome.\n ]\n if len(on_primary) == 1:\n return on_primary[0]\n elif len(on_primary) == 0: # pragma: no cover\n # if self.species == \"Homo_sapiens\" and name == \"HLA-DRB3\": # HLA-DRB3 is not in genes.gtf!\n # known issue - return basically any of the candidates on alternate regions, but be consistent.\n # return sorted(ag_candidates)[0]\n raise ValueError(\"No primary gene found for %s\" % name)\n else: # pragma: no cover\n raise ValueError(\n \"Multiple gene on primary assemblies found for %s\" % name\n )\n elif len(ag_ids) == 0 and len(name_candidates) == 1: # pragma: no cover\n # another 
easy case, there are no alternatives\n return list(name_candidates)[0]\n else:\n if break_ties_by_number_of_transcripts:\n name_candidates = list(name_candidates)\n name_candidates.sort(\n key=lambda gene_stable_id: len(\n self.genes[gene_stable_id].transcripts\n )\n )\n return name_candidates[-1]\n else:\n raise ValueError( # pragma: no cover\n \"Could not determine canonical gene for '%s'. \"\n \"Either pass break_ties_by_number_of_transcripts=True, \"\n \"or use name_to_gene_ids()\"\n \" and have a look yourself (don't forget the allele groups).\\n\"\n \"Name candidates: %s\\n\"\n \"AG candidates: %s\\n\"\n \"AG ids: %s\" % (name, name_candidates, ag_candidates, ag_ids)\n )\n\n def build_index(self, aligner, fasta_to_use=None, gtf_to_use=None):\n if fasta_to_use is None: # pragma: no cover\n _fasta_to_use = \"genome.fasta\"\n else:\n _fasta_to_use = fasta_to_use\n if gtf_to_use is None: # pragma: no cover\n _gtf_to_use = \"genes.gtf\"\n else:\n _gtf_to_use = gtf_to_use\n name = Path(_fasta_to_use).stem\n\n deps = []\n if hasattr(aligner, \"build_index\"):\n deps.append(self.find_prebuild(_fasta_to_use))\n deps.append(self.find_prebuild(_gtf_to_use))\n postfix = \"\"\n func_deps = {}\n\n def do_align(output_path):\n aligner.build_index(\n [self.find_file(_fasta_to_use)],\n self.find_file(_gtf_to_use) if gtf_to_use is not None else None,\n output_path,\n )\n\n elif hasattr(aligner, \"build_index_from_genome\"):\n if fasta_to_use or gtf_to_use:\n raise ValueError(\n \"Aligner had no build_index, just build_index_from_genome, but fasta_to_use or gtf_to_use were set\"\n )\n deps.extend(aligner.get_genome_deps(self))\n func_deps = {\n \"build_index_from_genome\": aligner.__class__.build_index_from_genome\n }\n postfix = \"/\" + aligner.get_build_key()\n\n def do_align(output_path):\n aligner.build_index_from_genome(self, output_path)\n\n else:\n raise ValueError(\"Could not find build_index* function\")\n\n min_ver, max_ver = aligner.get_index_version_range()\n\n job = self.prebuild_manager.prebuild(\n f\"ensembl/{self.species}_{self.revision}/indices/{name}/{aligner.name}{postfix}\",\n aligner.version,\n [],\n [\"sentinel.txt\", \"stdout.txt\", \"stderr.txt\", \"cmd.txt\"],\n do_align,\n minimum_acceptable_version=min_ver,\n maximum_acceptable_version=max_ver,\n )\n self.download_genome() # so that the jobs are there\n job.depends_on(deps)\n for name, f in func_deps.items():\n job.depends_on_func(name, f)\n return job\n", "id": "8369901", "language": "Python", "matching_score": 4.803089618682861, "max_stars_count": 0, "path": "src/mbf_genomes/ensembl.py" }, { "content": "from pathlib import Path\nfrom abc import ABC, abstractmethod\nimport pandas as pd\nfrom dppd import dppd\nimport pysam\nfrom .common import reverse_complement, df_to_rows\nfrom .gene import Gene, Transcript\nfrom mbf_externals.util import lazy_method\nimport weakref\nimport mbf_pandas_msgpack as pandas_msgpack\nimport numpy as np\n\npd.read_msgpack = pandas_msgpack.read_msgpack\n\ndp, X = dppd()\n\n\ndef include_in_downloads(func):\n \"\"\"A decorator to collect the download funcs\"\"\"\n func._include_in_downloads = True\n return func\n\n\ndef class_with_downloads(cls):\n cls._download_methods = []\n for f in cls.__dict__.items():\n if hasattr(f[1], \"_include_in_downloads\"):\n cls._download_methods.append(f[1])\n return cls\n\n\ndef ReadOnlyPropertyWithFunctionAccess(func):\n \"\"\"With normal property, you can not (easily) retrieve\n the function. 
This will return the value of the func\n    if you do x.prop and the func itself if you do type(x).prop\n    \"\"\"\n\n    class Property:\n        def __get__(self, inst, instcls):\n            if inst is None:\n                # instance attribute accessed on class, return self\n                return func\n            else:\n                return func(inst)\n\n    return Property()\n\n\nclass MsgPackProperty:\n    \"\"\"\n    a message pack property is a property x_y that gets\n    calculated by a method _prepare_x_y\n    and automatically stored/loaded by a caching job\n    as msgpack file.\n    the actual job used depends on the GenomeBase subclass\n\n    The dependency_callback gets called with the GenomeBase subclass\n    instance and can return dependencies for the generated job\n\n    The object has three members afterwards:\n    x_y -> get the value returned by _prepare_x_y (lazy load)\n    _prepare_x_y -> that's the one you need to implement,\n        its docstring is copied to this property\n    job_y -> the job that caches _prepare_x_y() results\n\n    \"\"\"\n\n    def __init__(self, dependency_callback=None, files_to_invariant_on_callback=None):\n        self.dependency_callback = dependency_callback\n        self.files_to_invariant_on_callback = files_to_invariant_on_callback\n\n\ndef msgpack_unpacking_class(cls):\n    msg_pack_properties = []\n    for d in list(cls.__dict__):\n        v = cls.__dict__[d]\n        if isinstance(v, MsgPackProperty):\n            if not \"_\" in d:\n                raise NotImplementedError(\n                    \"Do not know how to create job name for msg_pack_properties that do not contain _\"\n                )\n            msg_pack_properties.append(d)\n            job_name = \"job_\" + d[d.find(\"_\") + 1 :]\n            filename = d + \".msgpack\"\n            calc_func = getattr(cls, f\"_prepare_{d}\")\n\n            def load(self, d=d, filename=filename, job_name=job_name):\n                if not hasattr(self, \"_\" + d):\n                    fn = self.find_file(filename)\n                    if not fn.exists():\n                        raise ValueError(\n                            f\"{d} accessed before the corresponding {job_name} call\"\n                        )\n                    df = pd.read_msgpack(fn)\n                    setattr(self, \"_\" + d, df)\n                return getattr(self, \"_\" + d)\n\n            p = property(load)\n            p.__doc__ = calc_func.__doc__\n            setattr(cls, d, p)\n            if not hasattr(cls, job_name):\n\n                def gen_job(\n                    self,\n                    d=d,\n                    filename=filename,\n                    calc_func=calc_func,\n                    dependency_callback=v.dependency_callback,\n                    files_to_invariant_on_callback=v.files_to_invariant_on_callback,\n                ):\n                    if files_to_invariant_on_callback:\n                        files_to_invariant_on = files_to_invariant_on_callback(self)\n                    else:\n                        files_to_invariant_on = []\n                    j = self._msg_pack_job(\n                        d, filename, calc_func, files_to_invariant_on\n                    )\n                    if j is not None:\n                        j.depends_on(dependency_callback(self))\n                    return j\n\n                setattr(cls, job_name, gen_job)\n            else:  # pragma: no cover\n                pass\n    if hasattr(cls, \"_msg_pack_properties\"):\n        msg_pack_properties.extend(cls._msg_pack_properties)\n    cls._msg_pack_properties = msg_pack_properties\n    return cls\n\n\n@msgpack_unpacking_class\nclass GenomeBase(ABC):\n    def __init__(self):\n        self._filename_lookups = []\n        self._prebuilds = []\n        self._download_jobs = []\n\n    @abstractmethod\n    def _msg_pack_job(\n        self, property_name, filename, callback_function, files_to_invariant_on\n    ):\n        raise NotImplementedError  # pragma: no cover\n\n    @lazy_method\n    def download_genome(self):\n        \"\"\"All the jobs needed to download the genome and prepare it for usage\"\"\"\n        result = []\n        for method in self.__class__._download_methods:\n            j = method(self)\n            if isinstance(j, list):\n                if j is not None:  # pragma: no branch\n                    result.extend(j)\n            elif j is not None:  # pragma: no branch\n                result.append(j)\n        # for j in result:\n        #    if isinstance(j, list):\n        #        raise ValueError(method)\n        for j in 
self._download_jobs:\n if j is not None: # pragma: no branch\n result.append(j)\n for j in result:\n if not j in self._prebuilds: # pragma: no branch\n self._prebuilds.append(j)\n for msg_pack_prop in self.__class__._msg_pack_properties:\n job_name = \"job\" + msg_pack_prop[msg_pack_prop.find(\"_\") :]\n j = getattr(self, job_name)()\n self._prebuilds.append(j)\n return result\n\n def find_file(self, name):\n if name in self._filename_lookups:\n return self._filename_lookups[name]\n for job in self._prebuilds:\n if hasattr(job, \"find_file\"):\n try:\n return job.find_file(name)\n except KeyError:\n pass\n else:\n for j in job:\n for f in j.filenames:\n if Path(f).name == name:\n return Path(f)\n # now search for undeclared, but created files\n # mostly for aligners, where we only track the sentinels, not the index\n # files\n for job in self._prebuilds:\n if hasattr(job, \"name_file\"): # pragma: no branch\n if job.name_file(name).exists():\n return job.name_file(name)\n raise OSError(f\"File not found: {name}\")\n\n def find_prebuild(self, name):\n \"\"\"Find which prebuild created the file named @name.\n Must be in the list of job.filenames\"\"\"\n\n for job in self._prebuilds:\n if hasattr(job, \"find_file\"):\n try:\n job.find_file(name)\n return job\n except KeyError:\n pass\n else:\n for j in job:\n for f in j.filenames:\n if Path(f).name == name:\n return job\n raise OSError(f\"File not found: {name}\")\n\n @lazy_method\n def get_chromosome_lengths(self):\n \"\"\"Return a dict name -> length for the primary assembly\"\"\"\n f = pysam.FastaFile(str(self.find_file(\"genome.fasta\")))\n return dict(zip(f.references, f.lengths))\n\n @lazy_method\n def get_true_chromosomes(self):\n \"\"\"Get the names of 'true' chromosomes, ie. no scaffolds/contigs\n in genomes that have chromosomes, otherwise all\"\"\"\n return list(self.get_chromosome_lengths().keys())\n\n def get_genome_sequence(self, chr, start, stop):\n f = pysam.FastaFile(str(self.find_file(\"genome.fasta\")))\n return f.fetch(chr, start, stop)\n\n def get_cdna_sequence(self, transcript_stable_id):\n with pysam.FastaFile(str(self.find_file(\"cdna.fasta\"))) as f:\n return f.fetch(transcript_stable_id)\n\n def get_cds_sequence(self, protein_id, protein_info=None):\n \"\"\"Get the coding sequence (rna) of a protein\"\"\"\n if protein_info is None:\n protein_info = self.df_proteins.loc[protein_id]\n elif protein_info.name != protein_id:\n raise ValueError(\"protein_id != protein_info['protein_id']\")\n cdna = \"\"\n chr = protein_info[\"chr\"]\n for start, stop in protein_info[\"cds\"]:\n cdna += self.get_genome_sequence(chr, start, stop)\n if protein_info[\"strand\"] not in (1, -1): # pragma: no cover\n raise ValueError(f'{protein_info[\"strand\"]} was not 1/-1')\n if protein_info[\"strand\"] == -1:\n cdna = reverse_complement(cdna)\n return cdna\n\n def get_protein_sequence(self, protein_id):\n \"\"\"Get the AA sequence of a protein\"\"\"\n with pysam.FastaFile(str(self.find_file(\"pep.fasta\"))) as f:\n return f.fetch(protein_id)\n\n def get_additional_gene_gtfs(self):\n return []\n\n def get_gtf(self, features=[]):\n import mbf_gtf\n\n filenames = [self.find_file(\"genes.gtf\")]\n filenames.extend(self.get_additional_gene_gtfs())\n dfs = {}\n for gtf_filename in filenames:\n if gtf_filename is None:\n pass\n else:\n r = mbf_gtf.parse_ensembl_gtf(str(gtf_filename), list(features))\n for k, df in r.items():\n if not k in dfs:\n dfs[k] = []\n dfs[k].append(df)\n for k in features:\n if not k in dfs:\n dfs[k] = [pd.DataFrame({})]\n 
result = {k: pd.concat(dfs[k], sort=False) for k in dfs}\n return result\n\n @property\n def genes(self):\n \"\"\"a dictionary gene_stable_id -> gene.Gene\n \"\"\"\n if not hasattr(self, \"_genes\"):\n self.build_genes_and_transcripts()\n return self._genes\n\n @property\n def transcripts(self):\n \"\"\"a dictionary transcript_stable_id -> gene.Transcript\"\"\"\n if not hasattr(self, \"_transcripts\"):\n self.build_genes_and_transcripts()\n return self._transcripts\n\n def name_to_gene_ids(self, name):\n if not hasattr(self, \"_name_to_gene_lookup\"):\n lookup = {}\n for (a_name, stable_id) in zip(self.df_genes[\"name\"], self.df_genes.index):\n a_name = a_name.upper()\n if not a_name in lookup:\n lookup[a_name] = set([stable_id])\n else:\n lookup[a_name].add(stable_id)\n self._name_to_gene_lookup = lookup\n try:\n return set(self._name_to_gene_lookup[name.upper()])\n except KeyError:\n return set()\n # return set(self.df_genes.index[self.df_genes.name.str.upper() == name.upper()])\n\n def build_genes_and_transcripts(self):\n genes = {}\n for tup in self.df_genes.itertuples():\n g = Gene(\n tup[0],\n tup.name,\n tup.chr,\n tup.start,\n tup.stop,\n tup.strand,\n tup.biotype,\n transcripts=[],\n genome=weakref.proxy(self),\n )\n genes[tup[0]] = g\n transcripts = {}\n for tup in self.df_transcripts.itertuples():\n g = genes[tup.gene_stable_id]\n t = Transcript(\n tup[0],\n tup.gene_stable_id,\n tup.name,\n tup.chr,\n tup.start,\n tup.stop,\n tup.strand,\n tup.biotype,\n tup.exons,\n tup.exon_stable_ids,\n weakref.proxy(g),\n genome=weakref.proxy(self),\n )\n transcripts[tup[0]] = t\n g.transcripts.append(t)\n self._genes = genes\n self._transcripts = transcripts\n\n @ReadOnlyPropertyWithFunctionAccess\n def df_exons(self):\n \"\"\"a dataframe of all exons (on canonical chromosomes - ie those in get_chromosome_lengths())\"\"\"\n res = {\n \"chr\": [],\n \"start\": [],\n \"stop\": [],\n \"transcript_stable_id\": [],\n \"gene_stable_id\": [],\n \"strand\": [],\n }\n canonical_chromosomes = self.get_chromosome_lengths()\n for tr in self.transcripts.values():\n if not tr.chr in canonical_chromosomes: # pragma: no cover\n continue\n for start, stop in tr.exons:\n res[\"chr\"].append(tr.chr)\n res[\"start\"].append(start)\n res[\"stop\"].append(stop)\n res[\"transcript_stable_id\"].append(tr.transcript_stable_id)\n res[\"gene_stable_id\"].append(tr.gene_stable_id)\n res[\"strand\"].append(tr.strand)\n return pd.DataFrame(res)\n\n def _prepare_df_genes(self):\n \"\"\"Return a DataFrame with gene information:\n gene_stable_id\n name\n chr\n start\n stop\n strand\n tss\n tes\n biotype\n \"\"\"\n gtf = self.get_gtf([\"gene\", \"transcript\"])\n genes = gtf[\"gene\"]\n transcripts = gtf[\"transcript\"]\n if len(genes) == 0: # a genome without gene information\n return pd.DataFrame(\n {\n \"gene_stable_id\": [],\n \"name\": [],\n \"chr\": [],\n \"start\": [],\n \"stop\": [],\n \"strand\": [],\n \"tss\": [],\n \"tes\": [],\n \"biotype\": [],\n }\n )\n elif len(transcripts) == 0: # pragma: no cover\n raise ValueError(\n \"Genome with gene but no transcript information \"\n \"not supported: len(genes) %i, len(transcripts) %i\"\n % (len(genes), len(transcripts))\n )\n\n transcripts = transcripts.set_index(\"gene_id\").sort_values(\n [\"seqname\", \"start\", \"end\"]\n )\n genes = (\n dp(genes)\n .transassign(\n gene_stable_id=X.gene_id,\n name=list(\n X.gene_name if hasattr(X, \"gene_name\") else X.gene_id\n ), # this makes sure we have a str(object) column in the dataframe\n # which triggers msgpack not to 
mess up our tuple columns.\n chr=pd.Categorical(X.seqname),\n start=X.start,\n stop=X.end,\n strand=X.strand,\n tss=(genes.start).where(genes.strand == 1, genes.end),\n tes=(genes.end).where(genes.strand == 1, genes.start),\n biotype=pd.Categorical(X.gene_biotype),\n )\n .sort_values([\"chr\", \"start\"])\n .set_index(\"gene_stable_id\")\n .pd\n )\n if not genes.index.is_unique:\n raise ValueError(\"gene_stable_ids were not unique\")\n tr = {}\n for gene_stable_id, transcript_stable_id in transcripts[\n \"transcript_id\"\n ].items():\n if not gene_stable_id in tr:\n tr[gene_stable_id] = []\n tr[gene_stable_id].append(transcript_stable_id)\n genes = genes.assign(\n transcript_stable_ids=pd.Series(list(tr.values()), index=list(tr.keys()))\n )\n self.sanity_check_genes(genes)\n return genes\n\n def sanity_check_genes(self, df_genes):\n strand_values = set(df_genes.strand.unique())\n if strand_values.difference([1, -1]): # pragma: no cover\n # this is currently already being handled by the gtf parser - defensive\n raise ValueError(f\"Gene strand was outside of 1, -1: {strand_values}\")\n wrong_order = df_genes[\"start\"] > df_genes[\"stop\"]\n if wrong_order.any():\n raise ValueError(\"start > stop %s\" % df_genes[wrong_order].head())\n\n def _prepare_df_transcripts(self):\n \"\"\"Get a DataFrame with all the transcript information\n transcript_stable_id (index),\n gene_stable_id,\n name,\n chr, start, stop, strand,\n biotype,\n exons - list (start, stop)\n \"\"\"\n gtf = self.get_gtf([\"transcript\", \"exon\"])\n transcripts = gtf[\"transcript\"]\n exons = gtf[\"exon\"]\n if len(transcripts) == 0:\n df = (\n pd.DataFrame(\n {\n \"transcript_stable_id\": [],\n \"gene_stable_id\": [],\n \"name\": [],\n \"chr\": [],\n \"start\": [],\n \"stop\": [],\n \"strand\": [],\n \"biotype\": [],\n \"exons\": [],\n \"exon_stable_ids\": [],\n \"translation_start\": [],\n \"translation_start_exon\": [],\n \"translation_stop\": [],\n \"translation_stop_exon\": [],\n \"protein_id\": [],\n }\n )\n .set_index(\"transcript_stable_id\")\n .sort_values([\"chr\", \"start\"])\n )\n return df\n all_exons = exons.set_index(\"transcript_id\").sort_values(\"start\")\n\n result = (\n dp(transcripts)\n .transassign(\n transcript_stable_id=X.transcript_id,\n gene_stable_id=X.gene_id,\n name=X.transcript_name\n if hasattr(X, \"transcript_name\")\n else X.transcript_id,\n chr=pd.Categorical(X.seqname),\n start=X.start,\n stop=X.end,\n strand=X.strand,\n biotype=pd.Categorical(X.transcript_biotype),\n msg_pack_fix=\"\" # stupid msg_pack writer will mess up the exon tuples\n # if it has no str-object columns in the datafram.\n )\n .set_index(\"transcript_stable_id\")\n .pd\n )\n\n if not result.index.is_unique:\n raise ValueError(\"transcript_stable_ids were not unique\")\n result_exons = {}\n result_exon_ids = {}\n for (transcript_stable_id, estart, estop, eid) in zip(\n all_exons.index, all_exons[\"start\"], all_exons[\"end\"], all_exons[\"exon_id\"]\n ):\n if not transcript_stable_id in result_exons:\n result_exons[transcript_stable_id] = []\n result_exon_ids[transcript_stable_id] = []\n result_exons[transcript_stable_id].append((estart, estop))\n result_exon_ids[transcript_stable_id].append(eid)\n\n result_exons = pd.Series(\n list(result_exons.values()), index=list(result_exons.keys())\n )\n result_exon_ids = pd.Series(\n list(result_exon_ids.values()), index=list(result_exon_ids.keys())\n )\n result = result.assign(exons=result_exons, exon_stable_ids=result_exon_ids)\n self.sanity_check_transcripts(result)\n\n return 
result\n\n def sanity_check_transcripts(self, df_transcripts):\n strand_values = set(df_transcripts.strand.unique())\n if strand_values.difference(\n [1, -1]\n ): # pragma: no cover - defensive, currently handled in gtf parser\n raise ValueError(f\"Transcript strand was outside of 1, -1: {strand_values}\")\n\n # can't use self.genes or self.transcript at this point,\n # they rely on df_genes and df_transcripts being set\n genes = df_to_rows(self.df_genes, [\"start\", \"stop\"])\n for transcript_stable_id, start, stop, exons, gene_stable_id in zip(\n df_transcripts.index,\n df_transcripts.start,\n df_transcripts.stop,\n df_transcripts.exons,\n df_transcripts.gene_stable_id,\n ):\n\n if start > stop:\n raise ValueError(\"start > stop {row}\")\n try:\n for estart, estop in exons:\n if estart < start or estop > stop:\n raise ValueError(\n f\"Exon outside of transcript: {transcript_stable_id}\"\n f\"\\ngene was {start}..{stop}\"\n f\"\\nexon was {estart}..{estop}\"\n )\n except TypeError: # pragma: no cover\n print(repr((transcript_stable_id, start, stop, exons, gene_stable_id)))\n gene_info = genes[gene_stable_id]\n if start < gene_info.start or stop > gene_info.stop:\n raise ValueError(\n f\"Transcript outside of gene: {transcript_stable_id} {start} {stop} {gene_info.start} {gene_info.stop}\"\n )\n\n def _prepare_df_proteins(self):\n \"\"\"Get a DataFrame with protein information\n protein_stable_id (index)\n transcript_stable_id,\n gene_stable_id,\n chr,\n strand\n cds - [(start, stop)] # in genomic coordinates\n \"\"\"\n gtf = self.get_gtf([\"CDS\"])\n cds = gtf[\"CDS\"]\n if len(cds) == 0:\n df = pd.DataFrame(\n {\n \"protein_stable_id\": [],\n \"transcript_stable_id\": [],\n \"gene_stable_id\": [],\n \"chr\": [],\n \"strand\": [],\n \"cds\": [],\n }\n ).set_index(\"protein_stable_id\")\n return df\n result = {\n \"protein_stable_id\": [],\n \"transcript_stable_id\": [],\n \"gene_stable_id\": [],\n \"chr\": [],\n \"strand\": [],\n \"cds\": [],\n }\n\n for protein_stable_id, tuples in dp(cds).groupby(\"protein_id\").itertuples():\n transcript_stable_id = tuples[0].transcript_id\n gene_stable_id = tuples[0].gene_id\n chr = tuples[0].seqname\n strand = tuples[0].strand\n local_cds = list(\n zip((tup.start for tup in tuples), (tup.end for tup in tuples))\n )\n result[\"protein_stable_id\"].append(protein_stable_id[0])\n result[\"gene_stable_id\"].append(gene_stable_id)\n result[\"transcript_stable_id\"].append(transcript_stable_id)\n result[\"chr\"].append(chr)\n result[\"strand\"].append(strand)\n result[\"cds\"].append(local_cds)\n result = pd.DataFrame(result).set_index(\"protein_stable_id\")\n return result\n\n df_genes = MsgPackProperty(\n lambda self: self.gene_gtf_dependencies,\n lambda self: self.get_additional_gene_gtfs(),\n )\n df_transcripts = MsgPackProperty(\n lambda self: [self.gene_gtf_dependencies, self.job_genes()],\n lambda self: self.get_additional_gene_gtfs(),\n )\n df_proteins = MsgPackProperty(\n lambda self: self.gene_gtf_dependencies,\n lambda self: self.get_additional_gene_gtfs(),\n )\n\n def get_genes_overlapping(self, chr, start, stop):\n raise ValueError(\n \"Use mbf_genomics.Genes.get_overlapping instead. 
This has no test cases.\"\n )\n check_overlap = lambda df, interval: np.max( # noqa: E731\n [\n np.zeros(len(df)),\n np.min(\n [df.stop.values, np.ones(len(df), dtype=int) * interval[1]], axis=0\n )\n - np.max(\n [df.start.values, np.ones(len(df), dtype=int) * interval[0]], axis=0\n ),\n ],\n axis=0,\n )\n filter = (self.df_genes[\"chr\"] == chr) & (\n check_overlap(self.df_genes, [start, stop]) > 0\n )\n return self.df_genes[filter]\n\n\n@class_with_downloads\nclass HardCodedGenome(GenomeBase):\n def __init__(self, name, chr_lengths, df_genes, df_transcripts, df_proteins):\n super().__init__()\n self.name = name\n self._chr_lengths = chr_lengths\n self._df_genes = df_genes\n self._df_transcripts = df_transcripts\n self._df_proteins = df_proteins\n\n def get_chromosome_lengths(self):\n return self._chr_lengths.copy()\n\n def _msg_pack_job(\n self, property_name, filename, callback_function, files_to_invariant_on\n ):\n pass\n", "id": "11807360", "language": "Python", "matching_score": 4.24968147277832, "max_stars_count": 0, "path": "src/mbf_genomes/base.py" }, { "content": "import pytest\nimport pandas as pd\n\n# from pathlib import Path\n# from mbf_externals import PrebuildManager\n# from mbf_genomes import EnsemblGenome\n# import pypipegraph as ppg\n\n\[email protected](\"new_pipegraph\")\nclass TestBase:\n pass\n\n\ndef test_msgpack_unpacking_class_wrong_property_name():\n from mbf_genomes.base import msgpack_unpacking_class, MsgPackProperty\n\n with pytest.raises(NotImplementedError):\n\n @msgpack_unpacking_class\n class Shu:\n def _pepare_dfnolower():\n return pd.DataFrame()\n\n dfnolower = MsgPackProperty()\n", "id": "5202417", "language": "Python", "matching_score": 1.255089282989502, "max_stars_count": 0, "path": "tests/test_genomebase.py" }, { "content": "import pytest\nimport pypipegraph as ppg\nimport itertools\nfrom pytest import approx\nimport pandas as pd\nimport numpy\nfrom mbf_genomics import DelayedDataFrame\nfrom mbf_genomics.annotator import Constant\nfrom mbf_comparisons import (\n Comparisons,\n Log2FC,\n TTest,\n TTestPaired,\n EdgeRUnpaired,\n EdgeRPaired,\n DESeq2Unpaired,\n NOISeq,\n DESeq2MultiFactor,\n)\nfrom mbf_qualitycontrol import prune_qc, get_qc_jobs\nfrom mbf_qualitycontrol.testing import assert_image_equal\nfrom mbf_sampledata import get_pasilla_data_subset\n\nfrom pypipegraph.testing import (\n # RaisesDirectOrInsidePipegraph,\n run_pipegraph,\n force_load,\n) # noqa: F401\nfrom dppd import dppd\n\ndp, X = dppd()\n\n\[email protected](\"both_ppg_and_no_ppg_no_qc\")\nclass TestComparisons:\n def test_simple(self):\n d = DelayedDataFrame(\"ex1\", pd.DataFrame({\"a\": [1, 2, 3], \"b\": [2, 8, 16 * 3]}))\n c = Comparisons(d, {\"a\": [\"a\"], \"b\": [\"b\"]})\n a = c.a_vs_b(\"a\", \"b\", Log2FC, laplace_offset=0)\n assert d.has_annotator(a)\n force_load(d.add_annotator(a), \"fl1\")\n run_pipegraph()\n assert (d.df[a[\"log2FC\"]] == [-1.0, -2.0, -4.0]).all()\n\n def test_simple_from_anno(self):\n d = DelayedDataFrame(\"ex1\", pd.DataFrame({\"a\": [1, 2, 3], \"b\": [2, 8, 16 * 3]}))\n a = Constant(\"five\", 5)\n b = Constant(\"ten\", 10)\n c = Comparisons(d, {\"a\": [a], \"b\": [b]})\n a = c.a_vs_b(\"a\", \"b\", Log2FC(), laplace_offset=0)\n force_load(d.add_annotator(a), \"fl1\")\n run_pipegraph()\n assert (d.df[a[\"log2FC\"]] == [-1, -1, -1]).all()\n\n def test_simple_from_anno_plus_column_name(self):\n d = DelayedDataFrame(\"ex1\", pd.DataFrame({\"a\": [1, 2, 3], \"b\": [2, 8, 16 * 3]}))\n a = Constant(\"five\", 5)\n b = Constant(\"ten\", 10)\n c = 
Comparisons(d, {\"a\": [(a, \"five\")], \"b\": [(b, \"ten\")]})\n a = c.a_vs_b(\"a\", \"b\", Log2FC(), laplace_offset=0)\n force_load(d.add_annotator(a), \"fl1\")\n run_pipegraph()\n assert (d.df[a[\"log2FC\"]] == [-1, -1, -1]).all()\n\n def test_simple_from_anno_plus_column_pos(self):\n d = DelayedDataFrame(\"ex1\", pd.DataFrame({\"a\": [1, 2, 3], \"b\": [2, 8, 16 * 3]}))\n a = Constant(\"five\", 5)\n b = Constant(\"ten\", 10)\n c = Comparisons(d, {\"a\": [(a, 0)], \"b\": [(b, 0)]})\n a = c.a_vs_b(\"a\", \"b\", Log2FC(), laplace_offset=0)\n force_load(d.add_annotator(a), \"fl1\")\n run_pipegraph()\n assert (d.df[a[\"log2FC\"]] == [-1, -1, -1]).all()\n\n def test_input_checking(self):\n d = DelayedDataFrame(\"ex1\", pd.DataFrame({\"a\": [1, 2, 3], \"b\": [2, 8, 16 * 3]}))\n with pytest.raises(ValueError):\n Comparisons(None, {})\n with pytest.raises(ValueError):\n Comparisons(d, {55: {\"a\"}, \"b\": [\"b\"]})\n\n def test_multi_plus_filter(self, clear_annotators):\n d = DelayedDataFrame(\n \"ex1\",\n pd.DataFrame(\n {\n \"a1\": [1 / 0.99, 2 / 0.99, 3 / 0.99],\n \"a2\": [1 * 0.99, 2 * 0.99, 3 * 0.99],\n \"b1\": [2 * 0.99, 8 * 0.99, (16 * 3) * 0.99],\n \"b2\": [2 / 0.99, 8 / 0.99, (16 * 3) / 0.99],\n \"delta\": [10, 20, 30],\n }\n ),\n )\n c = Comparisons(d, {\"a\": [\"a1\", \"a2\"], \"b\": [\"b1\", \"b2\"]})\n a = c.a_vs_b(\"a\", \"b\", Log2FC(), laplace_offset=0)\n anno1 = Constant(\"shu1\", 5)\n anno2 = Constant(\"shu2\", 5) # noqa: F841\n anno3 = Constant(\"shu3\", 5) # noqa: F841\n to_test = [\n ((\"log2FC\", \"==\", -1.0), [-1.0]),\n ((\"log2FC\", \">\", -2.0), [-1.0]),\n ((\"log2FC\", \"<\", -2.0), [-4.0]),\n ((\"log2FC\", \">=\", -2.0), [-1.0, -2.0]),\n ((\"log2FC\", \"<=\", -2.0), [-2.0, -4.0]),\n ((\"log2FC\", \"|>\", 2.0), [-4.0]),\n ((\"log2FC\", \"|<\", 2.0), [-1.0]),\n ((\"log2FC\", \"|>=\", 2.0), [-2.0, -4.0]),\n ((\"log2FC\", \"|<=\", 2.0), [-1.0, -2.0]),\n ((a[\"log2FC\"], \"<\", -2.0), [-4.0]),\n ((\"log2FC\", \"|\", -2.0), ValueError),\n ([(\"log2FC\", \"|>=\", 2.0), (\"log2FC\", \"<=\", 0)], [-2.0, -4.0]),\n ((anno1, \">=\", 5), [-1, -2.0, -4.0]),\n (((anno1, 0), \">=\", 5), [-1, -2.0, -4.0]),\n ((\"shu2\", \">=\", 5), [-1, -2.0, -4.0]),\n ((\"delta\", \">\", 10), [-2.0, -4.0]),\n ]\n if not ppg.inside_ppg(): # can't test for missing columns in ppg.\n to_test.extend([((\"log2FC_no_such_column\", \"<\", -2.0), KeyError)])\n filtered = {}\n for ii, (f, r) in enumerate(to_test):\n if r in (ValueError, KeyError):\n with pytest.raises(r):\n a.filter([f], \"new%i\" % ii)\n else:\n filtered[tuple(f)] = a.filter(\n [f] if isinstance(f, tuple) else f, \"new%i\" % ii\n )\n assert filtered[tuple(f)].name == \"new%i\" % ii\n force_load(filtered[tuple(f)].annotate(), filtered[tuple(f)].name)\n\n force_load(d.add_annotator(a), \"somethingsomethingjob\")\n run_pipegraph()\n c = a[\"log2FC\"]\n assert (d.df[c] == [-1.0, -2.0, -4.0]).all()\n for f, r in to_test:\n if r not in (ValueError, KeyError):\n try:\n assert filtered[tuple(f)].df[c].values == approx(r)\n except AssertionError:\n print(f)\n raise\n\n def test_ttest(self):\n data = pd.DataFrame(\n {\n \"A.R1\": [0, 0, 0, 0],\n \"A.R2\": [0, 0, 0, 0],\n \"A.R3\": [0, 0.001, 0.001, 0.001],\n \"B.R1\": [0.95, 0, 0.56, 0],\n \"B.R2\": [0.99, 0, 0.56, 0],\n \"B.R3\": [0.98, 0, 0.57, 0.5],\n \"C.R1\": [0.02, 0.73, 0.59, 0],\n \"C.R2\": [0.03, 0.75, 0.57, 0],\n \"C.R3\": [0.05, 0.7, 0.58, 1],\n }\n )\n ddf = DelayedDataFrame(\"ex1\", data)\n gts = {\n k: list(v)\n for (k, v) in itertools.groupby(sorted(data.columns), lambda x: x[0])\n }\n\n 
c = Comparisons(ddf, gts)\n a = c.a_vs_b(\"A\", \"B\", TTest)\n b = a.filter([(\"log2FC\", \">\", 2.38), (\"p\", \"<\", 0.05)])\n assert b.name == \"Filtered_A-B_log2FC_>_2.38__p_<_0.05\"\n force_load(ddf.add_annotator(a))\n run_pipegraph()\n # value calculated with R to double check.\n assert ddf.df[a[\"p\"]].iloc[0] == pytest.approx(8.096e-07, abs=1e-4)\n # value calculated with scipy to double check.\n assert ddf.df[a[\"p\"]].iloc[1] == pytest.approx(0.42264973081037427, abs=1e-4)\n assert ddf.df[a[\"p\"]].iloc[2] == pytest.approx(0.04157730613277929, abs=1e-4)\n assert ddf.df[a[\"p\"]].iloc[3] == pytest.approx(0.703158104919873, abs=1e-4)\n assert ddf.df[a[\"FDR\"]].values == pytest.approx(\n [3.238535e-06, 5.635329e-01, 8.315462e-02, 7.031581e-01], abs=1e-4\n )\n\n def test_ttest_min_sample_count(self):\n df = pd.DataFrame(\n {\"A.R1\": [0, 0, 0, 0], \"A.R2\": [0, 0, 0, 0], \"B.R1\": [0.95, 0, 0.56, 0]}\n )\n ddf = DelayedDataFrame(\"x\", df)\n gts = {\n k: list(v)\n for (k, v) in itertools.groupby(sorted(df.columns), lambda x: x[0])\n }\n\n c = Comparisons(ddf, gts)\n with pytest.raises(ValueError):\n c.a_vs_b(\"A\", \"B\", TTest())\n\n def test_ttest_paired(self):\n data = pd.DataFrame(\n {\n \"A.R1\": [0, 0, 0, 0],\n \"A.R2\": [0, 0, 0, 0],\n \"A.R3\": [0, 0.001, 0.001, 0.001],\n \"B.R1\": [0.95, 0, 0.56, 0],\n \"B.R2\": [0.99, 0, 0.56, 0],\n \"B.R3\": [0.98, 0, 0.57, 0.5],\n \"C.R1\": [0.02, 0.73, 0.59, 0],\n \"C.R2\": [0.03, 0.75, 0.57, 0],\n \"C.R3\": [0.05, 0.7, 0.58, 1],\n }\n )\n ddf = DelayedDataFrame(\"ex1\", data)\n gts = {\n k: list(v)\n for (k, v) in itertools.groupby(sorted(data.columns), lambda x: x[0])\n }\n\n c = Comparisons(ddf, gts)\n a = c.a_vs_b(\"A\", \"B\", TTestPaired())\n force_load(ddf.add_annotator(a))\n run_pipegraph()\n assert ddf.df[a[\"p\"]].iloc[0] == pytest.approx(8.096338300746213e-07, abs=1e-4)\n assert ddf.df[a[\"p\"]].iloc[1] == pytest.approx(0.42264973081037427, abs=1e-4)\n assert ddf.df[a[\"p\"]].iloc[2] == pytest.approx(0.041378369826042816, abs=1e-4)\n assert ddf.df[a[\"p\"]].iloc[3] == pytest.approx(0.42264973081037427, abs=1e-4)\n assert ddf.df[a[\"FDR\"]].values == pytest.approx(\n [3.238535e-06, 4.226497e-01, 8.275674e-02, 4.226497e-01], abs=1e-4\n )\n\n def test_double_comparison_with_different_strategies(self):\n data = pd.DataFrame(\n {\n \"A.R1\": [0, 0, 0, 0],\n \"A.R2\": [0, 0, 0, 0],\n \"A.R3\": [0, 0.001, 0.001, 0.001],\n \"B.R1\": [0.95, 0, 0.56, 0],\n \"B.R2\": [0.99, 0, 0.56, 0],\n \"B.R3\": [0.98, 0, 0.57, 0.5],\n \"C.R1\": [0.02, 0.73, 0.59, 0],\n \"C.R2\": [0.03, 0.75, 0.57, 0],\n \"C.R3\": [0.05, 0.7, 0.58, 1],\n }\n )\n ddf = DelayedDataFrame(\"ex1\", data)\n gts = {\n k: list(v)\n for (k, v) in itertools.groupby(sorted(data.columns), lambda x: x[0])\n }\n\n c = Comparisons(ddf, gts)\n a = c.a_vs_b(\"A\", \"B\", TTestPaired())\n force_load(ddf.add_annotator(a))\n b = c.a_vs_b(\"A\", \"B\", TTest())\n force_load(ddf.add_annotator(b))\n run_pipegraph()\n assert ddf.df[a[\"p\"]].iloc[0] == pytest.approx(8.096338300746213e-07, abs=1e-4)\n assert ddf.df[a[\"p\"]].iloc[1] == pytest.approx(0.42264973081037427, abs=1e-4)\n assert ddf.df[a[\"p\"]].iloc[2] == pytest.approx(0.041378369826042816, abs=1e-4)\n assert ddf.df[a[\"p\"]].iloc[3] == pytest.approx(0.42264973081037427, abs=1e-4)\n assert ddf.df[a[\"FDR\"]].values == pytest.approx(\n [3.238535e-06, 4.226497e-01, 8.275674e-02, 4.226497e-01], abs=1e-4\n )\n assert ddf.df[b[\"p\"]].iloc[0] == pytest.approx(8.096e-07, abs=1e-4)\n # value calculated with scipy to double check.\n 
assert ddf.df[b[\"p\"]].iloc[1] == pytest.approx(0.42264973081037427, abs=1e-4)\n assert ddf.df[b[\"p\"]].iloc[2] == pytest.approx(0.04157730613277929, abs=1e-4)\n assert ddf.df[b[\"p\"]].iloc[3] == pytest.approx(0.703158104919873, abs=1e-4)\n assert ddf.df[b[\"FDR\"]].values == pytest.approx(\n [3.238535e-06, 5.635329e-01, 8.315462e-02, 7.031581e-01], abs=1e-4\n )\n\n def _get_tuch_data(self):\n import mbf_sampledata\n import mbf_r\n import rpy2.robjects as ro\n\n path = mbf_sampledata.get_sample_path(\"mbf_comparisons/TuchEtAlS1.csv\")\n # directly from the manual.\n # plus minus \"\"\"To make\n # this file, we downloaded Table S1 from Tuch et al. [39], deleted some unnecessary columns\n # and edited the column headings slightly:\"\"\"\n ro.r(\n \"\"\"load_data = function(path) {\n rawdata <- read.delim(path, check.names=FALSE, stringsAsFactors=FALSE)\n library(edgeR)\n y <- DGEList(counts=rawdata[,3:8], genes=rawdata[,1:2])\n library(org.Hs.eg.db)\n idfound <- y$genes$idRefSeq %in% mappedRkeys(org.Hs.egREFSEQ)\n y <- y[idfound,]\n egREFSEQ <- toTable(org.Hs.egREFSEQ)\n m <- match(y$genes$idRefSeq, egREFSEQ$accession)\n y$genes$EntrezGene <- egREFSEQ$gene_id[m]\n egSYMBOL <- toTable(org.Hs.egSYMBOL)\n m <- match(y$genes$EntrezGene, egSYMBOL$gene_id)\n y$genes$Symbol <- egSYMBOL$symbol[m]\n\n o <- order(rowSums(y$counts), decreasing=TRUE)\n y <- y[o,]\n d <- duplicated(y$genes$Symbol)\n y <- y[!d,]\n\n cbind(y$genes, y$counts)\n }\n\"\"\"\n )\n df = mbf_r.convert_dataframe_from_r(ro.r(\"load_data\")(str(path)))\n df.columns = [\n \"idRefSeq\",\n \"nameOfGene\",\n \"EntrezGene\",\n \"Symbol\",\n \"8.N\",\n \"8.T\",\n \"33.N\",\n \"33.T\",\n \"51.N\",\n \"51.T\",\n ]\n assert len(df) == 10519\n return df\n\n def test_edgeR(self):\n df = self._get_tuch_data()\n\n ddf = DelayedDataFrame(\"ex1\", df)\n gts = {\n \"T\": [x for x in df.columns if \".T\" in x],\n \"N\": [x for x in df.columns if \".N\" in x],\n }\n\n c = Comparisons(ddf, gts)\n a = c.a_vs_b(\"T\", \"N\", EdgeRUnpaired())\n force_load(ddf.add_annotator(a))\n run_pipegraph()\n # these are from the last run - the manual has no simple a vs b comparison...\n # at least we'l notice if this changes\n assert ddf.df[ddf.df.nameOfGene == \"PTHLH\"][a[\"log2FC\"]].values == approx(\n [4.003122]\n )\n assert ddf.df[ddf.df.nameOfGene == \"PTHLH\"][a[\"FDR\"]].values == approx(\n [1.332336e-11]\n )\n assert ddf.df[ddf.df.nameOfGene == \"PTHLH\"][a[\"p\"]].values == approx(\n [5.066397e-15]\n )\n df = ddf.df.set_index(\"nameOfGene\")\n t_columns = [x[1] for x in gts[\"T\"]]\n n_columns = [x[1] for x in gts[\"N\"]]\n assert df.loc[\"PTHLH\"][t_columns].sum() > df.loc[\"PTHLH\"][n_columns].sum()\n\n assert ddf.df[ddf.df.nameOfGene == \"PTGFR\"][a[\"log2FC\"]].values == approx(\n [-5.127508]\n )\n assert ddf.df[ddf.df.nameOfGene == \"PTGFR\"][a[\"FDR\"]].values == approx(\n [6.470885e-10]\n )\n assert ddf.df[ddf.df.nameOfGene == \"PTGFR\"][a[\"p\"]].values == approx(\n [3.690970e-13]\n )\n assert df.loc[\"PTGFR\"][t_columns].sum() < df.loc[\"PTGFR\"][n_columns].sum()\n\n def test_edgeR_paired(self):\n df = self._get_tuch_data()\n\n ddf = DelayedDataFrame(\"ex1\", df)\n gts = {\n \"T\": [x for x in sorted(df.columns) if \".T\" in x],\n \"N\": [x for x in sorted(df.columns) if \".N\" in x],\n }\n\n c = Comparisons(ddf, gts)\n a = c.a_vs_b(\"T\", \"N\", EdgeRPaired())\n force_load(ddf.add_annotator(a))\n run_pipegraph()\n # these are from the last run - the manual has no simple a vs b comparison...\n # at least we'l notice if this changes\n assert 
ddf.df[ddf.df.nameOfGene == \"PTHLH\"][a[\"log2FC\"]].values == approx(\n [3.97], abs=1e-3\n )\n assert ddf.df[ddf.df.nameOfGene == \"PTHLH\"][a[\"FDR\"]].values == approx(\n [4.27e-18]\n )\n assert ddf.df[ddf.df.nameOfGene == \"PTHLH\"][a[\"p\"]].values == approx([8.13e-22])\n df = ddf.df.set_index(\"nameOfGene\")\n t_columns = [x[1] for x in gts[\"T\"]]\n n_columns = [x[1] for x in gts[\"N\"]]\n assert df.loc[\"PTHLH\"][t_columns].sum() > df.loc[\"PTHLH\"][n_columns].sum()\n\n assert ddf.df[ddf.df.nameOfGene == \"PTGFR\"][a[\"log2FC\"]].values == approx(\n [-5.18], abs=1e-2\n )\n assert ddf.df[ddf.df.nameOfGene == \"PTGFR\"][a[\"FDR\"]].values == approx(\n [3.17e-19]\n )\n assert ddf.df[ddf.df.nameOfGene == \"PTGFR\"][a[\"p\"]].values == approx([3.01e-23])\n assert df.loc[\"PTGFR\"][t_columns].sum() < df.loc[\"PTGFR\"][n_columns].sum()\n\n def test_edgeR_filter_on_max_count(self):\n ddf, a, b = get_pasilla_data_subset()\n gts = {\"T\": a, \"N\": b}\n c = Comparisons(ddf, gts)\n a = c.a_vs_b(\"T\", \"N\", EdgeRUnpaired(ignore_if_max_count_less_than=100))\n force_load(ddf.add_annotator(a))\n run_pipegraph()\n assert pd.isnull(ddf.df[a[\"log2FC\"]]).any()\n assert (pd.isnull(ddf.df[a[\"log2FC\"]]) == pd.isnull(ddf.df[a[\"p\"]])).all()\n assert (pd.isnull(ddf.df[a[\"FDR\"]]) == pd.isnull(ddf.df[a[\"p\"]])).all()\n\n def test_deseq2(self):\n import mbf_sampledata\n\n pasilla_data = pd.read_csv(\n mbf_sampledata.get_sample_path(\n \"mbf_comparisons/pasillaCount_deseq2.tsv.gz\"\n ),\n sep=\" \",\n )\n # pasilla_data = pasilla_data.set_index('Gene')\n pasilla_data.columns = [str(x) for x in pasilla_data.columns]\n gts = {\n \"treated\": [x for x in pasilla_data.columns if x.startswith(\"treated\")],\n \"untreated\": [x for x in pasilla_data.columns if x.startswith(\"untreated\")],\n }\n ddf = DelayedDataFrame(\"ex\", pasilla_data)\n c = Comparisons(ddf, gts)\n a = c.a_vs_b(\"treated\", \"untreated\", DESeq2Unpaired())\n force_load(ddf.add_annotator(a))\n run_pipegraph()\n check = \"\"\"# This is deseq2 version specific data- probably needs fixing if upgrading deseq2\n## baseMean log2FoldChange lfcSE stat pvalue padj\n## <numeric> <numeric> <numeric> <numeric> <numeric> <numeric>\n## FBgn0039155 453 -3.72 0.160 -23.2 1.63e-119 1.35e-115\n## FBgn0029167 2165 -2.08 0.103 -20.3 1.43e-91 5.91e-88\n## FBgn0035085 367 -2.23 0.137 -16.3 6.38e-60 1.75e-56\n## FBgn0029896 258 -2.21 0.159 -13.9 5.40e-44 1.11e-40\n## FBgn0034736 118 -2.56 0.185 -13.9 7.66e-44 1.26e-40\n\"\"\"\n df = ddf.df.sort_values(a[\"FDR\"])\n df = df.set_index(\"Gene\")\n for row in check.split(\"\\n\"):\n row = row.strip()\n if row and not row[0] == \"#\":\n row = row.split()\n self.assertAlmostEqual(\n df.ix[row[0]][a[\"log2FC\"]], float(row[2]), places=2\n )\n self.assertAlmostEqual(df.ix[row[0]][a[\"p\"]], float(row[5]), places=2)\n self.assertAlmostEqual(df.ix[row[0]][a[\"FDR\"]], float(row[6]), places=2)\n\n def _get_pasilla_3(self):\n import mbf_sampledata\n\n pasilla_data = pd.read_csv(\n mbf_sampledata.get_sample_path(\n \"mbf_comparisons/pasillaCount_deseq2.tsv.gz\"\n ),\n sep=\" \",\n )\n pasilla_data = pasilla_data.set_index(\"Gene\")\n pasilla_data.columns = [str(x) for x in pasilla_data.columns]\n seed = 12345\n numpy.random.seed(seed)\n for i in range(3):\n pasilla_data[f\"other{i}fb\"] = (\n pasilla_data[\"untreated4fb\"].values\n + numpy.abs(numpy.random.randn(len(pasilla_data)) * 10)\n ).astype(int)\n for i in range(3):\n pasilla_data[f\"otherse{i}fb\"] = (\n pasilla_data[\"untreated4fb\"].values\n + 
numpy.abs(numpy.random.randn(len(pasilla_data)) * 15)\n ).astype(int)\n return pasilla_data\n\n def test_deseq2_3groups(self):\n import mbf_r\n import rpy2.robjects as robjects\n\n robjects.r(\"library(DESeq2)\")\n pasilla_data = self._get_pasilla_3()\n condition_data = pd.DataFrame(\n {\n \"condition\": [x[:-3] for x in pasilla_data.columns],\n \"type\": [\n \"se\"\n if x\n in [\"treated1fb\", \"untreated1fb\", \"untreated2fb\"]\n + [f\"otherse{i}fb\" for i in range(3)]\n else \"pe\"\n for x in pasilla_data.columns\n ],\n },\n index=pasilla_data.columns,\n )\n gts = {}\n for cond, sub in condition_data.groupby(\"condition\"):\n gts[cond] = list(sub.index.values)\n cts = mbf_r.convert_dataframe_to_r(pasilla_data)\n col = mbf_r.convert_dataframe_to_r(condition_data)\n rresults = robjects.r(\n \"\"\"\n function (cts, col){\n dds = DESeqDataSetFromMatrix(countData=cts, colData=col, design = ~ condition)\n dds = DESeq(dds)\n print(resultsNames(dds))\n res = results(dds, contrast=c(\"condition\", \"treated\", \"untreated\"))\n res = as.data.frame(res)\n res\n }\n \"\"\"\n )(cts=cts, col=col)\n ddf = DelayedDataFrame(\"ex\", pasilla_data)\n c = Comparisons(ddf, gts)\n a = c.a_vs_b(\n \"treated\",\n \"untreated\",\n DESeq2Unpaired(),\n include_other_samples_for_variance=True,\n )\n force_load(ddf.add_annotator(a))\n run_pipegraph()\n rresults = mbf_r.convert_dataframe_from_r(rresults)\n numpy.testing.assert_almost_equal(\n rresults[\"log2FoldChange\"].values,\n ddf.df[\n \"Comp. treated - untreated log2FC (DESeq2unpaired,Other=True)\"\n ].values,\n decimal=4,\n )\n numpy.testing.assert_almost_equal(\n rresults[\"pvalue\"].values,\n ddf.df[\"Comp. treated - untreated p (DESeq2unpaired,Other=True)\"].values,\n decimal=4,\n )\n numpy.testing.assert_almost_equal(\n rresults[\"padj\"].values,\n ddf.df[\"Comp. 
treated - untreated FDR (DESeq2unpaired,Other=True)\"].values,\n decimal=4,\n )\n\n def test_deseq2_multi(self):\n import mbf_r\n import rpy2.robjects as robjects\n\n robjects.r(\"library(DESeq2)\")\n pasilla_data = self._get_pasilla_3()\n condition_data = pd.DataFrame(\n {\n \"condition\": [\n \"treated\"\n if (x.startswith(\"treated\") | x.startswith(\"otherse\"))\n else \"base.untreated\"\n for x in pasilla_data.columns\n ],\n \"type\": [\n \"se\" if x.startswith(\"other\") else \"pe\"\n for x in pasilla_data.columns\n ],\n },\n index=pasilla_data.columns,\n )\n gts = {}\n groups = []\n df_factors = pd.DataFrame(\n {\n \"group\": [\n \"base.untreated_pe\",\n \"base.untreated_se\",\n \"treated_pe\",\n \"treated_se\",\n ],\n \"condition\": [\"base.untreated\", \"base.untreated\", \"treated\", \"treated\"],\n \"type\": [\"pe\", \"se\", \"pe\", \"se\"],\n }\n )\n for cond1, sub in condition_data.groupby(\"condition\"):\n for cond2, sub2 in sub.groupby(\"type\"):\n group = f\"{cond1}_{cond2}\"\n groups.extend([group for i in sub2.index])\n gts[group] = list(sub2.index.values)\n cts = mbf_r.convert_dataframe_to_r(pasilla_data)\n col = mbf_r.convert_dataframe_to_r(condition_data)\n rresults_pe = robjects.r(\n \"\"\"\n function (cts, col){\n dds = DESeqDataSetFromMatrix(countData=cts, colData=col, design = ~ type + condition + type:condition)\n dds = DESeq(dds)\n res = results(dds, contrast=c(\"condition\", \"treated\", \"base.untreated\"))\n res = as.data.frame(res)\n res\n }\n \"\"\"\n )(cts=cts, col=col)\n rresults_se = robjects.r(\n \"\"\"\n function (cts, col){\n dds = DESeqDataSetFromMatrix(countData=cts, colData=col, design = ~ type + condition + type:condition)\n dds = DESeq(dds)\n res = results(dds, list( c(\"condition_treated_vs_base.untreated\", \"typese.conditiontreated\") ))\n res = as.data.frame(res)\n res\n }\n \"\"\"\n )(cts=cts, col=col)\n ddf = DelayedDataFrame(\"ex\", pasilla_data)\n c = Comparisons(ddf, gts)\n factor_reference = {\"condition\": \"base.untreated\", \"type\": \"pe\"}\n condition_data[\"group\"] = groups\n a = c.multi(\n name=\"multi\",\n main_factor=\"condition\",\n factor_reference=factor_reference,\n df_factors=df_factors,\n interactions=[(\"type\", \"condition\")],\n method=DESeq2MultiFactor(),\n test_difference=True,\n compare_non_reference=False,\n )\n force_load(ddf.add_annotator(a))\n run_pipegraph()\n rresults_pe = mbf_r.convert_dataframe_from_r(rresults_pe)\n rresults_se = mbf_r.convert_dataframe_from_r(rresults_se)\n numpy.testing.assert_almost_equal(\n rresults_pe[\"log2FoldChange\"].values,\n ddf.df[\n \"treated:base.untreated(condition) effect for pe(type) log2FC (Comp. multi)\"\n ].values,\n decimal=4,\n )\n numpy.testing.assert_almost_equal(\n rresults_pe[\"padj\"].values,\n ddf.df[\n \"treated:base.untreated(condition) effect for pe(type) FDR (Comp. multi)\"\n ].values,\n decimal=4,\n )\n numpy.testing.assert_almost_equal(\n rresults_pe[\"pvalue\"].values,\n ddf.df[\n \"treated:base.untreated(condition) effect for pe(type) p (Comp. multi)\"\n ].values,\n decimal=4,\n )\n numpy.testing.assert_almost_equal(\n rresults_se[\"log2FoldChange\"].values,\n ddf.df[\n \"treated:base.untreated(condition) effect for se:pe(type) log2FC (Comp. multi)\"\n ].values,\n decimal=4,\n )\n numpy.testing.assert_almost_equal(\n rresults_se[\"padj\"].values,\n ddf.df[\n \"treated:base.untreated(condition) effect for se:pe(type) FDR (Comp. 
multi)\"\n ].values,\n decimal=4,\n )\n numpy.testing.assert_almost_equal(\n rresults_se[\"pvalue\"].values,\n ddf.df[\n \"treated:base.untreated(condition) effect for se:pe(type) p (Comp. multi)\"\n ].values,\n decimal=4,\n )\n\n def test_other_sample_dependencies(self):\n import mbf_sampledata\n\n pasilla_data = pd.read_csv(\n mbf_sampledata.get_sample_path(\n \"mbf_comparisons/pasillaCount_deseq2.tsv.gz\"\n ),\n sep=\" \",\n )\n # pasilla_data = pasilla_data.set_index('Gene')\n pasilla_data.columns = [str(x) for x in pasilla_data.columns]\n\n gts = {\n \"treated\": [x for x in pasilla_data.columns if x.startswith(\"treated\")],\n \"untreated\": [x for x in pasilla_data.columns if x.startswith(\"untreated\")],\n }\n ddf = DelayedDataFrame(\"ex\", pasilla_data)\n c = Comparisons(ddf, gts)\n a = c.a_vs_b(\"treated\", \"untreated\", DESeq2Unpaired())\n force_load(ddf.add_annotator(a))\n run_pipegraph()\n check = \"\"\"# This is deseq2 version specific data- probably needs fixing if upgrading deseq2\n## baseMean log2FoldChange lfcSE stat pvalue padj\n## <numeric> <numeric> <numeric> <numeric> <numeric> <numeric>\n## FBgn0039155 453 -3.72 0.160 -23.2 1.63e-119 1.35e-115\n## FBgn0029167 2165 -2.08 0.103 -20.3 1.43e-91 5.91e-88\n## FBgn0035085 367 -2.23 0.137 -16.3 6.38e-60 1.75e-56\n## FBgn0029896 258 -2.21 0.159 -13.9 5.40e-44 1.11e-40\n## FBgn0034736 118 -2.56 0.185 -13.9 7.66e-44 1.26e-40\n\"\"\"\n df = ddf.df.sort_values(a[\"FDR\"])\n df = df.set_index(\"Gene\")\n for row in check.split(\"\\n\"):\n row = row.strip()\n if row and not row[0] == \"#\":\n row = row.split()\n self.assertAlmostEqual(\n df.ix[row[0]][a[\"log2FC\"]], float(row[2]), places=2\n )\n self.assertAlmostEqual(df.ix[row[0]][a[\"p\"]], float(row[5]), places=2)\n self.assertAlmostEqual(df.ix[row[0]][a[\"FDR\"]], float(row[6]), places=2)\n\n def _get_marioni_data(self):\n import mbf_r\n import rpy2.robjects as robjects\n\n robjects.r(\"library(NOISeq)\")\n robjects.r(\"data(Marioni)\")\n counts = mbf_r.convert_dataframe_from_r(robjects.r(\"mycounts\"))\n counts[\"gene_stable_id\"] = counts.index.values\n factors = mbf_r.convert_dataframe_from_r(robjects.r(\"myfactors\"))\n chroms = mbf_r.convert_dataframe_from_r(robjects.r(\"mychroms\"))\n counts[\"chr\"] = chroms[\"Chr\"]\n counts[\"start\"] = chroms[\"GeneStart\"]\n counts[\"stop\"] = chroms[\"GeneEnd\"]\n biotypes = robjects.r(\"mybiotypes\")\n counts[\"biotype\"] = biotypes\n mynoiseq = robjects.r(\n \"\"\"\n mydata = readData(data=mycounts, length = mylength, biotype = mybiotypes, chromosome=mychroms, factors=myfactors)\n mynoiseq = noiseq(mydata, k = 0.5, norm = \"tmm\", factor = \"Tissue\", pnr = 0.2, nss = 5, v = 0.02, lc = 0, replicates = \"technical\")\n \"\"\"\n )\n results = robjects.r(\"function(mynoiseq){as.data.frame(mynoiseq@results)}\")(\n mynoiseq\n )\n results = mbf_r.convert_dataframe_from_r(results)\n up = robjects.r(\n \"function(mynoiseseq){as.data.frame(degenes(mynoiseq, q = 0.8, M = 'up'))}\"\n )(mynoiseq)\n up = mbf_r.convert_dataframe_from_r(up)\n return counts, factors, results, up\n\n def test_noiseq(self):\n df_counts, df_factors, results, up = self._get_marioni_data()\n ddf = DelayedDataFrame(\"ex1\", df_counts)\n gts = {}\n for tissue, sub in df_factors.groupby(\"Tissue\"):\n gts[tissue] = list(sub.index.values)\n c = Comparisons(ddf, gts)\n noise = NOISeq()\n assert noise.norm == \"tmm\"\n assert noise.lc == 0\n assert noise.v == 0.02\n assert noise.nss == 5\n a = c.a_vs_b(\"Kidney\", \"Liver\", noise, laplace_offset=0.5)\n 
force_load(ddf.add_annotator(a))\n run_pipegraph()\n numpy.testing.assert_array_equal(\n results[\"ranking\"], ddf.df[\"Comp. Kidney - Liver Rank (NOIseq,Other=False)\"]\n )\n numpy.testing.assert_array_equal(\n results[\"prob\"], ddf.df[\"Comp. Kidney - Liver Prob (NOIseq,Other=False)\"]\n )\n numpy.testing.assert_array_equal(\n results[\"M\"], ddf.df[\"Comp. Kidney - Liver log2FC (NOIseq,Other=False)\"]\n )\n numpy.testing.assert_array_equal(\n results[\"D\"], ddf.df[\"Comp. Kidney - Liver D (NOIseq,Other=False)\"]\n )\n upregulated = ddf.df[\n (ddf.df[\"Comp. Kidney - Liver Prob (NOIseq,Other=False)\"] >= 0.8)\n & (ddf.df[\"Comp. Kidney - Liver log2FC (NOIseq,Other=False)\"] > 0)\n ]\n genes_up = set(upregulated[\"gene_stable_id\"])\n genes_should_up = set(up.index.values)\n assert (\n len(genes_up.intersection(genes_should_up))\n == len(genes_up)\n == len(genes_should_up)\n )\n\n\[email protected](\"no_pipegraph\")\nclass TestComparisonsNoPPG:\n def test_deseq2_with_and_without_additional_columns(self):\n import mbf_sampledata\n\n pasilla_data = pd.read_csv(\n mbf_sampledata.get_sample_path(\n \"mbf_comparisons/pasillaCount_deseq2.tsv.gz\"\n ),\n sep=\" \",\n )\n # pasilla_data = pasilla_data.set_index('Gene')\n pasilla_data.columns = [str(x) for x in pasilla_data.columns]\n print(pasilla_data.columns)\n pasilla_data = pasilla_data.assign(\n treated_fake=pasilla_data.treated2fb,\n untreated_fake=pasilla_data.untreated2fb,\n )\n\n gts = {\n \"treated\": [\n x\n for x in pasilla_data.columns\n if x.startswith(\"treated\") and \"3\" not in x\n ],\n \"untreated\": [\n x\n for x in pasilla_data.columns\n if x.startswith(\"untreated\") and \"3\" not in x\n ],\n \"other\": [x for x in pasilla_data.columns if \"3\" in x],\n }\n assert len(gts[\"other\"]) == 2\n assert sum((len(x) for x in gts.values())) + 1 == len(\n pasilla_data.columns\n ) # GeneId\n ddf = DelayedDataFrame(\"ex\", pasilla_data)\n c = Comparisons(ddf, gts)\n with_other = c.a_vs_b(\n \"treated\",\n \"untreated\",\n DESeq2Unpaired(),\n include_other_samples_for_variance=True,\n )\n without_other = c.a_vs_b(\n \"treated\",\n \"untreated\",\n DESeq2Unpaired(),\n include_other_samples_for_variance=False,\n )\n force_load(ddf.add_annotator(with_other))\n force_load(ddf.add_annotator(without_other))\n # run_pipegraph()\n df = ddf.df\n print(df.head())\n df.to_csv(\"test.csv\")\n # this is a fairly weak test, but it shows that it at least does *something*\n assert (df[with_other[\"p\"]] != pytest.approx(df[without_other[\"p\"]])).all()\n assert (\n df[with_other[\"log2FC\"]] != pytest.approx(df[without_other[\"log2FC\"]])\n ).all()\n\n\[email protected](\"new_pipegraph\")\nclass TestQC:\n def test_distribution(self):\n ppg.util.global_pipegraph.quiet = False\n import mbf_sampledata\n\n pasilla_data = pd.read_csv(\n mbf_sampledata.get_sample_path(\n \"mbf_comparisons/pasillaCount_deseq2.tsv.gz\"\n ),\n sep=\" \",\n )\n # pasilla_data = pasilla_data.set_index('Gene')\n pasilla_data.columns = [str(x) for x in pasilla_data.columns]\n treated = [x for x in pasilla_data.columns if x.startswith(\"treated\")]\n untreated = [x for x in pasilla_data.columns if x.startswith(\"untreated\")]\n pasilla_data = DelayedDataFrame(\"pasilla\", pasilla_data)\n Comparisons(pasilla_data, {\"treated\": treated, \"untreated\": untreated})\n prune_qc(lambda job: \"distribution\" in job.job_id)\n run_pipegraph()\n qc_jobs = list(get_qc_jobs())\n qc_jobs = [x for x in qc_jobs if not x._pruned]\n print(qc_jobs)\n assert len(qc_jobs) == 1\n 
assert_image_equal(qc_jobs[0].filenames[0])\n\n def test_pca(self):\n ppg.util.global_pipegraph.quiet = False\n import mbf_sampledata\n\n pasilla_data = pd.read_csv(\n mbf_sampledata.get_sample_path(\n \"mbf_comparisons/pasillaCount_deseq2.tsv.gz\"\n ),\n sep=\" \",\n )\n # pasilla_data = pasilla_data.set_index('Gene')\n pasilla_data.columns = [str(x) for x in pasilla_data.columns]\n treated = [x for x in pasilla_data.columns if x.startswith(\"treated\")]\n untreated = [x for x in pasilla_data.columns if x.startswith(\"untreated\")]\n pasilla_data = DelayedDataFrame(\"pasilla\", pasilla_data)\n Comparisons(pasilla_data, {\"treated\": treated, \"untreated\": untreated})\n prune_qc(lambda job: \"pca\" in job.job_id)\n run_pipegraph()\n qc_jobs = list(get_qc_jobs())\n qc_jobs = [x for x in qc_jobs if not x._pruned]\n print(qc_jobs)\n assert len(qc_jobs) == 1\n assert_image_equal(qc_jobs[0].filenames[0])\n\n def test_correlation(self):\n ppg.util.global_pipegraph.quiet = False\n import mbf_sampledata\n\n pasilla_data = pd.read_csv(\n mbf_sampledata.get_sample_path(\n \"mbf_comparisons/pasillaCount_deseq2.tsv.gz\"\n ),\n sep=\" \",\n )\n # pasilla_data = pasilla_data.set_index('Gene')\n pasilla_data.columns = [str(x) for x in pasilla_data.columns]\n treated = [x for x in pasilla_data.columns if x.startswith(\"treated\")]\n untreated = [x for x in pasilla_data.columns if x.startswith(\"untreated\")]\n pasilla_data = DelayedDataFrame(\"pasilla\", pasilla_data)\n Comparisons(pasilla_data, {\"treated\": treated, \"untreated\": untreated})\n prune_qc(lambda job: \"correlation\" in job.job_id)\n run_pipegraph()\n qc_jobs = list(get_qc_jobs())\n qc_jobs = [x for x in qc_jobs if not x._pruned]\n print(qc_jobs)\n assert len(qc_jobs) == 1\n assert_image_equal(qc_jobs[0].filenames[0])\n\n def test_volcano_plot(self):\n ppg.util.global_pipegraph.quiet = False\n import mbf_sampledata\n\n pasilla_data = pd.read_csv(\n mbf_sampledata.get_sample_path(\n \"mbf_comparisons/pasillaCount_deseq2.tsv.gz\"\n ),\n sep=\" \",\n )\n # pasilla_data = pasilla_data.set_index('Gene')\n pasilla_data.columns = [str(x) for x in pasilla_data.columns]\n treated = [x for x in pasilla_data.columns if x.startswith(\"treated\")]\n untreated = [x for x in pasilla_data.columns if x.startswith(\"untreated\")]\n pasilla_data = DelayedDataFrame(\"pasilla\", pasilla_data)\n comp = Comparisons(\n pasilla_data, {\"treated\": treated, \"untreated\": untreated}\n ).a_vs_b(\"treated\", \"untreated\", TTest())\n comp.filter([(\"log2FC\", \"|>=\", 2.0), (\"FDR\", \"<=\", 0.05)])\n prune_qc(lambda job: \"volcano\" in job.job_id)\n run_pipegraph()\n qc_jobs = list(get_qc_jobs())\n qc_jobs = [x for x in qc_jobs if not x._pruned]\n print(qc_jobs)\n assert len(qc_jobs) == 1\n assert_image_equal(qc_jobs[0].filenames[0])\n\n @pytest.mark.skip # no ma plots right now, they're way to slow for general usage :(\n def test_ma_plot(self):\n ppg.util.global_pipegraph.quiet = False\n pasilla_data, treated, untreated = get_pasilla_data_subset()\n import numpy\n\n numpy.random.seed(500)\n\n comp = Comparisons(\n pasilla_data, {\"treated\": treated, \"untreated\": untreated}\n ).a_vs_b(\"treated\", \"untreated\", TTest(), laplace_offset=1)\n\n comp.filter(\n [\n (\"log2FC\", \"|>=\", 2.0),\n # ('FDR', '<=', 0.05),\n ]\n )\n prune_qc(lambda job: \"ma_plot\" in job.job_id)\n run_pipegraph()\n qc_jobs = list(get_qc_jobs())\n qc_jobs = [x for x in qc_jobs if not x._pruned]\n assert len(qc_jobs) == 1\n assert_image_equal(qc_jobs[0].filenames[0])\n", "id": "2413943", 
"language": "Python", "matching_score": 5.810509204864502, "max_stars_count": 0, "path": "tests/test_comparisons.py" }, { "content": "from typing import List, Tuple, Dict, Callable\nfrom pandas import DataFrame\nfrom pypipegraph import Job\nfrom statsmodels.stats.multitest import multipletests\nfrom mbf_genomics.genes.anno_tag_counts import IntervalStrategyGene\nfrom mbf_genomes import GenomeBase\nimport pypipegraph as ppg\nimport pandas as pd\nimport scipy.stats as ss\nimport numpy as np\nimport rpy2.robjects as robjects\nimport rpy2.robjects.numpy2ri as numpy2ri\nimport mbf_r\nimport re\n\n\nclass Log2FC:\n min_sample_count = 0\n supports_other_samples = False\n\n def __init__(self):\n self.columns = [\"log2FC\", \"minExpression\"]\n self.name = \"simple\"\n\n def compare(self, df, columns_a, columns_b, columns_other, laplace_offset):\n a = np.log2(df[columns_a] + laplace_offset)\n b = np.log2(df[columns_b] + laplace_offset)\n logFC = a.mean(axis=1, skipna=True) - b.mean(axis=1, skipna=True)\n min_expression = df[columns_a + columns_b].min(axis=1)\n return pd.DataFrame({\"log2FC\": logFC, \"minExpression\": min_expression})\n\n\nclass TTest:\n \"\"\"Standard students t-test, independent on log2FC + <NAME>\"\"\"\n\n min_sample_count = 3\n supports_other_samples = False\n\n def __init__(self, equal_variance=False):\n self.equal_var = equal_variance\n self.columns = [\"log2FC\", \"p\", \"FDR\"]\n self.name = \"ttest\"\n\n def compare(self, df, columns_a, columns_b, columns_other, laplace_offset):\n a = np.log2(df[columns_a] + laplace_offset)\n b = np.log2(df[columns_b] + laplace_offset)\n logFC = a.mean(axis=1, skipna=True) - b.mean(axis=1, skipna=True)\n p = ss.ttest_ind(a, b, axis=1, equal_var=self.equal_var, nan_policy=\"omit\")[1]\n fdr = multipletests(p, method=\"fdr_bh\")[1]\n return pd.DataFrame({\"log2FC\": logFC, \"p\": p, \"FDR\": fdr})\n\n\nclass TTestPaired:\n \"\"\"Standard students t-test, paired, on log2FC + benjamini hochberg\"\"\"\n\n min_sample_count = 3\n supports_other_samples = False\n\n def __init__(self):\n self.columns = [\"log2FC\", \"p\", \"FDR\"]\n self.name = \"ttest_paired\"\n\n def compare(self, df, columns_a, columns_b, columns_other, laplace_offset):\n a = np.log2(df[columns_a] + laplace_offset)\n b = np.log2(df[columns_b] + laplace_offset)\n logFC = a.mean(axis=1, skipna=True) - b.mean(axis=1, skipna=True)\n p = ss.ttest_rel(a, b, axis=1, nan_policy=\"omit\")[1]\n fdr = multipletests(p, method=\"fdr_bh\")[1]\n return pd.DataFrame({\"log2FC\": logFC, \"p\": p, \"FDR\": fdr})\n\n\nclass EdgeRUnpaired:\n\n min_sample_count = 3\n name = \"edgeRUnpaired\"\n columns = [\"log2FC\", \"p\", \"FDR\"]\n supports_other_samples = False\n\n def __init__(self, ignore_if_max_count_less_than=None, manual_dispersion_value=0.4):\n self.ignore_if_max_count_less_than = ignore_if_max_count_less_than\n self.manual_dispersion_value = manual_dispersion_value\n\n def deps(self):\n import rpy2.robjects as ro\n\n ro.r(\"library('edgeR')\")\n version = str(ro.r(\"packageVersion\")(\"edgeR\"))\n return ppg.ParameterInvariant(\n self.__class__.__name__ + \"_\" + self.name,\n (version, self.ignore_if_max_count_less_than),\n )\n\n def edgeR_comparison(\n self, df, columns_a, columns_b, library_sizes=None, manual_dispersion_value=0.4\n ):\n \"\"\"Call edgeR exactTest comparing two groups.\n Resulting dataframe is in df order.\n \"\"\"\n import mbf_r\n import math\n import rpy2.robjects as ro\n import rpy2.robjects.numpy2ri as numpy2ri\n\n ro.r(\"library(edgeR)\")\n input_df = df[columns_a + 
columns_b]\n input_df.columns = [\"X_%i\" % x for x in range(len(input_df.columns))]\n if library_sizes is not None: # pragma: no cover\n samples = pd.DataFrame({\"lib.size\": library_sizes})\n else:\n samples = pd.DataFrame({\"lib.size\": input_df.sum(axis=0)})\n # this looks like it inverts the columns,\n # but it doesnt'\n samples.insert(0, \"group\", [\"z\"] * len(columns_a) + [\"x\"] * len(columns_b))\n r_counts = mbf_r.convert_dataframe_to_r(input_df)\n r_samples = mbf_r.convert_dataframe_to_r(samples)\n y = ro.r(\"DGEList\")(\n counts=r_counts,\n samples=r_samples,\n **{\n \"lib.size\": ro.r(\"as.vector\")(\n numpy2ri.py2rpy(np.array(samples[\"lib.size\"]))\n )\n },\n )\n # apply TMM normalization\n y = ro.r(\"calcNormFactors\")(y)\n if len(columns_a) == 1 and len(columns_b) == 1: # pragma: no cover\n # not currently used.\n z = manual_dispersion_value\n e = ro.r(\"exactTest\")(y, dispersion=math.pow(manual_dispersion_value, 2))\n \"\"\"\n you are attempting to estimate dispersions without any replicates.\n Since this is not possible, there are several inferior workarounds to come up with something\n still semi-useful.\n 1. pick a reasonable dispersion value from \"Experience\": 0.4 for humans, 0.1 for genetically identical model organisms, 0.01 for technical replicates. We'll try this for now.\n 2. estimate dispersions on a number of genes that you KNOW to be not differentially expressed.\n 3. In case of multiple factor experiments, discard the least important factors and treat the samples as replicates.\n 4. just use logFC and forget about significance.\n \"\"\"\n else:\n z = ro.r(\"estimateDisp\")(y, robust=True)\n e = ro.r(\"exactTest\")(z)\n res = ro.r(\"topTags\")(e, n=len(input_df), **{\"sort.by\": \"none\"})\n result = mbf_r.convert_dataframe_from_r(res[0])\n return result\n\n def compare(self, df, columns_a, columns_b, columns_other, _laplace_offset):\n # laplace offset is ignored, edgeR works on raw data\n value_columns = columns_a + columns_b\n # we need to go by key, since filter out nan rows.\n idx = [\"G%i\" % ii for ii in range(len(df))]\n input_df = df[value_columns]\n input_df = input_df.assign(idx=idx)\n input_df = input_df.set_index(\"idx\")\n if pd.isnull(input_df).any().any(): # pragma: no cover\n raise ValueError(\"Nans before filtering in edgeR input\")\n\n if self.ignore_if_max_count_less_than is not None:\n max_raw_count_per_gene = input_df.max(axis=1)\n input_df.loc[\n max_raw_count_per_gene < self.ignore_if_max_count_less_than, :\n ] = np.nan\n # does not matter any or all since we set them all above.\n input_df = input_df[~pd.isnull(input_df[value_columns]).all(axis=1)]\n\n differential = self.edgeR_comparison(\n input_df,\n columns_a,\n columns_b,\n manual_dispersion_value=self.manual_dispersion_value,\n )\n result = {\"FDR\": [], \"p\": [], \"log2FC\": []}\n for key in idx:\n try:\n row = differential.loc[key]\n result[\"FDR\"].append(row[\"FDR\"])\n result[\"p\"].append(row[\"PValue\"])\n result[\"log2FC\"].append(row[\"logFC\"])\n except KeyError:\n result[\"FDR\"].append(np.nan)\n result[\"p\"].append(np.nan)\n result[\"log2FC\"].append(np.nan)\n return pd.DataFrame(result)\n\n\nclass EdgeRPaired(EdgeRUnpaired):\n\n min_sample_count = 3\n name = \"edgeRPaired\"\n columns = [\"log2FC\", \"p\", \"FDR\"]\n supports_other_samples = False\n\n def __init__(self, ignore_if_max_count_less_than=None, manual_dispersion_value=0.4):\n self.ignore_if_max_count_less_than = ignore_if_max_count_less_than\n self.manual_dispersion_value = manual_dispersion_value\n\n 
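# Usage sketch (hypothetical group names; the actual entry point is Comparisons.a_vs_b,\n # defined further below and exercised in the tests above):\n #   comp = Comparisons(ddf, {\"treated\": [...], \"untreated\": [...]})\n #   ann = comp.a_vs_b(\"treated\", \"untreated\", EdgeRPaired())\n # Both groups must contain the same number of (paired) samples.\n 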
def edgeR_comparison(\n self, df, columns_a, columns_b, library_sizes=None, manual_dispersion_value=0.4\n ):\n \"\"\"Call edgeR exactTest comparing two groups.\n Resulting dataframe is in df order.\n \"\"\"\n import mbf_r\n import rpy2.robjects as ro\n import rpy2.robjects.numpy2ri as numpy2ri\n\n if len(columns_a) != len(columns_b):\n raise ValueError(\"paired requires equal length groups\")\n\n ro.r(\"library(edgeR)\")\n input_df = df[columns_a + columns_b]\n input_df.columns = [\"X_%i\" % x for x in range(len(input_df.columns))]\n if library_sizes is not None: # pragma: no cover\n samples = pd.DataFrame({\"lib.size\": library_sizes})\n else:\n samples = pd.DataFrame({\"lib.size\": input_df.sum(axis=0)})\n # remember, edgeR does b-a not a-b...\n samples.insert(0, \"group\", [\"z\"] * len(columns_b) + [\"y\"] * len(columns_a))\n samples.insert(\n 1,\n \"pairs\",\n [str(x) for x in list(range(len(columns_a))) + list(range(len(columns_a)))],\n )\n\n r_counts = mbf_r.convert_dataframe_to_r(input_df)\n r_samples = mbf_r.convert_dataframe_to_r(samples)\n design = ro.r(\"model.matrix\")(ro.r(\"~pairs+group\"), data=r_samples)\n y = ro.r(\"DGEList\")(\n counts=r_counts,\n samples=r_samples,\n **{\n \"lib.size\": ro.r(\"as.vector\")(\n numpy2ri.py2rpy(np.array(samples[\"lib.size\"]))\n )\n },\n )\n # apply TMM normalization\n y = ro.r(\"calcNormFactors\")(y)\n z = ro.r(\"estimateDisp\")(y, design, robust=True)\n fit = ro.r(\"glmFit\")(z, design)\n lrt = ro.r(\"glmLRT\")(fit)\n res = ro.r(\"topTags\")(lrt, n=len(input_df), **{\"sort.by\": \"none\"})\n result = mbf_r.convert_dataframe_from_r(res[0])\n return result\n\n\nclass DESeq2Unpaired:\n min_sample_count = 3\n name = \"DESeq2unpaired\"\n columns = [\"log2FC\", \"p\", \"FDR\"]\n supports_other_samples = True\n\n def deps(self):\n import rpy2.robjects as ro\n\n ro.r(\"library('DESeq2')\")\n version = str(ro.r(\"packageVersion\")(\"DESeq2\"))\n return ppg.ParameterInvariant(\n self.__class__.__name__ + \"_\" + self.name, (version,)\n )\n\n def compare(self, df, columns_a, columns_b, columns_other, _laplace_offset):\n # laplace_offset is ignored\n import rpy2.robjects as robjects\n\n robjects.r('library(\"DESeq2\")')\n columns = []\n conditions = []\n samples = []\n name_cols = [\n (\"c\", columns_a),\n (\"base\", columns_b),\n ]\n for g in columns_other:\n name_cols.append(\n (\n \"other_\" + g.replace(\"-\", \"m\").replace(\"+\", \"p\").replace(\"_\", \"\"),\n columns_other[g],\n )\n )\n for (name, cols) in name_cols:\n for col in cols:\n columns.append(col)\n conditions.append(name)\n samples.append(col)\n count_data = df[columns]\n df = self.call_DESeq2(count_data, samples, conditions)\n df = df.rename(\n columns={\"log2FoldChange\": \"log2FC\", \"pvalue\": \"p\", \"padj\": \"FDR\"}\n )\n return df[self.columns].reset_index(drop=True)\n\n def call_DESeq2(self, count_data, samples, conditions):\n \"\"\"Call DESeq2.\n @count_data is a DataFrame with 'samples' as the column names.\n @samples is a list. @conditions as well. 
Condition is the one you're contrasting on.\n You can add additional_conditions (a DataFrame, index = samples) which DESeq2 will\n keep under consideration (changes the formula).\n \"\"\"\n import rpy2.robjects as robjects\n import rpy2.robjects.numpy2ri as numpy2ri\n import mbf_r\n\n count_data = count_data.values\n count_data = np.array(count_data)\n nr, nc = count_data.shape\n count_data = count_data.reshape(count_data.size) # turn into 1d vector\n count_data = robjects.r.matrix(\n numpy2ri.py2rpy(count_data), nrow=nr, ncol=nc, byrow=True\n )\n col_data = pd.DataFrame({\"sample\": samples, \"condition\": conditions}).set_index(\n \"sample\"\n )\n formula = \"~ condition\"\n col_data = col_data.reset_index(drop=True)\n col_data = mbf_r.convert_dataframe_to_r(pd.DataFrame(col_data.to_dict(\"list\")))\n deseq_experiment = robjects.r(\"DESeqDataSetFromMatrix\")(\n countData=count_data, colData=col_data, design=robjects.Formula(formula)\n )\n deseq_experiment = robjects.r(\"DESeq\")(deseq_experiment)\n res = robjects.r(\"results\")(\n deseq_experiment, contrast=robjects.r(\"c\")(\"condition\", \"c\", \"base\")\n )\n df = mbf_r.convert_dataframe_from_r(robjects.r(\"as.data.frame\")(res))\n return df\n\n\nclass DESeq2MultiFactor:\n def __init__(self):\n self.min_sample_count = 3\n self.name = \"DESeq2unpairedMulti\"\n self.supports_other_samples = True\n self.columns = [\"log2FC\", \"p\", \"FDR\", \"mean\", \"lfcSE\"]\n pattern0 = re.compile(\n \"(?P<main>[^:]*):(?P<main_ref>[^:()]*)\\((?P<main_factor>.*)\\) effect \\(controlling for .*\" # noqa\n )\n pattern1 = re.compile(\n \"(?P<main>[^:]*):(?P<main_ref>[^:()]*)\\((?P<main_factor>.*)\\) effect for (?P<other1>[^:]*)\\((?P<other_factor>.*)\\)\" # noqa\n )\n pattern2 = re.compile(\n \"(?P<main>[^:]*):(?P<main_ref>[^:()]*)\\((?P<main_factor>.*)\\) effect for (?P<other1>[^:]*):(?P<other2>[^:()]*)\\((?P<other_factor>.*)\\)\" # noqa\n )\n pattern3 = re.compile(\n \"(?P<main>[^:]*):(?P<main_ref>[^:()]*)\\((?P<main_factor>.*)\\) effect difference for (?P<other1>[^:]*):(?P<other2>[^:()]*)\\((?P<other_factor>.*)\\)\" # noqa\n )\n self.patterns = [pattern0, pattern1, pattern2, pattern3]\n\n def select_contrast_c(self, factor: str, level: str, reference: str) -> Callable:\n \"\"\"\n Returns a callable to select results from a multi-factor deseq\n experiment using the contrast keyword.\n\n Parameters\n ----------\n main_factor : str\n The effect factor to select for.\n level : str\n The level to be compared to the factor reference.\n reference : str\n The reference level.\n Returns\n -------\n Callable\n A selection function that takes an robject instance from a deseq\n experiment.\n \"\"\"\n\n def __select(dds):\n return robjects.r(\"results\")(\n dds, contrast=robjects.r(\"c\")(factor, level, reference)\n )\n\n return __select\n\n def select_contrast_list(self, selection: str, interaction: str) -> Callable:\n \"\"\"\n Returns a callable to select results from a multi-cator deseq\n experiment using the contrast keyword with to selection terms.\n\n Parameters\n ----------\n selection : str\n The effect selection term, e.g. \"condition_B_vs_A\".\n interaction : str\n The interaction term to select for, e.g. 
\"genotypeIII.conditionB\".\n\n Returns\n -------\n Callable\n A selection function that takes an robject instance from a deseq\n experiment.\n \"\"\"\n\n def __select(dds):\n return robjects.r(\"results\")(\n dds,\n contrast=robjects.r(\"list\")(robjects.r(\"c\")(selection, interaction)),\n )\n\n return __select\n\n def select_name(self, interaction: str):\n \"\"\"\n Returns a callable to select results from a multi-cator deseq\n experiment using the name keyword.\n\n Parameters\n ----------\n interaction : str\n Term to select for.\n\n Returns\n -------\n Callable\n A selection function that takes an robject instance from a deseq\n experiment.\n \"\"\"\n\n def __select(dds):\n return robjects.r(\"results\")(dds, name=interaction)\n\n return __select\n\n def get_selector(self, prefix: str, factor_reference: Dict[str, str],) -> Callable:\n \"\"\"\n Returns the appropriate select function for a given prefix.\n\n This matches the prefix to one of four possible re patterns, extracts\n relevant factors and levels and generates the appropriate parameters\n for one of the thre selection function generators defined in this class.\n\n Parameters\n ----------\n prefix : str\n The column prefix, specifying a specific aspect of the deseq analysis,\n e.g. selecting the main condition effect for a specific genotype.\n factor_reference : Dict[str, str]\n Dictionary of factor names (key) to base level (value), e.g.\n {\"treatment\": \"DMSO\"}.\n\n Returns\n -------\n Callable\n The appropriate selection function for a given prefix that takes an\n robject instance from a deseq experiment.\n\n Raises\n ------\n ValueError\n If a prefix matches to multiple patterns.\n ValueError\n If a prefix does not match to any pattern.\n \"\"\"\n found = -1\n selector = None\n for i, pattern in enumerate(self.patterns):\n match = pattern.match(prefix)\n if match:\n if found != -1:\n raise ValueError(\n f\"Prefix {prefix} matched to multiple patterns (pattern{found}, pattern{i}).\"\n )\n main = match.group(\"main\")\n main_ref = match.group(\"main_ref\")\n main_factor = match.group(\"main_factor\")\n if i == 0:\n # not interactions\n selector = self.select_contrast_c(\n main_factor, f\"z{main}\", f\"b{main_ref}\"\n )\n elif i == 1:\n # main effect with interaction versus other reference\n selector = self.select_contrast_c(\n main_factor, f\"z{main}\", f\"b{main_ref}\"\n )\n elif i == 2:\n other_factor = match.group(\"other_factor\")\n other1 = match.group(\"other1\")\n selection = f\"{main_factor}_z{main}_vs_b{main_ref}\"\n interaction = f\"{other_factor}z{other1}.{main_factor}z{main}\"\n selector = self.select_contrast_list(selection, interaction)\n elif i == 3:\n other_factor = match.group(\"other_factor\")\n other1 = match.group(\"other1\")\n other2 = match.group(\"other2\")\n interaction = f\"{other_factor}z{other1}.{main_factor}z{main}\"\n if other2 == factor_reference[other_factor]:\n selector = self.select_name(interaction)\n else:\n interaction2 = f\"{other_factor}z{other2}.{main_factor}z{main}\"\n selector = self.select_contrast_list(interaction, interaction2)\n found = i\n if selector is None:\n raise ValueError(f\"prefix {prefix} did not match to any pattern.\")\n return selector\n\n def prefixes(\n self,\n main_factor: str,\n factor_reference: Dict[str, str],\n df_factors: DataFrame,\n interactions: List[Tuple[str, str]],\n test_difference: bool,\n compare_non_reference: bool,\n ) -> List[str]:\n \"\"\"\n Generates prefixes for each aspect to be selected from the multi-factor\n deseq run.\n\n This generates 
list of prefixes that define a certain aspect of interest\n to be obtained from the deseq run. These are used to select different\n aspects from the DEseq result and as prefixes for columns that are\n reported for each such aspect.\n\n Parameters\n ----------\n main_factor : str\n The main factor, e.g. treatment/condition.\n factor_reference : Dict[str, str]\n Dictionary of factor names (key) to base level (value), e.g.\n {\"treatment\": \"DMSO\"}.\n df_factors : DataFrame\n A DataFrame containing all factors and levels of the experiment.\n It should contain a column group as well as a column for each factor\n which is the column name.\n interactions : List[Tuple[str, str]]\n List if interaction terms. If this is empty, the analysis will\n report the main factor effects controlling for the other factors.\n test_difference : bool, optional\n Test for differences in the main effects for different levels\n of other factors, by default True.\n compare_non_reference : bool, optional\n Test for difference of the main effects for different levels of other\n factors compared to non-reference levels, by default False.\n\n Returns\n -------\n List[str]\n List of column prefixes.\n \"\"\"\n prefixes = []\n main_reference = factor_reference[main_factor]\n main_levels = [\n level for level in df_factors[main_factor] if level != main_reference\n ]\n other_factors = [factor for factor in factor_reference if factor != main_factor]\n if len(interactions) == 0:\n # without interaction, DEseq just controls for the other factors\n for main_level in main_levels:\n prefix = f\"{main_level}:{main_reference}({main_factor}) effect (controlling for {other_factors})\"\n # N3a:untr(treatment) effect (controlling for ['genotype'])\n prefixes.append(prefix)\n else:\n for main_level in main_levels:\n common = f\"{main_level}:{main_reference}({main_factor})\"\n for factor in other_factors:\n reference = factor_reference[factor]\n # test for the main factor effect (e.g. treatment effect B vs A) for\n # the reference level of other factor, (e.g. genotype I).\n # results(dds, contrast=c(\"condition\",\"B\",\"A\"))\n prefix = f\"{common} effect for {reference}({factor})\"\n # N3a:untr(treatment) effect for LSL(genotype)\n prefixes.append(prefix)\n levels = [\n level for level in df_factors[factor] if level != reference\n ]\n for i1, level in enumerate(levels):\n # test for the main factor effect (e.g. treatment effect B vs A) for\n # another level of other factor (e.g. genotype III). 
It is the\n # main effect plus interaction\n # results(dds, contrast=list( c(\"condition_B_vs_A\",\"genotypeIII.conditionB\") ))\n prefix = f\"{common} effect for {level}:{reference}({factor})\"\n # N3a:untr(treatment) effect for WT:LSL(genotype)\n prefixes.append(prefix)\n if test_difference:\n # test if the main factor effect is different\n # for the level of other factor compared to the reference level.\n # results(dds, name=\"genotypeIII.conditionB\")\n prefix = f\"{common} effect difference for {level}:{reference}({factor})\"\n # N3a:untr(treatment) effect difference for WT:LSL(genotype)\n prefixes.append(prefix)\n if compare_non_reference:\n # test if the main factor effect is different\n # for the level of other factor compared to a non-reference level of factor.\n # results(dds, contrast=list(\"genotypeIII.conditionB\", \"genotypeII.conditionB\"))\n for i2, level2 in enumerate(levels):\n if i2 <= i1:\n continue\n prefix = f\"{common} effect difference for {level}:{level2}({factor})\"\n # N3a:untr(treatment) effect difference for WT:KO(genotype)\n prefixes.append(prefix)\n return prefixes\n\n def compare(\n self,\n df: DataFrame,\n main_factor: str,\n factor_reference: Dict[str, str],\n columns_by_group: Dict[str, List],\n df_factors: DataFrame,\n interactions: List[Tuple[str, str]],\n test_difference: bool,\n compare_non_reference: bool,\n laplace_offset: float,\n prefixes: List[str],\n ) -> DataFrame:\n \"\"\"\n Returns a dataframe containing a multi-factor analysis.\n\n This implements the compare method which is called from the\n annotator. This method prepares the input for DEseq2 and calls the\n DEseq2 subsequently.\n\n Parameters\n ----------\n df : DataFrame\n The dataframe containing the raw counts.\n main_factor : str\n The main effect variable.\n factor_reference : Dict[str, str]\n Dictionary of factor names (key) to base level (value), e.g.\n {\"treatment\": \"DMSO\"}.\n columns_by_group : Dict[str, List]\n A dictionary containg groups as key and the raw count column names\n as list as values.\n df_factors : DataFrame\n A DataFrame containing all factors and levels of the experiment.\n It should contain a column group as well as a column for each factor\n which is the column name.\n interactions : List[Tuple[str, str]]\n List if interaction terms. 
If this is empty, the analysis will\n report the main factor effects controlling for the other factors.\n test_difference : bool, optional\n Test for differences in the main effects for different levels\n of other factors, by default True.\n compare_non_reference : bool, optional\n Test for difference of the main effects for different levels of other\n factors compared to non-reference levels, by default False.\n laplace_offset : float, optional\n laplace offset for methods that cannot handle zeros, this is ignored.\n prefixes : List[str]\n List of column prefixes.\n\n Returns\n -------\n DataFrame\n A result DataFrame returned from DEseq2 call.\n \"\"\"\n import rpy2.robjects as robjects\n\n robjects.r('library(\"DESeq2\")')\n columns = []\n to_df: List[pd.Series] = []\n prefix_select = {}\n for prefix in prefixes:\n select = self.get_selector(prefix, factor_reference)\n prefix_select[prefix] = select\n\n # make sure the reference level is lexicographically first\n other_factors = []\n for factor in factor_reference:\n ref_level = factor_reference[factor]\n replace = [\n f\"z{level}\" if level != ref_level else f\"b{level}\"\n for level in df_factors[factor]\n ]\n df_factors.replace(df_factors[factor].values, replace, inplace=True)\n if factor != main_factor:\n other_factors.append(factor)\n for _, row in df_factors.iterrows():\n group = row[\"group\"]\n # name = trim_group_name(row[\"group\"])\n for col in columns_by_group[group]:\n columns.append(col)\n new_row = row.copy()\n new_row[\"sample\"] = col\n to_df.append(new_row)\n count_data = df[columns]\n column_data = pd.DataFrame(to_df)\n column_data = column_data.set_index(\"sample\")\n formula = \"~ \" + \" + \".join(other_factors)\n formula += f\" + {main_factor}\"\n for f1, f2 in interactions:\n formula += f\" + {f1}:{f2}\"\n df = self.call_DESeq2(count_data, column_data, formula, prefix_select,)\n df = df.reset_index(drop=True)\n return df\n\n def call_DESeq2(\n self,\n count_data: DataFrame,\n column_data: DataFrame,\n formula: str,\n prefix_select: Dict[str, Callable],\n ) -> DataFrame:\n \"\"\"\n Returns a dataframe containing a multi-factor analysis.\n\n This actually calls DEseq2 via robjects, prepares the formula and\n joins the result dataframes.\n\n Parameters\n ----------\n count_data : DataFrame\n DataFrame with raw counts.\n column_data : DataFrame\n DataFrame with factor data.\n formula : str\n The formula to use for the DEseq analysis.\n prefix_select : Dict[str, Callable]\n A dictionary prefix to appropriate selector function.\n\n Returns\n -------\n DataFrame\n A result DataFrame which is annotated to the DelayedDataFrame.\n \"\"\"\n\n def res_to_df(res, prefix):\n rename = {\n \"log2FoldChange\": \"log2FC\",\n \"pvalue\": \"p\",\n \"padj\": \"FDR\",\n \"baseMean\": \"mean\",\n }\n df = mbf_r.convert_dataframe_from_r(robjects.r(\"as.data.frame\")(res))\n df = df[[\"baseMean\", \"log2FoldChange\", \"lfcSE\", \"pvalue\", \"padj\"]]\n df = df.rename(columns=rename)\n df = df.rename(columns=dict([(col, f\"{prefix} {col}\") for col in df]))\n return df\n\n count_data = count_data.values\n count_data = np.array(count_data)\n nr, nc = count_data.shape\n count_data = count_data.reshape(count_data.size) # turn into 1d vector\n count_data = robjects.r.matrix(\n numpy2ri.py2rpy(count_data), nrow=nr, ncol=nc, byrow=True\n )\n # col_data = col_data.reset_index(drop=True)\n col_data = mbf_r.convert_dataframe_to_r(\n pd.DataFrame(column_data.to_dict(\"list\"))\n )\n deseq_experiment = robjects.r(\"DESeqDataSetFromMatrix\")(\n 
countData=count_data, colData=col_data, design=robjects.Formula(formula)\n )\n dds = robjects.r(\"DESeq\")(deseq_experiment)\n # all that is left is extracting the results.\n dfs_to_concat = []\n for prefix in prefix_select:\n select = prefix_select[prefix]\n res = select(dds)\n dfs_to_concat.append(res_to_df(res, prefix))\n df = dfs_to_concat[0]\n for df2 in dfs_to_concat[1:]:\n df = df.join(df2)\n return df\n\n def get_columns(self, prefixes: List[str],) -> List[str]:\n \"\"\"\n Returns a list of all columns generated by the compare method.\n\n This is used in the ComparisonAnnotatorMulti class to declare the\n generated columns in advance.\n\n Parameters\n ----------\n prefixes : List[str]\n List of column prefixes as generated by self.prefixes.\n\n Returns\n -------\n List[str]\n List of columns.\n \"\"\"\n columns = []\n for col in self.columns:\n for prefix in prefixes:\n columns.append(f\"{prefix} {col}\")\n return sorted(columns)\n\n\nclass NOISeq:\n \"\"\"\n NOISeq comparison strategy that returns a probability measure for\n genes being differentially expressed. Use this if you have only single samples\n or technical replicates.\n\n This uses the R package NOISeq and calculates log2FC as well as difference\n in mean expression values after performing an appropriate normalization step.\n Then it simulates noise and estimates probabilities that the obtained\n values are due to conditions. This can be used as a ranking score for\n DEG, not as a p-value.\n This can only do pairwise comparisons on a single factor.\n\n Parameters\n ----------\n norm : str, optional\n The normalization to be applied, by default \"tmm\".\n nss : int, optional\n Number of simulated noise samples, by default 5.\n lc : int, optional\n Whether length normalization is performed (rpkm), by default 0 (i.e. 
off).\n v : float, optional\n The variance for the simulated noise, by default 0.02.\n pnr : float, optional\n Percentage of the total reads used to simulated each sample when no replicates\n are available, by default 0.2.\n replicates: 'technical', 'biological' or 'no', default 'technical'\n\n Raises\n ------\n ValueError\n If an unknown normalization method is given.\n \"\"\"\n\n min_sample_count = 1\n name = \"NOIseq\"\n supports_other_samples = True\n columns = [\"log2FC\", \"Prob\", \"Rank\", \"D\"]\n\n def __init__(\n self,\n norm: str = \"tmm\",\n nss: int = 5,\n lc: int = 0,\n v: float = 0.02,\n pnr: float = 0.2,\n interval_strategy: IntervalStrategyGene = None,\n genome: GenomeBase = None,\n replicates: str = \"technical\",\n ):\n \"\"\"Constructor\"\"\"\n self.norm = norm\n accepted = [\"tmm\", \"rpkm\", \"uqua\", \"n\"]\n if self.norm not in accepted:\n raise ValueError(\n f\"Only {accepted} are accepted as values for norm, given was {norm}\"\n )\n if self.norm == \"rpkm\":\n if interval_strategy is None or genome is None:\n raise ValueError(\n \"If you choose 'rpkm' as norm, you need to supply an IntervalStrategy and a genome.\"\n )\n self.columns = [\"log2FC\", \"Prob\", \"Rank\", \"D\"]\n self.interval_strategy = interval_strategy\n self.lc = lc\n self.v = v\n self.nss = nss\n self.pnr = pnr\n self.genome = genome\n self.replicates = replicates\n\n def compare(\n self,\n df: DataFrame,\n columns_a: List[str],\n columns_b: List[str],\n columns_other: Dict[str, List[str]],\n _laplace_offset: float = 0.5,\n ) -> DataFrame:\n \"\"\"\n Performas the comparison a vs b by preparing the NOISeq inputs and\n calling NOISeq.\n\n Parameters\n ----------\n df : DataFrame\n DataFrame to be annotated.\n columns_a : List[str]\n Column names for condition a.\n columns_b : List[str]\n Column names for condition b.\n columns_other : Dict[str, str]\n Dictionary group to list of column names for other groups.\n _laplace_offset : float, optional\n offset to be added to prevent zero division (k for NOIseq), by default 0.5.\n\n Returns\n -------\n DataFrame\n DataFrame with Log2FC, prob and rank columns.\n \"\"\"\n robjects.r('library(\"NOISeq\")')\n columns = []\n condition = []\n name_cols = [\n (\"a\", columns_a),\n (\"base\", columns_b),\n ]\n for i, g in enumerate(columns_other):\n name_cols.append((f\"other_{i}\", columns_other[g],))\n factor = \"condition\"\n rename = {}\n for (name, cols) in name_cols:\n for i, col in enumerate(cols):\n rename[col] = f\"{name}_{i}\"\n columns.append(col)\n condition.append(name)\n count_data = df[columns + [\"gene_stable_id\"]].rename(columns=rename)\n count_data = count_data.set_index(\"gene_stable_id\")\n factors = pd.DataFrame({factor: condition}, index=count_data.columns)\n df_chrom = df[[\"chr\", \"start\", \"stop\"]]\n df_chrom.index = df[\"gene_stable_id\"]\n biotypes = df[\"biotype\"].values\n if \"length\" not in df.columns:\n if self.interval_strategy is not None:\n lengths = self.interval_strategy.get_interval_lengths_by_gene(\n self.genome\n )\n else:\n lengths = df[\"stop\"] - df[\"start\"]\n else:\n lengths = df[\"length\"].values\n df = self.call_noiseq(\n count_data, factors, biotypes, lengths, df_chrom, _laplace_offset,\n )\n df = df.rename(columns={\"M\": \"log2FC\", \"prob\": \"Prob\", \"ranking\": \"Rank\"})\n return df[self.columns].reset_index(drop=True)\n\n def deps(self) -> List[Job]:\n \"\"\"Returns a list of job dependencies.\"\"\"\n\n robjects.r(\"library('NOISeq')\")\n version = str(robjects.r(\"packageVersion\")(\"NOISeq\"))\n 
return [\n ppg.ParameterInvariant(\n self.__class__.__name__ + \"_\" + self.name,\n (version, self.lc, self.nss, self.norm, self.v, self.columns),\n ),\n ppg.FunctionInvariant(f\"FI_{self.name}_compare\", self.compare),\n ppg.FunctionInvariant(f\"FI_{self.name}_call_noiseq\", self.call_noiseq),\n ]\n\n def call_noiseq(\n self,\n count_data: DataFrame,\n factors: DataFrame,\n biotypes: List[str],\n lengths: List[int],\n df_chrom: DataFrame,\n _laplace_offset: float,\n ) -> DataFrame:\n \"\"\"\n Calls NOISeq via rpy2.\n\n Prior to calling NOISeq, all input data is converted to something R\n can understand.\n\n Parameters\n ----------\n count_data : DataFrame\n DataFrame with count data.\n factors : DataFrame\n DataFrame with factor data.\n biotypes : List[str]\n List of biotypes ordered as in count_data.\n lengths : List[int]\n List of gene lengths ordered as in count_data.\n df_chrom : DataFrame\n DataFrame with 'chr', 'start', 'stop'\n _laplace_offset : float\n Offset to add to avoid zero division.\n\n Returns\n -------\n DataFrame\n Result DataFrame from NOISeq.\n \"\"\"\n data = mbf_r.convert_dataframe_to_r(count_data)\n factors = mbf_r.convert_dataframe_to_r(factors)\n df_chrom = df_chrom.astype({\"start\": \"int32\", \"stop\": \"int32\"})\n chromosome = mbf_r.convert_dataframe_to_r(df_chrom)\n biotype = robjects.vectors.StrVector(biotypes)\n stable_ids = robjects.vectors.StrVector(list(df_chrom.index.values))\n biotype.names = stable_ids\n length = robjects.vectors.IntVector(lengths)\n length.names = stable_ids\n conditions = robjects.vectors.StrVector([\"a\", \"base\"])\n noisedata = robjects.r(\"readData\")(\n data=data,\n factors=factors,\n biotype=biotype,\n length=length,\n chromosome=chromosome,\n )\n noiseq = robjects.r(\"noiseq\")(\n noisedata,\n k=_laplace_offset,\n norm=self.norm,\n factor=\"condition\",\n replicates=self.replicates,\n conditions=conditions,\n lc=self.lc,\n pnr=self.pnr,\n nss=self.nss,\n v=self.v,\n )\n results = robjects.r(\"function(mynoiseq){mynoiseq@results}\")(noiseq)\n df = mbf_r.convert_dataframe_from_r(robjects.r(\"as.data.frame\")(results))\n return df\n\n\nclass DESeq2UnpairedOld(DESeq2Unpaired):\n # this is the original deseq2unpaired, I keep it to reproduce the erroneous results, please delete this\n name = \"DESeq2UnpairedOld\"\n\n def call_DESeq2(self, count_data, samples, conditions):\n \"\"\"Call DESeq2.\n @count_data is a DataFrame with 'samples' as the column names.\n @samples is a list. @conditions as well. 
Condition is the one you're contrasting on.\n You can add additional_conditions (a DataFrame, index = samples) which DESeq2 will\n keep under consideration (changes the formula).\n \"\"\"\n import rpy2.robjects as robjects\n import rpy2.robjects.numpy2ri as numpy2ri\n import mbf_r\n\n count_data = count_data.values\n count_data = np.array(count_data)\n nr, nc = count_data.shape\n count_data = count_data.reshape(count_data.size) # turn into 1d vector\n count_data = robjects.r.matrix(\n numpy2ri.py2rpy(count_data), nrow=nr, ncol=nc, byrow=True\n )\n col_data = pd.DataFrame({\"sample\": samples, \"condition\": conditions}).set_index(\n \"sample\"\n )\n formula = \"~ condition\"\n col_data = col_data.reset_index(drop=True)\n col_data = mbf_r.convert_dataframe_to_r(pd.DataFrame(col_data.to_dict(\"list\")))\n deseq_experiment = robjects.r(\"DESeqDataSetFromMatrix\")(\n countData=count_data, colData=col_data, design=robjects.Formula(formula)\n )\n deseq_experiment = robjects.r(\"DESeq\")(deseq_experiment)\n res = robjects.r(\"results\")(\n deseq_experiment, contrast=robjects.r(\"c\")(\"condition\", \"c\", \"base\")\n )\n df = mbf_r.convert_dataframe_from_r(robjects.r(\"as.data.frame\")(res))\n return df\n\n def compare(self, df, columns_a, columns_b, columns_other, _laplace_offset):\n # laplace_offset is ignored\n import rpy2.robjects as robjects\n\n robjects.r('library(\"DESeq2\")')\n columns = []\n conditions = []\n samples = []\n for (name, cols) in [\n (\"c\", columns_a),\n (\"other\", columns_other),\n (\"base\", columns_b),\n ]:\n for col in cols:\n columns.append(col)\n conditions.append(name)\n samples.append(col)\n for col in df.columns:\n print(col)\n print(\"-------------\")\n for c in df.columns:\n print(c, c in columns)\n count_data = df[columns]\n df = self.call_DESeq2(count_data, samples, conditions)\n df = df.rename(\n columns={\"log2FoldChange\": \"log2FC\", \"pvalue\": \"p\", \"padj\": \"FDR\"}\n )\n return df[self.columns].reset_index(drop=True)\n", "id": "596226", "language": "Python", "matching_score": 4.816626071929932, "max_stars_count": 0, "path": "src/mbf_comparisons/methods.py" }, { "content": "import itertools\nimport hashlib\nimport pypipegraph as ppg\nimport numpy as np\nimport pandas as pd\nfrom pandas import DataFrame\nfrom mbf_genomics import DelayedDataFrame\nfrom mbf_qualitycontrol import register_qc, qc_disabled\nfrom mbf_genomics.util import parse_a_or_c_to_anno\nfrom mbf_genomics.annotator import Annotator\nfrom typing import List, Dict, Tuple, Any\nfrom pypipegraph import Job\nimport dppd\nimport dppd_plotnine # noqa: F401\n\ndp, X = dppd.dppd()\n\n# import pypipegraph as ppg\n\n\nclass ComparisonAnnotator(Annotator):\n def __init__(\n self,\n comparisons,\n group_a,\n group_b,\n comparison_strategy,\n laplace_offset=1 / 1e6,\n other_groups_for_variance=[],\n ):\n \"\"\"Create a comparison (a - b)\"\"\"\n self.comparisons = comparisons\n\n if hasattr(comparison_strategy, \"__call__\"):\n self.comparison_strategy = comparison_strategy()\n else:\n self.comparison_strategy = comparison_strategy\n if isinstance(\n self.comparison_strategy.columns, str\n ): # pragma: no cover definsive\n raise ValueError(\n \"ComparisonStrategy %s had a string as columns, must be a list\"\n % self.comparison_strategy\n )\n self.comp = (group_a, group_b)\n self.other_groups_for_variance = other_groups_for_variance\n self.columns = []\n self.column_lookup = {}\n for col in sorted(self.comparison_strategy.columns):\n cn = self.name_column(col)\n self.columns.append(cn)\n 
self.column_lookup[col] = cn\n self.laplace_offset = laplace_offset\n self.result_dir = self.comparisons.result_dir / f\"{group_a}_vs_{group_b}\"\n self.result_dir.mkdir(exist_ok=True, parents=True)\n self._check_comparison_groups(group_a, group_b)\n if len(self.columns[0]) >= 60:\n self.cache_name = (\n \"Comp %s\" % hashlib.md5(self.columns[0].encode(\"utf-8\")).hexdigest()\n )\n try:\n self.vid = self._build_vid()\n except AttributeError: # the sample annotators don't have a vid\n pass\n\n def _build_vid(self):\n a = set()\n b = set()\n all_columns = True\n for s in self.comparisons.groups_to_samples[self.comp[0]]:\n if s[0] is not None:\n a.add(s[0].vid)\n all_columns = False\n for s in self.comparisons.groups_to_samples[self.comp[1]]:\n if s[0] is not None:\n b.add(s[0].vid)\n all_columns = False\n if a or b:\n return sorted(a) + [\"vs\"] + sorted(b)\n elif all_columns:\n raise AttributeError(\"No vids - as expected\")\n\n def name_column(self, col):\n if self.comparison_strategy.supports_other_samples:\n supports_other_samples = \",Other=%s\" % bool(self.other_groups_for_variance)\n else:\n supports_other_samples = \"\"\n return f\"Comp. {self.comp[0]} - {self.comp[1]} {col} ({self.comparison_strategy.name}{supports_other_samples})\"\n\n def __getitem__(self, itm):\n \"\"\"look up the full column name from log2FC, p, FDR, etc\"\"\"\n return self.column_lookup[itm]\n\n def filter(self, filter_definition, new_name=None, sheet_name=None):\n \"\"\"Turn a filter definition [(column, operator, threshold)...]\n into a filtered genes object.\n\n Example:\n comp.filter(genes, '2x', [\n ('FDR', '<=', 0.05) # a name from our comparison strategy - inspect column_lookup to list\n ('log2FC', '|>', 1), #absolute\n ...\n (anno, '>=', 50),\n ((anno, 1), '>=', 50), # for the second column of the annotator\n ((anno, 'columnX'), '>=', 50), # for the second column of the annotator\n ('annotator_columnX', '=>' 50), # search for an annotator with that column. 
Use if exactly one, complain otherwise\n\n\n\n ]\n \"\"\"\n lookup = self.column_lookup.copy()\n for c in self.columns:\n lookup[c] = c\n\n subset_relevant_columns = set(lookup.values())\n subset_relevant_columns.update(self.sample_columns(self.comp[0]))\n subset_relevant_columns.update(self.sample_columns(self.comp[1]))\n for g in self.other_groups_for_variance:\n subset_relevant_columns.update(self.sample_columns(g))\n\n further_filters = []\n add_direction = False\n thresholds = {}\n filter_str = []\n for column, op, threshold in sorted(filter_definition):\n if op == \"==\":\n oop = \"=\"\n elif op == \">\":\n oop = \">\"\n elif op == \"<\":\n oop = \"<\"\n elif op == \">=\":\n oop = \"≥\"\n elif op == \"<=\":\n oop = \"≤\"\n elif op == \"|>\":\n oop = \"|>\"\n elif op == \"|<\":\n oop = \"|<\"\n elif op == \"|>=\":\n oop = \"|≥\"\n elif op == \"|<=\":\n oop = \"|≤\"\n else:\n oop = op\n filter_str.append(f\"{column}_{oop}_{threshold:.2f}\")\n subset_relevant_columns.add(lookup[column])\n if column == \"log2FC\":\n if \"|\" in op:\n add_direction = True\n thresholds[column] = threshold\n\n if new_name is None:\n filter_str = \"__\".join(filter_str)\n new_name = f\"Filtered_{self.comparison_strategy.name}_{self.comp[0]}-{self.comp[1]}_{filter_str}\"\n\n if \"log2FC\" in lookup:\n further_filters.append((\"logFC\", lookup[\"log2FC\"], 2, thresholds.get('log2FC', 0)))\n if add_direction:\n further_filters.append((\"Direction\", lookup[\"log2FC\"], 1, 0))\n for x in [\"p\", \"FDR\"]: # less than\n if x in lookup:\n further_filters.append((x, lookup[x], 5, thresholds.get(x, 1)))\n\n for x in [\"minExpression\"]: # min of columns > x\n if x in lookup:\n further_filters.append((x, [lookup[x]], 4, thresholds.get(x, 0)))\n\n # we need the filter func for the plotting, so we do it ourselves\n filter_func, annos = self.comparisons.ddf.definition_to_function(\n filter_definition, lookup\n )\n kwargs = {}\n if hasattr(self, \"vid\"):\n kwargs[\"vid\"] = self.vid\n res = self.comparisons.ddf.filter(\n new_name,\n filter_func,\n annotators=annos,\n column_lookup=lookup,\n result_dir=self.result_dir / new_name,\n sheet_name=sheet_name,\n **kwargs,\n )\n if not qc_disabled():\n if \"p\" in self.comparison_strategy.columns:\n self.register_qc_volcano(self.comparisons.ddf, res, filter_func)\n # self.register_qc_ma_plot(self.comparisons.ddf, res, filter_func)\n res.plot_columns = self.samples()\n res.venn_annotator = self\n res.subset_relevant_columns = subset_relevant_columns\n res.further_filter_columns = further_filters\n return res\n\n def calc(self, df):\n columns_a = list(self.sample_columns(self.comp[0]))\n columns_b = list(self.sample_columns(self.comp[1]))\n columns_other = {}\n for g in self.other_groups_for_variance:\n columns_other[g] = self.sample_columns(g)\n comp = self.comparison_strategy.compare(\n df, columns_a, columns_b, columns_other, self.laplace_offset,\n )\n res = {}\n for col in sorted(self.comparison_strategy.columns):\n res[self.name_column(col)] = comp[col]\n return pd.DataFrame(res)\n\n def dep_annos(self):\n \"\"\"Return other annotators\"\"\"\n res = []\n for generator in [self.samples(), self.other_samples()]:\n for k in generator:\n a = parse_a_or_c_to_anno(k)\n if a is not None:\n res.append(a)\n return res\n\n def deps(self, ddf):\n from mbf_genomics.util import freeze\n\n sample_info = []\n for ac in self.samples():\n group = self.comparisons.sample_column_to_group[ac[1]]\n sample_info.append(\n (group, ac[0].get_cache_name() if ac[0] is not None else \"None\", ac[1])\n 
)\n sample_info.sort()\n parameters = freeze(\n [\n (\n # self.comparison_strategy.__class__.__name__ , handled by column name\n sample_info,\n # self.comp, # his handled by column name\n self.laplace_offset,\n )\n ]\n )\n res = [ppg.ParameterInvariant(self.get_cache_name(), parameters)]\n res.extend(getattr(self.comparison_strategy, \"deps\", lambda: [])())\n return res\n\n def samples(self):\n \"\"\"Return anno, column for samples used\"\"\"\n for x in list(self.comp) + self.other_groups_for_variance:\n for s in self.comparisons.groups_to_samples[x]:\n yield s\n\n def other_samples(self):\n \"\"\"Return anno, column for additional samples used for variance\"\"\"\n for x in self.other_groups_for_variance:\n for s in self.comparisons.groups_to_samples[x]:\n yield s\n\n def sample_columns(self, group):\n for s in self.comparisons.groups_to_samples[group]:\n yield s[1]\n\n def _check_comparison_groups(self, *groups):\n for x in groups:\n if x not in self.comparisons.groups_to_samples:\n raise ValueError(f\"Comparison group {x} not found\")\n if (\n len(self.comparisons.groups_to_samples[x])\n < self.comparison_strategy.min_sample_count\n ):\n raise ValueError(\n \"Too few samples in %s for %s\" % (x, self.comparison_strategy.name)\n )\n\n def register_qc_volcano(self, genes, filtered=None, filter_func=None):\n \"\"\"perform a volcano plot\n \"\"\"\n if filtered is None:\n output_filename = genes.result_dir / \"volcano.png\"\n else:\n output_filename = filtered.result_dir / \"volcano.png\"\n\n def plot(output_filename):\n df = (\n dp(genes.df)\n .mutate(\n significant=filter_func(genes.df)\n if filter_func is not None\n else \"tbd.\"\n )\n .pd\n )\n\n no_sig_lower = (df[\"significant\"] & (df[self[\"log2FC\"]] < 0)).sum()\n no_sig_higher = (df[\"significant\"] & (df[self[\"log2FC\"]] > 0)).sum()\n\n (\n dp(df)\n .p9()\n .scale_color_many_categories(name=\"regulated\", shift=3)\n .scale_y_continuous(\n name=\"p\",\n trans=dp.reverse_transform(\"log10\"),\n labels=lambda xs: [\"%.2g\" % x for x in xs],\n )\n .add_vline(xintercept=1, _color=\"blue\")\n .add_vline(xintercept=-1, _color=\"blue\")\n .add_hline(yintercept=0.05, _color=\"blue\")\n .add_rect( # shade 'simply' significant regions\n xmin=\"xmin\",\n xmax=\"xmax\",\n ymin=\"ymin\",\n ymax=\"ymax\",\n _fill=\"lightgrey\",\n data=pd.DataFrame(\n {\n \"xmin\": [-np.inf, 1],\n \"xmax\": [-1, np.inf],\n \"ymin\": [0, 0],\n \"ymax\": [0.05, 0.05],\n }\n ),\n _alpha=0.8,\n )\n .add_scatter(self[\"log2FC\"], self[\"p\"], color=\"significant\")\n .title(f\"# regulated down/ up: {no_sig_lower} / {no_sig_higher}\")\n # .coord_trans(x=\"reverse\", y=\"reverse\") #broken as of 2019-01-31\n .render(output_filename, width=8, height=6, dpi=300)\n )\n\n return register_qc(\n ppg.FileGeneratingJob(output_filename, plot).depends_on(\n genes.add_annotator(self),\n ppg.FunctionInvariant(\n str(output_filename) + \"_filter_func\", filter_func\n ),\n )\n )\n\n def register_qc_ma_plot(self, genes, filtered, filter_func):\n \"\"\"perform an MA plot - not a straight annotator.register_qc function,\n but called by .filter\n \"\"\"\n output_filename = filtered.result_dir / \"ma_plot.png\"\n\n def plot(output_filename):\n from statsmodels.nonparametric.smoothers_lowess import lowess\n\n print(genes.df.columns)\n print(list(self.sample_columns(self.comp[0])))\n print(list(self.sample_columns(self.comp[1])))\n df = genes.df[\n list(self.sample_columns(self.comp[0]))\n + list(self.sample_columns(self.comp[1]))\n ]\n df = df.assign(significant=filter_func(genes.df))\n 
pdf = []\n loes_pdfs = []\n # Todo: how many times can you over0lopt this?\n for a, b in itertools.combinations(\n [x for x in df.columns if not \"significant\" == x], 2\n ):\n np_a = np.log2(df[a] + self.laplace_offset)\n np_b = np.log2(df[b] + self.laplace_offset)\n A = (np_a + np_b) / 2\n M = np_a - np_b\n local_pdf = pd.DataFrame(\n {\n \"A\": A,\n \"M\": M,\n \"a\": self.comparisons.get_plot_name(a),\n \"b\": self.comparisons.get_plot_name(b),\n \"significant\": df[\"significant\"],\n }\n ).sort_values(\"M\")\n chosen = np.zeros(len(local_pdf), bool)\n chosen[:500] = True\n chosen[-500:] = True\n chosen[np.random.randint(0, len(chosen), 1000)] = True\n pdf.append(local_pdf)\n fitted = lowess(M, A, is_sorted=False)\n loes_pdfs.append(\n pd.DataFrame(\n {\n \"a\": self.comparisons.get_plot_name(a),\n \"b\": self.comparisons.get_plot_name(b),\n \"A\": fitted[:, 0],\n \"M\": fitted[:, 1],\n }\n )\n )\n pdf = pd.concat(pdf)\n pdf = pdf.assign(ab=[a + \":\" + b for (a, b) in zip(pdf[\"a\"], pdf[\"b\"])])\n loes_pdf = pd.concat(loes_pdfs)\n loes_pdf = loes_pdf.assign(\n ab=[a + \":\" + b for (a, b) in zip(loes_pdf[\"a\"], loes_pdf[\"b\"])]\n )\n (\n dp(pdf)\n .p9()\n .theme_bw(10)\n .add_hline(yintercept=0, _color=\"lightblue\")\n .add_hline(yintercept=1, _color=\"lightblue\")\n .add_hline(yintercept=-1, _color=\"lightblue\")\n .scale_color_many_categories(name=\"significant\", shift=3)\n .add_point(\"A\", \"M\", color=\"significant\", _size=1, _alpha=0.3)\n .add_line(\"A\", \"M\", _color=\"blue\", data=loes_pdf)\n .facet_wrap([\"ab\"])\n .title(f\"MA {filtered.name}\\n{self.comparisons.find_variable_name()}\")\n .render(output_filename, width=8, height=6)\n )\n\n return register_qc(\n ppg.FileGeneratingJob(output_filename, plot)\n .depends_on(genes.add_annotator(self))\n .depends_on(self.comparisons.deps)\n )\n\n\nclass ComparisonAnnotatorMulti(ComparisonAnnotator):\n \"\"\"\n Annotator for multi-factor comparison.\n\n Based on a main factor and a list of multiple other factor, this\n creates an annotator that annotates DEG analysis results for a multi-factor\n design. Interaction terms may be specified as a list of tuples which may be\n empty.\n if an empty interactions list is provided, the analysis just controls\n for different levels of other_factors and report the main effect.\n\n Parameters\n ----------\n name : str\n Annotator name, used for cache names and test of uniqueness.\n comparisons : Comparisons\n Comparisons instance containing the groups to be analyzed.\n main_factor : str\n The main factor, e.g. treatment/condition.\n factor_reference : Dict[str, str]\n Dictionary of factor names (key) to base level (value), e.g.\n {\"treatment\": \"DMSO\"}.\n groups : List[str]\n Groups to be included in the DE analysis.\n df_factors : DataFrame\n A dataframe containing all groups and factor levels\n relevant for the variance calculation. This may include groups\n beyond the groups of interest. If so, these groups are used for\n estimating dispersion but not reported in the results.\n interactions : List[Tuple[str, str]]\n List if interaction terms. If this is empty, the analysis will\n report the main factor effects controlling for the other factors.\n method : Any\n The DEG method to use, e.g. 
DESeq2MultiFactor.\n test_difference : bool, optional\n Test for differences in the main effects for different levels\n of other factors, by default True.\n compare_non_reference : bool, optional\n Test for difference of the main effects for different levels of other\n factors compared to non-reference levels, by default False.\n laplace_offset : float, optional\n laplace offset for methods that cannot handle zeros, by default 1/1e6.\n \"\"\"\n\n def __init__(\n self,\n name: str,\n comparisons: Any,\n main_factor: str,\n factor_reference: Dict[str, str],\n groups: List[str],\n df_factors: DataFrame,\n interactions: List[Tuple[str, str]],\n comparison_strategy: Any,\n test_difference: bool,\n compare_non_reference: bool,\n laplace_offset: float,\n ):\n \"\"\"Contructor\"\"\"\n self.comparisons = comparisons\n if hasattr(comparison_strategy, \"__call__\"):\n self.comparison_strategy = comparison_strategy()\n else:\n self.comparison_strategy = comparison_strategy\n self.comparison_name = name\n reserved_chars = \":()\"\n for factor in factor_reference:\n if any([x in factor for x in reserved_chars]):\n raise ValueError(f\"Factor values must not contain any of {list(reserved_chars)}.\")\n for level in df_factors[factor].unique():\n if any([x in level for x in reserved_chars]):\n raise ValueError(f\"Level values must not contain any of {list(reserved_chars)}.\")\n self.columns = []\n self.column_lookup = {}\n self.groups = groups\n prefixes = comparison_strategy.prefixes(\n main_factor,\n factor_reference,\n df_factors,\n interactions,\n test_difference,\n compare_non_reference,\n )\n self.prefixes = prefixes\n for col in comparison_strategy.get_columns(prefixes):\n cn = self.name_column(col)\n self.columns.append(cn)\n self.column_lookup[col] = cn\n self.laplace_offset = laplace_offset\n self.result_dir = self.comparisons.result_dir / f\"{self.comparison_name}\"\n self.result_dir.mkdir(exist_ok=True, parents=True)\n self.df_factors = df_factors\n self.factor_reference = factor_reference\n self.interactions = interactions\n self.main_factor = main_factor\n self.test_difference = test_difference\n self.compare_non_reference = compare_non_reference\n columns = [\"group\"] + list(factor_reference.keys())\n df_factors = df_factors[columns]\n self._check_comparison_groups(*self.groups)\n self.cache_name = f\"{ComparisonAnnotatorMulti}_{name}\"\n if len(self.cache_name) >= 60:\n self.cache_name = (\n \"Comp %s\" % hashlib.md5(self.cache_name.encode(\"utf-8\")).hexdigest()\n )\n try:\n self.vid = self._build_vid()\n except AttributeError: # the sample annotators don't have a vid\n pass\n self.other_groups_for_variance = []\n\n def samples(self):\n \"\"\"\n Return anno, column for samples used.\n\n Overrides the parent method, since we now have more than 2 groups to\n be considered.\n\n Yields\n -------\n Tuple[Annotator, str]\n (Annotator, column_name) for each sample used.\n \"\"\"\n for group in self.groups:\n for s in self.comparisons.groups_to_samples[group]:\n yield s\n\n def deps(self, ddf: DelayedDataFrame) -> List[Job]:\n \"\"\"\n Returns list of dependencies.\n\n Parameters\n ----------\n ddf : DelayedDataFrame\n The DelayedDataFrame instance to be annotated, e.g. 
genes.\n\n Returns\n -------\n List[Job]\n List of jobs this calc_ddf function depends on.\n \"\"\"\n res = super().deps(ddf)\n res.append(ddf.load())\n for anno in self.dep_annos():\n res.append(ddf.add_annotator(anno))\n return res\n\n def calc_ddf(self, ddf: DelayedDataFrame) -> DataFrame:\n \"\"\"\n Calculates a dataframe with new columns to be added to the ddf.\n\n This overrides the method from the parent class and calls the\n compare function from the comparison method given.\n\n Parameters\n ----------\n ddf : DelayedDataFrame\n The ddf to be annotated.\n\n Returns\n -------\n DataFrame\n DataFrame with additional columns to be added to the ddf.df.\n \"\"\"\n df = ddf.df\n columns_by_group = {}\n for group in self.groups:\n columns_by_group[group] = list(self.sample_columns(group))\n columns_other = []\n for g in self.other_groups_for_variance:\n columns_other.extend(self.sample_columns(g))\n res = self.comparison_strategy.compare(\n df,\n self.main_factor,\n self.factor_reference,\n columns_by_group,\n self.df_factors,\n self.interactions,\n self.test_difference,\n self.compare_non_reference,\n self.laplace_offset,\n self.prefixes\n )\n rename = {}\n for col in res.columns:\n rename[col] = self.name_column(col)\n res = res.rename(columns=rename)\n res = res.set_index(df.index)\n return res\n\n def name_column(self, col: str) -> str:\n \"\"\"\n Name mangler function that adds the annotator name to the new\n columns.\n\n Comparison name is added as a suffix.\n\n Parameters\n ----------\n col : [str]\n A column name to be changed.\n\n Returns\n -------\n [str]\n The new column name.\n \"\"\"\n return f\"{col} (Comp. {self.comparison_name})\"\n\n\nclass ComparisonAnnotatorOld(ComparisonAnnotator):\n \"\"\"\n I needed to adjust the calc fuction of ComparisonAnnotator to account for\n other_groups_fro_variance. 
This is the old function, I need it to generate the\n original results.\n \"\"\"\n def calc(self, df):\n columns_a = list(self.sample_columns(self.comp[0]))\n columns_b = list(self.sample_columns(self.comp[1]))\n columns_other = []\n for g in self.other_groups_for_variance:\n columns_other.extend(self.sample_columns(g))\n comp = self.comparison_strategy.compare(\n df, columns_a, columns_b, columns_other, self.laplace_offset\n )\n res = {}\n for col in sorted(self.comparison_strategy.columns):\n res[self.name_column(col)] = comp[col]\n return pd.DataFrame(res)\n", "id": "3459850", "language": "Python", "matching_score": 6.719765663146973, "max_stars_count": 0, "path": "src/mbf_comparisons/annotator.py" }, { "content": "from typing import List, Dict, Any, Tuple\nfrom mbf_qualitycontrol import register_qc, qc_disabled\nfrom dppd import dppd\nfrom mbf_genomics.util import parse_a_or_c, freeze\nfrom mbf_genomics import DelayedDataFrame\nfrom pandas import DataFrame\nfrom .annotator import (\n ComparisonAnnotator,\n ComparisonAnnotatorOld,\n ComparisonAnnotatorMulti,\n)\nimport functools\nimport pandas as pd\nimport pypipegraph as ppg\nimport dppd_plotnine # noqa: F401\nimport itertools\n\ndp, X = dppd()\n\n\nclass Comparisons:\n \"\"\"A ddf + comparison groups,\n ready for actually doing comparisons\n\n Paramaters:\n\n groups_to_samples: { keyX: [columnA, annoB, (annoC, column_name), (annoC, 2),\n keyY: ..., ...}\n keyX: one of the keys of groups_to_samples\n keyY: one of the keys of groups_to_samples\n \"\"\"\n\n def __init__(self, ddf, groups_to_samples, name=None):\n if not isinstance(ddf, DelayedDataFrame):\n raise ValueError(\"Ddf must be a DelayedDataFrame\")\n self.ddf = ddf\n self.groups_to_samples = self._check_input_dict(groups_to_samples)\n self.sample_column_to_group = self._sample_columns_to_group()\n self.samples = functools.reduce(\n list.__add__, [x[1] for x in sorted(self.groups_to_samples.items())]\n )\n if name is None:\n self.name = \"comparison__\" + \"_\".join(sorted(self.groups_to_samples.keys()))\n else:\n self.name = \"comparison__\" + name\n self.result_dir = self.ddf.result_dir / self.name\n self.result_dir.mkdir(exist_ok=True, parents=True)\n if ppg.inside_ppg():\n ppg.assert_uniqueness_of_object(self)\n if not hasattr(ppg.util.global_pipegraph, \"_mbf_comparisons_name_dedup\"):\n ppg.util.global_pipegraph._mbf_comparisons_name_dedup = set()\n for name in self.groups_to_samples:\n if name in ppg.util.global_pipegraph._mbf_comparisons_name_dedup:\n raise ValueError(\n f\"Comparisons group {name} defined in multiple Comparisons - not supported\"\n )\n self.register_qc()\n\n def a_vs_b_old(\n self,\n a,\n b,\n method,\n laplace_offset=1 / 1e6,\n include_other_samples_for_variance=True,\n ):\n if a not in self.groups_to_samples:\n raise KeyError(a)\n if b not in self.groups_to_samples:\n raise KeyError(b)\n if not hasattr(method, \"compare\"):\n raise TypeError(f\"{method} had no method compare\")\n if include_other_samples_for_variance:\n other_groups = []\n for group_name in self.groups_to_samples:\n if group_name != a and group_name != b:\n other_groups.append(group_name)\n else:\n other_groups = []\n res = ComparisonAnnotatorOld(self, a, b, method, laplace_offset, other_groups)\n self.ddf += res\n return res\n\n def a_vs_b(\n self,\n a,\n b,\n method,\n laplace_offset=1 / 1e6,\n include_other_samples_for_variance=True,\n ):\n # this is the right way to do it\n if a not in self.groups_to_samples:\n raise KeyError(a)\n if b not in self.groups_to_samples:\n raise 
KeyError(b)\n if not hasattr(method, \"compare\"):\n raise TypeError(f\"{method} had no method compare\")\n if include_other_samples_for_variance:\n other_groups = []\n for group_name in self.groups_to_samples:\n if group_name != a and group_name != b:\n other_groups.append(group_name)\n else:\n other_groups = []\n res = ComparisonAnnotator(self, a, b, method, laplace_offset, other_groups)\n self.ddf += res\n return res\n\n def multi(\n self,\n name: str,\n main_factor: str,\n factor_reference: Dict[str, str],\n df_factors: DataFrame,\n interactions: List[Tuple[str, str]],\n method: Any,\n test_difference: bool = True,\n compare_non_reference: bool = False,\n laplace_offset: float = 1 / 1e6,\n ) -> ComparisonAnnotatorMulti:\n \"\"\"\n Initializes and returns an annotator for multi-factor analysis.\n\n Based on a main factor and a list of multiple other factor, this\n creates an annotator that annotates DEG analysis results for a multi-factor\n design. Interaction terms may be specified as a list of tuples which may be\n empty.\n if an empty interactions list is provided, the analysis just controls\n for different levels of other_factors and report the main effect.\n\n Parameters\n ----------\n name : str\n Annotator name, used for cache names and test of uniqueness.\n main_factor : str\n The main factor, usually condition or treatment.\n factor_reference : Dict[str, str]\n Dictionary of factor names (key) to base level (value), e.g.\n {\"treatment\": \"DMSO\"}.\n df_factors : DataFrame\n A dataframe containing all groups and factor levels\n relevant for the variance calculation. This may include groups\n beyond the groups of interest. If so, these groups are used for\n estimating dispersion but not reported in the results.\n interactions : List[Tuple[str, str]]\n List if interaction terms. If this is empty, the analysis will\n report the main factor effects controlling for the other factors.\n method : Any\n The DEG method to use, e.g. 
DESeq2MultiFactor.\n test_difference : bool, optional\n Test for differences in the main effects for different levels\n of other factors, by default True.\n compare_non_reference : bool, optional\n Test for difference of the main effects for different levels of other\n factors compared to non-reference levels, by default False.\n laplace_offset : float, optional\n laplace offset for methods that cannot handle zeros, by default 1/1e6.\n\n Returns\n -------\n ComparisonAnnotatorMulti\n Multi-factor comparison annotator.\n\n Raises\n ------\n ValueError\n If the df_factors does not contain a group column.\n ValueError\n If a factor is not found in df_factors.\n ValueError\n If a level is specified in the dictionaryx that is not present in df_factors.\n ValueError\n If less than 2 factors are given.\n KeyError\n If a specified group in not in the comparisons.\n TypeError\n if the given compare method does not have a compare function.\n \"\"\"\n if \"group\" not in df_factors.columns:\n raise ValueError(\n \"Column 'group' not in df_factors, please provide a group column containing all groups affecting the counts.\"\n )\n for factor in factor_reference:\n if factor not in df_factors.columns:\n raise ValueError(f\"Factor {factor} not in df_factors.\")\n # for level in factor_levels_ordered[factor]:\n # if level not in df_factors.values:\n # raise ValueError(f\"Unknown factor level {level} for factor {factor}.\")\n if len(factor_reference) < 2:\n raise ValueError(\n f\"You need at least 2 factors for a multi-factor design', factors given were {list(factor_reference.keys())}.\"\n )\n groups = list(df_factors[\"group\"].values)\n for group in groups:\n if group not in self.groups_to_samples:\n raise KeyError(group)\n if not hasattr(method, \"compare\"):\n raise TypeError(f\"{method} had no method compare\")\n res = ComparisonAnnotatorMulti(\n name,\n self,\n main_factor,\n factor_reference,\n groups,\n df_factors,\n interactions,\n method,\n test_difference,\n compare_non_reference,\n laplace_offset,\n )\n return res\n\n def all_vs_b(self, b, method, laplace_offset=1 / 1e6):\n res = {}\n for a in self.groups_to_samples:\n if a != b:\n res[a] = self.a_vs_b(a, b, method, laplace_offset)\n return res\n\n def all_vs_all(self, method, laplace_offset=1 / 1e6):\n res = {}\n for a, b in itertools.combinations(self.groups_to_samples, 2):\n res[a, b] = self.a_vs_b(a, b, method, laplace_offset)\n return res\n\n def _check_input_dict(self, groups_to_samples):\n if not isinstance(groups_to_samples, dict):\n raise ValueError(\"groups_to_samples must be a dict\")\n for k, v in groups_to_samples.items():\n if not isinstance(k, str):\n raise ValueError(\"keys must be str, was %s %s\" % (k, type(k)))\n v = [parse_a_or_c(x) for x in v]\n groups_to_samples[k] = v\n\n return groups_to_samples\n\n def _sample_columns_to_group(self):\n result = {}\n for group, samples in self.groups_to_samples.items():\n for ac in samples:\n c = ac[1]\n if c in result:\n raise ValueError(\n f\"Sample in multiple groups - not supported {ac}, {group}, {result[c]}\"\n )\n result[c] = group\n return result\n\n def register_qc(self):\n if not qc_disabled():\n self.register_qc_distribution()\n self.register_qc_pca()\n self.register_qc_correlation()\n\n def find_variable_name(self):\n for anno, column in self.samples:\n if anno is not None and hasattr(anno, \"unit\"):\n return anno.unit\n return \"value\"\n\n def get_plot_name(self, column):\n for ac in self.samples:\n if ac[1] == column:\n if ac[0] is not None:\n return getattr(ac[0], 
\"plot_name\", column)\n else:\n return column\n raise KeyError(column)\n\n def get_df(self):\n return self.ddf.df[[column for anno, column in self.samples]]\n\n def register_qc_distribution(self):\n output_filename = self.result_dir / \"distribution.png\"\n\n def plot(output_filename):\n df = self.get_df()\n sample_count = df.shape[1]\n sample_names = [self.get_plot_name(x) for x in df.columns]\n sample_groups = [self.sample_column_to_group[x] for x in df.columns]\n df.columns = pd.MultiIndex.from_tuples(\n zip(sample_names, sample_groups), names=(\"sample\", \"group\")\n )\n order = [\n x[0]\n for x in sorted(zip(sample_names, sample_groups), key=lambda v: v[1])\n ]\n return (\n dp(df)\n .melt(value_name=\"y\")\n .categorize(\"sample\", order)\n .p9()\n .theme_bw()\n .annotation_stripes()\n #.geom_violin(dp.aes(\"sample\", \"y\"), width=0.5)\n .add_boxplot(x=\"sample\", y=\"y\", _width=0.1, _fill=None, color=\"group\")\n .scale_color_many_categories()\n .scale_y_continuous(trans=\"log10\", name=self.find_variable_name())\n .turn_x_axis_labels()\n .hide_x_axis_title()\n .render(\n output_filename,\n height=5,\n width=1 + 0.25 * sample_count,\n limitsize=False,\n )\n )\n\n return register_qc(\n ppg.FileGeneratingJob(output_filename, plot).depends_on(self.deps())\n )\n\n def deps(self):\n input_columns = []\n for k in sorted(self.groups_to_samples):\n for ac in self.groups_to_samples[k]:\n input_columns.append(ac[1])\n\n return [\n self.ddf.add_annotator(ac[0]) for ac in self.samples if ac[0] is not None\n ] + [\n self.ddf.load(),\n ppg.ParameterInvariant(self.name, freeze(input_columns)),\n ] # you might be working with an anno less ddf afterall\n\n def register_qc_pca(self):\n output_filename = self.result_dir / \"pca.png\"\n\n def plot():\n import sklearn.decomposition as decom\n\n pca = decom.PCA(n_components=2, whiten=False)\n data = self.get_df()\n # min max scaling 0..1 per gene\n data = data.sub(data.min(axis=1), axis=0)\n data = data.div(data.max(axis=1), axis=0)\n\n data = data[~pd.isnull(data).any(axis=1)] # can' do pca on NAN values\n pca.fit(data.T)\n xy = pca.transform(data.T)\n title = \"PCA %s (%s)\\nExplained variance: x %.2f%%, y %.2f%%\" % (\n self.ddf.name,\n self.find_variable_name(),\n pca.explained_variance_ratio_[0] * 100,\n pca.explained_variance_ratio_[1] * 100,\n )\n plot_df = pd.DataFrame(\n {\n \"x\": xy[:, 0],\n \"y\": xy[:, 1],\n \"label\": [self.get_plot_name(c) for (a, c) in self.samples],\n \"group\": [\n self.sample_column_to_group[c] for (a, c) in self.samples\n ],\n }\n )\n p = dp(plot_df).p9().theme_bw().add_scatter(\"x\", \"y\", color=\"group\")\n if data.shape[1] < 15:\n p = p.add_text(\n \"x\",\n \"y\",\n \"label\",\n _alpha=0.5,\n # _adjust_text={\n # \"expand_points\": (2, 2),\n # \"arrowprops\": {\"arrowstyle\": \"->\", \"color\": \"darkgrey\"},\n # },\n )\n p = (\n p.scale_color_many_categories()\n .title(title)\n .render(output_filename, width=8, height=6, dpi=72)\n )\n plot_df.to_csv(output_filename.with_suffix(\".tsv\"), sep=\"\\t\")\n\n return register_qc(\n ppg.MultiFileGeneratingJob(\n [output_filename, output_filename.with_suffix(\".tsv\")], plot\n ).depends_on(self.deps())\n )\n\n def register_qc_correlation(self):\n output_filename = self.result_dir / \"pearson_correlation.png\"\n\n def plot(output_filename):\n data = self.get_df()\n data = data.sub(data.min(axis=1), axis=0)\n data = data.div(data.max(axis=1), axis=0)\n # data -= data.min() # min max scaling 0..1 per gene\n # data /= data.max()\n data = data[\n 
~pd.isnull(data).any(axis=1)\n ] # can' do correlation on NAN values\n sample_names = [self.get_plot_name(x) for x in data.columns]\n sample_groups = [self.sample_column_to_group[x] for x in data.columns]\n data.columns = sample_names\n\n order_pdf = pd.DataFrame(\n {\"sample\": sample_names, \"group\": sample_groups}\n ).sort_values([\"group\", \"sample\"])\n ordered_names = [\"group\"] + list(order_pdf[\"sample\"])\n sample_count = data.shape[1]\n pdf = (\n data.corr().transpose().assign(group=0).transpose()\n ) # value doesn't matter, this just reserves space on the plot\n pdf = pd.melt(pdf.reset_index(), \"index\")\n (\n dp(pdf)\n .categorize(\"index\", ordered_names)\n .categorize(\"variable\", ordered_names)\n .p9()\n .add_tile(\"index\", \"variable\", fill=\"value\")\n .scale_fill_gradient2(\n \"blue\", \"white\", \"red\", limits=[-1, 1], midpoint=0\n )\n .add_scatter(\n _x=1, y=\"sample\", color=\"group\", _shape=\"s\", data=order_pdf, _size=3\n )\n .scale_color_many_categories()\n .hide_x_axis_title()\n .hide_y_axis_title()\n .turn_x_axis_labels()\n .render(\n output_filename,\n width=1 + 0.15 * sample_count,\n height=0.15 * sample_count,\n )\n )\n\n return register_qc(\n ppg.FileGeneratingJob(output_filename, plot).depends_on(self.deps())\n )\n", "id": "4047358", "language": "Python", "matching_score": 2.589792490005493, "max_stars_count": 0, "path": "src/mbf_comparisons/comparisons.py" }, { "content": "\"\"\"Support for heatmap images of chipseq data.\n\nYou need\n - a genomic regions which you want to plot (may be of differing size)\n - a number of AlignedLanes to plot.\nYou do\n - create a Heatmap object\n - call plot(output_filename,...) on it\n\nYou need to decide and pass in appropriate strategies:\n\n - How the regions get cookie cut (e.g. RegionFromCenter)\n - How the reads are counted (e.g. SmoothExtendedReads)\n - How the data is normalized (NormLaneTPMInterpolate is fast and sensible)\n - How the regions are Ordered (OrderIthLaneSum, OrderClusterKMeans)\n\n\n\"\"\"\nimport pypipegraph as ppg\nfrom mbf_genomics.util import parse_a_or_c\nfrom . import plot_strategies\nfrom . 
import norm, order\nfrom mbf_genomics import DelayedDataFrame\nfrom mbf_genomics.util import freeze\n\n\nclass HeatmapPlot:\n def __init__(\n self,\n ddf: DelayedDataFrame,\n columns,\n output_filename,\n normalization_strategy: norm._NormStrategy,\n order_strategy: order.OrderStrategy,\n names=None,\n plot_options={},\n ):\n \"\"\"\n ddf: The genomir regions you want to plot with a minimum of chr, start, stop, columns\n columns: The columns from ddf to plot, in order.\n output_filename: a path to store the heatmap in\n normalization_strategy: the normalization strategy to apply, see heatmap.norm\n order_strategy: how to order the rows from the ddf, see heatmap_order\n\n plot_options:\n\n show_cluster_ids - whether to show cluster ids as little colored dots at the left hand side\n \"\"\"\n self.ddf = ddf\n self.columns = [parse_a_or_c(x) for x in columns]\n self.output_filename = ddf.pathify(output_filename)\n if not isinstance(normalization_strategy, norm._NormStrategy):\n raise ValueError(\n \"normalization_strategy must be a heatmap.norm._NormStrategy descendend\"\n )\n self.normalization_strategy = normalization_strategy\n if not isinstance(order_strategy, order.OrderStrategy):\n raise ValueError(\n \"order_strategy must be a heatmap.norm._NormStrategy descendend\"\n )\n\n self.order_strategy = order_strategy\n self.plot_strategy = plot_strategies.Plot_Matplotlib()\n self.names = names\n self.plot_options = plot_options\n self.plot()\n\n def plot(self):\n normed = self.normed_ddf(self.ddf)\n ordered = self.ordered_ddf(normed)\n names = self.handle_names()\n\n def plot():\n p = self.plot_strategy.plot(ordered.df, names, self.plot_options)\n self.plot_strategy.render(str(self.output_filename), p)\n\n if ppg.inside_ppg():\n ppg.util.global_pipegraph.quiet = False\n deps = [\n ordered.load(),\n ppg.FunctionInvariant(\n \"mbf_heatmap.\" + self.plot_strategy.name + \"plot_func\",\n self.plot_strategy.__class__.plot,\n ),\n ppg.FunctionInvariant(\n \"mbf_heatmap\" + self.plot_strategy.name + \"render_func\",\n self.plot_strategy.__class__.render,\n ),\n ppg.ParameterInvariant(\n self.output_filename, freeze((self.names, self.plot_options))\n ),\n ]\n return ppg.FileGeneratingJob(self.output_filename, plot).depends_on(deps)\n else:\n plot()\n return self.output_filename\n\n def normed_ddf(self, input_ddf):\n def load():\n df = input_ddf.df[[ac[1] for ac in self.columns]]\n normed_df = self.normalization_strategy.calc(\n df, [ac[1] for ac in self.columns]\n )\n return normed_df\n output_name = input_ddf.name + \"_heatmap_\" + self.normalization_strategy.name\n if ppg.inside_ppg():\n deps = [\n self.ddf.add_annotator(ac[0])\n for ac in self.columns\n if ac[0] is not None\n ] + [self.normalization_strategy.deps(), input_ddf.load(),\n ppg.FunctionInvariant(output_name + '_calc', self.normalization_strategy.calc)]\n else:\n deps = []\n\n return DelayedDataFrame(\n output_name,\n load,\n deps,\n input_ddf.result_dir,\n )\n\n def ordered_ddf(self, input_ddf):\n def load():\n df = input_ddf.df[[ac[1] for ac in self.columns]]\n return self.order_strategy.calc(df, [ac[1] for ac in self.columns])\n output_name = input_ddf.name + self.order_strategy.name\n if ppg.inside_ppg():\n deps = [\n self.ddf.add_annotator(ac[0])\n for ac in self.columns\n if ac[0] is not None\n ] + [self.order_strategy.deps(), input_ddf.load(),\n ppg.FunctionInvariant(output_name + '_calc', self.order_strategy.calc)]\n else:\n deps = []\n\n return DelayedDataFrame(\n output_name, load, deps, input_ddf.result_dir\n )\n\n def 
handle_names(self):\n if self.names is None:\n names = [\n getattr(ac[0], \"plot_name\", ac[1]) if ac[0] is not None else ac[1]\n for ac in self.columns\n ]\n elif isinstance(self.names, dict):\n if isinstance(iter(self.names.values()).next(), tuple):\n names = [self.names[ac] for ac in self.columns]\n else: # byi column name\n names = [self.names[ac[1]] for ac in self.columns]\n elif isinstance(self.names, list):\n if len(self.names) != len(self.columns):\n raise ValueError(\"Name length did not match column length\")\n names = self.names\n else:\n raise ValueError(\"Could not handle names %s\" % (names,))\n return names\n", "id": "5646069", "language": "Python", "matching_score": 4.99445104598999, "max_stars_count": 0, "path": "src/mbf_heatmap/ddf/heatmap.py" }, { "content": "\"\"\"Support for heatmap images of chipseq data.\n\nYou need\n - a genomic regions which you want to plot (may be of differing size)\n - a number of AlignedLanes to plot.\nYou do\n - create a Heatmap object\n - call plot(output_filename,...) on it\n\nYou need to decide and pass in appropriate strategies:\n\n - How the regions get cookie cut (e.g. RegionFromCenter)\n - How the reads are counted (e.g. SmoothExtendedReads)\n - How the data is normalized (NormLaneTPMInterpolate is fast and sensible)\n - How the regions are Ordered (order.IthLaneSum, order.ClusterKMeans)\n \"\"\"\n# flake8: noqa\nfrom pathlib import Path\nimport hashlib\nimport pandas as pd\nfrom typing import List\nimport pypipegraph as ppg\nimport numpy as np\nfrom . import regions\nfrom . import smooth\nfrom . import order\nfrom . import norm\nfrom .plot_strategies import Plot_Matplotlib\n\n\nclass Heatmap:\n def __init__(\n self,\n regions_to_draw,\n lanes_to_draw: List,\n region_strategy=regions.RegionFromCenter(2000),\n smoothing_strategy=smooth.SmoothExtendedReads(200),\n ):\n \"\"\"\n A one-line-per-region, one column per (chipseq-) lane signal-intensity\n mapped to a color heatmap object.\n\n Parameters:\n @regions_to_draw:\n Which genomic regions do you want to draw? Will be processed by the @region_strategy\n @lanes_to_draw:\n Which Chipseq lanes shall we draw, left to right? a list\n @region_strategy:\n How to convert the regions intervals into the same-sized regions to plot (One of the Region_* classes)\n @smoothing_strategy:\n How shall the reads be proprocessed (e.g. extended, background substracted...) - one of the Smooth_*\n\n This is half the process of drawing a heatmap - just the parts that take the largest amout of time.\n \"\"\"\n self.gr_to_draw = regions_to_draw\n self.lanes_to_draw = lanes_to_draw\n if len(set(lanes_to_draw)) != len(lanes_to_draw):\n raise ValueError(\"Duplicate lanes passed\")\n self.region_strategy = region_strategy\n self.smoothing_strategy = smoothing_strategy\n self.cache_dir = self.cache_dir = Path(\"cache\") / \"ChipseqHeatmap\"\n\n def plot(\n self,\n output_filename,\n normalization_strategy: norm._Normalization = norm.NormLaneTPM(),\n order_strategy: order._Order = order.FirstLaneSum(),\n names=None,\n plot_options: dict = None,\n ):\n \"\"\"Plot the heatmap into @output_file\n Parameters:\n @output_file:\n Where to plot the heatmap (.png / .pdf)\n @normalization_strategy:\n How shall the signal (from the smoothing_strategy) be normalized? 
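# A minimal usage sketch of the Heatmap API documented in this file, assuming a
# running pypipegraph.  `peaks` (a GenomicRegions object) and `lanes` (a list of
# AlignedLane objects) are hypothetical placeholders built elsewhere in a project.
from mbf_heatmap.chipseq import Heatmap, regions, smooth, norm, order

hm = Heatmap(
    peaks,                                             # regions_to_draw
    lanes,                                             # lanes_to_draw, left to right
    region_strategy=regions.RegionFromCenter(2000),    # cookie-cut 2 kb around centers
    smoothing_strategy=smooth.SmoothExtendedReads(200),
)
hm.plot(
    "results/heatmap.png",
    normalization_strategy=norm.NormLaneTPM(),
    order_strategy=order.FirstLaneSum(),
    names="short",                                     # use aligned_lane.short_name
)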
Use of the Norm_* classes\n @order_strategy:\n In which order shall the regions be drawen (one of ther Order_* classes)\n @names:\n None - use aligned_lane.name\n 'short' - use aligned_lane.short_name\n list - use names in order (see Heatmap.lanes_to_draw)\n dictionary - partial lookup - either dict[lane], or lane.name if missing\n function - called get_name(lane) for each lane\n\n @plot_options: get's passed on to plot_strategies.Plot_Matplotlib, check there for valid parameters.\n\n \"\"\"\n plot_strategy = Plot_Matplotlib()\n if plot_options is None:\n plot_options = {}\n res = _HeatmapPlot(\n self,\n output_filename,\n normalization_strategy,\n order_strategy,\n plot_strategy,\n names,\n plot_options,\n )\n res()\n return res\n\n def calc_regions(self):\n def calc():\n return self.do_calc_regions()\n\n key = hashlib.md5(\n \",\".join(\n [self.gr_to_draw.name, self.region_strategy.name]\n + list(set([x.name for x in self.lanes_to_draw]))\n ).encode()\n ).hexdigest()\n # technically, we could share the regions job between heatmaps with the same regions but differen lanes\n # but we're using a CachedAttributeLoadingJob and that would.. .complicate things quite a bit\n of = self.cache_dir / \"regions\" / key\n of.parent.mkdir(exist_ok=True, parents=True)\n return ppg.CachedAttributeLoadingJob(of, self, \"regions_\", calc).depends_on(\n [\n ppg.ParameterInvariant(\n of, (self.region_strategy.name, self.gr_to_draw.name)\n ),\n ppg.FunctionInvariant(\n \"genomics.regions.heatmap.\"\n + self.region_strategy.name\n + \"calc_func\",\n self.region_strategy.__class__.calc,\n ),\n ]\n + self.region_strategy.get_dependencies(self.gr_to_draw)\n )\n\n def calc_raw_data(self):\n # we don't use a CachedAttributeLoadingJob so that we can compress the output.\n # don't knock that, it easily saves a gigabyte of data on a larger GR\n\n cache_dir = self.cache_dir / \"raw_data\"\n cache_dir.mkdir(exist_ok=True, parents=True)\n\n jobs = []\n smoothing_invariant = (\n ppg.FunctionInvariant(\n \"genomics.regions.heatmap.\"\n + self.smoothing_strategy.name\n + \"calc_func\",\n self.smoothing_strategy.__class__.calc,\n ),\n )\n for lane in self.lanes_to_draw:\n key = \",\".join(\n [\n self.gr_to_draw.name,\n self.region_strategy.name,\n self.smoothing_strategy.name,\n lane.name,\n ]\n )\n key = hashlib.md5(key.encode()).hexdigest()\n of = cache_dir / (key + \".npz\")\n\n def calc(lane=lane, of=of):\n \"\"\"Raw data is a dictionary: lane_name: 2d matrix\"\"\"\n raw_data = {lane.name: self.do_calc_raw_data(lane)}\n np.savez_compressed(of, **raw_data)\n\n jobs.append(\n ppg.FileGeneratingJob(of, calc).depends_on(\n [\n ppg.ParameterInvariant(\n of,\n (\n self.smoothing_strategy.name,\n lane.name,\n self.gr_to_draw.name,\n ),\n ),\n smoothing_invariant,\n self.calc_regions(),\n ppg.FunctionInvariant(\n \"genomics.regions.heatmap.do_calc_raw_data\",\n Heatmap.do_calc_raw_data,\n ),\n ]\n + self.smoothing_strategy.get_dependencies(lane)\n )\n )\n\n def load():\n result = {}\n for job in jobs:\n npzfile = np.load(job.job_id)\n for f in npzfile.files:\n result[f] = npzfile[f]\n return result\n\n key = \",\".join(\n [\n self.gr_to_draw.name,\n self.region_strategy.name,\n self.smoothing_strategy.name,\n \",\".join(list(sorted([x.name for x in self.lanes_to_draw]))),\n ]\n )\n return ppg.AttributeLoadingJob(\n key + \"_load\", self, \"raw_data_\", load\n ).depends_on(jobs)\n\n def do_calc_regions(self):\n self.regions_ = self.region_strategy.calc(self.gr_to_draw)\n return self.regions_\n\n def do_calc_raw_data(self, 
lane):\n if not hasattr(self, \"raw_data_\"):\n self.raw_data_ = {}\n lane_raw_data = self.smoothing_strategy.calc(self.regions_, lane)\n self.raw_data_[lane.name] = lane_raw_data\n return lane_raw_data\n\n\nclass _HeatmapPlot:\n \"\"\"This class encapsulates the heatmap parts that are specific to each rendering.\n The common parts such as raw data generation (and the user interface) are in Heatmap\"\"\"\n\n def __init__(\n self,\n heatmap,\n output_filename,\n normalization_strategy,\n order_strategy,\n plot_strategy,\n names,\n plot_options,\n ):\n \"\"\"See Heatmap.plot for details\"\"\"\n self.heatmap = heatmap\n self.output_filename = Path(output_filename)\n self.output_filename.parent.mkdir(exist_ok=True, parents=True)\n self.name = \"HeatmapPlot\" + hashlib.md5(output_filename.encode()).hexdigest()\n ppg.util.assert_uniqueness_of_object(self)\n del self.name # only used for uniqueness check...\n self.cache_dir = Path(\"cache\") / \"ChipseqHeatmap\" / self.output_filename.name\n self.cache_dir.mkdir(exist_ok=True, parents=True)\n\n self.normalization_strategy = normalization_strategy\n self.order_strategy = order_strategy\n self.plot_strategy = plot_strategy\n self.names = names\n self.plot_options = plot_options\n\n def __call__(self):\n norm_job = self.calc_norm_data()\n order_job = self.calc_order()\n names_in_order = [\n self.handle_name(self.names, x, ii)\n for (ii, x) in enumerate(self.heatmap.lanes_to_draw)\n ]\n\n def plot():\n p = self.do_plot()\n self.plot_strategy.render(self.output_filename, p)\n\n plot_job = ppg.FileGeneratingJob(self.output_filename, plot)\n plot_job.ignore_code_changes()\n plot_job.depends_on(norm_job)\n plot_job.depends_on(order_job)\n plot_job.depends_on(\n ppg.FunctionInvariant(\n \"genomics.regions._HeatmapPlot.do_plot\", _HeatmapPlot.do_plot\n )\n )\n plot_job.depends_on(\n ppg.FunctionInvariant(\n \"genomics.regions.heatmap.\" + self.plot_strategy.name + \"plot_func\",\n self.plot_strategy.__class__.plot,\n )\n )\n plot_job.depends_on(\n ppg.FunctionInvariant(\n \"genomics.regions.heatmap.\" + self.plot_strategy.name + \"render_func\",\n self.plot_strategy.__class__.render,\n )\n )\n plot_job.depends_on(self.heatmap.gr_to_draw.load())\n plot_job.depends_on(\n ppg.ParameterInvariant(\n self.output_filename,\n self.plot_strategy.get_parameters(\n self.plot_options, self.heatmap.lanes_to_draw\n )\n + (names_in_order,),\n )\n )\n plot_job.depends_on(\n self.plot_strategy.get_dependencies(self, self.plot_options)\n )\n if hasattr(self.names, \"__call__\"):\n plot_job.depends_on(\n ppg.FunctionInvariant(self.output_filename + \"_names\", self.names)\n )\n\n def handle_name(self, names, aligned_lane, lane_no):\n if names is None:\n return aligned_lane.name\n elif names is \"short\":\n return aligned_lane.short_name\n elif isinstance(names, list):\n return names[lane_no]\n elif isinstance(names, dict):\n return names.get(aligned_lane, aligned_lane.name)\n elif hasattr(names, \"__call__\"):\n return names(aligned_lane)\n else:\n raise ValueError(\"Invalid parameter for names: %s\" % (names,))\n\n def calc_norm_data(self):\n def calc():\n \"\"\"Normalized data is a dictionary: lane_name: 2d matrix\"\"\"\n return self.do_calc_norm_data()\n\n of = self.cache_dir / \"norm_data\"\n return ppg.AttributeLoadingJob(of, self, \"norm_data_\", calc).depends_on(\n [\n ppg.ParameterInvariant(of, (self.normalization_strategy.name,)),\n self.heatmap.calc_raw_data(),\n ppg.FunctionInvariant(\n \"genomics.regions.heatmap.\"\n + self.normalization_strategy.name\n + 
\"calc_func\",\n self.normalization_strategy.__class__.calc,\n ),\n ]\n + self.normalization_strategy.get_dependencies(self.heatmap.lanes_to_draw)\n )\n\n def calc_order(self):\n def calc():\n return self.do_calc_order()\n\n of = self.cache_dir / \"order\"\n deps = self.order_strategy.get_dependencies(\n self.heatmap.gr_to_draw, self.heatmap.lanes_to_draw\n )\n if len(deps) == 2:\n order_deps, order_params = deps\n order_func = None\n else:\n order_deps, order_params, order_func = deps\n\n return ppg.CachedAttributeLoadingJob(of, self, \"order_\", calc).depends_on(\n [\n self.heatmap.calc_raw_data(),\n self.calc_norm_data(),\n ppg.ParameterInvariant(of, (self.order_strategy.name,) + order_params),\n ppg.FunctionInvariant(\n of.name + \"_secondary_func\", order_func\n ),\n ppg.FunctionInvariant(\n \"genomics.regions.heatmap.\"\n + self.order_strategy.name\n + \"calc_func\",\n self.order_strategy.__class__.calc,\n ),\n ]\n + order_deps\n )\n\n def do_calc_norm_data(self):\n self.norm_data_ = self.normalization_strategy.calc(\n self.heatmap.lanes_to_draw, self.heatmap.raw_data_.copy()\n )\n return self.norm_data_\n\n def do_calc_order(self):\n self.order_ = self.order_strategy.calc( # remember, the order is a tuple, 0 => index order to plot, 1 -> optional cluster number\n self.heatmap.gr_to_draw,\n self.heatmap.lanes_to_draw,\n self.heatmap.raw_data_.copy(),\n self.norm_data_,\n )\n if not isinstance(self.order_, tuple):\n raise ValueError(\n \"Invalid self.order value, should have been a tuple, was: %s\"\n % (self.order_,)\n )\n return self.order_\n\n def do_plot(self):\n \"\"\"Return the prepared plot object, ready for rendering\"\"\"\n names_in_order = [\n self.handle_name(self.names, x, ii)\n for (ii, x) in enumerate(self.heatmap.lanes_to_draw)\n ]\n return self.plot_strategy.plot(\n self.heatmap.gr_to_draw,\n self.heatmap.lanes_to_draw,\n self.heatmap.raw_data_,\n self.norm_data_,\n self.order_,\n names_in_order,\n self.plot_options,\n )\n", "id": "5182939", "language": "Python", "matching_score": 3.896005153656006, "max_stars_count": 0, "path": "src/mbf_heatmap/chipseq/__init__.py" }, { "content": "import numpy as np\nimport pypipegraph as ppg\nimport pandas as pd\nfrom .norm import _apply_tpm\n\n\nclass _Order:\n \"\"\"Baseclass for all Orders.\n An order returns an iloc-list to re-sort (from top to bottom!)\n and a cluster association (in the original order I believe)\n \"\"\"\n\n\nclass AsIs(_Order):\n \"\"\"Take the order as it was in the regions passed to Heatmap\"\"\"\n\n name = \"Order_As_Is\"\n\n def calc(self, gr, lanes, raw_data, norm_data):\n any_one = next(iter(raw_data.values()))\n return (list(range(0, any_one.shape[0])), None)\n\n def get_dependencies(self, gr, lanes):\n return [gr.load()], (None,)\n\n\nclass _IthLane(_Order):\n \"\"\"Order by the ith lane and and arbitrary function\"\"\"\n\n def __init__(self, i, func, func_name):\n \"\"\"i may be either an int or a lane object\"\"\"\n self.i = i\n self.func = func\n try:\n self.name = \"IthLane_%i_%s\" % (i, func_name)\n except TypeError: # raised on non integer...\n self.name = \"IthLane_%s_%s\" % (i.name, func_name)\n\n def calc(self, gr, lanes, raw_data, norm_data):\n \"\"\"Returns the indices of the order, and an (optional) cluster number - None for no clustering\"\"\"\n if hasattr(self.i, \"name\"):\n lane_name = self.i.name\n else:\n if isinstance(self.i, int):\n if isinstance(lanes, dict):\n lane_name = list(lanes.values())[self.i].name\n else:\n lane_name = list(lanes)[self.i].name\n else:\n lane_name = 
lanes[self.i].name\n values = raw_data[lane_name]\n sums = self.func(values)\n print(\"sums\", sums)\n return (np.argsort(sums), None)\n\n def get_dependencies(self, gr, lanes):\n if isinstance(self.i, int):\n params = (lanes[self.i].name,)\n else:\n params = ()\n return [gr.load()], params\n\n\ndef FirstLaneSum():\n \"\"\"Order by the sum of the signal in the very first lane.\n Identical to IthLaneSum(0)\n\n \"\"\"\n return IthLaneSum(0)\n\n\nclass IthLaneSum(_IthLane):\n \"\"\"Order by the sum(signal strength) in the ith lane.\n @i may be either an integer index into the list of lanes_to_draw\n or or a AlignedLane object (which needs to be in lanes_to_draw!)\n\n Returns the indices of the order, and an (optional) cluster number - None for no clustering\n \"\"\"\n\n def __init__(self, i):\n _IthLane.__init__(self, i, lambda values: values.sum(axis=1), \"sum\")\n\n\nclass IthLaneMax(_IthLane):\n \"\"\"Order by the max(signal strength) in the ith lane.\n @i may be either an integer index into the list of lanes_to_draw\n or or a AlignedLane object (which needs to be in lanes_to_draw!)\n Returns the indices of the order, and an (optional) cluster number - None for no clustering\n \"\"\"\n\n def __init__(self, i):\n _IthLane.__init__(self, i, lambda values: values.max(axis=1), \"max\")\n\n\nclass ByAnnotator(_Order):\n \"\"\"Order the regions by a (numeric) annotator either low to high (ascending) or\n reverse\n\n You can transform the annotator's values first by passing in a func.\n which get's called with the values of the annotator column.\n\n Annotator may be anything accepted by mbf_genomics.util.parse_a_or_c\n\n \"\"\"\n\n def __init__(self, annotator_to_order_by, ascending=True, func=None, name=None):\n import mbf_genomics.util\n\n self.annotator, self.column = mbf_genomics.util.parse_a_or_c(\n annotator_to_order_by\n )\n self.func = func\n self.ascending = ascending\n if name:\n self.name = name\n else:\n self.name = \"OrderByAnnotator_%s\" % (self.column)\n\n def calc(self, gr, lanes, raw_data, norm_data):\n \"\"\"Returns the indices of the order when sorted by annotator\"\"\"\n df_sorted = gr.df\n if self.func is not None:\n df_sorted[\"sortme\"] = self.func(df_sorted[self.column].values)\n df_sorted = df_sorted.sort_values(\"sortme\", ascending=self.ascending)\n else:\n df_sorted = df_sorted.sort_values(self.column, ascending=self.ascending)\n return (df_sorted.index.values, None)\n\n def get_dependencies(self, gr, lanes):\n return (\n [gr.load(), gr.add_annotator(self.annotator)],\n (gr.name,),\n self.func,\n )\n\n\nclass _OrderKMeans(_Order):\n def do_cluster(\n self, for_clustering, gr, lanes, raw_data, norm_data, seed=(1000, 2000)\n ):\n import scipy.cluster.vq\n import random\n\n random.seed(seed)\n np.random.seed(seed)\n for_clustering_whitened = scipy.cluster.vq.whiten(for_clustering)\n del for_clustering\n if self.no_of_clusters is None:\n no_of_clusters_to_use = 2 ** len(lanes)\n else:\n no_of_clusters_to_use = self.no_of_clusters\n while no_of_clusters_to_use > 0: # retry with fewer clusters if it\n try:\n codebook, distortion = scipy.cluster.vq.kmeans(\n for_clustering_whitened, no_of_clusters_to_use\n )\n labels, distortion = scipy.cluster.vq.vq(\n for_clustering_whitened, codebook\n )\n break\n except np.linalg.linalg.LinAlgError:\n no_of_clusters_to_use -= 1\n del for_clustering_whitened\n\n first_lane_region_intensity_sum = norm_data[\n lanes[self.lane_to_sort_by].name\n ].max(axis=1)\n region_no_to_cluster = {}\n tuples_for_sorting = []\n for region_no, cluster_id in 
enumerate(labels):\n region_no_to_cluster[region_no] = cluster_id\n tuples_for_sorting.append(\n (cluster_id, first_lane_region_intensity_sum[region_no], region_no)\n )\n tuples_for_sorting.sort(reverse=False)\n region_order = np.zeros(len(tuples_for_sorting), dtype=np.uint32)\n cluster_id_in_order = []\n for yy_value, tup in enumerate(tuples_for_sorting):\n cluster_id, donotcare, region_no = tup\n region_order[yy_value] = region_no\n cluster_id_in_order.append(cluster_id)\n return region_order, cluster_id_in_order\n\n def get_dependencies(self, gr, lanes):\n return (\n [\n gr.load(),\n ppg.FunctionInvariant(\n \"genomics.regions.heatmap._OrderKMeans.do_cluster\",\n self.__class__.do_cluster,\n ),\n ],\n (self.lane_to_sort_by,),\n )\n\n\nclass ClusterKMeans(_OrderKMeans):\n \"\"\"Order by clustering the regions across all lanes (or lanes_to_include),\n sort by lane_to_sort_by inside one cluster (based on sum of signal).\n \"\"\"\n\n def __init__(self, no_of_clusters=None, lane_to_sort_by=0, lanes_to_include=None):\n \"\"\"Within a cluster, sort by lane_to_sort_by sum normalized signal\"\"\"\n if no_of_clusters is not None and not isinstance(no_of_clusters, int):\n raise ValueError(\"Invalid no_of_clusters\")\n if not isinstance(lane_to_sort_by, int):\n raise ValueError(\"@lane_to_sort_by must be an integer\")\n if lanes_to_include is not None:\n lane_str = \",\".join([x.name for x in lanes_to_include])\n else:\n lane_str = \"None\"\n self.name = \"OrderClusterKMeans_%s_%s\" % (no_of_clusters, lane_str)\n self.no_of_clusters = no_of_clusters\n self.lane_to_sort_by = lane_to_sort_by\n self.lanes_to_include = lanes_to_include\n\n def calc(self, gr, lanes, raw_data, norm_data):\n row_count = raw_data[lanes[0].name].shape[0]\n if self.lanes_to_include is not None:\n cluster_lanes = self.lanes_to_include\n else:\n cluster_lanes = lanes\n for_clustering = np.empty((row_count, len(cluster_lanes)))\n for lane_no, lane in enumerate(cluster_lanes):\n lane_data = raw_data[lane.name]\n lane_data = lane_data.sum(axis=1)\n lane_data = lane_data - lane_data.min() # norm to 0..1\n lane_data = lane_data / lane_data.max()\n for_clustering[:, lane_no] = lane_data\n\n return self.do_cluster(for_clustering, gr, lanes, raw_data, norm_data)\n\n\nclass ClusterKMeans_ClusterSortedSignalCompatible(_OrderKMeans):\n \"\"\"A reimplementation of the oldschool ClusterSortedSignal plot clustering\"\"\"\n\n def __init__(self, no_of_clusters=None):\n if no_of_clusters is not None and not isinstance(no_of_clusters, int):\n raise ValueError(\"Invalid no_of_clusters\")\n self.name = \"OrderClusterKMeans_CSSC_%s\" % no_of_clusters\n self.lane_to_sort_by = 0\n self.no_of_clusters = no_of_clusters\n\n def calc(self, gr, lanes, raw_data, norm_data):\n row_count = raw_data[lanes[0].name].shape[0]\n vector_length = raw_data[lanes[0].name].shape[1]\n lane_count = len(lanes)\n for_clustering = np.empty((row_count, vector_length * lane_count))\n for lane_no, lane in enumerate(lanes):\n lane_data = raw_data[lane.name]\n offset = vector_length * lane_no\n for_clustering[:, offset : offset + vector_length] = lane_data\n return self.do_cluster(for_clustering, gr, lanes, raw_data, norm_data)\n\n\nclass Combinatorics(_Order):\n \"\"\"A heatmap that ranks the conditions in each peak,\n than orders by the rank tuples (so first all peaks where condition 0 is the strongest,\n within that first all where condition 1 is the 2nd strongest, and so on)\n Experimental, usefulness in question\n \"\"\"\n\n name = \"OrderCombinatorics\"\n\n def 
calc(self, gr, lanes, raw_data, norm_data):\n _apply_tpm(lanes, raw_data)\n for_clustering = {}\n for lane_no, lane in enumerate(lanes):\n lane_data = raw_data[lane.name].sum(axis=1)\n lane_data -= lane_data.min() # so each lane is from 0..1\n lane_data /= lane_data.max()\n for_clustering[lane.name] = lane_data / len(\n lanes\n ) # make them go from 0.. 1/lane_count\n df = pd.DataFrame(for_clustering)\n df += df.rank(\n axis=1\n ) # Ranks start at 1. So most siginficant digits are the ranks, then within one rank, sort by intensity of the respective lane...\n df = df.sort_values(list(df.columns[:-1]))\n order = list(df.index)\n dots = []\n last_key = False\n current_dot = 0\n for ii, pos in enumerate(order):\n key = tuple(df.iloc[pos].astype(int))\n if last_key is None or (key != last_key):\n if current_dot:\n current_dot = 0\n else:\n current_dot = 1\n last_key = key\n dots.append(current_dot)\n return (order, dots)\n\n def get_dependencies(self, gr, lanes):\n return [gr.load()], (None,)\n\n\nclass StealFromOtherHeatmapPlot(_Order):\n \"\"\"Copy the ordering from another heatmap.plot() call\"\"\"\n\n def __init__(self, other_heatmap_plot):\n from . import _HeatmapPlot\n\n self.name = \"StealFromOtherHeatmap\" + other_heatmap_plot.heatmap.gr_to_draw.name\n if not isinstance(other_heatmap_plot, _HeatmapPlot):\n raise ValueError(\n \"@other_heatmap_plot must be thue result of a Heatmap.plot call\"\n )\n self.other_heatmap_plot = other_heatmap_plot\n\n def calc(self, gr, lanes, raw_data, norm_data):\n return self.other_heatmap_plot.order_\n\n def get_dependencies(self, gr, lanes):\n return [gr.load(), self.other_heatmap_plot.calc_order()], (None,)\n\n\nclass OverlappingClustersBySignalSumIthLane(_Order):\n \"\"\"\n Create a cluster that overlaps with your gr_to_overlap and a cluster that does not overlap. 
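# The order strategies in this file share one contract: calc() returns a tuple of
# (row order to draw, optional per-row cluster ids or None).  Below is a small
# illustrative sketch -- not part of the package -- of a custom strategy that
# sorts regions by their mean normalized signal across all lanes.
import numpy as np

from mbf_heatmap.chipseq.order import _Order


class OrderByRowMean(_Order):
    """Sketch: order regions by mean normalized signal across all lanes (ascending)."""

    name = "OrderByRowMean"

    def calc(self, gr, lanes, raw_data, norm_data):
        # norm_data[lane.name] is a (regions x positions) matrix; average each
        # region per lane, then across lanes, and sort ascending.
        per_lane_means = np.column_stack(
            [norm_data[lane.name].mean(axis=1) for lane in lanes]
        )
        return (np.argsort(per_lane_means.mean(axis=1)), None)

    def get_dependencies(self, gr, lanes):
        return [gr.load()], (None,)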
And sort each cluster individually by the\n \"\"\"\n\n def __init__(self, gr_to_overlap, lane_to_sort_by):\n if not isinstance(lane_to_sort_by, int):\n raise ValueError(\"@lane_to_sort_by must be an integer\")\n self.name = \"OrderOverlappingClustersBySignalSum_%s_th_lane\" % str(\n lane_to_sort_by\n )\n self.lane_to_sort_by = lane_to_sort_by\n self.gr_to_overlap = gr_to_overlap\n\n def calc(self, gr, lanes, raw_data, norm_data):\n lane_name = lanes[self.lane_to_sort_by].name\n values = raw_data[lane_name]\n sums = values.sum(axis=1)\n out_list = []\n\n for index_row, my_sum in zip(gr.df[[\"chr\", \"start\", \"stop\"]].iterrows(), sums):\n if self.gr_to_overlap.has_overlapping(\n index_row[1][\"chr\"], index_row[1][\"start\"], index_row[1][\"stop\"]\n ):\n cluster_id = 1\n else:\n cluster_id = 0\n out_list.append(\n (cluster_id, my_sum, index_row[0])\n ) # index_row[0] is needed to identify the orignal row in the GenomicRegion the values belong to\n sorted_tuples = sorted(out_list, reverse=False)\n \"\"\"\n eine calc(self, gr, lanes, raw_data, norm_data), und gibt zurueck: [ Reihenfolge in der die Regions aus GR gemalt werden sollen, Cluster-no], und jede clusterno wird nachher eine farbe\n \"\"\"\n # first_lane_region_intensity_sum = norm_data[lanes[self.lane_to_sort_by].name].max(axis=1)\n\n return ([x[2] for x in sorted_tuples], [x[0] for x in sorted_tuples])\n\n def get_dependencies(self, gr, lanes):\n return (\n [\n gr.load(),\n gr.build_intervals(),\n self.gr_to_overlap.load(),\n self.gr_to_overlap.build_intervals(),\n ],\n (None,),\n )\n", "id": "4380699", "language": "Python", "matching_score": 2.912869691848755, "max_stars_count": 0, "path": "src/mbf_heatmap/chipseq/order.py" }, { "content": "import numpy as np\nimport pandas as pd\n\n\ndef _apply_tpm(lanes_to_draw, raw_data):\n \"\"\"Convert read counts in raw_data into TPMs - in situ\"\"\"\n for lane in lanes_to_draw:\n norm_factor = 1e6 / lane.mapped_reads()\n raw_data[lane.name] = raw_data[lane.name] * norm_factor\n\n\nclass _Normalization:\n \"\"\"Base class for all normalizations\"\"\"\n\n pass\n\n\nclass AsIs(_Normalization):\n name = \"NormRaw\"\n\n def get_dependencies(self, lanes_to_draw):\n return []\n\n def calc(self, lanes_to_draw, raw_data):\n return raw_data\n\n\nclass NormLaneTPM(_Normalization):\n \"\"\"Normalize to TPM based on lane.get_aligned_read_count\"\"\"\n\n name = \"Norm_Lane_TPM\"\n\n def get_dependencies(self, lanes_to_draw):\n return [x.load() for x in lanes_to_draw]\n\n def calc(self, lanes_to_draw, raw_data):\n _apply_tpm(lanes_to_draw, raw_data)\n return raw_data\n\n\nclass NormLaneTPMInterpolate(_Normalization):\n \"\"\"Normalize to TPM based on lane.get_aligned_read_count, then reduce data by interpolation (for large regions)\"\"\"\n\n def __init__(self, samples_per_region=100):\n self.name = \"Norm_Lane_TPM_interpolated_%i\" % samples_per_region\n self.samples_per_region = samples_per_region\n\n def get_dependencies(self, lanes_to_draw):\n return [x.load() for x in lanes_to_draw]\n\n def calc(self, lanes_to_draw, raw_data):\n for lane in lanes_to_draw:\n _apply_tpm(lanes_to_draw, raw_data)\n cv = raw_data[lane.name]\n new_rows = []\n for row_no in range(0, cv.shape[0]):\n row = cv[row_no]\n interp = np.interp(\n [\n len(row) / float(self.samples_per_region) * ii\n for ii in range(0, self.samples_per_region)\n ],\n range(0, len(row)),\n row,\n )\n new_rows.append(interp)\n raw_data[lane.name] = np.array(new_rows)\n return raw_data\n\n\nclass NormLaneMax(_Normalization):\n \"\"\"Normalize to the 
maximum value of the regions in each lane\"\"\"\n\n def __init__(self):\n self.name = \"NormLaneMax\"\n\n def get_dependencies(self, lanes_to_draw):\n return [x.load() for x in lanes_to_draw]\n\n def calc(self, lanes_to_draw, raw_data):\n for lane in lanes_to_draw:\n norm_factor = 1.0 / raw_data[lane.name].max()\n raw_data[lane.name] *= norm_factor\n return raw_data\n\n\nclass NormLaneMaxLog2(_Normalization):\n \"\"\"Normalize to the maximum value of the regions in each lane, then log2\"\"\"\n\n def __init__(self):\n self.name = \"NormLaneMaxLog2\"\n\n def get_dependencies(self, lanes_to_draw):\n return []\n\n def calc(self, lanes_to_draw, raw_data):\n for lane in lanes_to_draw:\n norm_factor = 1.0 / raw_data[lane.name].max()\n raw_data[lane.name] *= norm_factor\n raw_data[lane.name] = np.log2(raw_data[lane.name] + 1)\n return raw_data\n\n\nclass NormPerPeak(_Normalization):\n \"\"\"Highest value in each peak is 1, lowest is 0\"\"\"\n\n name = \"Norm_PerPeak\"\n\n def get_dependencies(self, lanes_to_draw):\n return [x.load() for x in lanes_to_draw]\n\n def calc(self, lanes_to_draw, raw_data):\n for lane in lanes_to_draw:\n data = raw_data[lane.name]\n minimum = data.min(axis=1)\n maximum = data.max(axis=1)\n data = data.transpose()\n data = data - minimum # start from 0\n data = data / (maximum - minimum) # norm to 0..1\n data = data.transpose()\n raw_data[lane.name] = data\n return raw_data\n\n\nclass NormPerRow(_Normalization):\n \"\"\"Highest value in each row (ie in each peak across samples is 1, lowest is 0\"\"\"\n\n name = \"NormPerRow\"\n\n def get_dependencies(self, lanes_to_draw):\n return []\n\n def calc(self, lanes_to_draw, raw_data):\n maxima = {}\n minima = {}\n for lane in lanes_to_draw:\n maxima[lane.name] = raw_data[lane.name].max(axis=1)\n minima[lane.name] = raw_data[lane.name].min(axis=1)\n maxima = np.array(pd.DataFrame(maxima).max(axis=1))\n minima = np.array(pd.DataFrame(minima).max(axis=1))\n\n for lane in lanes_to_draw:\n data = raw_data[lane.name]\n data = data.transpose()\n data = data - minima # start from 0\n data = data / (maxima - minima) # norm to 0..1\n data = data.transpose()\n raw_data[lane.name] = data\n return raw_data\n\n\nclass NormPerRowTPM(_Normalization):\n \"\"\"Highest value in each row (ie in each peak across samples is 1, lowest is 0),\n lanes are first converted to TPMs based on lane.get_aligned_read_count()\"\"\"\n\n name = \"NormPerRowTPM\"\n\n def get_dependencies(self, lanes_to_draw):\n return [x.load() for x in lanes_to_draw]\n\n def calc(self, lanes_to_draw, raw_data):\n _apply_tpm(lanes_to_draw, raw_data)\n maxima = {}\n minima = {}\n for lane in lanes_to_draw:\n maxima[lane.name] = raw_data[lane.name].max(axis=1)\n minima[lane.name] = raw_data[lane.name].min(axis=1)\n maxima = np.array(pd.DataFrame(maxima).max(axis=1))\n minima = np.array(pd.DataFrame(minima).max(axis=1))\n\n for lane in lanes_to_draw:\n data = raw_data[lane.name]\n data = data.transpose()\n data = data - minima # start from 0\n data = data / (maxima - minima) # norm to 0..1\n data = data.transpose()\n raw_data[lane.name] = data\n return raw_data\n\n\nclass NormLaneQuantile(_Normalization):\n \"\"\"Normalize so that everything above the quantile is max\n Start high, with 0.99 for example, when trying different values\n \"\"\"\n\n def __init__(self, quantile):\n self.quantile = quantile\n self.name = \"NormLaneQuantile_%s\" % quantile\n\n def calc(self, lanes_to_draw, raw_data):\n _apply_tpm(lanes_to_draw, raw_data)\n for lane in lanes_to_draw:\n data = 
raw_data[lane.name]\n q = np.percentile(data, self.quantile * 100)\n data[data > q] = q\n raw_data[lane.name] = data\n return raw_data\n\n def get_dependencies(self, lanes_to_draw):\n return [x.load() for x in lanes_to_draw]\n\n\nclass NormLaneQuantileIthLane(_Normalization):\n \"\"\"Normalize TPM so that everything above the quantile is max\n But only use the quantile from the Ith Lane\n Start high, with 0.99 for example, when trying different values\n \"\"\"\n\n def __init__(self, quantile, ith_lane):\n self.quantile = quantile\n self.name = \"NormLaneQuantileIthLane_%i_%s\" % (ith_lane, quantile)\n self.ith = ith_lane\n\n def calc(self, lanes_to_draw, raw_data):\n _apply_tpm(lanes_to_draw, raw_data)\n import pickle\n\n with open(\"debug.dat\", \"wb\") as op:\n pickle.dump(raw_data[lanes_to_draw[self.ith].name], op)\n q = np.percentile(raw_data[lanes_to_draw[self.ith].name], self.quantile * 100)\n for lane in lanes_to_draw:\n data = raw_data[lane.name]\n data[data > q] = q\n raw_data[lane.name] = data\n return raw_data\n\n def get_dependencies(self, lanes_to_draw):\n return [x.load() for x in lanes_to_draw]\n", "id": "11933407", "language": "Python", "matching_score": 1.135575532913208, "max_stars_count": 0, "path": "src/mbf_heatmap/chipseq/norm.py" }, { "content": "import pypipegraph as ppg\nimport subprocess\nimport pandas as pd\nfrom pathlib import Path\nimport abc\nimport dppd\n\ndp, X = dppd.dppd()\nfrom mbf_qualitycontrol import register_qc, QCCollectingJob\n\n\nclass _PostProcessor(abc.ABC):\n \"\"\"Postprocess an AlignedSample into a new AlignedSample\"\"\"\n\n @abc.abstractmethod\n def process(self, input_bam_name, output_bam_name, result_dir):\n pass # pragma: no cover\n\n def further_jobs(self, new_lane, parent_lane):\n \"\"\"further jobs beyond the processing - But not qc, do that in register_qc\"\"\"\n pass # pragma: no cover\n\n @abc.abstractmethod\n def register_qc(self, new_lane):\n pass # pragma: no cover\n\n def get_dependencies(self):\n return [ppg.FunctionInvariant(self.name + \"_post_process\", self.process)]\n\n def get_parameters(self):\n return ()\n\n def get_vid(self, source_vid): # pragma: no cover\n return source_vid\n\n\nclass SubtractOtherLane(_PostProcessor):\n \"\"\"Subtract all reads (by name) matching in other_alignment.\n Probably only useful for single end data.\n \"\"\"\n\n def __init__(self, other_alignment):\n self.other_alignment = other_alignment\n self.name = \"_minus_\" + other_alignment.name\n self.result_folder_name = \"subtracted\"\n\n def process(self, input_bam_name, output_bam_name, result_dir):\n import mbf_bam\n\n mbf_bam.subtract_bam(\n str(output_bam_name),\n str(input_bam_name),\n str(self.other_alignment.get_bam_names()[0]),\n )\n\n def get_dependencies(self):\n import mbf_bam\n\n return super().get_dependencies() + [\n self.other_alignment.load(),\n ppg.ParameterInvariant(\n \"SubtractOtherLane.mbf_bam.version\", mbf_bam.__version__\n ),\n ]\n\n def get_vid(self, source_vid):\n if source_vid == self.other_alignment.vid:\n vid = source_vid\n else:\n vid = [source_vid, \"-\", self.other_alignment.vid]\n return vid\n\n def further_jobs(self, new_lane, parent_lane):\n def write_delta(of):\n was = parent_lane.mapped_reads()\n now = new_lane.mapped_reads()\n delta = was - now\n Path(of).write_text(\n f\"Subtracted {self.other_alignment.name} from {parent_lane.name}.\\nLost {delta} reads of {was} ({delta / was * 100:.2f}%)\"\n )\n\n delta_job = ppg.FileGeneratingJob(\n new_lane.result_dir / \"subtract_delta.txt\", write_delta\n 
).depends_on(new_lane.load())\n return [delta_job]\n\n def register_qc(self, new_lane):\n \"\"\"Plot for to see how much you lost.\n\n \"\"\"\n output_filename = (\n new_lane.result_dir / \"..\" / \"alignment_substract.png\"\n ).resolve()\n print(output_filename)\n\n def calc_and_plot(output_filename, lanes):\n parts = []\n for lane in lanes:\n was = lane.parent.mapped_reads()\n now = lane.mapped_reads()\n lost = was - now\n parts.append(\n pd.DataFrame(\n {\n \"what\": [\"kept\", \"lost\"],\n \"count\": [now, lost],\n \"sample\": lane.name,\n }\n )\n )\n df = pd.concat(parts)\n return (\n dp(df)\n .categorize(\"what\", [\"lost\", \"kept\"])\n .p9()\n .theme_bw()\n .annotation_stripes()\n .add_bar(\n \"sample\", \"count\", fill=\"what\", position=\"stack\", stat=\"identity\"\n )\n .title(lanes[0].genome.name + \" substraction\")\n .turn_x_axis_labels()\n .scale_y_continuous(labels=lambda xs: [\"%.2g\" % x for x in xs])\n .render_args(width=len(parts) * 0.2 + 1, height=5)\n .render(output_filename)\n )\n\n return register_qc(\n QCCollectingJob(output_filename, calc_and_plot)\n .depends_on(new_lane.load())\n .add(new_lane)\n ) # since everybody says self.load, we get them all\n\n\nclass UmiTools_Dedup(_PostProcessor):\n def __init__(self, method=\"directional\"):\n self.method = method\n allowed_methods = (\n \"unique\",\n \"percentile\",\n \",cluster\",\n \"adjacency\",\n \"directional\",\n )\n if method not in allowed_methods:\n raise ValueError(f\"Method not in allowed methods '{allowed_methods}'\")\n self.name = f\"UMI-tools_dedup-{method}\"\n self.result_folder_name = self.name\n\n def process(self, input_bam_name, output_bam_name, result_dir):\n cmd = [\n \"umi_tools\",\n \"dedup\",\n \"-I\",\n str(input_bam_name.absolute().resolve()),\n \"-S\",\n str(output_bam_name.absolute().resolve()),\n \"-L\",\n str((result_dir / \"umi_tools.log\").absolute().resolve()),\n \"--output-stats\",\n str(result_dir / \"umi_tools_stats\"),\n \"--method\",\n self.method,\n ]\n import umi_tools.dedup\n\n # we are running umitools within the slave process\n # no real need to fork, and\n # I couldn't get them to die on 'interactive abort'...\n umi_tools.dedup.main(cmd)\n\n def register_qc(self, new_lane):\n pass # pragma: no cover\n\n def get_version(self):\n return (\n subprocess.check_output([\"umi_tools\", \"--version\"]).decode(\"utf-8\").strip()\n )\n\n def get_parameters(self):\n return (self.get_version(),) # method is being taken care of by name\n\n\nclass AnnotateFastqBarcodes(_PostProcessor):\n \"\"\"Annotate cell and umi barcodes from _R1_ fastq files.\n ala dropseq\"\"\"\n\n def __init__(self, raw_lane, barcodes_to_slices):\n self.name = \"AnnotateCellAndUMI\"\n self.result_folder_name = self.name\n self.raw_lane = raw_lane\n self.barcodes_to_slices = [\n (x, y[0], y[1]) for (x, y) in barcodes_to_slices.items()\n ]\n for tag, start, end in self.barcodes_to_slices:\n if len(tag) != 2:\n raise ValueError(\"Tag must be two uppercase characters\")\n if tag.upper() != tag:\n raise ValueError(\"Tag must be two uppercase characters\")\n if (type(start) != int) or (type(end) != int):\n raise ValueError(\n f\"Indices must be exactly 2 integers - was {repr(start)}, {repr(end)}\"\n )\n if start >= end or start < 0 or end < 0:\n raise ValueError(\n \"Invalid index. Must be (start,end) with start < end. 
No python slicing.\"\n )\n\n def process(self, input_bam_name, output_bam_name, result_dir):\n fastq2_filenames = [x[0] for x in self.raw_lane.input_strategy()]\n import mbf_bam\n\n mbf_bam.annotate_barcodes_from_fastq(\n str(output_bam_name),\n str(input_bam_name),\n [str(x) for x in fastq2_filenames],\n self.barcodes_to_slices,\n )\n\n def register_qc(self, new_lane):\n pass # pragma: no cover\n", "id": "9033744", "language": "Python", "matching_score": 4.99050235748291, "max_stars_count": 0, "path": "src/mbf_align/post_process.py" }, { "content": "# forwards for compatibility with old chipseq code\n\nfrom .raw import Sample\nimport pypipegraph as ppg\nimport pysam\nfrom pathlib import Path\nimport pandas as pd\nimport collections\nfrom dppd import dppd\nimport dppd_plotnine # noqa:F401 -\nfrom mbf_qualitycontrol import register_qc, QCCollectingJob, qc_disabled\n\ndp, X = dppd()\n\n\nclass _ChromosomeMangledSamFile(pysam.Samfile):\n \"\"\"Wraps a samfile so that it understands targets that don't quite\n have the right name (eg. missing 'chr', additional 'chr' in front\n of chromosomes, etc.\n Usage:\n b = _ChromosomeMangledSamFile((\"my.bam\",'rb')\n b.chromosome_mangler = lambda x: 'chr' + x\n\n the chromosome mangler may return False, which means 'that's a legitimate region, but missing here - just act as if there are no reads on it'\n \"\"\"\n\n def parse_region(\n self,\n contig=None,\n start=None,\n stop=None,\n region=None,\n tid=None,\n reference=None,\n end=None,\n ):\n if not hasattr(self, \"chromosome_mangler\"):\n raise ValueError(\n \"You need to set a .chromosome_mangler on ChromosomeMangledSamFiles. Sorry about that - could not extend c__init__\"\n )\n if reference: # old name...\n contig = reference\n reference = None\n if end:\n stop = end\n end = None\n if contig:\n contig = self.chromosome_mangler(contig)\n print(\"new reference\", contig)\n if contig is False:\n raise ValueError(\n \"Chromosome mangler for %s returned a region not in the file\"\n % (self,)\n )\n return pysam.Samfile.parseRegion(\n self,\n contig=contig,\n start=start,\n stop=stop,\n region=region,\n tid=tid,\n reference=None,\n end=None,\n )\n\n\nclass _BamDerived:\n def _parse_alignment_job_input(self, alignment_job):\n if isinstance(alignment_job, (str, Path)):\n alignment_job = ppg.FileInvariant(alignment_job)\n if not isinstance(alignment_job, (ppg.FileInvariant, ppg.FileGeneratingJob)):\n raise ValueError(\n \"alignment_job must be a ppg.FileGeneratingJob or FileChecksumInvariant\"\n \"was %s\" % (type(alignment_job))\n )\n bam_name = None\n bai_name = None\n for fn in alignment_job.filenames:\n if str(fn).endswith(\".bam\"):\n if bam_name is None:\n bam_name = str(fn)\n else:\n raise ValueError(\n \"Job passed to AlignedSample had multiple .bam filenames\"\n )\n elif str(fn).endswith(\".bai\"):\n if bai_name is None:\n index_fn = str(fn)\n bai_name = index_fn\n else:\n raise ValueError(\n \"Job passed to AlignedSample had multiple .bai filenames\"\n )\n\n if bam_name is None:\n raise ValueError(\"Job passed to AlignedSample had no .bam filenames\")\n\n if isinstance(alignment_job, ppg.MultiFileGeneratingJob):\n if bai_name is None:\n index_fn = bam_name + \".bai\"\n index_job = ppg.FileGeneratingJob(\n index_fn, self._index(bam_name, index_fn)\n )\n index_job.depends_on(alignment_job)\n\n else:\n index_fn = bai_name\n index_job = alignment_job\n\n elif isinstance(alignment_job, ppg.FileGeneratingJob):\n index_fn = bam_name + \".bai\"\n index_job = ppg.FileGeneratingJob(index_fn, 
self._index(bam_name, index_fn))\n index_job.depends_on(alignment_job)\n elif isinstance(alignment_job, ppg.FileInvariant):\n index_fn = bam_name + \".bai\"\n if Path(index_fn).exists():\n index_job = ppg.FileInvariant(index_fn)\n else:\n cache_dir = Path(ppg.util.global_pipegraph.cache_folder) / \"bam_indices\"\n cache_dir.mkdir(exist_ok=True, parents=True)\n index_fn = cache_dir / (self.name + \"_\" + Path(bam_name).name + \".bai\")\n index_job = ppg.FileGeneratingJob(\n index_fn, self._index(bam_name, index_fn)\n )\n index_job.depends_on(alignment_job)\n else:\n raise NotImplementedError(\"Should not happen / covered by earlier if\")\n return alignment_job, index_job, Path(bam_name), Path(index_fn)\n\n def _index(self, input_fn, output_fn):\n def do_index():\n pysam.index(str(Path(input_fn).absolute()), str(Path(output_fn).absolute()))\n\n return do_index\n\n def __hash__(self):\n return hash(self.__class__.__name__ + self.name)\n\n def load(self):\n return self.alignment_job, self.index_job\n\n def get_bam(self):\n import multiprocessing\n\n mapper = getattr(self, \"chromosome_mapper\", None)\n if mapper is not None:\n r = _ChromosomeMangledSamFile(\n self.bam_filename,\n index_filename=str(self.index_filename),\n threads=multiprocessing.cpu_count(),\n )\n r.chromosome_mangler = self.chromosome_mapper\n else:\n r = pysam.Samfile(\n self.bam_filename,\n index_filename=str(self.index_filename),\n threads=multiprocessing.cpu_count(),\n )\n return r\n\n def get_bam_names(self):\n \"\"\"Retrieve the bam filename and index name as strings\"\"\"\n return (str(self.bam_filename), str(self.index_filename))\n\n\nclass AlignedSample(_BamDerived):\n def __init__(\n self,\n name,\n alignment_job,\n genome,\n is_paired,\n vid,\n result_dir=None,\n aligner=None,\n chromosome_mapper=None,\n ):\n \"\"\"\n Create an aligned sample from a BAM-producing job.\n See Sample.align()\n\n Parameters:\n alignment_job: FileGeneratingJob, FileInvariant, str, pathlib.Path\n Where does the BAM come from?\n str and Path get converted into a FileInvariant\n genome:\n an mbf_genomes.* Genome\n is_paired: bool\n whether this is a paired end sequencing run\n vid: str\n a unique, external sample management id\n chromosome_mapper: Optional[function]\n A function mapping pipeline chromosomes to those used in the BAM\n \"\"\"\n\n self.name = name\n ppg.util.assert_uniqueness_of_object(self)\n (\n self.alignment_job,\n self.index_job,\n bam_name,\n index_fn,\n ) = self._parse_alignment_job_input(alignment_job)\n self.result_dir = (\n Path(result_dir)\n if result_dir\n else (Path(\"results\") / \"aligned\" / self.name)\n )\n self.result_dir.mkdir(exist_ok=True, parents=True)\n self.genome = genome\n self.is_paired = is_paired\n self.vid = vid\n self.bam_filename = bam_name\n self.index_filename = index_fn\n self.aligner = aligner\n self.register_qc()\n self.chromosome_mapper = chromosome_mapper\n\n def get_unique_aligned_bam(self):\n \"\"\"Deprecated compatibility with older pipeline\"\"\"\n return self.get_bam()\n\n def _parse_idxstat(self):\n by_chr = self.get_bam().get_index_statistics()\n mapped = 0\n unmapped = 0\n for record in by_chr:\n mapped += record.mapped\n unmapped += record.unmapped\n return mapped, unmapped\n\n def mapped_reads(self):\n \"\"\"How many mapped entries are in the bam?\"\"\"\n return self._parse_idxstat()[0]\n\n def unmapped_reads(self):\n \"\"\"How many unmapped entries are in the bam?\"\"\"\n return self._parse_idxstat()[1]\n\n def post_process(self, post_processor, new_name=None, result_dir=None):\n 
\"\"\"Postprocess this lane using a mbf_align.postprocess.*\n Ie. Turn a lane into a 'converted' lane.\n\n \"\"\"\n if new_name is None:\n new_name = self.name + \"_\" + post_processor.name\n if result_dir is None:\n result_dir = (\n self.result_dir\n / \"..\"\n / post_processor.result_folder_name\n / self.result_dir.name\n )\n result_dir = Path(result_dir)\n result_dir.mkdir(exist_ok=True, parents=True)\n bam_filename = result_dir / (new_name + \".bam\")\n\n def inner(output_filename):\n post_processor.process(\n Path(self.get_bam_names()[0]), Path(output_filename), result_dir\n )\n\n alignment_job = ppg.FileGeneratingJob(bam_filename, inner).depends_on(\n self.load(),\n post_processor.get_dependencies(),\n ppg.ParameterInvariant(bam_filename, post_processor.get_parameters()),\n )\n vid = post_processor.get_vid(self.vid)\n\n new_lane = AlignedSample(\n new_name,\n alignment_job,\n self.genome,\n self.is_paired,\n vid,\n result_dir=result_dir,\n )\n\n new_lane.post_processor_jobs = post_processor.further_jobs(new_lane, self)\n new_lane.parent = self\n new_lane.post_processor_qc_jobs = post_processor.register_qc(new_lane)\n return new_lane\n\n def to_fastq(self, output_filename, as_temp_file=False):\n \"\"\"Convert a (single end) bam back into a fastq\"\"\"\n if self.is_paired:\n raise ValueError(\n \"No support for -> fastq for paired end files at the the moment\"\n )\n\n def convert():\n import mbf_bam\n\n mbf_bam.bam_to_fastq(output_filename, self.get_bam_names()[0])\n\n if as_temp_file:\n cls = ppg.TempFileGeneratingJob\n else:\n cls = ppg.FileGeneratingJob\n return cls(output_filename, convert).depends_on(self.load())\n\n def get_alignment_stats(self):\n if self.aligner is not None and hasattr(self.aligner, \"get_alignment_stats\"):\n return self.aligner.get_alignment_stats(Path(self.bam_filename))\n else:\n with self.get_bam() as f:\n return {\"Mapped\": f.mapped, \"Unmapped\": f.unmapped}\n\n def register_qc(self):\n if not qc_disabled():\n self.register_qc_complexity()\n self.register_qc_gene_strandedness()\n self.register_qc_biotypes()\n self.register_qc_alignment_stats()\n self.register_qc_subchromosomal()\n self.register_qc_splicing()\n\n def register_qc_complexity(self):\n\n output_filename = self.result_dir / f\"{self.name}_complexity.png\"\n\n def calc():\n import mbf_bam\n\n counts = mbf_bam.calculate_duplicate_distribution(\n str(self.bam_filename), str(self.index_filename)\n )\n return pd.DataFrame(\n {\n \"source\": self.name,\n \"Repetition count\": list(counts.keys()),\n \"Count\": list(counts.values()),\n }\n )\n\n def plot(df):\n import numpy as np\n\n unique_count = df[\"Count\"].sum()\n total_count = (df[\"Count\"] * df[\"Repetition count\"]).sum()\n pcb = float(unique_count) / total_count\n if pcb >= 0.9: # pragma: no cover\n severity = \"none\"\n elif pcb >= 0.8: # pragma: no cover\n severity = \"mild\"\n elif pcb >= 0.5: # pragma: no cover\n severity = \"moderate\"\n else:\n severity = \"severe\"\n title = (\n \"Genomic positions with repetition count reads\\nTotal read count: %i\\nPCR Bottleneck coefficient: %.2f (%s)\"\n % (total_count, pcb, severity)\n )\n return (\n dp(df)\n .p9()\n .theme_bw()\n .add_point(\"Repetition count\", \"Count\")\n .add_line(\"Repetition count\", \"Count\")\n .scale_y_continuous(\n trans=\"log2\",\n breaks=[2 ** x for x in range(1, 24)],\n labels=lambda x: [\"2^%0.f\" % np.log(xs) for xs in x],\n )\n .title(title)\n .pd\n )\n\n return register_qc(\n ppg.PlotJob(output_filename, calc, plot)\n .depends_on(self.load())\n 
.use_cores(-1)\n )\n\n def register_qc_gene_strandedness(self): # noqa: C901\n from mbf_genomics.genes.anno_tag_counts import _IntervalStrategy\n\n class IntervalStrategyExonIntronClassification(_IntervalStrategy):\n \"\"\"For QC purposes, defines all intron/exon intervals tagged\n with nothing but intron/exon\n\n See mbf_align.lanes.AlignedLane.register_qc_gene_strandedness\n\n \"\"\"\n\n def _get_interval_tuples_by_chr(self, genome):\n from mbf_nested_intervals import IntervalSet\n\n coll = {chr: [] for chr in genome.get_chromosome_lengths()}\n for g in genome.genes.values():\n exons = g.exons_overlapping\n if len(exons[0]) == 0: # pragma: no cover\n exons = g.exons_merged\n for start, stop in zip(*exons):\n coll[g.chr].append(\n (start, stop, 0b0101 if g.strand == 1 else 0b0110)\n )\n for start, stop in zip(*g.introns_strict):\n coll[g.chr].append(\n (start, stop, 0b1001 if g.strand == 1 else 0b1010)\n )\n result = {}\n for chr, tups in coll.items():\n iset = IntervalSet.from_tuples_with_id(tups)\n # iset = iset.merge_split()\n iset = iset.merge_hull()\n if iset.any_overlapping():\n raise NotImplementedError(\"Should not be reached\")\n result[chr] = []\n for start, stop, ids in iset.to_tuples_with_id():\n ids = set(ids)\n if len(ids) == 1:\n id = list(ids)[0]\n if id == 0b0101:\n tag = \"exon\"\n strand = +1\n elif id == 0b0110:\n tag = \"exon\"\n strand = -1\n elif id == 0b1001:\n tag = \"intron\"\n strand = +1\n elif id == 0b1010:\n tag = \"intron\"\n strand = -1\n else: # pragma: no cover\n raise NotImplementedError(\"Should not be reached\")\n else:\n down = 0\n for i in ids:\n down |= i\n if down & 0b1100 == 0b1100:\n tag = \"both\"\n elif down & 0b0100 == 0b0100:\n tag = \"exon\"\n else: # pragma: no cover haven't observed this case in the wild yet.\n tag = \"intron\" # pragma: no cover\n if down & 0b11 == 0b11:\n tag += \"_undecidable\"\n strand = (\n 1 # doesn't matter, but must be one or the other\n )\n elif down & 0b01:\n strand = 1\n else:\n strand = -1\n\n result[chr].append((tag, strand, [start], [stop]))\n return result\n\n output_filename = self.result_dir / f\"{self.name}_strandedness.png\"\n\n def calc():\n from mbf_genomics.genes.anno_tag_counts import IntervalStrategyGene\n from mbf_bam import count_reads_stranded\n\n interval_strategy = IntervalStrategyExonIntronClassification()\n intervals = interval_strategy._get_interval_tuples_by_chr(self.genome)\n\n bam_filename, bam_index_name = self.get_bam_names()\n forward, reverse = count_reads_stranded(\n bam_filename,\n bam_index_name,\n intervals,\n IntervalStrategyGene()._get_interval_tuples_by_chr(self.genome),\n each_read_counts_once=True,\n )\n result = {\"what\": [], \"count\": [], \"sample\": self.name}\n for k in forward.keys() | reverse.keys():\n if k.endswith(\"_undecidable\"):\n result[\"what\"].append(k)\n result[\"count\"].append(forward.get(k, 0) + reverse.get(k, 0))\n elif not k.startswith(\"_\"):\n result[\"what\"].append(k + \"_correct\")\n result[\"count\"].append(forward.get(k, 0))\n result[\"what\"].append(k + \"_reversed\")\n result[\"count\"].append(reverse.get(k, 0))\n elif k == \"_outside\":\n result[\"what\"].append(\"outside\")\n result[\"count\"].append(forward.get(k, 0))\n\n return pd.DataFrame(result)\n\n def plot(df):\n return (\n dp(df)\n .mutate(\n what=pd.Categorical(\n df[\"what\"],\n [\n \"exon_correct\",\n \"exon_reversed\",\n \"exon_undecidable\",\n \"intron_correct\",\n \"intron_reversed\",\n 
\"intron_undecidable\",\n \"both_correct\",\n \"both_reversed\",\n \"both_undecidable\",\n \"outside\",\n ],\n )\n )\n .p9()\n .add_bar(\"sample\", \"count\", fill=\"what\", position=\"dodge\")\n .scale_y_continuous(labels=lambda xs: [\"%.2g\" % x for x in xs])\n .turn_x_axis_labels()\n .pd\n )\n\n return register_qc(\n ppg.PlotJob(output_filename, calc, plot)\n .depends_on(self.load())\n .use_cores(-1)\n )\n\n def register_qc_biotypes(self):\n output_filename = self.result_dir / f\"{self.name}_reads_per_biotype.png\"\n\n from mbf_genomics.genes import Genes\n from mbf_genomics.genes.anno_tag_counts import GeneUnstranded\n\n genes = Genes(self.genome)\n anno = GeneUnstranded(self)\n\n def plot(output_filename):\n print(genes.df.columns)\n return (\n dp(genes.df)\n .groupby(\"biotype\")\n .summarize((anno.columns[0], lambda x: x.sum(), \"read count\"))\n .mutate(sample=self.name)\n .p9()\n .theme_bw()\n .annotation_stripes()\n .add_bar(\"biotype\", \"read count\", stat=\"identity\")\n .scale_y_continuous(labels=lambda xs: [\"%.2g\" % x for x in xs])\n # .turn_x_axis_labels()\n .coord_flip()\n .title(self.name)\n .render(\n output_filename,\n width=6,\n height=2 + len(genes.df.biotype.unique()) * 0.25,\n )\n )\n\n return register_qc(\n ppg.FileGeneratingJob(output_filename, plot).depends_on(\n genes.add_annotator(anno)\n )\n )\n\n def register_qc_alignment_stats(self):\n output_filename = self.result_dir / \"..\" / \"alignment_statistics.png\"\n\n def calc_and_plot(output_filename, lanes):\n parts = []\n for lane in lanes:\n p = lane.get_alignment_stats()\n parts.append(\n pd.DataFrame(\n {\n \"what\": list(p.keys()),\n \"count\": list(p.values()),\n \"sample\": lane.name,\n }\n )\n )\n df = pd.concat(parts)\n order = sorted(df[\"what\"].unique())\n umrn = \"Uniquely mapped reads number\"\n if umrn in order:\n order = [x for x in order if x != umrn] + [umrn]\n return (\n dp(df)\n .categorize(\"what\", order)\n .p9()\n .theme_bw()\n .annotation_stripes()\n .add_bar(\n \"sample\", \"count\", fill=\"what\", position=\"stack\", stat=\"identity\"\n )\n .title(lanes[0].genome.name)\n .turn_x_axis_labels()\n .scale_y_continuous(labels=lambda xs: [\"%.2g\" % x for x in xs])\n .render_args(width=len(parts) * 0.2 + 1, height=5, limitsize=False)\n .render(output_filename)\n )\n\n return register_qc(\n QCCollectingJob(output_filename, calc_and_plot)\n .depends_on(self.load())\n .add(self)\n ) # since everybody says self.load, we get them all\n\n def register_qc_subchromosomal(self):\n \"\"\"Subchromosom distribution plot - good to detect amplified regions\n or ancient virus awakening\"\"\"\n import mbf_genomics\n\n output_filename = (\n self.result_dir / f\"{self.name}_subchromosomal_distribution.png\"\n )\n\n class IntervalStrategyWindows(\n mbf_genomics.genes.anno_tag_counts._IntervalStrategy\n ):\n \"\"\"For QC purposes, spawn all chromosomes with\n windows of the definied size\n\n See mbf_align.lanes.AlignedLane.register_qc_subchromosomal\n\n \"\"\"\n\n def __init__(self, window_size):\n self.window_size = window_size\n\n def _get_interval_tuples_by_chr(self, genome):\n result = {}\n for chr, length in genome.get_chromosome_lengths().items():\n result[chr] = []\n for ii in range(0, length, self.window_size):\n result[chr].append(\n (\"%s_%i\" % (chr, ii), 0, [ii], [ii + self.window_size])\n )\n return result\n\n def calc():\n from mbf_bam import count_reads_unstranded\n\n interval_strategy = IntervalStrategyWindows(250_000)\n intervals = interval_strategy._get_interval_tuples_by_chr(self.genome)\n\n 
bam_filename, bam_index_name = self.get_bam_names()\n counts = count_reads_unstranded(\n bam_filename,\n bam_index_name,\n intervals,\n intervals,\n each_read_counts_once=True,\n )\n true_chromosomes = set(self.genome.get_true_chromosomes())\n result = {\"chr\": [], \"window\": [], \"count\": []}\n for key, count in counts.items():\n if not key.startswith(\"_\"):\n # must handle both 2R_1234\n # and Unmapped_scaffold_29_D1705_1234\n *c, window = key.split(\"_\")\n chr = \"_\".join(c)\n if chr in true_chromosomes: # pragma: no branch\n window = int(window)\n result[\"chr\"].append(chr)\n result[\"window\"].append(window)\n result[\"count\"].append(count)\n return pd.DataFrame(result)\n\n def plot(df):\n import natsort\n\n df[\n \"count\"\n ] += 1 # so we don't crash in the log scale if all values are 0 for a chr\n return (\n dp(df)\n .categorize(\"chr\", natsort.natsorted(X[\"chr\"].unique()))\n .p9()\n .theme_bw()\n .add_line(\"window\", \"count\", _alpha=0.3)\n .scale_y_log10()\n .facet_wrap(\"chr\", scales=\"free\", ncol=1)\n .hide_x_axis_labels()\n .title(self.name)\n .render_args(\n width=6, height=2 + len(df[\"chr\"].unique()) * 1, limitsize=False\n )\n .pd\n )\n\n return register_qc(\n ppg.PlotJob(output_filename, calc, plot)\n .depends_on(self.load())\n .use_cores(-1)\n )\n\n def register_qc_splicing(self):\n \"\"\"How many reads were spliced? How many of those splices were known splice sites,\n how many were novel\"\"\"\n output_filename = self.result_dir / f\"{self.name}_splice_sites.png\"\n\n def calc():\n from mbf_bam import count_introns\n\n bam_filename, bam_index_name = self.get_bam_names()\n counts_per_chromosome = count_introns(bam_filename, bam_index_name)\n known_splice_sites_by_chr = {\n chr: set() for chr in self.genome.get_chromosome_lengths()\n }\n for gene in self.genome.genes.values():\n for start, stop in zip(*gene.introns_all):\n known_splice_sites_by_chr[gene.chr].add((start, stop))\n total_counts = collections.Counter()\n known_count = 0\n unknown_count = 0\n for chr, counts in counts_per_chromosome.items():\n for k, v in counts.items():\n if k[0] == 0xFFFFFFFF:\n intron_counts = 0xFFFFFFFF - k[1]\n total_counts[intron_counts] += v\n else:\n if k in known_splice_sites_by_chr[chr]:\n known_count += v\n else:\n unknown_count += v\n result = {\"side\": [], \"x\": [], \"count\": []}\n result[\"side\"].append(\"splice sites\")\n result[\"x\"].append(\"unknown\")\n result[\"count\"].append(unknown_count)\n result[\"side\"].append(\"splice sites\")\n result[\"x\"].append(\"known\")\n result[\"count\"].append(known_count)\n\n for x, count in total_counts.items():\n result[\"side\"].append(\"reads with x splices\")\n result[\"x\"].append(x)\n result[\"count\"].append(count)\n\n return pd.DataFrame(result)\n\n def plot(df):\n return (\n dp(df)\n .p9()\n .theme_bw()\n .add_bar(\"x\", \"count\", stat=\"identity\")\n .facet_wrap(\"side\", scales=\"free\", ncol=1)\n .scale_y_continuous(labels=lambda xs: [\"%.2g\" % x for x in xs])\n .title(self.name)\n .theme(panel_spacing_y=0.2)\n .render(output_filename)\n )\n\n return register_qc(\n ppg.PlotJob(output_filename, calc, plot)\n .depends_on(self.load())\n .use_cores(-1)\n )\n\n\n__all__ = [Sample, AlignedSample]\n", "id": "7039585", "language": "Python", "matching_score": 3.982595443725586, "max_stars_count": 0, "path": "src/mbf_align/lanes.py" }, { "content": "import pytest\nimport shutil\nfrom pathlib import Path\nimport pypipegraph as ppg\nimport mbf_align\nimport pysam\nfrom mbf_qualitycontrol.testing import 
assert_image_equal\nfrom mbf_sampledata import get_sample_data, get_sample_path\nfrom mbf_qualitycontrol import prune_qc, get_qc_jobs\n\n\nclass DummyGenome:\n name = \"Dummy_genome\"\n\n def job_genes(self):\n return None\n\n def job_transcripts(self):\n return None\n\n def download_genome(self):\n return []\n\n\[email protected](\"new_pipegraph_no_qc\")\nclass TestAligned:\n def test_from_existing_bam(self):\n bam_path = get_sample_data(Path(\"mbf_align/ex2.bam\"))\n bam_job = ppg.FileInvariant(bam_path)\n genome = object()\n lane = mbf_align.AlignedSample(\"test_lane\", bam_job, genome, False, \"AA123\")\n assert lane.name == \"test_lane\"\n assert lane.load()[0] is bam_job\n assert isinstance(lane.load()[1], ppg.FileInvariant)\n assert lane.genome is genome\n assert not lane.is_paired\n assert lane.vid == \"AA123\"\n\n with pytest.raises(ValueError):\n mbf_align.AlignedSample(\"test_lane\", bam_job, genome, False, \"AA123\")\n lane2 = mbf_align.AlignedSample(\"test_lane2\", bam_job, genome, True, \"AA123\")\n assert lane2.is_paired\n\n b = lane.get_bam()\n assert isinstance(b, pysam.Samfile)\n b = lane.get_unique_aligned_bam()\n assert isinstance(b, pysam.Samfile)\n assert lane.get_bam_names()[0] == bam_path\n assert lane.get_bam_names()[1] == bam_path + \".bai\"\n\n assert lane.mapped_reads() == 8\n assert lane.unmapped_reads() == 0\n for job in get_qc_jobs():\n assert job._pruned\n\n def test_lane_invariants_on_non_accepted_value(self):\n genome = object()\n with pytest.raises(ValueError):\n mbf_align.AlignedSample(\"test_lane\", 123, genome, False, \"AA123\")\n\n def test_lane_raises_on_multifilegeneratingJobWithTwoBams(self):\n mfg = ppg.MultiFileGeneratingJob([\"a.bam\", \"b.bam\"], lambda: 5)\n genome = object()\n with pytest.raises(ValueError):\n mbf_align.AlignedSample(\"test_lane\", mfg, genome, False, \"AA123\")\n\n def test_lane_raises_on_multifilegeneratingJobWithTwoBais(self):\n mfg = ppg.MultiFileGeneratingJob([\"a.bam\", \"a.bam.bai\", \"b.bam.bai\"], lambda: 5)\n genome = object()\n with pytest.raises(ValueError):\n mbf_align.AlignedSample(\"test_lane\", mfg, genome, False, \"AA123\")\n\n def test_lane_raises_on_multifilegeneratingJobWithNoBAM(self):\n mfg = ppg.MultiFileGeneratingJob([\"a.sam\"], lambda: 5)\n genome = object()\n with pytest.raises(ValueError):\n mbf_align.AlignedSample(\"test_lane\", mfg, genome, False, \"AA123\")\n\n def test_lane_invariants_on_string(self):\n bam_path = get_sample_data(Path(\"mbf_align/ex2.bam\"))\n genome = object()\n lane = mbf_align.AlignedSample(\"test_lane\", bam_path, genome, False, \"AA123\")\n assert isinstance(lane.load()[0], ppg.FileInvariant)\n\n def test_missing_index_file(self):\n bam_path = get_sample_data(Path(\"mbf_align/ex2.bam\"))\n no_index = \"noindex.bam\"\n shutil.copy(bam_path, no_index)\n genome = object()\n lane = mbf_align.AlignedSample(\"test_lane\", no_index, genome, False, \"AA123\")\n assert isinstance(lane.load()[0], ppg.FileInvariant)\n assert isinstance(lane.load()[1], ppg.FileGeneratingJob)\n assert lane.load()[1].job_id != \"noindex.bam.bai\"\n assert lane.load()[0] in lane.load()[1].prerequisites\n with pytest.raises(FileNotFoundError):\n lane.mapped_reads()\n ppg.run_pipegraph()\n assert lane.mapped_reads() == 8\n\n def test_creating_index_for_fg_job(self):\n def gen():\n shutil.copy(get_sample_data(Path(\"mbf_align/ex2.bam\")), \"sample.bam\")\n\n ppg.util.global_pipegraph.quiet = False\n\n job = ppg.FileGeneratingJob(\"sample.bam\", gen)\n genome = object()\n lane = 
mbf_align.AlignedSample(\"test_lane\", job, genome, False, \"AA123\")\n assert isinstance(lane.load()[1], ppg.FileGeneratingJob)\n assert lane.load()[0] in lane.load()[1].prerequisites\n ppg.run_pipegraph()\n assert Path(\"sample.bam\").exists()\n assert Path(\"sample.bam.bai\").exists()\n\n def test_creating_index_for_mfg(self):\n def gen():\n shutil.copy(get_sample_data(Path(\"mbf_align/ex2.bam\")), \"sample.bam\")\n\n ppg.util.global_pipegraph.quiet = False\n\n job = ppg.MultiFileGeneratingJob([\"sample.bam\"], gen)\n genome = object()\n lane = mbf_align.AlignedSample(\"test_lane\", job, genome, False, \"AA123\")\n assert isinstance(lane.load()[1], ppg.FileGeneratingJob)\n assert lane.load()[0] in lane.load()[1].prerequisites\n ppg.run_pipegraph()\n assert Path(\"sample.bam\").exists()\n assert Path(\"sample.bam.bai\").exists()\n\n def test_subtraction_by_read(self):\n from mbf_sampledata import get_human_22_fake_genome\n\n genome = get_human_22_fake_genome()\n lane = mbf_align.AlignedSample(\n \"test_lane\",\n get_sample_data(Path(\"mbf_align/rnaseq_spliced_chr22.bam\")),\n genome,\n False,\n \"AA123\",\n ) # index creation is automatic\n lane2 = mbf_align.AlignedSample(\n \"test_lane2\",\n get_sample_data(Path(\"mbf_align/rnaseq_spliced_chr22.bam\")),\n genome,\n False,\n \"AA124\",\n ) # index creation is automatic\n lane3 = mbf_align.AlignedSample(\n \"test_lane3\",\n get_sample_data(Path(\"mbf_align/chipseq_chr22.bam\")),\n genome,\n False,\n \"AA123\",\n ) # index creation is automatic\n lane3_subset = mbf_align.AlignedSample(\n \"test_lane3_subset\",\n get_sample_data(Path(\"mbf_align/chipseq_chr22_subset.bam\")),\n genome,\n False,\n \"AA123\",\n ) # index creation is automatic\n\n lane_empty = lane.post_process(\n mbf_align.post_process.SubtractOtherLane(lane2), new_name=\"empty\"\n )\n lane_full = lane.post_process(\n mbf_align.post_process.SubtractOtherLane(lane3), new_name=\"full\"\n )\n lane_some = lane3.post_process(\n mbf_align.post_process.SubtractOtherLane(lane3_subset),\n result_dir=\"results/aligned/shu\",\n )\n qc_jobs = [lane_some.post_processor_qc_jobs, lane_full.post_processor_qc_jobs]\n prune_qc(lambda job: job in qc_jobs)\n ppg.run_pipegraph()\n assert Path(lane_empty.get_bam_names()[1]).exists()\n assert Path(lane_full.get_bam_names()[1]).exists()\n assert lane_empty.mapped_reads() == 0\n assert lane_full.mapped_reads() == lane.mapped_reads()\n assert lane.mapped_reads() != 0\n assert (\n lane_some.mapped_reads()\n == lane3.mapped_reads() - lane3_subset.mapped_reads()\n )\n assert lane3_subset.mapped_reads() # make sure there was something to subtract\n assert \"shu\" in lane_some.get_bam_names()[0]\n assert_image_equal(qc_jobs[0].filenames[0], \"_result_dir\")\n assert_image_equal(qc_jobs[0].filenames[0])\n\n def test_to_fastq(self):\n bam_path = get_sample_data(Path(\"mbf_align/ex2.bam\"))\n bam_job = ppg.FileInvariant(bam_path)\n genome = object()\n lane = mbf_align.AlignedSample(\"test_lane\", bam_job, genome, False, \"AA123\")\n fastq_path = \"out.fastq\"\n lane.to_fastq(fastq_path)\n ppg.run_pipegraph()\n assert Path(fastq_path).exists()\n assert (\n Path(fastq_path).read_text()\n == 
\"\"\"@read_28833_29006_6945\nAGCTTAGCTAGCTACCTATATCTTGGTCTTGGCCG\n+\n<<<<<<<<<<<<<<<<<<<<<:<9/,&,22;;<<<\n@read_28701_28881_323b\nTGCAAGGCCGCATCGGCCAAGGCCAAGATATAGGT\n+\n<<<<7<<<<<<<<<<<<;6<<<:;7<<<<;<<<<<\n@read_28701_28881_323c\nTGCAAGGCCGCATCGGCCAAGGCCAAGATATAGGT\n+\n<<<<7<<<<<<<<<<<<;6<<<:;7<<<<;<<<<<\n@read_28701_28881_324a\nTGCAAGGCCGCATCGGCCAAGGCCAAGATATAGGT\n+\n<<<<7<<<<<<<<<<<<;6<<<:;7<<<<;<<<<<\n@read_28701_28881_324b\nTGCAAGGCCGCATCGGCCAAGGCCAAGATATAGGT\n+\n<<<<7<<<<<<<<<<<<;6<<<:;7<<<<;<<<<<\n@read_28701_28881_324c\nTGCAAGGCCGCATCGGCCAAGGCCAAGATATAGGT\n+\n<<<<7<<<<<<<<<<<<;6<<<:;7<<<<;<<<<<\n@test_clipped1\nAGCTTAGCTAGCTACCTATATCTTGGTCTTGGCCG\n+\n<<<<<<<<<<<<<<<<<<<<<:<9/,&,22;;<<<\n@test_clipped1\nAGCTTAGCTAGCTACCTATATCTTGGTCTTGGCCG\n+\n<<<<<<<<<<<<<<<<<<<<<:<9/,&,22;;<<<\n\"\"\"\n )\n lane2 = mbf_align.AlignedSample(\n \"test_lane2\", bam_job, genome, is_paired=True, vid=\"AA123\"\n )\n with pytest.raises(ValueError):\n lane2.to_fastq(\"nope.fastq\") # no support for paired end data at this point\n\n\[email protected](\"new_pipegraph\")\nclass TestQualityControl:\n def prep_lane(self):\n from mbf_sampledata import get_human_22_fake_genome\n\n # straight from chr22 of the human genome\n genome = get_human_22_fake_genome()\n\n lane = mbf_align.AlignedSample(\n \"test_lane\",\n get_sample_data(Path(\"mbf_align/rnaseq_spliced_chr22.bam\")),\n genome,\n False,\n \"AA123\",\n )\n return lane\n\n def _test_qc_plots(self, filename, remaining_job_count, chdir=\".\"):\n lane = self.prep_lane()\n prune_qc(lambda job: filename in job.job_id)\n not_pruned_count = sum([1 for x in get_qc_jobs() if not x._pruned])\n assert not_pruned_count == remaining_job_count # plot cache, plot_table, plot\n ppg.run_pipegraph()\n if chdir == \"..\":\n fn = lane.result_dir / chdir / filename\n else:\n fn = lane.result_dir / chdir / f\"{lane.name}_{filename}\"\n assert_image_equal(fn, suffix=\"_\" + filename)\n\n def test_qc_complexity(self):\n self._test_qc_plots(\"complexity.png\", 3)\n\n def test_qc_strandedness(self):\n self._test_qc_plots(\"strandedness.png\", 3)\n\n def test_qc_reads_per_biotype(self):\n self._test_qc_plots(\"reads_per_biotype.png\", 1)\n\n def test_qc_alignment_statistics(self):\n self._test_qc_plots(\"alignment_statistics.png\", 1, \"..\")\n\n def test_qc_subchromal_distribution(self):\n self._test_qc_plots(\"subchromosomal_distribution.png\", 3)\n\n def test_qc_splice_sites(self):\n self._test_qc_plots(\"splice_sites.png\", 3)\n\n def test_alignment_stats(self):\n from mbf_sampledata import get_human_22_fake_genome\n\n genome = get_human_22_fake_genome()\n lane = mbf_align.AlignedSample(\n \"test_lane\",\n get_sample_data(Path(\"mbf_align/rnaseq_spliced_chr22.bam\")),\n genome,\n False,\n \"AA123\",\n ) # index creation is automatic\n counts = {\"get_bam\": 0}\n\n def get_bam():\n counts[\"get_bam\"] += 1\n\n class DummySam:\n mapped = 5\n unmapped = 10\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n pass\n\n return DummySam()\n\n lane.get_bam = get_bam\n assert lane.get_alignment_stats() == {\"Mapped\": 5, \"Unmapped\": 10}\n assert counts[\"get_bam\"] == 1\n\n class DummyAlignerWithout:\n pass\n\n lane = mbf_align.AlignedSample(\n \"test_lane2\",\n get_sample_data(Path(\"mbf_align/rnaseq_spliced_chr22.bam\")),\n genome,\n False,\n \"AA123\",\n aligner=DummyAlignerWithout(),\n ) # index creation is automatic\n lane.get_bam = get_bam\n assert counts[\"get_bam\"] == 1\n assert lane.get_alignment_stats() == {\"Mapped\": 5, \"Unmapped\": 10}\n assert 
counts[\"get_bam\"] == 2\n\n class DummyAlignerWith:\n def get_alignment_stats(self, bam_filename):\n assert (\n Path(bam_filename).resolve()\n == get_sample_path(\"mbf_align/rnaseq_spliced_chr22.bam\").resolve()\n )\n return {\"Hello\": 23}\n\n lane = mbf_align.AlignedSample(\n \"test_lane3\",\n get_sample_data(\"mbf_align/rnaseq_spliced_chr22.bam\"),\n genome,\n False,\n \"AA123\",\n aligner=DummyAlignerWith(),\n ) # index creation is automatic\n lane.get_bam = get_bam\n assert counts[\"get_bam\"] == 2\n assert lane.get_alignment_stats() == {\"Hello\": 23}\n assert counts[\"get_bam\"] == 2\n\n def test_chromosome_mapping(self):\n bam_path = get_sample_data(Path(\"mbf_align/ex2.bam\"))\n bam_job = ppg.FileInvariant(bam_path)\n genome = DummyGenome()\n lane = mbf_align.AlignedSample(\"test_lane\", bam_job, genome, False, \"AA123\")\n assert lane.name == \"test_lane\"\n assert lane.load()[0] is bam_job\n assert isinstance(lane.load()[1], ppg.FileInvariant)\n assert lane.genome is genome\n assert not lane.is_paired\n assert lane.vid == \"AA123\"\n\n with pytest.raises(ValueError):\n mbf_align.AlignedSample(\"test_lane\", bam_job, genome, False, \"AA123\")\n lane2 = mbf_align.AlignedSample(\"test_lane2\", bam_job, genome, True, \"AA123\")\n assert lane2.is_paired\n\n b = lane.get_bam()\n assert isinstance(b, pysam.Samfile)\n b\n", "id": "3302112", "language": "Python", "matching_score": 4.455129623413086, "max_stars_count": 0, "path": "tests/test_aligned.py" }, { "content": "from pathlib import Path\nimport pypipegraph as ppg\nfrom mbf_align import Sample, AlignedSample\nfrom mbf_sampledata import get_sample_path, get_human_22_fake_genome\nimport mbf_qualitycontrol\n\n\ndef test_align_and_extract_umis(new_pipegraph):\n from mbf_align.post_process import AnnotateFastqBarcodes\n\n for folder in [\n get_sample_path(Path(\"mbf_align/sample_extract_barcodes\")),\n get_sample_path(Path(\"mbf_align/sample_extract_barcodes_gz\")),\n ]:\n new_pipegraph.new_pipegraph()\n genome = get_human_22_fake_genome()\n\n mbf_qualitycontrol.prune_qc(lambda _: False)\n r = Sample(\"test\", str(folder), False, pairing=\"only_second\", vid=\"AA123\")\n al = AlignedSample(\"test\", str(folder / \"test.bam\"), genome, False, \"AA123\")\n\n x = al.post_process(AnnotateFastqBarcodes(r, {\"XC\": [0, 4], \"XM\": [7, 7 + 4]}))\n ppg.run_pipegraph()\n f = x.get_bam()\n r = next(f.fetch())\n print(r.tags)\n assert r.get_tag(\"XC\") == \"AGTC\"\n assert r.get_tag(\"XM\") == \"TGAC\"\n", "id": "3116668", "language": "Python", "matching_score": 2.2631473541259766, "max_stars_count": 0, "path": "tests/test_postprocess.py" }, { "content": "import pypipegraph as ppg\nimport pytest\nimport pandas as pd\nimport numpy as np\nimport mbf_heatmap\nimport mbf_genomics\nimport mbf_sampledata\nimport mbf_align\nfrom mbf_heatmap.chipseq import regions, smooth, norm, order\nfrom mbf_qualitycontrol.testing import assert_image_equal\nfrom mbf_genomics.testing import MockGenome\n\n\ndef get_human_22_fake_genome():\n import gzip\n\n genes = pd.read_msgpack(\n gzip.GzipFile(\n mbf_sampledata.get_sample_path(\"mbf_align/hs_22_genes.msgpack.gz\")\n )\n ).reset_index()\n tr = pd.read_msgpack(\n gzip.GzipFile(\n mbf_sampledata.get_sample_path(\"mbf_align/hs_22_transcripts.msgpack.gz\")\n )\n ).reset_index()\n genes[\"chr\"] = \"chr22\"\n tr[\"chr\"] = \"chr22\"\n return MockGenome(\n df_genes=genes, df_transcripts=tr, chr_lengths={\"chr22\": 50_818_468}\n )\n\n\nclass TestHeatmapChipSeq:\n def test_simple(self, new_pipegraph_no_qc):\n genome = 
get_human_22_fake_genome()\n start = 17750239\n df = pd.DataFrame(\n [\n {\"chr\": \"chr22\", \"start\": start, \"stop\": start + 1000},\n {\"chr\": \"chr22\", \"start\": start + 20000, \"stop\": start + 20000 + 1000},\n {\"chr\": \"chr22\", \"start\": start + 30000, \"stop\": start + 30000 + 1000},\n ]\n )\n plot_regions = mbf_genomics.regions.GenomicRegions(\n \"testregions\", lambda: df, [], genome\n )\n lane1 = mbf_align.lanes.AlignedSample(\n \"one\",\n mbf_sampledata.get_sample_path(\"mbf_align/chipseq_chr22.bam\"),\n genome,\n False,\n None,\n )\n lane2 = mbf_align.lanes.AlignedSample(\n \"two\",\n mbf_sampledata.get_sample_path(\"mbf_align/chipseq_chr22.bam\"),\n genome,\n False,\n None,\n )\n\n h = mbf_heatmap.chipseq.Heatmap(\n plot_regions,\n [lane1, lane2],\n region_strategy=regions.RegionAsIs(),\n smoothing_strategy=smooth.SmoothRaw(),\n )\n fn = \"test.png\"\n h.plot(fn, norm.AsIs(), order.AsIs())\n ppg.run_pipegraph()\n assert_image_equal(fn)\n\n def test_smooth(self, new_pipegraph_no_qc):\n genome = get_human_22_fake_genome()\n df = pd.DataFrame(\n [\n {\n \"chr\": \"chr22\",\n \"start\": 36925 * 1000 - 1000,\n \"stop\": 36925 * 1000 + 1000,\n },\n {\n \"chr\": \"chr22\",\n \"start\": 31485 * 1000 - 2000,\n \"stop\": 31485 * 1000 + 2000,\n },\n {\"chr\": \"chr22\", \"start\": 41842 * 1000, \"stop\": (41842 * 1000) + 1},\n ]\n )\n plot_regions = mbf_genomics.regions.GenomicRegions(\n \"testregions\", lambda: df, [], genome\n )\n lane1 = mbf_align.lanes.AlignedSample(\n \"one\",\n mbf_sampledata.get_sample_path(\"mbf_align/chipseq_chr22.bam\"),\n genome,\n False,\n None,\n )\n lane2 = mbf_align.lanes.AlignedSample(\n \"two\",\n mbf_sampledata.get_sample_path(\"mbf_align/chipseq_chr22.bam\"),\n genome,\n False,\n None,\n )\n\n h = mbf_heatmap.chipseq.Heatmap(\n plot_regions,\n [lane1, lane2],\n region_strategy=regions.RegionFromCenter(1000),\n smoothing_strategy=smooth.SmoothExtendedReads(),\n )\n fn = \"test.png\"\n h.plot(fn, norm.AsIs(), order.FirstLaneSum())\n ppg.run_pipegraph()\n assert_image_equal(fn)\n\n\nclass TestSmooth:\n def test_raw(self, new_pipegraph):\n genome = get_human_22_fake_genome()\n lane1 = mbf_align.lanes.AlignedSample(\n \"one\",\n mbf_sampledata.get_sample_path(\"mbf_align/chipseq_chr22.bam\"),\n genome,\n False,\n None,\n )\n start = 41842000\n regions = pd.DataFrame(\n {\n \"chr\": [\"chr22\"],\n \"start\": [\n start,\n ],\n \"stop\": [start + 1000],\n }\n )\n calculated = smooth.SmoothRaw().calc(regions, lane1)\n should = np.zeros(1000)\n known = [\n (41842170, True, [(0, 36)]),\n (41842241, False, [(0, 36)]),\n (41842399, False, [(0, 36)]),\n (41842416, False, [(0, 36)]),\n (41842602, True, [(0, 36)]),\n (41842687, False, [(0, 36)]),\n (41842689, True, [(0, 36)]),\n (41842730, True, [(0, 36)]),\n (41842750, False, [(0, 36)]),\n (41842770, True, [(0, 36)]),\n (41842796, True, [(0, 36)]),\n (41842942, False, [(0, 36)]),\n (41842985, False, [(0, 36)]),\n ]\n\n for pos, is_reverse, cigar in known:\n pos -= start\n # orientation does not matter for non-extended reads\n should[pos : pos + cigar[0][1]] += 1\n should = should.reshape((1, 1000))\n assert should.shape == calculated.shape\n if (should != calculated).any():\n for ii in range(1000):\n if should[0, ii] != calculated[0, ii]:\n print(ii, should[0, ii], calculated[0, ii])\n assert (should == calculated).all()\n\n def test_extended(self, new_pipegraph):\n genome = get_human_22_fake_genome()\n lane1 = mbf_align.lanes.AlignedSample(\n \"one\",\n 
mbf_sampledata.get_sample_path(\"mbf_align/chipseq_chr22.bam\"),\n genome,\n False,\n None,\n )\n start = 41842000\n regions = pd.DataFrame(\n {\n \"chr\": [\"chr22\"],\n \"start\": [\n start,\n ],\n \"stop\": [start + 1000],\n }\n )\n extend = 10\n calculated = smooth.SmoothExtendedReads(extend).calc(regions, lane1)\n should = np.zeros(1000)\n known = [\n (41842170, True, [(0, 36)]),\n (41842241, False, [(0, 36)]),\n (41842399, False, [(0, 36)]),\n (41842416, False, [(0, 36)]),\n (41842602, True, [(0, 36)]),\n (41842687, False, [(0, 36)]),\n (41842689, True, [(0, 36)]),\n (41842730, True, [(0, 36)]),\n (41842750, False, [(0, 36)]),\n (41842770, True, [(0, 36)]),\n (41842796, True, [(0, 36)]),\n (41842942, False, [(0, 36)]),\n (41842985, False, [(0, 36)]),\n ]\n\n for pos, is_reverse, cigar in known:\n pos -= start\n print(pos)\n if is_reverse: # downstream verlaengern!\n should[pos - extend : pos + cigar[0][1]] += 1\n else:\n should[pos : pos + cigar[0][1] + extend] += 1\n should = should.reshape((1, 1000))\n assert should.shape == calculated.shape\n if (should != calculated).any():\n for ii in range(1000):\n if should[0, ii] != calculated[0, ii]:\n print(ii, should[0, ii], calculated[0, ii])\n assert (should == calculated).all()\n\n def test_extended_larger_then_region(self, new_pipegraph):\n genome = get_human_22_fake_genome()\n lane1 = mbf_align.lanes.AlignedSample(\n \"one\",\n mbf_sampledata.get_sample_path(\"mbf_align/chipseq_chr22.bam\"),\n genome,\n False,\n None,\n )\n start = 41842000\n regions = pd.DataFrame(\n {\n \"chr\": [\"chr22\"],\n \"start\": [\n start,\n ],\n \"stop\": [start + 1000],\n }\n )\n extend = 1500\n calculated = smooth.SmoothExtendedReads(extend).calc(regions, lane1)\n should = np.zeros(1000)\n known = [\n (41842170, True, [(0, 36)]),\n (41842241, False, [(0, 36)]),\n (41842399, False, [(0, 36)]),\n (41842416, False, [(0, 36)]),\n (41842602, True, [(0, 36)]),\n (41842687, False, [(0, 36)]),\n (41842689, True, [(0, 36)]),\n (41842730, True, [(0, 36)]),\n (41842750, False, [(0, 36)]),\n (41842770, True, [(0, 36)]),\n (41842796, True, [(0, 36)]),\n (41842942, False, [(0, 36)]),\n (41842985, False, [(0, 36)]),\n ]\n\n for pos, is_reverse, cigar in known:\n pos -= start\n if is_reverse:\n should[max(0, pos - extend) : min(1000, pos + cigar[0][1])] += 1\n else:\n should[max(pos, 0) : min(1000, pos + cigar[0][1] + extend)] += 1\n should = should.reshape((1, 1000))\n assert should.shape == calculated.shape\n if (should != calculated).any():\n for ii in range(1000):\n if should[0, ii] != calculated[0, ii]:\n print(ii, should[0, ii], calculated[0, ii])\n assert (should == calculated).all()\n\n def test_extended_minus_background(self, new_pipegraph):\n genome = get_human_22_fake_genome()\n lane1 = mbf_align.lanes.AlignedSample(\n \"one\",\n mbf_sampledata.get_sample_path(\"mbf_align/chipseq_chr22.bam\"),\n genome,\n False,\n None,\n )\n start = 41842000\n regions = pd.DataFrame(\n {\n \"chr\": [\"chr22\"],\n \"start\": [\n start,\n ],\n \"stop\": [start + 1000],\n }\n )\n extend = 10\n sermb = smooth.SmoothExtendedReadsMinusBackground({lane1.name: lane1}, extend)\n calculated = sermb.calc(regions, lane1)\n should = np.zeros((1, 1000))\n assert (should == calculated).all()\n assert lane1.load() in sermb.get_dependencies(lane1)\n\n\nclass TestOrder:\n def test_ithlane_sum(self, new_pipegraph):\n genome = get_human_22_fake_genome()\n start = 17750239\n df = pd.DataFrame(\n [\n {\"chr\": \"chr22\", \"start\": start, \"stop\": start + 1000},\n {\"chr\": \"chr22\", \"start\": 
start + 20000, \"stop\": start + 20000 + 1000},\n {\"chr\": \"chr22\", \"start\": start + 30000, \"stop\": start + 30000 + 1000},\n ]\n )\n lane1 = mbf_align.lanes.AlignedSample(\n \"one\",\n mbf_sampledata.get_sample_path(\"mbf_align/chipseq_chr22.bam\"),\n genome,\n False,\n None,\n )\n lane2 = mbf_align.lanes.AlignedSample(\n \"two\",\n mbf_sampledata.get_sample_path(\"mbf_align/chipseq_chr22.bam\"),\n genome,\n False,\n None,\n )\n with pytest.raises(AttributeError):\n order.IthLaneSum(lane1.name)\n\n o = order.IthLaneSum(1)\n # raw_data = {lane1.name: smooth.SmoothRaw().calc(df, lane1)}\n raw_data = {\n lane1.name: np.array(\n [\n [0, 0, 4, 0],\n [1, 1, 1, 0],\n [1, 0, 0, 0],\n ]\n )\n }\n\n print(raw_data)\n print(raw_data[lane1.name].sum(axis=1))\n lanes = {lane1.name: lane1}\n lanes[lane2.name] = lane2 # make sure they have a defined order!\n norm_data = norm.AsIs().calc(lanes, raw_data)\n plot_regions = mbf_genomics.regions.GenomicRegions(\n \"testregions\", lambda: df, [], genome\n )\n\n with pytest.raises(KeyError):\n o.calc(\n plot_regions,\n {lane1.name: lane1, lane2.name: lane2},\n raw_data,\n norm_data,\n )\n\n o = order.IthLaneSum(lane2)\n with pytest.raises(KeyError):\n o.calc(plot_regions, {lane1.name: lane1}, raw_data, norm_data)\n\n raw_data[lane2.name] = raw_data[lane1.name].copy()\n res_order, clusters = o.calc(plot_regions, lanes, raw_data, norm_data)\n assert clusters is None\n assert (\n res_order == [2, 1, 0]\n ).all() # remember, from top to bottom in plotting later on.\n\n raw_data[lane2.name] = np.array(\n [\n [0, 0, 0, 0],\n [4, 1, 1, 0],\n [1, 0, 0, 0],\n ]\n )\n o = order.IthLaneSum(0)\n res_order, clusters = o.calc(plot_regions, lanes, raw_data, norm_data)\n assert (\n res_order == [2, 1, 0]\n ).all() # remember, from top to bottom in plotting later on.\n\n o = order.IthLaneSum(1)\n res_order, clusters = o.calc(plot_regions, lanes, raw_data, norm_data)\n\n assert (\n res_order == [0, 2, 1]\n ).all() # remember, from top to bottom in plotting later on.\n\n def test_ithlane_max(self, new_pipegraph):\n genome = get_human_22_fake_genome()\n start = 17750239\n df = pd.DataFrame(\n [\n {\"chr\": \"chr22\", \"start\": start, \"stop\": start + 1000},\n {\"chr\": \"chr22\", \"start\": start + 20000, \"stop\": start + 20000 + 1000},\n {\"chr\": \"chr22\", \"start\": start + 30000, \"stop\": start + 30000 + 1000},\n ]\n )\n lane1 = mbf_align.lanes.AlignedSample(\n \"one\",\n mbf_sampledata.get_sample_path(\"mbf_align/chipseq_chr22.bam\"),\n genome,\n False,\n None,\n )\n lane2 = mbf_align.lanes.AlignedSample(\n \"two\",\n mbf_sampledata.get_sample_path(\"mbf_align/chipseq_chr22.bam\"),\n genome,\n False,\n None,\n )\n with pytest.raises(AttributeError):\n order.IthLaneMax(lane1.name)\n\n o = order.IthLaneMax(1)\n # raw_data = {lane1.name: smooth.SmoothRaw().calc(df, lane1)}\n raw_data = {\n lane1.name: np.array(\n [\n [0, 0, 5, 0],\n [2, 1, 1, 1],\n [1, 0, 0, 0],\n ]\n )\n }\n\n print(raw_data)\n print(raw_data[lane1.name].max(axis=1))\n lanes = {lane1.name: lane1}\n lanes[lane2.name] = lane2\n norm_data = norm.AsIs().calc(lanes, raw_data)\n plot_regions = mbf_genomics.regions.GenomicRegions(\n \"testregions\", lambda: df, [], genome\n )\n\n with pytest.raises(KeyError):\n o.calc(\n plot_regions,\n {lane1.name: lane1, lane2.name: lane2},\n raw_data,\n norm_data,\n )\n\n o = order.IthLaneMax(lane2)\n with pytest.raises(KeyError):\n o.calc(plot_regions, {lane1.name: lane1}, raw_data, norm_data)\n\n raw_data[lane2.name] = raw_data[lane1.name].copy()\n res_order, clusters 
= o.calc(plot_regions, lanes, raw_data, norm_data)\n assert clusters is None\n assert (\n res_order == [2, 1, 0]\n ).all() # remember, from top to bottom in plotting later on.\n\n raw_data[lane2.name] = np.array(\n [\n [0, 0, 0, 0],\n [5, 1, 1, 0],\n [1, 0, 0, 4],\n ]\n )\n o = order.IthLaneMax(0)\n res_order, clusters = o.calc(plot_regions, lanes, raw_data, norm_data)\n assert (\n res_order == [2, 1, 0]\n ).all() # remember, from top to bottom in plotting later on.\n\n o = order.IthLaneMax(1)\n res_order, clusters = o.calc(plot_regions, lanes, raw_data, norm_data)\n\n assert (\n res_order == [0, 2, 1]\n ).all() # remember, from top to bottom in plotting later on.\n\n def test_by_column(self, new_pipegraph_no_qc):\n genome = get_human_22_fake_genome()\n start = 17750239\n df = pd.DataFrame(\n [\n {\n \"chr\": \"chr22\",\n \"start\": start,\n \"stop\": start + 1000,\n \"colA\": \"a\",\n },\n {\n \"chr\": \"chr22\",\n \"start\": start + 20000,\n \"stop\": start + 20000 + 1000,\n \"colA\": \"c\",\n },\n {\n \"chr\": \"chr22\",\n \"start\": start + 30000,\n \"stop\": start + 30000 + 1000,\n \"colA\": \"b\",\n },\n ]\n )\n lane1 = mbf_align.lanes.AlignedSample(\n \"one\",\n mbf_sampledata.get_sample_path(\"mbf_align/chipseq_chr22.bam\"),\n genome,\n False,\n None,\n )\n lanes = {lane1.name: lane1}\n o = order.ByAnnotator(\"colA\", func=lambda x: [ord(y) for y in x])\n raw_data = {\n lane1.name: np.array(\n [\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n ]\n )\n }\n plot_regions = mbf_genomics.regions.GenomicRegions(\n \"testregions\", lambda: df, [], genome\n )\n ppg.JobGeneratingJob(\"shu\", lambda: None).depends_on(plot_regions.load())\n ppg.run_pipegraph()\n plot_regions._load()\n\n norm_data = norm.AsIs().calc(lanes, raw_data)\n res_order, clusters = o.calc(plot_regions, lanes, raw_data, norm_data)\n assert (res_order == [0, 2, 1]).all()\n\n def test_by_annotator(self, new_pipegraph_no_qc):\n genome = get_human_22_fake_genome()\n start = 17750239\n df = pd.DataFrame(\n [\n {\n \"chr\": \"chr22\",\n \"start\": start,\n \"stop\": start + 1000,\n },\n {\n \"chr\": \"chr22\",\n \"start\": start + 20000,\n \"stop\": start + 20000 + 1000,\n },\n {\n \"chr\": \"chr22\",\n \"start\": start + 30000,\n \"stop\": start + 30000 + 1000,\n },\n ]\n )\n lane1 = mbf_align.lanes.AlignedSample(\n \"one\",\n mbf_sampledata.get_sample_path(\"mbf_align/chipseq_chr22.bam\"),\n genome,\n False,\n None,\n )\n lanes = {lane1.name: lane1}\n raw_data = {\n lane1.name: np.array(\n [\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n ]\n )\n }\n plot_regions = mbf_genomics.regions.GenomicRegions(\n \"testregions\", lambda: df, [], genome\n )\n\n class FakeAnno(mbf_genomics.annotator.Annotator):\n columns = [\"colA\"]\n\n def calc(self, df):\n return pd.Series([1, 3, 2])\n\n o = order.ByAnnotator(FakeAnno())\n ppg.JobGeneratingJob(\"shu\", lambda: None).depends_on(\n o.get_dependencies(plot_regions, lanes)[0]\n )\n ppg.run_pipegraph()\n plot_regions._load()\n\n norm_data = norm.AsIs().calc(lanes, raw_data)\n res_order, clusters = o.calc(plot_regions, lanes, raw_data, norm_data)\n assert (res_order == [0, 2, 1]).all()\n", "id": "2611267", "language": "Python", "matching_score": 3.3570291996002197, "max_stars_count": 0, "path": "tests/test_chipseq_heatmap.py" }, { "content": "import pypipegraph as ppg\nimport numpy as np\n\n\ndef get_coverage_vector(bam, chr, start, stop, extend_reads_bp=0):\n # todo: replace with rust\n length = int(\n stop - start\n ) # here we still need floats for the calculation (if start and stop 
are floats, otherwise rounding down will make the array too short), but we need integers for the np.zeros array\n start = int(\n start\n ) # start and stop need to be integers, since we will not get reads from position x.5\n stop = int(stop)\n res = np.zeros(length, dtype=float)\n for read in bam.fetch(chr, max(start, 0), stop):\n if read.is_reverse:\n add_start = read.pos - extend_reads_bp\n add_stop = read.pos + read.qlen\n else:\n add_start = read.pos\n add_stop = read.pos + read.qlen + extend_reads_bp\n add_start = max(start, add_start)\n add_stop = min(stop, add_stop)\n res[add_start - start : add_stop - start] += 1\n return res\n\n\nclass SmoothExtendedReads(object):\n \"\"\"Each read extended by x bp in 3' direction\"\"\"\n\n def __init__(self, extend_by_bp=200):\n self.name = \"Smooth_Extended_%ibp\" % extend_by_bp\n self.extend_by_bp = extend_by_bp\n\n def get_dependencies(self, lane):\n deps = [lane.load()]\n deps.append(\n ppg.FunctionInvariant(\n \"genomics.regions.heatmaps.\" + self.name, self.__class__.calc\n )\n )\n return deps\n\n def calc(self, regions, lane):\n result = []\n for ii, row in regions.iterrows():\n signal = get_coverage_vector(\n lane.get_bam(), row[\"chr\"], row[\"start\"], row[\"stop\"], self.extend_by_bp\n )\n if len(signal) != row[\"stop\"] - row[\"start\"]: # pragma: no cover\n raise ValueError( # pragma: no cover\n \"Signal had wrong length:\\nrow: %s,\\nsignal_shape: %s,\\nstop-start=%s\"\n % (row, signal.shape, row[\"stop\"] - row[\"start\"])\n )\n result.append(signal)\n return np.vstack(result)\n\n\nclass SmoothExtendedReadsMinusBackground(object):\n \"\"\"Each read extended by x bp in 3' direction\"\"\"\n\n def __init__(self, background_lanes, extend_by_bp=200):\n \"\"\"\n @background_lanes a dictionary of lane_names to background lanes. 
lane_names means the foreground lanes as in lane.name!\n \"\"\"\n self.name = \"Smooth_Extended_%ibp_minus_background\" % extend_by_bp\n self.extend_by_bp = extend_by_bp\n self.background = background_lanes\n\n def get_dependencies(self, lane):\n deps = [lane.load()]\n deps.append(self.background[lane.name].load())\n deps.append(\n ppg.FunctionInvariant(\n \"genomics.regions.heatmaps.\" + self.name, self.__class__.calc\n )\n )\n return deps\n\n def calc(self, regions, lane):\n bg_lane = self.background[lane.name]\n result = []\n for ii, row in regions.iterrows():\n signal = get_coverage_vector(\n lane.get_bam(), row[\"chr\"], row[\"start\"], row[\"stop\"], self.extend_by_bp\n )\n background_signal = get_coverage_vector(\n bg_lane.get_bam(),\n row[\"chr\"],\n row[\"start\"],\n row[\"stop\"],\n self.extend_by_bp,\n )\n result.append(signal - background_signal)\n return np.array(result)\n\n\nclass SmoothRaw(SmoothExtendedReads):\n \"\"\"just the reads, no smoothing\"\"\"\n\n def __init__(self):\n self.name = \"Smooth_Raw\"\n self.extend_by_bp = 0\n", "id": "2181823", "language": "Python", "matching_score": 0.4994605481624603, "max_stars_count": 0, "path": "src/mbf_heatmap/chipseq/smooth.py" }, { "content": "from .base import Aligner\nimport pypipegraph as ppg\nfrom pathlib import Path\nfrom ..util import download_file, Version, download_tar_bz2_and_turn_into_tar_gzip\nimport subprocess\nimport tempfile\nimport os\n\nclass BWA(Aligner):\n def __init__(self, version=\"_last_used\", store=None):\n super().__init__(version, store)\n self.accepted_algos = [\"mem\", \"aln\", \"samse\", \"sampe\", \"bwasw\"]\n\n @property\n def name(self):\n return \"BWA\"\n\n @property\n def multi_core(self):\n return True\n\n def get_latest_version(self):\n return \"0.7.17\"\n\n def fetch_version(self, version, target_filename): # pragma: no cover\n url = f\"https://sourceforge.net/projects/bio-bwa/files/bwa-{version}.tar.bz2/download\"\n download_tar_bz2_and_turn_into_tar_gzip(url, target_filename, version, chmod_x_files=[])\n\n def build_index_func(self, fasta_files, gtf_input_filename, output_fileprefix):\n print(self.path)\n cmd = [\n \"FROM_ALIGNER\",\n self.path / \"bwa-0.7.17\" / \"bwa\",\n \"index\",\n \"-p\",\n str((output_fileprefix / \"bwa_index\").absolute()),\n \"-a\",\n \"bwtsw\",\n ]\n if not hasattr(fasta_files, \"__iter__\"):\n fasta_files = [fasta_files]\n cmd.extend([str(Path(x).absolute()) for x in fasta_files])\n return self.get_run_func(output_fileprefix, cmd)\n\n def align_job(\n self,\n input_fastq,\n paired_end_filename,\n index_basename,\n output_bam_filename,\n parameters,\n ):\n output_bam_filename = Path(output_bam_filename)\n algorithm = parameters.get(\"algorithm\", \"mem\")\n if algorithm not in self.accepted_algos:\n raise ValueError(f\"Parameter 'algorithm' must be one of {self.accepted_algos}, was {algorithm}.\")\n cmd = [\n \"FROM_ALIGNER\",\n str(self.path / \"bwa-0.7.17\" / \"bwa\"), \n algorithm\n ]\n if algorithm == \"mem\":\n cmd.extend([\"-k\", str(parameters.get(\"-k\", 19))])\n cmd.extend([\"-w\", str(parameters.get(\"-w\", 100))])\n cmd.extend([\"-d\", str(parameters.get(\"-d\", 100))])\n cmd.extend([\"-r\", str(parameters.get(\"-r\", 1.5))])\n cmd.extend([\"-c\", str(parameters.get(\"-c\", 10000))])\n cmd.extend([\"-A\", str(parameters.get(\"-A\", 1))])\n cmd.extend([\"-B\", str(parameters.get(\"-B\", 4))])\n cmd.extend([\"-O\", str(parameters.get(\"-O\", 6))])\n cmd.extend([\"-E\", str(parameters.get(\"-E\", 1))])\n cmd.extend([\"-L\", str(parameters.get(\"-L\", 
5))])\n cmd.extend([\"-U\", str(parameters.get(\"-U\", 9))])\n cmd.extend([\"-T\", str(parameters.get(\"-T\", 30))])\n cmd.append(str(index_basename / \"bwa_index\"))\n cmd.append(str(Path(input_fastq).absolute()))\n if paired_end_filename:\n cmd.append(str(Path(paired_end_filename).absolute()))\n job = self.run(\n Path(output_bam_filename).parent,\n cmd,\n additional_files_created=[\n output_bam_filename,\n ],\n call_afterwards=self.sam_to_bam(output_bam_filename.parent / \"stdout.txt\", output_bam_filename),\n )\n job.depends_on(\n ppg.ParameterInvariant(output_bam_filename, sorted(parameters.items()))\n )\n return job\n\n def sam_to_bam(self, infile: Path, outfile: Path):\n\n def __convert():\n cmd = [\"samtools\", \"view\", \"-b\", str(infile)]\n tmp = tempfile.NamedTemporaryFile(\"w\")\n with tmp:\n subprocess.check_call(cmd, stdout = tmp)\n cmd = [\"samtools\", \"sort\", tmp.name]\n subprocess.check_call(cmd, stdout = outfile.open(\"w\"))\n with infile.open('w') as op:\n op.write(\"Moved to bam via samtools\")\n\n return __convert\n\n def _aligner_build_cmd(self, output_dir, ncores, arguments):\n if \"mem\" == arguments[1]:\n return arguments + [\"-t\", str(ncores)]\n else:\n return arguments\n", "id": "4564280", "language": "Python", "matching_score": 4.05983304977417, "max_stars_count": 0, "path": "src/mbf_externals/aligners/bwa.py" }, { "content": "from .base import Aligner\nimport pypipegraph as ppg\nimport mbf_align\nfrom pathlib import Path\nfrom ..util import download_file, Version, download_tar_bz2_and_turn_into_tar_gzip\nimport subprocess\nimport tempfile\nimport os\n\n\nclass BBMap(Aligner):\n def __init__(self, version=\"38.86\", store=None):\n super().__init__(version, store)\n\n @property\n def name(self):\n return \"BBMap\"\n\n @property\n def multi_core(self):\n return False\n\n def get_latest_version(self):\n return \"38.86\"\n\n def fetch_version(self, version, target_filename): # pragma: no cover\n url = f\"https://sourceforge.net/projects/bbmap/files/BBMap_{version}.tar.gz/download\"\n# cmd = [\"curl\", url, \"-L\", \"--output\", \"bb.tar.gz\"]\n# subprocess.check_call(cmd)\n with open(target_filename, \"wb\") as op:\n download_file(url, op)\n\n def build_index_func(self, fasta_files, gtf_input_filename, output_fileprefix):\n raise NotImplementedError\n\n def align_job(\n self,\n input_fastq,\n paired_end_filename,\n index_basename,\n output_bam_filename,\n parameters,\n ):\n raise NotImplementedError\n\n def _aligner_build_cmd(self, output_dir, ncores, arguments):\n raise NotImplementedError\n\n\nclass ExtendCigarBBMap(mbf_align.post_process._PostProcessor):\n def __init__(self, samformat=\"1.4\"):\n self.samformat = samformat\n self.bbmap = BBMap()\n self.name = \"BBMap_reformat\"\n self.result_folder_name = Path(\"results\") / \"aligned\" / \"ExtendCigarBBMap\"\n\n def process(self, input_bam_name, output_bam_name, result_dir):\n self.bbmap.store.unpack_version(self.bbmap.name, self.bbmap.version)\n cmd = [\n str(self.bbmap.path / \"bbmap\" / \"reformat.sh\"),\n f\"in={str(input_bam_name.absolute().resolve())}\",\n f\"out={str(output_bam_name.absolute().resolve())}\",\n f\"sam={self.samformat}\",\n ]\n print(\" \".join(cmd))\n subprocess.check_call(cmd)\n\n def register_qc(self, new_lane):\n pass # pragma: no cover\n\n def get_version(self):\n return self.bbmap.version\n\n def get_parameters(self):\n return self.samformat\n\n", "id": "7714622", "language": "Python", "matching_score": 4.154815673828125, "max_stars_count": 0, "path": 
"src/mbf_externals/aligners/bbmap.py" }, { "content": "from ..externals import ExternalAlgorithm\nfrom pathlib import Path\nfrom abc import abstractmethod\nimport pypipegraph as ppg\n\n\nclass Aligner(ExternalAlgorithm):\n @abstractmethod\n def align_job(\n self,\n input_fastq,\n paired_end_filename,\n index_basename,\n output_bam_filename,\n parameters,\n ):\n pass # pragma: no cover\n\n @abstractmethod\n def build_index_func(self, fasta_files, gtf_input_filename, output_prefix):\n pass # pragma: no cover\n\n @abstractmethod\n def _aligner_build_cmd(self, output_dir, ncores, arguments):\n pass # pragma: no cover\n\n def build_cmd(self, output_dir, ncores, arguments):\n if (\n not isinstance(arguments, list)\n or len(arguments) < 2\n or arguments[0] != \"FROM_ALIGNER\"\n ):\n raise ValueError(\n \"Please call one of the following functions instead: .align_job, .build_index_job\"\n + str(arguments)\n )\n return self._aligner_build_cmd(output_dir, ncores, arguments[1:])\n\n def build_index_job(self, fasta_files, gtf_input_filename, output_fileprefix):\n output_directory = Path(output_fileprefix)\n output_directory.mkdir(parents=True, exist_ok=True)\n sentinel = output_directory / \"sentinel.txt\"\n job = ppg.FileGeneratingJob(\n sentinel,\n self.build_index_func(fasta_files, gtf_input_filename, output_directory),\n ).depends_on(\n ppg.FileChecksumInvariant(\n self.store.get_zip_file_path(self.name, self.version)\n )\n )\n if self.multi_core:\n job.cores_needed = -1\n job.index_path = output_fileprefix\n return job\n\n def build_index(self, fasta_files, gtf_input_filename, output_fileprefix):\n output_fileprefix = Path(output_fileprefix)\n output_fileprefix.mkdir(parents=True, exist_ok=True)\n func = self.build_index_func(fasta_files, gtf_input_filename, output_fileprefix)\n print(func)\n print(func.__code__)\n func()\n\n def get_index_version_range(self): # pragma: no cover\n return None, None\n", "id": "2857704", "language": "Python", "matching_score": 1.2269021272659302, "max_stars_count": 0, "path": "src/mbf_externals/aligners/base.py" }, { "content": "from matplotlib import pyplot as plt\nimport pypipegraph as ppg\nimport venn\n\n\ndef plot_venn(output_prefix, a_dict):\n if hasattr(next(iter(a_dict.values())), \"venn_annotator\"):\n return plot_venn_from_genes_with_comparisons(output_prefix, a_dict)\n else:\n raise NotImplementedError(\"Expand!\")\n\n\ndef plot_venn_from_genes_with_comparisons(\n output_prefix, a_dict, id_column=\"gene_stable_id\"\n):\n if len(a_dict) not in (2, 3):\n raise ValueError(\"Max support 3 sets currently\")\n\n def plot():\n up = {}\n down = {}\n for name, genes_ddf in sorted(a_dict.items()):\n df = genes_ddf.df\n stable_ids = df[id_column]\n column = genes_ddf.venn_annotator[\"log2FC\"]\n up[name] = set(stable_ids[df[column] > 0])\n down[name] = set(stable_ids[df[column] < 0])\n plt.figure(figsize=(4, 4))\n venn.venn(up)\n plt.savefig(str(output_prefix) + \".up.png\", dpi=72)\n plt.figure(figsize=(4, 4))\n venn.venn(down)\n plt.savefig(str(output_prefix) + \".down.png\", dpi=72)\n\n return (\n ppg.MultiFileGeneratingJob(\n [str(output_prefix) + \".up.png\", str(output_prefix) + \".down.png\"], plot\n )\n .depends_on([x.add_annotator(x.venn_annotator) for x in a_dict.values()])\n .depends_on(ppg.ParameterInvariant(output_prefix, id_column))\n )\n", "id": "575060", "language": "Python", "matching_score": 1.6957677602767944, "max_stars_count": 0, "path": "src/mbf_comparisons/venn.py" }, { "content": "import pytest\nimport pypipegraph as ppg\nimport pandas as 
pd\nfrom mbf_genomics import DelayedDataFrame\nfrom mbf_comparisons import Comparisons, venn, Log2FC\nfrom mbf_qualitycontrol.testing import assert_image_equal\n\n\[email protected](\"new_pipegraph_no_qc\")\nclass TestVenn:\n def test_venn_from_logfcs(self):\n ppg.util.global_pipegraph.quiet = False\n d = DelayedDataFrame(\n \"ex1\",\n pd.DataFrame(\n {\n \"gene_stable_id\": [\"A\", \"B\", \"C\", \"D\", \"E\"],\n \"a\": [1, 1, 1, 1, 1],\n \"b\": [1, 2, 3, 4, 5],\n \"c\": [1, 1, 3, 0.5, 0.75],\n }\n ),\n )\n comp = Comparisons(d, {\"a\": [\"a\"], \"b\": [\"b\"], \"c\": [\"c\"]})\n a = comp.all_vs_b(\"a\", Log2FC())\n selected = {name: x.filter([(\"log2FC\", \"|>=\", 1)]) for name, x in a.items()}\n plot_job = venn.plot_venn(\"test\", selected)\n ppg.run_pipegraph()\n assert_image_equal(plot_job.filenames[0], \"_down\")\n assert_image_equal(plot_job.filenames[1], \"_up\")\n", "id": "114062", "language": "Python", "matching_score": 2.693570613861084, "max_stars_count": 0, "path": "tests/test_venn.py" }, { "content": "import pytest\nimport numpy as np\nimport pandas as pd\nfrom mbf_genomics import DelayedDataFrame\nfrom mbf_qualitycontrol.testing import assert_image_equal\nfrom mbf_heatmap.ddf import HeatmapPlot, order as heatmap_order, norm as heatmap_norm\n\nfrom pypipegraph.testing import run_pipegraph\n\n\[email protected](\"both_ppg_and_no_ppg_no_qc\")\nclass TestComplete:\n def test_very_simple(self):\n df = pd.DataFrame(\n {\n \"a1\": [0, 1, 2],\n \"a2\": [0.5, 1.5, 2.5],\n \"b1\": [2, 1, 0],\n \"b2\": [2.5, 0.5, 1],\n }\n )\n ddf = DelayedDataFrame(\"test\", df)\n of = \"test.png\"\n h = HeatmapPlot(\n ddf, df.columns, of, heatmap_norm.Unchanged(), heatmap_order.Unchanged()\n )\n run_pipegraph()\n assert_image_equal(h.output_filename)\n\n def test_hierarchical_pearson(self):\n df = pd.DataFrame(\n {\n \"a1\": [0, 1, 2],\n \"a2\": [0.5, 1.5, 2.5],\n \"b1\": [2, 1, 0],\n \"b2\": [0.5, 0.5, 1],\n }\n )\n df = df.sample(200, replace=True, random_state=500)\n np.random.seed(500)\n df += np.random.normal(0, 1, df.shape)\n ddf = DelayedDataFrame(\"test\", df)\n of = \"test.png\"\n h = HeatmapPlot(\n ddf,\n df.columns,\n of,\n heatmap_norm.Unchanged(),\n heatmap_order.HierarchicalPearson(),\n )\n run_pipegraph()\n assert_image_equal(h.output_filename)\n", "id": "9870219", "language": "Python", "matching_score": 2.5714244842529297, "max_stars_count": 0, "path": "tests/test_ddf_heatmap.py" }, { "content": "from .heatmap import HeatmapPlot\nfrom . import order\nfrom . 
import norm\n\nall = [HeatmapPlot, order, norm]\n", "id": "7560393", "language": "Python", "matching_score": 0.07357213646173477, "max_stars_count": 0, "path": "src/mbf_heatmap/ddf/__init__.py" }, { "content": "import pandas as pd\nimport numpy as np\n\n\nclass _NormStrategy:\n pass\n\n\nclass Unchanged(_NormStrategy):\n \"\"\"straight pass through\"\"\"\n\n name = \"Unchanged\"\n\n def calc(self, df, columns):\n return df[columns]\n\n def deps(self):\n return []\n\n\nclass Log2(_NormStrategy):\n \"\"\"straight pass through\"\"\"\n\n name = \"Unchanged\"\n\n def calc(self, df, columns):\n return pd.DataFrame({x: np.log2(df[x].values) for x in columns}, index=df.index)\n\n def deps(self):\n return []\n", "id": "2742194", "language": "Python", "matching_score": 1.2565492391586304, "max_stars_count": 0, "path": "src/mbf_heatmap/ddf/norm.py" }, { "content": "class OrderStrategy:\n pass\n\n\nclass Unchanged(OrderStrategy):\n \"\"\"straight pass through\"\"\"\n\n name = \"Unchanged\"\n\n def calc(self, df, columns):\n df = df[columns].assign(cluster=0)\n return df\n\n def deps(self):\n return []\n\n\nclass HierarchicalPearson(OrderStrategy):\n name = \"HierarchicalPearson\"\n\n def calc(self, df, columns):\n import scipy.cluster.hierarchy as hc\n\n matrix = df[columns].transpose().corr()\n z = hc.linkage(matrix)\n new_order = [x for x in hc.leaves_list(z)]\n df = df[columns].iloc[new_order].assign(cluster=0)\n return df\n\n def deps(self):\n return []\n", "id": "7361192", "language": "Python", "matching_score": 0.05167185515165329, "max_stars_count": 0, "path": "src/mbf_heatmap/ddf/order.py" }, { "content": "import unittest\nfrom __init__ import *\n\n\ndef read_exported_data(instrument_export_filename, text_export_filename):\n well_name_lookup = {}\n text_report_count = 0\n df = pd.read_csv(text_export_filename, sep=\"\\t\",low_memory=False)\n df.columns = [x.strip() for x in df.columns]\n for ii, row in df.iterrows():\n if row['Well'].strip() == '---':\n print 'Warning: Condensed data (not usable) in %s' % fn\n break\n well_no = code_to_well_no(row['Well'].strip())\n if well_no < 1 or well_no > 96:\n raise ValueError(\"invalid Well number\")\n well_name_lookup[well_no] = str(row['Well Name']).strip()\n raw_df = pd.read_csv(instrument_export_filename, sep=\"\\t\", skiprows=1, engine='python')\n raw_df = raw_df[raw_df['Segment'] == 2] #we only care about the amplification part, not the melting curve\n well_names = []\n for ii, row in raw_df.iterrows():\n try:\n well_name = well_name_lookup[row['Well']]\n except KeyError: # manual checking showed that these are empty wells indeed\n well_name = np.nan\n well_names.append(well_name)\n raw_df.insert(0, 'Well Name', well_names)\n return raw_df.reset_index()\n\n\ndef compare_mxp_and_exported(mxp_df, exported_df):\n mxp_df = mxp_df.copy()\n mxp_df.Well = mxp_df.Well + 1\n mxp_df = mxp_df.set_index(['Well','Cycle'])\n exported_df = exported_df.set_index(['Well','Cycle #']).copy()\n exported_df.ix[exported_df['Well Name'] == '---', 'Well Name'] = ''\n mxp_df = mxp_df.ix[exported_df.index]\n return (\n (exported_df.Fluorescence == mxp_df.Fluorescence) & \n (exported_df['Well Name'] == mxp_df['Well Name']) &\n (exported_df['Dye'] == mxp_df['Assay']) \n ).all()\n \nclass MXPTests(unittest.TestCase):\n\n def test_format_3000(self):\n #anonymized test file, some empty wells, nothing fancy\n fn = 'testfiles/3000.mxp'\n data = read_mxp(fn)\n exported_data = read_exported_data(\n \"testfiles/3000 - Instrument Data - Text Format 1.txt\",\n \"testfiles/3000 - Text 
Report Data.txt\")\n self.assertTrue(compare_mxp_and_exported(data, exported_data))\n\n def test_format_3005(self):\n #anonymized test file, some empty wells, nothing fancy\n fn = 'testfiles/3005.mxp'\n data = read_mxp(fn)\n exported_data = read_exported_data(\n \"testfiles/3005 - Instrument Data - Text Format 1.txt\",\n \"testfiles/3005 - Text Report Data.txt\")\n self.assertTrue(compare_mxp_and_exported(data, exported_data))\n\n\n\nif __name__ == '__main__': \n unittest.main()\n\n\n", "id": "7350388", "language": "Python", "matching_score": 2.7635691165924072, "max_stars_count": 0, "path": "mxp/tests.py" }, { "content": "\"\"\"Support to read the .mxp file format used by Stratagenes qPCR machines.\n\n\nread_mxp(filename) is the basic function you need, returns\na DataFrame containing the amplification curves (40 cycles, <=96 wells...)\n with the following columns:\n Well: 0..96\n Well Key A1..H12\n Assay - the assay in this well - exported as 'Dye' by MxPro\n Well Name - the name the user assigned\n Fluorescence - the raw measurement at this cyle/well\n Temperature - temperature at this cycle/well\n Cycle - 0..40\n\nCurrently only tested for 96 well plates on Mx3000P and Mx3005P machines.\n\n\n\"\"\"\nimport pandas as pd\nimport olefile\nimport numpy as np\n\n__version__ = \"0.1.3\"\n\ndef read_mxp(filename):\n \"\"\"Read an MXP file and return a dataframe with the annotated amplification curves\"\"\"\n ole = olefile.OleFileIO(filename)\n fileformat = discover_fileformat(ole)\n #print 'fileformat', fileformat\n well_names, assay_names = np.array(extract_well_names_and_assay_names(ole, fileformat))\n well_numbers = np.array(xrange(0, 97))\n #empty wells that were not read, and are not in the amplification curve file...\n ok_wells = (well_names != '') | (assay_names != '')\n well_names = well_names[ok_wells]\n assay_names = assay_names[ok_wells]\n well_numbers = well_numbers[ok_wells]\n\n amplification_data = extract_amplification_curves(ole, fileformat, len(assay_names))\n\n #melting_data = extract_melting_curves(ole)\n \n cycle_count = len(amplification_data[0][0])\n well_count = sum(ok_wells)\n amp_data = {'Well': [], 'Well Key': [], 'Assay': [], 'Well Name': [], 'Fluorescence': [], 'Temperature': [], 'Cycle': []}\n for ii in xrange(0, well_count):\n amp_data['Cycle'].extend(list(xrange(1, cycle_count + 1)))\n amp_data['Well'].extend([well_numbers[ii]] * cycle_count)\n amp_data['Well Key'].extend([well_no_to_code(well_numbers[ii] + 1)] * cycle_count)\n amp_data['Assay'].extend([assay_names[ii]] * cycle_count)\n amp_data['Well Name'].extend([well_names[ii]] * cycle_count)\n amp_data['Fluorescence'].extend(amplification_data[0][ii])\n amp_data['Temperature'].extend(amplification_data[1][ii])\n return pd.DataFrame(amp_data)\n\ndef well_no_to_code(well_no):\n \"\"\"1 -> A1, 2 -> A2, 96 -> H12\"\"\"\n well_no = well_no -1\n first = well_no / 12\n second = well_no % 12\n return chr(ord('A') + first) + str(second + 1)\n\ndef code_to_well_no(code):\n \"\"\"A1 -> 1, H12 -> 96\"\"\"\n if len(code) not in (2, 3):\n raise ValueError(\"Invalid code\")\n first = ord(code[0]) - ord('A')\n second = int(code[1:])\n return 12 * first + second\n\ndef discover_fileformat(ole):\n path = 'Storage2/Stream2'\n with ole.openstream(path) as op:\n x = op.read()\n if ord(x[0xf32]) != 0:\n return 0\n else:\n return 1\n \ndef extract_well_names_and_assay_names(ole, fileformat):\n \"\"\"Read well and assay names from an MXP file\"\"\"\n if isinstance(ole, olefile.OleFileIO):\n path = 'Storage0/Stream0'\n with 
ole.openstream(path) as op:\n d = op.read()\n else:\n path = os.path.join(ole, 'Storage0_Stream0')\n with open(path, 'rb') as op:\n d = op.read()\n parts = d.split('\\xf0\\xe7i\\xa5') \n if fileformat == 1:\n well_parts = [parts[x] for x in xrange(2, len(parts), 12)]\n assay_parts = [parts[x] for x in xrange(9, len(parts), 12)]\n fileformat = 1\n elif fileformat == 0:\n well_parts = [parts[x] for x in xrange(2, len(parts), 11)]\n assay_parts = [parts[x] for x in xrange(8, len(parts), 11)] \n fileformat = 0\n else:\n raise ValueError('fileformat')\n if len(well_parts) != 96:\n raise ValueError(\"Did not find 96 well parts in Storage0\")\n well_names = []\n for part in well_parts:\n length = ord(part[4])\n name = part[5:5 + length]\n well_names.append(name)\n assay_names = []\n for part in assay_parts:\n length = ord(part[8])\n name = part[9:9 + length]\n assay_names.append(name)\n well_names = np.array(well_names)\n assay_names = np.array(assay_names)\n #if (well_names == '').all(): # I have seen files without well names asigned...\n #raise ValueError(\"Could not find a single well name in that file!\")\n if (assay_names == '').all():\n raise ValueError(\"Could not find a single assay name in that file!\")\n return well_names, assay_names\n\ndef to_16_bit(letters):\n \"\"\"Convert the internal (little endian) number format to an int\"\"\"\n a = ord(letters[0])\n b = ord(letters[1])\n return (b << 8) + a\n\ndef extract_amplification_curves(ole, fileformat, supposed_wells = 96):\n \"\"\"extract amplification curves (40 cycles, 96 wells...) from an mxp file\"\"\"\n path = 'Storage2/Stream2'\n with ole.openstream(path) as op:\n x = op.read()\n if fileformat == 0:\n no_of_cyles = ord(x[0xf32]) # offset...\n elif fileformat == 1:\n no_of_cyles = ord(x[0x12f4]) # offset...\n else:\n raise ValueError(\"Unknown fileformat\")\n if no_of_cyles != 40:\n raise ValueError(\"File does not contain 40 cycles. 
Were: %i\" % no_of_cyles)\n y = x.split(\"\\x00\\x00\\x00\\x60\") #96...\n y = y[4]\n y = x.split(\"\\x00\\x00\\x00\\x28\")#possibly use 00 00 00 60 to split first\n y = y[1:supposed_wells + 1]\n y[-1] = y[-1][:len(y[0])] # there is a different seperator after the last one, so we set it to the first length\n if max(set([len(a) for a in y])) > 466: #428:\n raise ValueError(\"Seen a very large chun: %i \" %max(set([len(a) for a in y])))\n result_fluorescence = []\n result_temperatures = []\n for block in y:\n numbers = [to_16_bit(block[3+offset*10:3+2+offset*10]) for offset in xrange(0, no_of_cyles)]\n if len(numbers) != no_of_cyles:\n raise ValueError(\"Not exactly 40 datapoints in amplification curve\")\n result_fluorescence .append(numbers)\n temperatures = [to_16_bit(block[3+4+offset*10:3+4+2+offset*10]) / 10.0 for offset in xrange(0, no_of_cyles)]\n result_temperatures.append(temperatures)\n if len(result_fluorescence) != supposed_wells:\n raise ValueError(\"Not exactly the supposed %i wells - was %i\" % (supposed_wells, len(y)))\n return result_fluorescence, result_temperatures\n", "id": "6217924", "language": "Python", "matching_score": 0.5807756185531616, "max_stars_count": 0, "path": "mxp/__init__.py" }, { "content": "import zipfile\nimport os\nimport numpy as np\nimport pandas as pd\nfrom pathlib import Path\n\n__version__ = '0.155'\n\ntry:\n from functools import lru_cache\nexcept (ImportError, AttributeError):\n # don't know how to tell setup.py that we only need functools32 when under 2.7.\n # so we'll just include a copy (*bergh*)\n import sys\n\n sys.path.append(os.path.join(os.path.dirname(__file__), \"functools32\"))\n from functools32 import lru_cache\n\n\nclass WideNotSupported(ValueError):\n def __init__(self):\n self.message = (\n \".get_wide() is not supported for this dataset. Use .get_dataset() instead\"\n )\n\n\nclass CantApplyExclusion(ValueError):\n pass\n\ndatasets_to_cache = 32\n\nknown_compartment_columns = [\n \"compartment\",\n \"cell_type\",\n \"disease\",\n \"culture_method\", # for those cells we can't take into sequencing ex vivo\n # these are only for backward compability\n \"tissue\",\n \"disease-state\",\n] # tissue\n\n\ndef lazy_member(field):\n \"\"\"Evaluate a function once and store the result in the member (an object specific in-memory cache)\n Beware of using the same name in subclasses!\n \"\"\"\n\n def decorate(func):\n if field == func.__name__:\n raise ValueError(\n \"lazy_member is supposed to store it's value in the name of the member function, that's not going to work. Please choose another name (prepend an underscore...\"\n )\n\n def doTheThing(*args, **kw):\n if not hasattr(args[0], field):\n setattr(args[0], field, func(*args, **kw))\n return getattr(args[0], field)\n\n return doTheThing\n\n return decorate\n\n\nclass Biobank(object):\n \"\"\"An interface to a dump of our Biobank.\n Also used internally by the biobank website to access the data.\n\n In essence, a souped up dict of pandas dataframes stored\n as pickles in a zip file with memory caching\"\"\"\n\n def __init__(self, filename):\n self.filename = filename\n self.zf = zipfile.ZipFile(filename)\n if not \"_meta/_data_format\" in self.zf.namelist():\n self.data_format = \"msg_pack\"\n else:\n with self.zf.open(\"_meta/_data_format\") as op:\n self.data_format = op.read().decode(\"utf-8\")\n if self.data_format not in (\"msg_pack\", \"parquet\"):\n raise ValueError(\n \"Unexpected data format (%s). 
Do you need to update marburg_biobank\"\n % (self.data_format)\n )\n self._cached_datasets = {}\n\n @property\n def tall(self):\n return _BiobankItemAccessor(self.list_datasets, lambda dataset: self.get_dataset(dataset, apply_exclusion=True))\n\n @property\n def wide(self):\n return _BiobankItemAccessor(self.list_datasets, lambda dataset: self.get_wide(dataset, apply_exclusion=True))\n\n def get_all_patients(self):\n df = self.get_dataset(\"_meta/patient_compartment_dataset\")\n return set(df[\"patient\"].unique())\n\n def number_of_patients(self):\n \"\"\"How many patients/indivuums are in all datasets?\"\"\"\n return len(self.get_all_patients())\n\n def number_of_datasets(self):\n \"\"\"How many different datasets do we have\"\"\"\n return len(self.list_datasets())\n\n def get_compartments(self):\n \"\"\"Get all compartments we have data for\"\"\"\n pcd = self.get_dataset(\"_meta/patient_compartment_dataset\")\n return pcd\n\n @lru_cache(datasets_to_cache)\n def get_dataset_compartments(self, dataset):\n \"\"\"Get available compartments in dataset @dataset\"\"\"\n ds = self.get_dataset(dataset)\n columns = self.get_dataset_compartment_columns(dataset)\n if not columns:\n return []\n else:\n sub_ds = ds[columns]\n sub_ds = sub_ds[~sub_ds.duplicated()]\n result = []\n for dummy_idx, row in sub_ds.iterrows():\n result.append(tuple([row[x] for x in columns]))\n return set(result)\n\n @lru_cache(datasets_to_cache)\n def get_dataset_compartment_columns(self, dataset):\n \"\"\"Get available compartments columns in dataset @dataset\"\"\"\n ds = self.get_dataset(dataset)\n columns = [\n x for x in known_compartment_columns if x in ds.columns\n ] # compartment included for older datasets\n return columns\n\n @lru_cache(datasets_to_cache)\n def get_variables_and_units(self, dataset):\n \"\"\"What variables are availabe in a dataset?\"\"\"\n df = self.get_dataset(dataset)\n if len(df[\"unit\"].cat.categories) == 1:\n vars = df[\"variable\"].unique()\n unit = df[\"unit\"].iloc[0]\n return set([(v, unit) for v in vars])\n else:\n x = df[[\"variable\", \"unit\"]].drop_duplicates([\"variable\", \"unit\"])\n return set(zip(x[\"variable\"], x[\"unit\"]))\n\n def get_possible_values(self, dataset, variable, unit):\n df = self.get_dataset(dataset)\n return df[\"value\"][(df[\"variable\"] == variable) & (df[\"unit\"] == unit)].unique()\n\n @lazy_member(\"_cache_list_datasets\")\n def list_datasets(self):\n \"\"\"What datasets to we have\"\"\"\n if self.data_format == \"msg_pack\":\n return sorted(\n [\n name\n for name in self.zf.namelist()\n if not name.startswith(\"_\")\n and not os.path.basename(name).startswith(\"_\")\n ]\n )\n elif self.data_format == \"parquet\":\n return sorted(\n [\n name[: name.rfind(\"/\")]\n for name in self.zf.namelist()\n if not name.startswith(\"_\")\n and not os.path.basename(name[: name.rfind(\"/\")]).startswith(\"_\")\n and name.endswith(\"/0\")\n ]\n )\n\n @lazy_member(\"_cache_list_datasets_incl_meta\")\n def list_datasets_including_meta(self):\n \"\"\"What datasets to we have\"\"\"\n if self.data_format == \"msg_pack\":\n return sorted(self.zf.namelist())\n elif self.data_format == \"parquet\":\n import re\n\n raw = self.zf.namelist()\n without_numbers = [\n x if not re.search(\"/[0-9]+$\", x) else x[: x.rfind(\"/\")] for x in raw\n ]\n return sorted(set(without_numbers))\n\n @lazy_member(\"_datasets_with_name_lookup\")\n def datasets_with_name_lookup(self):\n return [ds for (ds, df) in self.iter_datasets() if \"name\" in df.columns]\n\n def name_lookup(self, dataset, 
variable):\n df = self.get_dataset(dataset)\n # todo: optimize using where?\n return df[df.variable == variable][\"name\"].iloc[0]\n\n def variable_or_name_to_variable_and_unit(self, dataset, variable_or_name):\n df = self.get_dataset(dataset)[[\"variable\", \"name\", \"unit\"]]\n rows = df[(df.variable == variable_or_name) | (df.name == variable_or_name)]\n if len(rows[\"variable\"].unique()) > 1:\n raise ValueError(\n \"variable_or_name_to_variable led to multiple variables (%i): %s\"\n % (len(rows[\"variable\"].unique()), rows[\"variable\"].unique())\n )\n try:\n r = rows.iloc[0]\n except IndexError:\n raise KeyError(\"Not found: %s\" % variable_or_name)\n return r[\"variable\"], r[\"unit\"]\n\n def _get_dataset_columns_meta(self):\n import json\n\n with self.zf.open(\"_meta/_to_wide_columns\") as op:\n return json.loads(op.read().decode(\"utf-8\"))\n\n def has_wide(self, dataset):\n if dataset.startswith(\"tertiary/genelists\") or \"_differential/\" in dataset:\n return False\n try:\n columns_to_use = self._get_dataset_columns_meta()\n except KeyError:\n return True\n if dataset in columns_to_use and not columns_to_use[dataset]:\n return False\n return True\n\n @lru_cache(maxsize=datasets_to_cache)\n def get_wide(\n self,\n dataset,\n apply_exclusion=True,\n standardized=False,\n filter_func=None,\n column=\"value\",\n ):\n \"\"\"Return dataset in row=variable, column=patient format.\n if @standardized is True Index is always (variable, unit) or (variable, unit, name), \n and columns always (patient, [compartment, cell_type, disease])\n\n Otherwise, unit and compartment will be left off if there is only a \n single value for them in the dataset\n if @apply_exclusion is True, excluded patients will be filtered from DataFrame\n\n @filter_func is run on the dataset before converting to wide, it\n takes a df, returns a modified df\n\n \"\"\"\n dataset = self.dataset_exists(dataset)\n if not self.has_wide(dataset):\n raise WideNotSupported()\n df = self.get_dataset(dataset)\n if filter_func:\n df = filter_func(df)\n\n index = [\"variable\"]\n columns = self._get_wide_columns(dataset, df, standardized)\n if standardized or len(df.unit.cat.categories) > 1:\n index.append(\"unit\")\n if \"name\" in df.columns:\n index.append(\"name\")\n # if 'somascan' in dataset:\n # raise ValueError(dataset, df.columns, index ,columns)\n dfw = self.to_wide(df, index, columns, column=column)\n if apply_exclusion:\n try:\n return self.apply_exclusion(dataset, dfw)\n except CantApplyExclusion:\n return dfw\n else:\n return dfw\n\n def _get_wide_columns(self, dataset, tall_df, standardized):\n try:\n columns_to_use = self._get_dataset_columns_meta()\n except KeyError:\n columns_to_use = {}\n if dataset in columns_to_use:\n columns = columns_to_use[dataset]\n if standardized:\n for x in known_compartment_columns:\n if not x in columns:\n columns.append(x)\n if x in tall_df.columns and (\n (\n hasattr(tall_df[x], \"cat\")\n and (len(tall_df[x].cat.categories) > 1)\n )\n or (len(tall_df[x].unique()) > 1)\n ):\n pass\n else:\n if standardized and x not in tall_df.columns:\n tall_df = tall_df.assign(**{x: np.nan})\n else:\n if \"vid\" in tall_df.columns and not \"patient\" in tall_df.columns:\n columns = [\"vid\"]\n elif \"patient\" in tall_df.columns:\n columns = [\"patient\"]\n else:\n raise ValueError(\n \"Do not know how to convert this dataset to wide format.\"\n \" Retrieve it get_dataset() and call to_wide() manually with appropriate parameters.\"\n )\n for x in known_compartment_columns:\n if x in 
tall_df.columns or (standardized and x != \"compartment\"):\n if not x in columns:\n columns.append(x)\n if x in tall_df.columns and (\n (\n hasattr(tall_df[x], \"cat\")\n and (len(tall_df[x].cat.categories) > 1)\n )\n or (len(tall_df[x].unique()) > 1)\n ):\n pass\n else:\n if standardized and x not in tall_df.columns:\n tall_df = tall_df.assign(**{x: np.nan})\n elif not standardized:\n if (\n hasattr(tall_df[x], \"cat\")\n and (len(tall_df[x].cat.categories) == 1)\n ) or (len(tall_df[x].unique()) == 1):\n if x in columns:\n columns.remove(x)\n return columns\n\n def to_wide(\n self,\n df,\n index=[\"variable\"],\n columns=known_compartment_columns,\n sort_on_first_level=False,\n column='value',\n ):\n \"\"\"Convert a dataset (or filtered dataset) to a wide DataFrame.\n Preferred to pd.pivot_table manually because it is\n a) faster and\n b) avoids a bunch of pitfalls when working with categorical data and\n c) makes sure the columns are dtype=float if they contain nothing but floats\n\n index = variable,unit\n columns = (patient, compartment, cell_type)\n \"\"\"\n if columns == known_compartment_columns:\n columns = [x for x in columns if x in df.columns]\n # raise ValueError(df.columns,index,columns)\n chosen = [column] + index + columns\n df = df.loc[:, [x for x in chosen if x in df.columns]]\n for x in chosen:\n if x not in df.columns:\n df = df.assign(**{x: np.nan})\n set_index_on = index + columns\n columns_pos = tuple(range(len(index), len(index) + len(columns)))\n res = df.set_index(set_index_on).unstack(columns_pos)\n c = res.columns\n c = c.droplevel(0)\n # this removes categories from the levels of the index. Absolutly\n # necessary, or you can't add columns later otherwise\n if isinstance(c, pd.MultiIndex):\n try:\n c = pd.MultiIndex(\n [list(x) for x in c.levels], codes=c.codes, names=c.names\n )\n except AttributeError:\n c = pd.MultiIndex(\n [list(x) for x in c.levels], labels=c.labels, names=c.names\n )\n else:\n c = list(c)\n res.columns = c\n single_unit = not 'unit' in df.columns or len(df['unit'].unique()) == 1\n if isinstance(c, list):\n res.columns.names = columns\n if sort_on_first_level:\n # sort on first level - ie. patient, not compartment - slow though\n res = res[sorted(list(res.columns))]\n for c in res.columns:\n x = res[c].fillna(value=np.nan, inplace=False)\n if (x == None).any(): # noqa: E711\n raise ValueError(\"here\")\n if single_unit: # don't do this for multiple units -> might have multiple dtypes\n try:\n res[c] = pd.to_numeric(x, errors=\"raise\")\n except (ValueError, TypeError): # leaving the Nones as Nones\n pass\n return res\n\n @lru_cache(maxsize=datasets_to_cache)\n def get_excluded_patients(self, dataset):\n \"\"\"Which patients are excluded from this particular dataset (or globally)?.\n\n May return a set of patient_id, or tuples of (('patient', 'x'y'), ('compartment1', 'xyz'),...) 
tuples if only\n certain compartments where excluded.\n\n \"\"\"\n try:\n global_exclusion_df = self.get_dataset(\"clinical/_other_exclusion\")\n excluded = set(global_exclusion_df[\"patient\"].unique())\n except KeyError:\n excluded = set()\n # local exclusion from this dataset\n try:\n exclusion_path = (\n os.path.dirname(dataset)\n + \"/\"\n + \"_\"\n + os.path.basename(dataset)\n + \"_exclusion\"\n )\n exclusion_df = self.get_dataset(exclusion_path)\n except KeyError:\n return excluded\n columns = [\"patient\"] + self.get_dataset_compartment_columns(dataset)\n columns = [x for x in columns if x in exclusion_df.columns]\n res = exclusion_df[columns]\n if set(res.columns) == set([\"patient\"]):\n excluded.update(exclusion_df[\"patient\"].unique())\n else:\n for idx, row in exclusion_df.iterrows():\n d = []\n for c in columns:\n d.append((c, row[c]))\n excluded.add(tuple(d))\n return excluded\n\n def apply_exclusion(self, dataset_name, df):\n dataset_name = self.dataset_exists(dataset_name)\n excluded = self.get_excluded_patients(dataset_name)\n # columns = [\"patient\"] + self.get_dataset_compartment_columns(dataset_name)\n if \"patient\" in df.columns: # a tall dataset\n keep = np.ones((len(df),), np.bool)\n for x in excluded:\n if isinstance(x, tuple):\n matching = np.ones((len(df),), np.bool)\n for column, value in x:\n matching &= df[column] == value\n keep = keep & ~matching\n else:\n keep = keep & ~(df[\"patient\"] == x)\n return df[keep]\n elif df.index.names[0] == \"variable\": # a wide dataset...\n to_remove = []\n for c in df.columns:\n if isinstance(c, tuple):\n if c[0] in excluded: # patient totaly excluded\n to_remove.append(c)\n else:\n key = tuple(zip(df.columns.names, c))\n if key in excluded:\n to_remove.append(c)\n else:\n if c in excluded:\n to_remove.append(c)\n return df.drop(to_remove, axis=1)\n else:\n raise CantApplyExclusion(\n \"Sorry, not a tall or wide DataFrame that I know how to handle.\"\n )\n\n @lru_cache(maxsize=1)\n def get_exclusion_reasons(self):\n \"\"\"Get exclusion information for all the datasets + globally\"\"\"\n result = {}\n global_exclusion_df = self.get_dataset(\"clinical/_other_exclusion\")\n for tup in global_exclusion_df.itertuples():\n if tup.patient not in result:\n result[tup.patient] = {}\n result[tup.patient][\"global\"] = tup.reason\n for dataset in self.list_datasets():\n try:\n exclusion_df = self.get_dataset(\n os.path.dirname(dataset)\n + \"/\"\n + \"_\"\n + os.path.basename(dataset)\n + \"_exclusion\"\n )\n for tup in exclusion_df.itertuples():\n if tup.patient not in result:\n result[tup.patient] = {}\n result[tup.patient][dataset] = tup.reason\n except KeyError:\n pass\n return result\n\n def iter_datasets(self, yield_meta=False):\n if yield_meta:\n lst = self.list_datasets_including_meta()\n else:\n lst = self.list_datasets()\n for name in lst:\n yield name, self.get_dataset(name)\n\n def dataset_exists(self, name):\n datasets = self.list_datasets_including_meta()\n out = False\n if name in datasets:\n out = name\n else:\n next = \"primary/\" + name\n if next in datasets:\n out = next\n else:\n if name.startswith('secondary/'):\n next = 'tertiary' + name[name.find('/'):]\n if next in datasets:\n out = next\n if not out:\n msg = \"No such dataset: %s.\" % name\n import difflib\n\n msg += \"Suggestions: \"\n for x in difflib.get_close_matches(name, datasets):\n msg += \" \" + x + \" \"\n msg += \". 
Use .list_datasets() to view all datasets\"\n raise KeyError(msg)\n return out\n\n def __load_df_from_parquet(self, name):\n import pyarrow\n\n try:\n with self.zf.open(name) as op:\n return pd.read_parquet(op)\n except Exception as e:\n if (\n \"UnsupportedOperation\" in str(e)\n or \"has no attribute\" in str(e)\n or \"UnsupportedOperation\" in repr(e)\n ): # python prior 3.7 has no seek on zipfiles\n import io\n\n with self.zf.open(name) as op:\n b = io.BytesIO()\n b.write(op.read())\n b.seek(0)\n return pd.read_parquet(b)\n elif 'not a path-like object' in str(e):\n import tempfile\n with tempfile.NamedTemporaryFile(suffix=\".biobank.parquet\") as tf:\n with self.zf.open(name) as op:\n tf.write(op.read())\n tf.flush()\n return pd.read_parquet(tf.name)\n else:\n raise\n raise NotImplementedError()\n\n @lru_cache(datasets_to_cache)\n def get_dataset(self, name, apply_exclusion=False):\n \"\"\"Retrieve a dataset\"\"\"\n name = self.dataset_exists(name)\n if self.data_format == \"msg_pack\":\n try:\n import mbf_pandas_msgpack\n except (ImportError, AttributeError):\n raise ImportError(\"Please install mbf-pandas-msgpack to read this old school biobank file\")\n with self.zf.open(name) as op:\n try:\n df = mbf_pandas_msgpack.read_msgpack(op.read())\n except KeyError as e:\n if \"KeyError: u'category'\" in str(e):\n raise ValueError(\n \"Your pandas is too old. You need at least version 0.18\"\n )\n elif self.data_format == \"parquet\":\n import pyarrow\n\n ds = self.zf.namelist()\n ii = 0\n dfs = []\n sub_name = name + \"/\" + str(ii)\n while sub_name in ds:\n dfs.append(self.__load_df_from_parquet(sub_name))\n ii += 1\n sub_name = name + \"/\" + str(ii)\n if not dfs: # not actually a unit splitted dataframe - meta?\n df = self.__load_df_from_parquet(name)\n elif len(dfs) == 1:\n df = dfs[0]\n else:\n categoricals = set()\n for df in dfs:\n for c, dt in df.dtypes.items():\n if dt.name == \"category\":\n categoricals.add(c)\n df = pd.concat(dfs)\n reps = {c: pd.Categorical(df[c]) for c in categoricals}\n if reps:\n df = df.assign(**reps)\n else:\n raise ValueError(\n \"Unexpected data format. Do you need to upgrade marburg_biobank?\"\n )\n if apply_exclusion:\n try:\n df = self.apply_exclusion(name, df)\n except CantApplyExclusion:\n return df\n return df\n\n def get_comment(self, name):\n comments = self.get_dataset(\"_meta/comments\")\n if len(comments) == 0:\n return \"\"\n match = comments.path == name\n if match.any():\n return comments[match].iloc[0][\"comment\"]\n else:\n return \"\"\n\n def get_changelog(self):\n try:\n return self.get_dataset(\"_meta/_changelog\").sort_values(\"revision\")\n except KeyError:\n raise ValueError(\n \"This revision of the biobank did not include a change log.\"\n )\n\ndef biobank_to_url(biobank):\n if biobank.lower() == 'ovca':\n return \"https://mbf.imt.uni-marburg.de/biobank\"\n elif biobank.lower() == 'paad':\n return \"https://mbf.imt.uni-marburg.de/biobank_paad\"\n else:\n raise ValueError(f\"Don't know how to download {biobank}\")\n\n\ndef _find_newest_revision(username, password, revision, biobank):\n import requests\n url = biobank_to_url(biobank) + '/download/find_newest_revision'\n if revision: # find teh newest sub release (eg. 
find 20.3 from 20)\n url += \"?revision=%s\" % revision\n r = requests.get(\n url, stream=True, auth=requests.auth.HTTPBasicAuth(username, password)\n )\n if r.status_code != 200:\n raise ValueError(\"Non 200 OK Return - was %s\" % r.status_code)\n return r.text\n\n\npasswd_file = Path(\"~/.ovca_biobank_password\").expanduser()\ndef query_user():\n import sys\n from getpass import getpass\n if passwd_file.exists():\n print(\"Reading password from \" + str(passwd_file))\n username, password = passwd_file.read_text().split(\"\\n\")[:2]\n store = False\n else:\n username = input(\"please enter your username\")\n password = getpass(\"please enter your password\")\n store = True\n return username, password, store\n\n\n\ndef download_and_open(username=False, password=False, revision=None, biobank='ovca'):\n from pathlib import Path\n import requests\n import shutil\n\n store = False\n if username is False and password is False:\n print(\"query\")\n username,password, store = query_user()\n\n newest = _find_newest_revision(username, password, revision, biobank)\n if store:\n print(\"Storing password to \" + str(passwd_file))\n passwd_file.write_text(\"%s\\n%s\\n\" % (username, password))\n if revision is None:\n print(\"newest revision is\", newest)\n else:\n print(\"newest revision for %s is %s\" % (revision, newest))\n fn = \"marburg_%s_biobank_%s.zip\" % (biobank, newest)\n if not Path(fn).exists():\n print(\"downloading biobank revision %s\" % newest)\n url = biobank_to_url(biobank) + \"/download/marburg_biobank?revision=%s\" % newest\n r = requests.get(\n url, stream=True, auth=requests.auth.HTTPBasicAuth(username, password)\n )\n if r.status_code != 200:\n raise ValueError(\"Non 200 OK Return - was %s\" % r.status_code)\n r.raw.decode_content = True\n fh = open(fn, \"wb\")\n shutil.copyfileobj(r.raw, fh)\n fh.close()\n else:\n print(\"using local copy %s\" % fn)\n return Biobank(fn)\n\n\nclass _BiobankItemAccessor:\n def __init__(self, list_callback, get_callback):\n self.list_callback = list_callback\n self.get_callback = get_callback\n\n def __getitem__(self, key):\n return self.get_callback(key)\n\n def _ipython_key_completions_(self):\n return self.list_callback()\n\nOvcaBiobank = Biobank # old school code support\n", "id": "9805336", "language": "Python", "matching_score": 5.449071407318115, "max_stars_count": 0, "path": "src/marburg_biobank/__init__.py" }, { "content": "import pandas as pd\nimport tempfile\nimport inspect\nimport pypipegraph as ppg\nfrom pathlib import Path\nimport time\nimport re\nimport pickle\nimport zipfile\nimport os\nimport json\nimport base64\nfrom . import WideNotSupported\n\n\nsettings = None\n\n\ndef apply_ovca_settings():\n global settings\n if settings is not None and settings[\"what\"] != \"OVCA\":\n raise ValueError(\"different apply_*_settings being called\")\n\n def check_patient_id(patient_id):\n if patient_id.startswith(\"OVCA\"):\n if not re.match(r\"^OVCA\\d+(R[0-9]*)?$\", patient_id):\n raise ValueError(\"Patient id must follow OVCA\\\\d(R[0-9]*)? 
if it starts with OVCA\")\n return \"cancer\"\n elif patient_id.startswith(\"OC\"):\n raise ValueError(\"OVCA patients must not start with OC\")\n else:\n return \"non-cancer\"\n\n settings = {\n \"what\": \"OVCA\",\n # for the primary data\n \"must_have_columns\": [\"variable\", \"unit\", \"value\", \"patient\"],\n # for 'secondary' datasets\n \"must_have_columns_secondary\": [\"variable\", \"unit\", \"value\"],\n # for gene lists\n \"must_have_columns_tertiary_genelists\": [\"stable_id\", \"gene\"],\n \"allowed_cells\": {\n \"T\",\n \"macrophage\",\n \"tumor\",\n \"tumor_s\",\n \"tumor_sc\",\n \"tumor_m\",\n \"tumor_L\",\n \"tumor_G\",\n \"MDSC\",\n \"NK\",\n \"n.a.\",\n \"adipocyte\",\n \"HPMC\",\n \"CAF\",\n },\n \"allowed_compartments\": {\"blood\", \"ascites\", \"n.a.\", \"omentum\"},\n \"allowed_disease_states\": {\"cancer\", \"healthy\", \"benign\", \"n.a.\"},\n \"check_patient_id\": check_patient_id,\n 'database_filename_template': 'marburg_ovca_revision_%s.zip'\n }\n\n\ndef apply_paad_settings():\n \"for the pancreas biobank\"\n global settings\n if settings is not None and settings[\"what\"] != \"PAAD\":\n raise ValueError(\"different apply_*_settings being called\")\n\n def check_patient_id(patient_id):\n if patient_id.startswith(\"ACH\"):\n if not re.match(r\"^ACH-\\d+$\", patient_id):\n raise ValueError(\"Patient id must be ACH\\\\d if it starts with ACH\")\n return \"PAAD\"\n else:\n raise ValueError(\n \"PAAD patients must start with ACH (non-cancer samples yet to be suported in apply_paad_settings\"\n )\n\n settings = {\n \"what\": \"PAAD\",\n # for the primary data\n \"must_have_columns\": [\"variable\", \"unit\", \"value\", \"patient\"],\n # for 'secondary' datasets\n \"must_have_columns_secondary\": [\"variable\", \"unit\", \"value\"],\n # for gene lists\n \"must_have_columns_tertiary_genelists\": [\"stable_id\", \"gene\"],\n \"allowed_cells\": {\"solid_tumor_mix\",},\n \"allowed_compartments\": {\"tumor\"}, # -\n \"allowed_disease_states\": {\"PAAD\",},\n \"check_patient_id\": check_patient_id,\n 'database_filename_template': 'marburg_paad_biobank_revision_%s.zip'\n }\n\n\ndef check_dataframe(name, df):\n # why was this done?\n # if \"variable\" in df.columns:\n # df = df.assign(\n # variable=[\n # x.encode(\"utf-8\") if isinstance(x, str) else x for x in df.variable\n # ]\n # )\n if settings is None:\n raise ValueError(\"Must call apply_*_settings (eg. 
apply_ovca_settings) first\")\n for c in \"seperate_me\":\n if c in df.columns:\n raise ValueError(\"%s must no longer be a df column - %s \" % (c, name))\n if \"compartment\" in df.columns and not \"disease\" in df.columns:\n raise ValueError(\"Columns must now be cell_type/disease/compartment split\")\n if \"patient\" in df.columns:\n for patient in df[\"patient\"]:\n settings[\"check_patient_id\"](patient)\n #\n # dataframes ofter now are _actual_name/0-9+,\n # but possibly only after writing it out...\n if re.search(\"/[0-9]+$\", name):\n name = name[: name.rfind(\"/\")]\n basename = os.path.basename(name)\n # no fixed requirements on _meta dfs\n if not basename.startswith(\"_\") and not name.startswith(\"_\"):\n if (\n \"_differential/\" in name\n or \"/genomics/\" # no special requirements for differential datasets for now\n in name # mutation data is weird enough.\n ):\n mh = set()\n elif name.startswith(\"secondary\") or name.startswith('tertiary/transcriptomics'):\n mh = set(settings[\"must_have_columns_secondary\"])\n elif name.startswith(\"tertiary/genelists\"):\n mh = set(settings[\"must_have_columns_tertiary_genelists\"])\n elif name.startswith(\"tertiary/survival\"):\n mh = set()\n else:\n mh = set(settings[\"must_have_columns\"])\n for c in \"cell\", \"disease_state\", \"tissue\":\n if c in df.columns:\n raise ValueError(\n \"%s must no longer be a df column - %s \" % (c, name)\n )\n\n missing = mh.difference(df.columns)\n if missing:\n raise ValueError(\n \"%s is missing columns: %s, had %s\" % (name, missing, df.columns)\n )\n elif name.endswith(\"_exclusion\"):\n mhc = [\"patient\", \"reason\"]\n missing = set(mhc).difference(df.columns)\n if missing:\n raise ValueError(\n \"%s is missing columns: %s, had %s\" % (name, missing, df.columns)\n )\n\n for column, allowed_values in [\n (\"cell_type\", settings[\"allowed_cells\"]),\n (\"compartment\", settings[\"allowed_compartments\"]),\n (\"disease\", settings[\"allowed_disease_states\"]),\n ]:\n if column in df.columns and not name.startswith(\"secondary/\") and not name.startswith('tertiary/'):\n x = set(df[column].unique()).difference(allowed_values)\n if x:\n raise ValueError(\n \"invalid %s found in %s: %s - check marburg_biobank/create.py, allowed_* if you want to extend it\"\n % (column, name, x)\n )\n\n if \"patient\" in df.columns and not name.endswith(\"_exclusion\"):\n states = set([settings[\"check_patient_id\"](x) for x in df[\"patient\"]])\n if len(states) > 1:\n if \"disease\" not in df.columns:\n raise ValueError(\n \"Datasets mixing cancer and non cancer data need a disease column:%s\"\n % (name,)\n )\n\n for x in \"variable\", \"unit\":\n if x in df.columns:\n try:\n if pd.isnull(df[x]).any():\n raise ValueError(\"%s must not be nan in %s\" % (x, name))\n if df[x].str.startswith(\" \").any():\n raise ValueError(\"At least one %s started with a space\" % x)\n if df[x].str.endswith(\" \").any():\n raise ValueError(\"At least one %s ended with a space\" % x)\n except:\n print(\"column\", x)\n raise\n\n if (\n not basename.startswith(\"_\")\n and not name.startswith(\"_\")\n and not name.startswith(\"tertiary\")\n and mh # was not '_differential/' in name\n ):\n for vu, group in df.groupby([\"variable\", \"unit\"]):\n variable, unit = vu\n if unit == \"string\":\n pass\n elif unit == \"timestamp\":\n for v in group.value:\n if not isinstance(v, pd.Timestamp):\n raise ValueError(\"Not timestamp data in %s %s\" % vu)\n elif unit == \"bool\":\n if set(group.value.unique()) != set([True, False]):\n raise 
ValueError(\n \"Unexpected values for bool variables in %s %s\" % vu\n )\n else:\n if not (\n (group.value.dtype == int) & (group.value.dtype == float)\n ): # might not be floaty enough\n for v in group.value:\n if not isinstance(v, float) and not isinstance(v, int):\n raise ValueError(\"Non float in %s, %s\" % vu)\n\n\ndef fix_the_darn_string(x):\n if isinstance(x, bool):\n return x\n if isinstance(x, bytes):\n x = x.decode(\"utf-8\")\n try:\n return str(x)\n except: # noqa:E722\n print(repr(x))\n print(type(x))\n print(x)\n import pickle\n\n with open(\"debug.dat\", \"w\") as op:\n pickle.dump(x, op)\n raise\n\n\ndef categorical_where_appropriate(df):\n \"\"\"make sure numerical columns are numeric\n and string columns that have less than 10% unique values are categorical\n and everything is unicode!\n\n \"\"\"\n to_assign = {}\n for c in df.columns:\n if df.dtypes[c] == object:\n try:\n to_assign[c] = pd.to_numeric(df[c], errors=\"raise\")\n except (ValueError, TypeError):\n if len(df[c].unique()) <= len(df) * 0.3 or c == \"patient\":\n to_assign[c] = pd.Categorical(df[c])\n new_cats = [fix_the_darn_string(x) for x in to_assign[c].categories]\n to_assign[c].categories = new_cats\n else:\n to_assign[c] = [fix_the_darn_string(x) for x in df[c]]\n df = df.assign(**to_assign)\n df.columns = [fix_the_darn_string(x) for x in df.columns]\n df.index.names = [fix_the_darn_string(x) for x in df.index.names]\n return df\n\n\ndef extract_patient_compartment_meta(dict_of_dfs):\n output = []\n from . import known_compartment_columns\n\n columns = [\"patient\"] + known_compartment_columns\n for name in dict_of_dfs:\n if (\n not name.startswith(\"secondary/\")\n and not name.startswith(\"tertiary/\")\n and not name.startswith(\"_\")\n and not os.path.basename(name).startswith(\"_\")\n ):\n df = dict_of_dfs[name]\n subset = df[[x for x in columns if x in df.columns]]\n subset = subset[~subset.duplicated()]\n for idx, row in subset.iterrows():\n row[u\"dataset\"] = str(name)\n output.append(row)\n return pd.DataFrame(output)\n\n\ndef create_biobank(dict_of_dataframes, name, revision, filename, to_wide_columns):\n \"\"\"Create a file suitable for biobank consumption.\n Assumes all dataframes pass check_dataframe\n \"\"\"\n if settings is None:\n raise ValueError(\"Must call apply_*_settings (eg. 
apply_ovca_settings) first\")\n dict_of_dataframes[\"_meta/biobank\"] = pd.DataFrame(\n [\n {\"variable\": \"biobank\", \"value\": name},\n {\"variable\": \"revision\", \"value\": revision},\n ]\n )\n for name, df in dict_of_dataframes.items():\n print(\"handling\", name)\n # basename = os.path.basename(name)\n s = time.time()\n check_dataframe(name, df)\n print(\"check time\", time.time() - s)\n s = time.time()\n df = categorical_where_appropriate(df)\n print(\"cat time\", time.time() - s)\n s = time.time()\n # enforce alphabetical column order after default columns\n df = df[\n [x for x in settings[\"must_have_columns\"] if x in df.columns]\n + sorted([x for x in df.columns if x not in settings[\"must_have_columns\"]])\n ]\n print(\"column order time\", time.time() - s)\n dict_of_dataframes[name] = df\n s = time.time()\n dict_of_dataframes[\n \"_meta/patient_compartment_dataset\"\n ] = extract_patient_compartment_meta(dict_of_dataframes)\n print(\"patient_compartment_dataset_time\", time.time() - s)\n print(\"now writing zip file\")\n zfs = zipfile.ZipFile(filename, \"w\")\n for name, df in dict_of_dataframes.items():\n tf = tempfile.NamedTemporaryFile(mode=\"w+b\", suffix=\".pq\")\n df.to_parquet(tf)\n tf.flush()\n tf.seek(0, 0)\n zfs.writestr(name, tf.read())\n zfs.writestr(\"_meta/_to_wide_columns\", json.dumps(to_wide_columns))\n zfs.writestr(\"_meta/_data_format\", \"parquet\")\n zfs.close()\n # one last check it's all numbers...\n print(\"checking float\")\n from . import OvcaBiobank\n\n # check that we can do the get_wide on all of them\n bb = OvcaBiobank(filename)\n for ds in bb.list_datasets():\n try:\n df = bb.get_wide(\n ds,\n filter_func=lambda df: df[\n ~df.unit.isin([\"timestamp\", \"string\", \"bool\"])\n ],\n )\n except WideNotSupported:\n continue\n except:\n print(\"issue is in\", ds)\n raise\n # df = bb.get_wide(ds)\n for idx, row in df.iterrows():\n if row.dtype != float:\n print(\"Error in %s %s, dtype was %s\" % (ds, idx, row.dtype))\n\n\ndef split_seperate_me(out_df, in_order=[\"patient\", \"compartment\"]):\n \"\"\"Helper for creating biobank compatible dataframes.\n splits a column 'seperate_me' with OVCA12-compartment\n into seperate patient and compartment columns\"\"\"\n split = [x.split(\"-\") for x in out_df[\"seperate_me\"]]\n return out_df.assign(\n **{x: [y[ii] for y in split] for (ii, x) in enumerate(in_order)}\n ).drop(\"seperate_me\", axis=1)\n\n\ndef write_dfs(dict_of_dfs):\n \"\"\"Helper used by the notebooks to dump the dataframes for import\"\"\"\n for name, df_and_comment in dict_of_dfs.items():\n if isinstance(df_and_comment, tuple):\n df, _comment = df_and_comment\n else:\n df = df_and_comment\n check_dataframe(name, df)\n d = os.path.dirname(name)\n target_path = os.path.join(\"/project/processed\", d)\n if not os.path.exists(target_path):\n os.makedirs(target_path)\n fn = os.path.join(target_path, os.path.basename(name))\n df.to_pickle(fn)\n # with open(fn, \"a\") as op:\n # pickle.dump(comment, op, pickle.HIGHEST_PROTOCOL)\n\n\nexporting_classes = []\n\n\ndef exporting_class(cls):\n exporting_classes.append(cls)\n return cls\n\n\ndef prep_desc(x):\n x = x.strip()\n import re\n\n x = re.sub(\"\\n[ ]+\", \"\\n\", x)\n return x\n\n\ndef exporting_method(output_name, description, input_files, deps, wide_columns):\n def inner(func):\n frame = inspect.stack()[1]\n filename = frame.filename\n cwd = os.getcwd()\n os.chdir(Path(filename).parent)\n func._output_name = output_name\n func._description = prep_desc(description)\n func._input_files = 
[Path(x).absolute() for x in input_files]\n func._deps = deps\n func._wide_columns = wide_columns\n if not isinstance(wide_columns, list):\n raise ValueError(\"wide_columns must be a list of str\")\n os.chdir(cwd)\n func._abs_filename = str(Path(func.__code__.co_filename).absolute())\n return func\n\n return inner\n\n\ndef run_exports(gen_additional_jobs=None, handle_ppg=True, settings='ovca'):\n if settings == 'ovca':\n apply_ovca_settings()\n else:\n raise ValueError(\"unknow setting value\", settings)\n\n old = Path(os.getcwd()).absolute()\n os.chdir(\"/project\")\n if handle_ppg:\n ppg.new_pipegraph()\n # os.chdir(old)\n to_wide_columns = {}\n jobs = []\n for cls in exporting_classes:\n instance = cls()\n if hasattr(instance, \"exports\"):\n instance.exports()\n \n out_prefix = getattr(instance, \"out_prefix\", \"\")\n for method_name in dir(instance):\n method = getattr(instance, method_name)\n if hasattr(method, \"_output_name\"):\n print(cls.__name__, method.__name__)\n output_filename = (\n \"/project/processed/\" + out_prefix + method._output_name + \".units\"\n )\n cwd = str(Path(method._abs_filename).parent)\n\n def write(output_filename=output_filename, method=method, cwd=cwd):\n os.chdir(cwd)\n df = method()\n os.chdir(\"/project\")\n check_dataframe(out_prefix + method._output_name, df)\n Path(output_filename).parent.mkdir(exist_ok=True, parents=True)\n if \"unit\" in df:\n for ii, (unit, sub_df) in enumerate(\n df.groupby(\"unit\", sort=True)\n ):\n try:\n sub_df.to_parquet(\n output_filename[: output_filename.rfind(\".\")]\n + \".\"\n + str(ii)\n + \".parquet\"\n )\n except:\n sub_df.to_pickle(\"debug.pickle\")\n raise\n\n Path(output_filename).write_text(\n json.dumps(sorted(df.unit.unique()))\n )\n else:\n df.to_parquet(\n output_filename[: output_filename.rfind(\".\")] + \".0.parquet\"\n )\n Path(output_filename).write_text(json.dumps([\"nounit\"]))\n Path(output_filename + \".desc\").write_text(method._description)\n\n job = ppg.MultiFileGeneratingJob(\n [output_filename, output_filename + \".desc\"], write\n )\n job.depends_on(\n ppg.FunctionInvariant(output_filename + \"_inner_func\", method)\n )\n if method._input_files:\n job.depends_on(ppg.MultiFileInvariant(method._input_files))\n if method._deps:\n if hasattr(method._deps, \"__call__\"):\n deps = method._deps(method.__self__)\n else:\n deps = method._deps\n job.depends_on(deps)\n\n print(output_filename)\n print(\"\")\n os.chdir(\"/project\")\n jobs.append(job)\n to_wide_columns[out_prefix + method._output_name] = method._wide_columns\n\n def dump_to_wide_columns(output_filename):\n Path(output_filename).write_text(json.dumps(to_wide_columns))\n\n jobs.append(\n ppg.FileGeneratingJob(\n \"/project/processed/_to_wide_columns.json\", dump_to_wide_columns\n ).depends_on(\n ppg.ParameterInvariant(\n \"/project/processed/_to_wide_columns.json\",\n ppg.util.freeze(to_wide_columns),\n )\n )\n )\n\n old = Path(os.getcwd()).absolute()\n if handle_ppg:\n os.chdir(\"/project\")\n ppg.run_pipegraph()\n os.chdir(old)\n return jobs\n\n\ndef PseudoNotebookRun(notebook_python_file, target_object, chdir=False):\n notebook_python_file = str(notebook_python_file)\n inv = ppg.FileInvariant(notebook_python_file)\n\n def run():\n import marburg_biobank.create\n\n source = Path(notebook_python_file).read_text()\n collector = {}\n\n def write_dfs(d):\n res = {}\n for k, v in d.items():\n if isinstance(v, tuple):\n collector[k] = v[0] # throw away description\n else:\n collector[k] = v\n return res\n\n def get_dummy_ipython():\n class 
DummyIpython:\n def run_line_magic(self, *args, **kwargs):\n pass\n\n return DummyIpython()\n\n marburg_biobank.create.write_dfs = write_dfs\n g = globals().copy()\n g[\"get_ipython\"] = get_dummy_ipython\n g['here'] = Path(notebook_python_file).parent.absolute()\n ppg.util.global_pipegraph = None\n if chdir:\n os.chdir(Path(notebook_python_file).parent)\n exec(source, g)\n os.chdir(\"/project\")\n return collector\n\n return ppg.CachedAttributeLoadingJob(\n notebook_python_file + \".result\", target_object, \"data\", run\n ).depends_on(inv)\n", "id": "6734781", "language": "Python", "matching_score": 3.440810441970825, "max_stars_count": 0, "path": "src/marburg_biobank/create.py" }, { "content": "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2012, <NAME> <<EMAIL>>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\nof the Software, and to permit persons to whom the Software is furnished to do\nso, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nimport os\nimport sys\nimport pytest\nimport pypipegraph as ppg\nfrom pathlib import Path\nfrom .shared import write, assertRaises, read, Dummy, append\n\nglobal_test = 0\n\n\[email protected](\"new_pipegraph\")\nclass TestJobs:\n def test_assert_singletonicity_of_jobs(self, new_pipegraph):\n new_pipegraph.new_pipegraph()\n of = \"out/a\"\n data_to_write = \"hello\"\n\n def do_write():\n write(of, data_to_write)\n\n job = ppg.FileGeneratingJob(of, do_write)\n job2 = ppg.FileGeneratingJob(of, do_write)\n assert job is job2\n\n def test_add_job_twice_is_harmless(self, new_pipegraph):\n job = ppg.FileGeneratingJob(\"A\", lambda: 5)\n assert job.job_id in ppg.util.global_pipegraph.jobs\n assert ppg.util.global_pipegraph.jobs[job.job_id] is job\n ppg.util.global_pipegraph.add_job(job)\n assert job.job_id in ppg.util.global_pipegraph.jobs\n assert ppg.util.global_pipegraph.jobs[job.job_id] is job\n\n def test_redifining_a_jobid_with_different_class_raises(self, new_pipegraph):\n new_pipegraph.new_pipegraph()\n of = \"out/a\"\n data_to_write = \"hello\"\n\n def do_write():\n write(of, data_to_write)\n\n ppg.FileGeneratingJob(of, do_write)\n\n def load():\n return \"shu\"\n\n def inner():\n ppg.DataLoadingJob(of, load)\n\n assertRaises(ValueError, inner)\n\n def test_addition(self, new_pipegraph):\n def write_func(of):\n def do_write():\n write(of, \"do_write done\")\n\n return of, do_write\n\n new_pipegraph.new_pipegraph()\n jobA = ppg.FileGeneratingJob(*write_func(\"out/a\"))\n jobB = ppg.FileGeneratingJob(*write_func(\"out/b\"))\n jobC = ppg.FileGeneratingJob(*write_func(\"out/c\"))\n jobD = ppg.FileGeneratingJob(*write_func(\"out/d\"))\n\n aAndB = jobA + jobB\n assert len(aAndB) == 2\n assert 
jobA in aAndB\n assert jobB in aAndB\n\n aAndBandC = aAndB + jobC\n assert jobA in aAndBandC\n assert jobB in aAndBandC\n assert jobC in aAndBandC\n\n aAndBAndD = jobD + aAndB\n assert jobA in aAndBAndD\n assert jobB in aAndBAndD\n assert jobD in aAndBAndD\n\n cAndD = jobC + jobD\n all = aAndB + cAndD\n assert len(all) == 4\n assert jobA in all\n assert jobB in all\n assert jobC in all\n assert jobD in all\n\n def test_raises_on_non_str_job_id(self):\n def inner():\n ppg.FileGeneratingJob(1234, lambda: None)\n\n assertRaises(TypeError, inner)\n\n def test_equality_is_identity(self, new_pipegraph):\n def write_func(of):\n def do_write():\n write(of, \"do_write done\")\n\n return of, do_write\n\n new_pipegraph.new_pipegraph()\n jobA = ppg.FileGeneratingJob(*write_func(\"out/a\"))\n jobA1 = ppg.FileGeneratingJob(*write_func(\"out/a\"))\n jobB = ppg.FileGeneratingJob(*write_func(\"out/b\"))\n assert jobA is jobA1\n assert jobA == jobA1\n assert not (jobA == jobB)\n\n def test_has_hash(self, new_pipegraph):\n new_pipegraph.new_pipegraph()\n jobA = ppg.FileGeneratingJob(\"out/\", lambda: None)\n assert hasattr(jobA, \"__hash__\")\n\n\[email protected](\"new_pipegraph\")\nclass TestJobs2:\n def test_ignore_code_changes_is_ignored(self):\n jobA = ppg.Job(\"shu\")\n jobA.ignore_code_changes()\n\n def test_load_raises(self):\n jobA = ppg.Job(\"shu\")\n\n def inner():\n jobA.load()\n\n assertRaises(ValueError, inner)\n\n def test_is_in_dependency_chain_direct(self):\n jobA = ppg.Job(\"A\")\n jobB = ppg.Job(\"B\")\n jobA.depends_on(jobB)\n assert jobA.is_in_dependency_chain(jobB, 100)\n\n def test_is_in_dependency_chain_direct2(self):\n jobA = ppg.Job(\"A\")\n jobs = []\n for x in range(0, 10):\n j = ppg.Job(str(x))\n if jobs:\n j.depends_on(jobs[-1])\n jobs.append(j)\n jobA.depends_on(jobs[-1])\n assert jobA.is_in_dependency_chain(jobs[0], 100)\n assert jobA.is_in_dependency_chain(jobs[0], 10)\n # max_depth reached -> answer with false\n assert not (jobA.is_in_dependency_chain(jobs[0], 5))\n\n def test_str(self):\n a = ppg.FileGeneratingJob(\"out/A\", lambda: write(\"out/A\", \"hello\"))\n assert isinstance(str(a), str)\n\n a = ppg.ParameterInvariant(\"out/A\", \"hello\")\n assert isinstance(str(a), str)\n\n a = ppg.JobGeneratingJob(\"out/Ax\", lambda: \"hello\")\n assert isinstance(str(a), str)\n\n\[email protected](\"new_pipegraph\")\nclass TestFileGeneratingJob:\n def test_basic(self):\n of = \"out/a\"\n data_to_write = \"hello\"\n\n def do_write():\n print(\"do_write was called\")\n write(of, data_to_write)\n\n job = ppg.FileGeneratingJob(of, do_write)\n job.ignore_code_changes()\n ppg.run_pipegraph()\n assert not (job.failed)\n assert os.path.exists(of)\n op = open(of, \"r\")\n data = op.read()\n op.close()\n assert data == data_to_write\n assert job.was_run\n\n def test_cores_needed(self):\n of = \"out/a\"\n data_to_write = \"hello\"\n\n def do_write():\n print(\"do_write was called\")\n write(of, data_to_write)\n\n job = ppg.FileGeneratingJob(of, do_write)\n job.ignore_code_changes()\n with pytest.raises(ValueError):\n job.cores_needed = 0\n with pytest.raises(ValueError):\n job.cores_needed = \"5\"\n with pytest.raises(ValueError):\n job.cores_needed = -3\n job.cores_needed = 1\n job.cores_needed = 5\n job.cores_needed = -1\n job.cores_needed = -2\n\n for i in range(10):\n j = ppg.FileGeneratingJob(\n \"out/%i\" % i, lambda i=i: write(\"out/%i\" % i, \"b\")\n )\n if i % 2 == 0:\n j.cores_needed = -2\n\n ppg.util.global_pipegraph.rc.cores_available = 5\n ppg.run_pipegraph()\n assert not 
(job.failed)\n assert os.path.exists(of)\n op = open(of, \"r\")\n data = op.read()\n op.close()\n assert data == data_to_write\n assert job.was_run\n\n def test_cores_needed2(self):\n # this is meant to trigger the\n # \"this job needed to much resources, or was not runnable\"\n # case of graph.start_jobs\n\n for i in range(20):\n j = ppg.FileGeneratingJob(\n \"out/%i\" % i, lambda i=i: write(\"out/%i\" % i, \"b\")\n )\n if i % 2 == 0:\n j.cores_needed = 2\n\n ppg.util.global_pipegraph.rc.cores_available = 3\n ppg.run_pipegraph()\n\n def test_needing_more_cores_then_available_raises(self):\n j = ppg.FileGeneratingJob(\"out/A\", lambda: write(\"out/A\", \"A\"))\n j.cores_needed = 50\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n assert j.exception is None\n assert j.error_reason == \"Needed to much memory/cores\"\n\n def test_basic_with_parameter(self):\n data_to_write = \"hello\"\n\n def do_write(filename):\n print(\"do_write was called\")\n write(filename, data_to_write)\n\n job = ppg.FileGeneratingJob(\"out/a\", do_write)\n job.ignore_code_changes()\n ppg.run_pipegraph()\n assert not (job.failed)\n assert os.path.exists(\"out/a\")\n op = open(\"out/a\", \"r\")\n data = op.read()\n op.close()\n assert data == data_to_write\n assert job.was_run\n\n def test_simple_filegeneration_with_function_dependency(self):\n of = \"out/a\"\n data_to_write = \"hello\"\n\n def do_write():\n print(\"do_write was called\")\n write(of, data_to_write)\n\n job = ppg.FileGeneratingJob(of, do_write)\n # job.ignore_code_changes() this would be the magic line to remove the function dependency\n ppg.run_pipegraph()\n assert not (job.failed)\n assert os.path.exists(of)\n op = open(of, \"r\")\n data = op.read()\n op.close()\n assert data == data_to_write\n\n def test_filejob_raises_if_no_data_is_written(self):\n of = \"out/a\"\n\n def do_write():\n write(\"out/A\", \"\")\n\n job = ppg.FileGeneratingJob(of, do_write)\n\n def inner():\n ppg.run_pipegraph()\n\n assertRaises(ppg.RuntimeError, inner)\n assert job.failed\n assert isinstance(job.exception, ppg.JobContractError)\n assert not (os.path.exists(of))\n\n def test_filejob_empty_allowed(self):\n ppg.FileGeneratingJob(\"out/A\", lambda: write(\"out/A\", \"\"), empty_ok=True)\n ppg.run_pipegraph()\n assert read(\"out/A\") == \"\"\n\n def test_filejob_raising_exception_bubbles(self):\n def do():\n raise ValueError(\"Hello Exception\")\n\n job = ppg.FileGeneratingJob(\"out/A\", do)\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n assert \"Hello Exception\" in str(job.exception)\n\n def test_simple_filegeneration_removes_file_on_exception(self):\n of = \"out/a\"\n data_to_write = \"hello\"\n\n def do_write():\n write(of, data_to_write)\n raise ValueError(\"shu\")\n\n job = ppg.FileGeneratingJob(of, do_write)\n try:\n ppg.run_pipegraph()\n raise ValueError(\"should have raised RuntimeError\")\n except ppg.RuntimeError:\n pass\n assert job.failed\n assert not (os.path.exists(of))\n assert isinstance(job.exception, ValueError)\n\n def test_simple_filegeneration_renames_file_on_exception(self):\n of = \"out/a\"\n data_to_write = \"hello\"\n\n def do_write():\n write(of, data_to_write)\n raise ValueError(\"shu\")\n\n job = ppg.FileGeneratingJob(of, do_write, rename_broken=True)\n try:\n ppg.run_pipegraph()\n raise ValueError(\"should have raised RuntimeError\")\n except ppg.RuntimeError:\n pass\n assert job.failed\n assert not (os.path.exists(of))\n assert os.path.exists(of + \".broken\")\n assert isinstance(job.exception, ValueError)\n\n def 
test_simple_filegeneration_captures_stdout_stderr(self):\n of = \"out/a\"\n data_to_write = \"hello\"\n\n def do_write():\n op = open(of, \"w\")\n op.write(data_to_write)\n op.close()\n print(\"stdout is cool\")\n sys.stderr.write(\"I am stderr\")\n\n job = ppg.FileGeneratingJob(of, do_write)\n ppg.run_pipegraph()\n assert not (job.failed)\n assert os.path.exists(of)\n op = open(of, \"r\")\n data = op.read()\n op.close()\n assert data == data_to_write\n assert job.stdout == \"stdout is cool\\n\"\n assert job.stderr == \"I am stderr\" # no \\n here\n\n def test_filegeneration_does_not_change_mcp(self):\n global global_test\n global_test = 1\n of = \"out/a\"\n data_to_write = \"hello\"\n\n def do_write():\n write(of, data_to_write)\n global global_test\n global_test = 2\n\n ppg.FileGeneratingJob(of, do_write)\n ppg.run_pipegraph()\n assert global_test == 1\n\n def test_file_generation_chaining_simple(self):\n ofA = \"out/a\"\n\n def writeA():\n write(ofA, \"Hello\")\n\n jobA = ppg.FileGeneratingJob(ofA, writeA)\n ofB = \"out/b\"\n\n def writeB():\n op = open(ofB, \"w\")\n ip = open(ofA, \"r\")\n op.write(ip.read()[::-1])\n op.close()\n ip.close()\n\n jobB = ppg.FileGeneratingJob(ofB, writeB)\n jobB.depends_on(jobA)\n ppg.run_pipegraph()\n assert read(ofA) == read(ofB)[::-1]\n\n def test_file_generation_multicore(self):\n # one fork per FileGeneratingJob...\n ofA = \"out/a\"\n\n def writeA():\n write(ofA, \"%i\" % os.getpid())\n\n ofB = \"out/b\"\n\n def writeB():\n write(ofB, \"%i\" % os.getpid())\n\n ppg.FileGeneratingJob(ofA, writeA)\n ppg.FileGeneratingJob(ofB, writeB)\n ppg.run_pipegraph()\n assert read(ofA) != read(ofB)\n\n def test_filegenerating_two_jobs_same_file(self):\n ppg.MultiFileGeneratingJob([\"out/A\", \"out/B\"], lambda: write(\"out/A\", \"hello\"))\n with pytest.raises(ValueError):\n ppg.MultiFileGeneratingJob(\n [\"out/A\", \"out/C\"], lambda: write(\"out/A\", \"world\")\n )\n ppg.FileGeneratingJob(\"out/C\", lambda: write(\"out/C\", \"C\"))\n with pytest.raises(ValueError):\n ppg.MultiFileGeneratingJob([\"out/C\"], lambda: write(\"out/A\", \"world\"))\n ppg.MultiFileGeneratingJob([\"out/D\", \"out/E\"], lambda: write(\"out/A\", \"world\"))\n with pytest.raises(ValueError):\n ppg.FileGeneratingJob(\"out/D\", lambda: write(\"out/C\", \"C\"))\n\n def test_invaliding_removes_file(self, new_pipegraph):\n of = \"out/a\"\n sentinel = \"out/b\"\n\n def do_write():\n if os.path.exists(sentinel):\n raise ValueError(\"second run\")\n write(of, \"shu\")\n write(sentinel, \"done\")\n\n job = ppg.FileGeneratingJob(of, do_write, empty_ok=True)\n dep = ppg.ParameterInvariant(\"my_params\", (1,))\n job.depends_on(dep)\n ppg.run_pipegraph()\n assert os.path.exists(of)\n assert os.path.exists(sentinel)\n\n new_pipegraph.new_pipegraph()\n job = ppg.FileGeneratingJob(of, do_write)\n dep = ppg.ParameterInvariant(\n \"my_params\", (2,)\n ) # same name ,changed params, job needs to rerun, but explodes...\n job.depends_on(dep) # on average, half the mistakes are in the tests...\n try:\n ppg.run_pipegraph()\n raise ValueError(\"Should not have been reached\")\n except ppg.RuntimeError:\n pass\n assert not (os.path.exists(of))\n\n def test_passing_non_function(self):\n def inner():\n ppg.FileGeneratingJob(\"out/a\", \"shu\")\n\n assertRaises(ValueError, inner)\n\n def test_passing_non_string_as_jobid(self):\n def inner():\n ppg.FileGeneratingJob(5, lambda: 1)\n\n assertRaises(TypeError, inner)\n\n def test_exceptions_are_preserved(self):\n def shu():\n write(\"out/A\", \"A\")\n write(\"out/Ay\", 
\"ax\")\n raise IndexError(\"twenty-five\") # just some exception\n\n jobA = ppg.FileGeneratingJob(\"out/A\", shu)\n\n def inner():\n ppg.run_pipegraph()\n\n assertRaises(ppg.RuntimeError, inner)\n print(jobA.exception)\n assert isinstance(jobA.exception, IndexError)\n assert not (\n os.path.exists(\"out/A\")\n ) # should clobber the resulting files in this case - just a double check to test_invaliding_removes_file\n assert read(\"out/Ay\") == \"ax\" # but the job did run, right?\n\n def test_exceptions_are_preserved2(self):\n def shu():\n write(\"out/A\", \"A\")\n write(\"out/Ay\", \"ax\")\n raise TypeError(\"twenty-five\") # just some exception\n\n jobA = ppg.FileGeneratingJob(\"out/A\", shu)\n\n def inner():\n ppg.run_pipegraph()\n\n assertRaises(ppg.RuntimeError, inner)\n print(jobA.exception)\n assert isinstance(jobA.exception, TypeError)\n assert \"twenty-five\" in str(jobA.exception)\n assert not (\n os.path.exists(\"out/A\")\n ) # should clobber the resulting files in this case - just a double check to test_invaliding_removes_file\n assert read(\"out/Ay\") == \"ax\" # but the job did run, right?\n\n def test_dumping_graph(self):\n ppg.new_pipegraph(\n quiet=True, invariant_status_filename=\"shu.dat\", dump_graph=True\n )\n ppg.FileGeneratingJob(\"out/A\", lambda: write(\"out/A\", \"A\"))\n ppg.run_pipegraph()\n pid = ppg.util.global_pipegraph.dump_pid\n os.waitpid(pid, 0)\n print(os.listdir(\"logs\"))\n assert os.path.exists(\"logs/ppg_graph.gml\")\n\n def test_rename_broken(self):\n def do(of):\n write(of, \"hello\")\n raise ValueError()\n\n ppg.FileGeneratingJob(\"out/A\", do, rename_broken=True)\n\n def do():\n write(\"out/B\", \"world\")\n raise ValueError()\n\n ppg.FileGeneratingJob(\"out/B\", do, rename_broken=True)\n\n def do():\n raise ValueError()\n\n ppg.FileGeneratingJob(\"out/C\", do, rename_broken=True)\n\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n assert read(\"out/A.broken\") == \"hello\"\n assert read(\"out/B.broken\") == \"world\"\n assert not os.path.exists(\"out/A\")\n assert not os.path.exists(\"out/B\")\n assert not os.path.exists(\"out/C\")\n assert not os.path.exists(\"out/C.broken\")\n\n\[email protected](\"new_pipegraph\")\nclass TestMultiFileGeneratingJob:\n def test_basic(self, new_pipegraph):\n of = [\"out/a\", \"out/b\"]\n\n def do_write():\n for f in of:\n append(f, \"shu\")\n\n ppg.MultiFileGeneratingJob(of, do_write)\n ppg.run_pipegraph()\n for f in of:\n assert read(f) == \"shu\"\n new_pipegraph.new_pipegraph()\n ppg.MultiFileGeneratingJob(of, do_write)\n ppg.run_pipegraph()\n for f in of:\n assert read(f) == \"shu\" # ie. 
job has net been rerun...\n # but if I now delete one...\n os.unlink(of[0])\n new_pipegraph.new_pipegraph()\n ppg.MultiFileGeneratingJob(of, do_write)\n ppg.run_pipegraph()\n assert read(of[0]) == \"shu\"\n assert (\n read(of[1]) == \"shu\"\n ) # Since that file was also deleted when MultiFileGeneratingJob was invalidated...\n\n def test_empty_raises(self):\n of = [\"out/a\", \"out/b\"]\n\n def do_write():\n for f in of:\n append(f, \"shu\")\n write(f, \"\") # one is empty\n\n ppg.MultiFileGeneratingJob(of, do_write)\n with pytest.raises(ValueError):\n ppg.run_pipegraph()\n\n def test_empty_ok(self):\n of = [\"out/a\", \"out/b\"]\n\n def do_write():\n for f in of:\n append(f, \"shu\")\n write(f, \"\") # one is empty\n\n ppg.MultiFileGeneratingJob(of, do_write, empty_ok=True)\n ppg.run_pipegraph()\n for f in of[:-1]:\n assert read(f) == \"shu\"\n assert read(of[-1]) == \"\"\n\n def test_empty_dict(self, new_pipegraph):\n of = [\"out/a\", \"out/b\"]\n\n def do_write():\n for f in of:\n append(f, \"shu\")\n write(of[-1], \"\") # last one is empty\n\n ppg.MultiFileGeneratingJob(of, do_write, empty_ok={of[0]: False, of[1]: True})\n ppg.run_pipegraph()\n for f in of[:-1]:\n assert read(f) == \"shu\"\n assert read(of[-1]) == \"\"\n\n new_pipegraph.new_pipegraph()\n of = [\"out/a1\", \"out/b2\"]\n j = ppg.MultiFileGeneratingJob(\n of, do_write, empty_ok={of[0]: False, of[1]: False}\n )\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n assert isinstance(j.exception, ppg.JobContractError)\n\n new_pipegraph.new_pipegraph()\n with pytest.raises(ValueError):\n j = ppg.MultiFileGeneratingJob(\n of, do_write, empty_ok={of[0] + \"a\": False, of[1]: False}\n )\n with pytest.raises(ValueError):\n j = ppg.MultiFileGeneratingJob(\n of, do_write, empty_ok={of[0]: False, of[1]: False}\n )\n with pytest.raises(ValueError):\n j = ppg.MultiFileGeneratingJob(\n of, do_write, empty_ok={of[0]: False, of[1]: False, \"nosuchfile\": True}\n )\n\n def test_exception_destroys_all_files(self):\n of = [\"out/a\", \"out/b\"]\n\n def do_write():\n for f in of:\n append(f, \"shu\")\n raise ValueError(\"explode\")\n\n ppg.MultiFileGeneratingJob(of, do_write)\n try:\n ppg.run_pipegraph()\n raise ValueError(\"should not be reached\")\n except ppg.RuntimeError:\n pass\n for f in of:\n assert not (os.path.exists(f))\n\n def test_exception_destroys_renames_files(self):\n of = [\"out/a\", \"out/b\"]\n\n def do_write():\n for f in of:\n append(f, \"shu\")\n raise ValueError(\"explode\")\n\n ppg.MultiFileGeneratingJob(of, do_write, rename_broken=True)\n try:\n ppg.run_pipegraph()\n raise ValueError(\"should not be reached\")\n except ppg.RuntimeError:\n pass\n for f in of:\n assert os.path.exists(f + \".broken\")\n\n def test_invalidation_removes_all_files(self, new_pipegraph):\n of = [\"out/a\", \"out/b\"]\n sentinel = \"out/sentinel\" # hack so this one does something different the second time around...\n\n def do_write():\n if os.path.exists(sentinel):\n raise ValueError(\"explode\")\n write(sentinel, \"shu\")\n for f in of:\n append(f, \"shu\")\n\n ppg.MultiFileGeneratingJob(of, do_write).depends_on(\n ppg.ParameterInvariant(\"myparam\", (1,))\n )\n ppg.run_pipegraph()\n for f in of:\n assert os.path.exists(f)\n new_pipegraph.new_pipegraph()\n ppg.MultiFileGeneratingJob(of, do_write).depends_on(\n ppg.ParameterInvariant(\"myparam\", (2,))\n )\n try:\n ppg.run_pipegraph() # since this should blow up\n raise ValueError(\"should not be reached\")\n except ppg.RuntimeError:\n pass\n for f in of:\n assert not 
(os.path.exists(f))\n\n def test_passing_not_a_list_of_str(self):\n def inner():\n ppg.MultiFileGeneratingJob(\"out/a\", lambda: 1)\n\n assertRaises(ValueError, inner)\n\n def test_passing_non_function(self):\n def inner():\n ppg.MultiFileGeneratingJob([\"out/a\"], \"shu\")\n\n assertRaises(ValueError, inner)\n\n def test_exceptions_are_preserved(self):\n def shu():\n write(\"out/A\", \"A\")\n write(\"out/B\", \"B\")\n write(\"out/Az\", \"ax\")\n raise IndexError(\"twenty-five\") # just some exception\n\n jobA = ppg.MultiFileGeneratingJob([\"out/A\", \"out/B\"], shu)\n\n def inner():\n ppg.run_pipegraph()\n\n assertRaises(ppg.RuntimeError, inner)\n assert isinstance(jobA.exception, IndexError)\n assert not (\n os.path.exists(\"out/A\")\n ) # should clobber the resulting files in this case - just a double check to test_invaliding_removes_file\n assert not (\n os.path.exists(\"out/B\")\n ) # should clobber the resulting files in this case - just a double check to test_invaliding_removes_file\n assert read(\"out/Az\") == \"ax\" # but the job did run, right?\n\n def raises_on_non_string_filnames(self):\n def inner():\n ppg.MultiFileGeneratingJob([\"one\", 2], lambda: write(\"out/A\"))\n\n assertRaises(ValueError, inner)\n\n def test_raises_on_collision(self):\n def inner():\n ppg.MultiFileGeneratingJob([\"test1\", \"test2\"], lambda: 5)\n ppg.MultiFileGeneratingJob([\"test2\", \"test3\"], lambda: 5)\n\n assertRaises(ValueError, inner)\n\n def test_duplicate_prevention(self):\n param = \"A\"\n ppg.FileGeneratingJob(\"out/A\", lambda: write(\"out/A\", param))\n\n def inner():\n ppg.MultiFileGeneratingJob([\"out/A\"], lambda: write(\"out/A\", param))\n\n assertRaises(ValueError, inner)\n\n def test_non_str(self):\n param = \"A\"\n\n def inner():\n ppg.MultiFileGeneratingJob([25], lambda: write(\"out/A\", param))\n\n assertRaises(TypeError, inner)\n\n def test_non_iterable(self):\n param = \"A\"\n try:\n ppg.MultiFileGeneratingJob(25, lambda: write(\"out/A\", param))\n assert not (\"Exception not raised\")\n except TypeError as e:\n print(e)\n assert \"filenames was not iterable\" in str(e)\n\n def test_single_stre(self):\n param = \"A\"\n\n def inner():\n ppg.MultiFileGeneratingJob(\"A\", lambda: write(\"out/A\", param))\n\n assertRaises(ValueError, inner)\n\n def test_can_run_now_case_1(self):\n class HonestIsDoneJob(ppg.FileGeneratingJob):\n def is_done(self, depth=0):\n # we have to force it, otherwise\n # the caching of is_done and stat calls\n # will prevent this case from happening\n return os.path.exists(self.job_id)\n\n ppg.util.global_pipegraph.rc.cores_available = 2\n\n def a():\n write(\"out/A\", \"A\")\n import time\n\n time.sleep(1)\n\n a = HonestIsDoneJob(\"out/A\", a)\n b = ppg.FileGeneratingJob(\"out/B\", lambda: write(\"out/B\", \"B\"))\n c = ppg.FileGeneratingJob(\"out/C\", lambda: write(\"out/C\", \"C\"))\n a.ignore_code_changes()\n b.ignore_code_changes()\n c.ignore_code_changes()\n c.depends_on(a)\n c.depends_on(b)\n ppg.run_pipegraph()\n assert read(\"out/C\") == \"C\"\n\n def test_closure_check_fails(self):\n o1 = \"out/s.txt\"\n o2 = \"out/st.txt\"\n\n def do_align():\n job_dir = Path(\"out\")\n for x in [\"s.txt\", \"st.txt\"]:\n with (job_dir / x).open(\"w\") as op:\n op.write(\"ok\")\n\n def do_align2():\n for x in [\"s.txt\", \"st.txt\"]:\n with open(f\"out/{x}\", \"w\") as op:\n op.write(\"ok\")\n\n def inner():\n ppg.MultiFileGeneratingJob([o1, o2], do_align)\n ppg.MultiFileGeneratingJob([o1, o2], do_align2)\n ppg.run_pipegraph()\n\n assertRaises(ValueError, 
inner)\n\n def test_closure_check_works(self):\n o1 = \"out/s.txt\"\n o2 = \"out/st.txt\"\n\n def do_align():\n job_dir = Path(\"out\")\n for x in [\"s.txt\", \"st.txt\"]:\n with (job_dir / x).open(\"w\") as op:\n op.write(\"ok\")\n\n ppg.MultiFileGeneratingJob([o1, o2], do_align)\n ppg.MultiFileGeneratingJob([o1, o2], do_align)\n ppg.run_pipegraph()\n assert os.path.exists(o1)\n assert os.path.exists(o2)\n\n\ntest_modifies_shared_global = []\nshared_value = \"\"\n\n\[email protected](\"new_pipegraph\")\nclass TestDataLoadingJob:\n def test_modifies_worker(self):\n # global shared\n # shared = \"I was the the global in the mcp\"\n def load():\n test_modifies_shared_global.append(\"shared data\")\n\n of = \"out/a\"\n\n def do_write():\n write(\n of, \"\\n\".join(test_modifies_shared_global)\n ) # this might actually be a problem when defining this?\n\n dlJo = ppg.DataLoadingJob(\"myjob\", load)\n writejob = ppg.FileGeneratingJob(of, do_write)\n writejob.depends_on(dlJo)\n\n writejob2 = ppg.FileGeneratingJob(\n \"out/b\",\n lambda: write(\"out/b\", \"b\" + \"\\n\".join(test_modifies_shared_global)),\n )\n writejob2.depends_on(dlJo)\n ppg.run_pipegraph()\n assert read(of) == \"shared data\"\n assert read(\"out/b\") == \"bshared data\"\n\n def test_global_statement_works(self):\n # this currently does not work in the cloudpickle transmitted jobs -\n # two jobs refereing to global have different globals afterwards\n # or the 'global shared' does not work as expected after loading\n global shared_value\n shared_value = \"I was the the global in the mcp\"\n\n def load():\n global shared_value\n shared_value = \"shared data\"\n\n of = \"out/a\"\n\n def do_write():\n write(of, shared_value)\n\n dlJo = ppg.DataLoadingJob(\"myjob\", load)\n writejob = ppg.FileGeneratingJob(of, do_write)\n writejob.depends_on(dlJo)\n ppg.run_pipegraph()\n assert read(of) == \"shared data\"\n\n def test_does_not_get_run_without_dep_job(self):\n of = \"out/shu\"\n\n def load():\n write(\n of, \"shu\"\n ) # not the fine english way, but we need a sideeffect that's checkable\n\n ppg.DataLoadingJob(\"myjob\", load)\n ppg.run_pipegraph()\n assert not (os.path.exists(of))\n\n def test_does_not_get_run_in_chain_without_final_dep(self):\n of = \"out/shu\"\n\n def load():\n write(\n of, \"shu\"\n ) # not the fine english way, but we need a sideeffect that's checkable\n\n job = ppg.DataLoadingJob(\"myjob\", load)\n ofB = \"out/sha\"\n\n def loadB():\n write(ofB, \"sha\")\n\n ppg.DataLoadingJob(\"myjobB\", loadB).depends_on(job)\n ppg.run_pipegraph()\n assert not (os.path.exists(of))\n assert not (os.path.exists(ofB))\n\n def test_does_get_run_in_chain_all(self):\n of = \"out/shu\"\n\n def load():\n write(\n of, \"shu\"\n ) # not the fine english way, but we need a sideeffect that's checkable\n\n job = ppg.DataLoadingJob(\"myjob\", load)\n ofB = \"out/sha\"\n\n def loadB():\n write(ofB, \"sha\")\n\n jobB = ppg.DataLoadingJob(\"myjobB\", loadB).depends_on(job)\n ofC = \"out/c\"\n\n def do_write():\n write(ofC, ofC)\n\n ppg.FileGeneratingJob(ofC, do_write).depends_on(jobB)\n ppg.run_pipegraph()\n assert os.path.exists(of)\n assert os.path.exists(ofB)\n assert os.path.exists(ofC)\n\n def test_chain_with_filegenerating_works(self):\n of = \"out/a\"\n\n def do_write():\n write(of, of)\n\n jobA = ppg.FileGeneratingJob(of, do_write)\n o = Dummy()\n\n def do_load():\n o.a = read(of)\n\n jobB = ppg.DataLoadingJob(\"loadme\", do_load).depends_on(jobA)\n ofB = \"out/b\"\n\n def write2():\n write(ofB, o.a)\n\n ppg.FileGeneratingJob(ofB, 
write2).depends_on(jobB)\n ppg.run_pipegraph()\n assert read(of) == of\n assert read(ofB) == of\n\n def test_does_get_run_depending_on_jobgenjob(self):\n of = \"out/shu\"\n\n def load():\n write(\n of, \"shu\"\n ) # not the fine english way, but we need a sideeffect that's checkable\n\n job = ppg.DataLoadingJob(\"myjob\", load)\n\n def gen():\n ofB = \"out/b\"\n\n def do_write():\n write(ofB, \"hello\")\n\n ppg.FileGeneratingJob(ofB, do_write)\n\n ppg.JobGeneratingJob(\"mygen\", gen).depends_on(job)\n ppg.run_pipegraph()\n assert os.path.exists(of) # so the data loading job was run\n assert read(\"out/b\") == \"hello\" # and so was the jobgen and filegen job.\n\n def test_passing_non_function(self):\n def inner():\n ppg.DataLoadingJob(\"out/a\", \"shu\")\n\n assertRaises(ValueError, inner)\n\n def test_passing_non_string_as_jobid(self):\n def inner():\n ppg.DataLoadingJob(5, lambda: 1)\n\n assertRaises(TypeError, inner)\n\n def test_failing_dataloading_jobs(self):\n o = Dummy()\n of = \"out/A\"\n\n def write():\n write(of, o.a)\n\n def load():\n o.a = \"shu\"\n raise ValueError()\n\n job_fg = ppg.FileGeneratingJob(of, write)\n job_dl = ppg.DataLoadingJob(\"doload\", load)\n job_fg.depends_on(job_dl)\n try:\n ppg.run_pipegraph()\n raise ValueError(\"should not be reached\")\n except ppg.RuntimeError:\n pass\n assert not (os.path.exists(of))\n assert job_dl.failed\n assert job_fg.failed\n assert isinstance(job_dl.exception, ValueError)\n\n def test_prev_dataloading_jobs_not_done_if_there_is_a_non_dataloading_job_inbetween_that_is_done(\n self,\n ):\n # so, A = DataLoadingJob, B = FileGeneratingJob, C = DataLoadingJob, D = FileGeneratingJob\n # D.depends_on(C)\n # C.depends_on(B)\n # B.depends_on(A)\n # B is done.\n # D is not\n # since a would be loaded, and then cleaned up right away (because B is Done)\n # it should never be loaded\n o = Dummy()\n\n def a():\n o.a = \"A\"\n append(\"out/A\", \"A\")\n\n def b():\n append(\"out/B\", \"B\")\n append(\"out/Bx\", \"B\")\n\n def c():\n o.c = \"C\"\n append(\"out/C\", \"C\")\n\n def d():\n append(\"out/D\", \"D\")\n append(\"out/Dx\", \"D\")\n\n jobA = ppg.DataLoadingJob(\"out/A\", a)\n jobB = ppg.FileGeneratingJob(\"out/B\", b)\n jobC = ppg.DataLoadingJob(\"out/C\", c)\n jobD = ppg.FileGeneratingJob(\"out/D\", d)\n jobD.depends_on(jobC)\n jobC.depends_on(jobB)\n jobB.depends_on(jobA)\n jobA.ignore_code_changes()\n jobB.ignore_code_changes()\n jobC.ignore_code_changes()\n jobD.ignore_code_changes()\n write(\"out/B\", \"already done\")\n assert not (os.path.exists(\"out/D\"))\n ppg.run_pipegraph()\n assert read(\"out/D\") == \"D\"\n assert read(\"out/Dx\") == \"D\"\n assert not (\n os.path.exists(\"out/A\")\n ) # A was not executed (as per the premise of the test)\n assert not (\n os.path.exists(\"out/Bx\")\n ) # so B was not executed (we removed the function invariants for this test)\n assert read(\"out/C\") == \"C\"\n\n def test_sending_a_non_pickable_exception_data_loading(self):\n class UnpickableException(Exception):\n def __getstate__(self):\n raise ValueError(\"Can't pickle me\")\n\n def load():\n raise UnpickableException()\n\n jobA = ppg.DataLoadingJob(\"out/A\", load)\n jobB = ppg.FileGeneratingJob(\"out/B\", lambda: True)\n jobB.depends_on(jobA)\n\n def inner():\n ppg.run_pipegraph()\n\n assertRaises(ppg.RuntimeError, inner)\n assert isinstance(jobA.exception, str)\n\n def test_sending_a_non_pickable_exception_file_generating(self):\n class UnpickableException(Exception):\n def __getstate__(self):\n raise ValueError(\"Can't pickle 
me\")\n\n def load():\n raise UnpickableException()\n\n jobB = ppg.FileGeneratingJob(\"out/B\", load)\n\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n assert isinstance(jobB.exception, str)\n\n def test_creating_jobs_in_file_generating_are_ignored(self):\n def load():\n ppg.util.global_pipegraph.new_jobs = (\n {}\n ) # just to see if we can reach the check in the resource coordinator!\n c = ppg.FileGeneratingJob(\"out/C\", lambda: write(\"out/C\", \"C\"))\n write(\"out/A\", \"A\")\n return [c]\n\n ppg.FileGeneratingJob(\"out/A\", load)\n ppg.run_pipegraph()\n assert read(\"out/A\") == \"A\"\n assert not os.path.exists(\"out/C\")\n\n def test_creating_jobs_in_data_loading(self):\n def load():\n ppg.FileGeneratingJob(\"out/C\", lambda: write(\"out/C\", \"C\"))\n\n a = ppg.FileGeneratingJob(\"out/A\", lambda: write(\"out/A\", \"A\"))\n b = ppg.DataLoadingJob(\"out/B\", load)\n a.depends_on(b)\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n assert isinstance(b.exception, ppg.JobContractError)\n assert (\n \"Trying to add new jobs to running pipeline without having new_jobs \"\n in str(b.exception)\n )\n\n def test_job_returning_value_without_modifying_jobgraph(self):\n class BrokenJob(ppg.FileGeneratingJob):\n def run(self):\n return 55\n\n a = BrokenJob(\"out/A\", lambda: None)\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n assert isinstance(a.exception, ppg.JobContractError)\n assert \"Job returned a value \" in str(a.exception)\n\n\[email protected](\"new_pipegraph\")\nclass TestAttributeJob:\n def test_basic_attribute_loading(self):\n o = Dummy()\n\n def load():\n return \"shu\"\n\n job = ppg.AttributeLoadingJob(\"load_dummy_shu\", o, \"a\", load)\n of = \"out/a\"\n\n def do_write():\n write(of, o.a)\n\n ppg.FileGeneratingJob(of, do_write).depends_on(job)\n ppg.run_pipegraph()\n assert read(of) == \"shu\"\n\n def test_chained(self):\n o = Dummy()\n\n def load():\n return \"shu\"\n\n def load2():\n return o.a + \"sha\"\n\n job = ppg.AttributeLoadingJob(\"load_dummy_shu\", o, \"a\", load)\n job2 = ppg.AttributeLoadingJob(\"load_dummy_shu2\", o, \"b\", load2)\n of = \"out/a\"\n\n def do_write():\n write(of, o.b)\n\n ppg.FileGeneratingJob(of, do_write).depends_on(job2)\n job2.depends_on(job)\n ppg.run_pipegraph()\n assert read(of) == \"shusha\"\n\n def test_attribute_loading_does_not_affect_mcp(self):\n o = Dummy()\n\n def load():\n return \"shu\"\n\n job = ppg.AttributeLoadingJob(\"load_dummy_shu\", o, \"a\", load)\n of = \"out/a\"\n\n def do_write():\n write(of, o.a)\n\n ppg.FileGeneratingJob(of, do_write).depends_on(job)\n ppg.run_pipegraph()\n assert read(of) == \"shu\"\n assert not (hasattr(o, \"a\"))\n\n def test_attribute_loading_does_not_run_withot_dependency(self):\n o = Dummy()\n tf = \"out/testfile\"\n\n def load():\n write(tf, \"hello\")\n return \"shu\"\n\n ppg.AttributeLoadingJob(\"load_dummy_shu\", o, \"a\", load)\n ppg.run_pipegraph()\n assert not (hasattr(o, \"a\"))\n assert not (os.path.exists(tf))\n\n def test_attribute_disappears_after_direct_dependency(self):\n o = Dummy()\n job = ppg.AttributeLoadingJob(\"load_dummy_shu\", o, \"a\", lambda: \"shu\")\n of = \"out/A\"\n\n def do_write():\n write(of, o.a)\n\n fgjob = ppg.FileGeneratingJob(of, do_write).depends_on(job)\n of2 = \"out/B\"\n\n def later_write():\n write(of2, o.a)\n\n ppg.FileGeneratingJob(of2, later_write).depends_on(fgjob)\n try:\n ppg.run_pipegraph()\n raise ValueError(\"should not be reached\")\n except ppg.RuntimeError:\n pass\n assert read(of) == \"shu\"\n 
assert not (os.path.exists(of2))\n\n def test_attribute_disappears_after_direct_dependencies(self):\n o = Dummy()\n job = ppg.AttributeLoadingJob(\"load_dummy_shu\", o, \"a\", lambda: \"shu\")\n of = \"out/A\"\n\n def do_write():\n write(of, o.a)\n\n fgjob = ppg.FileGeneratingJob(of, do_write).depends_on(job)\n of2 = \"out/B\"\n\n def later_write():\n write(of2, o.a)\n\n fgjobB = ppg.FileGeneratingJob(of2, later_write).depends_on(\n fgjob\n ) # now, this one does not depend on job, o it should not be able to access o.a\n of3 = \"out/C\"\n\n def also_write():\n write(of3, o.a)\n\n fgjobC = ppg.FileGeneratingJob(of3, also_write).depends_on(job)\n fgjobB.depends_on(\n fgjobC\n ) # otherwise, B might be started C returned, and the cleanup will not have occured!\n try:\n ppg.run_pipegraph()\n raise ValueError(\"should not be reached\")\n except ppg.RuntimeError:\n pass\n assert read(of) == \"shu\"\n assert read(of3) == \"shu\"\n assert not (os.path.exists(of2))\n\n def test_passing_non_string_as_attribute(self):\n o = Dummy()\n\n def inner():\n ppg.AttributeLoadingJob(\"out/a\", o, 5, 55)\n\n assertRaises(ValueError, inner)\n\n def test_passing_non_function(self):\n o = Dummy()\n\n def inner():\n ppg.AttributeLoadingJob(\"out/a\", o, \"a\", 55)\n\n assertRaises(ValueError, inner)\n\n def test_passing_non_string_as_jobid(self):\n o = Dummy()\n\n def inner():\n ppg.AttributeLoadingJob(5, o, \"a\", lambda: 55)\n\n assertRaises(TypeError, inner)\n\n def test_no_swapping_attributes_for_one_job(self):\n def cache():\n return list(range(0, 100))\n\n o = Dummy()\n ppg.AttributeLoadingJob(\"out/A\", o, \"a\", cache)\n\n def inner():\n ppg.AttributeLoadingJob(\"out/A\", o, \"b\", cache)\n\n assertRaises(ppg.JobContractError, inner)\n\n def test_raises_on_non_string_attribute_name(self):\n def inner():\n o = Dummy()\n ppg.AttributeLoadingJob(\"out/A\", o, 23, lambda: 5)\n\n assertRaises(ValueError, inner)\n\n def test_raises_on_non_function_callback(self):\n def inner():\n o = Dummy()\n ppg.AttributeLoadingJob(\"out/A\", o, 23, 55)\n\n assertRaises(ValueError, inner)\n\n def test_no_swapping_objects_for_one_job(self):\n def cache():\n return list(range(0, 100))\n\n o = Dummy()\n o2 = Dummy()\n ppg.CachedAttributeLoadingJob(\"out/A\", o, \"a\", cache)\n\n def inner():\n ppg.CachedAttributeLoadingJob(\"out/A\", o2, \"a\", cache)\n\n assertRaises(ppg.JobContractError, inner)\n\n def test_ignore_code_changes(self, new_pipegraph):\n def a():\n append(\"out/Aa\", \"A\")\n return \"5\"\n\n o = Dummy()\n jobA = ppg.AttributeLoadingJob(\"out/A\", o, \"a\", a)\n jobA.ignore_code_changes()\n jobB = ppg.FileGeneratingJob(\"out/B\", lambda: write(\"out/B\", o.a))\n jobB.depends_on(jobA)\n ppg.run_pipegraph()\n assert read(\"out/Aa\") == \"A\"\n assert read(\"out/B\") == \"5\"\n new_pipegraph.new_pipegraph()\n\n def b():\n append(\"out/Aa\", \"B\")\n return \"5\"\n\n jobA = ppg.AttributeLoadingJob(\"out/A\", o, \"a\", b)\n jobA.ignore_code_changes()\n jobB = ppg.FileGeneratingJob(\"out/B\", lambda: write(\"out/B\", o.a))\n jobB.depends_on(jobA)\n ppg.run_pipegraph()\n # not rerun\n assert read(\"out/Aa\") == \"A\"\n assert read(\"out/B\") == \"5\"\n\n def test_callback_must_be_callable(self):\n def inner():\n o = Dummy()\n ppg.AttributeLoadingJob(\"load_dummy_shu\", o, \"a\", \"shu\")\n\n assertRaises(ValueError, inner)\n\n\[email protected](\"new_pipegraph\")\nclass TestTempFileGeneratingJob:\n def test_basic(self):\n temp_file = \"out/temp\"\n\n def write_temp():\n write(temp_file, \"hello\")\n\n temp_job = 
ppg.TempFileGeneratingJob(temp_file, write_temp)\n assert temp_job.is_temp_job\n ofA = \"out/A\"\n\n def write_A():\n write(ofA, read(temp_file))\n\n fgjob = ppg.FileGeneratingJob(ofA, write_A)\n fgjob.depends_on(temp_job)\n ppg.run_pipegraph()\n assert read(ofA) == \"hello\"\n assert not (os.path.exists(temp_file))\n\n def test_does_not_get_return_if_output_is_done(self, new_pipegraph):\n temp_file = \"out/temp\"\n out_file = \"out/A\"\n count_file = \"out/count\"\n normal_count_file = \"out/countA\"\n\n def write_count():\n try:\n count = read(out_file)\n count = count[: count.find(\":\")]\n except IOError:\n count = \"0\"\n count = int(count) + 1\n write(out_file, str(count) + \":\" + read(temp_file))\n append(normal_count_file, \"A\")\n\n def write_temp():\n write(temp_file, \"temp\")\n append(count_file, \"X\")\n\n jobA = ppg.FileGeneratingJob(out_file, write_count)\n jobTemp = ppg.TempFileGeneratingJob(temp_file, write_temp)\n jobA.depends_on(jobTemp)\n ppg.run_pipegraph()\n assert not (os.path.exists(temp_file))\n assert read(out_file) == \"1:temp\"\n assert read(count_file) == \"X\"\n assert read(normal_count_file) == \"A\"\n # now, rerun. Tempfile has been deleted,\n # and should not be regenerated\n new_pipegraph.new_pipegraph()\n jobA = ppg.FileGeneratingJob(out_file, write_count)\n jobTemp = ppg.TempFileGeneratingJob(temp_file, write_temp)\n jobA.depends_on(jobTemp)\n ppg.run_pipegraph()\n assert not (os.path.exists(temp_file))\n assert read(out_file) == \"1:temp\"\n assert read(count_file) == \"X\"\n assert read(normal_count_file) == \"A\"\n\n def test_does_not_get_return_if_output_is_not(self, new_pipegraph):\n temp_file = \"out/temp\"\n out_file = \"out/A\"\n count_file = \"out/count\"\n normal_count_file = \"out/countA\"\n\n def write_count():\n try:\n count = read(out_file)\n count = count[: count.find(\":\")]\n except IOError:\n count = \"0\"\n count = int(count) + 1\n write(out_file, str(count) + \":\" + read(temp_file))\n append(normal_count_file, \"A\")\n\n def write_temp():\n write(temp_file, \"temp\")\n append(count_file, \"X\")\n\n jobA = ppg.FileGeneratingJob(out_file, write_count)\n jobTemp = ppg.TempFileGeneratingJob(temp_file, write_temp)\n jobA.depends_on(jobTemp)\n ppg.run_pipegraph()\n assert not (os.path.exists(temp_file))\n assert read(out_file) == \"1:temp\"\n assert read(count_file) == \"X\"\n assert read(normal_count_file) == \"A\"\n # now, rerun. 
Tempfile has been deleted,\n # and should be regenerated\n os.unlink(out_file)\n new_pipegraph.new_pipegraph()\n jobA = ppg.FileGeneratingJob(out_file, write_count)\n jobTemp = ppg.TempFileGeneratingJob(temp_file, write_temp)\n jobA.depends_on(jobTemp)\n ppg.run_pipegraph()\n assert not (os.path.exists(temp_file))\n assert read(out_file) == \"1:temp\" # since the outfile was removed...\n assert read(count_file) == \"XX\"\n assert read(normal_count_file) == \"AA\"\n\n def test_dependand_explodes(self, new_pipegraph):\n temp_file = \"out/temp\"\n\n def write_temp():\n append(temp_file, \"hello\")\n\n temp_job = ppg.TempFileGeneratingJob(temp_file, write_temp)\n ofA = \"out/A\"\n\n def write_A():\n raise ValueError(\"shu\")\n\n fgjob = ppg.FileGeneratingJob(ofA, write_A)\n fgjob.depends_on(temp_job)\n try:\n ppg.run_pipegraph()\n raise ValueError(\"should not be reached\")\n except ppg.RuntimeError:\n pass\n # ppg.run_pipegraph()\n assert not (os.path.exists(ofA))\n assert os.path.exists(temp_file)\n\n new_pipegraph.new_pipegraph()\n\n def write_A_ok():\n write(ofA, read(temp_file))\n\n temp_job = ppg.TempFileGeneratingJob(temp_file, write_temp)\n fgjob = ppg.FileGeneratingJob(ofA, write_A_ok)\n fgjob.depends_on(temp_job)\n ppg.run_pipegraph()\n\n assert read(ofA) == \"hello\" # tempfile job has not been rerun\n assert not (os.path.exists(temp_file)) # and the tempfile has been removed...\n\n def test_removes_tempfile_on_exception(self):\n temp_file = \"out/temp\"\n\n def write_temp():\n write(temp_file, \"hello\")\n raise ValueError(\"should\")\n\n temp_job = ppg.TempFileGeneratingJob(temp_file, write_temp)\n ofA = \"out/A\"\n\n def write_A():\n write(ofA, read(temp_file))\n\n fgjob = ppg.FileGeneratingJob(ofA, write_A)\n fgjob.depends_on(temp_job)\n try:\n ppg.run_pipegraph()\n raise ValueError(\"should not be reached\")\n except ppg.RuntimeError:\n pass\n assert not (os.path.exists(temp_file))\n assert not (os.path.exists(ofA))\n\n def test_renames_tempfile_on_exception_if_requested(self):\n temp_file = \"out/temp\"\n\n def write_temp():\n write(temp_file, \"hello\")\n raise ValueError(\"should\")\n\n temp_job = ppg.TempFileGeneratingJob(temp_file, write_temp, rename_broken=True)\n ofA = \"out/A\"\n\n def write_A():\n write(ofA, read(temp_file))\n\n fgjob = ppg.FileGeneratingJob(ofA, write_A)\n fgjob.depends_on(temp_job)\n try:\n ppg.run_pipegraph()\n raise ValueError(\"should not be reached\")\n except ppg.RuntimeError:\n pass\n assert not (os.path.exists(temp_file))\n assert os.path.exists(temp_file + \".broken\")\n assert not (os.path.exists(ofA))\n\n def test_passing_non_function(self):\n def inner():\n ppg.TempFileGeneratingJob(\"out/a\", \"shu\")\n\n assertRaises(ValueError, inner)\n\n def test_passing_non_string_as_jobid(self):\n def inner():\n ppg.TempFileGeneratingJob(5, lambda: 1)\n\n assertRaises(TypeError, inner)\n\n def test_rerun_because_of_new_dependency_does_not_rerun_old(self, new_pipegraph):\n jobA = ppg.FileGeneratingJob(\n \"out/A\", lambda: append(\"out/A\", read(\"out/temp\")) or append(\"out/Ab\", \"A\")\n )\n jobB = ppg.TempFileGeneratingJob(\"out/temp\", lambda: write(\"out/temp\", \"T\"))\n jobA.depends_on(jobB)\n ppg.run_pipegraph()\n assert not (os.path.exists(\"out/temp\"))\n assert read(\"out/A\") == \"T\"\n assert read(\"out/Ab\") == \"A\" # ran once\n\n new_pipegraph.new_pipegraph()\n jobA = ppg.FileGeneratingJob(\"out/A\", lambda: append(\"out/A\", read(\"out/temp\")))\n jobB = ppg.TempFileGeneratingJob(\"out/temp\", lambda: write(\"out/temp\", \"T\"))\n 
jobA.depends_on(jobB)\n jobC = ppg.FileGeneratingJob(\"out/C\", lambda: append(\"out/C\", read(\"out/temp\")))\n jobC.depends_on(jobB)\n ppg.run_pipegraph()\n assert not (os.path.exists(\"out/temp\"))\n assert read(\"out/Ab\") == \"A\" # ran once, not rewritten\n assert read(\"out/C\") == \"T\" # a new file\n\n def test_chaining_multiple(self):\n jobA = ppg.TempFileGeneratingJob(\"out/A\", lambda: write(\"out/A\", \"A\"))\n jobB = ppg.TempFileGeneratingJob(\n \"out/B\", lambda: write(\"out/B\", read(\"out/A\") + \"B\")\n )\n jobC = ppg.TempFileGeneratingJob(\n \"out/C\", lambda: write(\"out/C\", read(\"out/A\") + \"C\")\n )\n jobD = ppg.FileGeneratingJob(\n \"out/D\", lambda: write(\"out/D\", read(\"out/B\") + read(\"out/C\"))\n )\n jobD.depends_on(jobC)\n jobD.depends_on(jobB)\n jobC.depends_on(jobA)\n jobB.depends_on(jobA)\n ppg.run_pipegraph()\n assert read(\"out/D\") == \"ABAC\"\n assert not (os.path.exists(\"out/A\"))\n assert not (os.path.exists(\"out/B\"))\n assert not (os.path.exists(\"out/C\"))\n\n def test_chaining_multiple_differently(self):\n jobA = ppg.TempFileGeneratingJob(\"out/A\", lambda: write(\"out/A\", \"A\"))\n jobB = ppg.TempFileGeneratingJob(\n \"out/B\", lambda: write(\"out/B\", read(\"out/A\") + \"B\")\n )\n jobD = ppg.FileGeneratingJob(\n \"out/D\", lambda: write(\"out/D\", read(\"out/B\") + \"D\")\n )\n jobE = ppg.FileGeneratingJob(\n \"out/E\", lambda: write(\"out/E\", read(\"out/B\") + \"E\")\n )\n jobF = ppg.FileGeneratingJob(\n \"out/F\", lambda: write(\"out/F\", read(\"out/A\") + \"F\")\n )\n jobD.depends_on(jobB)\n jobE.depends_on(jobB)\n jobB.depends_on(jobA)\n jobF.depends_on(jobA)\n ppg.run_pipegraph()\n assert read(\"out/D\") == \"ABD\"\n assert read(\"out/E\") == \"ABE\"\n assert read(\"out/F\") == \"AF\"\n assert not (os.path.exists(\"out/A\"))\n assert not (os.path.exists(\"out/B\"))\n assert not (os.path.exists(\"out/C\"))\n\n def test_rerun_because_of_new_dependency_does_not_rerun_old_chained(\n self, new_pipegraph\n ):\n jobA = ppg.TempFileGeneratingJob(\"out/A\", lambda: write(\"out/A\", \"A\"))\n jobB = ppg.TempFileGeneratingJob(\n \"out/B\", lambda: write(\"out/B\", read(\"out/A\") + \"B\")\n )\n jobC = ppg.FileGeneratingJob(\n \"out/C\",\n lambda: write(\"out/C\", read(\"out/B\") + \"C\") or append(\"out/Cx\", \"1\"),\n )\n jobB.depends_on(jobA)\n jobC.depends_on(jobB)\n ppg.run_pipegraph()\n assert read(\"out/C\") == \"ABC\"\n assert read(\"out/Cx\") == \"1\"\n\n new_pipegraph.new_pipegraph()\n jobA = ppg.TempFileGeneratingJob(\"out/A\", lambda: write(\"out/A\", \"A\"))\n jobB = ppg.TempFileGeneratingJob(\n \"out/B\", lambda: write(\"out/B\", read(\"out/A\") + \"B\")\n )\n jobC = ppg.FileGeneratingJob(\n \"out/C\",\n lambda: write(\"out/C\", read(\"out/B\") + \"C\") or append(\"out/Cx\", \"1\"),\n )\n jobD = ppg.FileGeneratingJob(\n \"out/D\",\n lambda: write(\"out/D\", read(\"out/A\") + \"D\") or append(\"out/Dx\", \"1\"),\n )\n jobB.depends_on(jobA)\n jobC.depends_on(jobB)\n jobD.depends_on(jobA)\n ppg.run_pipegraph()\n assert read(\"out/D\") == \"AD\"\n assert read(\"out/Dx\") == \"1\"\n assert read(\"out/C\") == \"ABC\"\n assert read(\"out/Cx\") == \"1\"\n\n new_pipegraph.new_pipegraph()\n jobA = ppg.TempFileGeneratingJob(\n \"out/A\", lambda: write(\"out/A\", \"a\")\n ) # note changing function code!\n jobB = ppg.TempFileGeneratingJob(\n \"out/B\", lambda: write(\"out/B\", read(\"out/A\") + \"B\")\n )\n jobC = ppg.FileGeneratingJob(\n \"out/C\",\n lambda: write(\"out/C\", read(\"out/B\") + \"C\") or append(\"out/Cx\", \"1\"),\n )\n jobD 
= ppg.FileGeneratingJob(\n \"out/D\",\n lambda: write(\"out/D\", read(\"out/A\") + \"D\") or append(\"out/Dx\", \"1\"),\n )\n jobB.depends_on(jobA)\n jobC.depends_on(jobB)\n jobD.depends_on(jobA)\n ppg.run_pipegraph()\n assert read(\"out/D\") == \"aD\"\n assert read(\"out/Dx\") == \"11\" # both get rerun\n assert read(\"out/C\") == \"aBC\"\n assert read(\"out/Cx\") == \"11\"\n\n def test_cleanup_if_never_run(self, new_pipegraph):\n temp_file = \"out/temp\"\n\n def write_temp():\n write(temp_file, \"hello\")\n\n def write_a():\n write(\"A\", \"hello\")\n\n temp_job = ppg.TempFileGeneratingJob(temp_file, write_temp)\n temp_job.ignore_code_changes()\n jobA = ppg.FileGeneratingJob(\"A\", write_a)\n jobA.ignore_code_changes()\n write_a() # so the file is there!\n ppg.run_pipegraph()\n assert not (os.path.exists(\"out/temp\"))\n new_pipegraph.new_pipegraph()\n write_temp()\n assert os.path.exists(\"out/temp\")\n temp_job = ppg.TempFileGeneratingJob(temp_file, write_temp)\n temp_job.ignore_code_changes()\n temp_job.do_cleanup_if_was_never_run = True\n ppg.run_pipegraph()\n assert not (os.path.exists(\"out/temp\"))\n\n\[email protected](\"new_pipegraph\")\nclass TestMultiTempFileGeneratingJob:\n def test_basic(self):\n temp_files = [\"out/temp\", \"out/temp2\"]\n\n def write_temp():\n for temp_file in temp_files:\n write(temp_file, \"hello\")\n\n temp_job = ppg.MultiTempFileGeneratingJob(temp_files, write_temp)\n assert temp_job.is_temp_job\n ofA = \"out/A\"\n\n def write_A():\n write(ofA, read(temp_files[0]) + read(temp_files[1]))\n\n fgjob = ppg.FileGeneratingJob(ofA, write_A)\n fgjob.depends_on(temp_job)\n ppg.run_pipegraph()\n assert read(ofA) == \"hellohello\"\n assert not (os.path.exists(temp_files[0]))\n assert not (os.path.exists(temp_files[1]))\n\n def test_basic_dependes_were_done(self):\n temp_files = [\"out/temp\", \"out/temp2\"]\n\n def write_temp():\n write(\"temp_sentinel\", \"one\")\n for temp_file in temp_files:\n write(temp_file, \"hello\")\n\n temp_job = ppg.MultiTempFileGeneratingJob(temp_files, write_temp)\n temp_job.ignore_code_changes()\n assert temp_job.is_temp_job\n ofA = \"out/A\"\n\n def write_A():\n write(ofA, read(temp_files[0]) + read(temp_files[1]))\n\n fgjob = ppg.FileGeneratingJob(ofA, write_A)\n fgjob.ignore_code_changes()\n fgjob.depends_on(temp_job)\n write(ofA, \"two\")\n ppg.run_pipegraph()\n assert read(ofA) == \"two\"\n assert not os.path.exists(\"temp_sentinel\")\n\n def raises_on_non_string_filnames(self):\n def inner():\n ppg.MultiTempFileGeneratingJob([\"one\", 2], lambda: write(\"out/A\"))\n\n assertRaises(ValueError, inner)\n\n def test_raises_on_collision(self):\n def inner():\n ppg.MultiTempFileGeneratingJob([\"test1\", \"test2\"], lambda: 5)\n ppg.MultiTempFileGeneratingJob([\"test2\", \"test3\"], lambda: 5)\n\n assertRaises(ValueError, inner)\n\n def test_duplicate_prevention(self):\n param = \"A\"\n ppg.FileGeneratingJob(\"out/A\", lambda: write(\"out/A\", param))\n\n def inner():\n ppg.MultiTempFileGeneratingJob([\"out/A\"], lambda: write(\"out/A\", param))\n\n assertRaises(ValueError, inner)\n\n def test_non_str(self):\n param = \"A\"\n\n def inner():\n ppg.MultiTempFileGeneratingJob([25], lambda: write(\"out/A\", param))\n\n assertRaises(TypeError, inner)\n\n def test_non_iterable(self):\n param = \"A\"\n try:\n ppg.MultiTempFileGeneratingJob(25, lambda: write(\"out/A\", param))\n assert not (\"Exception not raised\")\n except TypeError as e:\n print(e)\n assert \"filenames was not iterable\" in str(e)\n\n\[email 
protected](\"new_pipegraph\")\nclass TestTempFilePlusGeneratingJob:\n def test_basic(self):\n ppg.new_pipegraph(quiet=False, dump_graph=False)\n temp_file = \"out/temp\"\n keep_file = \"out/keep\"\n\n def write_temp():\n write(temp_file, \"hello\")\n write(keep_file, \"hello\")\n\n temp_job = ppg.TempFilePlusGeneratingJob(temp_file, keep_file, write_temp)\n assert temp_job.is_temp_job\n ofA = \"out/A\"\n\n def write_A():\n write(ofA, read(temp_file))\n\n fgjob = ppg.FileGeneratingJob(ofA, write_A)\n fgjob.depends_on(temp_job)\n ppg.run_pipegraph()\n assert read(ofA) == \"hello\"\n assert not (os.path.exists(temp_file))\n assert os.path.exists(keep_file)\n\n def test_not_run_if_dependands_all_done(self):\n ppg.new_pipegraph(quiet=False, dump_graph=False)\n temp_file = \"out/temp\"\n keep_file = \"out/keep\"\n\n def write_temp():\n write(temp_file, \"hello\")\n write(keep_file, \"hello\")\n\n temp_job = ppg.TempFilePlusGeneratingJob(temp_file, keep_file, write_temp)\n temp_job.ignore_code_changes()\n write(keep_file, \"keepme\")\n ofA = \"out/A\"\n\n def write_A():\n write(ofA, read(temp_file))\n\n fgjob = ppg.FileGeneratingJob(ofA, write_A)\n fgjob.ignore_code_changes()\n write(ofA, \"two\")\n fgjob.depends_on(temp_job)\n ppg.run_pipegraph()\n assert read(ofA) == \"two\"\n assert not (os.path.exists(temp_file))\n assert read(keep_file) == \"keepme\"\n\n def test_raises_on_keep_equal_temp_file(self):\n temp_file = \"out/temp\"\n keep_file = temp_file\n\n def write_temp():\n write(temp_file, \"hello\")\n write(keep_file, \"hello\")\n\n def inner():\n ppg.TempFilePlusGeneratingJob(temp_file, keep_file, write_temp)\n\n assertRaises(ValueError, inner)\n\n def test_does_not_get_return_if_output_is_done(self, new_pipegraph):\n temp_file = \"out/temp\"\n keep_file = \"out/keep\"\n out_file = \"out/A\"\n count_file = \"out/count\"\n normal_count_file = \"out/countA\"\n\n def write_count():\n try:\n count = read(out_file)\n count = count[: count.find(\":\")]\n except IOError:\n count = \"0\"\n count = int(count) + 1\n write(out_file, str(count) + \":\" + read(temp_file))\n append(normal_count_file, \"A\")\n\n def write_temp():\n write(temp_file, \"temp\")\n write(keep_file, \"temp\")\n append(count_file, \"X\")\n\n jobA = ppg.FileGeneratingJob(out_file, write_count)\n jobTemp = ppg.TempFilePlusGeneratingJob(temp_file, keep_file, write_temp)\n jobA.depends_on(jobTemp)\n ppg.run_pipegraph()\n assert not (os.path.exists(temp_file))\n assert read(out_file) == \"1:temp\"\n assert read(count_file) == \"X\"\n assert read(normal_count_file) == \"A\"\n assert os.path.exists(keep_file)\n # now, rerun. 
Tempfile has been deleted,\n # and should not be regenerated\n new_pipegraph.new_pipegraph()\n jobA = ppg.FileGeneratingJob(out_file, write_count)\n jobTemp = ppg.TempFilePlusGeneratingJob(temp_file, keep_file, write_temp)\n jobA.depends_on(jobTemp)\n ppg.run_pipegraph()\n assert not (os.path.exists(temp_file))\n assert read(out_file) == \"1:temp\"\n assert read(count_file) == \"X\"\n assert read(normal_count_file) == \"A\"\n assert os.path.exists(keep_file)\n\n def test_fails_if_keep_file_is_not_generated(self):\n temp_file = \"out/temp\"\n keep_file = \"out/keep\"\n out_file = \"out/A\"\n count_file = \"out/count\"\n normal_count_file = \"out/countA\"\n\n def write_count():\n try:\n count = read(out_file)\n count = count[: count.find(\":\")]\n except IOError:\n count = \"0\"\n count = int(count) + 1\n write(out_file, str(count) + \":\" + read(temp_file))\n append(normal_count_file, \"A\")\n\n def write_temp():\n write(temp_file, \"temp\")\n # write(keep_file, 'temp')\n append(count_file, \"X\")\n\n jobA = ppg.FileGeneratingJob(out_file, write_count)\n jobTemp = ppg.TempFilePlusGeneratingJob(temp_file, keep_file, write_temp)\n jobA.depends_on(jobTemp)\n\n def inner():\n ppg.run_pipegraph()\n\n assertRaises(ppg.RuntimeError, inner)\n assert not (os.path.exists(out_file))\n assert not (os.path.exists(keep_file))\n assert os.path.exists(temp_file)\n\n def test_does_get_rerun_if_keep_file_is_gone(self, new_pipegraph):\n temp_file = \"out/temp\"\n keep_file = \"out/keep\"\n out_file = \"out/A\"\n count_file = \"out/count\"\n normal_count_file = \"out/countA\"\n\n def write_count():\n try:\n count = read(out_file)\n count = count[: count.find(\":\")]\n except IOError:\n count = \"0\"\n count = int(count) + 1\n write(out_file, str(count) + \":\" + read(temp_file))\n append(normal_count_file, \"A\")\n\n def write_temp():\n write(temp_file, \"temp\")\n write(keep_file, \"temp\")\n append(count_file, \"X\")\n\n jobA = ppg.FileGeneratingJob(out_file, write_count)\n jobTemp = ppg.TempFilePlusGeneratingJob(temp_file, keep_file, write_temp)\n jobA.depends_on(jobTemp)\n ppg.run_pipegraph()\n assert not (os.path.exists(temp_file))\n assert read(out_file) == \"1:temp\"\n assert read(count_file) == \"X\"\n assert read(normal_count_file) == \"A\"\n assert os.path.exists(keep_file)\n os.unlink(keep_file)\n new_pipegraph.new_pipegraph()\n jobA = ppg.FileGeneratingJob(out_file, write_count)\n jobTemp = ppg.TempFilePlusGeneratingJob(temp_file, keep_file, write_temp)\n jobA.depends_on(jobTemp)\n ppg.run_pipegraph()\n assert read(out_file) == \"1:temp\"\n assert read(count_file) == \"XX\" # where we see the temp file job ran again\n assert read(normal_count_file) == \"AA\" # which is where we see it ran again...\n assert os.path.exists(keep_file)\n\n\[email protected](\"new_pipegraph\")\nclass TestFinalJobs:\n def test_correct_dependencies(self):\n o = Dummy()\n\n def a():\n o.a = \"A\"\n append(\"out/A\", \"A\")\n\n def b():\n append(\"out/B\", \"B\")\n append(\"out/Bx\", \"B\")\n\n def c():\n o.c = \"C\"\n append(\"out/C\", \"C\")\n\n def d():\n append(\"out/D\", \"D\")\n append(\"out/Dx\", \"D\")\n\n jobA = ppg.DataLoadingJob(\"out/A\", a)\n jobB = ppg.FileGeneratingJob(\"out/B\", b)\n jobC = ppg.DataLoadingJob(\"out/C\", c)\n jobD = ppg.FileGeneratingJob(\"out/D\", d)\n # jobD.depends_on(jobC)\n # jobC.depends_on(jobB)\n jobB.depends_on(jobA)\n final_job = ppg.FinalJob(\n \"da_final\",\n lambda: write(\n \"out/final\", \"done\" + read(\"out/A\") + read(\"out/B\") + read(\"out/D\")\n ),\n )\n 
ppg.util.global_pipegraph.connect_graph()\n print(final_job.prerequisites)\n for x in jobB, jobC, jobD:\n assert x in final_job.prerequisites\n assert final_job in x.dependants\n assert not (jobA in final_job.prerequisites)\n ppg.run_pipegraph()\n assert read(\"out/final\") == \"doneABD\"\n assert not os.path.exists(\n \"out/C\"\n ) # dataloading job does not get run just because of FinalJob\n\n def test_cannot_depend_on_final_job(self):\n jobA = ppg.FileGeneratingJob(\"A\", lambda: write(\"A\", \"A\"))\n final_job = ppg.FinalJob(\"da_final\", lambda: None)\n try:\n jobA.depends_on(final_job)\n assert not (\"Exception not raised\")\n except ppg.JobContractError as e:\n print(e)\n assert \"No jobs can depend on FinalJobs\" in str(e)\n\n def test_final_job_depends_on_raises(self):\n jobA = ppg.FileGeneratingJob(\"A\", lambda: write(\"A\", \"A\"))\n final_job = ppg.FinalJob(\"da_final\", lambda: None)\n try:\n final_job.depends_on(jobA)\n assert not (\"Exception not raised\")\n except ppg.JobContractError as e:\n print(e)\n assert \"Final jobs can not have explicit dependencies \" in str(e)\n\n\[email protected](\"new_pipegraph\")\nclass TestJobList:\n def raises_on_non_job(self):\n def inner():\n ppg.JobList(\"shu\")\n\n assertRaises(ValueError, inner)\n\n def test_raises_on_non_job_in_list(self):\n with pytest.raises(ValueError):\n ppg.JobList([ppg.ParameterInvariant(\"a\", 23), \"shu\"])\n\n def test_add(self):\n jobA = ppg.FileGeneratingJob(\"A\", lambda: write(\"A\", \"A\"))\n jobB = ppg.FileGeneratingJob(\"B\", lambda: write(\"A\", \"A\"))\n jobC = ppg.FileGeneratingJob(\"C\", lambda: write(\"C\", \"A\"))\n jobD = ppg.FileGeneratingJob(\"D\", lambda: write(\"D\", \"A\"))\n l1 = ppg.JobList([jobA])\n l2 = l1 + jobB\n assert len(l2) == 2\n l2 = l1 + jobB\n assert len(l2) == 2\n l3 = l2 + ppg.JobList(jobC)\n assert len(l3) == 3\n l4 = l3 + [jobD]\n assert len(l4) == 4\n\n def test_depends_on(self):\n jobA = ppg.FileGeneratingJob(\"A\", lambda: write(\"A\", \"A\"))\n jobB = ppg.FileGeneratingJob(\"B\", lambda: write(\"A\", \"A\"))\n jobC = ppg.FileGeneratingJob(\"C\", lambda: write(\"C\", \"A\"))\n l1 = ppg.JobList([jobA, jobB])\n l1.depends_on(jobC)\n assert jobC in jobA.prerequisites\n assert jobC in jobB.prerequisites\n ppg.util.global_pipegraph.connect_graph()\n assert jobA in jobC.dependants\n assert jobB in jobC.dependants\n\n def test_depends_on_list(self):\n jobA = ppg.FileGeneratingJob(\"A\", lambda: write(\"A\", \"A\"))\n jobB = ppg.FileGeneratingJob(\"B\", lambda: write(\"A\", \"A\"))\n jobC = ppg.FileGeneratingJob(\"C\", lambda: write(\"C\", \"A\"))\n l1 = ppg.JobList([jobC])\n l1.depends_on([jobA, jobB])\n assert jobA in jobC.prerequisites\n assert jobB in jobC.prerequisites\n ppg.util.global_pipegraph.connect_graph()\n assert jobC in jobA.dependants\n assert jobC in jobB.dependants\n\n def test_depends_on_args(self):\n jobA = ppg.FileGeneratingJob(\"A\", lambda: write(\"A\", \"A\"))\n jobB = ppg.FileGeneratingJob(\"B\", lambda: write(\"A\", \"A\"))\n jobC = ppg.FileGeneratingJob(\"C\", lambda: write(\"C\", \"A\"))\n l1 = ppg.JobList([jobC])\n l1.depends_on(jobA, jobB)\n assert jobA in jobC.prerequisites\n assert jobB in jobC.prerequisites\n ppg.util.global_pipegraph.connect_graph()\n assert jobC in jobA.dependants\n assert jobC in jobB.dependants\n\n def test_str(self):\n jobA = ppg.FileGeneratingJob(\"A\", lambda: write(\"A\", \"A\"))\n l1 = ppg.JobList([jobA])\n x = str(l1)\n assert x.startswith(\"JobList of \")\n\n def test_adding_two_jobs(self):\n jobA = 
ppg.FileGeneratingJob(\"A\", lambda: write(\"A\", \"A\"))\n jobB = ppg.FileGeneratingJob(\"B\", lambda: write(\"A\", \"A\"))\n x = jobA + jobB\n assert isinstance(x, ppg.JobList)\n assert len(x) == 2\n\n\[email protected](\"new_pipegraph\")\nclass TestNotebookJobs:\n def write_notebook(self, filename, payload):\n with open(filename, \"w\") as op:\n op.write(\n \"\"\"{\n \"cells\": [\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"with open(\\\\\"out/A\\\\\", \\\\\"w\\\\\") as op:\\\\n\",\n \" op.write(\\\\\"%s\\\\\")\\\\n\",\n \"with open(\\\\\"out/tag\\\\\", \\\\\"a+\\\\\") as op:\\\\n\",\n \" op.write(\\\\\"x\\\\\")\\\\n\"\n ]\n }\n ],\n \"metadata\": {\n \"kernelspec\": {\n \"display_name\": \"Python 3\",\n \"language\": \"python\",\n \"name\": \"python3\"\n },\n \"language_info\": {\n \"codemirror_mode\": {\n \"name\": \"ipython\",\n \"version\": 3\n },\n \"file_extension\": \".py\",\n \"mimetype\": \"text/x-python\",\n \"name\": \"python\",\n \"nbconvert_exporter\": \"python\",\n \"pygments_lexer\": \"ipython3\",\n \"version\": \"3.6.6\"\n }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 2\n}\"\"\"\n % (payload,)\n )\n\n def test_notebook(self, new_pipegraph):\n self.write_notebook(\"out/test.ipynb\", \"hello\")\n ppg.NotebookJob(\"out/test.ipynb\", False)\n ppg.run_pipegraph()\n assert read(\"out/A\") == \"hello\"\n assert read(\"out/tag\") == \"x\"\n\n new_pipegraph.new_pipegraph()\n ppg.NotebookJob(\"out/test.ipynb\", False)\n ppg.run_pipegraph()\n assert read(\"out/A\") == \"hello\"\n assert read(\"out/tag\") == \"x\" # no rerun\n\n new_pipegraph.new_pipegraph()\n self.write_notebook(\"out/test.ipynb\", \"world\")\n ppg.NotebookJob(\"out/test.ipynb\", False)\n ppg.run_pipegraph()\n assert read(\"out/tag\") == \"xx\" # rerun\n assert read(\"out/A\") == \"world\"\n\n new_pipegraph.new_pipegraph()\n ppg.NotebookJob(\"out/test.ipynb\", True)\n ppg.run_pipegraph()\n assert read(\"out/A\") == \"world\"\n assert read(\"out/tag\") == \"xx\" # no rerun\n\n new_pipegraph.new_pipegraph()\n ppg.NotebookJob(\"out/test.ipynb\", True)\n ppg.MultiFileGeneratingJob([\"world\"], lambda: write(\"world\", \"two\"))\n ppg.run_pipegraph()\n assert read(\"out/A\") == \"world\"\n assert read(\"out/tag\") == \"xxx\" # no rerun\n\n def test_func_returning_same_job(self):\n def get_job():\n def load():\n return self.id\n\n return ppg.MultiFileGeneratingJob([\"shu\"], load)\n\n a = get_job()\n b = get_job()\n assert a is b\n\n def test_func_returning_different(self):\n first = [True]\n\n def get_job():\n def load():\n return self.id\n\n def load2():\n return self.id\n\n ppg.MultiFileGeneratingJob([\"shu\"], load if first[0] else load2)\n first[0] = False\n\n get_job()\n with pytest.raises(ValueError):\n get_job()\n\n def test_use_cores(self):\n j = ppg.FileGeneratingJob(\"a\", lambda: None)\n assert j.cores_needed == 1\n assert j.use_cores(5) is j\n assert j.cores_needed == 5\n", "id": "9619236", "language": "Python", "matching_score": 9.758336067199707, "max_stars_count": 0, "path": "tests/test_jobs.py" }, { "content": "from pathlib import Path\nimport sys\nimport pytest\nimport pypipegraph2 as ppg\nfrom .shared import write, read, Dummy, append, counter\n\nglobal_test = 0\n\n\[email protected](\"ppg2_per_test\")\nclass TestJobs:\n def test_assert_singletonicity_of_jobs(self):\n ppg.new()\n of = \"out/a\"\n data_to_write = \"hello\"\n\n def do_write(of):\n write(of, data_to_write)\n\n job = ppg.FileGeneratingJob(of, do_write)\n job2 = 
ppg.FileGeneratingJob(of, do_write)\n assert job is job2 # change from ppg1\n\n def test_add_job_twice_is_harmless(self):\n job = ppg.FileGeneratingJob(\"A\", lambda of: 5)\n # implicit add\n assert job.job_id in ppg.global_pipegraph.jobs\n assert ppg.global_pipegraph.jobs[job.job_id] is job\n ppg.global_pipegraph.add(job)\n assert job.job_id in ppg.global_pipegraph.jobs\n assert ppg.global_pipegraph.jobs[job.job_id] is job\n\n def test_redefining_a_jobid_with_different_class_raises(self):\n ppg.new()\n of = \"out/a\"\n data_to_write = \"hello\"\n\n def do_write(of):\n write(of, data_to_write)\n\n ppg.FileGeneratingJob(of, do_write)\n\n def load():\n return \"shu\"\n\n with pytest.raises(ValueError):\n ppg.DataLoadingJob(of, load)\n\n def test_raises_on_non_str_job_id(self):\n with pytest.raises(TypeError):\n ppg.FileGeneratingJob(1234, lambda of: None)\n\n def test_auto_name(self):\n def inner():\n pass\n\n a = ppg.FunctionInvariant(inner)\n assert a.job_id == \"FITestJobs.test_auto_name.<locals>.inner\"\n with pytest.raises(TypeError):\n ppg.FunctionInvariant(lambda: 55)\n with pytest.raises(TypeError):\n ppg.FunctionInvariant(None)\n\n def test_equality_is_identity(self):\n def write_func(of):\n def do_write(of):\n write(of, \"do_write done\")\n\n return of, do_write\n\n ppg.new()\n jobA = ppg.FileGeneratingJob(*write_func(\"out/a\"))\n jobA1 = ppg.FileGeneratingJob(*write_func(\"out/a\"))\n jobB = ppg.FileGeneratingJob(*write_func(\"out/b\"))\n assert jobA is jobA1\n assert jobA == jobA1\n assert not (jobA == jobB)\n\n def test_has_hash(self):\n ppg.new()\n jobA = ppg.FileGeneratingJob(\"out/\", lambda of: None)\n assert hasattr(jobA, \"__hash__\")\n\n def test_repeated_job_definition_and_dependency_callbacks(self, ppg2_per_test):\n def func(of):\n of.write_text(\"a\")\n\n a = ppg.FileGeneratingJob(\"a\", func)\n a.depends_on(lambda: counter(\"ac\") and None)\n assert len(a.dependency_callbacks) == 1\n assert not Path(\"ac\").exists()\n a = ppg.FileGeneratingJob(\"a\", func)\n a.depends_on(lambda: counter(\"ac\") and counter(\"bc\") and None) # 2nd dependency\n assert len(a.dependency_callbacks) == 2\n assert not Path(\"ac\").exists()\n ppg.run()\n assert read(\"ac\") == \"2\"\n assert read(\"bc\") == \"1\"\n\n def test_dependency_callback_plus_job(self):\n data = []\n def load_a():\n data.append('a')\n a = ppg.DataLoadingJob('A', load_a)\n b = lambda: ppg.FileGeneratingJob('B', lambda of: of.write_text('b'))\n c = ppg.FileGeneratingJob('C', lambda of: \n of.write_text(Path('B').read_text() + data[0]))\n c.depends_on(b, a)\n ppg.run()\n\n def test_data_loading_MultiFile_dowstream(self, job_trace_log):\n def tf(ofs):\n counter(\"A\")\n ofs[0].write_text(\"a1\")\n ofs[1].write_text(\"a1\")\n\n a = ppg.MultiTempFileGeneratingJob([\"a1\", \"a2\"], tf)\n\n def write(ofs):\n counter(\"B\")\n ofs[0].write_text(\"b\")\n ofs[1].write_text(\"c\" + read(\"a1\"))\n\n bc = ppg.MultiFileGeneratingJob([\"b\", \"c\"], write)\n bc.depends_on(a)\n bc()\n assert read(\"c\") == \"ca1\"\n assert read(\"b\") == \"b\"\n assert read(\"A\") == \"1\"\n assert read(\"B\") == \"1\"\n ppg.run()\n assert read(\"c\") == \"ca1\"\n assert read(\"b\") == \"b\"\n assert read(\"B\") == \"1\"\n assert read(\"A\") == \"1\"\n\n\[email protected](\"ppg2_per_test\")\nclass TestJobs2:\n def test_str(self):\n a = ppg.FileGeneratingJob(\"out/A\", lambda of: write(\"out/A\", \"hello\"))\n assert isinstance(str(a), str)\n\n a = ppg.ParameterInvariant(\"out/A\", \"hello\")\n assert isinstance(str(a), str)\n\n a = 
ppg.JobGeneratingJob(\"out/Ax\", lambda: \"hello\")\n assert isinstance(str(a), str)\n\n\[email protected](\"create_out_dir\")\[email protected](\"ppg2_per_test\")\nclass TestFileGeneratingJob:\n def test_basic(self):\n of = \"out/a\"\n data_to_write = \"hello\"\n\n def do_write(ofof):\n print(\"do_write was called\")\n write(of, data_to_write)\n\n job = ppg.FileGeneratingJob(of, do_write, depend_on_function=False)\n res = ppg.run()\n assert res[job.job_id].outcome == ppg.enums.JobOutcome.Success\n assert Path(of).exists()\n assert read(of) == data_to_write\n\n def test_cores_needed(self):\n ppg.new(cores=5)\n of = \"out/a\"\n data_to_write = \"hello\"\n\n def do_write(ofof):\n print(\"do_write was called\")\n write(of, data_to_write)\n\n job = ppg.FileGeneratingJob(of, do_write, depend_on_function=False)\n with pytest.raises(TypeError):\n job.use_resources(0)\n job.cores_needed = ppg.Resources.SingleCore\n job.cores_needed = ppg.Resources.AllCores\n job.cores_needed = ppg.Resources.Exclusive\n\n for i in range(10):\n j = ppg.FileGeneratingJob(\"out/%i\" % i, lambda of, i=i: write(of, f\"b{i}\"))\n if i % 2 == 0:\n j.cores_needed = ppg.Resources.Exclusive\n\n ppg.run()\n assert read(of) == data_to_write\n for i in range(10):\n assert read(f\"out/{i}\") == f\"b{i}\"\n\n def test_cores_needed2(self):\n ppg.new(cores=3)\n # this is meant to trigger the\n # \"this job needed to much resources, or was not runnable\"\n # case of graph.start_jobs\n\n for i in range(20):\n j = ppg.FileGeneratingJob(\"out/%i\" % i, lambda of, i=i: write(of, f\"b{i}\"))\n if i % 2 == 0:\n j.use_resources(ppg.Resources.AllCores)\n else:\n j.use_resources(ppg.Resources.Exclusive)\n\n ppg.run()\n for i in range(20):\n assert read(f\"out/{i}\") == f\"b{i}\"\n\n def test_misspecified_job_does_not_hang_graph(self):\n # this occurred during development\n ppg.new(cores=3)\n\n for i in range(1):\n j = ppg.FileGeneratingJob(\n \"out/%i\" % i,\n lambda i=i: write(\"out/%i\" % i, \"b\"),\n depend_on_function=False,\n )\n\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert \"a number is required, not PosixPath\" in str(\n ppg.global_pipegraph.last_run_result[\"out/0\"].error\n )\n\n def test_job_not_creating_its_file(self):\n # this occurred during development\n ppg.new(cores=3)\n\n for i in range(1):\n j = ppg.FileGeneratingJob(\n \"out/%i\" % i,\n lambda of, i=i: write(\"shu\", \"b\"),\n depend_on_function=False,\n )\n\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert \"did not create the following files: ['out/0']\" in str(\n ppg.global_pipegraph.last_run_result[\"out/0\"].error\n )\n\n def test_basic_with_parameter(self):\n data_to_write = \"hello\"\n\n def do_write(filename):\n print(\"do_write was called\")\n write(filename, data_to_write)\n\n job = ppg.FileGeneratingJob(\"out/a\", do_write, depend_on_function=False)\n ppg.run()\n assert Path(\"out/a\").exists()\n op = open(\"out/a\", \"r\")\n data = op.read()\n op.close()\n assert data == data_to_write\n\n def test_simple_filegeneration_with_function_dependency(self):\n of = \"out/a\"\n data_to_write = \"hello\"\n\n def do_write(ofof):\n print(\"do_write was called\")\n write(of, data_to_write)\n\n job = ppg.FileGeneratingJob(of, do_write)\n ppg.run()\n assert Path(of).exists()\n op = open(of, \"r\")\n data = op.read()\n op.close()\n assert data == data_to_write\n\n def test_filejob_raises_if_no_data_is_written(self):\n of = \"out/a\"\n\n def do_write(of):\n write(\"out/A\", \"\")\n\n job = ppg.FileGeneratingJob(of, do_write)\n\n with 
pytest.raises(ppg.JobsFailed):\n ppg.run()\n\n assert isinstance(\n ppg.global_pipegraph.last_run_result[job.job_id].error.args[0],\n ppg.JobContractError,\n )\n assert not (Path(of).exists())\n\n def test_filejob_empty_allowed(self):\n ppg.FileGeneratingJob(\"out/A\", lambda of: write(\"out/A\", \"\"), empty_ok=True)\n ppg.run()\n assert read(\"out/A\") == \"\"\n\n def test_filejob_raising_exception_bubbles(self):\n def do(of):\n raise ValueError(\"Hello Exception\")\n\n job = ppg.FileGeneratingJob(\"out/A\", do)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert \"Hello Exception\" in str(\n ppg.global_pipegraph.last_run_result[job.job_id].error\n )\n\n def test_simple_filegeneration_keeps_file_on_exception(self):\n of = \"out/a\"\n data_to_write = \"hello\"\n\n def do_write(of):\n write(of, data_to_write)\n raise ValueError(\"shu\")\n\n job = ppg.FileGeneratingJob(of, do_write)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert Path(\n of\n ).exists() # unlike ppg1, we know by the (lack of) output records that we need to redo this\n assert isinstance(\n ppg.global_pipegraph.last_run_result[of].error.args[0], ValueError\n )\n\n def test_filegenerating_ok_change_fail_ok(self):\n ppg.new(run_mode=ppg.RunMode.NOTEBOOK)\n of = \"A\"\n func1 = lambda of: counter(\"a\") and write(of, \"A\") # noqa: E731\n job = ppg.FileGeneratingJob(of, func1)\n ppg.run()\n assert read(\"a\") == \"1\"\n ppg.run()\n assert read(\"a\") == \"1\"\n assert read(\"A\") == \"A\"\n\n def do(of):\n counter(\"a\")\n write(of, \"B\")\n raise ValueError()\n\n job = ppg.FileGeneratingJob(of, do)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert read(\"a\") == \"2\"\n assert read(\"A\") == \"B\"\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert read(\"a\") == \"3\"\n assert read(\"A\") == \"B\"\n\n job = ppg.FileGeneratingJob(of, func1) # so we get the input we had previously!\n ppg.run()\n assert read(\"a\") == \"4\"\n assert read(\"A\") == \"A\"\n ppg.run()\n assert read(\"a\") == \"4\"\n assert read(\"A\") == \"A\"\n\n def test_simple_filegeneration_captures_stdout_stderr(self):\n of = \"out/a\"\n data_to_write = \"hello\"\n\n def do_write(of):\n op = open(of, \"w\")\n op.write(data_to_write)\n op.close()\n print(\"stdout is cool\")\n sys.stderr.write(\"I am stderr\")\n\n job = ppg.FileGeneratingJob(of, do_write)\n ppg.run()\n assert Path(of).exists()\n op = open(of, \"r\")\n data = op.read()\n op.close()\n assert data == data_to_write\n assert job.stdout == \"stdout is cool\\n\"\n assert job.stderr == \"I am stderr\" # no \\n here\n\n def test_simple_filegeneration_captures_stdout_stderr_of_spawned_processe(self):\n of = \"out/a\"\n data_to_write = \"hello\"\n\n def do_write(of):\n import subprocess\n\n subprocess.check_call(\"echo stdout is cool\", shell=True)\n subprocess.check_call(\"echo I am stderr>&2\", shell=True)\n of.write_text(\"hello\")\n\n job = ppg.FileGeneratingJob(of, do_write)\n ppg.run()\n assert Path(of).exists()\n op = open(of, \"r\")\n data = op.read()\n op.close()\n assert data == data_to_write\n assert job.stdout == \"stdout is cool\\n\"\n assert job.stderr == \"I am stderr\\n\" # no \\n here\n\n def test_simple_filegeneration_disabled_stdout_capture(self):\n of = \"out/a\"\n data_to_write = \"hello\"\n\n def do_write(of):\n write(of, data_to_write)\n print(\"stdout is cool\")\n sys.stderr.write(\"I am stderr\")\n\n job = ppg.FileGeneratingJob(of, do_write, always_capture_output=False)\n ppg.run()\n assert Path(of).exists()\n op = open(of, \"r\")\n data = 
op.read()\n op.close()\n assert data == data_to_write\n assert job.stdout == \"not captured\"\n assert job.stderr == \"not captured\" # no \\n here\n\n def test_simple_filegeneration_captures_stdout_stderr_giant_response(self, capsys):\n of = \"out/a\"\n\n def do_write(of):\n write(of, \"hello\")\n print(\"s\" * (16 * 1024 ** 2))\n sys.stderr.write(\"I\" * (256 * 1024 ** 2))\n\n job = ppg.FileGeneratingJob(of, do_write)\n with capsys.disabled():\n ppg.run()\n assert Path(of).exists()\n assert read(of) == \"hello\"\n assert job.stdout == \"s\" * (16 * 1024 ** 2) + \"\\n\"\n assert job.stderr == \"I\" * (256 * 1024 ** 2) # no \\n here\n\n def test_simple_filegeneration_captures_stdout_stderr_failure(self):\n of = \"out/a\"\n data_to_write = \"hello\"\n\n def do_write(of):\n op = open(of, \"w\")\n op.write(data_to_write)\n op.close()\n print(\"stdout is cool\")\n sys.stderr.write(\"I am stderr\")\n raise ValueError()\n\n job = ppg.FileGeneratingJob(of, do_write)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert Path(of).exists()\n op = open(of, \"r\")\n data = op.read()\n op.close()\n assert data == data_to_write\n assert job.stdout == \"stdout is cool\\n\"\n assert job.stderr == \"I am stderr\" # no \\n here\n\n def test_filegeneration_does_not_change_mcp(self):\n global global_test\n global_test = 1\n of = \"out/a\"\n data_to_write = \"hello\"\n\n def do_write(of):\n write(of, data_to_write)\n global global_test\n global_test = 2\n\n ppg.FileGeneratingJob(of, do_write)\n ppg.run()\n assert global_test == 1\n\n def test_file_generation_chaining_simple(self):\n ofA = \"out/a\"\n\n def writeA(of):\n write(ofA, \"Hello\")\n\n jobA = ppg.FileGeneratingJob(ofA, writeA)\n ofB = \"out/b\"\n\n def writeB(of):\n op = open(ofB, \"w\")\n ip = open(ofA, \"r\")\n op.write(ip.read()[::-1])\n op.close()\n ip.close()\n\n jobB = ppg.FileGeneratingJob(ofB, writeB)\n jobB.depends_on(jobA)\n ppg.run()\n assert read(ofA) == read(ofB)[::-1]\n\n def test_file_generation_multicore(self):\n # one fork per FileGeneratingJob...\n import os\n\n ofA = \"out/a\"\n\n def writeA(of):\n write(ofA, \"%i\" % os.getpid())\n\n ofB = \"out/b\"\n\n def writeB(of):\n write(ofB, \"%i\" % os.getpid())\n\n ppg.FileGeneratingJob(ofA, writeA)\n ppg.FileGeneratingJob(ofB, writeB)\n ppg.run()\n assert read(ofA) != read(ofB)\n\n def test_filegenerating_two_jobs_same_file(self):\n ppg.new(run_mode=ppg.RunMode.NOTEBOOK)\n ppg.MultiFileGeneratingJob(\n [\"out/A\", \"out/B\"], lambda of: write(\"out/A\", \"hello\")\n )\n with pytest.raises(ppg.JobOutputConflict):\n ppg.MultiFileGeneratingJob(\n [\"out/A\", \"out/C\"], lambda of: write(\"out/A\", \"world\")\n )\n ppg.MultiFileGeneratingJob(\n [\"out/C\"], lambda of: write(\"out/A\", \"world\")\n ) # that's ok, we are replacing out/C\n ppg.FileGeneratingJob(\n \"out/C\", lambda of: write(\"out/C\", \"C\")\n ) # that's ok, we are replacing out/C (at least in relaxed /notebook mode)\n with pytest.raises(ValueError): # just checking the inheritance\n ppg.MultiFileGeneratingJob(\n [\"out/C\", \"out/D\"], lambda of: write(\"out/A\", \"world\")\n ) # note that this one does not replace out/C\n ppg.MultiFileGeneratingJob(\n [\"out/D\", \"out/E\"], lambda of: write(\"out/A\", \"world\")\n )\n with pytest.raises(ppg.JobOutputConflict) as excinfo:\n ppg.FileGeneratingJob(\"out/D\", lambda of: write(\"out/C\", \"C\"))\n\n def test_multi_file_with_exing_files_rerun_to_capture_hashes(self):\n def callback(filenames):\n counter(\"counter\")\n for f in filenames:\n f.write_text(\"hello\")\n\n a = 
ppg.MultiFileGeneratingJob([\"a\", \"b\"], callback)\n Path(\"a\").write_text(\"shu\")\n Path(\"b\").write_text(\"shu\")\n ppg.run()\n assert read(\"a\") == \"hello\"\n assert read(\"b\") == \"hello\"\n assert read(\"counter\") == \"1\"\n ppg.run()\n assert read(\"counter\") == \"1\"\n\n def test_invaliding_removes_file(self):\n of = \"out/a\"\n sentinel = \"out/b\"\n\n def do_write(of):\n if Path(sentinel).exists():\n raise ValueError(\"second run\")\n write(of, \"shu\")\n write(sentinel, \"done\")\n\n job = ppg.FileGeneratingJob(of, do_write, empty_ok=True)\n dep = ppg.ParameterInvariant(\n \"my_params\",\n {\n 1,\n },\n )\n job.depends_on(dep)\n ppg.run()\n assert Path(of).exists()\n assert Path(sentinel).exists()\n\n ppg.new()\n job = ppg.FileGeneratingJob(of, do_write)\n dep = ppg.ParameterInvariant(\n \"my_params\", (2,)\n ) # same name ,changed params, job needs to rerun, but explodes...\n job.depends_on(dep) # on average, half the mistakes are in the tests...\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert not (Path(of).exists())\n\n def test_passing_non_function(self):\n with pytest.raises(TypeError):\n ppg.FileGeneratingJob(\"out/a\", \"shu\")\n\n def test_passing_non_string_as_jobid(self):\n with pytest.raises(TypeError):\n ppg.FileGeneratingJob(5, lambda of: 1)\n\n def test_exceptions_are_preserved(self):\n def shu(of):\n write(\"out/A\", \"A\")\n write(\"out/Ay\", \"ax\")\n raise IndexError(\"twenty-five\") # just some exception\n\n jobA = ppg.FileGeneratingJob(\"out/A\", shu)\n\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n\n assert isinstance(\n ppg.global_pipegraph.last_run_result[jobA.job_id].error.args[0], IndexError\n )\n assert Path(\"out/A\").exists() #\n assert read(\"out/Ay\") == \"ax\" # but the job did run, right?\n\n def test_exceptions_are_preserved2(self):\n def shu(of):\n write(\"out/A\", \"A\")\n write(\"out/Ay\", \"ax\")\n raise TypeError(\"twenty-five\") # just some exception\n\n jobA = ppg.FileGeneratingJob(\"out/A\", shu)\n\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n\n print(ppg.global_pipegraph.last_run_result[jobA.job_id].error)\n assert isinstance(\n ppg.global_pipegraph.last_run_result[jobA.job_id].error.args[0], TypeError\n )\n assert \"twenty-five\" in str(\n ppg.global_pipegraph.last_run_result[jobA.job_id].error\n )\n assert Path(\n \"out/A\"\n ).exists() # should clobber the resulting files in this case - just a double check to test_invaliding_removes_file\n assert read(\"out/Ay\") == \"ax\" # but the job did run, right?\n\n\[email protected](\"create_out_dir\")\[email protected](\"ppg2_per_test\")\nclass TestMultiFileGeneratingJob:\n def test_basic(self):\n of = [\"out/a\", \"out/b\"]\n\n def do_write(of):\n for f in of:\n append(f, \"shu\")\n\n ppg.MultiFileGeneratingJob(of, do_write)\n ppg.run()\n for f in of:\n assert read(f) == \"shu\"\n ppg.new()\n ppg.MultiFileGeneratingJob(of, do_write)\n ppg.run()\n for f in of:\n assert read(f) == \"shu\" # ie. 
job has net been rerun...\n # but if I now delete one...\n Path(of[0]).unlink()\n ppg.new()\n ppg.MultiFileGeneratingJob(of, do_write)\n ppg.run()\n assert read(of[0]) == \"shu\"\n assert (\n read(of[1]) == \"shu\"\n ) # Since that file was also deleted when MultiFileGeneratingJob was invalidated...\n\n def test_empty_raises(self):\n of = [\"out/a\", \"out/b\"]\n\n def do_write(of):\n for f in of:\n append(f, \"shu\")\n write(f, \"\") # one is empty\n\n job = ppg.MultiFileGeneratingJob(of, do_write, empty_ok=False)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert isinstance(\n ppg.global_pipegraph.last_run_result[job.job_id].error.args[0],\n ppg.JobContractError,\n )\n\n def test_empty_ok(self):\n of = [\"out/a\", \"out/b\"]\n\n def do_write(of):\n for f in of:\n append(f, \"shu\")\n write(f, \"\") # one is empty\n\n ppg.MultiFileGeneratingJob(of, do_write, empty_ok=True)\n ppg.run()\n for f in of[:-1]:\n assert read(f) == \"shu\"\n assert read(of[-1]) == \"\"\n\n def test_exception_destroys_no_files(self):\n of = [\"out/a\", \"out/b\"]\n\n def do_write(of):\n for f in of:\n append(f, \"shu\")\n raise ValueError(\"explode\")\n\n ppg.MultiFileGeneratingJob(of, do_write)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n for f in of:\n assert Path(f).exists()\n\n def test_invalidation_removes_all_files(self):\n of = [\"out/a\", \"out/b\"]\n sentinel = \"out/sentinel\" # hack so this one does something different the second time around...\n\n def do_write(of):\n if Path(sentinel).exists():\n raise ValueError(\"explode\")\n write(sentinel, \"shu\")\n for f in of:\n append(f, \"shu\")\n\n ppg.MultiFileGeneratingJob(of, do_write).depends_on(\n ppg.ParameterInvariant(\"myparam\", (1,))\n )\n ppg.run()\n for f in of:\n assert Path(f).exists()\n ppg.new()\n ppg.MultiFileGeneratingJob(of, do_write).depends_on(\n ppg.ParameterInvariant(\"myparam\", (2,))\n )\n with pytest.raises(ppg.JobsFailed):\n ppg.run() # since this should blow up\n for f in of:\n assert not (Path(f).exists())\n\n def test_passing_not_a_list_of_str(self):\n with pytest.raises(TypeError):\n ppg.MultiFileGeneratingJob(\"out/a\", lambda of: 1)\n\n def test_passing_non_function(self):\n with pytest.raises(TypeError):\n ppg.MultiFileGeneratingJob([\"out/a\"], \"shu\")\n\n def test_exceptions_are_preserved(self):\n def shu(of):\n write(\"out/A\", \"A\")\n write(\"out/B\", \"B\")\n write(\"out/Az\", \"ax\")\n raise IndexError(\"twenty-five\") # just some exception\n\n jobA = ppg.MultiFileGeneratingJob([\"out/A\", \"out/B\"], shu)\n\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n\n assert isinstance(\n ppg.global_pipegraph.last_run_result[jobA.job_id].error.args[0], IndexError\n )\n assert Path(\"out/A\").exists() # ppg1 difference\n assert Path(\"out/B\").exists() # ppg1 difference\n assert read(\"out/Az\") == \"ax\" # but the job did run, right?\n\n def raises_on_non_string_filnames(self):\n with pytest.raises(ValueError):\n ppg.MultiFileGeneratingJob([\"one\", 2], lambda of: write(\"out/A\"))\n\n def test_raises_on_collision(self):\n with pytest.raises(ValueError):\n ppg.MultiFileGeneratingJob([\"test1\", \"test2\"], lambda of: 5)\n ppg.MultiFileGeneratingJob([\"test2\", \"test3\"], lambda of: 5)\n\n def test_duplicate_prevention(self):\n param = \"A\"\n ppg.FileGeneratingJob(\"out/A\", lambda of: write(\"out/A\", param))\n\n with pytest.raises(ValueError):\n ppg.MultiFileGeneratingJob([\"out/A\"], lambda of: write(\"out/A\", param))\n\n ppg.new(run_mode=ppg.RunMode.NOTEBOOK)\n ppg.FileGeneratingJob(\"out/A\", lambda of: 
write(\"out/A\", param))\n ppg.MultiFileGeneratingJob([\"out/A\"], lambda of: write(\"out/A\", param))\n\n def test_non_str(self):\n param = \"A\"\n\n with pytest.raises(TypeError):\n ppg.MultiFileGeneratingJob([25], lambda of: write(\"out/A\", param))\n\n def test_non_iterable(self):\n param = \"A\"\n try:\n ppg.MultiFileGeneratingJob(25, lambda of: write(\"out/A\", param))\n assert not (\"Exception not raised\")\n except TypeError as e:\n print(e)\n assert \"files was not iterable\" in str(e)\n\n def test_single_stre(self):\n param = \"A\"\n\n with pytest.raises(TypeError):\n ppg.MultiFileGeneratingJob(\"A\", lambda of: write(\"out/A\", param))\n\n\ntest_modifies_shared_global = []\nshared_value = \"\"\n\n\[email protected](\"create_out_dir\")\[email protected](\"ppg2_per_test\")\nclass TestDataLoadingJob:\n def test_modifies_worker(self):\n # global shared\n # shared = \"I was the the global in the mcp\"\n def load():\n test_modifies_shared_global.append(\"shared data\")\n\n of = \"out/a\"\n\n def do_write(of):\n write(\n of, \"\\n\".join(test_modifies_shared_global)\n ) # this might actually be a problem when defining this?\n\n dlJo = ppg.DataLoadingJob(\"myjob\", load)\n writejob = ppg.FileGeneratingJob(of, do_write)\n writejob.depends_on(dlJo)\n\n writejob2 = ppg.FileGeneratingJob(\n \"out/b\",\n lambda of: write(\"out/b\", \"b\" + \"\\n\".join(test_modifies_shared_global)),\n )\n writejob2.depends_on(dlJo)\n ppg.run()\n assert read(of) == \"shared data\"\n assert read(\"out/b\") == \"bshared data\"\n\n def test_global_statement_works(self):\n # this currently does not work in the cloudpickle transmitted jobs -\n # two jobs refereing to global have different globals afterwards\n # or the 'global shared' does not work as expected after loading\n global shared_value\n shared_value = \"I was the the global in the mcp\"\n\n def load():\n global shared_value\n shared_value = \"shared data\"\n\n of = \"out/a\"\n\n def do_write(of):\n write(of, shared_value)\n\n dlJo = ppg.DataLoadingJob(\"myjob\", load)\n writejob = ppg.FileGeneratingJob(of, do_write)\n writejob.depends_on(dlJo)\n ppg.run()\n assert read(of) == \"shared data\"\n\n def test_does_not_get_run_without_dep_job(self):\n of = \"out/shu\"\n\n def load():\n counter(of)\n\n ppg.DataLoadingJob(\"myjob\", load)\n ppg.run()\n assert not Path(of).exists()\n ppg.run()\n assert not Path(of).exists()\n ppg.new()\n ppg.DataLoadingJob(\"myjob\", load)\n ppg.run()\n assert not Path(of).exists()\n\n def test_does_not_get_run_in_chain_without_final_dep(self):\n of = \"out/shu\"\n\n def load():\n counter(of)\n\n job = ppg.DataLoadingJob(\"myjob\", load)\n ofB = \"out/sha\"\n\n def loadB():\n counter(ofB)\n\n ppg.DataLoadingJob(\"myjobB\", loadB).depends_on(job)\n ppg.run()\n assert not Path(of).exists()\n assert not Path(ofB).exists()\n ppg.run()\n assert not Path(of).exists()\n assert not Path(ofB).exists()\n\n def test_does_get_run_in_chain_all(self):\n of = \"out/shu\"\n\n def load():\n write(\n of, \"shu\"\n ) # not the fine english way, but we need a sideeffect that's checkable\n\n job = ppg.DataLoadingJob(\"myjob\", load)\n ofB = \"out/sha\"\n\n def loadB():\n write(ofB, \"sha\")\n\n jobB = ppg.DataLoadingJob(\"myjobB\", loadB).depends_on(job)\n ofC = \"out/c\"\n\n def do_write(of):\n write(ofC, ofC)\n\n ppg.FileGeneratingJob(ofC, do_write).depends_on(jobB)\n ppg.run()\n assert Path(of).exists()\n assert Path(ofB).exists()\n assert Path(ofC).exists()\n\n def test_chain_with_filegenerating_works(self):\n of = \"out/a\"\n\n def 
do_write(of):\n write(of, str(of))\n\n jobA = ppg.FileGeneratingJob(of, do_write)\n o = Dummy()\n\n def do_load():\n o.a = read(of)\n\n jobB = ppg.DataLoadingJob(\"loadme\", do_load).depends_on(jobA)\n ofB = \"out/b\"\n\n def write2(of):\n write(ofB, o.a)\n\n ppg.FileGeneratingJob(ofB, write2).depends_on(jobB)\n ppg.run()\n assert read(of) == of\n assert read(ofB) == of\n\n def test_does_get_run_depending_on_jobgenjob(self):\n of = \"out/shu\"\n\n def load():\n write(\n of, \"shu\"\n ) # not the fine english way, but we need a sideeffect that's checkable\n\n job = ppg.DataLoadingJob(\"myjob\", load)\n\n def gen():\n ofB = \"out/b\"\n\n def do_write(of):\n write(ofB, \"hello\")\n\n ppg.FileGeneratingJob(ofB, do_write)\n\n ppg.JobGeneratingJob(\"mygen\", gen).depends_on(job)\n ppg.run()\n assert Path(of).exists() # so the data loading job was run\n assert read(\"out/b\") == \"hello\" # and so was the jobgen and filegen job.\n\n def test_passing_non_function(self):\n with pytest.raises(TypeError):\n ppg.DataLoadingJob(\"out/a\", \"shu\")\n\n def test_passing_non_string_as_jobid(self):\n with pytest.raises(TypeError):\n ppg.DataLoadingJob(5, lambda: 1)\n\n def test_failing_dataloading_jobs(self):\n o = Dummy()\n of = \"out/A\"\n\n def write(of):\n write(of, o.a)\n\n def load():\n o.a = \"shu\"\n raise ValueError()\n\n job_fg = ppg.FileGeneratingJob(of, write)\n job_dl = ppg.DataLoadingJob(\"doload\", load)\n job_fg.depends_on(job_dl)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert not (Path(of).exists())\n assert isinstance(\n ppg.global_pipegraph.last_run_result[job_dl.job_id].error.args[0],\n ValueError,\n )\n\n def test_prev_dataloading_jobs_not_done_if_there_is_a_non_dataloading_job_inbetween_that_is_done(\n self,\n ):\n # so, A = DataLoadingJob, B = FileGeneratingJob, C = DataLoadingJob, D = FileGeneratingJob\n # D.depends_on(C)\n # C.depends_on(B)\n # B.depends_on(A)\n # B is done.\n # D is not\n # since a would be loaded, and then cleaned up right away (because B is Done)\n # it should not be loaded again\n o = Dummy()\n\n def a():\n o.a = \"A\"\n append(\"out/A\", \"A\")\n\n def b(of):\n append(\"out/B\", \"B\")\n\n def c():\n o.c = \"C\"\n append(\"out/C\", \"C\")\n\n def d(of):\n append(\"out/D\", \"D\")\n\n jobA = ppg.DataLoadingJob(\"out/A\", a, depend_on_function=False)\n jobB = ppg.FileGeneratingJob(\"out/B\", b, depend_on_function=False)\n jobB.depends_on(jobA)\n ppg.run()\n assert read(\"out/A\") == \"A\"\n assert read(\"out/B\") == \"B\"\n\n jobC = ppg.DataLoadingJob(\"out/C\", c, depend_on_function=False)\n jobD = ppg.FileGeneratingJob(\"out/D\", d, depend_on_function=False)\n jobD.depends_on(jobC)\n jobC.depends_on(jobB)\n ppg.run()\n assert read(\"out/A\") == \"A\"\n assert read(\"out/B\") == \"B\"\n assert read(\"out/C\") == \"C\"\n assert read(\"out/D\") == \"D\"\n\n def test_sending_a_non_pickable_exception_data_loading(self):\n class UnpickableException(Exception):\n def __getstate__(self):\n raise ValueError(\"Can't pickle me\")\n\n def load():\n raise UnpickableException()\n\n jobA = ppg.DataLoadingJob(\"out/A\", load)\n jobB = ppg.FileGeneratingJob(\"out/B\", lambda of: True)\n jobB.depends_on(jobA)\n\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n\n assert isinstance(\n ppg.global_pipegraph.last_run_result[jobA.job_id].error.args[0],\n UnpickableException,\n )\n\n def test_sending_a_non_pickable_exception_file_generating(self):\n class UnpickableException(Exception):\n def __getstate__(self):\n raise ValueError(\"Can't pickle me\")\n\n def load(of):\n 
raise UnpickableException()\n\n jobB = ppg.FileGeneratingJob(\"out/B\", load)\n\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert isinstance(\n ppg.global_pipegraph.last_run_result[jobB.job_id].error.args[0],\n ppg.exceptions.JobDied,\n )\n\n assert isinstance(\n ppg.global_pipegraph.last_run_result[jobB.job_id].error.args[0].args[0], str\n )\n\n assert (\n \"UnpickableException\"\n in ppg.global_pipegraph.last_run_result[jobB.job_id].error.args[0].args[0]\n )\n\n def test_creating_jobs_in_file_generating_are_ignored(self):\n def load(of):\n ppg.global_pipegraph.new_jobs = (\n {}\n ) # just to see if we can reach the check in the resource coordinator!\n c = ppg.FileGeneratingJob(\"out/C\", lambda of: write(\"out/C\", \"C\"))\n write(\"out/A\", \"A\")\n return [c]\n\n ppg.FileGeneratingJob(\"out/A\", load)\n ppg.run()\n assert read(\"out/A\") == \"A\"\n assert not Path(\"out/C\").exists()\n\n def test_creating_jobs_in_data_loading(self):\n def load():\n from loguru import logger # noqa:F401\n\n logger.info(\"in load\")\n ppg.FileGeneratingJob(\n \"out/C\", lambda of: write(\"out/C\", \"C\"), depend_on_function=False\n )\n\n a = ppg.FileGeneratingJob(\n \"out/A\", lambda of: write(\"out/A\", \"A\"), depend_on_function=False\n )\n b = ppg.DataLoadingJob(\"out/B\", load, depend_on_function=False)\n a.depends_on(b)\n ppg.run()\n assert Path(\"out/C\").exists()\n # it is ok to create jobs in dataloading .\n # ppg1 guarded against this, but it had to special case everything\n # around on-the-fly-jobs\n # the drawback here is that the DataLoadingJob might not run,\n # but perhaps that's just what you want.\n\n def test_accept_path_as_job_id(self):\n ppg.DataLoadingJob(Path(\"shu\"), lambda: 55)\n\n def test_job_gen_does_not_clobber_history_of_input_jobs(self):\n a = ppg.FileGeneratingJob(\"a\", lambda of: counter(\"A\") and of.write_text(\"a\"))\n b = ppg.JobGeneratingJob(\n \"b\",\n lambda: counter(\"B\")\n and ppg.FileGeneratingJob(\n \"c\", lambda of: counter(\"C\") and of.write_text(\"c\")\n ),\n )\n b.depends_on(a)\n ppg.run()\n assert Path(\"a\").read_text() == \"a\"\n assert Path(\"A\").read_text() == \"1\"\n assert Path(\"B\").read_text() == \"1\"\n assert Path(\"C\").read_text() == \"1\"\n ppg.run()\n assert Path(\"a\").read_text() == \"a\"\n assert Path(\"A\").read_text() == \"1\"\n assert Path(\"B\").read_text() == \"2\"\n assert Path(\"C\").read_text() == \"1\"\n\n def test_dict_return(self):\n collector = []\n\n def gen():\n collector.append(\"a\")\n return {\"hello\": 123, \"world\": \"world\"}\n\n a = ppg.DataLoadingJob(\"gen\", gen)\n b = ppg.FileGeneratingJob(\"b\", lambda of: of.write_text(collector[0]))\n b.depends_on(a)\n ppg.run()\n assert read(\"b\") == \"a\"\n\n\[email protected](\"create_out_dir\")\[email protected](\"ppg2_per_test\")\nclass TestAttributeJob:\n def test_basic_attribute_loading(self):\n o = Dummy()\n\n def load():\n return \"shu\"\n\n job = ppg.AttributeLoadingJob(\"load_dummy_shu\", o, \"a\", load)\n of = \"out/a\"\n\n def do_write(of):\n write(of, o.a)\n\n ppg.FileGeneratingJob(of, do_write).depends_on(job)\n ppg.run()\n assert read(of) == \"shu\"\n\n def test_chained(self):\n o = Dummy()\n\n def load():\n return \"shu\"\n\n def load2():\n return o.a + \"sha\"\n\n job = ppg.AttributeLoadingJob(\"load_dummy_shu\", o, \"a\", load)\n job2 = ppg.AttributeLoadingJob(\"load_dummy_shu2\", o, \"b\", load2)\n of = \"out/a\"\n\n def do_write(of):\n write(of, o.b)\n\n ppg.FileGeneratingJob(of, do_write).depends_on(job2)\n job2.depends_on(job)\n ppg.run()\n 
assert read(of) == \"shusha\"\n\n def test_attribute_loading_does_not_affect_mcp(self):\n o = Dummy()\n\n def load():\n return \"shu\"\n\n job = ppg.AttributeLoadingJob(\"load_dummy_shu\", o, \"a\", load)\n of = \"out/a\"\n\n def do_write(of):\n write(of, o.a)\n\n ppg.FileGeneratingJob(of, do_write).depends_on(job)\n ppg.run()\n assert read(of) == \"shu\"\n assert not (hasattr(o, \"a\"))\n\n def test_attribute_loading_does_not_run_without_dependency(self):\n o = Dummy()\n tf = \"out/testfile\"\n\n def load():\n counter(tf)\n return \"shu\"\n\n ppg.AttributeLoadingJob(\n \"load_dummy_shu\", o, \"a\", load, depend_on_function=False\n )\n ppg.run()\n assert not (hasattr(o, \"a\")) # never assigned\n assert not Path(\"tf\").exists()\n ppg.run()\n assert not Path(\"tf\").exists()\n assert not (hasattr(o, \"a\"))\n\n def test_attribute_loading_does_run_without_dependency_if_invalidated(self):\n o = Dummy()\n tf = \"out/testfile\"\n\n def load():\n write(tf, \"hello\")\n return \"shu\"\n\n ppg.AttributeLoadingJob(\"load_dummy_shu\", o, \"a\", load)\n ppg.run()\n assert not Path(tf).exists()\n assert not (hasattr(o, \"a\"))\n\n def test_attribute_disappears_after_direct_dependency(self):\n o = Dummy()\n job = ppg.AttributeLoadingJob(\"load_dummy_shu\", o, \"a\", lambda: \"shu\")\n of = \"out/A\"\n\n def do_write(of):\n write(of, o.a)\n\n fgjob = ppg.FileGeneratingJob(of, do_write).depends_on(job)\n of2 = \"out/B\"\n\n def later_write(of2):\n raise ValueError()\n write(of2, o.a)\n\n # might be pure luck that this job runs after the cleanup\n ppg.FileGeneratingJob(of2, later_write).depends_on(fgjob)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert read(of) == \"shu\"\n assert not (Path(of2).exists())\n\n def ppg1_test_attribute_disappears_after_direct_dependencies(self):\n \"\"\"I can't get tihs test to run in ppg2 - the cleanup does happen,\n but this can't show it. 
It is now a job, so\n it 's not a given that it runs before B or C\n (actually, I believe test_attribute_disappears_after_direct_dependency\n only works by sheer accident as well?)\n\n \"\"\"\n o = Dummy()\n job = ppg.AttributeLoadingJob(\"load_dummy_shu\", o, \"a\", lambda: \"shu\")\n of = \"out/A\"\n\n def do_write(of):\n write(of, o.a)\n\n fgjob = ppg.FileGeneratingJob(of, do_write).depends_on(job)\n of2 = \"out/B\"\n\n def later_write(of2):\n write(of2, o.a)\n\n fgjobB = ppg.FileGeneratingJob(of2, later_write).depends_on(\n fgjob\n ) # now, this one does not depend on job, o it should not be able to access o.a\n of3 = \"out/C\"\n\n def also_write(of3):\n write(of3, o.a)\n\n fgjobC = ppg.FileGeneratingJob(of3, also_write).depends_on(job)\n fgjobB.depends_on(\n fgjobC\n ) # otherwise, B might be started C returned, and the cleanup will not have occurred!\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n pass\n assert read(of) == \"shu\"\n assert read(of3) == \"shu\"\n assert not (Path(of2).exists())\n\n def test_passing_non_string_as_attribute(self):\n o = Dummy()\n\n with pytest.raises(ValueError):\n ppg.AttributeLoadingJob(\"out/a\", o, 5, 55)\n\n def test_passing_non_function(self):\n o = Dummy()\n\n with pytest.raises(TypeError):\n ppg.AttributeLoadingJob(\"out/a\", o, \"a\", 55)\n\n def test_passing_non_string_as_jobid(self):\n o = Dummy()\n\n with pytest.raises(TypeError):\n ppg.AttributeLoadingJob(5, o, \"a\", lambda: 55)\n\n def test_no_swapping_attributes_for_one_job(self):\n def cache():\n return list(range(0, 100))\n\n o = Dummy()\n ppg.AttributeLoadingJob(\"out/A\", o, \"a\", cache)\n\n with pytest.raises(ppg.JobRedefinitionError):\n ppg.AttributeLoadingJob(\"out/A\", o, \"b\", cache)\n\n def test_raises_on_non_string_attribute_name(self):\n with pytest.raises(ValueError):\n o = Dummy()\n ppg.AttributeLoadingJob(\"out/A\", o, 23, lambda: 5)\n\n def test_raises_on_non_function_callback(self):\n with pytest.raises(ValueError):\n o = Dummy()\n ppg.AttributeLoadingJob(\"out/A\", o, 23, 55)\n\n def test_no_swapping_objects_for_one_job(self):\n def cache():\n return list(range(0, 100))\n\n o = Dummy()\n o2 = Dummy()\n ppg.CachedAttributeLoadingJob(\"out/A\", o, \"a\", cache)\n\n with pytest.raises(ppg.JobRedefinitionError):\n ppg.CachedAttributeLoadingJob(\"out/A\", o2, \"a\", cache)\n\n def test_no_swapping_callbacks(self):\n\n o = Dummy()\n ppg.AttributeLoadingJob(\"out/A\", o, \"a\", lambda: 55, depend_on_function=False)\n\n with pytest.raises(ppg.JobRedefinitionError):\n ppg.AttributeLoadingJob(\n \"out/A\", o, \"a\", lambda: 55 + 1, depend_on_function=False\n )\n\n def test_no_swapping_callbacks_cached(self):\n\n o = Dummy()\n ppg.CachedAttributeLoadingJob(\n \"out/A\", o, \"a\", lambda: 55, depend_on_function=False\n )\n\n with pytest.raises(ppg.JobRedefinitionError):\n ppg.CachedAttributeLoadingJob(\n \"out/A\", o, \"a\", lambda: 55 + 1, depend_on_function=False\n )\n\n def test_ignore_code_changes(self):\n def a():\n append(\"out/Aa\", \"A\")\n return \"5\"\n\n o = Dummy()\n jobA = ppg.AttributeLoadingJob(\"out/A\", o, \"a\", a, depend_on_function=False)\n jobB = ppg.FileGeneratingJob(\"out/B\", lambda of: write(\"out/B\", o.a))\n jobB.depends_on(jobA)\n ppg.run()\n assert read(\"out/Aa\") == \"A\"\n assert read(\"out/B\") == \"5\"\n ppg.new()\n\n def b():\n append(\"out/Aa\", \"B\")\n return \"5\"\n\n jobA = ppg.AttributeLoadingJob(\"out/A\", o, \"a\", b, depend_on_function=False)\n jobB = ppg.FileGeneratingJob(\"out/B\", lambda of: write(\"out/B\", o.a))\n 
jobB.depends_on(jobA)\n ppg.run()\n # not rerun\n assert read(\"out/Aa\") == \"A\"\n assert read(\"out/B\") == \"5\"\n\n def test_callback_must_be_callable(self):\n with pytest.raises(TypeError):\n o = Dummy()\n ppg.AttributeLoadingJob(\"load_dummy_shu\", o, \"a\", \"shu\")\n\n\[email protected](\"create_out_dir\")\[email protected](\"ppg2_per_test\")\nclass TestTempFileGeneratingJob:\n def test_basic(self):\n temp_file = \"out/temp\"\n\n def write_temp(of):\n write(temp_file, \"hello\")\n\n temp_job = ppg.TempFileGeneratingJob(temp_file, write_temp)\n ofA = \"out/A\"\n\n def write_A(of):\n write(ofA, read(temp_file))\n\n fgjob = ppg.FileGeneratingJob(ofA, write_A)\n fgjob.depends_on(temp_job)\n ppg.run()\n assert read(ofA) == \"hello\"\n assert not (Path(temp_file).exists())\n\n def test_does_not_get_return_if_output_is_done(self):\n temp_file = \"out/temp\"\n out_file = \"out/A\"\n count_file = \"out/count\"\n normal_count_file = \"out/countA\"\n\n def write_count(of):\n try:\n count = read(out_file)\n count = count[: count.find(\":\")]\n except IOError:\n count = \"0\"\n count = int(count) + 1\n write(out_file, str(count) + \":\" + read(temp_file))\n append(normal_count_file, \"A\")\n\n def write_temp(of):\n write(temp_file, \"temp\")\n append(count_file, \"X\")\n\n jobA = ppg.FileGeneratingJob(out_file, write_count)\n jobTemp = ppg.TempFileGeneratingJob(temp_file, write_temp)\n jobA.depends_on(jobTemp)\n ppg.run()\n assert not (Path(temp_file).exists())\n assert read(out_file) == \"1:temp\"\n assert read(count_file) == \"X\"\n assert read(normal_count_file) == \"A\"\n # now, rerun. Tempfile has been deleted,\n # and should not be regenerated\n ppg.new()\n jobA = ppg.FileGeneratingJob(out_file, write_count)\n jobTemp = ppg.TempFileGeneratingJob(temp_file, write_temp)\n jobA.depends_on(jobTemp)\n ppg.run()\n assert not (Path(temp_file).exists())\n assert read(out_file) == \"1:temp\"\n assert read(count_file) == \"X\"\n assert read(normal_count_file) == \"A\"\n\n def test_does_not_get_return_if_output_is_not(self):\n temp_file = \"out/temp\"\n out_file = \"out/A\"\n count_file = \"out/count\"\n normal_count_file = \"out/countA\"\n\n def write_count(out_file):\n try:\n count = read(out_file)\n count = count[: count.find(\":\")]\n except IOError:\n count = \"0\"\n count = int(count) + 1\n write(out_file, str(count) + \":\" + read(temp_file))\n append(normal_count_file, \"A\")\n\n def write_temp(of):\n write(temp_file, \"temp\")\n append(count_file, \"X\")\n\n jobA = ppg.FileGeneratingJob(out_file, write_count)\n jobTemp = ppg.TempFileGeneratingJob(temp_file, write_temp)\n jobA.depends_on(jobTemp)\n ppg.run()\n assert not (Path(temp_file).exists())\n assert read(out_file) == \"1:temp\"\n assert read(count_file) == \"X\"\n assert read(normal_count_file) == \"A\"\n # now, rerun. 
Tempfile has been deleted,\n # and should be regenerated\n Path(out_file).unlink()\n ppg.new()\n jobA = ppg.FileGeneratingJob(out_file, write_count)\n jobTemp = ppg.TempFileGeneratingJob(temp_file, write_temp)\n jobA.depends_on(jobTemp)\n ppg.run()\n assert read(out_file) == \"1:temp\" # since the outfile was removed...\n assert read(count_file) == \"XX\"\n assert read(normal_count_file) == \"AA\"\n assert not (Path(temp_file).exists())\n\n def test_dependand_explodes(self):\n temp_file = \"out/temp\"\n\n def write_temp(of):\n append(temp_file, \"hello\")\n\n temp_job = ppg.TempFileGeneratingJob(temp_file, write_temp)\n ofA = \"out/A\"\n\n def write_A(of):\n raise ValueError(\"shu\")\n\n fgjob = ppg.FileGeneratingJob(ofA, write_A)\n fgjob.depends_on(temp_job)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n # ppg.run()\n assert not (Path(ofA).exists())\n assert Path(temp_file).exists()\n\n ppg.new()\n\n def write_A_ok(ofA):\n write(ofA, read(temp_file))\n\n temp_job = ppg.TempFileGeneratingJob(temp_file, write_temp)\n fgjob = ppg.FileGeneratingJob(ofA, write_A_ok)\n fgjob.depends_on(temp_job)\n ppg.run()\n\n assert read(ofA) == \"hello\" # tempfile job has not been rerun\n assert not (Path(temp_file).exists()) # and the tempfile has been removed...\n\n def test_removes_tempfile_on_exception(self):\n temp_file = \"out/temp\"\n\n def write_temp(of):\n write(temp_file, \"hello\")\n raise ValueError(\"should\")\n\n temp_job = ppg.TempFileGeneratingJob(temp_file, write_temp)\n ofA = \"out/A\"\n\n def write_A(of):\n write(ofA, read(temp_file))\n\n fgjob = ppg.FileGeneratingJob(ofA, write_A)\n fgjob.depends_on(temp_job)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert Path(temp_file).exists()\n assert not (Path(ofA).exists())\n\n def test_passing_non_function(self):\n with pytest.raises(TypeError):\n ppg.TempFileGeneratingJob(\"out/a\", \"shu\")\n\n def test_passing_non_string_as_jobid(self):\n with pytest.raises(TypeError):\n ppg.TempFileGeneratingJob(5, lambda of: 1)\n\n def test_rerun_because_of_new_dependency_does_not_rerun_old(self):\n jobA = ppg.FileGeneratingJob(\n \"out/A\",\n lambda of: append(\"out/A\", read(\"out/temp\")) or append(\"out/Ab\", \"A\"),\n )\n jobB = ppg.TempFileGeneratingJob(\"out/temp\", lambda of: write(\"out/temp\", \"T\"))\n jobA.depends_on(jobB)\n ppg.run()\n assert not (Path(\"out/temp\").exists())\n assert read(\"out/A\") == \"T\"\n assert read(\"out/Ab\") == \"A\" # ran once\n\n ppg.new()\n jobA = ppg.FileGeneratingJob(\n \"out/A\", lambda of: append(\"out/A\", read(\"out/temp\"))\n )\n jobB = ppg.TempFileGeneratingJob(\"out/temp\", lambda of: write(\"out/temp\", \"T\"))\n jobA.depends_on(jobB)\n jobC = ppg.FileGeneratingJob(\n \"out/C\", lambda of: append(\"out/C\", read(\"out/temp\"))\n )\n jobC.depends_on(jobB)\n ppg.run()\n assert not (Path(\"out/temp\").exists())\n assert read(\"out/Ab\") == \"A\" # ran once, not rewritten\n assert read(\"out/C\") == \"T\" # a new file\n\n def test_chaining_multiple(self):\n jobA = ppg.TempFileGeneratingJob(\"out/A\", lambda of: write(\"out/A\", \"A\"))\n jobB = ppg.TempFileGeneratingJob(\n \"out/B\", lambda of: write(\"out/B\", read(\"out/A\") + \"B\")\n )\n jobC = ppg.TempFileGeneratingJob(\n \"out/C\", lambda of: write(\"out/C\", read(\"out/A\") + \"C\")\n )\n jobD = ppg.FileGeneratingJob(\n \"out/D\", lambda of: write(\"out/D\", read(\"out/B\") + read(\"out/C\"))\n )\n jobD.depends_on(jobC)\n jobD.depends_on(jobB)\n jobC.depends_on(jobA)\n jobB.depends_on(jobA)\n ppg.run()\n assert read(\"out/D\") == 
\"ABAC\"\n assert not (Path(\"out/A\").exists())\n assert not (Path(\"out/B\").exists())\n assert not (Path(\"out/C\").exists())\n\n def test_chaining_multiple_differently(self):\n jobA = ppg.TempFileGeneratingJob(\"out/A\", lambda of: write(\"out/A\", \"A\"))\n jobB = ppg.TempFileGeneratingJob(\n \"out/B\", lambda of: write(\"out/B\", read(\"out/A\") + \"B\")\n )\n jobD = ppg.FileGeneratingJob(\n \"out/D\", lambda of: write(\"out/D\", read(\"out/B\") + \"D\")\n )\n jobE = ppg.FileGeneratingJob(\n \"out/E\", lambda of: write(\"out/E\", read(\"out/B\") + \"E\")\n )\n jobF = ppg.FileGeneratingJob(\n \"out/F\", lambda of: write(\"out/F\", read(\"out/A\") + \"F\")\n )\n jobD.depends_on(jobB)\n jobE.depends_on(jobB)\n jobB.depends_on(jobA)\n jobF.depends_on(jobA)\n ppg.run()\n assert read(\"out/D\") == \"ABD\"\n assert read(\"out/E\") == \"ABE\"\n assert read(\"out/F\") == \"AF\"\n assert not (Path(\"out/A\").exists())\n assert not (Path(\"out/B\").exists())\n assert not (Path(\"out/C\").exists())\n\n def test_rerun_because_of_new_dependency_does_not_rerun_old_chained(self):\n jobA = ppg.TempFileGeneratingJob(\"out/A\", lambda of: write(\"out/A\", \"A\"))\n jobB = ppg.TempFileGeneratingJob(\n \"out/B\", lambda of: write(\"out/B\", read(\"out/A\") + \"B\")\n )\n jobC = ppg.FileGeneratingJob(\n \"out/C\",\n lambda of: write(\"out/C\", read(\"out/B\") + \"C\") or append(\"out/Cx\", \"1\"),\n )\n jobB.depends_on(jobA)\n jobC.depends_on(jobB)\n ppg.run()\n assert read(\"out/C\") == \"ABC\"\n assert read(\"out/Cx\") == \"1\"\n\n ppg.new()\n jobA = ppg.TempFileGeneratingJob(\"out/A\", lambda of: write(\"out/A\", \"A\"))\n jobB = ppg.TempFileGeneratingJob(\n \"out/B\", lambda of: write(\"out/B\", read(\"out/A\") + \"B\")\n )\n jobC = ppg.FileGeneratingJob(\n \"out/C\",\n lambda of: write(\"out/C\", read(\"out/B\") + \"C\") or append(\"out/Cx\", \"1\"),\n )\n jobD = ppg.FileGeneratingJob(\n \"out/D\",\n lambda of: write(\"out/D\", read(\"out/A\") + \"D\") or append(\"out/Dx\", \"1\"),\n )\n jobB.depends_on(jobA)\n jobC.depends_on(jobB)\n jobD.depends_on(jobA)\n ppg.run()\n assert read(\"out/D\") == \"AD\"\n assert read(\"out/Dx\") == \"1\"\n assert read(\"out/C\") == \"ABC\"\n assert read(\"out/Cx\") == \"1\"\n\n ppg.new()\n jobA = ppg.TempFileGeneratingJob(\n \"out/A\", lambda of: write(\"out/A\", \"a\")\n ) # note changing function code!\n jobB = ppg.TempFileGeneratingJob(\n \"out/B\", lambda of: write(\"out/B\", read(\"out/A\") + \"B\")\n )\n jobC = ppg.FileGeneratingJob(\n \"out/C\",\n lambda of: write(\"out/C\", read(\"out/B\") + \"C\") or append(\"out/Cx\", \"1\"),\n )\n jobD = ppg.FileGeneratingJob(\n \"out/D\",\n lambda of: write(\"out/D\", read(\"out/A\") + \"D\") or append(\"out/Dx\", \"1\"),\n )\n jobB.depends_on(jobA)\n jobC.depends_on(jobB)\n jobD.depends_on(jobA)\n ppg.run()\n assert read(\"out/D\") == \"aD\"\n assert read(\"out/Dx\") == \"11\" # both get rerun\n assert read(\"out/C\") == \"aBC\"\n assert read(\"out/Cx\") == \"11\"\n\n def test_cleanup_if_never_run(self):\n temp_file = \"out/temp\"\n\n def write_temp(of):\n write(temp_file, \"hello\")\n\n def write_a(of):\n write(\"A\", \"hello\")\n\n temp_job = ppg.TempFileGeneratingJob(\n temp_file, write_temp, depend_on_function=False\n )\n jobA = ppg.FileGeneratingJob(\"A\", write_a, depend_on_function=False)\n write_a(\"A\") # so the file is there!\n ppg.run()\n assert not (Path(\"out/temp\").exists())\n ppg.new()\n write_temp(temp_file)\n assert Path(\"out/temp\").exists()\n # this job never runs...\n temp_job = 
ppg.TempFileGeneratingJob(\n temp_file, write_temp, depend_on_function=False\n )\n # temp_job.do_cleanup_if_was_never_run = True\n ppg.run()\n assert Path(\"out/temp\").exists() # no run, no cleanup\n\n def test_cleanup_if_fails(self, job_trace_log):\n def fail(of):\n of.write_text(\"hello\")\n raise ValueError(\"thisisit\")\n\n a = ppg.TempFileGeneratingJob(\"a\", fail)\n b = ppg.FileGeneratingJob(\"b\", lambda of: of.write_text(read(\"a\")))\n b.depends_on(a)\n with pytest.raises(ppg.JobsFailed):\n b()\n assert Path(\"a\").exists() # cleanup does not run\n assert not Path(\"b\").exists()\n assert \"thisisit\" in str(a.exception)\n assert \"Upstream\" in str(b.exception)\n\n def test_temp_ds_fail_not_rerun(self, job_trace_log):\n def tf(of):\n of.write_text(\"A\")\n counter(\"tf\")\n\n jtf = ppg.TempFileGeneratingJob(\"A\", tf)\n\n def j(of):\n if counter(\"j\") == \"0\":\n raise ValueError()\n of.write_text(\"J\")\n\n jj = ppg.FileGeneratingJob(\"J\", j)\n jj.depends_on(jtf)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert Path(\"A\").read_text() == \"A\"\n assert Path(\"tf\").read_text() == \"1\"\n assert Path(\"j\").read_text() == \"1\"\n assert not Path(\"J\").exists()\n ppg.run()\n assert Path(\"J\").read_text() == \"J\"\n assert Path(\"j\").read_text() == \"2\"\n assert Path(\"tf\").read_text() == \"1\"\n\n def test_file_already_presen(self):\n def doit(output_filename):\n counter(\"tf\")\n output_filename.write_text(\"done\")\n\n j = ppg.TempFileGeneratingJob(\".ppg/deleteme\", doit)\n j2 = ppg.FileGeneratingJob(\n \".ppg/shu\", lambda of: counter(\"j2\") and of.write_text(\"hello\")\n )\n j2.depends_on(j)\n j2()\n assert Path(\"j2\").read_text() == \"1\"\n assert Path(\"tf\").read_text() == \"1\"\n\n Path(\".ppg/shu\").unlink()\n Path(\".ppg/deleteme\").write_text(\"done\")\n j2()\n assert Path(\"tf\").read_text() == \"1\" # same hash, file present\n assert Path(\"j2\").read_text() == \"2\"\n\n\[email protected](\"create_out_dir\")\[email protected](\"ppg2_per_test\")\nclass TestMultiTempFileGeneratingJob:\n def test_basic(self):\n temp_files = [\"out/temp\", \"out/temp2\"]\n\n def write_temp(of):\n for temp_file in temp_files:\n write(temp_file, \"hello\")\n\n temp_job = ppg.MultiTempFileGeneratingJob(temp_files, write_temp)\n ofA = \"out/A\"\n\n def write_A(of):\n write(ofA, read(temp_files[0]) + read(temp_files[1]))\n\n fgjob = ppg.FileGeneratingJob(ofA, write_A)\n fgjob.depends_on(temp_job)\n ppg.run()\n assert read(ofA) == \"hellohello\"\n assert not (Path(temp_files[0]).exists())\n assert not (Path(temp_files[1]).exists())\n\n def test_basic_dependes_were_done(self):\n temp_files = [\"out/temp\", \"out/temp2\"]\n\n def write_temp(of):\n write(\"temp_sentinel\", \"one\")\n for temp_file in temp_files:\n write(temp_file, \"hello\")\n\n temp_job = ppg.MultiTempFileGeneratingJob(\n temp_files, write_temp, depend_on_function=False\n )\n ofA = \"out/A\"\n\n def write_A(of):\n write(ofA, read(temp_files[0]) + read(temp_files[1]))\n\n fgjob = ppg.FileGeneratingJob(ofA, write_A, depend_on_function=False)\n fgjob.depends_on(temp_job)\n write(ofA, \"two\")\n ppg.run()\n assert (\n read(ofA) == \"hellohello\"\n ) # change from ppg1 - we rerun if we don't have a hash recorded\n assert Path(\"temp_sentinel\").exists()\n\n def raises_on_non_string_filnames(self):\n with pytest.raises(ValueError):\n ppg.MultiTempFileGeneratingJob([\"one\", 2], lambda of: write(\"out/A\"))\n\n def test_raises_on_collision(self):\n with pytest.raises(ppg.JobOutputConflict):\n 
ppg.MultiTempFileGeneratingJob([\"test1\", \"test2\"], lambda of: 5)\n ppg.MultiTempFileGeneratingJob([\"test2\", \"test3\"], lambda of: 5)\n\n def test_duplicate_prevention(self):\n param = \"A\"\n ppg.FileGeneratingJob(\"out/A\", lambda of: write(\"out/A\", param))\n\n with pytest.raises(ppg.JobRedefinitionError):\n ppg.MultiTempFileGeneratingJob([\"out/A\"], lambda of: write(\"out/A\", param))\n\n def test_non_str(self):\n param = \"A\"\n\n with pytest.raises(TypeError):\n ppg.MultiTempFileGeneratingJob([25], lambda of: write(\"out/A\", param))\n\n def test_non_iterable(self):\n param = \"A\"\n with pytest.raises(TypeError):\n ppg.MultiTempFileGeneratingJob(25, lambda of: write(\"out/A\", param))\n\n\[email protected](\"ppg2_per_test\")\nclass TestNoDotDotInJobIds:\n def test_no_dot_dot(self):\n \"\"\" all ../ must be resolved before it becomes a job id\"\"\"\n import unittest\n from unittest.mock import patch\n\n collector = set()\n org_dedup = ppg.jobs._dedup_job\n\n def collecting_dedup(cls, job_id):\n collector.add(job_id)\n return org_dedup(cls, job_id)\n\n with patch(\"pypipegraph2.jobs._dedup_job\", collecting_dedup):\n j = ppg.MultiFileGeneratingJob([\"something/../shu\"], lambda of: 5)\n assert j.job_id in collector\n assert not \"..\" in j.job_id\n assert not \"something/../shu\" in collector\n collector.clear()\n\n j = ppg.FileGeneratingJob(\"something/../shu2\", lambda of: 5)\n assert j.job_id in collector\n assert not \"..\" in j.job_id\n assert not \"something/../shu2\" in collector\n collector.clear()\n\n j = ppg.FileInvariant(\"something/../shu3\")\n assert j.job_id in collector\n assert not \"..\" in j.job_id\n assert not \"something/../shu3\" in collector\n collector.clear()\n\n with pytest.raises(TypeError): # we don't resolve function invariant names\n ppg.FunctionInvariant(\"something/../shu3b\")\n\n j = ppg.TempFileGeneratingJob(\"something/../shu4\", lambda of: 5)\n assert j.job_id in collector\n assert not \"..\" in j.job_id\n assert not \"something/../shu4\" in collector\n collector.clear()\n\n j = ppg.MultiTempFileGeneratingJob([\"something/../shu5\"], lambda of: 5)\n assert j.job_id in collector\n assert not \"..\" in j.job_id\n assert not \"something/../shu4\" in collector\n collector.clear()\n\n o = object()\n j = ppg.CachedAttributeLoadingJob(\"something/../shu6\", o, \"attr\", lambda: 5)\n assert j.calc.job_id in collector\n assert j.load.job_id in collector\n assert not \"..\" in j.calc.job_id\n assert not \"..\" in j.load.job_id\n assert not \"something/../shu6\" in collector\n assert not \"loadsomething/../shu6\" in collector\n collector.clear()\n\n j = ppg.CachedDataLoadingJob(\n \"something/../shu7\", lambda: 5, lambda value: 5\n )\n assert j.calc.job_id in collector\n assert j.load.job_id in collector\n assert not \"..\" in j.calc.job_id\n assert not \"..\" in j.load.job_id\n assert not \"something/../shu7\" in collector\n collector.clear()\n\n j = ppg.PlotJob(\n \"something/or_other/../shu.png\", lambda: None, lambda data: None\n )\n assert j.plot.job_id in collector\n assert j.cache.calc.job_id in collector\n assert j.cache.load.job_id in collector\n assert j.table.job_id in collector\n for job_id in collector:\n assert not \"..\" in job_id\n\n", "id": "3867115", "language": "Python", "matching_score": 6.3991780281066895, "max_stars_count": 0, "path": "tests/test_jobs.py" }, { "content": "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2012, <NAME> <<EMAIL>>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis 
software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\nof the Software, and to permit persons to whom the Software is furnished to do\nso, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nimport os\nimport stat\nimport time\nimport hashlib\nimport subprocess\nimport shutil\nimport pytest\nimport pypipegraph as ppg\nimport gzip\nfrom .shared import write, assertRaises, read, append, Dummy\nimport sys\n\n\nclass Undepickable(object):\n def __getstate__(self):\n return {\"shu\": 123} # must not return falsey value\n\n def __setstate__(self, state):\n self.sha = state[\"shu\"]\n import pickle\n\n raise pickle.UnpicklingError(\"SHU\")\n\n\[email protected](\"ppg1_compatibility_test\")\nclass TestInvariant:\n def sentinel_count(self):\n sentinel = \"out/sentinel\"\n try:\n op = open(sentinel, \"r\")\n count = int(op.read())\n op.close()\n except IOError:\n count = 1\n op = open(sentinel, \"w\")\n op.write(\"%i\" % (count + 1))\n op.close()\n return count\n\n def test_filegen_jobs_detect_code_change(self, ppg1_compatibility_test):\n of = \"out/a\"\n\n def do_write():\n append(of, \"shu\" * self.sentinel_count())\n\n ppg.FileGeneratingJob(of, do_write)\n ppg.run_pipegraph()\n assert read(of) == \"shu\"\n ppg1_compatibility_test.new_pipegraph()\n ppg.FileGeneratingJob(of, do_write)\n ppg.run_pipegraph()\n assert read(of) == \"shu\" # has not been run again...\n\n def do_write2():\n append(of, \"sha\")\n\n ppg1_compatibility_test.new_pipegraph()\n ppg.FileGeneratingJob(of, do_write2)\n ppg.run_pipegraph()\n assert read(of) == \"sha\" # has been run again ;).\n\n def test_filegen_jobs_ignores_code_change(self, ppg1_compatibility_test):\n of = \"out/a\"\n\n def do_write():\n append(\"A\", \"a\")\n append(of, \"shu\" * self.sentinel_count())\n\n job = ppg.FileGeneratingJob(of, do_write)\n ppg.run_pipegraph()\n\n assert read(of) == \"shu\"\n assert read(\"A\") == \"a\"\n ppg1_compatibility_test.new_pipegraph()\n job = ppg.FileGeneratingJob(of, do_write)\n ppg.run_pipegraph()\n assert read(of) == \"shu\" # has not been run again, for no change\n assert read(\"A\") == \"a\"\n\n ppg1_compatibility_test.new_pipegraph()\n print(\"secound round\")\n\n def do_write2():\n append(\"A\", \"b\")\n append(of, \"sha\")\n\n job = ppg.FileGeneratingJob(of, do_write2)\n job.ignore_code_changes()\n ppg.run_pipegraph()\n assert read(of) == \"sha\" # removing the invariant does trigger\n assert read(\"A\") == \"ab\"\n\n ppg1_compatibility_test.new_pipegraph()\n job = ppg.FileGeneratingJob(of, do_write2)\n ppg.run_pipegraph()\n assert read(of) == \"sha\"\n assert read(\"A\") == \"abb\" # Adding it again also triggers\n ppg.run_pipegraph()\n assert read(\"A\") == \"abb\" # no change, no trigger\n\n def test_parameter_dependency(self, 
ppg1_compatibility_test):\n of = \"out/a\"\n\n def do_write():\n append(of, \"shu\" * self.sentinel_count())\n\n job = ppg.FileGeneratingJob(of, do_write)\n param_dep = ppg.ParameterInvariant(\"myparam\", (1, 2, 3))\n job.depends_on(param_dep)\n ppg.run_pipegraph()\n assert read(of) == \"shu\"\n ppg1_compatibility_test.new_pipegraph()\n job = ppg.FileGeneratingJob(of, do_write)\n param_dep = ppg.ParameterInvariant(\"myparam\", (1, 2, 3))\n job.depends_on(param_dep)\n ppg.run_pipegraph()\n assert read(of) == \"shu\" # has not been run again...\n ppg1_compatibility_test.new_pipegraph()\n job = ppg.FileGeneratingJob(of, do_write)\n param_dep = ppg.ParameterInvariant(\"myparam\", (1, 2, 3, 4))\n job.depends_on(param_dep)\n ppg.run_pipegraph()\n assert read(of) == \"shushu\" # has been run again ;).\n\n def test_parameter_invariant_adds_hidden_job_id_prefix(self):\n param = \"A\"\n jobA = ppg.FileGeneratingJob(\"out/A\", lambda: write(\"out/A\", param))\n jobB = ppg.ParameterInvariant(\"out/A\", param)\n jobA.depends_on(jobB)\n ppg.run_pipegraph()\n assert read(\"out/A\") == param\n\n def test_depends_on_func(self):\n a = ppg.FileGeneratingJob(\"out/A\", lambda: write(\"a\"))\n b = a.depends_on_func(\"a123\", lambda: 123)\n # ppg2 adjustments\n assert b[0].job_id.startswith(\"FI\" + a.job_id)\n # assert b in a.prerequisites\n assert ppg.util.global_pipegraph.has_edge(b[0], a)\n\n def test_depends_on_file(self):\n a = ppg.FileGeneratingJob(\"out/A\", lambda: write(\"a\"))\n write(\"shu\", \"hello\")\n b = a.depends_on_file(\"shu\")\n assert ppg.util.global_pipegraph.has_edge(b[0], a)\n\n def test_depends_on_params(self):\n a = ppg.FileGeneratingJob(\"out/A\", lambda: write(\"a\"))\n b = a.depends_on_params(23)\n assert b[0].job_id == \"PIout/A\"\n assert b[0].parameters == \"f5dc163826b60f56c93be8f49df011c6\"\n assert ppg.util.global_pipegraph.has_edge(b[0], a)\n\n def test_parameter_invariant_twice_different_values(self):\n ppg.ParameterInvariant(\"a\", (1, 2, 3))\n with pytest.raises(ValueError):\n ppg.ParameterInvariant(\"a\", (1, 2, 4))\n\n @pytest.mark.skip # ppg2 does not have 'accept_as_unchanged_func on ParameterInvariant\n # if it really is a no-op-change it will terminate in the down stream jobs.\n # if it is not a no-op-change, it will correctly recalculate.\n # yes this trades brain power for computing power.\n def test_parameter_invariant_twice_different_accepts_func(self):\n def accept_as_unchanged(old):\n return True\n\n ppg.ParameterInvariant(\"a\", (1, 2, 3))\n with pytest.raises(ValueError):\n ppg.ParameterInvariant(\"a\", (1, 2, 3), accept_as_unchanged)\n\n @pytest.mark.skip # ppg2 does not have 'accept_as_unchanged_func on ParameterInvariant\n # see above\n def test_parameter_dependency_accepts_as_unchanged(self, ppg1_compatibility_test):\n write(\"out/A\", \"x\")\n job = ppg.FileGeneratingJob(\"out/A\", lambda: append(\"out/A\", \"A\"))\n p = ppg.ParameterInvariant(\"myparam\", (1, 2, 3))\n job.depends_on(p)\n ppg.run_pipegraph()\n assert read(\"out/A\") == \"A\" # invalidation unlinks!\n\n ppg1_compatibility_test.new_pipegraph()\n\n def is_prefix(new):\n def inner(old):\n write(\"inner_check\", \"yes\")\n if len(old) > len(new):\n return False\n for ii in range(len(old)):\n if new[ii] != old[ii]:\n return False\n return True\n\n return inner\n\n job = ppg.FileGeneratingJob(\"out/A\", lambda: append(\"out/A\", \"A\"))\n param = (1, 2, 3, 4)\n p = ppg.ParameterInvariant(\n \"myparam\", param, accept_as_unchanged_func=is_prefix(param)\n )\n job.depends_on(p)\n 
ppg.run_pipegraph()\n assert read(\"out/A\") == \"A\" # no change\n assert read(\"inner_check\") == \"yes\"\n\n def test_filetime_dependency(self, ppg1_compatibility_test):\n of = \"out/a\"\n\n def do_write():\n append(of, \"shu\" * self.sentinel_count())\n\n ftfn = \"out/ftdep\"\n write(ftfn, \"hello\")\n write(of, \"hello\")\n job = ppg.FileGeneratingJob(of, do_write)\n dep = ppg.FileTimeInvariant(ftfn)\n job.depends_on(dep)\n ppg.run_pipegraph()\n assert (\n read(of) == \"shu\"\n ) # job get's run though there is a file, because the FileTimeInvariant was not stored before...\n ppg1_compatibility_test.new_pipegraph()\n job = ppg.FileGeneratingJob(of, do_write)\n dep = ppg.FileTimeInvariant(ftfn)\n job.depends_on(dep)\n ppg.run_pipegraph()\n assert read(of) == \"shu\" # job does not get rerun...\n\n time.sleep(1) # so linux actually advances the file time in the next line\n write(ftfn, \"hello\") # same content, different time\n\n ppg1_compatibility_test.new_pipegraph()\n job = ppg.FileGeneratingJob(of, do_write)\n dep = ppg.FileTimeInvariant(ftfn)\n job.depends_on(dep)\n ppg.run_pipegraph()\n assert (\n read(of) == \"shu\"\n ) # job does not get rerun - filetime invariant is now filechecksum invariant...\n\n def test_filechecksum_dependency_raises_on_too_short_a_filename(self):\n assert not ppg.util.global_pipegraph.allow_short_filenames\n with pytest.raises(ValueError):\n ppg.RobustFileChecksumInvariant(\"a\")\n\n with pytest.raises(ValueError):\n ppg.RobustFileChecksumInvariant(\"sh\")\n ppg.RobustFileChecksumInvariant(\"shu\")\n\n def test_filechecksum_dependency(self, ppg1_compatibility_test):\n of = \"out/a\"\n\n def do_write():\n append(of, \"shu\" * self.sentinel_count())\n\n ftfn = \"out/ftdep\"\n write(ftfn, \"hello\")\n # import stat\n # logging.info('file time after creating %s'% os.stat(ftfn)[stat.ST_MTIME])\n\n write(of, \"hello\")\n\n job = ppg.FileGeneratingJob(of, do_write)\n dep = ppg.FileChecksumInvariant(ftfn)\n job.depends_on(dep)\n ppg.run_pipegraph()\n assert (\n read(of) == \"shu\"\n ) # job get's run though there is a file, because the FileTimeInvariant was not stored before...\n ppg1_compatibility_test.new_pipegraph()\n job = ppg.FileGeneratingJob(of, do_write)\n dep = ppg.FileChecksumInvariant(ftfn)\n job.depends_on(dep)\n ppg.run_pipegraph()\n assert read(of) == \"shu\" # job does not get rerun...\n\n time.sleep(1) # so linux actually advances the file time in the next line\n # logging.info(\"NOW REWRITE\")\n write(ftfn, \"hello\") # same content, different time\n\n ppg1_compatibility_test.new_pipegraph()\n job = ppg.FileGeneratingJob(of, do_write)\n dep = ppg.FileChecksumInvariant(ftfn)\n job.depends_on(dep)\n ppg.run_pipegraph()\n assert read(of) == \"shu\" # job does not get rerun...\n\n # time.sleep(1) #we don't care about the time, size should be enough...\n write(ftfn, \"hello world!!\") # different time\n time.sleep(1) # give the file system a second to realize the change.\n\n ppg1_compatibility_test.new_pipegraph()\n job = ppg.FileGeneratingJob(of, do_write)\n dep = ppg.FileChecksumInvariant(ftfn)\n job.depends_on(dep)\n ppg.run_pipegraph()\n assert read(of) == \"shushu\" # job does get rerun\n\n def test_robust_filechecksum_invariant(self, ppg1_compatibility_test):\n of = \"out/B\"\n\n def do_write():\n append(of, \"shu\" * self.sentinel_count())\n\n ftfn = \"out/ftdep\"\n write(ftfn, \"hello\")\n # import stat\n # logging.info('file time after creating %s'% os.stat(ftfn)[stat.ST_MTIME])\n\n write(of, \"hello\")\n\n job = ppg.FileGeneratingJob(of, 
do_write)\n dep = ppg.RobustFileChecksumInvariant(ftfn)\n job.depends_on(dep)\n ppg.run_pipegraph()\n assert (\n read(of) == \"shu\"\n ) # job get's run though there is a file, because the FileTimeInvariant was not stored before...\n\n ppg1_compatibility_test.new_pipegraph()\n job = ppg.FileGeneratingJob(of, do_write)\n dep = ppg.RobustFileChecksumInvariant(ftfn)\n job.depends_on(dep)\n ppg.run_pipegraph()\n assert read(of) == \"shu\" # job does not get rerun...\n\n os.mkdir(\"out/moved_here\")\n shutil.move(ftfn, os.path.join(\"out/moved_here\", \"ftdep\"))\n ppg1_compatibility_test.new_pipegraph()\n job = ppg.FileGeneratingJob(of, do_write)\n dep = ppg.RobustFileChecksumInvariant(os.path.join(\"out/moved_here\", \"ftdep\"))\n job.depends_on(dep)\n assert read(of) == \"shu\" # job does not get rerun...\n ppg.run_pipegraph()\n assert read(of) == \"shu\" # job does not get rerun...\n\n def test_robust_filechecksum_invariant_after_normal(self, ppg1_compatibility_test):\n of = \"out/a\"\n\n def do_write():\n append(of, \"shu\" * self.sentinel_count())\n\n ftfn = \"out/ftdep\"\n write(ftfn, \"hello\")\n # import stat\n # logging.info('file time after creating %s'% os.stat(ftfn)[stat.ST_MTIME])\n\n write(of, \"hello\")\n\n job = ppg.FileGeneratingJob(of, do_write)\n dep = ppg.FileChecksumInvariant(ftfn)\n job.depends_on(dep)\n ppg.run_pipegraph()\n assert (\n read(of) == \"shu\"\n ) # job get's run though there is a file, because the FileTimeInvariant was not stored before...\n assert read(\"out/sentinel\") == \"2\" # job does not get rerun...\n\n ppg1_compatibility_test.new_pipegraph()\n job = ppg.FileGeneratingJob(of, do_write)\n dep = ppg.FileChecksumInvariant(ftfn)\n job.depends_on(dep)\n ppg.run_pipegraph()\n assert read(of) == \"shu\" # job does not get rerun...\n assert read(\"out/sentinel\") == \"2\" # job does not get rerun...\n\n os.mkdir(\"out/moved_here\")\n shutil.move(ftfn, os.path.join(\"out/moved_here\", \"ftdep\"))\n ppg1_compatibility_test.new_pipegraph()\n job = ppg.FileGeneratingJob(of, do_write)\n dep = ppg.RobustFileChecksumInvariant(os.path.join(\"out/moved_here\", \"ftdep\"))\n job.depends_on(dep)\n assert read(of) == \"shu\" # job does not get rerun...\n assert read(\"out/sentinel\") == \"2\" # job does not get rerun...\n print(\"now it counts\")\n ppg.run_pipegraph()\n assert read(\"out/sentinel\") == \"2\" # job does not get rerun...\n assert read(of) == \"shu\" # job does not get rerun...\n\n @pytest.mark.skip # we no longer do that, opting to calculate our own improved hash instead\n def test_file_invariant_with_md5sum(self, ppg1_compatibility_test):\n of = \"out/a\"\n\n def do_write():\n append(of, \"shu\" * self.sentinel_count())\n\n ftfn = \"out/ftdep\"\n write(ftfn, \"hello\")\n # import stat\n # logging.info('file time after creating %s'% os.stat(ftfn)[stat.ST_MTIME])\n\n write(of, \"hello\")\n\n job = ppg.FileGeneratingJob(of, do_write)\n dep = ppg.FileChecksumInvariant(ftfn)\n job.depends_on(dep)\n ppg.run_pipegraph()\n assert (\n read(of) == \"shu\"\n ) # job get's run though there is a file, because the FileTimeInvariant was not stored before...\n\n with open(ftfn + \".md5sum\", \"wb\") as op:\n op.write(hashlib.md5(b\"hello world\").hexdigest().encode(\"utf-8\"))\n write(ftfn, \"hello world\") # different content\n t = time.time()\n # now make\n os.utime(ftfn, (t, t))\n os.utime(ftfn + \".md5sum\", (t, t))\n time.sleep(1) # give the file system a second to realize the change.\n\n ppg1_compatibility_test.new_pipegraph()\n job = ppg.FileGeneratingJob(of, 
do_write)\n dep = ppg.FileChecksumInvariant(ftfn)\n job.depends_on(dep)\n ppg.run_pipegraph()\n assert (\n read(of) == \"shushu\"\n ) # job get's run though there is a file, because the md5sum changed.\n\n with open(ftfn + \".md5sum\", \"wb\") as op:\n op.write(hashlib.md5(b\"hello world\").hexdigest().encode(\"utf-8\"))\n write(ftfn, \"hello\") # different content, but the md5sum is stil the same!\n t = time.time()\n # now make\n os.utime(ftfn, (t, t))\n os.utime(ftfn + \".md5sum\", (t, t))\n time.sleep(1) # give the file system a second to realize the change.\n\n ppg1_compatibility_test.new_pipegraph()\n job = ppg.FileGeneratingJob(of, do_write)\n dep = ppg.FileChecksumInvariant(ftfn)\n job.depends_on(dep)\n ppg.run_pipegraph()\n assert read(of) == \"shushu\" # job does not get rerun, md5sum did not change...\n\n t = time.time() - 100 # force a file time mismatch\n os.utime(\n ftfn, (t, t)\n ) # I must change the one on the actual file, otherwise the 'size+filetime is the same' optimization bytes me\n\n ppg1_compatibility_test.new_pipegraph()\n job = ppg.FileGeneratingJob(of, do_write)\n dep = ppg.FileChecksumInvariant(ftfn)\n job.depends_on(dep)\n ppg.run_pipegraph()\n assert (\n read(of) == \"shushushu\"\n ) # job does get rerun, md5sum and file time mismatch\n assert os.stat(ftfn)[stat.ST_MTIME] == os.stat(ftfn + \".md5sum\")[stat.ST_MTIME]\n\n def test_invariant_dumping_on_job_failure(self, ppg1_compatibility_test):\n ppg1_compatibility_test.new_pipegraph(log_level=6)\n\n def w():\n write(\"out/A\", \"A\")\n append(\"out/B\", \"B\")\n\n def func_c():\n append(\"out/C\", \"C\")\n\n func_dep = ppg.FunctionInvariant(\"func_c\", func_c)\n fg = ppg.FileGeneratingJob(\"out/A\", w)\n fg.ignore_code_changes() # no auto invariants for this test...\n fg.depends_on(func_dep)\n ppg.run_pipegraph()\n # ppg2 assert func_dep.was_invalidated\n # ppg2 assert fg.was_invalidated\n assert read(\"out/A\") == \"A\"\n assert read(\"out/B\") == \"B\"\n ppg1_compatibility_test.new_pipegraph()\n\n def func_c1():\n append(\"out/C\", \"D\")\n\n def w2():\n raise ValueError() # so there is an error in a job...\n\n func_dep = ppg.FunctionInvariant(\"func_c\", func_c1) # so this invariant changes\n fg = ppg.FileGeneratingJob(\"out/A\", w2) # and this job crashes\n fg.ignore_code_changes() # no auto invariants for this test...\n fg.depends_on(func_dep)\n try:\n ppg.run_pipegraph()\n except ppg.RuntimeError:\n pass\n # ppg2 assert func_dep.was_invalidated\n # ppg2 assert fg.was_invalidated\n assert not (os.path.exists(\"out/A\")) # since it was removed, and not recreated\n assert read(\"out/B\") == \"B\"\n ppg1_compatibility_test.new_pipegraph()\n func_dep = ppg.FunctionInvariant(\n \"func_c\", func_c1\n ) # no invariant change this time\n fg = ppg.FileGeneratingJob(\"out/A\", w) # but this was not done the last time...\n fg.ignore_code_changes() # no auto invariants for this test...\n fg.depends_on(func_dep)\n ppg.run_pipegraph()\n # ppg2 assert not (func_dep.was_invalidated) # not invalidated\n # ppg2 assert fg.was_invalidated # yeah\n assert read(\"out/A\") == \"A\"\n assert read(\"out/B\") == \"BB\"\n\n @pytest.mark.skip # ppg1 implementation internals test, ppg2 must do it's own testing\n def test_invariant_dumping_on_graph_exception(self, ppg1_compatibility_test):\n # when an exception occurs not within a job\n # but within the pipegraph itself (e.g. 
when the user hit's CTRL-C\n # which we simulate here\n # compatibility layer does not support subclassing\n import pypipegraph2 as ppg2\n\n class ExplodingJob(ppg2.FileGeneratingJob):\n def __setattr__(self, name, value):\n if (\n name == \"stdout\"\n and value is not None\n and hasattr(self, \"do_explode\")\n and self.do_explode\n ):\n raise KeyboardInterrupt(\"simulated\")\n else:\n self.__dict__[name] = value\n\n def w(of):\n write(of, \"A\")\n append(\"out/B\", \"B\")\n\n def func_c():\n append(\"out/C\", \"C\")\n\n func_dep = ppg.FunctionInvariant(\"func_c\", func_c)\n fg = ExplodingJob(\"out/A\", w, depend_on_function=False)\n # ppg2 fg.ignore_code_changes() # no auto invariants for this test...\n fg.depends_on(func_dep)\n ppg.run_pipegraph()\n # assert func_dep.was_invalidated\n # assert fg.was_invalidated\n assert read(\"out/A\") == \"A\"\n assert read(\"out/B\") == \"B\"\n ppg1_compatibility_test.new_pipegraph()\n\n def func_c1():\n append(\"out/C\", \"D\")\n\n def w2(of):\n raise ValueError() # so there is an error in a job...\n\n func_dep = ppg.FunctionInvariant(\"func_c\", func_c1) # so this invariant changes\n fg = ExplodingJob(\"out/A\", w2, depend_on_function=False) # and this job crashes\n fg.do_explode = True\n # ppg2 #fg.ignore_code_changes() # no auto invariants for this test...\n fg.depends_on(func_dep)\n ki_raised = False\n try:\n ppg.run_pipegraph()\n except ppg.RuntimeError:\n pass\n except KeyboardInterrupt: # we expect this to be raised\n ki_raised = True\n pass\n if not ki_raised:\n raise ValueError(\"KeyboardInterrupt was not raised\")\n assert func_dep.was_invalidated\n assert fg.was_invalidated\n assert not (os.path.exists(\"out/A\")) # since it was removed, and not recreated\n assert read(\"out/B\") == \"B\"\n ppg1_compatibility_test.new_pipegraph()\n func_dep = ppg.FunctionInvariant(\n \"func_c\", func_c1\n ) # no invariant change this time\n fg = ExplodingJob(\"out/A\", w) # but this was not done the last time...\n fg.ignore_code_changes() # no auto invariants for this test...\n fg.depends_on(func_dep)\n ppg.run_pipegraph()\n assert not (func_dep.was_invalidated) # not invalidated\n assert fg.was_invalidated # yeah\n assert read(\"out/A\") == \"A\"\n assert read(\"out/B\") == \"BB\"\n\n @pytest.mark.skip # ppg1 implementation internal, no longer releevant to ppg2\n def test_job_not_setting_invalidated_after_was_invalidated_raises(self):\n class BadJob(ppg.FileGeneratingJob):\n def invalidated(self, reason):\n pass\n\n BadJob(\"out/A\", lambda: write(\"out/A\", \"a\"))\n with pytest.raises(ppg.JobContractError):\n ppg.run_pipegraph()\n\n def test_FileTimeInvariant_cant_have_dependencies(self):\n # invariants are always roots of the DAG - they can't have any dependencies themselves\n write(\"out/shu\", \"shu\")\n job = ppg.FileTimeInvariant(\"out/shu\")\n jobB = ppg.FileGeneratingJob(\"out/A\", lambda: write(\"out/A\", \"a\"))\n\n def inner():\n job.depends_on(jobB)\n\n assertRaises(ppg.JobContractError, inner)\n\n def test_FileChecksumInvariant_cant_have_dependencies(self):\n # invariants are always roots of the DAG - they can't have any dependencies themselves\n write(\"out/shu\", \"shu\")\n job = ppg.FileChecksumInvariant(\"out/shu\")\n jobB = ppg.FileGeneratingJob(\"out/A\", lambda: write(\"out/A\", \"a\"))\n\n def inner():\n job.depends_on(jobB)\n\n assertRaises(ppg.JobContractError, inner)\n\n def test_ParameterInvariant_cant_have_dependencies(self):\n # invariants are always roots of the DAG - they can't have any dependencies themselves\n job = 
ppg.ParameterInvariant(\"out/shu\", (\"123\",))\n jobB = ppg.FileGeneratingJob(\"out/A\", lambda: write(\"out/A\", \"a\"))\n\n def inner():\n job.depends_on(jobB)\n\n assertRaises(ppg.JobContractError, inner)\n\n @pytest.mark.skip # ppg2 no longer uses pickle, but hashing\n def test_unpickable_raises(self):\n class Unpickable(object):\n def __getstate__(self):\n raise ValueError(\"SHU\")\n\n ppg.ParameterInvariant(\"a\", (Unpickable(), \"shu\"))\n\n def inner():\n ppg.run_pipegraph()\n\n assertRaises(ValueError, inner)\n\n def test_invariant_loading_issues_on_value_catastrophic(self):\n a = ppg.DataLoadingJob(\"a\", lambda: 5)\n b = ppg.FileGeneratingJob(\"out/b\", lambda: write(\"out/b\", \"b\"))\n b.ignore_code_changes()\n b.depends_on(a)\n write(\"out/b\", \"a\")\n import pickle\n\n ppg.util.global_pipegraph.get_history_filename().parent.mkdir(\n exist_ok=True, parents=True\n )\n with gzip.GzipFile(\n ppg.util.global_pipegraph.get_history_filename(), \"wb\"\n ) as op:\n pickle.dump(a.job_id, op, pickle.HIGHEST_PROTOCOL)\n op.write(b\"This breaks\")\n with pytest.raises(ppg.PyPipeGraphError):\n ppg.run_pipegraph()\n assert read(\"out/b\") == \"a\" # job was not run\n\n def test_invariant_loading_issues_on_value_undepickableclass(self):\n import tempfile\n import pickle\n\n ppg.util.global_pipegraph.quiet = False\n\n # make sure Undepickable is Undepickable\n with tempfile.TemporaryFile(\"wb+\") as tf:\n o = Undepickable()\n pickle.dump(o, tf, pickle.HIGHEST_PROTOCOL)\n with pytest.raises(pickle.UnpicklingError):\n tf.seek(0, 0)\n pickle.load(tf)\n\n a = ppg.ParameterInvariant(\"a\", 5)\n b = ppg.FileGeneratingJob(\"out/b\", lambda: write(\"out/b\", \"b\"))\n b.ignore_code_changes()\n c = ppg.ParameterInvariant(\"c\", 23)\n b.depends_on(a)\n write(\"out/b\", \"a\")\n\n # ppg2\n ppg.util.global_pipegraph.get_history_filename().parent.mkdir(\n exist_ok=True, parents=True\n )\n with gzip.GzipFile(\n ppg.util.global_pipegraph.get_history_filename(), \"wb\"\n ) as op:\n pickle.dump(a.job_id, op, pickle.HIGHEST_PROTOCOL)\n pickle.dump(Undepickable(), op, pickle.HIGHEST_PROTOCOL)\n pickle.dump(c.job_id, op, pickle.HIGHEST_PROTOCOL)\n pickle.dump(\n ({\"a\": 23}, {\"c\": 23}), op, pickle.HIGHEST_PROTOCOL\n ) # ppg2 expects inputs, outputs\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n assert read(\"out/b\") == \"b\" # job was run\n assert a.job_id in ppg.util.global_pipegraph.invariant_loading_issues\n assert ppg.util.global_pipegraph._load_history()[\"PIc\"] == (\n {},\n {\"PIc\": \"f5dc163826b60f56c93be8f49df011c6\"},\n )\n\n def test_invariant_loading_issues_on_key(self):\n a = ppg.DataLoadingJob(\"a\", lambda: 5)\n b = ppg.FileGeneratingJob(\"out/b\", lambda: write(\"out/b\", \"b\"))\n b.ignore_code_changes()\n b.depends_on(a)\n write(\"out/b\", \"a\")\n\n # ppg2\n ppg.util.global_pipegraph.get_history_filename().parent.mkdir(\n exist_ok=True, parents=True\n )\n with gzip.GzipFile(\n ppg.util.global_pipegraph.get_history_filename(), \"wb\"\n ) as op:\n op.write(b\"key breaks already\")\n op.write(b\"This breaks\")\n with pytest.raises(ppg.PyPipeGraphError):\n ppg.run_pipegraph()\n assert read(\"out/b\") == \"a\" # job was not run\n\n\[email protected](\"ppg1_compatibility_test\")\nclass TestFunctionInvariant:\n # most of the function invariant testing is handled by other test classes.\n # but these are more specialized.\n\n def test_generator_expressions(self):\n import pypipegraph2 as ppg2\n\n def get_func(r):\n def shu():\n return sum(i + 0 for i in r)\n\n return shu\n\n 
def get_func2(r):\n def shu():\n return sum(i + 0 for i in r)\n\n return shu\n\n def get_func3(r):\n def shu():\n return sum(i + 1 for i in r)\n\n return shu\n\n a = ppg.FunctionInvariant(\"a\", get_func(100))\n b = ppg.FunctionInvariant(\n \"b\", get_func2(100)\n ) # that invariant should be the same\n c = ppg.FunctionInvariant(\n \"c\", get_func3(100)\n ) # and this invariant should be different\n av = a.run(None, None)[\"FIa\"][ppg2.jobs.python_version]\n bv = b.run(None, None)[\"FIb\"][ppg2.jobs.python_version]\n cv = c.run(None, None)[\"FIc\"][ppg2.jobs.python_version]\n assert a.run(None, None)\n assert bv == av\n assert not (av == cv)\n\n def test_lambdas(self):\n import pypipegraph2 as ppg2\n\n def get_func(x):\n def inner():\n arg = lambda y: x + x + x # noqa:E731\n return arg(1)\n\n return inner\n\n def get_func2(x):\n def inner():\n arg = lambda y: x + x + x # noqa:E731\n return arg(1)\n\n return inner\n\n def get_func3(x):\n def inner():\n arg = lambda y: x + x # noqa:E731\n return arg(1)\n\n return inner\n\n a = ppg.FunctionInvariant(\"a\", get_func(100))\n b = ppg.FunctionInvariant(\n \"b\", get_func2(100)\n ) # that invariant should be the same\n c = ppg.FunctionInvariant(\n \"c\", get_func3(100)\n ) # and this invariant should be different\n av = a.run(None, None)[\"FIa\"][ppg2.jobs.python_version]\n bv = b.run(None, None)[\"FIb\"][ppg2.jobs.python_version]\n cv = c.run(None, None)[\"FIc\"][ppg2.jobs.python_version]\n\n self.maxDiff = 20000\n assert av\n assert bv == av\n assert not (av == cv)\n\n def test_inner_functions(self):\n import pypipegraph2 as ppg2\n\n def get_func(x):\n def inner():\n return 23\n\n return inner\n\n def get_func2(x):\n def inner():\n return 23\n\n return inner\n\n def get_func3(x):\n def inner():\n return 23 + 5\n\n return inner\n\n a = ppg.FunctionInvariant(\"a\", get_func(100))\n b = ppg.FunctionInvariant(\n \"b\", get_func2(100)\n ) # that invariant should be the same\n c = ppg.FunctionInvariant(\n \"c\", get_func3(100)\n ) # and this invariant should be different\n av = a.run(None, None)[\"FIa\"][ppg2.jobs.python_version]\n bv = b.run(None, None)[\"FIb\"][ppg2.jobs.python_version]\n cv = c.run(None, None)[\"FIc\"][ppg2.jobs.python_version]\n assert av\n assert bv == av\n assert not (av == cv)\n\n def test_nested_inner_functions(self):\n import pypipegraph2 as ppg2\n\n def get_func(xv):\n def inner():\n def shu():\n return 23\n\n return shu\n\n return inner\n\n def get_func2(x):\n def inner():\n def shu():\n return 23\n\n return shu\n\n return inner\n\n def get_func3(x):\n def inner():\n def shu():\n return 23 + 5\n\n return shu\n\n return inner\n\n a = ppg.FunctionInvariant(\"a\", get_func(100))\n b = ppg.FunctionInvariant(\n \"b\", get_func2(100)\n ) # that invariant should be the same\n c = ppg.FunctionInvariant(\n \"c\", get_func3(100)\n ) # and this invariant should be different\n av = a.run(None, None)[\"FIa\"][ppg2.jobs.python_version]\n bv = b.run(None, None)[\"FIb\"][ppg2.jobs.python_version]\n cv = c.run(None, None)[\"FIc\"][ppg2.jobs.python_version]\n assert av\n assert bv == av\n assert not (av == cv) # constat value is different\n\n def test_inner_functions_with_parameters(self):\n import pypipegraph2 as ppg2\n\n def get_func(x):\n def inner():\n return x\n\n return inner\n\n a = ppg.FunctionInvariant(\"a\", get_func(100))\n b = ppg.FunctionInvariant(\n \"b\", get_func(100)\n ) # that invariant should be the same\n c = ppg.FunctionInvariant(\n \"c\", get_func(2000)\n ) # and this invariant should be different\n av = a.run(None, 
None)[\"FIa\"][ppg2.jobs.python_version]\n bv = b.run(None, None)[\"FIb\"][ppg2.jobs.python_version]\n cv = c.run(None, None)[\"FIc\"][ppg2.jobs.python_version]\n assert av\n assert bv == av\n assert not (av == cv)\n\n def test_passing_non_function_raises(self):\n def inner():\n ppg.FunctionInvariant(\"out/a\", \"shu\")\n\n # assertRaises(ValueError, inner)\n # ppg2\n assertRaises(TypeError, inner)\n\n def test_passing_none_as_function_is_ok(self):\n job = ppg.FunctionInvariant(\"out/a\", None)\n jobB = ppg.FileGeneratingJob(\"out/A\", lambda: write(\"out/A\", \"A\"))\n jobB.depends_on(job)\n ppg.run_pipegraph()\n assert read(\"out/A\") == \"A\"\n\n def test_passing_non_string_as_jobid(self):\n def inner():\n ppg.FunctionInvariant(5, lambda: 1)\n\n assertRaises(TypeError, inner)\n\n def test_cant_have_dependencies(self):\n # invariants are always roots of the DAG - they can't have any dependencies themselves\n def shu():\n pass\n\n job = ppg.FunctionInvariant(\"shu\", shu)\n jobB = ppg.FileGeneratingJob(\"out/A\", lambda: write(\"out/A\", \"a\"))\n\n def inner():\n job.depends_on(jobB)\n\n assertRaises(ppg.JobContractError, inner)\n\n def test_raises_on_duplicate_with_different_functions(self):\n import pypipegraph2 as ppg2\n\n def shu():\n return \"a\"\n\n ppg.FunctionInvariant(\"A\", shu)\n ppg.FunctionInvariant(\"A\", shu) # ok.\n # with pytest.raises(ppg.JobContractError):\n # ppg2\n with pytest.raises(ppg2.JobRedefinitionError):\n ppg.FunctionInvariant(\"A\", lambda: \"b\") # raises ValueError\n\n def sha():\n def shu():\n return \"b\"\n\n return shu\n\n ppg.FunctionInvariant(\"B\", sha())\n ppg.FunctionInvariant(\"B\", sha())\n\n def test_instance_functions_ok(self, ppg1_compatibility_test):\n class shu:\n def __init__(self, letter):\n self.letter = letter\n\n def get_job(self):\n job = ppg.FileGeneratingJob(\n \"out/\" + self.letter, lambda: append(\"out/\" + self.letter, \"A\")\n )\n job.depends_on(ppg.FunctionInvariant(\"shu.sha\", self.sha))\n return job\n\n def sha(self):\n return 55 * 23\n\n x = shu(\"A\")\n x.get_job()\n ppg.run_pipegraph()\n assert read(\"out/A\") == \"A\"\n append(\"out/A\", \"A\")\n\n ppg1_compatibility_test.new_pipegraph(dump_graph=False)\n x.get_job()\n y = shu(\"B\")\n j1 = y.get_job()\n j2 = y.get_job()\n assert j1.generating_function is j2.generating_function\n\n def test_invariant_build_in_function(self):\n a = ppg.FunctionInvariant(\"test\", sorted)\n a.run(None, None)\n\n def test_buildin_function(self):\n a = ppg.FunctionInvariant(\"a\", open)\n assert \"<built-in\" in str(a)\n\n def test_function_invariant_non_function(self):\n class CallMe:\n def __call__(self):\n raise ValueError()\n\n a = ppg.FunctionInvariant(\"a\", CallMe)\n with pytest.raises(ValueError):\n a.run(None, None)\n\n def test_closure_capturing(self):\n import pypipegraph2 as ppg2\n\n def func(da_list):\n def f():\n return da_list\n\n return f\n\n a = ppg.FunctionInvariant(\"a\", func([1, 2, 3]))\n b = ppg.FunctionInvariant(\n \"b\", func([1, 2, 3])\n ) # that invariant should be the same\n c = ppg.FunctionInvariant(\n \"c\", func([1, 2, 3, 4])\n ) # and this invariant should be different\n av = a.run(None, None)[\"FIa\"][ppg2.jobs.python_version]\n bv = b.run(None, None)[\"FIb\"][ppg2.jobs.python_version]\n cv = c.run(None, None)[\"FIc\"][ppg2.jobs.python_version]\n assert av\n\n assert bv == av\n assert not (av == cv)\n\n def test_function_to_str_builtin(self):\n assert ppg.job.function_to_str(open) == \"<built-in function open>\"\n\n def test_closure_capturing_dict(self):\n 
import pypipegraph2 as ppg2\n\n def func(da_list):\n def f():\n return da_list\n\n return f\n\n a = ppg.FunctionInvariant(\"a\", func({\"1\": \"a\", \"3\": \"b\", \"2\": \"c\"}))\n b = ppg.FunctionInvariant(\n \"b\", func({\"1\": \"a\", \"3\": \"b\", \"2\": \"c\"})\n ) # that invariant should be the same\n c = ppg.FunctionInvariant(\n \"c\", func({\"1\": \"a\", \"3\": \"b\", \"2\": \"d\"})\n ) # and this invariant should be different\n av = a.run(None, None)[\"FIa\"][ppg2.jobs.python_version]\n bv = b.run(None, None)[\"FIb\"][ppg2.jobs.python_version]\n cv = c.run(None, None)[\"FIc\"][ppg2.jobs.python_version]\n assert av\n assert bv == av\n assert not (av == cv)\n\n def test_closure_capturing_set(self):\n import pypipegraph2 as ppg2\n\n def func(da_list):\n def f():\n return da_list\n\n return f\n\n import random\n\n x = set([\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\"])\n a = ppg.FunctionInvariant(\"a\", func(x))\n x2 = list(x)\n random.shuffle(x2)\n x2 = set(x2)\n b = ppg.FunctionInvariant(\"b\", func(x2)) # that invariant should be the same\n c = ppg.FunctionInvariant(\n \"c\", func({\"3\", \"2\"})\n ) # and this invariant should be different\n av = a.run(None, None)[\"FIa\"][ppg2.jobs.python_version]\n bv = b.run(None, None)[\"FIb\"][ppg2.jobs.python_version]\n cv = c.run(None, None)[\"FIc\"][ppg2.jobs.python_version]\n assert av\n assert bv == av\n assert not (av == cv)\n\n def test_closure_capturing_frozen_set(self):\n import pypipegraph2 as ppg2\n\n def func(da_list):\n def f():\n return da_list\n\n return f\n\n import random\n\n x = frozenset([\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\"])\n a = ppg.FunctionInvariant(\"a\", func(x))\n x2 = list(x)\n random.shuffle(x2)\n x2 = frozenset(x2)\n b = ppg.FunctionInvariant(\"b\", func(x2)) # that invariant should be the same\n c = ppg.FunctionInvariant(\n \"c\", func(frozenset({\"3\", \"2\"}))\n ) # and this invariant should be different\n av = a.run(None, None)[\"FIa\"][ppg2.jobs.python_version]\n bv = b.run(None, None)[\"FIb\"][ppg2.jobs.python_version]\n cv = c.run(None, None)[\"FIc\"][ppg2.jobs.python_version]\n assert av\n assert bv == av\n assert not (av == cv)\n\n @pytest.mark.skip # ppg2 does it's own smart thing - no need to test here\n # @pytest.mark.xfail was marked fail\n def test_invariant_caching(self):\n\n a = ppg.FunctionInvariant(\"a\", ppg.inside_ppg)\n old_dis = a.dis_code\n counter = [0]\n\n def new_dis(*args, **kwargs):\n counter[0] += 1\n return old_dis(*args, **kwargs)\n\n a.dis_code = new_dis\n # round 0 - everything needs to be calculated\n assert len(ppg.util.global_pipegraph.func_hashes) == 0\n iv1 = a._get_invariant(False, [])\n assert counter[0] == 1\n assert len(ppg.util.global_pipegraph.func_hashes) == 1\n assert len(ppg.util.global_pipegraph.file_hashes) == 0\n\n # same function again - no new calc\n iv2 = a._get_invariant(False, [])\n assert iv1 == iv2\n assert counter[0] == 1\n\n # we lost the function hash, and were passed false:\n ppg.util.global_pipegraph.func_hashes.clear()\n iv3 = a._get_invariant(False, [])\n assert iv3 == iv2\n assert counter[0] == 2\n assert len(ppg.util.global_pipegraph.func_hashes) == 1\n\n # we lost the function hash - but were passed an old tuple\n # with matching file hash\n ppg.util.global_pipegraph.func_hashes.clear()\n iv3b = a._get_invariant(iv3, [])\n assert iv3 is iv3b\n assert counter[0] == 2\n assert len(ppg.util.global_pipegraph.func_hashes) == 0\n ppg.util.global_pipegraph.func_hashes.clear()\n\n # now let's simulate the file changing\n 
faked_iv3 = (\"aa\",) + iv3[1:]\n ppg.util.global_pipegraph.func_hashes.clear()\n with pytest.raises(ppg.NothingChanged) as e:\n a._get_invariant(faked_iv3, [])\n iv4 = e.value.new_value\n assert iv4[2:] == iv3[2:]\n assert len(ppg.util.global_pipegraph.func_hashes) == 1\n assert counter[0] == 3\n assert (\n len(ppg.util.global_pipegraph.file_hashes) == 0\n ) # we still used the the function.__code__.co_filename\n\n # now let's simulate the line no changing.\n faked_iv3 = (iv3[0],) + (1,) + iv3[2:]\n ppg.util.global_pipegraph.func_hashes.clear()\n with pytest.raises(ppg.NothingChanged) as e:\n a._get_invariant(faked_iv3, [])\n iv5 = e.value.new_value\n assert iv5[2:] == iv3[2:]\n assert len(ppg.util.global_pipegraph.func_hashes) == 1\n assert counter[0] == 4\n assert (\n len(ppg.util.global_pipegraph.file_hashes) == 0\n ) # we still used the the function.__code__.co_filename\n\n # and now, going from the old to the new...\n old = iv1[2] + iv1[3]\n with pytest.raises(ppg.NothingChanged) as e:\n a._get_invariant(old, [])\n assert e.value.new_value == iv1\n\n # and last but not least let's test the closure based seperation\n ppg.util.global_pipegraph.func_hashes.clear()\n ppg.util.global_pipegraph.file_hashes.clear()\n\n def capture(x):\n def inner():\n return 5 + x\n\n return inner\n\n b = ppg.FunctionInvariant(\"x5\", capture(5))\n c = ppg.FunctionInvariant(\"x10\", capture(10))\n ivb = b._get_invariant(False, [])\n assert len(ppg.util.global_pipegraph.func_hashes) == 1\n ivc = c._get_invariant(False, [])\n # no recalc - reuse the one from the previous function\n assert len(ppg.util.global_pipegraph.func_hashes) == 1\n assert ivb[:3] == ivc[:3]\n\n @pytest.mark.skip # no longer relevant - ppg2 needs to recalc ppg1 projects anyhow\n def test_37_dis_changes(self):\n # starting with python 3.7\n # dis can go into functions - we used to do this manually.\n # unfortunatly, we ran some projects before we discovered this\n # so let's see if we can get this fixed...\n\n if sys.version_info >= (3, 7):\n\n def shu(x):\n return lambda: x + 5\n\n source = \"(x):\\n return lambda: x + 5\"\n a = ppg.FunctionInvariant(\"shu\", shu)\n old = {\n \"source\": source,\n str((3, 6)): a.dis_code(shu.__code__, shu, (3, 6, 1)),\n }\n expected_new = old.copy()\n expected_new[\"_version\"] = 3\n expected_new[str(sys.version_info[:2])] = (\n a.dis_code(shu.__code__, shu, sys.version_info),\n \"\",\n )\n assert expected_new != old\n with pytest.raises(ppg.NothingChanged) as e:\n a.run(None, None)\n assert e.value.new_value == expected_new\n del old[\"source\"]\n res = a._get_invariant(old, [])\n assert res == expected_new\n\n\[email protected](\"ppg1_compatibility_test\")\nclass TestMultiFileInvariant:\n def test_input_checking(self):\n with pytest.raises(TypeError):\n ppg.MultiFileInvariant(\"out/A\", lambda: write(\"out/A\", \"A\"))\n with pytest.raises(TypeError):\n ppg.MultiFileInvariant(34, lambda: write(\"out/A\", \"A\"))\n with pytest.raises(TypeError):\n alist = [\"out/A\", \"out/B\"]\n ppg.MultiFileInvariant((x for x in alist), lambda: write(\"out/A\", \"A\"))\n # with pytest.raises(ValueError):\n # ppg2\n with pytest.raises(TypeError):\n ppg.MultiFileInvariant([\"out/A\", \"out/A\"], lambda: write(\"out/A\", \"A\"))\n with pytest.raises(TypeError):\n ppg.MultiFileInvariant([], lambda: write(\"out/A\", \"A\"))\n\n @pytest.mark.skip # ppg1 implementation internals no longer relevant to ppg2\n def test_new_raises_unchanged(self):\n write(\"out/a\", \"hello\")\n write(\"out/b\", \"world\")\n jobA = 
ppg.MultiFileInvariant([\"out/a\", \"out/b\"])\n\n def inner():\n jobA.run(None, None)\n\n assertRaises(ppg.NothingChanged, inner)\n\n @pytest.mark.skip # ppg1 implementation internals no longer relevant to ppg2\n def test_no_raise_on_no_change(self):\n write(\"out/a\", \"hello\")\n write(\"out/b\", \"world\")\n jobA = ppg.MultiFileInvariant([\"out/a\", \"out/b\"])\n try:\n jobA.run(None, None)\n self.fail(\"should not be reached\")\n except ppg.NothingChanged as e:\n cs = e.new_value\n try:\n jobA.get_invariant(cs, {jobA.job_id: cs})\n self.fail(\"should not be reached\")\n except ppg.NothingChanged as e:\n cs2 = e.new_value\n assert cs2 == cs\n\n @pytest.mark.skip # ppg1 implementation internals no longer relevant to ppg2\n def test_filetime_changed_contents_the_same(self):\n write(\"out/a\", \"hello\")\n write(\"out/b\", \"world\")\n jobA = ppg.MultiFileInvariant([\"out/a\", \"out/b\"])\n try:\n jobA.get_invariant(False, {})\n self.fail(\"should not be reached\")\n except ppg.NothingChanged as e:\n cs = e.new_value\n subprocess.check_call([\"touch\", \"--date=2004-02-29\", \"out/b\"])\n try:\n jobA.get_invariant(cs, {jobA.job_id: cs})\n self.fail(\"should not be reached\")\n except ppg.NothingChanged as e:\n cs2 = e.new_value\n assert not (cs2 == cs)\n assert not ([x[1] for x in cs2] == [x[1] for x in cs]) # times changed\n assert [x[2] for x in cs2] == [x[2] for x in cs] # sizes did not\n assert [x[3] for x in cs2] == [x[3] for x in cs]\n\n @pytest.mark.skip # ppg1 implementation internals no longer relevant to ppg2\n def test_changed_file(self):\n write(\"out/a\", \"hello\")\n write(\"out/b\", \"world\")\n jobA = ppg.MultiFileInvariant([\"out/a\", \"out/b\"])\n try:\n jobA.get_invariant(False, {})\n self.fail(\"should not be reached\")\n except ppg.NothingChanged as e:\n cs = e.new_value\n write(\"out/b\", \"world!\")\n cs2 = jobA.get_invariant(cs, {jobA.job_id: cs})\n assert not (cs2 == cs)\n assert [x[0] for x in cs2] == [x[0] for x in cs] # file names the same\n # assert not ( [x[1] for x in cs2] == [x[1] for x in cs]) # don't test times, might not have changed\n assert not ([x[2] for x in cs2] == [x[2] for x in cs]) # sizes changed\n assert not ([x[3] for x in cs2] == [x[2] for x in cs]) # checksums changed\n\n @pytest.mark.skip # ppg1 implementation internals no longer relevant to ppg2\n def test_changed_file_same_size(self):\n write(\"out/a\", \"hello\")\n write(\"out/b\", \"world\")\n jobA = ppg.MultiFileInvariant([\"out/a\", \"out/b\"])\n try:\n jobA.get_invariant(False, {})\n self.fail(\"should not be reached\")\n except ppg.NothingChanged as e:\n cs = e.new_value\n time.sleep(2) # must be certain we have a changed filetime!\n write(\"out/b\", \"worlt\")\n cs2 = jobA.get_invariant(cs, {jobA.job_id: cs})\n assert not (cs2 == cs)\n assert [x[0] for x in cs2] == [x[0] for x in cs] # file names the same\n assert [x[2] for x in cs2] == [x[2] for x in cs] # sizes the same\n assert not ([x[3] for x in cs2] == [x[2] for x in cs]) # checksums changed\n\n @pytest.mark.skip # ppg1 implementation internals no longer relevant to ppg2\n def test_rehome_no_change(self):\n write(\"out/a\", \"hello\")\n write(\"out/b\", \"world\")\n jobA = ppg.MultiFileInvariant([\"out/a\", \"out/b\"])\n try:\n jobA.get_invariant(False, {})\n self.fail(\"should not be reached\")\n except ppg.NothingChanged as e:\n cs = e.new_value\n try:\n jobA.get_invariant(cs, {jobA.job_id: cs})\n self.fail(\"should not be reached\")\n except ppg.NothingChanged as e:\n cs2 = e.new_value\n assert cs2 == cs\n 
os.makedirs(\"out2\")\n write(\"out2/a\", \"hello\")\n write(\"out2/b\", \"world\")\n jobB = ppg.MultiFileInvariant([\"out2/a\", \"out2/b\"])\n\n def inner():\n jobB.get_invariant(False, {jobA.job_id: cs})\n\n assertRaises(ppg.NothingChanged, inner)\n\n @pytest.mark.skip # ppg1 implementation internals no longer relevant to ppg2\n def test_rehome_and_change(self):\n write(\"out/a\", \"hello\")\n write(\"out/b\", \"world\")\n jobA = ppg.MultiFileInvariant([\"out/a\", \"out/b\"])\n try:\n jobA.get_invariant(False, {})\n self.fail(\"should not be reached\")\n except ppg.NothingChanged as e:\n cs = e.new_value\n try:\n jobA.get_invariant(cs, {jobA.job_id: cs})\n self.fail(\"should not be reached\")\n except ppg.NothingChanged as e:\n cs2 = e.new_value\n assert cs2 == cs\n os.makedirs(\"out2\")\n write(\"out2/a\", \"hello\")\n write(\"out2/b\", \"worl!x\") # either change the length, or wait 2 seconds...\n jobB = ppg.MultiFileInvariant([\"out2/a\", \"out2/b\"])\n cs3 = jobB.get_invariant(False, {jobA.job_id: cs})\n assert not ([x[3] for x in cs2] == [x[2] for x in cs3]) # checksums changed\n\n def test_non_existant_file_raises(self):\n # ppg2 does not raise until run.\n mfi = ppg.MultiFileInvariant([\"out/a\"])\n ppg.FileGeneratingJob(\"out/B\", lambda of: of.write(\"b\")).depends_on(mfi)\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n\n @pytest.mark.skip # ppg1 implementation internals no longer relevant to ppg2\n def test_rehome_and_additional_file(self):\n write(\"out/a\", \"hello\")\n write(\"out/b\", \"world\")\n jobA = ppg.MultiFileInvariant([\"out/a\", \"out/b\"])\n try:\n jobA.get_invariant(False, {})\n self.fail(\"should not be reached\")\n except ppg.NothingChanged as e:\n cs = e.new_value\n try:\n jobA.get_invariant(cs, {jobA.job_id: cs})\n self.fail(\"should not be reached\")\n except ppg.NothingChanged as e:\n cs2 = e.new_value\n assert cs2 == cs\n os.makedirs(\"out2\")\n write(\"out2/a\", \"hello\")\n write(\"out2/b\", \"world\")\n write(\"out2/c\", \"worl!x\") # either change the length, or wait 2 seconds...\n jobB = ppg.MultiFileInvariant([\"out2/a\", \"out2/b\", \"out2/c\"])\n cs3 = jobB.get_invariant(False, {jobA.job_id: cs})\n assert not ([x[3] for x in cs2] == [x[2] for x in cs3]) # checksums changed\n\n @pytest.mark.skip # ppg1 implementation internals no longer relevant to ppg2\n def test_rehome_and_missing_file(self):\n write(\"out/a\", \"hello\")\n write(\"out/b\", \"world\")\n jobA = ppg.MultiFileInvariant([\"out/a\", \"out/b\"])\n try:\n jobA.get_invariant(False, {})\n self.fail(\"should not be reached\")\n except ppg.NothingChanged as e:\n cs = e.new_value\n try:\n jobA.get_invariant(cs, {jobA.job_id: cs})\n self.fail(\"should not be reached\")\n except ppg.NothingChanged as e:\n cs2 = e.new_value\n assert cs2 == cs\n os.makedirs(\"out2\")\n write(\"out2/a\", \"hello\")\n jobB = ppg.MultiFileInvariant([\"out2/a\"])\n cs3 = jobB.get_invariant(False, {jobA.job_id: cs})\n assert not ([x[3] for x in cs2] == [x[2] for x in cs3]) # checksums changed\n\n def test_rehome_same_filenames_gives_up(self, ppg1_compatibility_test):\n from pathlib import Path\n\n write(\"out/counter\", \"0\")\n Path(\"out/A\").mkdir()\n Path(\"out/B\").mkdir()\n Path(\"out/C\").mkdir()\n Path(\"out/D\").mkdir()\n write(\"out/A/A\", \"hello\")\n write(\"out/B/A\", \"world\")\n jobA = ppg.MultiFileInvariant([\"out/A/A\", \"out/B/A\"])\n\n def of():\n append(\"out/counter\", \"x\")\n write(\"out/x\", \"ok\")\n\n jobB = ppg.FileGeneratingJob(\"out/x\", of)\n jobB.depends_on(jobA)\n 
ppg.run_pipegraph()\n assert read(\"out/counter\") == \"0x\"\n ppg1_compatibility_test.new_pipegraph()\n shutil.move(\"out/A/A\", \"out/C/A\")\n shutil.move(\"out/B/A\", \"out/D/A\")\n jobA = ppg.MultiFileInvariant([\"out/C/A\", \"out/D/A\"])\n jobB = ppg.FileGeneratingJob(\"out/x\", of)\n jobB.depends_on(jobA)\n ppg.run_pipegraph()\n # ppg2 now does *not* give up\n # assert read(\"out/counter\") == \"0xx\"\n assert read(\"out/counter\") == \"0x\" # so no rerun\n\n\[email protected](\"ppg1_compatibility_test\")\nclass TestDependency:\n def test_simple_chain(self):\n o = Dummy()\n\n def load_a():\n return \"shu\"\n\n jobA = ppg.AttributeLoadingJob(\"a\", o, \"myattr\", load_a)\n ofB = \"out/B\"\n\n def do_write_b():\n write(ofB, o.myattr)\n\n jobB = ppg.FileGeneratingJob(ofB, do_write_b).depends_on(jobA)\n ofC = \"out/C\"\n\n def do_write_C():\n write(ofC, o.myattr)\n\n ppg.FileGeneratingJob(ofC, do_write_C).depends_on(jobA)\n\n ofD = \"out/D\"\n\n def do_write_d():\n write(ofD, read(ofC) + read(ofB))\n\n ppg.FileGeneratingJob(ofD, do_write_d).depends_on([jobA, jobB])\n\n def test_failed_job_kills_those_after(self, ppg1_compatibility_test):\n ofA = \"out/A\"\n\n def write_a():\n append(ofA, \"hello\")\n\n jobA = ppg.FileGeneratingJob(ofA, write_a)\n\n ofB = \"out/B\"\n\n def write_b():\n raise ValueError(\"shu\")\n\n jobB = ppg.FileGeneratingJob(ofB, write_b)\n jobB.depends_on(jobA)\n\n ofC = \"out/C\"\n\n def write_c():\n write(ofC, \"hello\")\n\n jobC = ppg.FileGeneratingJob(ofC, write_c)\n jobC.depends_on(jobB)\n try:\n ppg.run_pipegraph()\n raise ValueError(\"should not be reached\")\n except ppg.RuntimeError:\n pass\n assert os.path.exists(ofA) # which was before the error\n assert not (os.path.exists(ofB)) # which was on the error\n assert not (os.path.exists(ofC)) # which was after the error\n ppg1_compatibility_test.new_pipegraph()\n jobA = ppg.FileGeneratingJob(ofA, write_a)\n jobC = ppg.FileGeneratingJob(ofC, write_c)\n\n def write_b_ok():\n write(ofB, \"BB\")\n\n jobB = ppg.FileGeneratingJob(ofB, write_b_ok)\n jobB.depends_on(jobA)\n jobC.depends_on(jobB)\n ppg.run_pipegraph()\n\n assert os.path.exists(ofA)\n assert read(ofA) == \"hello\" # run only once!\n assert os.path.exists(ofB)\n assert os.path.exists(ofC)\n\n def test_done_filejob_does_not_gum_up_execution(self):\n ofA = \"out/A\"\n write(ofA, \"1111\")\n\n def write_a():\n append(ofA, \"hello\")\n\n jobA = ppg.FileGeneratingJob(ofA, write_a)\n jobA.ignore_code_changes() # or it will inject a function dependency and run never the less...\n\n ofB = \"out/B\"\n\n def write_b():\n append(ofB, \"hello\")\n\n jobB = ppg.FileGeneratingJob(ofB, write_b)\n jobB.depends_on(jobA)\n\n ofC = \"out/C\"\n\n def write_c():\n write(ofC, \"hello\")\n\n jobC = ppg.FileGeneratingJob(ofC, write_c)\n jobC.depends_on(jobB)\n assert os.path.exists(ofA)\n\n ppg.run_pipegraph()\n\n assert os.path.exists(ofB)\n assert os.path.exists(ofC)\n # ppg2 - ppg2 runs the job at least once! 
and captures the hash afterwards\n # this might seem a disadvantag, but it's the only way to gurantee the code actually\n # produces the output.\n # on the plus side, you can swap in a FileInvariant inplace without trouble\n # (the FileGeneratingJob produces the same 'output'))\n # assert read(ofA) == \"1111\"\n assert read(ofA) == \"hello\"\n\n def test_invariant_violation_redoes_deps_but_not_nondeps(\n self, ppg1_compatibility_test\n ):\n def get_job(name):\n fn = \"out/\" + name\n\n def do_write():\n if os.path.exists(fn + \".sentinel\"):\n d = read(fn + \".sentinel\")\n else:\n d = \"\"\n append(fn + \".sentinel\", name) # get's longer all the time...\n write(fn, d + name) # get's deleted anyhow...\n\n return ppg.FileGeneratingJob(fn, do_write)\n\n jobA = get_job(\"A\")\n jobB = get_job(\"B\")\n jobC = get_job(\"C\")\n get_job(\"D\")\n jobC.depends_on(jobB)\n jobB.depends_on(jobA)\n dep = ppg.ParameterInvariant(\"myparam\", (\"hello\",))\n jobA.depends_on(dep)\n ppg.run_pipegraph()\n assert read(\"out/A\") == \"A\"\n assert read(\"out/B\") == \"B\"\n assert read(\"out/C\") == \"C\"\n\n ppg1_compatibility_test.new_pipegraph()\n jobA = get_job(\"A\")\n jobB = get_job(\"B\")\n jobC = get_job(\"C\")\n get_job(\"D\")\n jobC.depends_on(jobB)\n jobB.depends_on(jobA)\n dep = ppg.ParameterInvariant(\"myparam\", (\"hello stranger\",))\n jobA.depends_on(dep) # now, the invariant has been changed, all jobs rerun...\n ppg.run_pipegraph()\n assert read(\"out/A\") == \"AA\" # thanks to our smart rerun aware job definition..\n assert read(\"out/B\") == \"BB\"\n assert read(\"out/C\") == \"CC\"\n assert read(\"out/D\") == \"D\" # since that one does not to be rerun...\n\n def test_depends_on_accepts_a_list(self):\n jobA = ppg.FileGeneratingJob(\"out/A\", lambda: write(\"out/A\", \"A\"))\n jobB = ppg.FileGeneratingJob(\"out/B\", lambda: write(\"out/B\", \"B\"))\n jobC = ppg.FileGeneratingJob(\"out/C\", lambda: write(\"out/C\", \"C\"))\n jobC.depends_on([jobA, jobB])\n ppg.run_pipegraph()\n assert read(\"out/A\") == \"A\"\n assert read(\"out/B\") == \"B\"\n assert read(\"out/C\") == \"C\"\n\n def test_depends_on_accepts_multiple_values(self):\n jobA = ppg.FileGeneratingJob(\"out/A\", lambda: write(\"out/A\", \"A\"))\n jobB = ppg.FileGeneratingJob(\"out/B\", lambda: write(\"out/B\", \"B\"))\n jobC = ppg.FileGeneratingJob(\"out/C\", lambda: write(\"out/C\", \"C\"))\n jobC.depends_on(jobA, jobB)\n ppg.run_pipegraph()\n assert read(\"out/A\") == \"A\"\n assert read(\"out/B\") == \"B\"\n assert read(\"out/C\") == \"C\"\n\n def test_depends_on_accepts_multiple_values_mixed(self):\n jobA = ppg.FileGeneratingJob(\"out/A\", lambda: write(\"out/A\", \"A\"))\n jobB = ppg.FileGeneratingJob(\"out/B\", lambda: write(\"out/B\", \"B\"))\n jobC = ppg.FileGeneratingJob(\"out/C\", lambda: write(\"out/C\", \"C\"))\n jobC.depends_on(jobA, [jobB])\n ppg.run_pipegraph()\n assert read(\"out/A\") == \"A\"\n assert read(\"out/B\") == \"B\"\n assert read(\"out/C\") == \"C\"\n\n def test_depends_on_none_ignored(self):\n jobA = ppg.FileGeneratingJob(\"out/A\", lambda: write(\"out/A\", \"A\"))\n jobB = ppg.FileGeneratingJob(\"out/B\", lambda: write(\"out/B\", \"B\"))\n jobC = ppg.FileGeneratingJob(\"out/C\", lambda: write(\"out/C\", \"C\"))\n jobC.depends_on(jobA, [jobB], None, [None])\n jobC.depends_on(None)\n jobC.depends_on()\n ppg.run_pipegraph()\n assert read(\"out/A\") == \"A\"\n assert read(\"out/B\") == \"B\"\n assert read(\"out/C\") == \"C\"\n\n def test_depends_on_excludes_on_non_jobs(self):\n jobA = 
ppg.FileGeneratingJob(\"out/A\", lambda: write(\"out/A\", \"A\"))\n\n def inner():\n jobA.depends_on(\"SHU\")\n\n assertRaises(KeyError, inner)\n with pytest.raises(ppg.CycleError):\n jobA.depends_on(jobA.job_id)\n\n def test_depends_on_instant_cycle_check(self):\n jobA = ppg.FileGeneratingJob(\"out/A\", lambda: write(\"out/A\", \"A\"))\n jobB = ppg.FileGeneratingJob(\"out/b\", lambda: write(\"out/B\", \"b\"))\n jobB.depends_on(jobA)\n\n with pytest.raises(ppg.CycleError):\n jobA.depends_on(jobA)\n\n with pytest.raises(ppg.CycleError):\n jobA.depends_on(jobB)\n\n def test_depends_on_accepts_a_list_of_lists(self):\n jobA = ppg.FileGeneratingJob(\"out/A\", lambda: write(\"out/A\", \"A\"))\n jobB = ppg.FileGeneratingJob(\"out/B\", lambda: write(\"out/B\", \"B\"))\n jobC = ppg.FileGeneratingJob(\n \"out/C\",\n lambda: write(\"out/C\", read(\"out/A\") + read(\"out/B\") + read(\"out/D\")),\n )\n jobD = ppg.FileGeneratingJob(\"out/D\", lambda: write(\"out/D\", \"D\"))\n jobC.depends_on([jobA, [jobB, jobD]])\n assert ppg.util.global_pipegraph.has_edge(jobD, jobC)\n assert ppg.util.global_pipegraph.has_edge(jobA, jobC)\n assert ppg.util.global_pipegraph.has_edge(jobB, jobC)\n ppg.run_pipegraph()\n # assert jobC.prerequisites is None ppg2\n assert read(\"out/A\") == \"A\"\n assert read(\"out/B\") == \"B\"\n assert read(\"out/C\") == \"ABD\"\n assert read(\"out/D\") == \"D\"\n\n def test_invariant_job_depends_on_raises(self):\n\n with pytest.raises(ppg.JobContractError):\n ppg.ParameterInvariant(\"A\", \"a\").depends_on(\n ppg.Job([\"B\"])\n ) # ppg, don't rely on internals\n # with pytest.raises(ppg.JobContractError): # no final job in ppg2\n # ppg.FinalJob(\"A\").depends_on(ppg.Job(\"B\"))\n\n def test_cached_job_depends_on(self):\n class Dummy:\n pass\n\n o = Dummy()\n jobA = ppg.CachedAttributeLoadingJob(\"cache/A\", o, \"a\", lambda: 23)\n # jobA is the loading job,\n # jobA.lfg is the calculating job\n # but joba.depends_on goes to the .lfg...\n jobB = ppg.Job([\"B\"])\n jobC = ppg.Job([\"C\"])\n jobD = ppg.Job([\"D\"])\n jobA.depends_on([jobB], jobC, jobD)\n has_edge = ppg.util.global_pipegraph.has_edge\n assert not has_edge(jobB, jobA)\n assert not has_edge(jobC, jobA)\n assert not has_edge(jobD, jobA)\n assert has_edge(jobB, jobA.lfg)\n assert has_edge(jobC, jobA.lfg)\n assert has_edge(jobD, jobA.lfg)\n\n ppg.Job.depends_on(jobA, jobC)\n assert has_edge(jobC, jobA)\n\n def test_dependency_placeholder(self):\n jobA = ppg.FileGeneratingJob(\n \"out/A\", lambda: write(\"out/A\", \"A\" + read(\"out/B\"))\n )\n jobB = ppg.FileGeneratingJob(\"out/B\", lambda: write(\"out/B\", \"B\"))\n\n def gen_deps():\n print(\"gen deps called\")\n return [jobB]\n\n jobA.depends_on(gen_deps)\n ppg.run_pipegraph()\n assert read(\"out/A\") == \"AB\"\n\n def test_dependency_placeholder2(self):\n jobA = ppg.FileGeneratingJob(\n \"out/A\", lambda: write(\"out/A\", \"A\" + read(\"out/B\"))\n )\n\n def gen_deps():\n return ppg.FileGeneratingJob(\"out/B\", lambda: write(\"out/B\", \"B\"))\n\n jobA.depends_on(gen_deps)\n ppg.run_pipegraph()\n assert read(\"out/A\") == \"AB\"\n\n def test_dependency_placeholder_nested(self):\n jobA = ppg.FileGeneratingJob(\n \"out/A\", lambda: write(\"out/A\", \"A\" + read(\"out/B\") + read(\"out/C\"))\n )\n\n def gen_deps2():\n return ppg.FileGeneratingJob(\"out/C\", lambda: write(\"out/C\", \"C\"))\n\n def gen_deps():\n return ppg.FileGeneratingJob(\n \"out/B\", lambda: write(\"out/B\", \"B\")\n ).depends_on(gen_deps2)\n\n jobA.depends_on(gen_deps)\n ppg.run_pipegraph()\n assert 
read(\"out/A\") == \"ABC\"\n\n def test_dependency_placeholder_dynamic_auto_invariants(self):\n from loguru import logger\n\n jobA = ppg.FileGeneratingJob(\n \"out/A\", lambda: write(\"out/A\", \"A\" + read(\"out/B\"))\n )\n\n def check_function_invariant():\n write(\"out/B\", \"B\")\n print()\n\n # ppg2\n assert ppg.util.global_pipegraph.has_edge(\"FITestDependency.test_dependency_placeholder_dynamic_auto_invariants.<locals>.check_function_invariant\", \"out/B\")\n\n def gen_deps():\n jobB = ppg.FileGeneratingJob(\"out/B\", check_function_invariant)\n print(\"gen deps called\")\n return [jobB]\n\n jobA.depends_on(gen_deps)\n ppg.run_pipegraph()\n assert read(\"out/A\") == \"AB\"\n\n\[email protected](\"ppg1_compatibility_test\")\nclass TestDefinitionErrors:\n def test_defining_function_invariant_twice(self):\n a = lambda: 55 # noqa:E731\n b = lambda: 66 # noqa:E731\n ppg.FunctionInvariant(\"a\", a)\n\n def inner():\n ppg.FunctionInvariant(\"a\", b)\n\n # assertRaises(ppg.JobContractError, inner)\n # ppg2\n assertRaises(ValueError, inner)\n import pypipegraph2 as ppg2\n\n assertRaises(ppg2.JobRedefinitionError, inner)\n\n @pytest.mark.skip # in ppg2, you can't have a collision, they have different prefixes\n def test_defining_function_and_parameter_invariant_with_same_name(self):\n a = lambda: 55 # noqa:E731\n b = 66\n ja = ppg.FunctionInvariant(\"PIa\", a)\n\n # def inner():\n jb = ppg.ParameterInvariant(\"a\", b)\n assert ja.job_id == jb.job_id\n\n @pytest.mark.skip # in ppg2, you can't have a collision, they have different prefixes\n def test_defining_function_and_parameter_invariant_with_same_name_reversed(self):\n a = lambda: 55 # noqa:E731\n # b = lambda: 66 # noqa:E731\n # ppg2\n ja = ppg.ParameterInvariant(\"a\", 66)\n\n ppg.FunctionInvariant(ja.job_id, a)\n\n # assertRaises(ppg.JobContractError, inner)\n\n\[email protected](\"ppg1_compatibility_test\")\nclass TestFunctionInvariantDisChanges_BetweenVersions:\n @pytest.mark.skip # ppg1 implementation internals no longer relevant to ppg2\n def test_lambda(self):\n source = \"\"\"def test(arg1, arg2):\n l = lambda: arg1 + 123\n return somecall(arg2 + l())\n \"\"\"\n py369 = \"\"\" 2 0 LOAD_CLOSURE 0 (arg2)\n 2 BUILD_TUPLE 1\n 4 LOAD_CONST 1 (<code object <lambda> at 0x7f765af10e40, file \"<stdin>\", line 2>)\n 6 LOAD_CONST 2 ('test.<locals>.<lambda>')\n 8 MAKE_FUNCTION 8\n 10 STORE_FAST 2 (l)\n\n 3 12 LOAD_GLOBAL 0 (somecall)\n 14 LOAD_FAST 0 (arg1)\n 16 LOAD_FAST 2 (l)\n 18 CALL_FUNCTION 0\n 20 BINARY_ADD\n 22 CALL_FUNCTION 1\n 24 RETURN_VALUE\"\"\"\n # fmt: off\n py373 = ( # noqa: F841\n \"\"\" 2 0 LOAD_CLOSURE 0 (arg2)\n 2 BUILD_TUPLE 1\n 4 LOAD_CONST 1 (<code object <lambda> at 0x7fd895ea9030, file \"<stdin>\", line 2>)\n 6 LOAD_CONST 2 ('test.<locals>.<lambda>')\n 8 MAKE_FUNCTION 8\n 10 STORE_FAST 2 (l)\n\n 3 12 LOAD_GLOBAL 0 (somecall)\n 14 LOAD_FAST 0 (arg1)\n 16 LOAD_FAST 2 (l)\n 18 CALL_FUNCTION 0\n 20 BINARY_ADD\n 22 CALL_FUNCTION 1\n 24 RETURN_VALUE\n\nDisassembly of <code object <lambda> at 0x7fd895ea9030, file \"<stdin>\", line 2>:\n 2 0 LOAD_DEREF 0 (arg2)\n 2 LOAD_CONST 1 (123)\n 4 BINARY_ADD\n 6 RETURN_VALUE\"\"\")\n # fmt: on\n\n py380 = \"\"\"0\tLOAD_CLOSURE\t0\t(arg2)\nBUILD_TUPLE\t1\nLOAD_CONST\t1\t(lambda>\",\tline\t2>)\nLOAD_CONST\t2\t('<func name 
ommited>.<locals>.<lambda>')\nMAKE_FUNCTION\t8\t(closure)\nSTORE_FAST\t2\t(l)\n\n12\tLOAD_GLOBAL\t0\t(somecall)\nLOAD_FAST\t0\t(arg1)\nLOAD_FAST\t2\t(l)\nCALL_FUNCTION\t0\nBINARY_ADD\nCALL_FUNCTION\t1\nRETURN_VALUE\n\nof\tlambda>\",\tline\t2>:\n0\tLOAD_DEREF\t0\t(arg2)\nLOAD_CONST\t1\t(123)\nBINARY_ADD\nRETURN_VALUE\"\"\"\n # if source is present and identical, ignore all others\n with pytest.raises(ppg.NothingChanged):\n ppg.FunctionInvariant._compare_new_and_old(\n source, py380, {}, {\"source\": source}\n )\n # if byte code is present, in the right version and identical, ok.\n with pytest.raises(ppg.NothingChanged):\n ppg.FunctionInvariant._compare_new_and_old(\n source, py380, \"\", {str(sys.version_info[:2]): (py380, \"\")}\n )\n # nothing store -> change\n assert str(sys.version_info[:2]) in ppg.FunctionInvariant._compare_new_and_old(\n source, py380, {}, {}\n )\n # if source is present and identical, ignore all others, take 2\n with pytest.raises(ppg.NothingChanged):\n ppg.FunctionInvariant._compare_new_and_old(\n source,\n py380,\n {},\n {\"source\": source, (3, 6): (py369, \"\"), (3, 7): (\"\", \"\")}, #\n )\n\n @pytest.mark.skip # ppg1 implementation internals no longer relevant to ppg2\n def test_compare_with_old_style(self):\n shu = 10\n\n def test(arg):\n l = lambda: arg + 5 + shu # noqa: E731, E741\n return l()\n\n iv = ppg.FunctionInvariant(\"shu\", test)\n new = iv._get_invariant(False, [])\n old = (\n \"ignored\",\n \"ignored\",\n new[str(sys.version_info[:2])][0],\n new[str(sys.version_info[:2])][1],\n )\n\n with pytest.raises(ppg.NothingChanged):\n ppg.FunctionInvariant._compare_new_and_old(\n new[\"source\"],\n new[str(sys.version_info[:2])][0],\n new[str(sys.version_info[:2])][1],\n old,\n )\n\n @pytest.mark.skip # ppg1 implementation internals no longer relevant to ppg2\n def test_compare_with_old_old_style(self):\n shu = 10\n\n def test(arg):\n l = lambda: arg + 5 + shu # noqa: E731, E741\n return l()\n\n iv = ppg.FunctionInvariant(\"shu\", test)\n new = iv._get_invariant(False, [])\n old = new[str(sys.version_info[:2])][0] + new[str(sys.version_info[:2])][1]\n with pytest.raises(ppg.NothingChanged):\n ppg.FunctionInvariant._compare_new_and_old(\n new[\"source\"],\n new[str(sys.version_info[:2])][0],\n new[str(sys.version_info[:2])][1],\n old,\n )\n\n def test_function_name_is_irrelevant(self):\n import pypipegraph2 as ppg2\n\n def test_a():\n return 55\n\n def test_b():\n return 55\n\n def test_c():\n return 56\n\n a = ppg.FunctionInvariant(\"a\", test_a)\n b = ppg.FunctionInvariant(\"b\", test_b)\n c = ppg.FunctionInvariant(\"c\", test_c)\n assert (\n a.run(None, None)[\"FIa\"][ppg2.jobs.python_version]\n == b.run(None, None)[\"FIb\"][ppg2.jobs.python_version]\n )\n assert (\n a.run(None, None)[\"FIa\"][ppg2.jobs.python_version]\n != c.run(None, None)[\"FIc\"][ppg2.jobs.python_version]\n )\n\n def test_docstring_is_irrelevant(self):\n import pypipegraph2 as ppg2\n\n def test():\n \"\"\"A\"\"\"\n return 55\n\n a = ppg.FunctionInvariant(\"a\", test)\n\n # fmt: off\n def test():\n '''B'''\n return 55\n # fmt: on\n b = ppg.FunctionInvariant(\"b\", test)\n\n def test():\n \"c\"\n return 56\n\n c = ppg.FunctionInvariant(\"c\", test)\n\n def test():\n \"c\"\n return 56\n\n d = ppg.FunctionInvariant(\"d\", test)\n\n assert (\n a.run(None, None)[\"FIa\"][ppg2.jobs.python_version]\n == b.run(None, None)[\"FIb\"][ppg2.jobs.python_version]\n )\n assert (\n a.run(None, None)[\"FIa\"][ppg2.jobs.python_version]\n != c.run(None, None)[\"FIc\"][ppg2.jobs.python_version]\n )\n 
assert (\n c.run(None, None)[\"FIc\"][ppg2.jobs.python_version]\n == d.run(None, None)[\"FId\"][ppg2.jobs.python_version]\n )\n\n\[email protected](\"ppg1_compatibility_test\")\n# ppg2: actual ppg2 source is without final newline. tests adjusted.\nclass TestCythoncompatibility:\n def test_just_a_function(self):\n import pypipegraph2 as ppg2\n import cython\n\n src = \"\"\"\ndef a():\n return 1\n\ndef b():\n return 5\n\"\"\"\n func = cython.inline(src)[\"a\"]\n actual = ppg.FunctionInvariant(\"a\", func).run(None, None)[\"FIa\"][\"source\"]\n should = \"\"\" def a():\n return 1\"\"\"\n assert actual == should\n\n def test_nested_function(self):\n import pypipegraph2 as ppg2\n import cython\n\n src = \"\"\"\ndef a():\n def b():\n return 1\n return b\n\ndef c():\n return 5\n\"\"\"\n func = cython.inline(src)[\"a\"]()\n actual = ppg.FunctionInvariant(\"a\", func).run(None, None)[\"FIa\"][\"source\"]\n should = \"\"\" def b():\n return 1\"\"\"\n assert actual == should\n\n def test_class(self):\n import pypipegraph2 as ppg2\n import cython\n\n src = \"\"\"\nclass A():\n def b(self):\n return 55\n\ndef c():\n return 5\"\"\"\n\n func = cython.inline(src)[\"A\"]().b\n actual = ppg.FunctionInvariant(\"a\", func).run(None, None)[\"FIa\"][\"source\"]\n should = \"\"\" def b(self):\n return 55\"\"\"\n assert actual == should\n\n def test_class_inner_function(self):\n import pypipegraph2 as ppg2\n import cython\n\n src = \"\"\"\nclass A():\n def b(self):\n def c():\n return 55\n return c\n\ndef d():\n return 5\"\"\"\n\n func = cython.inline(src)[\"A\"]().b()\n actual = ppg.FunctionInvariant(\"a\", func).run(None, None)[\"FIa\"][\"source\"]\n should = \"\"\" def c():\n return 55\"\"\"\n assert actual == should\n", "id": "3483751", "language": "Python", "matching_score": 10.157373428344727, "max_stars_count": 0, "path": "tests/ppg1_compatibility_layer/test_invariants_and_depedencies.py" }, { "content": "import os\nimport gzip\nfrom pathlib import Path\nfrom loguru import logger\nimport stat\nimport time\nimport hashlib\nimport shutil\nimport pytest\nimport pypipegraph2 as ppg\nfrom .shared import write, read, append, Dummy, counter, force_load\n\n\nclass Undepickable(object):\n def __getstate__(self):\n return {\"shu\": 123} # must not return falsey value\n\n def __setstate__(self, state):\n self.sha = state[\"shu\"]\n import pickle\n\n raise pickle.UnpicklingError(\"SHU\")\n\n\[email protected](\"create_out_dir\")\[email protected](\"ppg2_per_test\")\nclass TestInvariant:\n def sentinel_count(self):\n sentinel = \"out/sentinel\"\n try:\n op = open(sentinel, \"r\")\n count = int(op.read())\n op.close()\n except IOError:\n count = 1\n op = open(sentinel, \"w\")\n op.write(\"%i\" % (count + 1))\n op.close()\n return count\n\n def test_filegen_jobs_detect_code_change(self):\n of = \"out/a\"\n\n def do_write(of):\n append(of, \"shu\" * self.sentinel_count())\n\n ppg.FileGeneratingJob(of, do_write)\n ppg.run()\n assert read(of) == \"shu\"\n ppg.new()\n ppg.FileGeneratingJob(of, do_write)\n ppg.run()\n assert read(of) == \"shu\" # has not been run again...\n\n def do_write2(of):\n append(of, \"sha\")\n\n ppg.new()\n ppg.FileGeneratingJob(of, do_write2)\n ppg.run()\n assert read(of) == \"sha\" # has been run again ;).\n\n def test_filegen_jobs_ignores_code_change(self):\n of = \"out/a\"\n\n def do_write(of):\n counter(\"A\")\n append(of, \"shu\" * self.sentinel_count())\n\n job = ppg.FileGeneratingJob(of, do_write)\n ppg.run()\n\n assert read(of) == \"shu\"\n assert read(\"A\") == \"1\"\n ppg.new()\n job = 
ppg.FileGeneratingJob(of, do_write)\n ppg.run()\n assert read(of) == \"shu\" # has not been run again, for no change\n assert read(\"A\") == \"1\"\n\n ppg.new()\n\n def do_write2(of):\n counter(\"A\")\n append(of, \"sha\")\n\n job = ppg.FileGeneratingJob(of, do_write2, depend_on_function=False)\n ppg.run()\n assert read(of) == \"sha\" # has been run again - number of invariants changed!\n assert read(\"A\") == \"2\"\n\n ppg.new()\n job = ppg.FileGeneratingJob(of, do_write2)\n ppg.run()\n assert read(of) == \"sha\" # Readding the invariant does trigger again\n assert read(\"A\") == \"3\"\n\n def test_parameter_dependency(self):\n of = \"out/a\"\n\n def do_write(of):\n append(of, \"shu\" * self.sentinel_count())\n\n job = ppg.FileGeneratingJob(of, do_write)\n param_dep = ppg.ParameterInvariant(\"myparam\", (1, 2, 3))\n job.depends_on(param_dep)\n ppg.run()\n assert read(of) == \"shu\"\n ppg.new()\n job = ppg.FileGeneratingJob(of, do_write)\n param_dep = ppg.ParameterInvariant(\"myparam\", (1, 2, 3))\n job.depends_on(param_dep)\n ppg.run()\n assert read(of) == \"shu\" # has not been run again...\n ppg.new()\n job = ppg.FileGeneratingJob(of, do_write)\n param_dep = ppg.ParameterInvariant(\"myparam\", (1, 2, 3, 4))\n job.depends_on(param_dep)\n ppg.run()\n assert read(of) == \"shushu\" # has been run again ;).\n\n def test_parameter_invariant_adds_hidden_job_id_prefix(self):\n param = \"A\"\n jobA = ppg.FileGeneratingJob(\"out/A\", lambda of: write(\"out/A\", param))\n jobB = ppg.ParameterInvariant(\"out/A\", param)\n jobA.depends_on(jobB)\n ppg.run()\n assert read(\"out/A\") == param\n\n def test_depends_on_func(self):\n a = ppg.FileGeneratingJob(\"out/A\", lambda of: write(\"a\"))\n b, a_again = a.depends_on_func(\"a123\", lambda: 123)\n assert b.job_id.startswith(\"FI\" + a.job_id + \"_\")\n assert ppg.global_pipegraph.has_edge(b, a)\n assert a_again is a\n\n def test_depends_on_file(self):\n a = ppg.FileGeneratingJob(\"out/A\", lambda of: write(\"a\"))\n write(\"shu\", \"hello\")\n b = a.depends_on_file(\"shu\")\n assert b.self is a\n assert ppg.global_pipegraph.has_edge(b.invariant, a)\n\n def test_depends_on_params(self):\n a = ppg.FileGeneratingJob(\"out/A\", lambda of: write(\"a\"))\n b = a.depends_on_params(23)\n assert b.invariant.job_id == \"PIout/A\"\n # assert b.invariant.parameters == 23\n assert ppg.global_pipegraph.has_edge(b.invariant, a)\n assert b.self is a\n\n def test_parameter_invariant_twice_different_values(self):\n ppg.ParameterInvariant(\"a\", (1, 2, 3))\n with pytest.raises(ValueError):\n ppg.ParameterInvariant(\"a\", (1, 2, 4))\n\n def test_filetime_dependency(self):\n of = \"out/a\"\n\n def do_write(of):\n append(of, \"shu\" * self.sentinel_count())\n\n ftfn = \"out/ftdep\"\n write(ftfn, \"hello\")\n write(of, \"hello\")\n job = ppg.FileGeneratingJob(of, do_write)\n dep = ppg.FileInvariant(ftfn)\n job.depends_on(dep)\n ppg.run()\n assert (\n read(of) == \"shu\"\n ) # job get's run though there is a file, because the FileInvariant was not stored before...\n ppg.new()\n job = ppg.FileGeneratingJob(of, do_write)\n dep = ppg.FileInvariant(ftfn)\n job.depends_on(dep)\n ppg.run()\n assert read(of) == \"shu\" # job does not get rerun...\n\n time.sleep(1) # so linux actually advances the file time in the next line\n write(ftfn, \"hello\") # same content, different time\n\n ppg.new()\n job = ppg.FileGeneratingJob(of, do_write)\n dep = ppg.FileInvariant(ftfn)\n job.depends_on(dep)\n ppg.run()\n assert (\n read(of) == \"shu\"\n ) # job does not get rerun - filetime invariant 
is now filechecksum invariant...\n\n def test_file_did_not_exist(self):\n ppg.FileInvariant(\"shu\")\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert \"did not exist\" in str(ppg.global_pipegraph.last_run_result[\"shu\"].error)\n\n def test_filechecksum_dependency_raises_on_too_short_a_filename(self):\n ppg.global_pipegraph.allow_short_filenames = False\n with pytest.raises(ValueError):\n ppg.FileInvariant(\"a\")\n\n with pytest.raises(ValueError):\n ppg.FileInvariant(\"sh\")\n ppg.FileInvariant(\"shu\")\n\n def test_filechecksum_dependency(self):\n of = \"out/a\"\n\n def do_write(of):\n append(of, \"shu\" * self.sentinel_count())\n\n ftfn = \"out/ftdep\"\n write(ftfn, \"hello\")\n # import stat\n # logging.info('file time after creating %s'% os.stat(ftfn)[stat.ST_MTIME])\n\n write(of, \"hello\")\n\n job = ppg.FileGeneratingJob(of, do_write)\n dep = ppg.FileInvariant(ftfn)\n job.depends_on(dep)\n ppg.run()\n assert (\n read(of) == \"shu\"\n ) # job get's run though there is a file, because the FileInvariant was not stored before...\n ppg.new()\n job = ppg.FileGeneratingJob(of, do_write)\n dep = ppg.FileInvariant(ftfn)\n job.depends_on(dep)\n ppg.run()\n assert read(of) == \"shu\" # job does not get rerun...\n\n time.sleep(1) # so linux actually advances the file time in the next line\n # logging.info(\"NOW REWRITE\")\n write(ftfn, \"hello\") # same content, different time\n\n ppg.new()\n job = ppg.FileGeneratingJob(of, do_write)\n dep = ppg.FileInvariant(ftfn)\n job.depends_on(dep)\n ppg.run()\n assert read(of) == \"shu\" # job does not get rerun...\n\n # time.sleep(1) #we don't care about the time, size should be enough...\n write(ftfn, \"hello world!!\") # different time\n time.sleep(1) # give the file system a second to realize the change.\n\n ppg.new()\n job = ppg.FileGeneratingJob(of, do_write)\n dep = ppg.FileInvariant(ftfn)\n job.depends_on(dep)\n ppg.run()\n assert read(of) == \"shushu\" # job does get rerun\n\n def test_input_file_was_renamed(self):\n of = \"out/B\"\n\n def do_write(of):\n append(of, \"shu\" * self.sentinel_count())\n\n ftfn = \"out/ftdep\"\n write(ftfn, \"hello\")\n # import stat\n # logging.info('file time after creating %s'% os.stat(ftfn)[stat.ST_MTIME])\n\n write(of, \"hello\")\n\n job = ppg.FileGeneratingJob(of, do_write)\n dep = ppg.FileInvariant(ftfn)\n job.depends_on(dep)\n ppg.run()\n assert (\n read(of) == \"shu\"\n ) # job get's run though there is a file, because the FileInvariant was not stored before...\n\n ppg.new()\n job = ppg.FileGeneratingJob(of, do_write)\n dep = ppg.FileInvariant(ftfn)\n job.depends_on(dep)\n ppg.run()\n assert read(of) == \"shu\" # job does not get rerun...\n\n os.mkdir(\"out/moved_here\")\n shutil.move(ftfn, os.path.join(\"out/moved_here\", \"ftdep\"))\n ppg.new()\n job = ppg.FileGeneratingJob(of, do_write)\n dep = ppg.FileInvariant(os.path.join(\"out/moved_here\", \"ftdep\"))\n job.depends_on(dep)\n assert read(of) == \"shu\" # job does not get rerun...\n ppg.run()\n assert read(of) == \"shu\" # job does not get rerun...\n\n @pytest.mark.skip() # I have no idea why this was useful. 
Possibly the PrebuildJobs support?\n def test_file_invariant_with_md5sum(self):\n of = \"out/a\"\n\n def do_write(of):\n append(of, \"shu\" * self.sentinel_count())\n\n ftfn = \"out/ftdep\"\n write(ftfn, \"hello\")\n # import stat\n # logging.info('file time after creating %s'% os.stat(ftfn)[stat.ST_MTIME])\n\n write(of, \"hello\")\n\n job = ppg.FileGeneratingJob(of, do_write)\n dep = ppg.FileInvariant(ftfn)\n job.depends_on(dep)\n ppg.run()\n assert (\n read(of) == \"shu\"\n ) # job get's run though there is a file, because the FileInvariant was not stored before...\n\n with open(ftfn + \".md5sum\", \"wb\") as op:\n op.write(hashlib.md5(b\"hello world\").hexdigest().encode(\"utf-8\"))\n write(ftfn, \"hello world\") # different content\n t = time.time()\n # now make\n os.utime(ftfn, (t, t))\n os.utime(ftfn + \".md5sum\", (t, t))\n time.sleep(1) # give the file system a second to realize the change.\n\n ppg.new()\n job = ppg.FileGeneratingJob(of, do_write)\n dep = ppg.FileInvariant(ftfn)\n job.depends_on(dep)\n ppg.run()\n assert (\n read(of) == \"shushu\"\n ) # job get's run though there is a file, because the md5sum changed.\n\n with open(ftfn + \".md5sum\", \"wb\") as op:\n op.write(hashlib.md5(b\"hello world\").hexdigest().encode(\"utf-8\"))\n write(ftfn, \"hello\") # different content, but the md5sum is stil the same!\n t = time.time()\n # now make\n os.utime(ftfn, (t, t))\n os.utime(ftfn + \".md5sum\", (t, t))\n time.sleep(1) # give the file system a second to realize the change.\n\n ppg.new()\n job = ppg.FileGeneratingJob(of, do_write)\n dep = ppg.FileInvariant(ftfn)\n job.depends_on(dep)\n ppg.run()\n assert read(of) == \"shushu\" # job does not get rerun, md5sum did not change...\n\n t = time.time() - 100 # force a file time mismatch\n os.utime(\n ftfn, (t, t)\n ) # I must change the one on the actual file, otherwise the 'size+filetime is the same' optimization bytes me\n\n ppg.new()\n job = ppg.FileGeneratingJob(of, do_write)\n dep = ppg.FileInvariant(ftfn)\n job.depends_on(dep)\n ppg.run()\n assert (\n read(of) == \"shushushu\"\n ) # job does get rerun, md5sum and file time mismatch\n assert os.stat(ftfn)[stat.ST_MTIME] == os.stat(ftfn + \".md5sum\")[stat.ST_MTIME]\n\n def test_invariant_dumping_on_job_failure(self):\n def w(of):\n write(\"out/A\", \"A\")\n append(\"out/B\", \"B\")\n\n def func_c(of):\n append(\"out/C\", \"C\")\n\n func_dep = ppg.FunctionInvariant(\"func_c\", func_c)\n fg = ppg.FileGeneratingJob(\"out/A\", w, depend_on_function=False)\n fg.depends_on(func_dep)\n ppg.run()\n assert read(\"out/A\") == \"A\"\n assert read(\"out/B\") == \"B\"\n ppg.new()\n\n def func_c1(of):\n append(\"out/C\", \"D\")\n\n def w2(of):\n raise ValueError() # so there is an error in a job...\n\n func_dep = ppg.FunctionInvariant(\"func_c\", func_c1) # so this invariant changes\n fg = ppg.FileGeneratingJob(\n \"out/A\", w2, depend_on_function=False\n ) # and this job crashes\n fg.depends_on(func_dep)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert not (os.path.exists(\"out/A\")) # since it was removed, and not recreated\n assert read(\"out/B\") == \"B\"\n ppg.new()\n func_dep = ppg.FunctionInvariant(\n \"func_c\", func_c1\n ) # no invariant change this time\n fg = ppg.FileGeneratingJob(\n \"out/A\", w, depend_on_function=False\n ) # but this was not done the last time...\n fg.depends_on(func_dep)\n ppg.run()\n assert read(\"out/A\") == \"A\"\n assert read(\"out/B\") == \"BB\"\n\n def test_invariant_dumping_on_graph_exception(self, mocker):\n # when an exception occurs not 
within a job\n # but within the pipegraph itself (e.g. when the user hit's CTRL-C\n # during history dumping\n # which we simulate here\n import pickle\n\n def w(of):\n write(\"out/A\", \"A\")\n append(\"out/B\", \"B\")\n\n def func_c(of):\n append(\"out/C\", \"C\")\n\n func_dep = ppg.FunctionInvariant(\"func_c\", func_c)\n fg = ppg.FileGeneratingJob(\"out/A\", w, depend_on_function=False)\n fg.depends_on(func_dep)\n ppg.run()\n assert read(\"out/A\") == \"A\"\n assert read(\"out/B\") == \"B\"\n ppg.new(run_mode=ppg.RunMode.CONSOLE)\n # ppg.new()\n\n def func_c1(of):\n append(\"out/C\", \"D\")\n\n def w2(of):\n write(\"out/A\", \"A2\")\n raise ValueError() # so there is an error in a job...\n\n func_dep = ppg.FunctionInvariant(\"func_c\", func_c1) # so this invariant changes\n fg = ppg.FileGeneratingJob(\n \"out/A\", w2, depend_on_function=False\n ) # and this job crashes\n fg.depends_on(func_dep) # so a get's deleted, and rebuild\n fg2 = ppg.FileGeneratingJob(\n \"out/C\", lambda of: counter(\"out/c\") and append(of, \"C\")\n )\n old_pickle_dumps = pickle.dumps\n raised_ki = [False]\n\n def new_pickle_dump(obj, protocol=None):\n if obj == \"out/A\" and not raised_ki[0]:\n raised_ki[0] = True\n raise KeyboardInterrupt(\"simulated\")\n else:\n return old_pickle_dumps(obj, protocol)\n\n mocker.patch(\"pickle.dumps\", new_pickle_dump)\n ki_raised = False\n assert not hasattr(ppg.global_pipegraph, \"last_run_result\")\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert isinstance(ppg.global_pipegraph.do_raise[0], KeyboardInterrupt)\n assert len(ppg.global_pipegraph.do_raise) == 2\n assert hasattr(ppg.global_pipegraph, \"last_run_result\")\n assert os.path.exists(\"out/A\") # The file get's written.\n assert read(\"out/B\") == \"B\"\n assert read(\"out/C\") == \"C\"\n assert read(\"out/c\") == \"1\" #\n mocker.stopall()\n\n ppg.new()\n func_dep = ppg.FunctionInvariant(\n \"func_c\", func_c1\n ) # no invariant change this time\n # but we had no stored input/output for A, right?\n # so it get's rerun\n fg = ppg.FileGeneratingJob(\n \"out/A\", w, depend_on_function=False\n ) # but this was not done the last time...\n fg.depends_on(func_dep)\n fg2 = ppg.FileGeneratingJob(\n \"out/C\", lambda of: counter(\"out/c\") and append(of, \"C\")\n )\n ppg.run()\n assert read(\"out/A\") == \"A\"\n assert read(\"out/B\") == \"BB\"\n assert read(\"out/C\") == \"C\" #\n assert read(\"out/c\") == \"1\" # c did not get rerun\n\n def test_sig_int_is_ignored_in_console_mode(self):\n ppg.new(run_mode=ppg.RunMode.CONSOLE)\n\n def sigint():\n import signal\n\n counter(\"a\")\n os.kill(os.getpid(), signal.SIGINT)\n counter(\"A\")\n\n job = ppg.DataLoadingJob(\"A\", sigint)\n force_load(job)\n ppg.run()\n assert read(\"a\") == \"1\"\n assert read(\"A\") == \"1\"\n\n def test_input_output_dumping_dies_for_some_reason(self, ppg2_per_test, mocker):\n import pickle\n\n raised_ki = [False]\n old_pickle_dumps = pickle.dumps\n\n def new_pickle_dump(obj, protocol=None):\n if obj == \"A\" and not raised_ki[0]:\n raised_ki[0] = True\n raise ValueError(\"simulated\")\n else:\n return old_pickle_dumps(obj, protocol)\n\n mocker.patch(\"pickle.dumps\", new_pickle_dump)\n\n ppg.FileGeneratingJob(\"A\", lambda of: counter(\"a\") and write(of, \"A\"))\n ppg.FileGeneratingJob(\"B\", lambda of: counter(\"b\") and write(of, \"B\"))\n with pytest.raises(ppg.RunFailedInternally):\n ppg.run()\n assert read(\"A\") == \"A\"\n assert read(\"B\") == \"B\"\n assert read(\"a\") == \"1\"\n assert read(\"b\") == \"1\"\n ppg.run()\n assert 
read(\"A\") == \"A\"\n assert read(\"B\") == \"B\"\n assert read(\"a\") == \"2\"\n assert read(\"b\") == \"1\" # we had captured B so it's all good\n\n def test_FileInvariant_cant_have_dependencies(self):\n # invariants are always roots of the DAG - they can't have any dependencies themselves\n write(\"out/shu\", \"shu\")\n job = ppg.FileInvariant(\"out/shu\")\n jobB = ppg.FileGeneratingJob(\"out/A\", lambda of: write(\"out/A\", \"a\"))\n\n with pytest.raises(ppg.JobContractError):\n job.depends_on(jobB)\n\n def test_FunctionInvariant_cant_have_dependencies(self):\n # invariants are always roots of the DAG - they can't have any dependencies themselves\n job = ppg.FunctionInvariant(\"shu\", lambda: 55)\n jobB = ppg.FileGeneratingJob(\"out/A\", lambda of: write(\"out/A\", \"a\"))\n\n with pytest.raises(ppg.JobContractError):\n job.depends_on(jobB)\n\n def test_ParameterInvariant_cant_have_dependencies(self):\n # invariants are always roots of the DAG - they can't have any dependencies themselves\n job = ppg.ParameterInvariant(\"out/shu\", (\"123\",))\n jobB = ppg.FileGeneratingJob(\"out/A\", lambda of: write(\"out/A\", \"a\"))\n\n with pytest.raises(ppg.JobContractError):\n job.depends_on(jobB)\n\n def test_invariant_loading_issues_on_value_catastrophic(self):\n a = ppg.DataLoadingJob(\"a\", lambda: 5)\n b = ppg.FileGeneratingJob(\n \"out/b\", lambda of: write(\"out/b\", \"b\"), depend_on_function=False\n )\n b.depends_on(a)\n write(\"out/b\", \"a\")\n import pickle\n\n Path(ppg.global_pipegraph.get_history_filename()).parent.mkdir(parents=True)\n with gzip.GzipFile(ppg.global_pipegraph.get_history_filename(), \"wb\") as op:\n pickle.dump(a.job_id, op, pickle.HIGHEST_PROTOCOL)\n op.write(b\"This breaks\")\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert read(\"out/b\") == \"a\" # job was not run\n\n def test_invariant_loading_issues_on_value_undepickableclass(self):\n import tempfile\n import pickle\n\n # make sure Undepickable is Undepickable\n with tempfile.TemporaryFile(\"wb+\") as tf:\n o = Undepickable()\n pickle.dump(o, tf, pickle.HIGHEST_PROTOCOL)\n with pytest.raises(pickle.UnpicklingError):\n tf.seek(0, 0)\n pickle.load(tf)\n\n a = ppg.ParameterInvariant(\"a\", 5)\n b = ppg.FileGeneratingJob(\n \"out/b\", lambda of: write(\"out/b\", \"b\"), depend_on_function=False\n )\n c = ppg.ParameterInvariant(\"c\", 23)\n b.depends_on(a)\n write(\"out/b\", \"a\")\n\n Path(ppg.global_pipegraph.get_history_filename()).parent.mkdir(parents=True)\n with gzip.GzipFile(ppg.global_pipegraph.get_history_filename(), \"wb\") as op:\n pickle.dump(a.job_id, op, pickle.HIGHEST_PROTOCOL)\n pickle.dump(Undepickable(), op, pickle.HIGHEST_PROTOCOL)\n pickle.dump(c.job_id, op, pickle.HIGHEST_PROTOCOL)\n pickle.dump(({}, {\"c\": str(c.parameters)}), op, pickle.HIGHEST_PROTOCOL)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert read(\"out/b\") == \"b\" # job was run\n # assert a.job_id in ppg.global_pipegraph.invariant_loading_issues\n # assert ppg.global_pipegraph.invariant_status[\"PIc\"] == 23\n\n def test_invariant_loading_issues_on_key(self):\n a = ppg.DataLoadingJob(\"a\", lambda: 5)\n b = ppg.FileGeneratingJob(\n \"out/b\", lambda of: write(\"out/b\", \"b\"), depend_on_function=False\n )\n b.depends_on(a)\n write(\"out/b\", \"a\")\n\n Path(ppg.global_pipegraph.get_history_filename()).parent.mkdir(parents=True)\n with gzip.GzipFile(ppg.global_pipegraph.get_history_filename(), \"wb\") as op:\n op.write(b\"key breaks already\")\n op.write(b\"This breaks\")\n with 
pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert read(\"out/b\") == \"a\" # job was not run\n\n def test_file_invariant_swapping(self, ppg2_per_test):\n Path(\"a\").write_text(\"a\")\n Path(\"b\").write_text(\"b\")\n\n def out(of):\n counter(\"counter\")\n of.write_text(Path(\"a\").read_text() + Path(\"b\").read_text()),\n\n job = ppg.FileGeneratingJob(\"c\", out, depend_on_function=False)\n job.depends_on_file(\"a\")\n job.depends_on_file(\"b\")\n ppg.run()\n assert read(\"c\") == \"ab\"\n assert read(\"counter\") == \"1\"\n ppg.run()\n assert read(\"counter\") == \"1\"\n ppg2_per_test.new()\n job = ppg.FileGeneratingJob(\"c\", out, depend_on_function=False)\n job.depends_on_file(\"b\")\n job.depends_on_file(\"a\")\n ppg.run()\n assert read(\"counter\") == \"1\"\n\n def test_file_invariant_replaced(self):\n Path(\"a.tsv\").write_text(\"a\")\n a = ppg.FileInvariant(\"a.tsv\")\n\n def func(of):\n counter(\"J\")\n of.write_text(\"j\")\n\n j = ppg.FileGeneratingJob(\"j\", func)\n j.depends_on(a)\n ppg.run()\n assert read(\"j\") == \"j\"\n assert read(\"J\") == \"1\"\n ppg.run()\n assert read(\"J\") == \"1\"\n ppg.new()\n Path(\"b.tsv\").write_text(\"b\")\n b = ppg.FileInvariant(\"b.tsv\")\n j = ppg.FileGeneratingJob(\"j\", func)\n j.depends_on(b)\n ppg.run()\n assert read(\"J\") == \"2\"\n\n\ndef first_value(d):\n return list(d.values())[0]\n\n\[email protected](\"ppg2_per_test\")\nclass TestFunctionInvariant:\n # most of the function invariant testing is handled by other test classes.\n # but these are more specialized.\n\n def test_generator_expressions(self):\n def get_func(r):\n def shu():\n return sum(i + 0 for i in r)\n\n return shu\n\n def get_func2(r):\n def shu():\n return sum(i + 0 for i in r)\n\n return shu\n\n def get_func3(r):\n def shu():\n return sum(i + 1 for i in r)\n\n return shu\n\n a = ppg.FunctionInvariant(\"a\", get_func(100))\n b = ppg.FunctionInvariant(\n \"b\", get_func2(100)\n ) # that invariant should be the same\n c = ppg.FunctionInvariant(\n \"c\", get_func3(100)\n ) # and this invariant should be different\n av = a.run(None, None)\n bv = b.run(None, None)\n cv = c.run(None, None)\n assert a.run(None, None)\n assert ppg.FunctionInvariant.compare_hashes(\n None, first_value(av), first_value(bv)\n )\n assert not ppg.FunctionInvariant.compare_hashes(\n None, first_value(av), first_value(cv)\n )\n\n def test_lambdas(self):\n def get_func(x):\n def inner():\n arg = lambda y: x + x + x # noqa:E731\n return arg(1)\n\n return inner\n\n def get_func2(x):\n def inner():\n arg = lambda y: x + x + x # noqa:E731\n return arg(1)\n\n return inner\n\n def get_func3(x):\n def inner():\n arg = lambda y: x + x # noqa:E731\n return arg(1)\n\n return inner\n\n a = ppg.FunctionInvariant(\"a\", get_func(100))\n b = ppg.FunctionInvariant(\n \"b\", get_func2(100)\n ) # that invariant should be the same\n c = ppg.FunctionInvariant(\n \"c\", get_func3(100)\n ) # and this invariant should be different\n av = a.run(None, None)\n bv = b.run(None, None)\n cv = c.run(None, None)\n self.maxDiff = 20000\n assert a.run(None, None)\n assert ppg.FunctionInvariant.compare_hashes(\n None, first_value(av), first_value(bv)\n )\n assert not ppg.FunctionInvariant.compare_hashes(\n None, first_value(av), first_value(cv)\n )\n\n def test_inner_functions(self):\n def get_func(x):\n def inner():\n return 23\n\n return inner\n\n def get_func2(x):\n def inner():\n return 23\n\n return inner\n\n def get_func3(x):\n def inner():\n return 23 + 5\n\n return inner\n\n a = ppg.FunctionInvariant(\"a\", 
get_func(100))\n b = ppg.FunctionInvariant(\n \"b\", get_func2(100)\n ) # that invariant should be the same\n c = ppg.FunctionInvariant(\n \"c\", get_func3(100)\n ) # and this invariant should be different\n av = a.run(None, None)\n bv = b.run(None, None)\n cv = c.run(None, None)\n assert a.run(None, None)\n assert ppg.FunctionInvariant.compare_hashes(\n None, first_value(av), first_value(bv)\n )\n assert not ppg.FunctionInvariant.compare_hashes(\n None, first_value(av), first_value(cv)\n )\n\n def test_nested_inner_functions(self):\n def get_func(x):\n def inner():\n def shu():\n return 23\n\n return shu\n\n return inner\n\n def get_func2(x):\n def inner():\n def shu():\n return 23\n\n return shu\n\n return inner\n\n def get_func3(x):\n def inner():\n def shu():\n return 23 + 5\n\n return shu\n\n return inner\n\n a = ppg.FunctionInvariant(\"a\", get_func(100))\n b = ppg.FunctionInvariant(\n \"b\", get_func2(100)\n ) # that invariant should be the same\n c = ppg.FunctionInvariant(\n \"c\", get_func3(100)\n ) # and this invariant should be different\n av = a.run(None, None)\n bv = b.run(None, None)\n cv = c.run(None, None)\n assert a.run(None, None)\n assert ppg.FunctionInvariant.compare_hashes(\n None, first_value(av), first_value(bv)\n )\n assert not ppg.FunctionInvariant.compare_hashes(\n None, first_value(av), first_value(cv)\n )\n\n def test_inner_functions_with_parameters(self):\n def get_func(x):\n def inner():\n return x\n\n return inner\n\n a = ppg.FunctionInvariant(\"a\", get_func(100))\n b = ppg.FunctionInvariant(\n \"b\", get_func(100)\n ) # that invariant should be the same\n c = ppg.FunctionInvariant(\n \"c\", get_func(2000)\n ) # and this invariant should be different\n av = a.run(None, None)\n bv = b.run(None, None)\n cv = c.run(None, None)\n assert a.run(None, None)\n assert ppg.FunctionInvariant.compare_hashes(\n None, first_value(av), first_value(bv)\n )\n assert not ppg.FunctionInvariant.compare_hashes(\n None, first_value(av), first_value(cv)\n )\n\n def test_passing_non_function_raises(self):\n with pytest.raises(TypeError):\n ppg.FunctionInvariant(\"out/a\", \"shu\")\n\n def test_passing_none_as_function_is_ok(self, create_out_dir):\n job = ppg.FunctionInvariant(\"out/a\", None)\n str(job)\n jobB = ppg.FileGeneratingJob(\"out/A\", lambda of: write(\"out/A\", \"A\"))\n jobB.depends_on(job)\n ppg.run()\n assert read(\"out/A\") == \"A\"\n\n def test_passing_non_string_as_jobid(self):\n with pytest.raises(TypeError):\n ppg.FunctionInvariant(5, lambda: 1)\n\n def test_cant_have_dependencies(self):\n # invariants are always roots of the DAG - they can't have any dependencies themselves\n def shu():\n pass\n\n job = ppg.FunctionInvariant(\"shu\", shu)\n jobB = ppg.FileGeneratingJob(\"out/A\", lambda of: write(\"out/A\", \"a\"))\n\n with pytest.raises(ppg.JobContractError):\n job.depends_on(jobB)\n\n def test_raises_on_duplicate_with_different_functions(self):\n def shu():\n return \"a\"\n\n ppg.FunctionInvariant(\"A\", shu)\n ppg.FunctionInvariant(\"A\", shu) # ok.\n with pytest.raises(ppg.JobRedefinitionError):\n ppg.FunctionInvariant(\"A\", lambda: \"b\") # raises ValueError\n\n def sha():\n def shu():\n return \"b\"\n\n return shu\n\n ppg.FunctionInvariant(\"B\", sha())\n ppg.FunctionInvariant(\"B\", sha())\n\n def test_instance_functions_ok(self, create_out_dir):\n class shu:\n def __init__(self, letter):\n self.letter = letter\n\n def get_job(self):\n job = ppg.FileGeneratingJob(\n \"out/\" + self.letter, lambda of: append(of, \"A\")\n )\n 
job.depends_on(ppg.FunctionInvariant(\"shu.sha\", self.sha))\n return job\n\n def sha(self):\n return 55 * 23\n\n x = shu(\"A\")\n x.get_job()\n ppg.run()\n assert read(\"out/A\") == \"A\"\n append(\"out/A\", \"A\")\n\n ppg.new()\n x.get_job()\n y = shu(\"B\")\n j1 = y.get_job()\n j2 = y.get_job()\n assert ppg.FunctionInvariant.functions_equal(\n j1.generating_function, j2.generating_function\n )\n # assert j1 is j2 # todo: interactive/notebook differences\n\n def test_buildin_function(self):\n a = ppg.FunctionInvariant(\"a\", open)\n assert \"<built-in\" in str(a)\n\n def test_function_invariant_non_function(self):\n class CallMe:\n def __call__(self):\n raise ValueError()\n\n a = ppg.FunctionInvariant(\"a\", CallMe)\n with pytest.raises(\n ValueError\n ): # todo: is this the right behaviour? can't we just forward to __call__ as the invariant?\n a.run(None, None)\n\n def test_closure_capturing(self):\n def func(da_list):\n def f():\n return da_list\n\n return f\n\n a = ppg.FunctionInvariant(\"a\", func([1, 2, 3]))\n b = ppg.FunctionInvariant(\n \"b\", func([1, 2, 3])\n ) # that invariant should be the same\n c = ppg.FunctionInvariant(\n \"c\", func([1, 2, 3, 4])\n ) # and this invariant should be different\n av = a.run(None, None)\n bv = b.run(None, None)\n cv = c.run(None, None)\n assert a.run(None, None)\n assert ppg.FunctionInvariant.compare_hashes(\n None, first_value(av), first_value(bv)\n )\n assert not ppg.FunctionInvariant.compare_hashes(\n None, first_value(av), first_value(cv)\n )\n\n def test_function_to_str_builtin(self):\n assert ppg.FunctionInvariant.function_to_str(open) == \"<built-in function open>\"\n\n def test_closure_capturing_dict(self):\n def func(da_list):\n def f():\n return da_list\n\n return f\n\n a = ppg.FunctionInvariant(\"a\", func({\"1\": \"a\", \"3\": \"b\", \"2\": \"c\"}))\n b = ppg.FunctionInvariant(\n \"b\", func({\"1\": \"a\", \"3\": \"b\", \"2\": \"c\"})\n ) # that invariant should be the same\n c = ppg.FunctionInvariant(\n \"c\", func({\"1\": \"a\", \"3\": \"b\", \"2\": \"d\"})\n ) # and this invariant should be different\n av = a.run(None, None)\n bv = b.run(None, None)\n cv = c.run(None, None)\n assert a.run(None, None)\n assert ppg.FunctionInvariant.compare_hashes(\n None, first_value(av), first_value(bv)\n )\n assert not ppg.FunctionInvariant.compare_hashes(\n None, first_value(av), first_value(cv)\n )\n\n def test_closure_capturing_set(self):\n def func(da_list):\n def f():\n return da_list\n\n return f\n\n import random\n\n x = set([\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\"])\n a = ppg.FunctionInvariant(\"a\", func(x))\n x2 = list(x)\n random.shuffle(x2)\n x2 = set(x2)\n b = ppg.FunctionInvariant(\"b\", func(x2)) # that invariant should be the same\n c = ppg.FunctionInvariant(\n \"c\", func({\"3\", \"2\"})\n ) # and this invariant should be different\n av = a.run(None, None)\n bv = b.run(None, None)\n cv = c.run(None, None)\n assert a.run(None, None)\n assert ppg.FunctionInvariant.compare_hashes(\n None, first_value(av), first_value(bv)\n )\n assert not ppg.FunctionInvariant.compare_hashes(\n None, first_value(av), first_value(cv)\n )\n\n def test_closure_capturing_frozen_set(self):\n def func(da_list):\n def f():\n return da_list\n\n return f\n\n import random\n\n x = frozenset([\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\"])\n a = ppg.FunctionInvariant(\"a\", func(x))\n x2 = list(x)\n random.shuffle(x2)\n x2 = frozenset(x2)\n b = ppg.FunctionInvariant(\"b\", func(x2)) # that invariant should be the same\n c = 
ppg.FunctionInvariant(\n \"c\", func(frozenset({\"3\", \"2\"}))\n ) # and this invariant should be different\n av = a.run(None, None)\n bv = b.run(None, None)\n cv = c.run(None, None)\n assert a.run(None, None)\n assert ppg.FunctionInvariant.compare_hashes(\n None, first_value(av), first_value(bv)\n )\n assert not ppg.FunctionInvariant.compare_hashes(\n None, first_value(av), first_value(cv)\n )\n\n def test_function_invariants_are_equal_if_dis_identical_or_source_identical(self):\n python_version = (3, 99)\n a = {\"source\": \"hello\", python_version: (\"dis\", \"closure\")}\n b = a.copy()\n b[\"source\"] = \"hello_world\"\n c = a.copy()\n c[python_version] = (\"disB\", \"closure\")\n assert ppg.FunctionInvariant.compare_hashes(None, a, a, python_version)\n assert ppg.FunctionInvariant.compare_hashes(\n None, a, b, python_version\n ) # same dis ,different source\n assert not ppg.FunctionInvariant.compare_hashes(\n None, a, c, python_version\n ) # different dis, same source\n\n def test_source_file_mtime_change_without_hash_change(self):\n import sys\n\n def inner():\n pass\n\n python_version = tuple(sys.version_info)[:2] # we only care about major.minor\n\n a = ppg.FunctionInvariant(\"a\", inner)\n calc = a.run(None, None)\n changed = calc.copy()\n changed[\"FIa\"][\"source_file\"][\"mtime\"] = -1\n changed[\"FIa\"][\"source_file\"][\"dis\"] = \"find me\"\n calc2 = a.run(None, changed)\n assert calc2[\"FIa\"][\"source_file\"][\"dis\"] == \"find me\"\n\n\[email protected](\"create_out_dir\")\[email protected](\"ppg2_per_test\")\nclass TestDependency:\n def test_simple_chain(self):\n o = Dummy()\n\n def load_a():\n return \"shu\"\n\n jobA = ppg.AttributeLoadingJob(\"a\", o, \"myattr\", load_a)\n ofB = \"out/B\"\n\n def do_write_b(ofB):\n write(ofB, o.myattr)\n\n jobB = ppg.FileGeneratingJob(ofB, do_write_b).depends_on(jobA)\n ofC = \"out/C\"\n\n def do_write_C(ofC):\n write(ofC, o.myattr)\n\n ppg.FileGeneratingJob(ofC, do_write_C).depends_on(jobA)\n\n ofD = \"out/D\"\n\n def do_write_d(ofD):\n write(ofD, read(ofC) + read(ofB))\n\n ppg.FileGeneratingJob(ofD, do_write_d).depends_on([jobA, jobB])\n\n def test_failed_job_kills_those_after(self):\n ofA = \"out/A\"\n\n def write_a(ofA):\n append(ofA, \"hello\")\n\n jobA = ppg.FileGeneratingJob(ofA, write_a)\n\n ofB = \"out/B\"\n\n def write_b(ofB):\n raise ValueError(\"shu\")\n\n jobB = ppg.FileGeneratingJob(ofB, write_b)\n jobB.depends_on(jobA)\n\n ofC = \"out/C\"\n\n def write_c(ofC):\n write(ofC, \"hello\")\n\n jobC = ppg.FileGeneratingJob(ofC, write_c)\n jobC.depends_on(jobB)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert os.path.exists(ofA) # which was before the error\n assert not (os.path.exists(ofB)) # which was on the error\n assert not (os.path.exists(ofC)) # which was after the error\n ppg.new()\n jobA = ppg.FileGeneratingJob(ofA, write_a)\n jobC = ppg.FileGeneratingJob(ofC, write_c)\n\n def write_b_ok(ofB):\n write(ofB, \"BB\")\n\n jobB = ppg.FileGeneratingJob(ofB, write_b_ok)\n jobB.depends_on(jobA)\n jobC.depends_on(jobB)\n ppg.run()\n\n assert os.path.exists(ofA)\n assert read(ofA) == \"hello\" # run only once!\n assert os.path.exists(ofB)\n assert os.path.exists(ofC)\n\n def test_done_filejob_does_not_gum_up_execution(self):\n ofA = \"out/A\"\n write(ofA, \"1111\")\n\n def write_a(ofA):\n append(ofA, \"hello\")\n\n jobA = ppg.FileGeneratingJob(ofA, write_a, depend_on_function=False)\n\n ofB = \"out/B\"\n\n def write_b(ofB):\n append(ofB, \"hello\")\n\n jobB = ppg.FileGeneratingJob(ofB, write_b)\n 
jobB.depends_on(jobA)\n\n ofC = \"out/C\"\n\n def write_c(ofC):\n write(ofC, \"hello\")\n\n jobC = ppg.FileGeneratingJob(ofC, write_c)\n jobC.depends_on(jobB)\n assert os.path.exists(ofA)\n\n ppg.run()\n\n assert os.path.exists(ofB)\n assert os.path.exists(ofC)\n assert (\n read(ofA) == \"hello\"\n ) # change from ppgA, if it's not our file (we have recorded not output), rerun\n\n def test_invariant_violation_redoes_deps_but_not_nondeps(self):\n def get_job(name):\n fn = \"out/\" + name\n\n def do_write(of):\n if os.path.exists(fn + \".sentinel\"):\n d = read(fn + \".sentinel\")\n else:\n d = \"\"\n append(fn + \".sentinel\", name) # get's longer all the time...\n write(fn, d + name) # get's deleted anyhow...\n\n return ppg.FileGeneratingJob(fn, do_write)\n\n jobA = get_job(\"A\")\n jobB = get_job(\"B\")\n jobC = get_job(\"C\")\n get_job(\"D\")\n jobC.depends_on(jobB)\n jobB.depends_on(jobA)\n dep = ppg.ParameterInvariant(\"myparam\", (\"hello\",))\n jobA.depends_on(dep)\n ppg.run()\n assert read(\"out/A\") == \"A\"\n assert read(\"out/B\") == \"B\"\n assert read(\"out/C\") == \"C\"\n\n ppg.new()\n jobA = get_job(\"A\")\n jobB = get_job(\"B\")\n jobC = get_job(\"C\")\n get_job(\"D\")\n jobC.depends_on(jobB)\n jobB.depends_on(jobA)\n dep = ppg.ParameterInvariant(\"myparam\", (\"hello stranger\",))\n jobA.depends_on(dep) # now, the invariant has been changed, all jobs rerun...\n ppg.run()\n assert read(\"out/A\") == \"AA\" # thanks to our smart rerun aware job definition..\n assert read(\"out/B\") == \"BB\"\n assert read(\"out/C\") == \"CC\"\n assert read(\"out/D\") == \"D\" # since that one does not to be rerun...\n\n def test_depends_on_accepts_a_list(self):\n jobA = ppg.FileGeneratingJob(\"out/A\", lambda of: write(\"out/A\", \"A\"))\n jobB = ppg.FileGeneratingJob(\"out/B\", lambda of: write(\"out/B\", \"B\"))\n jobC = ppg.FileGeneratingJob(\"out/C\", lambda of: write(\"out/C\", \"C\"))\n jobC.depends_on([jobA, jobB])\n ppg.run()\n assert read(\"out/A\") == \"A\"\n assert read(\"out/B\") == \"B\"\n assert read(\"out/C\") == \"C\"\n\n def test_job_iter(self):\n jobA = ppg.FileGeneratingJob(\"out/A\", lambda of: write(\"out/A\", \"A\"))\n l = list(iter(jobA))\n assert l[0] is jobA\n\n def test_depends_on_accepts_multiple_values(self):\n jobA = ppg.FileGeneratingJob(\"out/A\", lambda of: write(\"out/A\", \"A\"))\n jobB = ppg.FileGeneratingJob(\"out/B\", lambda of: write(\"out/B\", \"B\"))\n jobC = ppg.FileGeneratingJob(\"out/C\", lambda of: write(\"out/C\", \"C\"))\n jobC.depends_on(jobA, jobB)\n ppg.run()\n assert read(\"out/A\") == \"A\"\n assert read(\"out/B\") == \"B\"\n assert read(\"out/C\") == \"C\"\n\n def test_depends_on_accepts_multiple_values_mixed(self):\n jobA = ppg.FileGeneratingJob(\"out/A\", lambda of: write(\"out/A\", \"A\"))\n jobB = ppg.FileGeneratingJob(\"out/B\", lambda of: write(\"out/B\", \"B\"))\n jobC = ppg.FileGeneratingJob(\"out/C\", lambda of: write(\"out/C\", \"C\"))\n jobC.depends_on(jobA, [jobB])\n ppg.run()\n assert read(\"out/A\") == \"A\"\n assert read(\"out/B\") == \"B\"\n assert read(\"out/C\") == \"C\"\n\n def test_depends_on_none_ignored(self):\n jobA = ppg.FileGeneratingJob(\"out/A\", lambda of: write(\"out/A\", \"A\"))\n jobB = ppg.FileGeneratingJob(\"out/B\", lambda of: write(\"out/B\", \"B\"))\n jobC = ppg.FileGeneratingJob(\"out/C\", lambda of: write(\"out/C\", \"C\"))\n jobC.depends_on(jobA, [jobB], None, [None])\n jobC.depends_on(None)\n jobC.depends_on() # that's a no-op as well\n jobC.depends_on([]) # that's a no-op as well\n ppg.run()\n assert 
read(\"out/A\") == \"A\"\n assert read(\"out/B\") == \"B\"\n assert read(\"out/C\") == \"C\"\n\n def test_depends_on_excludes_on_non_jobs(self):\n jobA = ppg.FileGeneratingJob(\"out/A\", lambda of: write(\"out/A\", \"A\"))\n\n with pytest.raises(KeyError):\n jobA.depends_on(\"SHU\")\n\n def test_depends_on_instant_cycle_check(self):\n jobA = ppg.FileGeneratingJob(\"out/A\", lambda of: write(\"out/A\", \"A\"))\n jobB = ppg.FileGeneratingJob(\"out/b\", lambda of: write(\"out/B\", \"b\"))\n jobB.depends_on(jobA)\n\n with pytest.raises(ppg.NotADag):\n jobA.depends_on(jobA)\n\n with pytest.raises(ppg.NotADag):\n jobA.depends_on(jobB)\n\n def test_depends_on_accepts_a_list_of_lists(self):\n jobA = ppg.FileGeneratingJob(\"out/A\", lambda of: write(\"out/A\", \"A\"))\n jobB = ppg.FileGeneratingJob(\"out/B\", lambda of: write(\"out/B\", \"B\"))\n jobC = ppg.FileGeneratingJob(\n \"out/C\",\n lambda of: write(\"out/C\", read(\"out/A\") + read(\"out/B\") + read(\"out/D\")),\n )\n jobD = ppg.FileGeneratingJob(\"out/D\", lambda of: write(\"out/D\", \"D\"))\n jobC.depends_on([jobA, [jobB, jobD]])\n assert ppg.global_pipegraph.has_edge(jobD, jobC)\n assert ppg.global_pipegraph.has_edge(jobA, jobC)\n assert ppg.global_pipegraph.has_edge(jobB, jobC)\n ppg.run()\n assert read(\"out/A\") == \"A\"\n assert read(\"out/B\") == \"B\"\n assert read(\"out/C\") == \"ABD\"\n assert read(\"out/D\") == \"D\"\n\n def test_invariant_job_depends_on_raises(self):\n\n with pytest.raises(ppg.JobContractError):\n ppg.jobs._InvariantMixin().depends_on(ppg.Job([\"B\"]))\n\n def test_cached_job_depends_on(self):\n class Dummy:\n pass\n\n o = Dummy()\n jobA = ppg.CachedAttributeLoadingJob(\"cache/A\", o, \"a\", lambda: 23)\n jobB = ppg.Job([\"B\"])\n jobC = ppg.Job([\"C\"])\n jobD = ppg.Job([\"D\"])\n jobA.calc.depends_on([jobB], jobC, jobD)\n assert not ppg.global_pipegraph.has_edge(jobB, jobA.load)\n assert not ppg.global_pipegraph.has_edge(jobC, jobA.load)\n assert not ppg.global_pipegraph.has_edge(jobD, jobA.load)\n assert ppg.global_pipegraph.has_edge(jobB, jobA.calc)\n assert ppg.global_pipegraph.has_edge(jobC, jobA.calc)\n assert ppg.global_pipegraph.has_edge(jobD, jobA.calc)\n\n def test_dependency_placeholder(self):\n jobA = ppg.FileGeneratingJob(\n \"out/A\", lambda of: write(\"out/A\", \"A\" + read(\"out/B\"))\n )\n jobB = ppg.FileGeneratingJob(\"out/B\", lambda of: write(\"out/B\", \"B\"))\n\n def gen_deps():\n logger.info(\"gen deps called\")\n return [jobB]\n\n jobA.depends_on(gen_deps)\n ppg.run()\n assert read(\"out/A\") == \"AB\"\n\n def test_dependency_placeholder2(self):\n jobA = ppg.FileGeneratingJob(\n \"out/A\", lambda of: write(\"out/A\", \"A\" + read(\"out/B\"))\n )\n\n def gen_deps():\n return ppg.FileGeneratingJob(\"out/B\", lambda of: write(\"out/B\", \"B\"))\n\n jobA.depends_on(gen_deps)\n ppg.run()\n assert read(\"out/A\") == \"AB\"\n\n def test_dependency_placeholder_nested(self):\n jobA = ppg.FileGeneratingJob(\n \"out/A\", lambda of: write(\"out/A\", \"A\" + read(\"out/B\") + read(\"out/C\"))\n )\n\n def gen_deps2():\n return ppg.FileGeneratingJob(\"out/C\", lambda of: write(\"out/C\", \"C\"))\n\n def gen_deps():\n return ppg.FileGeneratingJob(\n \"out/B\", lambda of: write(\"out/B\", \"B\")\n ).depends_on(gen_deps2)\n\n jobA.depends_on(gen_deps)\n ppg.run()\n assert read(\"out/A\") == \"ABC\"\n\n def test_dependency_placeholder_dynamic_auto_invariants(self):\n jobA = ppg.FileGeneratingJob(\n \"out/A\", lambda of: write(\"out/A\", \"A\" + read(\"out/B\"))\n )\n\n def check_function_invariant(of):\n 
write(\"out/B\", \"B\")\n assert (\n \"FITestDependency.test_dependency_placeholder_dynamic_auto_invariants.<locals>.check_function_invariant\"\n in ppg.global_pipegraph.jobs\n )\n\n def gen_deps():\n jobB = ppg.FileGeneratingJob(\"out/B\", check_function_invariant)\n print(\"gen deps called\")\n return [jobB]\n\n jobA.depends_on(gen_deps)\n assert \"FIout/B\" not in ppg.global_pipegraph.jobs\n ppg.run()\n assert read(\"out/A\") == \"AB\"\n\n\[email protected](\"ppg2_per_test\")\nclass TestDefinitionErrors:\n def test_defining_function_invariant_twice(self):\n a = lambda: 55 # noqa:E731\n b = lambda: 66 # noqa:E731\n ppg.FunctionInvariant(\"a\", a)\n ppg.FunctionInvariant(\"a\", a) # that's ok...\n\n with pytest.raises(ppg.JobRedefinitionError):\n ppg.FunctionInvariant(\"a\", b)\n\n ppg.new(run_mode=ppg.RunMode.NOTEBOOK)\n ppg.FunctionInvariant(\"a\", a)\n j = ppg.FunctionInvariant(\"a\", b)\n assert j.function is b\n\n def test_defining_function_and_parameter_invariant_with_same_name(self):\n # you can't really, FunctionInvariants are Prefixed with FI, ParameterInvariant with PI\n a = lambda: 55 # noqa:E731\n ppg.FunctionInvariant(\"PIa\", a)\n ppg.ParameterInvariant(\"a\", \"b\")\n\n def test_defining_function_and_parameter_invariant_with_same_name_reversed(self):\n a = lambda: 55 # noqa:E731\n ppg.ParameterInvariant(\"a\", \"b\")\n ppg.FunctionInvariant(\"PIa\", a)\n\n def test_parameter_invariant_does_not_accept_function(self):\n with pytest.raises(TypeError):\n ppg.ParameterInvariant(\"a\", lambda: 55)\n\n\[email protected](\"ppg2_per_test\")\nclass TestFunctionInvariantDisChanges_BetweenVersions:\n def test_function_name_is_irrelevant(self):\n def test_a():\n return 55\n\n def test_b():\n return 55\n\n def test_c():\n return 56\n\n a = ppg.FunctionInvariant(\"a\", test_a)\n b = ppg.FunctionInvariant(\"b\", test_b)\n c = ppg.FunctionInvariant(\"c\", test_c)\n av = a.run(None, None)\n bv = b.run(None, None)\n cv = c.run(None, None)\n assert ppg.FunctionInvariant.compare_hashes(\n None, first_value(av), first_value(bv)\n )\n assert not ppg.FunctionInvariant.compare_hashes(\n None, first_value(av), first_value(cv)\n )\n\n def test_docstring_is_irrelevant(self):\n def test():\n \"\"\"A\"\"\"\n return 55\n\n a = ppg.FunctionInvariant(\"a\", test)\n\n # fmt: off\n def test():\n '''B'''\n return 55\n # fmt: on\n b = ppg.FunctionInvariant(\"b\", test)\n\n def test():\n \"c\"\n return 56\n\n c = ppg.FunctionInvariant(\"c\", test)\n\n def test():\n \"c\"\n return 56\n\n d = ppg.FunctionInvariant(\"d\", test)\n av = first_value(a.run(None, None))\n bv = first_value(b.run(None, None))\n cv = first_value(c.run(None, None))\n dv = first_value(d.run(None, None))\n assert ppg.FunctionInvariant.compare_hashes(None, (av), (bv))\n assert ppg.FunctionInvariant.compare_hashes(None, (cv), (dv))\n assert not ppg.FunctionInvariant.compare_hashes(None, (av), (cv))\n", "id": "10706639", "language": "Python", "matching_score": 3.0386242866516113, "max_stars_count": 0, "path": "tests/test_invariants_and_dependencies.py" }, { "content": "import pypipegraph2 as ppg\nimport sys\nimport subprocess\nfrom pathlib import Path\n\n\ntry:\n Path('deleteme').unlink()\nexcept:\n pass\nif Path('.ppg').exists():\n import shutil\n shutil.rmtree('.ppg')\n\nppg.new()\ndef doit(output_filename):\n subprocess.check_call(['./rust_stdout_stderr'])\n print(\"pythonstdout\\n\")\n sys.stderr.write(\"pythonstderr\\n\")\n output_filename.write_text('done')\n\n\nj = ppg.TempFileGeneratingJob('.ppg/deleteme', doit)\nj2 = 
ppg.FileGeneratingJob('.ppg/shu', lambda of : of.write_text('hello'))\nj2.depends_on(j)\nj2()\n\nPath('.ppg/shu').unlink()\nPath('.ppg/deleteme').write_text('done')\nj2()\n\n", "id": "7220248", "language": "Python", "matching_score": 2.0585014820098877, "max_stars_count": 0, "path": "tests/testchatter.py" }, { "content": "class Dummy(object):\n pass\n\n\nfrom pathlib import Path\n\n\ndef write(filename, text):\n Path(filename).write_text(text)\n\n\ndef append(filename, text):\n p = Path(filename)\n if p.exists():\n old = p.read_text()\n else:\n old = \"\"\n p.write_text(old + text)\n\n\ndef writeappend(filename_write, filename_append, string):\n write(filename_write, string)\n append(filename_append, string)\n\n\ndef read(filename):\n return Path(filename).read_text()\n\n\ndef counter(filename):\n \"\"\"Helper for counting invocations in a side-effect file\"\"\"\n try:\n res = int(Path(filename).read_text())\n except: # noqa: E722\n res = 0\n Path(filename).write_text(str(res + 1))\n return str(res)\n\n\ndef force_load(job):\n \"\"\"Force the loading of a Dataloading job that has no other dependents\"\"\"\n import pypipegraph2 as ppg\n ppg.JobGeneratingJob(job.job_id + '_gen', lambda: None).depends_on(job)\n", "id": "6050164", "language": "Python", "matching_score": 0.29157882928848267, "max_stars_count": 0, "path": "tests/shared.py" }, { "content": "import pytest\nimport pypipegraph as ppg\n\n\[email protected]\ndef new_pipegraph_no_qc(new_pipegraph):\n ppg.util.global_pipegraph._qc_keep_function = False\n return new_pipegraph\n # this really does not work :(\n\n\[email protected]\ndef both_ppg_and_no_ppg_no_qc(both_ppg_and_no_ppg):\n if ppg.util.global_pipegraph is not None:\n ppg.util.global_pipegraph._qc_keep_function = False\n return both_ppg_and_no_ppg\n", "id": "1824483", "language": "Python", "matching_score": 1.9581736326217651, "max_stars_count": 0, "path": "src/mbf_qualitycontrol/testing/fixtures.py" }, { "content": "import pypipegraph as ppg\n\n__version__ = \"0.1\"\n\n\ndef register_qc(job):\n for k in job:\n if not isinstance(job, ppg.Job):\n raise TypeError(\"register_qc takes only job objects\")\n job._mbf_qc = True\n if hasattr(ppg.util.global_pipegraph, \"_qc_keep_function\") and (\n getattr(ppg.util.global_pipegraph, \"_qc_keep_function\") is False\n or not getattr(ppg.util.global_pipegraph, \"_qc_keep_function\")(job)\n ):\n job.prune()\n for attr in [\"lfg\", \"cache_job\", \"table_job\"]:\n j = getattr(job, attr, None)\n if j is not None:\n if isinstance(j, ppg.Job):\n register_qc(j)\n return job\n\n\ndef qc_disabled():\n if not ppg.inside_ppg():\n return True\n return getattr(ppg.util.global_pipegraph, \"_qc_keep_function\", True) is False\n\n\ndef disable_qc():\n \"\"\"Disable qc.\n That means new jobs that are generated are automatically pruned\n (but may be revived by calling prune_qc with an appropriate keep function),\n but code that depends on qc_disabled() does not even generate the jobs\"\"\"\n\n ppg.util.global_pipegraph._qc_keep_function = False\n\n\ndef prune_qc(keep=False):\n \"\"\"Prune all qc jobs but those where keep returns True.\n Also applies to further qc jobs!\"\"\"\n ppg.util.global_pipegraph._qc_keep_function = keep\n for job in get_qc_jobs():\n if keep is not False and keep(job):\n job.unprune()\n else:\n job.prune()\n\n\ndef get_qc_jobs():\n \"\"\"Get all qc jobs defined so far\"\"\"\n for job in ppg.util.global_pipegraph.jobs.values():\n if hasattr(job, \"_mbf_qc\"):\n yield job\n\n\nclass QCCollectingJob(ppg.FileGeneratingJob):\n def 
__init__(self, job_id, callback, depends_on_function=True):\n # if ppg.job.was_inited_before(self, QCCollectingJob):\n if hasattr(self, \"inner_callback\"):\n return\n self.inner_callback = callback\n\n def cb(output_filename):\n callback(output_filename, self.objects)\n\n self.objects = []\n if hasattr(ppg, \"is_ppg2\"):\n import pypipegraph2 as ppg2\n\n cb = ppg2.jobs._mark_function_wrapped(cb, self.inner_callback)\n super().__init__(job_id, cb)\n if not depends_on_function:\n self._ignore_code_changes()\n else:\n self.do_ignore_code_changes = False\n self.depends_on(self._create_parameter_dependency)\n self._handle_function_dependency(self.generating_function)\n else:\n super().__init__(job_id, cb)\n\n def add(self, obj):\n self.objects.append(obj)\n return self\n\n def inject_auto_invariants(self):\n if not self.do_ignore_code_changes:\n self.depends_on(\n ppg.FunctionInvariant(self.job_id + \"_func\", self.inner_callback)\n )\n self._create_parameter_dependency()\n\n def _create_parameter_dependency(self):\n names = []\n for obj in self.objects:\n if hasattr(obj, \"name\"):\n names.append(obj.name)\n elif hasattr(obj, \"columns\"):\n names.append(obj.columns[0])\n else:\n print(type(obj))\n raise ValueError(dir(obj))\n self.depends_on(\n ppg.ParameterInvariant(\n self.job_id,\n tuple(sorted(names)),\n )\n )\n", "id": "669115", "language": "Python", "matching_score": 1.1138036251068115, "max_stars_count": 0, "path": "src/mbf_qualitycontrol/__init__.py" }, { "content": "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2012, <NAME> <<EMAIL>>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\nof the Software, and to permit persons to whom the Software is furnished to do\nso, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nimport pytest\nimport pypipegraph as ppg\nfrom .shared import assertRaises\n\n\[email protected](\"ppg1_compatibility_test\")\nclass TestUtils:\n def test_assert_uniqueness_simple(self):\n class Dummy:\n def __init__(self, name):\n self.name = name\n ppg.util.assert_uniqueness_of_object(self)\n\n Dummy(\"shu\")\n\n def inner():\n Dummy(\"shu\")\n\n assertRaises(ValueError, inner)\n\n def test_assert_uniqueness_ok(self):\n class Dummy:\n def __init__(self, name):\n self.name = name\n ppg.util.assert_uniqueness_of_object(self)\n\n Dummy(\"shu\")\n Dummy(\"sha\")\n\n def inner():\n Dummy(\"shu\")\n\n assertRaises(ValueError, inner)\n\n def test_assert_uniqueness_ok_multi_classes(self):\n class Dummy:\n def __init__(self, name):\n self.name = name\n ppg.util.assert_uniqueness_of_object(self)\n\n class Dummy2:\n def __init__(self, name):\n self.name = name\n ppg.util.assert_uniqueness_of_object(self)\n\n Dummy(\"shu\")\n Dummy2(\"shu\")\n\n def inner():\n Dummy(\"shu\")\n\n assertRaises(ValueError, inner)\n\n def test_assert_uniqueness_raises_slashes(self):\n class Dummy:\n def __init__(self, name):\n self.name = name\n ppg.util.assert_uniqueness_of_object(self)\n\n Dummy(\"shu\")\n\n def inner():\n Dummy(\"shu/sha\")\n\n assertRaises(ValueError, inner)\n\n def test_assert_uniqueness_raises_also_check(self):\n class Dummy:\n def __init__(self, name):\n self.name = name\n ppg.util.assert_uniqueness_of_object(self)\n\n class Dummy2:\n def __init__(self, name):\n self.name = name\n ppg.util.assert_uniqueness_of_object(self, also_check=Dummy)\n\n Dummy(\"shu\")\n\n def inner():\n Dummy2(\"shu\")\n\n assertRaises(ValueError, inner)\n\n def test_assert_uniqueness_raises_also_check_no_instance_of_second_class(self):\n class Dummy:\n def __init__(self, name):\n self.name = name\n ppg.util.assert_uniqueness_of_object(self)\n\n class Dummy2:\n def __init__(self, name):\n self.name = name\n ppg.util.assert_uniqueness_of_object(self, also_check=Dummy)\n\n # a = Dummy('shu')\n # does not raise of course...\n Dummy2(\"shu\")\n\n def inner():\n Dummy2(\"shu\")\n\n assertRaises(ValueError, inner)\n\n def test_assert_uniqueness_raises_also_check_list(self):\n class Dummy:\n def __init__(self, name):\n self.name = name\n ppg.util.assert_uniqueness_of_object(self)\n\n class Dummy2:\n def __init__(self, name):\n self.name = name\n ppg.util.assert_uniqueness_of_object(self, also_check=[Dummy])\n\n Dummy(\"shu\")\n\n def inner():\n Dummy2(\"shu\")\n\n assertRaises(ValueError, inner)\n\n def test_exception_on_run_without_previous_new_pipegraph(self):\n ppg.util.global_pipegraph = None\n with pytest.raises(ValueError):\n ppg.run_pipegraph()\n\n def test_flatten_jobs(self):\n j1 = ppg.FileGeneratingJob(\"A\", lambda: \"A\")\n j2 = ppg.FileGeneratingJob(\"B\", lambda: \"B\")\n j3 = ppg.FileGeneratingJob(\"Bc\", lambda: \"C\")\n res = [j1, [j2, [j3, j1]]]\n # no dedup on this.\n assert list(ppg.util.flatten_jobs(res)) == [j1, j2, j3, j1]\n", "id": "2463761", "language": "Python", "matching_score": 5.041041374206543, "max_stars_count": 0, "path": "tests/ppg1_compatibility_layer/test_util.py" }, { "content": "import pytest\nimport pypipegraph2 as ppg\n\n\[email protected](\"ppg2_per_test\")\nclass TestUtils:\n def test_assert_uniqueness_simple(self):\n 
class Dummy:\n def __init__(self, name):\n self.name = name\n ppg.util.assert_uniqueness_of_object(self)\n\n Dummy(\"shu\")\n\n with pytest.raises(ValueError):\n Dummy(\"shu\")\n\n def test_assert_uniqueness_ok(self):\n class Dummy:\n def __init__(self, name):\n self.name = name\n ppg.util.assert_uniqueness_of_object(self)\n\n Dummy(\"shu\")\n Dummy(\"sha\")\n\n with pytest.raises(ValueError):\n Dummy(\"shu\")\n\n def test_assert_uniqueness_ok_multi_classes(self):\n class Dummy:\n def __init__(self, name):\n self.name = name\n ppg.util.assert_uniqueness_of_object(self)\n\n class Dummy2:\n def __init__(self, name):\n self.name = name\n ppg.util.assert_uniqueness_of_object(self)\n\n Dummy(\"shu\")\n Dummy2(\"shu\")\n\n with pytest.raises(ValueError):\n Dummy(\"shu\")\n\n def test_assert_uniqueness_raises_slashes(self):\n class Dummy:\n def __init__(self, name):\n self.name = name\n ppg.util.assert_uniqueness_of_object(self)\n\n Dummy(\"shu\")\n\n with pytest.raises(ValueError):\n Dummy(\"shu/sha\")\n\n def test_assert_uniqueness_raises_also_check(self):\n class Dummy:\n def __init__(self, name):\n self.name = name\n ppg.util.assert_uniqueness_of_object(self)\n\n class Dummy2:\n def __init__(self, name):\n self.name = name\n ppg.util.assert_uniqueness_of_object(self, also_check=Dummy)\n\n Dummy(\"shu\")\n\n with pytest.raises(ValueError):\n Dummy2(\"shu\")\n\n def test_assert_uniqueness_raises_also_check_no_instance_of_second_class(self):\n class Dummy:\n def __init__(self, name):\n self.name = name\n ppg.util.assert_uniqueness_of_object(self)\n\n class Dummy2:\n def __init__(self, name):\n self.name = name\n ppg.util.assert_uniqueness_of_object(self, also_check=Dummy)\n\n # a = Dummy('shu')\n # does not raise of course...\n Dummy2(\"shu\")\n\n with pytest.raises(ValueError):\n Dummy2(\"shu\")\n\n def test_assert_uniqueness_raises_also_check_list(self):\n class Dummy:\n def __init__(self, name):\n self.name = name\n ppg.util.assert_uniqueness_of_object(self)\n\n class Dummy2:\n def __init__(self, name):\n self.name = name\n ppg.util.assert_uniqueness_of_object(self, also_check=[Dummy])\n\n Dummy(\"shu\")\n\n with pytest.raises(ValueError):\n Dummy2(\"shu\")\n\n def test_exception_on_run_without_previous_new_pipegraph(self):\n ppg.global_pipegraph = None\n with pytest.raises(ValueError):\n ppg.run()\n\n def test_flatten_jobs(self):\n ppg.new(run_mode=ppg.RunMode.NOTEBOOK)\n j1 = ppg.FileGeneratingJob(\"A\", lambda of: \"A\")\n j2 = ppg.FileGeneratingJob(\"B\", lambda of: \"B\")\n j3 = ppg.FileGeneratingJob(\"B\", lambda of: \"C\")\n res = [j1, [j2, [j3, j1]]]\n # no dedup on this.\n assert list(ppg.util.flatten_jobs(res)) == [j1, j2, j3, j1]\n\n\n def test_inside_ppg(self):\n assert ppg.global_pipegraph is not None\n assert ppg.inside_ppg()\n ppg.global_pipegraph = None\n assert not ppg.inside_ppg()\n\n", "id": "11293499", "language": "Python", "matching_score": 0.6730314493179321, "max_stars_count": 0, "path": "tests/test_util.py" }, { "content": "import pypipegraph2 as ppg\n\nshould_len = 1024 * 1024\n\n\ndef inner(*args):\n # df = pd.DataFrame()\n # df['shu']\n raise ValueError(\"x \" * (should_len // 2))\n\n\nppg.FileGeneratingJob(\"b\", lambda of: 5).depends_on(ppg.DataLoadingJob(\"A\", inner))\nppg.run()\n#\n", "id": "930064", "language": "Python", "matching_score": 1.5657509565353394, "max_stars_count": 0, "path": "examples/massive_exception.py" }, { "content": "# pandas exceptions have 'no source available' sections in their tracebacks\nimport pypipegraph2 as ppg\nfrom pathlib import 
Path\nimport os\nimport shutil\nimport pandas\n\np = Path(\"run/pandas_excetion\")\nif p.exists():\n shutil.rmtree(p)\np.mkdir(exist_ok=True, parents=True)\nos.chdir(p)\n\n\ndef a():\n try:\n df = pandas.DataFrame()\n df[\"shu\"]\n except ValueError as e:\n raise KeyError() from e\n\n\nppg.DataLoadingJob(\"a\", a)\ntry:\n ppg.run()\nexcept ppg.RunFailed:\n pass\n\n\ntry:\n raise ValueError()\nexcept ValueError:\n raise KeyError()\n", "id": "10894069", "language": "Python", "matching_score": 1.0814803838729858, "max_stars_count": 0, "path": "examples/pandas_exception.py" }, { "content": "#!/usr/bin/env python3\n\n\"\"\"This creates one FileGeneratingJob with a few hundred of direct DataLoadingJobs,\nwhich exposed a o(n**3) or so bottleneck in runner.modify_dag\n\"\"\"\nimport pypipegraph2 as ppg\nimport shutil\nfrom pathlib import Path\nimport os\nimport sys\n\nrun_dir = (Path(__file__).parent / \"run\").absolute()\n\ntry:\n count = int(sys.argv[1])\nexcept: # noqa:E722\n count = 200\n\n\ndef simple():\n if run_dir.exists():\n shutil.rmtree(run_dir)\n run_dir.mkdir()\n os.chdir(run_dir)\n ppg.new()\n\n data = {}\n\n def final(of):\n of.write_text(str(len(data)))\n\n final_job = ppg.FileGeneratingJob(\"final\", final)\n\n last = final_job\n for ii in range(count):\n\n def load(ii=ii):\n data[ii] = str(ii)\n\n loader = ppg.DataLoadingJob(f\"dl{ii}\", load)\n last.depends_on(loader)\n last = loader\n ppg.run()\n\n\ndef test_simple(benchmark):\n print(run_dir)\n benchmark(simple)\n\n\nif __name__ == \"__main__\":\n simple()\n", "id": "10333902", "language": "Python", "matching_score": 5.07328462600708, "max_stars_count": 0, "path": "benchmarks/bench_tall.py" }, { "content": "#!/usr/bin/env python3\n\n\"\"\"This creates one FileGeneratingJob with a few hundred of direct DataLoadingJobs,\nwhich exposed a o(n**3) or so bottleneck in runner.modify_dag\n\"\"\"\nimport pypipegraph2 as ppg\nimport shutil\nfrom pathlib import Path\nimport os\nimport sys\n\nrun_dir = (Path(__file__).parent / \"run\").absolute()\n\ntry:\n count = int(sys.argv[1])\nexcept: # noqa:E722\n count = 200\n\n\ndef simple():\n if run_dir.exists():\n shutil.rmtree(run_dir)\n run_dir.mkdir()\n os.chdir(run_dir)\n ppg.new()\n\n for ii in range(count):\n a = ppg.FileGeneratingJob(f\"file_{ii}\", lambda of: of.write_text(of.name))\n b = ppg.DataLoadingJob(f\"load_{ii}\", lambda ii=ii: ii)\n a.depends_on(b)\n ppg.run()\n\n\ndef test_simple(benchmark):\n print(run_dir)\n benchmark(simple)\n\n\nif __name__ == \"__main__\":\n simple()\n", "id": "7055824", "language": "Python", "matching_score": 3.4075815677642822, "max_stars_count": 0, "path": "benchmarks/bench_disjoint.py" }, { "content": "#!/usr/bin/env python3\nimport pypipegraph2 as ppg\nimport shutil\nfrom pathlib import Path\nimport os\n\nrun_dir = (Path(__file__).parent / \"run\").absolute()\ncounter = 0\n\n\ndef setup():\n global counter\n rd = run_dir / str(counter)\n rd.mkdir(parents=True, exist_ok=True)\n os.chdir(rd)\n counter += 1\n\n\ndef simple():\n setup()\n ppg.new()\n jobs = []\n data = {}\n for ii in range(100):\n\n def load(ii=ii):\n data[ii] = str(ii)\n\n loader = ppg.DataLoadingJob(f\"dl{ii}\", load)\n fg = ppg.FileGeneratingJob(\n str(ii),\n lambda of, ii=ii: of.write_text(data[ii]),\n # resources=ppg.Resources.RunsHere,\n )\n fg.depends_on(loader)\n if ii > 10:\n if ii % 2:\n fg.depends_on(jobs[ii % 10])\n else:\n loader.depends_on(jobs[ii % 10])\n jobs.append(fg)\n for i in range(10):\n ppg.run() # i really want to see the nothing to do parts\n\n\ndef 
test_simple(benchmark):\n if run_dir.exists():\n shutil.rmtree(run_dir)\n benchmark(simple)\n\n\nif __name__ == \"__main__\":\n simple()\n", "id": "915964", "language": "Python", "matching_score": 2.812063694000244, "max_stars_count": 0, "path": "benchmarks/bench_simple.py" }, { "content": "#!/usr/bin/env python3\nimport pypipegraph2 as ppg\nimport shutil\nfrom pathlib import Path\nimport os\n\nrun_dir = (Path(__file__).parent / \"run_kilojobs\").absolute()\nrun_dir.mkdir(exist_ok=True)\ncounter = 0\n\n\ndef gen_job(ii):\n return ppg.FileGeneratingJob(f\"{ii}.dat\", lambda of: of.write_text(\"h\"))\n\n\ndef kilo_jobs():\n os.chdir(run_dir)\n ppg.new()\n final = ppg.FileGeneratingJob(f\"final.dat\", lambda of: of.write_text(\"h\"))\n\n for ii in range(100000):\n final.depends_on(gen_job(ii))\n ppg.run()\n\n\nif __name__ == \"__main__\":\n kilo_jobs()\n\n\n# just doing it: 45s. (183k history)\n# noop rerun: 11s\n# rerun, rebuild final: 24s\n\n\n# after dicodon fixes\n# noop rerun: 22s\n# rerrun with rebuild final: 9s\n\n\n\n# 100k\n# run : 25m35s. 1.8M (compressed) history. 81MB uncompressed...\n# noop run: 1minute.\n\n", "id": "2826938", "language": "Python", "matching_score": 1.5153741836547852, "max_stars_count": 0, "path": "benchmarks/bench_kilo_jobs.py" }, { "content": "# demonstrate runtime logging\nimport sys\nimport pypipegraph2 as ppg\nimport time\nfrom pathlib import Path\nimport os\nimport shutil\n\np = Path(\"run/slow_running\")\nif len(sys.argv) <= 1:\n print(\"Nuking existing\", p)\n if p.exists():\n shutil.rmtree(p)\np.mkdir(exist_ok=True, parents=True)\nos.chdir(p)\n\n\ntotal_runtime = 10\n\nppg.new(cores=3, run_mode=ppg.RunMode.CONSOLE)\n\n\ndef gen_jobs_stack(name, runtime, total_runtime=total_runtime):\n def inner(of, runtime=runtime):\n for ii in range(runtime):\n time.sleep(1)\n if \"_10\" in of.name:\n raise ValueError(\"expected\")\n of.write_text(str(of) + \"_\" + str(runtime))\n\n out_jobs = []\n for ii in range(0, total_runtime + 1, runtime):\n ppg.FileGeneratingJob(\n f\"{name}_{ii}\", inner, resources=ppg.Resources.SingleCore\n )\n if out_jobs:\n out_jobs[-1].depends_on(ii)\n return out_jobs\n\n\n# gen_jobs_stack(\"1s\", 1)\ngen_jobs_stack(\"30s\", 20)\n# gen_jobs_stack('60s', 60)\n\n\ndef gen_2nd():\n gen_jobs_stack(\"1sB\", 1, 5)\n\n\nppg.JobGeneratingJob(\"genjobs\", gen_2nd)\n\ntry:\n ppg.run()\nexcept ppg.RunFailed:\n pass\n", "id": "9587768", "language": "Python", "matching_score": 2.9357311725616455, "max_stars_count": 0, "path": "examples/slow_for_interactive.py" }, { "content": "# demonstrate runtime logging\nimport pypipegraph2 as ppg\nimport time\nfrom pathlib import Path\nimport os\nimport shutil\n\np = Path(\"run/runtimes\")\nif p.exists():\n shutil.rmtree(p)\nPath(\"run/runtimes\").mkdir(exist_ok=True, parents=True)\nos.chdir(\"run/runtimes\")\n\n\ndef gen_job(name, runtime):\n def inner(fn):\n time.sleep(runtime)\n fn.write_text(str(runtime))\n\n return ppg.FileGeneratingJob(name, inner)\n\n\ngen_job(\"A1\", 1)\ngen_job(\"A.5\", 0.5)\ngen_job(\"A2\", 2)\ngen_job(\"A5\", 5)\nppg.run()\n#\n", "id": "9359737", "language": "Python", "matching_score": 0.31251344084739685, "max_stars_count": 0, "path": "examples/runtimes.py" }, { "content": "from pathlib import Path\nimport inspect\nimport sys\nfrom matplotlib.testing.compare import compare_images\nimport matplotlib.testing.exceptions\n\n\ndef caller_name(skip=2):\n \"\"\"Get a name of a caller in the format module.class.method\n\n `skip` specifies how many levels of stack to skip while getting caller\n 
name. skip=1 means \"who calls me\", skip=2 \"who calls my caller\" etc.\n\n An empty string is returned if skipped levels exceed stack height\n \"\"\"\n\n def stack_(frame):\n framelist = []\n while frame:\n framelist.append(frame)\n frame = frame.f_back\n return framelist\n\n stack = stack_(sys._getframe(1))\n start = 0 + skip\n if len(stack) < start + 1: # pragma: no cover\n return \"\" # pragma: no cover\n parentframe = stack[start]\n\n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n # TODO(techtonik): consider using __main__\n if module: # pragma: no branch\n name.append(module.__name__)\n # detect classname\n if \"self\" in parentframe.f_locals:\n # I don't know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals[\"self\"].__class__.__name__) # pragma: no cover\n codename = parentframe.f_code.co_name\n if codename != \"<module>\": # pragma: no branch # top level usually\n name.append(codename) # function or a method\n del parentframe\n return \".\".join(name)\n\n\ndef caller_file(skip=2):\n \"\"\"Get a name of a caller in the format module.class.method\n\n `skip` specifies how many levels of stack to skip while getting caller\n name. skip=1 means \"who calls me\", skip=2 \"who calls my caller\" etc.\n\n An empty string is returned if skipped levels exceed stack height\n \"\"\"\n\n def stack_(frame):\n framelist = []\n while frame:\n framelist.append(frame)\n frame = frame.f_back\n return framelist\n\n stack = stack_(sys._getframe(1))\n start = 0 + skip\n if len(stack) < start + 1: # pragma: no cover\n return \"\" # pragma: no cover\n parentframe = stack[start]\n\n return parentframe.f_code.co_filename\n\n\ndef dump_cp_for_changed_images(generated_image_path, should_path):\n import shlex\n\n print(\"use %s to accept all image changes\" % test_accept_image_path)\n if not test_accept_image_path.exists():\n test_accept_image_path.write_text(\"#!/usr/bin/sh\\n\")\n with open(test_accept_image_path, \"a\") as op:\n op.write(\n \"cp %s %s\\n\"\n % (shlex.quote(str(generated_image_path)), shlex.quote(str(should_path)))\n )\n test_accept_image_path.chmod(0o755)\n\n\ndef assert_image_equal(generated_image_path, suffix=\"\", tolerance=2, should_path=None):\n \"\"\"assert that the generated image and the base_images/{test_module}/{cls}/{function_name}{suffix}.extension is identical\"\"\"\n generated_image_path = Path(generated_image_path).absolute()\n if should_path is None:\n extension = generated_image_path.suffix\n caller = caller_name(1)\n caller_fn = caller_file(1)\n parts = caller.split(\".\")\n if len(parts) >= 3:\n func = parts[-1]\n cls = parts[-2]\n module = parts[-3]\n # if cls.lower() == cls: # not actually a class, a module instead\n # module = cls\n # cls = \"_\"\n else:\n module = parts[-2]\n cls = \"_\"\n func = parts[-1]\n should_path = (\n Path(caller_fn).parent\n / \"base_images\"\n / module\n / cls\n / (func + suffix + extension)\n ).resolve()\n if not generated_image_path.exists():\n raise IOError(f\"Image {generated_image_path} was not created\")\n if not should_path.exists():\n should_path.parent.mkdir(exist_ok=True, parents=True)\n dump_cp_for_changed_images(generated_image_path, should_path)\n raise ValueError(\n f\"Base_line image not found, perhaps: \\ncp {generated_image_path} {should_path}\"\n )\n try:\n\n err = compare_images(\n str(should_path), 
str(generated_image_path), tolerance, in_decorator=False\n )\n except matplotlib.testing.exceptions.ImageComparisonFailure as e:\n dump_cp_for_changed_images(generated_image_path, should_path)\n raise ValueError(\n \"Matplot lib testing for \\n%s \\n%s failed\\n%s\"\n % (generated_image_path, should_path, e)\n )\n # if isinstance(err, ValueError):\n # raise ValueError(\n # \"Images differed significantly, rms: %.2f\\nExpected: %s\\n, Actual: %s\\n, diff: %s\\n Accept with cp %s %s\\n\"\n # % (err.rms, err.expected, err.actual, err.diff, err.actual, err.expected)\n # )\n if err is not None:\n dump_cp_for_changed_images(generated_image_path, should_path)\n raise ValueError(err)\n\n\nif Path('.').absolute().name == 'tests':\n test_accept_image_path = Path(\"run/accept_all_image_changes.sh\").absolute()\nelse:\n test_accept_image_path = Path(\"tests/run/accept_all_image_changes.sh\").absolute()\nif test_accept_image_path.exists():\n test_accept_image_path.unlink()\n", "id": "10146470", "language": "Python", "matching_score": 2.22265625, "max_stars_count": 0, "path": "src/mbf_qualitycontrol/testing/__init__.py" }, { "content": "import pytest\nfrom pathlib import Path\nimport pypipegraph as ppg\nfrom mbf_qualitycontrol import (\n register_qc,\n prune_qc,\n get_qc_jobs,\n QCCollectingJob,\n disable_qc,\n qc_disabled,\n)\n\nfrom mbf_qualitycontrol.testing import assert_image_equal\n\n\nclass TestRegistration:\n def test_registration_and_pruning(self, new_pipegraph):\n with pytest.raises(TypeError):\n register_qc(\"shu\")\n jobA = ppg.FileGeneratingJob(\"a\", lambda: Path(\"a\").write_text(\"hello\"))\n register_qc(jobA)\n print(list(get_qc_jobs()))\n assert jobA in list(get_qc_jobs())\n assert not jobA._pruned\n jobc = register_qc(\n ppg.FileGeneratingJob(\"c\", lambda: Path(\"b\").write_text(\"hello\"))\n )\n\n def check_prune(job):\n return job.job_id.lower()[-1] == \"c\"\n\n prune_qc(check_prune)\n assert jobc in list(get_qc_jobs())\n assert not jobc._pruned\n jobB = register_qc(\n ppg.FileGeneratingJob(\"b\", lambda: Path(\"b\").write_text(\"hello\"))\n )\n assert jobB in list(get_qc_jobs())\n assert jobB._pruned\n jobC = register_qc(\n ppg.FileGeneratingJob(\"C\", lambda: Path(\"b\").write_text(\"hello\"))\n )\n assert not jobC._pruned\n assert len(list(get_qc_jobs())) == 4\n prune_qc()\n assert jobA._pruned\n assert jobB._pruned\n assert jobc._pruned\n assert jobC._pruned\n for j in get_qc_jobs():\n assert j._pruned\n\n def test_pruning_plotjob(self, new_pipegraph):\n jobA = register_qc(ppg.PlotJob(\"c.png\", lambda: None, lambda: None))\n assert not jobA._pruned\n prune_qc()\n assert jobA._pruned\n assert jobA.cache_job._pruned\n assert jobA.table_job._pruned\n\n def test_collecting_qc_job(self, new_pipegraph):\n def output(output_filename, elements):\n Path(output_filename).write_text(\"\\n\".join((e.name for e in elements)))\n\n class Collectible:\n def __init__(self, name):\n self.name = name\n\n job = QCCollectingJob(\"output\", output)\n job.add(Collectible(\"hello\"))\n job2 = QCCollectingJob(\"output\", output)\n assert job2 is job\n job.add(Collectible(\"world\"))\n ppg.run_pipegraph()\n assert Path(\"output\").read_text() == \"hello\\nworld\"\n\n def test_qc_pruning_is_bound_to_ppg(self, new_pipegraph):\n assert not qc_disabled()\n disable_qc()\n assert qc_disabled()\n new_pipegraph.new_pipegraph()\n assert not qc_disabled()\n\n def test_no_pipegraph_means_disabled(self, no_pipegraph):\n assert ppg.util.global_pipegraph is None\n assert qc_disabled()\n\n def 
test_both_fixtures(self, both_ppg_and_no_ppg_no_qc):\n if not ppg.inside_ppg():\n assert qc_disabled()\n else:\n assert qc_disabled()\n\n def test_new_pipegraph_no_qc(self, new_pipegraph_no_qc):\n assert qc_disabled()\n\n\nclass TestAssertImagesEqualInside:\n def test_assert_images_equal_inside_class(self):\n assert_image_equal(\n Path(__file__).parent\n / \"base_images\"\n / \"test_qc\"\n / \"_\"\n / \"test_assert_images_equal.png\"\n )\n with pytest.raises(ValueError):\n assert_image_equal(\n Path(__file__).parent\n / \"base_images\"\n / \"test_qc\"\n / \"_\"\n / \"test_assert_images_equal.png\",\n \"_b\",\n )\n\n\ndef test_assert_images_equal():\n assert_image_equal(\n Path(__file__).parent\n / \"base_images\"\n / \"test_qc\"\n / \"_\"\n / \"test_assert_images_equal.png\"\n )\n with pytest.raises(ValueError) as e: # here the baseline image does not exist\n assert_image_equal(\n Path(__file__).parent\n / \"base_images\"\n / \"test_qc\"\n / \"_\"\n / \"test_assert_images_equal.png\",\n \"_b\",\n )\n # should_path overwrites suffix\n assert_image_equal(\n Path(__file__).parent\n / \"base_images\"\n / \"test_qc\"\n / \"_\"\n / \"test_assert_images_equal.png\",\n \"_b\",\n should_path=Path(__file__).parent\n / \"base_images\"\n / \"test_qc\"\n / \"_\"\n / \"test_assert_images_equal.png\",\n )\n\n assert \"Base_line image not found\" in str(e.value)\n with pytest.raises(ValueError) as e: # here it is different\n assert_image_equal(\n Path(__file__).parent\n / \"base_images\"\n / \"test_qc\"\n / \"_\"\n / \"test_assert_images_equal.png\",\n suffix=\"_c\",\n )\n assert \"Image files did not match\" in str(e.value)\n\n with pytest.raises(IOError) as e:\n assert_image_equal(\"does not exist\")\n assert \"not created\" in str(e.value)\n\n # with pytest.raises(ValueError) as e: #here it is different\n with pytest.raises(ValueError) as e: # here it is different\n assert_image_equal(\n Path(__file__).parent\n / \"base_images\"\n / \"test_qc\"\n / \"_\"\n / \"test_assert_images_equal.png\",\n suffix=\"_d\",\n )\n assert \"do not match expected size\" in str(e.value)\n", "id": "11107399", "language": "Python", "matching_score": 3.0365445613861084, "max_stars_count": 0, "path": "tests/test_qc.py" }, { "content": "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2012, <NAME> <<EMAIL>>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\nof the Software, and to permit persons to whom the Software is furnished to do\nso, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nimport pytest\nfrom .shared import append, write, read\nimport unittest\nimport os\nfrom pathlib import Path\n\ntry:\n import dppd\n import dppd_plotnine # noqa: F401\n\n dp, X = dppd.dppd()\n\n has_pyggplot = True\nexcept ImportError:\n has_pyggplot = False\n pass\n\n\nif has_pyggplot: # noqa C901\n # import R\n import pandas as pd\n import pypipegraph as ppg\n import subprocess\n\n def magic(filename):\n \"\"\"See what linux 'file' commando says about that file\"\"\"\n if not os.path.exists(filename):\n raise OSError(\"Does not exists %s\" % filename)\n p = subprocess.Popen([\"file\", filename], stdout=subprocess.PIPE)\n stdout, stderr = p.communicate()\n return stdout\n\n @pytest.mark.usefixtures(\"ppg1_compatibility_test\")\n class TestPlotJob:\n def test_basic(self):\n def calc():\n return pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))}\n )\n\n def plot(df):\n return dp(df).p9().add_point(\"X\", \"Y\")\n\n def plot2(df):\n p = dp(df).p9().add_point(\"Y\", \"X\")\n p.width = 5\n p.height = 2\n return p\n\n of = \"out/test.png\"\n p = ppg.PlotJob(of, calc, plot)\n #ppg2 has no add_fiddle p.add_fiddle(lambda p: dp(p).scale_x_continuous(trans=\"log10\").pd)\n p.add_another_plot(\"out/test2.png\", plot2)\n ppg.run_pipegraph()\n assert magic(of).find(b\"PNG image\") != -1\n assert os.path.exists(of + \".tsv\")\n assert os.path.exists(\"cache/out/test.png\")\n assert os.path.exists(\"out/test2.png\")\n assert not os.path.exists(\"cache/out/test2.png\")\n assert not os.path.exists(\"cache/out/test2.png.tsv\")\n\n def test_basic_skip_table(self):\n def calc():\n return pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))}\n )\n\n def plot(df):\n return dp(df).p9().add_point(\"X\", \"Y\")\n\n of = \"out/test.png\"\n ppg.PlotJob(of, calc, plot, skip_table=True)\n ppg.run_pipegraph()\n assert magic(of).find(b\"PNG image\") != -1\n assert not os.path.exists(of + \".tsv\")\n assert os.path.exists(\"cache/out/test.png\")\n\n def test_basic_return_dict(self):\n def calc():\n return {\n \"A\": pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))}\n )\n }\n\n def plot(df):\n p = dp(df[\"A\"]).p9().add_point(\"X\", \"Y\")\n p.width = 5\n p.height = 1\n return p\n\n of = \"out/test.png\"\n ppg.PlotJob(of, calc, plot)\n ppg.run_pipegraph()\n assert magic(of).find(b\"PNG image\") != -1\n assert read(of + \".tsv\").find(\"#A\\n\") != -1\n\n def test_basic_return_dict_non_df_raises(self):\n def calc():\n return {\n \"A\": pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))}\n ),\n \"B\": \"not_a_df\",\n }\n\n def plot(df):\n return dp(df[\"A\"]).p9().add_point(\"X\", \"Y\")\n\n of = \"out/test.png\"\n p = ppg.PlotJob(of, calc, plot)\n p.height = 1200\n p.width = 800\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n assert \"did not return a DataFrame\" in str(p.cache_job.lfg.exception)\n\n def test_skip_caching(self):\n def calc():\n if not os.path.exists(\"A\"):\n raise ValueError()\n return pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))}\n )\n\n def plot(df):\n return dp(df).p9().add_point(\"X\", \"Y\")\n\n def prep_job():\n write(\"A\", \"A\")\n\n p = ppg.FileGeneratingJob(\"A\", prep_job)\n # this tests the 
correct dependency setting on skip_caching\n of = \"out/test.png\"\n p2 = ppg.PlotJob(of, calc, plot, skip_caching=True)\n p2.depends_on(p)\n ppg.run_pipegraph()\n assert magic(of).find(b\"PNG image\") != -1\n assert not os.path.exists(\"cache/out/test.png\")\n\n def test_redefiniton_and_skip_changes_raises(self):\n def calc():\n return pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))}\n )\n\n def plot(df):\n return dp(df).p9().add_point(\"X\", \"Y\")\n\n of = \"out/test.png\"\n ppg.PlotJob(of, calc, plot)\n with pytest.raises(ValueError):\n ppg.PlotJob(of, calc, plot, skip_caching=True)\n with pytest.raises(ValueError):\n ppg.PlotJob(of, calc, plot, skip_table=True)\n with pytest.raises(ValueError):\n ppg.PlotJob(of, calc, plot, render_args={\"something\": 55})\n\n def test_pdf(self):\n def calc():\n return pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))}\n )\n\n def plot(df):\n return dp(df).p9().add_point(\"X\", \"Y\")\n\n of = \"out/test.pdf\"\n ppg.PlotJob(of, calc, plot)\n ppg.run_pipegraph()\n assert magic(of).find(b\"PDF document\") != -1\n\n def test_depends_on_with_caching(self):\n of = \"out/test.pdf\"\n jobA = ppg.PlotJob(of, lambda: 5, lambda: 5)\n jobB = ppg.Job([\"B\"])\n jobA.depends_on(jobB)\n assert not ppg.util.global_pipegraph.has_edge(jobB, jobA)\n assert ppg.util.global_pipegraph.has_edge(jobB, jobA.cache_job.lfg) # ppg2?\n assert ppg.util.global_pipegraph.has_edge(jobA.cache_job, jobA.table_job)\n\n def test_depends_on_without_caching(self):\n of = \"out/test.pdf\"\n jobA = ppg.PlotJob(of, lambda: 5, lambda: 5, skip_caching=True)\n jobB = ppg.Job([\"B\"])\n jobA.depends_on(jobB)\n assert ppg.util.global_pipegraph.has_edge(jobB, jobA)\n\n def test_raises_on_invalid_filename(self):\n def calc():\n return pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))}\n )\n\n def plot(df):\n return dp(df).p9().add_point(\"X\", \"Y\")\n\n of = \"out/test.shu\"\n\n def inner():\n ppg.PlotJob(of, calc, plot)\n\n with pytest.raises(ValueError):\n inner()\n\n def test_reruns_just_plot_if_plot_changed(self, ppg1_compatibility_test):\n def calc():\n append(\"out/calc\", \"A\")\n return pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))}\n )\n\n def plot(df):\n append(\"out/plot\", \"B\")\n return dp(df).p9().add_point(\"X\", \"Y\")\n\n of = \"out/test.png\"\n ppg.PlotJob(of, calc, plot)\n ppg.run_pipegraph()\n assert magic(of).find(b\"PNG image\") != -1\n assert read(\"out/calc\") == \"A\"\n assert read(\"out/plot\") == \"B\"\n\n ppg1_compatibility_test.new_pipegraph()\n\n def plot2(df):\n append(\"out/plot\", \"B\")\n return dp(df).p9().add_point(\"Y\", \"X\")\n\n ppg.PlotJob(of, calc, plot2)\n ppg.run_pipegraph()\n assert magic(of).find(b\"PNG image\") != -1\n assert read(\"out/calc\") == \"A\"\n assert read(\"out/plot\") == \"BB\"\n\n def test_no_rerun_if_ignore_code_changes_and_plot_changes(self, ppg1_compatibility_test):\n def calc():\n append(\"out/calc\", \"A\")\n return pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))}\n )\n\n def plot(df):\n append(\"out/plot\", \"B\")\n return dp(df).p9().add_point(\"X\", \"Y\")\n\n of = \"out/test.png\"\n job = ppg.PlotJob(of, calc, plot)\n job.ignore_code_changes() # ppg2. otherwise the missing dependency triggers! 
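\n            # the second pipegraph below swaps in plot2; with code changes ignored, out/plot must still read \"B\" afterwards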
\n ppg.run_pipegraph()\n assert magic(of).find(b\"PNG image\") != -1\n assert read(\"out/calc\") == \"A\"\n assert read(\"out/plot\") == \"B\"\n\n from loguru import logger\n logger.error(\"Round two\")\n ppg1_compatibility_test.new_pipegraph()\n\n def plot2(df):\n append(\"out/plot\", \"B\")\n return dp(df).p9().add_point(\"Y\", \"X\")\n\n job = ppg.PlotJob(of, calc, plot2)\n logger.error(f\"Round two {job.depend_on_function}\")\n job.ignore_code_changes()\n ppg.run_pipegraph()\n assert magic(of).find(b\"PNG image\") != -1\n assert read(\"out/calc\") == \"A\"\n assert read(\"out/plot\") == \"B\"\n\n def test_reruns_both_if_calc_changed(self, ppg1_compatibility_test):\n def calc():\n append(\"out/calc\", \"A\")\n return pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))}\n )\n\n def plot(df):\n append(\"out/plot\", \"B\")\n return dp(df).p9().add_point(\"X\", \"Y\")\n\n of = \"out/test.png\"\n ppg.PlotJob(of, calc, plot)\n ppg.run_pipegraph()\n assert magic(of).find(b\"PNG image\") != -1\n assert read(\"out/calc\") == \"A\"\n assert read(\"out/plot\") == \"B\"\n\n ppg1_compatibility_test.new_pipegraph()\n\n def calc2():\n append(\"out/calc\", \"A\")\n x = 5 # noqa: E157,F841\n return pd.DataFrame(\n {\"X\": list(range(0+x, 100+x)), \"Y\": list(range(50, 150))} # must change actual output\n )\n\n ppg.PlotJob(of, calc2, plot)\n ppg.run_pipegraph()\n assert magic(of).find(b\"PNG image\") != -1\n assert read(\"out/calc\") == \"AA\"\n assert read(\"out/plot\") == \"BB\"\n\n def test_no_rerun_if_calc_change_but_ignore_codechanges(self, ppg1_compatibility_test):\n def calc():\n append(\"out/calc\", \"A\")\n return pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))}\n )\n\n def plot(df):\n append(\"out/plot\", \"B\")\n return dp(df).p9().add_point(\"X\", \"Y\")\n\n of = \"out/test.png\"\n job = ppg.PlotJob(of, calc, plot)\n job.ignore_code_changes() # ppg2 must always not have code changes otherwise the missing dependency triggers\n ppg.run_pipegraph()\n assert magic(of).find(b\"PNG image\") != -1\n assert read(\"out/calc\") == \"A\"\n assert read(\"out/plot\") == \"B\"\n\n ppg1_compatibility_test.new_pipegraph()\n\n def calc2():\n append(\"out/calc\", \"A\")\n x = 5 # noqa: E157,F841\n return pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))}\n )\n\n job = ppg.PlotJob(of, calc2, plot)\n job.ignore_code_changes()\n ppg.run_pipegraph()\n assert magic(of).find(b\"PNG image\") != -1\n assert read(\"out/plot\") == \"B\"\n assert read(\"out/calc\") == \"A\"\n\n def test_plot_job_dependencies_are_added_to_just_the_cache_job(self):\n def calc():\n return pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))}\n )\n\n def plot(df):\n return dp(df).p9().add_point(\"X\", \"Y\")\n\n of = \"out/test.png\"\n job = ppg.PlotJob(of, calc, plot)\n dep = ppg.FileGeneratingJob(\"out/A\", lambda: write(\"out/A\", \"A\"))\n job.depends_on(dep)\n #assert dep in job.cache_job.prerequisites\n assert ppg.util.global_pipegraph.has_edge(dep,job.cache_job.lfg) # ppg2\n\n def test_raises_if_calc_returns_non_df(self):\n def calc():\n return None\n\n def plot(df):\n append(\"out/plot\", \"B\")\n return dp(df).p9().add_point(\"X\", \"Y\")\n\n of = \"out/test.png\"\n job = ppg.PlotJob(of, calc, plot)\n try:\n ppg.run_pipegraph()\n raise ValueError(\"should not be reached\")\n except ppg.RuntimeError:\n pass\n assert isinstance(ppg.util.global_pipegraph.last_run_result[job.cache_job.lfg.job_id].error.args[0], ppg.JobContractError)\n #assert 
isinstance(job.cache_job.exception, ppg.JobContractError)\n\n def test_raises_if_plot_returns_non_plot(self):\n # import pyggplot\n def calc():\n return pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))}\n )\n\n def plot(df):\n return None\n\n of = \"out/test.png\"\n job = ppg.PlotJob(of, calc, plot)\n try:\n ppg.run_pipegraph()\n raise ValueError(\"should not be reached\")\n except ppg.RuntimeError:\n pass\n assert isinstance(job.exception, ppg.JobContractError)\n\n def test_passing_non_function_for_calc(self):\n def inner():\n ppg.PlotJob(\"out/a\", \"shu\", lambda df: 1)\n\n with pytest.raises(ValueError):\n inner()\n\n def test_passing_non_function_for_plot(self):\n def inner():\n ppg.PlotJob(\"out/a\", lambda: 55, \"shu\")\n\n with pytest.raises(ValueError):\n inner()\n\n def test_passing_non_string_as_jobid(self):\n def inner():\n ppg.PlotJob(5, lambda: 1, lambda df: 34)\n\n with pytest.raises(TypeError):\n inner()\n\n def test_unpickling_error(self, ppg1_compatibility_test):\n def calc():\n return pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))}\n )\n\n def plot(df):\n return dp(df).p9().add_point(\"X\", \"Y\")\n\n of = \"out/test.png\"\n p = ppg.PlotJob(of, calc, plot)\n ppg.run_pipegraph()\n ppg1_compatibility_test.new_pipegraph()\n p = ppg.PlotJob(of, calc, plot)\n with open(\"cache/out/test.png\", \"w\") as op:\n op.write(\"no unpickling\")\n os.unlink(\"out/test.png\") # so it reruns\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n assert not os.path.exists(\"out/test.png\")\n #assert isinstance(p.exception, ValueError) # ppg2\n import pickle\n assert isinstance(p.cache_job.exception, pickle.UnpicklingError)\n #assert \"Unpickling error in file\" in str(p.exception)\n assert \"Unpickling error in file\" in str(p.cache_job.exception)\n\n def test_add_another_not_returning_plot(self):\n def calc():\n return pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))}\n )\n\n def plot(df):\n return dp(df).p9().add_point(\"X\", \"Y\")\n\n def plot2(df):\n return\n\n of = \"out/test.png\"\n p = ppg.PlotJob(of, calc, plot)\n #ppg2 has no add_fiddle p.add_fiddle(lambda p: p.scale_x_log10())\n p2 = p.add_another_plot(\"out/test2.png\", plot2)\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n assert isinstance(p2.exception, ppg.JobContractError)\n\n @pytest.mark.skip # no combinedplotjob\n @pytest.mark.usefixtures(\"ppg1_compatibility_test\")\n class TestCombinedPlotJobs:\n def test_complete(self):\n def calc():\n return pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150)), \"w\": \"A\"}\n )\n\n def calc2():\n return pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150)), \"w\": \"B\"}\n )\n\n def plot(df):\n return dp(df).p9().add_point(\"X\", \"Y\")\n\n p1 = ppg.PlotJob(\"out/A.png\", calc, plot)\n p2 = ppg.PlotJob(\"out/B.png\", calc2, plot)\n import pathlib\n\n ppg.CombinedPlotJob(pathlib.Path(\"out/C.png\"), [p1, p2], [\"w\"])\n ppg.CombinedPlotJob(pathlib.Path(\"out/D.png\"), [p1, p2], [])\n ppg.CombinedPlotJob(\n pathlib.Path(\"out/E.png\"),\n [p1, p2],\n {\"facets\": \"w\"},\n fiddle=lambda p: p.scale_x_log10(),\n )\n with pytest.raises(ValueError):\n ppg.CombinedPlotJob(pathlib.Path(\"out/C.png\"), [p1, p2], \"w\")\n with pytest.raises(TypeError):\n ppg.CombinedPlotJob(5, [p1, p2], \"w\")\n with pytest.raises(ValueError):\n ppg.CombinedPlotJob(\"out/D.something\", [p1, p2], \"w\")\n with pytest.raises(ValueError):\n ppg.CombinedPlotJob(\"out/D.png\", 
[], \"w\")\n with pytest.raises(ValueError):\n ppg.CombinedPlotJob(\"out/D.png\", [p1, p2.job_id], \"w\")\n\n ppg.run_pipegraph()\n assert magic(\"out/C.png\").find(b\"PNG image\") != -1\n assert magic(\"out/D.png\").find(b\"PNG image\") != -1\n assert magic(\"out/E.png\").find(b\"PNG image\") != -1\n\n def test_plotjob_fails(self):\n def calc():\n return None\n\n def calc2():\n return pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150)), \"w\": \"B\"}\n )\n\n def plot(df):\n return dp(df).p9().add_point(\"X\", \"Y\")\n\n p1 = ppg.PlotJob(\"out/A.png\", calc, plot)\n p2 = ppg.PlotJob(\"out/B.png\", calc2, plot)\n import pathlib\n\n pc = ppg.CombinedPlotJob(\n pathlib.Path(\"out/C.png\"), [p1, p2], {\"facet\": \"w\"}\n )\n with pytest.raises(ValueError):\n ppg.CombinedPlotJob(pathlib.Path(\"out/C.png\"), [p1, p2], [])\n with pytest.raises(ValueError):\n ppg.CombinedPlotJob(pathlib.Path(\"out/C.png\"), [p1], {\"facet\": \"w\"})\n\n ppg.CombinedPlotJob(pathlib.Path(\"out/D.png\"), [p1, p2], [])\n ppg.CombinedPlotJob(pathlib.Path(\"out/E.png\"), [p1, p2], {\"facet\": \"w\"})\n\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n assert \"did not return a\" in str(p1.cache_job.exception)\n assert pc.error_reason == \"Indirect\"\n\n def test_use_cores(self):\n j = ppg.PlotJob(\"a.png\", lambda: None, lambda: None)\n assert j.cores_needed == 1\n assert j.use_cores(5) is j\n assert j.cores_needed == 1\n assert j.cache_job.cores_needed == 5\n j2 = ppg.PlotJob(\"b.png\", lambda: None, lambda: None, skip_caching=True)\n assert j2.cores_needed == 1\n assert j2.use_cores(5) is j2\n assert j2.cores_needed == 5\n\n def test_changing_skip_caching_same_name_raises(self):\n ppg.PlotJob(\"a.png\", lambda: None, lambda: None)\n with pytest.raises(ValueError):\n ppg.PlotJob(\"a.png\", lambda: None, lambda: None, skip_caching=True)\n\n def test_prune(self):\n j = ppg.PlotJob(\n \"a.png\",\n lambda: pd.DataFrame({\"sha\": [1]}),\n lambda df: dp(df).p9().add_point(\"sha\", \"sha\"),\n )\n j.prune()\n ppg.run_pipegraph()\n assert not Path(\"cache/a.png\").exists()\n assert not Path(\"a.png\").exists()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "id": "8463829", "language": "Python", "matching_score": 7.4249186515808105, "max_stars_count": 0, "path": "tests/ppg1_compatibility_layer/test_plotjobs.py" }, { "content": "import pytest\nimport os\nfrom pathlib import Path\nfrom .shared import read, write, append\nimport pickle\n\ntry:\n import dppd\n import dppd_plotnine # noqa: F401\n\n dp, X = dppd.dppd()\n has_pyggplot = True\nexcept ImportError:\n has_pyggplot = False\n pass\n\n\nif has_pyggplot: # noqa C901\n import pandas as pd\n import pypipegraph2 as ppg\n import subprocess\n\n def magic(filename):\n \"\"\"See what linux 'file' commando says about that file\"\"\"\n if not os.path.exists(filename):\n raise OSError(\"Does not exists %s\" % filename)\n p = subprocess.Popen([\"file\", filename], stdout=subprocess.PIPE)\n stdout, stderr = p.communicate()\n return stdout\n\n @pytest.mark.usefixtures(\"ppg2_per_test\")\n class TestPlotJob:\n def test_basic(self):\n def calc():\n return pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))}\n )\n\n def plot(df):\n return dp(df).p9().add_point(\"X\", \"Y\")\n\n def plot2(df):\n p = dp(df).p9().add_point(\"Y\", \"X\")\n p.width = 5\n p.height = 2\n return p\n\n of = \"out/test.png\"\n p, c, t = ppg.PlotJob(of, calc, plot)\n # p.add_fiddle(lambda p: dp(p).scale_x_continuous(trans=\"log10\").pd)\n 
p.add_another_plot(\"out/test2.png\", plot2)\n ppg.run()\n assert magic(of).find(b\"PNG image\") != -1\n assert os.path.exists(of + \".tsv\")\n assert os.path.exists(\"cache/out/test.png\")\n assert os.path.exists(\"out/test2.png\")\n assert not os.path.exists(\"cache/out/test2.png\")\n assert not os.path.exists(\"cache/out/test2.png.tsv\")\n\n def test_basic_skip_table(self):\n def calc():\n return pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))}\n )\n\n def plot(df):\n return dp(df).p9().add_point(\"X\", \"Y\")\n\n of = \"out/test.png\"\n ppg.PlotJob(of, calc, plot, create_table=False)\n ppg.run()\n assert magic(of).find(b\"PNG image\") != -1\n assert not os.path.exists(of + \".tsv\")\n assert os.path.exists(\"cache/out/test.png\")\n\n def test_basic_return_dict(self):\n def calc():\n return {\n \"A\": pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))}\n )\n }\n\n def plot(df):\n p = dp(df[\"A\"]).p9().add_point(\"X\", \"Y\")\n p.width = 5\n p.height = 1\n return p\n\n of = \"out/test.png\"\n ppg.PlotJob(of, calc, plot)\n ppg.run()\n assert magic(of).find(b\"PNG image\") != -1\n assert read(of + \".tsv\").find(\"#A\\n\") != -1\n\n def test_basic_return_dict_non_df_raises(self):\n def calc():\n return {\n \"A\": pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))}\n ),\n \"B\": \"not_a_df\",\n }\n\n def plot(df):\n return dp(df[\"A\"]).p9().add_point(\"X\", \"Y\")\n\n of = \"out/test.png\"\n p, c, t = ppg.PlotJob(of, calc, plot)\n p.height = 1200\n p.width = 800\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert \"did not return a DataFrame\" in str(\n ppg.global_pipegraph.last_run_result[c[1].job_id].error\n )\n\n def test_skip_caching(self):\n def calc():\n if not os.path.exists(\"A\"):\n raise ValueError()\n return pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))}\n )\n\n def plot(df):\n return dp(df).p9().add_point(\"X\", \"Y\")\n\n def prep_job(output_filename):\n write(\"A\", \"A\")\n\n def plot2(df):\n p = dp(df).p9().add_point(\"Y\", \"X\")\n p.width = 5\n p.height = 2\n return p\n\n p = ppg.FileGeneratingJob(\"A\", prep_job)\n\n # this tests the correct dependency setting on skip_caching\n of = \"out/test.png\"\n p2, c2, t2 = ppg.PlotJob(\n of, calc, plot, cache_calc=False, render_args={\"width\": 2, \"height\": 4}\n )\n p2.depends_on(p)\n t2.depends_on(\n p\n ) # if you don't cache, you have to take care of this yourself\n p2.add_another_plot(\"out/test2.png\", plot2)\n ppg.run()\n assert magic(of).find(b\"PNG image\") != -1\n assert not os.path.exists(\"cache/out/test.png\")\n assert os.path.exists(\"out/test.png\")\n assert os.path.exists(\"out/test2.png\")\n\n def xxxtest_redefiniton_and_skip_changes_raises(self):\n def calc():\n return pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))}\n )\n\n def plot(df):\n return dp(df).p9().add_point(\"X\", \"Y\")\n\n of = \"out/test.png\"\n ppg.PlotJob(of, calc, plot)\n ppg.PlotJob(of, calc, plot)\n with pytest.raises(ppg.JobRedefinitionError):\n ppg.PlotJob(of, calc, plot, render_args={\"something\": 55})\n # does not remove the jobs though\n ppg.PlotJob(of, calc, plot, cache_calc=False)\n ppg.PlotJob(of, calc, plot, create_table=False)\n\n ppg.new(run_mode=ppg.RunMode.NOTEBOOK)\n ppg.PlotJob(of, calc, plot)\n ppg.PlotJob(of, calc, plot)\n ppg.PlotJob(of, calc, plot, cache_calc=False)\n ppg.PlotJob(of, calc, plot, create_table=False)\n ppg.PlotJob(of, calc, plot, render_args={\"something\": 55})\n\n def 
xxxtest_pdf(self):\n def calc():\n return pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))}\n )\n\n def plot(df):\n return dp(df).p9().add_point(\"X\", \"Y\")\n\n of = \"out/test.pdf\"\n ppg.PlotJob(of, calc, plot)\n ppg.run()\n assert magic(of).find(b\"PDF document\") != -1\n\n def xxxtest_raises_on_invalid_filename(self):\n def calc():\n return pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))}\n )\n\n def plot(df):\n return dp(df).p9().add_point(\"X\", \"Y\")\n\n of = \"out/test.shu\"\n\n def inner():\n ppg.PlotJob(of, calc, plot)\n\n with pytest.raises(ValueError):\n inner()\n\n def test_reruns_just_plot_if_plot_changed(self):\n def calc():\n append(\"out/calc\", \"A\")\n return pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))}\n )\n\n def plot(df):\n append(\"out/plot\", \"B\")\n return dp(df).p9().add_point(\"X\", \"Y\")\n\n of = \"out/test.png\"\n ppg.PlotJob(of, calc, plot)\n ppg.run()\n assert magic(of).find(b\"PNG image\") != -1\n assert read(\"out/calc\") == \"A\"\n assert read(\"out/plot\") == \"B\"\n\n ppg.new()\n\n def plot2(df):\n append(\"out/plot\", \"B\")\n return dp(df).p9().add_point(\"Y\", \"X\")\n\n ppg.PlotJob(of, calc, plot2)\n ppg.run()\n assert magic(of).find(b\"PNG image\") != -1\n assert read(\"out/calc\") == \"A\"\n assert read(\"out/plot\") == \"BB\"\n\n def test_no_rerun_if_ignore_code_changes_and_plot_changes(self):\n def calc():\n append(\"out/calc\", \"A\")\n return pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))}\n )\n\n def plot(df):\n append(\"out/plot\", \"B\")\n return dp(df).p9().add_point(\"X\", \"Y\")\n\n of = \"out/test.png\"\n # note that you already need to ignore the function here\n # otherwise, the fact that the function is now *missing*\n # would trigger downstream\n job = ppg.PlotJob(of, calc, plot, depend_on_function=False)\n ppg.run()\n assert magic(of).find(b\"PNG image\") != -1\n assert read(\"out/calc\") == \"A\"\n assert read(\"out/plot\") == \"B\"\n\n ppg.new()\n\n def plot2(df):\n append(\"out/plot\", \"B\")\n return dp(df).p9().add_point(\"Y\", \"X\")\n\n ppg.PlotJob(of, calc, plot2, depend_on_function=False)\n ppg.run()\n assert magic(of).find(b\"PNG image\") != -1\n assert read(\"out/calc\") == \"A\"\n assert read(\"out/plot\") == \"B\"\n\n def test_reruns_both_if_calc_changed(self):\n def calc():\n append(\"out/calc\", \"A\")\n return pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))}\n )\n\n def plot(df):\n append(\"out/plot\", \"B\")\n return dp(df).p9().add_point(\"X\", \"Y\")\n\n of = \"out/test.png\"\n ppg.PlotJob(of, calc, plot)\n ppg.run()\n assert magic(of).find(b\"PNG image\") != -1\n assert read(\"out/calc\") == \"A\"\n assert read(\"out/plot\") == \"B\"\n\n ppg.new()\n\n def calc2():\n append(\"out/calc\", \"A\")\n x = 5 # noqa: E157,F841\n return pd.DataFrame(\n {\n \"X\": list(range(1, 101)),\n \"Y\": list(range(50, 150)),\n } # output must really change\n )\n\n ppg.PlotJob(of, calc2, plot)\n ppg.run()\n assert magic(of).find(b\"PNG image\") != -1\n assert read(\"out/calc\") == \"AA\"\n assert read(\"out/plot\") == \"BB\"\n\n def test_no_rerun_if_calc_change_but_ignore_codechanges(self):\n def calc():\n append(\"out/calc\", \"A\")\n return pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))}\n )\n\n def plot(df):\n append(\"out/plot\", \"B\")\n return dp(df).p9().add_point(\"X\", \"Y\")\n\n of = \"out/test.png\"\n job = ppg.PlotJob(of, calc, plot, depend_on_function=False)\n 
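# depend_on_function=False disables the automatic function-change tracking, so swapping in calc2 below must not trigger a recalc (out/calc stays \"A\")\n            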
ppg.run()\n assert magic(of).find(b\"PNG image\") != -1\n assert read(\"out/calc\") == \"A\"\n assert read(\"out/plot\") == \"B\"\n\n ppg.new()\n\n def calc2():\n append(\"out/calc\", \"A\")\n x = 5 # noqa: E157,F841\n return pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))}\n )\n\n ppg.PlotJob(of, calc2, plot, depend_on_function=False)\n ppg.run()\n assert magic(of).find(b\"PNG image\") != -1\n assert read(\"out/calc\") == \"A\"\n assert read(\"out/plot\") == \"B\"\n\n def test_raises_if_calc_returns_non_df(self):\n def calc():\n return None\n\n def plot(df):\n append(\"out/plot\", \"B\")\n return dp(df).p9().add_point(\"X\", \"Y\")\n\n of = \"out/test.png\"\n job, cache_job, table_job = ppg.PlotJob(of, calc, plot)\n with pytest.raises(ppg.JobsFailed):\n ppg.run(print_failures=False)\n print(ppg.global_pipegraph.last_run_result[cache_job[1].job_id].error)\n assert isinstance(\n ppg.global_pipegraph.last_run_result[cache_job[1].job_id].error,\n ppg.JobError,\n )\n\n def test_raises_if_plot_returns_non_plot(self):\n # import pyggplot\n def calc():\n return pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))}\n )\n\n def plot(df):\n return None\n\n of = \"out/test.png\"\n ppg.PlotJob(of, calc, plot)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n print(type(ppg.global_pipegraph.last_run_result[of].error))\n print(repr(ppg.global_pipegraph.last_run_result[of].error))\n assert isinstance(\n ppg.global_pipegraph.last_run_result[of].error, ppg.JobError\n )\n assert \"did not return a plot object\" in str(\n ppg.global_pipegraph.last_run_result[of].error\n )\n\n def test_passing_non_function_for_calc(self):\n def inner():\n ppg.PlotJob(\"out/a\", \"shu\", lambda df: 1)\n\n with pytest.raises(ValueError):\n inner()\n\n def test_passing_non_function_for_plot(self):\n def inner():\n ppg.PlotJob(\"out/a\", lambda: 55, \"shu\")\n\n with pytest.raises(ValueError):\n inner()\n\n def test_passing_non_string_as_jobid(self):\n def inner():\n ppg.PlotJob(5, lambda: 1, lambda df: 34)\n\n with pytest.raises(TypeError):\n inner()\n\n def test_unpickling_error(self):\n def calc():\n return pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))}\n )\n\n def plot(df):\n return dp(df).p9().add_point(\"X\", \"Y\")\n\n of = \"out/test.png\"\n p = ppg.PlotJob(of, calc, plot)\n ppg.run()\n ppg.new()\n p = ppg.PlotJob(of, calc, plot)\n with open(\"cache/out/test.png\", \"w\") as op:\n op.write(\"no unpickling\")\n os.unlink(\"out/test.png\") # so it reruns\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert not os.path.exists(\"out/test.png\")\n assert isinstance(\n ppg.global_pipegraph.last_run_result[p[1][0].job_id].error.args[0],\n pickle.UnpicklingError,\n )\n assert \"Unpickling error in file\" in str(\n ppg.global_pipegraph.last_run_result[p[1][0].job_id].error.args[0]\n )\n\n def test_add_another_not_returning_plot(self):\n def calc():\n return pd.DataFrame(\n {\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))}\n )\n\n def plot(df):\n return dp(df).p9().add_point(\"X\", \"Y\")\n\n def plot2(df):\n return\n\n of = \"out/test.png\"\n p, c, t = ppg.PlotJob(of, calc, plot)\n # p.add_fiddle(lambda p: p.scale_x_log10())\n p2 = p.add_another_plot(\"out/test2.png\", plot2)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert isinstance(\n ppg.global_pipegraph.last_run_result[p2.job_id].error, ppg.JobError\n )\n\n def test_matplotlib(self):\n import matplotlib.pyplot as plt\n import numpy as np\n\n # Data for plotting\n t = 
np.arange(0.0, 2.0, 0.01)\n s = 1 + np.sin(2 * np.pi * t)\n\n fig, ax = plt.subplots()\n ax.plot(t, s)\n\n ax.set(\n xlabel=\"time (s)\",\n ylabel=\"voltage (mV)\",\n title=\"About as simple as it gets, folks\",\n )\n ax.grid()\n return fig\n", "id": "6999270", "language": "Python", "matching_score": 1.5954334735870361, "max_stars_count": 0, "path": "tests/test_plotjobs.py" }, { "content": "import math\n\n\nclass Plot_Matplotlib:\n # much faster than Plot_GGPlot...\n \"\"\"Allowed plot options:\n height\n max_width\n color_scale (from plt.cm, or a dictionary lane->plt.cm.*)\n hide_legend\n \"\"\"\n\n name = \"Plot_matplotlib\"\n\n def plot(self, df, names_in_order, plot_options, title=\"\"):\n import matplotlib.pyplot as plt\n import matplotlib\n\n plt.switch_backend(\"agg\")\n\n cluster_ids = df[\"cluster\"]\n df = df.drop(\"cluster\", axis=1)\n row_count, col_count = df.shape\n plot_height = plot_options.get(\"height\", max(5, math.ceil(row_count / 250.0)))\n plot_width = max(plot_options.get(\"max_width\", 4), 1.55 * col_count) * 2\n if plot_options.get(\"color_scale\", False) is False:\n color_scale = plt.cm.OrRd\n else:\n color_scale = plot_options[\"color_scale\"]\n fig = plt.figure(figsize=(plot_width, plot_height))\n title = plot_options.get(\"title\", \"\")\n if title:\n plt.suptitle(title)\n for ii, column in enumerate(df.columns):\n ax = fig.add_subplot(\n 1, col_count + 1, ii + 1\n ) # matplot lib numbers subplots from 1!\n ax.set_title(names_in_order[ii])\n if plot_options.get(\"show_clusters\", False):\n plt.scatter(\n x=[-10] * row_count,\n y=list(range(row_count)),\n c=cluster_ids,\n cmap=plt.cm.Set1,\n s=100,\n edgecolors=\"face\",\n )\n pass\n data = df[[column]].values\n if isinstance(color_scale, dict):\n cs = color_scale[column.name]\n elif hasattr(color_scale, \"__call__\") and not isinstance(\n color_scale, matplotlib.colors.Colormap\n ):\n cs = color_scale(column)\n else:\n cs = color_scale\n plt.imshow(data, cmap=cs, interpolation=\"nearest\", aspect=\"auto\")\n plt.axis(\"off\")\n\n if not plot_options.get(\"hide_legend\", False):\n plt.colorbar()\n return plt\n\n def render(self, output_filename, p):\n p.savefig(output_filename, bbox_inches=\"tight\")\n\n def get_parameters(self, plot_options, lanes_to_draw):\n cs = plot_options.get(\"color_scale\", False)\n color_scale_string = color_scale_to_string(cs, lanes_to_draw)\n return (\n plot_options.get(\"height\", None),\n plot_options.get(\"max_width\", 4),\n color_scale_string,\n plot_options.get(\"hide_legend\", False),\n plot_options.get(\"title\", None),\n )\n\n def get_dependencies(self, heatmapplot, plot_options):\n return None\n # res = []\n # cs = plot_options.get(\"color_scale\", False)\n # if hasattr(cs, '__call__'):\n # res.append(ppg.FunctionInvariant(heatmapplot.output_filename + '_color_scale', cs))\n # return res\n\n\ndef color_scale_to_string(cs, lanes_to_draw):\n \"\"\"Convert a matplotlib color scale into a string for parameter recording\"\"\"\n if hasattr(cs, \"name\"):\n color_scale_string = cs.name # matplotlib colors scales have no useful __str__\n elif isinstance(cs, dict):\n if set(cs.keys()) != set([x.name for x in lanes_to_draw]):\n raise ValueError(\n \"Using dict color_scale, but did not provide values for all lanes\"\n )\n color_scale_string = \"\"\n for x in cs:\n color_scale_string += x + \"===\"\n if hasattr(cs[x], \"name\"):\n color_scale_string += cs[x].name\n else:\n color_scale_string += str(cs[x])\n color_scale_string += \";;;\"\n elif hasattr(cs, \"__call__\"):\n 
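# any callable is collapsed to the constant \"function\" for parameter recording; the callable's own code is not tracked here\n        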
color_scale_string = \"function\"\n elif cs is False:\n color_scale_string = \"default\"\n else:\n raise ValueError(\n \"Color option was not a matplotlib.pyplot.cm. class or a dict of such. Was %s:\"\n % cs\n )\n return color_scale_string\n", "id": "6105518", "language": "Python", "matching_score": 5.159689903259277, "max_stars_count": 0, "path": "src/mbf_heatmap/ddf/plot_strategies.py" }, { "content": "import math\nimport pandas as pd\n\n\nclass Plot_Matplotlib:\n # much faster than Plot_GGPlot...\n \"\"\"Allowed plot options:\n height\n max_width\n title\n color_scale (from plt.cm, or a dictionary lane->plt.cm.*)\n hide_legend\n \"\"\"\n\n name = \"Plot_matplotlib\"\n\n def plot(\n self,\n gr_to_draw,\n lanes_to_draw,\n raw_data,\n norm_data,\n order,\n names_in_order,\n plot_options,\n ):\n import matplotlib.pyplot as plt\n import matplotlib\n\n plt.switch_backend(\"agg\")\n seen = set()\n for lane in lanes_to_draw:\n if lane.name in seen:\n raise ValueError(\"Duplicate lanes in lanes to draw: %s\" % lane.name)\n seen.add(lane.name)\n peak_count = len(list(norm_data.values())[0])\n lane_count = len(norm_data)\n if \"height\" not in plot_options:\n plot_height = max(5, math.ceil(peak_count / 250.0))\n else:\n plot_height = plot_options[\"height\"]\n plot_width = (\n max(plot_options.get(\"max_width\", 4), 1.55 * len(lanes_to_draw)) * 2\n )\n peak_order = order[0][::-1]\n cluster_order = order[1]\n if cluster_order is not None:\n cluster_order = order[1][::-1]\n if plot_options.get(\"color_scale\", False) is False:\n color_scale = plt.cm.OrRd\n else:\n color_scale = plot_options[\"color_scale\"]\n fig = plt.figure(figsize=(plot_width, plot_height))\n title = plot_options.get(\"title\", True)\n if title is True:\n title = \"%i regions from %s\" % (peak_count, gr_to_draw.name)\n elif title:\n title = title.replace(\"%i\", \"%i\" % peak_count).replace(\n \"%s\", gr_to_draw.name\n )\n if title is not None:\n plt.suptitle(title)\n for ii, lane in enumerate(lanes_to_draw):\n lane_name = lane.name\n ax = fig.add_subplot(\n 1, lane_count, ii + 1\n ) # matplot lib numbers subplots from 1!\n ax.set_title(names_in_order[ii])\n if cluster_order is not None:\n plt.scatter(\n x=[-10] * peak_count,\n y=list(range(peak_count)),\n c=cluster_order,\n cmap=plt.cm.Set1,\n s=100,\n edgecolors=\"face\",\n )\n pass\n data = norm_data[lane_name][peak_order]\n if isinstance(color_scale, dict):\n cs = color_scale[lane.name]\n elif hasattr(color_scale, \"__call__\") and not isinstance(\n color_scale, matplotlib.colors.Colormap\n ):\n cs = color_scale(lane)\n else:\n cs = color_scale\n plt.imshow(data, cmap=cs, interpolation=\"nearest\", aspect=\"auto\")\n plt.axis(\"off\")\n\n if not plot_options.get(\"hide_legend\", False):\n plt.colorbar()\n if plot_options.get(\"dump_df\", False):\n \"\"\" Makes a Dataframe and writes this - dataframe should be about the GenomicRegion (gr_to_draw)\"\"\"\n df_dump_filename = plot_options[\"dump_df\"]\n if not isinstance(df_dump_filename, str):\n raise ValueError(\n \"dump_df needs a string that specifies the outputpath and filename\"\n )\n out_dict = {\"chr\": [], \"start\": [], \"stop\": [], \"cluster_id\": []}\n import itertools\n\n gr_to_draw_df = gr_to_draw.df\n for x, y in itertools.izip(order[0], order[1]):\n out_dict[\"start\"].append(gr_to_draw_df[\"start\"][x])\n out_dict[\"stop\"].append(gr_to_draw_df[\"stop\"][x])\n out_dict[\"chr\"].append(gr_to_draw_df[\"chr\"][x])\n out_dict[\"cluster_id\"].append(y)\n df = pd.DataFrame(out_dict)\n df.to_csv(df_dump_filename, 
sep=\"\\t\", index=False)\n return plt\n\n def render(self, output_filename, p):\n p.savefig(output_filename, bbox_inches=\"tight\")\n\n def get_parameters(self, plot_options, lanes_to_draw):\n cs = plot_options.get(\"color_scale\", False)\n if hasattr(cs, \"name\"):\n color_scale_string = (\n cs.name\n ) # matplotlib colors scales have no useful __str__\n elif isinstance(cs, dict):\n if set(cs.keys()) != set([x.name for x in lanes_to_draw]):\n raise ValueError(\n \"Using dict color_scale, but did not provide values for all lanes\"\n )\n color_scale_string = \"\"\n for x in cs:\n color_scale_string += x + \"===\"\n if hasattr(cs[x], \"name\"):\n color_scale_string += cs[x].name\n else:\n color_scale_string += str(cs[x])\n color_scale_string += \";;;\"\n elif hasattr(cs, \"__call__\"):\n color_scale_string = \"function\"\n elif cs is False:\n color_scale_string = \"default\"\n else:\n raise ValueError(\n \"Color option was not a matplotlib.pyplot.cm. class or a dict of such. Was %s:\"\n % cs\n )\n return (\n plot_options.get(\"height\", None),\n plot_options.get(\"max_width\", 4),\n color_scale_string,\n plot_options.get(\"hide_legend\", False),\n plot_options.get(\"title\", None),\n )\n\n def get_dependencies(self, heatmapplot, plot_options):\n return None\n", "id": "12348492", "language": "Python", "matching_score": 1.3935352563858032, "max_stars_count": 0, "path": "src/mbf_heatmap/chipseq/plot_strategies.py" }, { "content": "import pytest\nimport math\nimport pandas as pd\nimport pypipegraph as ppg\n\nfrom mbf_genomics import regions, genes\nfrom mbf_genomics.annotator import Constant\n\nfrom .shared import get_genome, get_genome_chr_length, force_load, run_pipegraph, RaisesDirectOrInsidePipegraph\n\n\[email protected](\"both_ppg_and_no_ppg\")\nclass TestGenomicRegionConvertTests:\n def test_random_same_number(self):\n def sample_data():\n return pd.DataFrame(\n {\n \"chr\": [\"1\", \"2\", \"1\"],\n \"start\": [10, 100, 1000],\n \"stop\": [12, 110, 1110],\n \"column_that_will_disappear\": [\"A\", \"b\", \"c\"],\n }\n )\n\n def convert(df):\n res = df[[\"chr\", \"start\", \"stop\"]]\n res = res.assign(start=res[\"start\"] + 1)\n return res\n\n if ppg.inside_ppg():\n deps = [ppg.ParameterInvariant(\"shuParam\", (\"hello\"))]\n else:\n deps = []\n a = regions.GenomicRegions(\"sharum\", sample_data, [], get_genome_chr_length())\n a.add_annotator(Constant(\"Constant\", 5))\n a.annotate()\n b = a.convert(\"a+1\", convert, dependencies=deps)\n force_load(b.load())\n for d in deps:\n assert d in b.load().lfg.prerequisites\n run_pipegraph()\n assert len(a.df) == len(b.df)\n assert (a.df[\"start\"] == b.df[\"start\"] - 1).all()\n assert \"column_that_will_disappear\" in a.df.columns\n assert not (\"column_that_will_disappear\" in b.df.columns)\n\n def test_raises_on_conversion_function_not_returning_df(self):\n def sample_data():\n return pd.DataFrame(\n {\n \"chr\": [\"1\", \"2\", \"1\"],\n \"start\": [10, 100, 1000],\n \"stop\": [12, 110, 1110],\n }\n )\n\n def convert(df):\n return None\n\n a = regions.GenomicRegions(\"sharum\", sample_data, [], get_genome_chr_length())\n with RaisesDirectOrInsidePipegraph(ValueError):\n a.convert(\"a+1\", convert)\n force_load(a.load())\n\n def test_raises_on_non_genome(self):\n def sample_data():\n return pd.DataFrame(\n {\n \"chr\": [\"1\", \"2\", \"1\"],\n \"start\": [10, 100, 1000],\n \"stop\": [12, 110, 1110],\n }\n )\n\n def convert(df):\n res = df[:]\n res[\"start\"] += 1\n return res\n\n genome = get_genome_chr_length()\n a = 
regions.GenomicRegions(\"sharum\", sample_data, [], genome)\n a.convert(\"a+1\", convert, genome)\n\n with pytest.raises(ValueError):\n a.convert(\"a+1b\", convert, \"hello\")\n\n def test_grow(self):\n def sample_data():\n return pd.DataFrame(\n {\n \"chr\": [\"1\", \"1\", \"2\"],\n \"start\": [10, 100, 1000],\n \"stop\": [12, 110, 1110],\n }\n )\n\n a = regions.GenomicRegions(\"sharum\", sample_data, [], get_genome_chr_length())\n b = a.convert(\"agrown\", regions.convert.grow(12))\n force_load(b.load())\n run_pipegraph()\n assert (b.df[\"start\"] == [0, 100 - 12, 1000 - 12]).all()\n assert (b.df[\"stop\"] == [24, 110 + 12, 1110 + 12]).all()\n\n def test_grow2(self):\n def sample_data():\n return pd.DataFrame(\n [\n {\"chr\": \"1\", \"start\": 7774885, \"stop\": 7791673},\n {\"chr\": \"1\", \"start\": 8286026, \"stop\": 8298500},\n {\"chr\": \"1\", \"start\": 8323232, \"stop\": 8342008},\n ]\n )\n\n a = regions.GenomicRegions(\"sharum\", sample_data, [], get_genome_chr_length())\n b = a.convert(\"agrown\", regions.convert.grow(50000), on_overlap=\"merge\")\n force_load(b.load())\n run_pipegraph()\n assert (b.df[\"start\"] == [7724885, 8236026]).all()\n assert (b.df[\"stop\"] == [7841673, 8392008]).all()\n\n def test_promotorize(self):\n\n g = genes.Genes(get_genome())\n b = g.convert(\"b\", regions.convert.promotorize(444), on_overlap=\"ignore\")\n force_load(b.load())\n force_load(b.load())\n run_pipegraph()\n assert len(g.df) > 0\n assert len(g.df) == len(b.df) + 1 # we drop one that ends up at 0..0\n assert \"strand\" in b.df.columns\n # we have to go by index - the order might change\n # convert to list of strings - bug in at, it won't work otherwise\n b_df = b.df.assign(gene_stable_id=[x for x in b.df.gene_stable_id])\n g_df = g.df.assign(gene_stable_id=[x for x in g.df.gene_stable_id])\n b_df = b_df.set_index(\"gene_stable_id\")\n g_df = g_df.set_index(\"gene_stable_id\")\n assert set(b_df.index) == set(g_df[1:].index) # again the one that we dropped\n\n for ii in b_df.index:\n if g_df.at[ii, \"strand\"] == 1:\n assert b_df.at[ii, \"start\"] == max(0, g_df.at[ii, \"tss\"] - 444)\n assert b_df.at[ii, \"stop\"] == max(0, g_df.at[ii, \"tss\"])\n else:\n assert b_df.at[ii, \"start\"] == max(0, g_df.at[ii, \"tss\"])\n assert b_df.at[ii, \"stop\"] == max(0, g_df.at[ii, \"tss\"] + 444)\n\n def test_merge_connected(self):\n def sample_data():\n return pd.DataFrame(\n {\n \"chr\": [\n \"1\",\n \"1\",\n \"1\",\n \"1\",\n \"1\",\n \"2\",\n \"3\",\n \"3\",\n \"3\",\n \"4\",\n \"4\",\n \"4\",\n \"5\",\n ],\n \"start\": [\n 10,\n 13,\n 110,\n 300,\n 400,\n 102,\n 5,\n 6,\n 6000,\n 10,\n 100,\n 200,\n 100,\n ],\n \"stop\": [\n 18,\n 100,\n 200,\n 400,\n 410,\n 1000,\n 5000,\n 4900,\n 6010,\n 100,\n 150,\n 300,\n 110,\n ],\n }\n )\n\n a = regions.GenomicRegions(\n \"sharum\", sample_data, [], get_genome_chr_length(), on_overlap=\"ignore\"\n )\n b = a.convert(\"agrown\", regions.convert.merge_connected())\n force_load(b.load())\n run_pipegraph()\n assert (b.df[\"chr\"] == [\"1\", \"1\", \"1\", \"2\", \"3\", \"3\", \"4\", \"4\", \"5\"]).all()\n assert (b.df[\"start\"] == [10, 110, 300, 102, 5, 6000, 10, 200, 100]).all()\n assert (b.df[\"stop\"] == [100, 200, 410, 1000, 5000, 6010, 150, 300, 110]).all()\n\n def test_merge_connected_2(self):\n def sample_data():\n return pd.DataFrame(\n {\n \"chr\": [\"1\", \"1\", \"2\"],\n \"start\": [27897200, 27898600, 0],\n \"stop\": [27897300, 27898700, 100],\n }\n )\n\n a = regions.GenomicRegions(\"sharum\", sample_data, [], get_genome_chr_length())\n b = 
a.convert(\"agrown\", regions.convert.merge_connected())\n force_load(b.load())\n run_pipegraph()\n assert (b.df[\"start\"] == [27897200, 27898600, 0]).all()\n assert (b.df[\"stop\"] == [27897300, 27898700, 100]).all()\n\n def test_liftover(self):\n def sample_data():\n return pd.DataFrame(\n {\n \"chr\": [\"1\", \"1\", \"2\"],\n \"start\": [27897200, 27898600, 100000],\n \"stop\": [27897300, 27898700, 100100],\n }\n )\n\n a = regions.GenomicRegions(\"sharum\", sample_data, [], get_genome_chr_length())\n b = a.convert(\"hg38\", regions.convert.lift_over(\"hg19ToHg38\"))\n force_load(b.load())\n run_pipegraph()\n # made these with the ucsc web liftover utility\n # http://genome.ucsc.edu/cgi-bin/hgLiftOver\n assert (b.df[\"start\"] == [27570689, 27572089, 100000]).all()\n assert (b.df[\"stop\"] == [27570789, 27572189, 100100]).all()\n\n def test_liftover_filter_chr(self):\n def sample_data():\n return pd.DataFrame(\n {\n \"chr\": [\"2\", \"1\", \"11_gl000202_random\", \"MT\"],\n \"start\": [100, 27897200, 500, 100000],\n \"stop\": [10000, 27897300, 5000, 100100],\n \"copy\": [\"D\", \"A\", \"B\", \"C\"],\n \"name\": [\"d\", \"a\", \"b\", \"c\"],\n }\n )\n\n a = regions.GenomicRegions(\n \"sharum\",\n sample_data,\n [],\n get_genome_chr_length(\n {\n \"1\": 100000,\n \"2\": 100000,\n \"11_gl000202_random\": 100000,\n \"MT\": 100000,\n \"11\": 1000000,\n }\n ),\n )\n b = a.convert(\n \"hg38\",\n regions.convert.lift_over(\n \"hg19ToHg38\", keep_name=True, filter_to_these_chromosomes=[\"1\"]\n ),\n )\n force_load(b.load())\n run_pipegraph()\n # made these with the ucsc web liftover utility\n # http://genome.ucsc.edu/cgi-bin/hgLiftOver\n print(b.df)\n assert (b.df[\"start\"] == [27570689]).all()\n assert (b.df[\"stop\"] == [27570789]).all()\n assert (b.df[\"copy\"] == [\"A\"]).all()\n assert (b.df[\"name\"] == [\"a\"]).all()\n assert (b.df[\"chr\"] == [\"1\"]).all()\n\n def test_windows(self):\n def sample_data():\n return pd.DataFrame(\n {\"chr\": [\"1\", \"2\"], \"start\": [0, 0], \"stop\": [200, 99 * 3]}\n )\n\n a = regions.GenomicRegions(\"sharum\", sample_data, [], get_genome_chr_length())\n b = a.convert(\"ndiwo\", regions.convert.windows(99, False))\n c = a.convert(\"ndiwo2\", regions.convert.windows(99, True))\n force_load(b.load())\n force_load(c.load())\n run_pipegraph()\n assert (b.df[\"start\"] == [0, 99, 99 * 2, 0, 99, 99 * 2]).all()\n assert (b.df[\"stop\"] == [99, 99 * 2, 200, 99, 99 * 2, 99 * 3]).all()\n assert (c.df[\"start\"] == [0, 99, 0, 99, 99 * 2]).all()\n assert (c.df[\"stop\"] == [99, 99 * 2, 99, 99 * 2, 99 * 3]).all()\n\n def test_cookiecutter_summit(self):\n def sample_data():\n return pd.DataFrame(\n {\"chr\": [\"1\", \"2\"], \"start\": [0, 0], \"stop\": [200, 99 * 3]}\n )\n\n a = regions.GenomicRegions(\"sharum\", sample_data, [], get_genome_chr_length())\n b = a.convert(\"cookie\", regions.convert.cookie_summit(a.summit_annotator, 220))\n c = a.convert(\n \"cookieB\",\n regions.convert.cookie_summit(\n a.summit_annotator, 220, drop_those_outside_chromosomes=True\n ),\n )\n force_load(b.load())\n force_load(c.load())\n run_pipegraph()\n assert len(b.df) == 2\n assert len(c.df) == 1\n assert (b.df[\"start\"] == [0, math.floor(99 * 3 / 2) - 110]).all()\n assert (b.df[\"stop\"] == [100 + 110, math.floor(99 * 3 / 2) + 110]).all()\n\n def test_name_must_be_string(self):\n def sample_data():\n return pd.DataFrame(\n {\"chr\": [\"1\", \"2\"], \"start\": [0, 0], \"stop\": [200, 99 * 3]}\n )\n\n a = regions.GenomicRegions(\"sharum\", sample_data, [], get_genome_chr_length())\n 
with pytest.raises(ValueError):\n a.convert(123, regions.convert.shift(50))\n\n def test_shift(self):\n def sample_data():\n return pd.DataFrame(\n {\"chr\": [\"1\", \"2\"], \"start\": [0, 0], \"stop\": [200, 99 * 3]}\n )\n\n a = regions.GenomicRegions(\"sharum\", sample_data, [], get_genome_chr_length())\n b = a.convert(\"cookie\", regions.convert.shift(50))\n force_load(b.load())\n run_pipegraph()\n assert len(b.df) == 2\n assert (b.df[\"start\"] == [50, 50]).all()\n assert (b.df[\"stop\"] == [200 + 50, 99 * 3 + 50]).all()\n\n def test_summit(self):\n def sample_data():\n return pd.DataFrame(\n {\"chr\": [\"1\", \"2\"], \"start\": [0, 0], \"stop\": [200, 99 * 3]}\n )\n\n a = regions.GenomicRegions(\"sharum\", sample_data, [], get_genome_chr_length())\n b = a.convert(\"cookie\", regions.convert.summit(a.summit_annotator))\n force_load(b.load())\n run_pipegraph()\n assert len(b.df) == 2\n assert (b.df[\"start\"] == [100, 148]).all()\n assert (b.df[\"stop\"] == [101, 149]).all()\n\n def test_cookie_cutter(self):\n def sample_data():\n return pd.DataFrame(\n {\n \"chr\": [\"1\", \"2\"],\n \"start\": [0, 0],\n \"stop\": [200, 99 * 3],\n \"strand\": [-1, 1],\n }\n )\n\n a = regions.GenomicRegions(\"sharum\", sample_data, [], get_genome_chr_length())\n b = a.convert(\"cookie\", regions.convert.cookie_cutter(100))\n force_load(b.load())\n run_pipegraph()\n assert (b.df[\"start\"] == [50, 98]).all()\n assert (b.df[\"stop\"] == [50 + 100, 98 + 100]).all()\n assert (b.df[\"strand\"] == [-1, 1]).all()\n", "id": "6017514", "language": "Python", "matching_score": 4.611237525939941, "max_stars_count": 0, "path": "tests/test_region_conversion.py" }, { "content": "\"\"\"Utility functions to pass to GenomicRegions.convert(..., convert_func)\n\n\"\"\"\n\n# TODO: Liftover utility -> mbf_externals\n# TODO: chains not included?\n\nimport tempfile\nimport pandas as pd\nimport pypipegraph as ppg\nimport numpy as np\nimport subprocess\nfrom mbf_externals.util import to_string, to_bytes\nfrom pathlib import Path\n\nfile_path = Path(__file__).parent\n\n\ndef grow(basepairs):\n \"\"\"A function for GenomicRegions.convert that enlarges the regions\n bei doing start = start - basepairs, stop = stop + basepairs\"\"\"\n\n def do_grow(df):\n starts = df[\"start\"] - basepairs\n starts[starts < 0] = 0\n stops = df[\"stop\"] + basepairs\n new_df = df.copy()\n new_df = new_df.assign(start=starts, stop=stops)\n return new_df\n\n return do_grow, [], basepairs\n\n\ndef promotorize(basepairs=1250):\n \"\"\"Genes.convert - returns [-basepairs...tss] regions\"\"\"\n\n def do_promotorize(df):\n res = {\"chr\": df[\"chr\"]}\n res[\"start\"] = np.zeros((len(df),), dtype=np.int32)\n res[\"stop\"] = np.zeros((len(df),), dtype=np.int32)\n forward = df[\"strand\"] == 1\n res[\"start\"][:] = df[\"tss\"] # Assign within array.\n res[\"stop\"][:] = df[\"tss\"] # Assign within array.\n res[\"start\"][forward] -= basepairs\n res[\"start\"][res[\"start\"] < 0] = 0\n res[\"stop\"][~forward] += basepairs\n res[\"strand\"] = df[\"strand\"]\n res[\"gene_stable_id\"] = df[\"gene_stable_id\"]\n res = pd.DataFrame(res)\n res = res[res[\"start\"] != res[\"stop\"]] # can happen by the 0 limiting\n return res\n\n return do_promotorize, [], basepairs\n\n\ndef shift(basepairs):\n def do_shift(df):\n res = {\n \"chr\": df[\"chr\"],\n \"start\": df[\"start\"] + basepairs,\n \"stop\": df[\"stop\"] + basepairs,\n }\n return pd.DataFrame(res)\n\n return do_shift\n\n\ndef summit(summit_annotator):\n def do_summits(df):\n summit_col = 
summit_annotator.columns[0]\n starts = (df[\"start\"] + df[summit_col]).astype(np.int)\n res = {\"chr\": df[\"chr\"], \"start\": starts, \"stop\": starts + 1}\n return pd.DataFrame(res)\n\n return do_summits, [summit_annotator]\n\n\ndef merge_connected():\n \"\"\"Merge regions that are next to each other.\n 100..200, 200..300 becomes 100..300\n \"\"\"\n\n def do_merge(df):\n from mbf_nested_intervals import merge_df_intervals\n\n return merge_df_intervals(df, lambda iv: iv.merge_connected())\n\n return do_merge\n\n\nclass LiftOver(object):\n def __init__(self):\n import mbf_genomes\n from mbf_externals.kent import LiftOver as LiftOverAlgorithm\n\n self.data_path = mbf_genomes.data_path / \"liftovers\"\n self.replacements = {\"hg19to38\": {\"11_gl000202_random\": \"GL000202.1\"}}\n self.algo = LiftOverAlgorithm()\n self.algo.store.unpack_version(self.algo.name, self.algo.version)\n\n def do_liftover(self, listOfChromosomeIntervals, chain_file):\n \"\"\"perform a lift over. Error messages are silently swallowed!\"\"\"\n tmp_input = tempfile.NamedTemporaryFile(mode=\"wb\")\n tmp_output = tempfile.NamedTemporaryFile(mode=\"wb\")\n tmp_error = tempfile.NamedTemporaryFile(mode=\"wb\")\n max_len = 0\n listOfChromosomeIntervals = [list(row) for row in listOfChromosomeIntervals]\n for row in listOfChromosomeIntervals:\n tmp_input.write(b\" \".join(to_bytes(str(x)) for x in row))\n tmp_input.write(b\"\\n\")\n max_len = max(len(row), max_len)\n tmp_input.write(b\"\\n\")\n tmp_input.flush() # it's magic ;)\n cmd = [\n self.algo.path / \"liftOver\",\n tmp_input.name,\n chain_file,\n tmp_output.name,\n tmp_error.name,\n ]\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n dummy_stdout, stderr = p.communicate()\n if p.returncode != 0: # pragma: no cover\n raise ValueError(\n \"do_liftover failed. 
Returncode: %s, stderr: %s\"\n % (p.returncode, stderr)\n )\n tmp_output_in = open(tmp_output.name, \"rb\")\n res = []\n for row in tmp_output_in:\n row = row.strip().split(b\"\\t\")\n row[0] = to_string(row[0])\n row[1] = int(row[1])\n row[2] = int(row[2])\n res.append(tuple(row))\n tmp_error_in = open(tmp_error.name, \"rb\")\n tmp_error_in.read()\n tmp_input.close()\n tmp_output.close()\n tmp_error.close()\n return res\n\n def get_convert_func(self, key, keep_name=False, filter_to_these_chromosomes=None):\n \"\"\"Note that filter_to_these_chromosomes is after the replacements have kicked in\"\"\"\n chain_file = self.data_path / (key + \".over.chain\")\n if not chain_file.exists(): # pragma: no cover\n raise ValueError(\"invalid liftover key, file not found: %s\" % chain_file)\n if filter_to_these_chromosomes:\n filter_to_these_chromosomes = set(filter_to_these_chromosomes)\n\n def do_convert(df):\n if df.index.duplicated().any(): # pragma: no cover\n raise ValueError(\"liftover only works with unique indices\")\n df.index = [str(x) for x in df.index]\n input_tuples = [\n (\"chr\" + row[\"chr\"], row[\"start\"], row[\"stop\"], idx)\n for idx, row in df.iterrows()\n ]\n\n output_tuples = self.do_liftover(input_tuples, chain_file)\n output_lists = list(zip(*output_tuples))\n res = pd.DataFrame(\n {\n \"chr\": output_lists[0],\n \"start\": output_lists[1],\n \"stop\": output_lists[2],\n \"parent\": [x.decode(\"utf-8\") for x in output_lists[3]],\n }\n ).set_index(\"parent\")\n new_chr = []\n for x in res[\"chr\"]:\n x = x[3:]\n # these are untested as of 2019-03-27\n if x == \"m\": # pragma: no cover\n x = \"MT\"\n elif (\n key in self.replacements and x in self.replacements[key]\n ): # pragma: no cover\n x = self.replacements[key][x]\n new_chr.append(x)\n res[\"chr\"] = new_chr\n for col in df.columns:\n if col not in res.columns:\n res = res.assign(**{col: df[col]})\n if filter_to_these_chromosomes:\n res = res[res[\"chr\"].isin(filter_to_these_chromosomes)]\n return res\n\n if ppg.inside_ppg():\n do_convert.dependencies = [\n ppg.FileTimeInvariant(chain_file),\n ppg.FunctionInvariant(\n \"genomics.regions.convert.LiftOver.do_liftover\",\n LiftOver.do_liftover,\n ),\n ]\n return do_convert\n\n\ndef lift_over(from_to, keep_name=False, filter_to_these_chromosomes=None):\n \"\"\"Map a genome to another genome.\n from_to looks like hg19ToHg38\n see mbf_genomes/data/liftovers for the list currently supported\"\"\"\n return LiftOver().get_convert_func(\n from_to,\n keep_name=keep_name,\n filter_to_these_chromosomes=filter_to_these_chromosomes,\n )\n\n\ndef cookie_cutter(bp):\n \"\"\" transform all their binding regions to -1/2 * bp ... 1/2 * bp centered\n around the old midpoint... (so pass in the final size of the region)\n inspired by Lupien et al (doi 10.1016/j.cell.2008.01.018\")\n \"\"\"\n\n def convert(df):\n peak_lengths = df[\"stop\"] - df[\"start\"]\n centers = np.array(df[\"start\"] + peak_lengths // 2, dtype=np.int32)\n new_starts = centers - bp // 2\n new_stops = new_starts + bp\n new_starts[new_starts < 0] = 0\n res = pd.DataFrame({\"chr\": df[\"chr\"], \"start\": new_starts, \"stop\": new_stops})\n if \"strand\" in df.columns: # pragma: no branch\n res[\"strand\"] = df[\"strand\"]\n return res\n\n return convert, [], bp\n\n\ndef cookie_summit(summit_annotator, bp, drop_those_outside_chromosomes=False):\n \"\"\" transform all their binding regions to -1/2 * bp ... 
1/2 * bp centered\n around the summit (so pass in the final size of the region)\n\n if @drop_those_outside_chromosomes is set, regions < 0 are dropped\n \"\"\"\n\n def do_summits(df):\n summit_col = summit_annotator.columns[0]\n res = {\n \"chr\": df[\"chr\"],\n \"start\": df[\"start\"] + df[summit_col].astype(int) - bp // 2,\n \"stop\": df[\"start\"] + df[summit_col].astype(int) + bp // 2,\n }\n res = pd.DataFrame(res)\n if drop_those_outside_chromosomes:\n res = res[res[\"start\"] >= 0]\n else:\n res = res.assign(start=res[\"start\"].clip(lower=0))\n return res\n\n return do_summits, [summit_annotator], (bp, drop_those_outside_chromosomes)\n\n\ndef windows(window_size, drop_smaller_windows=False):\n \"\"\"Chuck the region into window_size sized windows.\n if @drop_smaller_windows is True, the right most windows get chopped\"\"\"\n\n def create_windows(df):\n res = {\"chr\": [], \"start\": [], \"stop\": []}\n for dummy_idx, row in df.iterrows():\n for start in range(row[\"start\"], row[\"stop\"], window_size):\n stop = min(start + window_size, row[\"stop\"])\n if drop_smaller_windows and stop - start < window_size:\n continue\n res[\"chr\"].append(row[\"chr\"])\n res[\"start\"].append(start)\n res[\"stop\"].append(stop)\n return pd.DataFrame(res)\n\n return create_windows, [], (window_size, drop_smaller_windows)\n", "id": "975780", "language": "Python", "matching_score": 2.467501401901245, "max_stars_count": 0, "path": "src/mbf_genomics/regions/convert.py" }, { "content": "import pypipegraph as ppg\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom pathlib import Path\nfrom pandas.testing import assert_frame_equal\nfrom mbf_fileformats.bed import read_bed\nimport mbf_genomics.regions as regions\nimport mbf_genomics.genes as genes\nfrom mbf_genomics.annotator import Constant\n\nfrom .shared import (\n get_genome,\n get_genome_chr_length,\n force_load,\n run_pipegraph,\n RaisesDirectOrInsidePipegraph,\n MockGenome,\n)\n\n\[email protected](\"new_pipegraph\")\nclass TestGenesLoadingPPGOnly:\n def test_loading_from_genome_is_singletonic(self):\n genome = get_genome()\n print(genome)\n genesA = genes.Genes(genome)\n genesB = genes.Genes(genome)\n assert genesA is genesB\n filterA = genesA.filter(\"fa\", lambda df: df.index[:10])\n filterAa = genesA.filter(\"faa\", lambda df: df.index[:10])\n filterB = genesB.filter(\"fab\", lambda df: df.index[:10])\n assert not (filterA is genesA)\n assert not (filterAa is filterA)\n assert not (filterAa is filterB)\n with pytest.raises(ValueError): # can't have a different loading func\n filterB = genesB.filter(\"fab\", lambda df: df.index[:15])\n force_load(filterA.load)\n ppg.run_pipegraph()\n assert len(filterA.df) == 10\n\n\[email protected](\"both_ppg_and_no_ppg\")\nclass TestGenesLoading:\n def test_basic_loading_from_genome(self):\n g = genes.Genes(get_genome())\n force_load(g.load())\n run_pipegraph()\n assert len(g.df) == 246\n assert (g.df[\"gene_stable_id\"][:3] == [\"CRP_001\", \"CRP_002\", \"CRP_003\"]).all()\n assert g.df[\"gene_stable_id\"].iloc[-1] == \"CRP_182\"\n assert g.df[\"start\"].iloc[-1] == 158_649 - 1\n assert g.df[\"stop\"].iloc[-1] == 159_662\n assert g.df[\"strand\"].iloc[-1] == -1\n\n def test_filtering_with_annotator(self):\n import mbf_genomics\n\n g = genes.Genes(get_genome())\n\n class CopyAnno(mbf_genomics.annotator.Annotator):\n def __init__(self):\n self.columns = [\"copy\"]\n\n def calc(self, df):\n return pd.DataFrame({\"copy\": df[\"gene_stable_id\"]})\n\n g += CopyAnno()\n filtered = g.filter(\"a\", 
(\"gene_stable_id\", \"==\", \"CRP_003\"))\n force_load(filtered.annotate())\n run_pipegraph()\n assert (filtered.df[\"gene_stable_id\"] == [\"CRP_003\"]).all()\n assert (filtered.df[\"copy\"] == [\"CRP_003\"]).all()\n\n def test_alternative_loading_raises_on_non_df(self):\n with RaisesDirectOrInsidePipegraph(ValueError):\n g = genes.Genes(get_genome_chr_length(), lambda: None, \"myname\")\n force_load(g.load())\n\n def test_alternative_loading_raises_on_missing_column(self, both_ppg_and_no_ppg):\n df = pd.DataFrame(\n [\n {\n \"stable_id\": \"fake1\",\n \"chr\": \"1\",\n \"strand\": 1,\n \"tss\": 5000,\n \"tes\": 5500,\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake2\",\n \"chr\": \"1\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake3\",\n \"chr\": \"2\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n ]\n )\n\n def inner_tss():\n df2 = df.copy()\n df2 = df2.drop(\"tss\", axis=1)\n g = genes.Genes(get_genome(), lambda: df2, name=\"sha\")\n g.load()\n # run_pipegraph()\n\n def inner_chr():\n df2 = df.copy()\n df2 = df2.drop(\"chr\", axis=1)\n g = genes.Genes(get_genome(), lambda: df2, name=\"shu\")\n g.load()\n # run_pipegraph()\n\n def inner_tes():\n df2 = df.copy()\n df2 = df2.drop(\"tes\", axis=1)\n g = genes.Genes(get_genome(), lambda: df2, name=\"shi\")\n g.load()\n # run_pipegraph()\n\n with RaisesDirectOrInsidePipegraph(ValueError):\n inner_tss()\n if ppg.util.global_pipegraph is not None:\n both_ppg_and_no_ppg.new_pipegraph()\n with RaisesDirectOrInsidePipegraph(ValueError):\n inner_tes()\n if ppg.util.global_pipegraph is not None:\n both_ppg_and_no_ppg.new_pipegraph()\n with RaisesDirectOrInsidePipegraph(ValueError):\n inner_chr()\n if ppg.util.global_pipegraph is not None:\n both_ppg_and_no_ppg.new_pipegraph()\n\n def test_alternative_loading_raises_on_missing_name(self):\n df = pd.DataFrame(\n [\n {\n \"stable_id\": \"fake1\",\n \"chr\": \"1\",\n \"strand\": 1,\n \"tss\": 5000,\n \"tes\": 5500,\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake2\",\n \"chr\": \"1\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake3\",\n \"chr\": \"2\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n ]\n )\n\n with pytest.raises(ValueError):\n genes.Genes(get_genome(), lambda: df)\n\n def test_alternative_loading_raises_on_invalid_chromosome(self):\n df = pd.DataFrame(\n [\n {\n \"stable_id\": \"fake1\",\n \"chr\": \"1b\",\n \"strand\": 1,\n \"tss\": 5000,\n \"tes\": 5500,\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake2\",\n \"chr\": \"1\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake3\",\n \"chr\": \"2\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n ]\n )\n\n with RaisesDirectOrInsidePipegraph(ValueError):\n g = genes.Genes(get_genome(), lambda: df, name=\"shu\")\n force_load(g.load())\n\n def test_alternative_loading_raises_on_non_int_tss(self):\n df = pd.DataFrame(\n [\n {\n \"stable_id\": \"fake1\",\n \"chr\": \"1\",\n \"strand\": 1,\n \"tss\": 5000.5,\n \"tes\": 5500,\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake2\",\n \"chr\": \"1\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake3\",\n \"chr\": \"2\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n 
\"description\": \"bla\",\n },\n ]\n )\n\n with RaisesDirectOrInsidePipegraph(ValueError):\n g = genes.Genes(get_genome(), lambda: df, name=\"shu\")\n force_load(g.load())\n\n def test_alternative_loading_raises_on_non_int_tes(self):\n df = pd.DataFrame(\n [\n {\n \"stable_id\": \"fake1\",\n \"chr\": \"1\",\n \"strand\": 1,\n \"tss\": 5000,\n \"tes\": \"\",\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake2\",\n \"chr\": \"1\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake3\",\n \"chr\": \"2\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n ]\n )\n\n with RaisesDirectOrInsidePipegraph(ValueError):\n g = genes.Genes(get_genome(), lambda: df, name=\"shu\")\n force_load(g.load())\n\n def test_do_load_only_happens_once(self):\n df = pd.DataFrame(\n [\n {\n \"gene_stable_id\": \"fake1\",\n \"chr\": \"1\",\n \"strand\": 1,\n \"tss\": 5000,\n \"tes\": 5500,\n \"description\": \"bla\",\n }\n ]\n )\n counter = [0]\n\n def load():\n counter[0] += 1\n return df\n\n g = genes.Genes(get_genome_chr_length(), load, name=\"shu\")\n if ppg.inside_ppg():\n assert counter[0] == 0\n g.load()\n assert counter[0] == 0\n g.load()\n assert counter[0] == 0\n ppg.run_pipegraph()\n else:\n assert counter[0] == 1\n g.load()\n assert counter[0] == 1\n\n def test_filtering_away_works(self):\n genome = MockGenome(\n pd.DataFrame(\n [\n {\n \"stable_id\": \"fake1\",\n \"chr\": \"1\",\n \"strand\": 1,\n \"tss\": 5000,\n \"tes\": 5500,\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake2\",\n \"chr\": \"1\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake3\",\n \"chr\": \"2\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n ]\n )\n )\n g = genes.Genes(genome)\n filtered = g.filter(\"nogenes\", lambda df: df[\"chr\"] == \"4\")\n force_load(filtered.load())\n run_pipegraph()\n assert len(filtered.df) == 0\n assert \"start\" in filtered.df.columns\n assert \"stop\" in filtered.df.columns\n assert \"tss\" in filtered.df.columns\n assert \"tes\" in filtered.df.columns\n assert \"gene_stable_id\" in filtered.df.columns\n\n def test_annotators_are_kept_on_filtering(self):\n genome = MockGenome(\n pd.DataFrame(\n [\n {\n \"stable_id\": \"fake1\",\n \"chr\": \"1\",\n \"strand\": 1,\n \"tss\": 5000,\n \"tes\": 5500,\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake2\",\n \"chr\": \"1\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake3\",\n \"chr\": \"2\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n ]\n )\n )\n g = genes.Genes(genome)\n ca = Constant(\"shu\", 5)\n g.add_annotator(ca)\n filtered = g.filter(\"nogenes\", lambda df: df[\"chr\"] == \"4\")\n assert filtered.has_annotator(ca)\n\n def test_filtering_returns_genes(self):\n g = genes.Genes(get_genome())\n on_chr_1 = g.filter(\"on_1\", lambda df: df[\"chr\"] == \"1\")\n assert g.__class__ == on_chr_1.__class__\n\n def test_overlap_genes_requires_two_genes(self):\n genome = get_genome()\n a = genes.Genes(genome)\n\n def sample_data():\n return pd.DataFrame(\n {\"chr\": [\"Chromosome\"], \"start\": [1000], \"stop\": [1100]}\n )\n\n b = regions.GenomicRegions(\"sha\", sample_data, [], genome)\n force_load(a.load())\n force_load(b.load())\n run_pipegraph()\n\n with pytest.raises(ValueError):\n a.overlap_genes(b)\n\n def 
test_overlap_genes_raises_on_unequal_genomes(self):\n genome = get_genome(\"A\")\n genomeB = get_genome(\"B\")\n a = genes.Genes(genome)\n b = genes.Genes(genomeB)\n\n with pytest.raises(ValueError):\n a.overlap_genes(b)\n\n def test_overlap(self):\n genome = MockGenome(\n pd.DataFrame(\n [\n {\n \"stable_id\": \"fake1\",\n \"chr\": \"1\",\n \"strand\": 1,\n \"tss\": 5000,\n \"tes\": 5500,\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake2\",\n \"chr\": \"1\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake3\",\n \"chr\": \"2\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n ]\n )\n )\n g = genes.Genes(genome)\n on_chr_1 = g.filter(\"on_1\", lambda df: df[\"chr\"] == \"1\")\n on_chr_2 = g.filter(\"on_2\", lambda df: df[\"chr\"] == \"2\")\n one = g.filter(\"one\", lambda df: df[\"gene_stable_id\"] == \"fake1\")\n force_load(on_chr_1.load())\n force_load(on_chr_2.load())\n force_load(one.load())\n run_pipegraph()\n assert len(on_chr_1.df) == 2\n assert len(on_chr_2.df) == 1\n assert len(one.df) == 1\n assert g.overlap_genes(on_chr_1) == len(on_chr_1.df)\n assert on_chr_1.overlap_genes(g) == len(on_chr_1.df)\n assert on_chr_1.overlap_genes(on_chr_1) == len(on_chr_1.df)\n assert g.overlap_genes(on_chr_2) == len(on_chr_2.df)\n assert on_chr_2.overlap_genes(g) == len(on_chr_2.df)\n assert on_chr_2.overlap_genes(on_chr_2) == len(on_chr_2.df)\n assert g.overlap_genes(one) == len(one.df)\n assert one.overlap_genes(g) == len(one.df)\n assert one.overlap_genes(one) == len(one.df)\n\n assert on_chr_1.overlap_genes(one) == 1\n assert one.overlap_genes(on_chr_1) == 1\n\n assert on_chr_1.overlap_genes(on_chr_2) == 0\n assert on_chr_2.overlap_genes(on_chr_1) == 0\n\n def test_get_tss_regions(self):\n genome = MockGenome(\n pd.DataFrame(\n [\n {\n \"stable_id\": \"fake1\",\n \"chr\": \"1\",\n \"strand\": 1,\n \"tss\": 5000,\n \"tes\": 5500,\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake2\",\n \"chr\": \"1\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake3\",\n \"chr\": \"2\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n ]\n )\n )\n g = genes.Genes(genome)\n tss = g.regions_tss()\n force_load(tss.load())\n run_pipegraph()\n assert len(tss.df) == 3\n assert (tss.df[\"start\"] == [5000, 5400, 5400]).all()\n assert (tss.df[\"stop\"] == tss.df[\"start\"] + 1).all()\n assert (tss.df[\"chr\"] == [\"1\", \"1\", \"2\"]).all()\n\n def test_get_tes_regions(self):\n genome = MockGenome(\n pd.DataFrame(\n [\n {\n \"stable_id\": \"fake1\",\n \"chr\": \"1\",\n \"strand\": 1,\n \"tss\": 3000,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake2\",\n \"chr\": \"1\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake3\",\n \"chr\": \"2\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n ]\n )\n )\n g = genes.Genes(genome)\n tes = g.regions_tes()\n force_load(tes.load())\n run_pipegraph()\n assert len(tes.df) == 2\n assert (tes.df[\"start\"] == [4900, 4900]).all()\n assert (tes.df[\"stop\"] == tes.df[\"start\"] + 1).all()\n assert (tes.df[\"chr\"] == [\"1\", \"2\"]).all()\n\n def test_get_exons_regions_overlapping(self):\n genome = MockGenome(\n pd.DataFrame(\n [\n {\n \"stable_id\": \"fake1\",\n \"chr\": \"1\",\n \"strand\": 1,\n \"tss\": 3000,\n \"tes\": 4900,\n 
\"description\": \"bla\",\n \"name\": \"bla1\",\n },\n {\n \"stable_id\": \"fake2\",\n \"chr\": \"1\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n \"name\": \"bla2\",\n },\n {\n \"stable_id\": \"fake3\",\n \"chr\": \"2\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n \"name\": \"bla3\",\n },\n ]\n ),\n # {transcript_stable_id, gene_stable_id, strand, start, end, exons},\n df_transcripts=pd.DataFrame(\n {\n \"transcript_stable_id\": [\"trans1a\", \"trans1b\", \"trans2\", \"trans3\"],\n \"gene_stable_id\": [\"fake1\", \"fake1\", \"fake2\", \"fake3\"],\n \"chr\": [\"1\", \"1\", \"1\", \"2\"],\n \"strand\": [1, 1, -1, -1],\n \"start\": [3100, 3000, 4910, 4900],\n \"stop\": [4900, 4000, 5400, 5400],\n \"exons\": [\n [(3100, 4900)],\n [(3000, 3500), (3300, 3330), (3750, 4000)],\n [(4910, 5000), (5100, 5400)],\n [(4900, 5400)],\n ],\n }\n ),\n )\n g = genes.Genes(genome)\n exons = g.regions_exons_overlapping()\n force_load(exons.load())\n run_pipegraph()\n assert (exons.df[\"start\"] == [3000, 3100, 3300, 3750, 4910, 5100, 4900]).all()\n assert (exons.df[\"stop\"] == [3500, 4900, 3330, 4000, 5000, 5400, 5400]).all()\n assert (exons.df[\"chr\"] == np.array([\"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"2\"])).all()\n\n def test_get_exons_regions_merging(self):\n genome = MockGenome(\n pd.DataFrame(\n [\n {\n \"stable_id\": \"fake1\",\n \"chr\": \"1\",\n \"strand\": 1,\n \"tss\": 3000,\n \"tes\": 4900,\n \"description\": \"bla\",\n \"name\": \"bla1\",\n },\n {\n \"stable_id\": \"fake2\",\n \"chr\": \"1\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n \"name\": \"bla2\",\n },\n {\n \"stable_id\": \"fake3\",\n \"chr\": \"2\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n \"name\": \"bla3\",\n },\n ]\n ),\n # {transcript_stable_id, gene_stable_id, strand, start, end, exons},\n df_transcripts=pd.DataFrame(\n {\n \"transcript_stable_id\": [\"trans1a\", \"trans1b\", \"trans2\", \"trans3\"],\n \"gene_stable_id\": [\"fake1\", \"fake1\", \"fake2\", \"fake3\"],\n \"chr\": [\"1\", \"1\", \"1\", \"2\"],\n \"strand\": [1, 1, -1, -1],\n \"start\": [3100, 3000, 4910, 4900],\n \"stop\": [4900, 4000, 5400, 5400],\n \"exons\": [\n [(3100, 4900)],\n [(3000, 3500), (3300, 3330), (3750, 4000)],\n [(4910, 5000), (5100, 5400)],\n [(4900, 5400)],\n ],\n }\n ),\n )\n g = genes.Genes(genome)\n exons = g.regions_exons_merged()\n force_load(exons.load())\n run_pipegraph()\n assert (exons.df[\"start\"] == [3000, 4910, 5100, 4900]).all()\n assert (exons.df[\"stop\"] == [4900, 5000, 5400, 5400]).all()\n assert (exons.df[\"chr\"] == [\"1\", \"1\", \"1\", \"2\"]).all()\n\n def test_get_intron_regions(self):\n genome = MockGenome(\n pd.DataFrame(\n [\n {\n \"stable_id\": \"fake1\",\n \"chr\": \"1\",\n \"strand\": 1,\n \"tss\": 3000,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake2\",\n \"chr\": \"1\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake3\",\n \"chr\": \"2\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n ]\n ),\n # {transcript_stable_id, gene_stable_id, strand, start, end, exons},\n df_transcripts=pd.DataFrame(\n {\n \"transcript_stable_id\": [\"trans1a\", \"trans1b\", \"trans2\", \"trans3\"],\n \"gene_stable_id\": [\"fake1\", \"fake1\", \"fake2\", \"fake3\"],\n \"chr\": [\"1\", \"1\", \"1\", \"2\"],\n \"strand\": [1, 1, -1, -1],\n \"start\": 
[3100, 3000, 4900, 4900],\n \"stop\": [4900, 4000, 5400, 5400],\n \"exons\": [\n [(3100, 4900)],\n [(3000, 3500), (3750, 4000)],\n [(4900, 5000), (5100, 5400)],\n [(4900, 5400)],\n ],\n }\n ),\n )\n g = genes.Genes(genome)\n introns = g.regions_introns()\n force_load(introns.load())\n run_pipegraph()\n assert (introns.df[\"start\"] == [3000, 3500, 4000, 5000]).all()\n assert (introns.df[\"stop\"] == [3100, 3750, 4900, 5100]).all()\n # no intronic region on chr 2\n assert (introns.df[\"chr\"] == [\"1\", \"1\", \"1\", \"1\"]).all()\n\n\[email protected](\"both_ppg_and_no_ppg\")\nclass TestGenes:\n def test_write_bed(self):\n genome = MockGenome(\n pd.DataFrame(\n [\n {\n \"stable_id\": \"fake1\",\n \"chr\": \"1\",\n \"strand\": 1,\n \"tss\": 5000,\n \"tes\": 5500,\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake2\",\n \"chr\": \"1\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake3\",\n \"chr\": \"2\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n ]\n )\n )\n g = genes.Genes(genome)\n sample_filename = \"genes.bed\"\n g.write_bed(sample_filename)\n run_pipegraph()\n assert len(g.df) > 0\n read = read_bed(g.result_dir / sample_filename)\n assert len(read) == len(g.df)\n assert read[0].refseq == b\"1\"\n assert read[1].refseq == b\"1\"\n assert read[2].refseq == b\"2\"\n assert read[0].position == 4900\n assert read[1].position == 5000\n assert read[2].position == 4900\n assert read[0].length == 500\n assert read[1].length == 500\n assert read[2].length == 500\n assert read[0].name == b\"fake2\"\n assert read[1].name == b\"fake1\"\n assert read[2].name == b\"fake3\"\n\n def test_write_bed_auto_filename(self):\n genome = MockGenome(\n pd.DataFrame(\n [\n {\n \"stable_id\": \"fake1\",\n \"chr\": \"1\",\n \"strand\": 1,\n \"tss\": 5000,\n \"tes\": 5500,\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake2\",\n \"chr\": \"1\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake3\",\n \"chr\": \"2\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n ]\n )\n )\n g = genes.Genes(genome, sheet_name=\"da_genes\")\n assert \"/da_genes/\" in str(g.result_dir)\n sample_filename = g.write_bed()[1]\n run_pipegraph()\n assert len(g.df) > 0\n read = read_bed(sample_filename)\n assert len(read) == len(g.df)\n assert read[0].refseq == b\"1\"\n assert read[1].refseq == b\"1\"\n assert read[2].refseq == b\"2\"\n assert read[0].position == 4900\n assert read[1].position == 5000\n assert read[2].position == 4900\n assert read[0].length == 500\n assert read[1].length == 500\n assert read[2].length == 500\n assert read[0].name == b\"fake2\"\n assert read[1].name == b\"fake1\"\n assert read[2].name == b\"fake3\"\n\n # def test_annotation_keeps_row_names(self):\n # g = genes.Genes(dummyGenome)\n # g.do_load()\n # row_names = g.df.row_names\n # g.annotate()\n # run_pipegraph()\n # self.assertTrue((row_names == g.df.row_names).all())\n\n def test_write(self):\n g = genes.Genes(get_genome())\n with pytest.raises(ValueError):\n g.write(mangler_function=lambda df: df.tail())\n a = g.write()\n b = g.write(\"b.xls\")\n mangle = lambda df: df.head() # noqa: E731\n c = g.write(\"c.xls\", mangle)\n # this is ok... 
in ppg1 only., not in ppg2 strict\n # c = g.write(\"c.xls\", mangle)\n if ppg.util.inside_ppg(): # this is ok outside of ppg\n if hasattr(ppg, \"is_ppg2\"):\n with pytest.raises(ValueError):\n c = g.write(\"c.xls\", mangle)\n with pytest.raises(ValueError):\n g.write(\"c.xls\", lambda df: df.tail())\n run_pipegraph()\n afn = a[1]\n bfn = b[1]\n cfn = c[1]\n\n assert Path(afn).exists()\n assert Path(bfn).exists()\n assert Path(cfn).exists()\n assert_frame_equal(pd.read_csv(afn, sep=\"\\t\"), pd.read_excel(bfn))\n assert_frame_equal(\n pd.read_excel(bfn).head(),\n pd.read_excel(cfn),\n check_column_type=False,\n check_dtype=False,\n )\n\n def test_write_filtered(self):\n g = genes.Genes(get_genome())\n g2 = g.filter(\"filtered\", lambda df: df.index[:2])\n g2.write(Path(\"filtered.xls\").absolute())\n run_pipegraph()\n assert Path(\"filtered.xls\").exists()\n df = pd.read_excel(\"filtered.xls\")\n assert len(df) == 2\n assert \"parent_row\" in df.columns\n assert (df[\"parent_row\"] == [0, 1]).all()\n\n def test_invalid_chromosomes(self):\n def a():\n return pd.DataFrame(\n {\n \"chr\": \"7a\",\n \"start\": 100,\n \"stop\": 1000,\n \"tss\": 100,\n \"tes\": 1000,\n \"strand\": 1,\n \"name\": \"gene1\",\n \"gene_stable_id\": \"gene1\",\n },\n index=[\"gene1\"],\n )\n\n genome = get_genome()\n with RaisesDirectOrInsidePipegraph(ValueError):\n genes.Genes(\n genome, alternative_load_func=a, name=\"my_genes\", result_dir=\"my_genes\"\n ).load()\n\n def test_invalid_tss(self):\n def a():\n return pd.DataFrame(\n {\n \"chr\": \"Chromosome\",\n \"tss\": \"100\",\n \"tes\": 1000,\n \"strand\": 1,\n \"name\": \"gene1\",\n \"gene_stable_id\": \"gene1\",\n },\n index=[\"gene1\"],\n )\n\n genome = get_genome()\n with RaisesDirectOrInsidePipegraph(ValueError):\n genes.Genes(\n genome, alternative_load_func=a, name=\"my_genes\", result_dir=\"my_genes\"\n ).load()\n\n def test_invalid_tes(self):\n def a():\n return pd.DataFrame(\n {\n \"chr\": \"Chromosome\",\n \"tss\": 100,\n \"tes\": 1000.5,\n \"strand\": 1,\n \"name\": \"gene1\",\n \"gene_stable_id\": \"gene1\",\n },\n index=[\"gene1\"],\n )\n\n genome = get_genome()\n with RaisesDirectOrInsidePipegraph(ValueError):\n genes.Genes(\n genome, alternative_load_func=a, name=\"my_genes\", result_dir=\"my_genes\"\n ).load()\n\n def test_invalid_start_stop(self):\n def a():\n return pd.DataFrame(\n {\n \"chr\": \"Chromosome\",\n \"tss\": 100,\n \"tes\": 10,\n \"start\": 100,\n \"stop\": 10,\n \"strand\": 1,\n \"name\": \"gene1\",\n \"gene_stable_id\": \"gene1\",\n },\n index=[\"gene1\"],\n )\n\n genome = get_genome()\n with RaisesDirectOrInsidePipegraph(ValueError):\n genes.Genes(\n genome, alternative_load_func=a, name=\"my_genes\", result_dir=\"my_genes\"\n ).load()\n\n\[email protected](\"new_pipegraph\")\nclass TestGenesPPG:\n def test_def_twice_alternative_loading_func(self):\n def a():\n return pd.DataFrame(\n {\n \"chr\": \"1\",\n \"start\": 100,\n \"stop\": 1000,\n \"tss\": 100,\n \"tes\": 1000,\n \"strand\": 1,\n \"name\": \"gene1\",\n \"gene_stable_id\": \"gene1\",\n },\n index=[\"gene1\"],\n )\n\n def b():\n return pd.DataFrame(\n {\n \"chr\": \"1\",\n \"start\": 110,\n \"stop\": 1000,\n \"tss\": 110,\n \"tes\": 1000,\n \"strand\": 1,\n \"name\": \"gene1\",\n \"gene_stable_id\": \"gene1\",\n },\n index=[\"gene1\"],\n )\n\n genome = MockGenome(\n pd.DataFrame(\n [\n {\n \"stable_id\": \"fake1\",\n \"chr\": \"1\",\n \"strand\": 1,\n \"tss\": 5000,\n \"tes\": 5500,\n \"description\": \"bla\",\n }\n ]\n )\n )\n gA = genes.Genes(\n genome, 
alternative_load_func=a, name=\"my_genes\", result_dir=\"my_genes\"\n )\n assert gA.result_dir.resolve() == Path(\"my_genes\").resolve()\n gA.load()\n gA.load()\n with pytest.raises(ValueError):\n genes.Genes(genome, alternative_load_func=b, name=\"my_genes\")\n\n\[email protected](\"both_ppg_and_no_ppg\")\nclass TestGenesFrom:\n def test_difference(self):\n genome = get_genome()\n a = genes.Genes(genome)\n b = a.filter(\"filtered\", lambda df: df.index[:5])\n c = genes.FromDifference(\"delta\", a, b)\n force_load(c.load())\n run_pipegraph()\n assert len(c.df) == len(a.df) - len(b.df)\n\n def test_intersection(self):\n genome = get_genome()\n a = genes.Genes(genome)\n b = a.filter(\"filtered\", lambda df: df.index[:5])\n c = a.filter(\"filtered2\", lambda df: df.index[4:6])\n with pytest.raises(ValueError):\n d = genes.FromIntersection(\"delta\", b, c)\n d = genes.FromIntersection(\"delta\", [b, c])\n force_load(a.load())\n force_load(d.load())\n run_pipegraph()\n assert len(d.df) == 1\n assert list(d.df.gene_stable_id) == list(a.df.gene_stable_id.loc[4:4])\n\n def test_intersection2(self):\n genome = get_genome()\n a = genes.Genes(genome)\n b = a.filter(\"filtered\", lambda df: df.index[:5], vid=\"AA\")\n c = b.filter(\"filtered2\", lambda df: df.index[:1], vid=[\"BB\", \"CC\"])\n with pytest.raises(ValueError):\n d = genes.FromIntersection(\"delta\", b, c)\n d = genes.FromIntersection(\"delta\", [b, c])\n force_load(a.load())\n force_load(d.load())\n run_pipegraph()\n assert len(d.df) == 1\n assert list(d.df.gene_stable_id) == list(a.df.gene_stable_id.loc[0:0])\n assert \"AA\" in d.vid\n assert \"BB\" in d.vid\n assert \"CC\" in d.vid\n\n def test_from_any(self):\n genome = get_genome()\n a = genes.Genes(genome)\n b = a.filter(\"filtered\", lambda df: df.index[:5])\n c = a.filter(\"filtered2\", lambda df: df.index[-5:])\n d = a.filter(\"filtered3\", lambda df: df.index[10:15])\n e = genes.FromAny(\"delta\", [b, c, d], sheet_name=\"shu\")\n force_load(e.load())\n force_load(a.load())\n run_pipegraph()\n assert len(e.df) == 15\n assert sorted(list(e.df.gene_stable_id)) == sorted(\n list(a.df.gene_stable_id.iloc[:5])\n + list(a.df.gene_stable_id.iloc[10:15])\n + list(a.df.gene_stable_id.iloc[-5:])\n )\n assert \"/shu/\" in str(e.result_dir)\n\n def test_from_all(self):\n genome = get_genome()\n a = genes.Genes(genome)\n b = a.filter(\"filtered\", lambda df: df.index[:5])\n c = a.filter(\"filtered2\", lambda df: df.index[0:10])\n d = a.filter(\"filtered3\", lambda df: df.index[3:10])\n e = genes.FromAll(\"delta\", [b, c, d])\n force_load(e.load())\n force_load(a.load())\n run_pipegraph()\n assert len(e.df) == 2\n assert list(e.df.gene_stable_id) == list(a.df.gene_stable_id.loc[3:4])\n\n def test_from_none(self):\n genome = get_genome()\n a = genes.Genes(genome)\n b = a.filter(\"filtered\", lambda df: df.index[:5])\n c = a.filter(\"filtered2\", lambda df: df.index[-5:])\n d = a.filter(\"filtered3\", lambda df: df.index[3:10])\n e = genes.FromNone(\"delta\", [b, c, d])\n force_load(e.load())\n force_load(a.load())\n run_pipegraph()\n assert len(e.df) == len(a.df) - 5 - 5 - 5\n\n def test_genes_from_file(self, both_ppg_and_no_ppg):\n genome = get_genome()\n a = genes.Genes(genome)\n b = a.filter(\"filtered\", lambda df: df.index[:5])\n b.write(Path(\"filtered.xls\").absolute())\n force_load(b.load())\n print(both_ppg_and_no_ppg)\n run_pipegraph()\n assert not \"summit middle\" in a.df.columns\n assert not \"summit middle\" in b.df.columns\n print(both_ppg_and_no_ppg)\n 
both_ppg_and_no_ppg.new_pipegraph()\n genome = get_genome()\n c = genes.FromFile(\"reimport\", genome, Path(\"filtered.xls\").absolute())\n force_load(c.load())\n run_pipegraph()\n assert_frame_equal(b.df, c.df)\n\n def test_genes_from_file_of_transcripts(self):\n genome = get_genome()\n df = pd.DataFrame({\"a column!\": genome.df_transcripts.index[:5]})\n df.to_excel(\"transcripts.xls\")\n a = genes.FromFileOfTranscripts(\n \"my genes\", genome, \"transcripts.xls\", \"a column!\"\n )\n force_load(a.load())\n run_pipegraph()\n assert len(a.df) == 5\n tr = set()\n for gene_stable_id in a.df[\"gene_stable_id\"]:\n tr.update(\n [\n tr.transcript_stable_id\n for tr in genome.genes[gene_stable_id].transcripts\n ]\n )\n assert tr == set(genome.df_transcripts.index[:5])\n\n def test_genes_from_biotypes(self):\n genome = get_genome()\n nc = [\"tRNA\", \"rRNA\"]\n non_coding = genes.FromBiotypes(genome, nc)\n force_load(non_coding.load())\n run_pipegraph()\n assert len(non_coding.df) == genome.df_genes.biotype.isin(nc).sum()\n", "id": "6266829", "language": "Python", "matching_score": 4.644864559173584, "max_stars_count": 0, "path": "tests/test_genes.py" }, { "content": "# flake8: noqa\nif False: \n import pytest\n import mbf_genomics.genes as genes\n\n @pytest.mark.usefixtures(\"new_pipegraph\")\n class TestRegionAnnotationWithGenes:\n def test_anno_next_genes(self):\n genome = DummyGenome(\n pd.DataFrame(\n [\n {\n \"stable_id\": \"fake1\",\n \"chr\": \"1\",\n \"strand\": 1,\n \"tss\": 5000,\n \"tes\": 5500,\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake2\",\n \"chr\": \"1\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake3\",\n \"chr\": \"2\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n ]\n )\n )\n\n def sample_data():\n df = pd.DataFrame(\n {\n \"chr\": [\"1\", \"2\", \"1\", \"3\", \"5\"],\n \"start\": [10, 100, 6000, 10000, 100000],\n \"stop\": [12, 110, 6110, 11110, 111110],\n }\n )\n df = df.assign(summit=(df[\"stop\"] - df[\"start\"]) / 2)\n return df\n\n a = regions.GenomicRegions(\"shu\", sample_data, [], genome)\n anno = regions.annotators.NextGenes(still_ok=True)\n a.add_annotator(anno)\n a.load()\n ppg.run_pipegraph()\n\n assert (\n a.df[\"Primary gene stable_id\"] == [\"fake1\", \"fake2\", \"fake3\", \"\", \"\"]\n ).all()\n should = [\n -1.0 * (5000 - (11)),\n -1.0 * (6055 - 5400),\n -1.0 * (105 - 5400),\n numpy.nan,\n numpy.nan,\n ]\n assert (\n (a.df[\"Primary gene distance\"] == should)\n | numpy.isnan(a.df[\"Primary gene distance\"])\n ).all()\n\n\n def test_anno_next_genes(self):\n genome = DummyGenome(\n pd.DataFrame(\n [\n {\n \"stable_id\": \"fake1\",\n \"chr\": \"1\",\n \"strand\": 1,\n \"tss\": 5000,\n \"tes\": 5500,\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake2\",\n \"chr\": \"1\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake3\",\n \"chr\": \"2\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n ]\n )\n )\n\n def sample_data():\n df = pd.DataFrame(\n {\n \"chr\": [\"1\", \"2\", \"1\", \"3\", \"5\"],\n \"start\": [10, 100, 6000, 10000, 100000],\n \"stop\": [12, 110, 6110, 11110, 111110],\n }\n )\n df = df.assign(summit=(df[\"stop\"] - df[\"start\"]) / 2)\n return df\n\n a = regions.GenomicRegions(\"shu\", sample_data, [], genome)\n anno = regions.annotators.NextGenes(still_ok=True)\n a.add_annotator(anno)\n a.load()\n 
ppg.run_pipegraph()\n\n assert (a.df[\"Primary gene stable_id\"] == [\"fake1\", \"fake2\", \"fake3\", \"\", \"\"]).all()\n should = [\n -1.0 * (5000 - (11)),\n -1.0 * (6055 - 5400),\n -1.0 * (105 - 5400),\n numpy.nan,\n numpy.nan,\n ]\n assert (\n (a.df[\"Primary gene distance\"] == should)\n | numpy.isnan(a.df[\"Primary gene distance\"])\n ).all()\n", "id": "889504", "language": "Python", "matching_score": 2.75820255279541, "max_stars_count": 0, "path": "tests/test_gene_region_annotators.py" }, { "content": "import pytest\nimport pandas as pd\nimport pypipegraph as ppg\nfrom mbf_genomics import genes, DelayedDataFrame\nfrom mbf_genomics.testing import MockGenome\nfrom pypipegraph.testing import force_load\nfrom pathlib import Path\n\n\[email protected](\"new_pipegraph\")\nclass TestDescription:\n def test_simple(self):\n genome = MockGenome(\n pd.DataFrame(\n {\n \"stable_id\": [\"a\", \"b\", \"c\"],\n \"chr\": \"1\",\n \"tss\": [0, 100, 1000],\n \"tes\": [10, 101, 1010],\n }\n ),\n df_genes_meta=pd.DataFrame(\n {\n \"gene_stable_id\": [\"a\", \"b\", \"c\"],\n \"description\": [\"hello\", \"world\", \"!\"],\n }\n ).set_index(\"gene_stable_id\"),\n )\n g = genes.Genes(genome)\n anno = genes.annotators.Description()\n g += anno\n force_load(g.annotate())\n ppg.run_pipegraph()\n assert \"description\" in g.df.columns\n assert (\n g.df.sort_values(\"gene_stable_id\")[\"description\"] == [\"hello\", \"world\", \"!\"]\n ).all()\n\n def test_external_genome(self):\n genome = MockGenome(\n pd.DataFrame(\n {\n \"stable_id\": [\"a\", \"b\", \"c\"],\n \"chr\": \"1\",\n \"tss\": [0, 100, 1000],\n \"tes\": [10, 101, 1010],\n }\n ),\n df_genes_meta=pd.DataFrame(\n {\n \"gene_stable_id\": [\"a\", \"b\", \"c\"],\n \"description\": [\"hello\", \"world\", \"!\"],\n }\n ).set_index(\"gene_stable_id\"),\n )\n g = DelayedDataFrame(\"ex\", pd.DataFrame({\"gene_stable_id\": [\"a\", \"c\", \"b\"]}))\n anno = genes.annotators.Description(genome)\n g += anno\n force_load(g.annotate())\n ppg.run_pipegraph()\n assert \"description\" in g.df.columns\n assert (\n g.df.sort_values(\"gene_stable_id\")[\"description\"] == [\"hello\", \"world\", \"!\"]\n ).all()\n\n def test_missing_external_genome(self):\n g = DelayedDataFrame(\"ex\", pd.DataFrame({\"gene_stable_id\": [\"a\", \"c\", \"b\"]}))\n anno = genes.annotators.Description()\n g += anno\n force_load(g.annotate())\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n assert \"ddf had no .genome and no genome was passed to Description\" in str(\n g.anno_jobs[anno.get_cache_name()].lfg.exception\n )\n\n\[email protected](\"new_pipegraph\")\nclass TestFromFile:\n def test_simple(self, tmpdir):\n genome = MockGenome(\n pd.DataFrame(\n {\n \"stable_id\": [\"a\", \"b\", \"c\"],\n \"chr\": \"1\",\n \"tss\": [0, 100, 1000],\n \"tes\": [10, 101, 1010],\n }\n ),\n df_genes_meta=pd.DataFrame(\n {\n \"gene_stable_id\": [\"a\", \"b\", \"c\"],\n \"description\": [\"hello\", \"world\", \"!\"],\n }\n ).set_index(\"gene_stable_id\"),\n )\n g = genes.Genes(genome)\n df_to_add = pd.DataFrame(\n {\"testcol\": [1, 2, 3], \"index_vals\": [\"a\", \"b\", \"d\"]}, index=[\"a\", \"b\", \"d\"]\n )\n tmp_path = Path(tmpdir) / \"dump.tsv\"\n df_to_add.to_csv(tmp_path, sep=\"\\t\", index=False)\n anno = genes.annotators.FromFile(\n tmp_path,\n columns_to_add=[\"testcol\"],\n index_column_table=\"index_vals\",\n index_column_genes=\"gene_stable_id\",\n fill_value=-1,\n )\n g += anno\n force_load(g.annotate())\n ppg.run_pipegraph()\n print(g.df.index)\n print(g.df)\n assert \"testcol\" 
in g.df.columns\n assert g.df.loc[0][\"testcol\"] == 1\n assert g.df.loc[1][\"testcol\"] == 2\n assert g.df.loc[2][\"testcol\"] == -1\n assert len(g.df) == 3\n", "id": "5417670", "language": "Python", "matching_score": 3.713061809539795, "max_stars_count": 0, "path": "tests/test_genes_annotators.py" }, { "content": "from mbf_genomics.annotator import Annotator, FromFile\nimport pandas as pd\n\n\nclass Description(Annotator):\n \"\"\"Add the description for the genes from genome.\n\n @genome may be None (default), then the ddf is queried for a '.genome'\n Requires a genome with df_genes_meta - e.g. EnsemblGenomes\n \"\"\"\n\n columns = [\"description\"]\n\n def __init__(self, genome=None):\n self.genome = genome\n\n def calc_ddf(self, ddf):\n if self.genome is None:\n try:\n genome = ddf.genome\n except AttributeError:\n raise AttributeError(\n \"ddf had no .genome and no genome was passed to Description\"\n )\n else:\n genome = self.genome\n lookup = dict(genome.df_genes_meta[\"description\"].items())\n result = []\n for gene_stable_id in ddf.df[\"gene_stable_id\"]:\n result.append(lookup.get(gene_stable_id, \"\"))\n return pd.Series(result, index=ddf.df.index)\n\n\ndef GeneStrandedSalmon(*args, **kwargs):\n \"\"\"Deprecated. use anno_tag_counts.Salmon\"\"\"\n raise NotImplementedError(\"Deprecated. Use anno_tag_counts.Salmon\")\n\n\n# FromFile forwarded to mbf_genomics.annotator.FromFile\nFromFile = FromFile\n", "id": "7309021", "language": "Python", "matching_score": 0.9737212061882019, "max_stars_count": 0, "path": "src/mbf_genomics/genes/annotators.py" }, { "content": "from mbf_genomics.annotator import Annotator\nimport hashlib\nimport numpy as np\nimport pandas as pd\n\n\nclass NormalizationCPMBiotypes(Annotator):\n \"\"\"Tormalize to 1e6 by taking the sum of all [biotype, biotype2] genes.\n All other genes receive nan as their normalized value\"\"\"\n\n def __init__(self, raw_anno, biotypes):\n self.genome = raw_anno.genome\n if not isinstance(biotypes, tuple): # pragma: no cover\n raise ValueError(\"biotypes must be a tuple\")\n self.biotypes = biotypes\n self.raw_anno = raw_anno\n self.vid = raw_anno.vid\n self.normalize_to = 1e6\n self.aligned_lane = raw_anno.aligned_lane\n self.columns = [\n self.raw_anno.columns[0] + \" CPM(%s)\" % (\", \".join(sorted(biotypes)))\n ]\n self.cache_name = hashlib.md5(self.columns[0].encode(\"utf-8\")).hexdigest()\n self.column_properties = {\n self.columns[0]: {\n \"description\": \"Tag count inside protein coding (all if no protein coding transcripts) exons, normalized to 1e6 across genes in biotypes %s\"\n % (biotypes,)\n }\n }\n\n def dep_annos(self):\n return [self.raw_anno]\n\n def calc(self, df):\n raw_counts = df[self.raw_anno.columns[0]].copy()\n ok = np.zeros(len(df), np.bool)\n for biotype in self.biotypes:\n ok |= df[\"biotype\"] == biotype\n raw_counts[~ok] = np.nan\n total = float(raw_counts[ok].sum())\n result = raw_counts * (self.normalize_to / total)\n return pd.DataFrame({self.columns[0]: result})\n\n\nclass NormalizationTPMBiotypes(Annotator):\n \"\"\"TPM, but only consider genes matching one of the biotypes\n All other genes receive nan as their normalized value\"\"\"\n\n def __init__(self, raw_anno, biotypes):\n self.genome = raw_anno.genome\n if not isinstance(biotypes, tuple): # pragma: no cover\n raise ValueError(\"biotypes must be a tuple\")\n self.biotypes = biotypes\n self.raw_anno = raw_anno\n self.vid = raw_anno.vid\n self.normalize_to = 1e6\n self.aligned_lane = raw_anno.aligned_lane\n self.columns = [\n 
self.raw_anno.columns[0] + \" tpm(%s)\" % (\", \".join(sorted(biotypes)))\n ]\n self.cache_name = hashlib.md5(self.columns[0].encode(\"utf-8\")).hexdigest()\n self.column_properties = {\n self.columns[0]: {\n \"description\": \"transcripts per million, considering only biotypes %s\"\n % (biotypes,)\n }\n }\n\n def dep_annos(self):\n return [self.raw_anno]\n\n def calc(self, df):\n raw_counts = df[self.raw_anno.columns[0]].copy()\n ok = np.zeros(len(df), np.bool)\n for biotype in self.biotypes:\n ok |= df[\"biotype\"] == biotype\n raw_counts[~ok] = np.nan\n\n length_by_gene = self.raw_anno.interval_strategy.get_interval_lengths_by_gene(\n self.genome\n )\n result = np.zeros(raw_counts.shape, float)\n for ii, gene_stable_id in enumerate(df[\"gene_stable_id\"]):\n result[ii] = raw_counts[ii] / float(length_by_gene[gene_stable_id])\n total = float(result[ok].sum()) # result.sum would be nan!\n factor = self.normalize_to / total\n result = result * factor\n return pd.DataFrame({self.columns[0]: result})\n", "id": "3759316", "language": "Python", "matching_score": 4.36599063873291, "max_stars_count": 0, "path": "tests/old_reference_code.py" }, { "content": "\"\"\"New style (fast) tag count annos\n\nUse these for new projects.\n\n\"\"\"\nfrom mbf_genomics.annotator import Annotator\nfrom typing import Dict, List\nfrom pypipegraph import Job\nfrom mbf_genomics import DelayedDataFrame\nimport numpy as np\nimport pypipegraph as ppg\nimport hashlib\nimport pandas as pd\nimport mbf_r\nimport rpy2.robjects as ro\nimport rpy2.robjects.numpy2ri as numpy2ri\nfrom pathlib import Path\nfrom dppd import dppd\nimport dppd_plotnine # noqa:F401\nfrom mbf_qualitycontrol import register_qc, QCCollectingJob, qc_disabled\nfrom mbf_genomics.util import (\n parse_a_or_c_to_plot_name,\n parse_a_or_c_to_column,\n parse_a_or_c_to_anno,\n)\nfrom pandas import DataFrame\n\ndp, X = dppd()\n\n\n# ## Base classes and strategies - skip these if you just care about using TagCount annotators\nclass _CounterStrategyBase:\n cores_needed = 1\n\n def extract_lookup(self, data):\n \"\"\"Adapter for count strategies that have different outputs\n (e.g. 
one-hashmap-unstranded or two-hashmaps-one-forward-one-reversed)\n \"\"\"\n return data\n\n\nclass CounterStrategyStrandedRust(_CounterStrategyBase):\n cores_needed = -1\n name = \"stranded\"\n\n def __init__(self):\n self.disable_sanity_check = False\n\n def count_reads(\n self,\n interval_strategy,\n genome,\n bam_filename,\n bam_index_name,\n reverse=False,\n dump_matching_reads_filename=None,\n ):\n # bam_filename = bamfil\n\n intervals = interval_strategy._get_interval_tuples_by_chr(genome)\n gene_intervals = IntervalStrategyGene()._get_interval_tuples_by_chr(genome)\n from mbf_bam import count_reads_stranded\n\n if dump_matching_reads_filename:\n dump_matching_reads_filename = str(dump_matching_reads_filename)\n\n res = count_reads_stranded(\n bam_filename,\n bam_index_name,\n intervals,\n gene_intervals,\n matching_reads_output_bam_filename=dump_matching_reads_filename,\n )\n self.sanity_check(res, bam_filename)\n return res\n\n def sanity_check(self, foward_and_reverse, bam_filename):\n if self.disable_sanity_check:\n return\n error_count = 0\n forward, reverse = foward_and_reverse\n for gene_stable_id, forward_count in forward.items():\n reverse_count = reverse.get(gene_stable_id, 0)\n if (reverse_count > 100) and (reverse_count > forward_count * 1.1):\n error_count += 1\n if error_count > 0.1 * len(forward):\n raise ValueError(\n \"Found at least %.2f%% of genes to have a reverse read count (%s) \"\n \"above 110%% of the exon read count (and at least 100 tags). \"\n \"This indicates that this lane (%s) should have been reversed before alignment. \"\n \"Set reverse_reads=True on your Lane object\"\n % (\n 100.0 * error_count / len(forward),\n self.__class__.__name__,\n bam_filename,\n )\n )\n\n def extract_lookup(self, data):\n \"\"\"Adapter for count strategies that have different outputs\n (e.g. 
one-hashmap-unstranded or two-hashmaps-one-forward-one-reversed)\n \"\"\"\n return data[0]\n\n\nclass CounterStrategyUnstrandedRust(_CounterStrategyBase):\n cores_needed = -1\n name = \"unstranded\"\n\n def count_reads(\n self,\n interval_strategy,\n genome,\n bam_filename,\n bam_index_name,\n reverse=False,\n dump_matching_reads_filename=None,\n ):\n # bam_filename = bamfil\n if dump_matching_reads_filename:\n raise ValueError(\n \"dump_matching_reads_filename not supoprted on this Counter\"\n )\n\n intervals = interval_strategy._get_interval_tuples_by_chr(genome)\n gene_intervals = IntervalStrategyGene()._get_interval_tuples_by_chr(genome)\n # chr -> [gene_id, strand, [start], [stops]\n from mbf_bam import count_reads_unstranded\n\n res = count_reads_unstranded(\n bam_filename, bam_index_name, intervals, gene_intervals\n )\n return res\n\n\nclass _IntervalStrategy:\n def get_interval_lengths_by_gene(self, genome):\n by_chr = self._get_interval_tuples_by_chr(genome)\n length_by_gene = {}\n for chr, tups in by_chr.items():\n for tup in tups: # stable_id, strand, [starts], [stops]\n gene_stable_id = tup[0]\n length = 0\n for start, stop in zip(tup[2], tup[3]):\n length += stop - start\n length_by_gene[gene_stable_id] = length\n return length_by_gene\n\n def _get_interval_tuples_by_chr(self, genome): # pragma: no cover\n raise NotImplementedError()\n\n def get_deps(self):\n return []\n\n\nclass IntervalStrategyGenomicRegion(_IntervalStrategy):\n \"\"\"Used internally by _FastTagCounterGR\"\"\"\n\n def __init__(self, gr):\n self.gr = gr\n self.name = f\"GR_{gr.name}\"\n\n def _get_interval_tuples_by_chr(self, genome):\n result = {chr: [] for chr in genome.get_chromosome_lengths()}\n if self.gr.genome != genome: # pragma: no cover\n raise ValueError(\"Mismatched genomes\")\n df = self.gr.df\n if not \"strand\" in df.columns:\n df = df.assign(strand=1)\n df = df[[\"chr\", \"start\", \"stop\", \"strand\"]]\n if df.index.duplicated().any():\n raise ValueError(\"index must be unique\")\n for tup in df.itertuples():\n result[tup.chr].append((str(tup[0]), tup.strand, [tup.start], [tup.stop]))\n return result\n\n\nclass IntervalStrategyGene(_IntervalStrategy):\n \"\"\"Count from TSS to TES\"\"\"\n\n name = \"gene\"\n\n def _get_interval_tuples_by_chr(self, genome):\n result = {chr: [] for chr in genome.get_chromosome_lengths()}\n gene_info = genome.df_genes\n for tup in gene_info[[\"chr\", \"start\", \"stop\", \"strand\"]].itertuples():\n result[tup.chr].append((tup[0], tup.strand, [tup.start], [tup.stop]))\n return result\n\n\nclass IntervalStrategyExon(_IntervalStrategy):\n \"\"\"count all exons\"\"\"\n\n name = \"exon\"\n\n def _get_interval_tuples_by_chr(self, genome):\n result = {chr: [] for chr in genome.get_chromosome_lengths()}\n for gene in genome.genes.values():\n exons = gene.exons_merged\n result[gene.chr].append(\n (gene.gene_stable_id, gene.strand, list(exons[0]), list(exons[1]))\n )\n return result\n\n\nclass IntervalStrategyIntron(_IntervalStrategy):\n \"\"\"count all introns\"\"\"\n\n name = \"intron\"\n\n def _get_interval_tuples_by_chr(self, genome):\n result = {chr: [] for chr in genome.get_chromosome_lengths()}\n for gene in genome.genes.values():\n exons = gene.introns_strict\n result[gene.chr].append(\n (gene.gene_stable_id, gene.strand, list(exons[0]), list(exons[1]))\n )\n return result\n\n\nclass IntervalStrategyExonSmart(_IntervalStrategy):\n \"\"\"For protein coding genes: count only in exons of protein-coding transcripts.\n For other genes: count all exons\"\"\"\n\n name 
= \"exonsmart\"\n\n def _get_interval_tuples_by_chr(self, genome):\n result = {chr: [] for chr in genome.get_chromosome_lengths()}\n for g in genome.genes.values():\n e = g.exons_protein_coding_merged\n if len(e[0]) == 0:\n e = g.exons_merged\n result[g.chr].append((g.gene_stable_id, g.strand, list(e[0]), list(e[1])))\n return result\n\n\n# Now the actual tag count annotators\nclass TagCountCommonQC:\n def register_qc(self, genes):\n if not qc_disabled():\n self.register_qc_distribution(genes)\n self.register_qc_pca(genes)\n # self.register_qc_cummulative(genes)\n\n def register_qc_distribution(self, genes):\n output_filename = genes.result_dir / self.qc_folder / \"read_distribution.png\"\n output_filename.parent.mkdir(exist_ok=True)\n\n def plot(\n output_filename,\n elements,\n qc_distribution_scale_y_name=self.qc_distribution_scale_y_name,\n ):\n df = genes.df\n df = dp(df).select({x.aligned_lane.name: x.columns[0] for x in elements}).pd\n if len(df) == 0:\n df = pd.DataFrame({\"x\": [0], \"y\": [0], \"text\": \"no data\"})\n dp(df).p9().add_text(\"x\", \"y\", \"text\").render(output_filename).pd\n else:\n plot_df = dp(df).melt(var_name=\"sample\", value_name=\"count\").pd\n\n plot = dp(plot_df).p9().theme_bw()\n print(df)\n\n # df.to_pickle(output_filename + '.pickle')\n if ((df > 0).sum(axis=0) > 1).any() and len(df) > 1:\n # plot = plot.geom_violin(\n # dp.aes(x=\"sample\", y=\"count\"), width=0.5, bw=0.1\n # )\n pass # oh so slow as of 20201019\n if len(plot_df[\"sample\"].unique()) > 1:\n plot = plot.annotation_stripes(fill_range=True)\n if (plot_df[\"count\"] > 0).any():\n # can't have a log boxplot with all nans (log(0))\n plot = plot.scale_y_continuous(\n trans=\"log10\",\n name=qc_distribution_scale_y_name,\n breaks=[1, 10, 100, 1000, 10000, 100_000, 1e6, 1e7],\n )\n\n return (\n plot.add_boxplot(\n x=\"sample\", y=\"count\", _width=0.1, _fill=None, _color=\"blue\"\n )\n .turn_x_axis_labels()\n .title(\"Raw read distribution\")\n .hide_x_axis_title()\n .render_args(limitsize=False)\n .render(output_filename, width=0.2 * len(elements) + 1, height=4)\n )\n\n return register_qc(\n QCCollectingJob(output_filename, plot)\n .depends_on(genes.add_annotator(self))\n .add(self)\n )\n\n def register_qc_pca(self, genes):\n output_filename = genes.result_dir / self.qc_folder / \"pca.png\"\n\n def plot(output_filename, elements):\n import sklearn.decomposition as decom\n\n if len(elements) == 1:\n xy = np.array([[0], [0]]).transpose()\n title = \"PCA %s - fake / single sample\" % genes.name\n else:\n pca = decom.PCA(n_components=2, whiten=False)\n data = genes.df[[x.columns[0] for x in elements]]\n data -= data.min() # min max scaling 0..1\n data /= data.max()\n data = data[~pd.isnull(data).any(axis=1)] # can' do pca on NAN values\n if len(data):\n pca.fit(data.T)\n xy = pca.transform(data.T)\n title = \"PCA %s\\nExplained variance: x %.2f%%, y %.2f%%\" % (\n genes.name,\n pca.explained_variance_ratio_[0] * 100,\n pca.explained_variance_ratio_[1] * 100,\n )\n else:\n xy = np.array(\n [[0] * len(elements), [0] * len(elements)]\n ).transpose()\n title = \"PCA %s - fake / no rows\" % genes.name\n\n plot_df = pd.DataFrame(\n {\"x\": xy[:, 0], \"y\": xy[:, 1], \"label\": [x.plot_name for x in elements]}\n )\n print(plot_df)\n (\n dp(plot_df)\n .p9()\n .theme_bw()\n .add_scatter(\"x\", \"y\")\n .add_text(\n \"x\",\n \"y\",\n \"label\",\n # cool, this can go into an endless loop...\n # _adjust_text={\n # \"expand_points\": (2, 2),\n # \"arrowprops\": {\"arrowstyle\": \"->\", \"color\": \"red\"},\n 
# },\n )\n .scale_color_many_categories()\n .title(title)\n .render(output_filename, width=8, height=6)\n )\n\n return register_qc(\n QCCollectingJob(output_filename, plot)\n .depends_on(genes.add_annotator(self))\n .add(self)\n )\n\n\nclass _FastTagCounter(Annotator, TagCountCommonQC):\n def __init__(\n self,\n aligned_lane,\n count_strategy,\n interval_strategy,\n column_name,\n column_desc,\n dump_matching_reads_filename=None,\n ):\n if not hasattr(aligned_lane, \"get_bam\"):\n raise ValueError(\"_FastTagCounter only accepts aligned lanes!\")\n self.aligned_lane = aligned_lane\n self.genome = self.aligned_lane.genome\n self.count_strategy = count_strategy\n self.interval_strategy = interval_strategy\n self.columns = [(column_name % (self.aligned_lane.name,)).strip()]\n self.cache_name = (\n \"FT_%s_%s\" % (count_strategy.name, interval_strategy.name)\n + \"_\"\n + hashlib.md5(self.columns[0].encode(\"utf-8\")).hexdigest()\n )\n self.column_properties = {self.columns[0]: {\"description\": column_desc}}\n self.vid = aligned_lane.vid\n self.cores_needed = count_strategy.cores_needed\n self.plot_name = self.aligned_lane.name\n self.qc_folder = f\"{self.count_strategy.name}_{self.interval_strategy.name}\"\n self.qc_distribution_scale_y_name = \"raw counts\"\n self.dump_matching_reads_filename = dump_matching_reads_filename\n\n def calc(self, df):\n if ppg.inside_ppg():\n data = self._data\n else:\n data = self.calc_data()\n lookup = self.count_strategy.extract_lookup(data)\n result = []\n for gene_stable_id in df[\"gene_stable_id\"]:\n result.append(lookup.get(gene_stable_id, 0))\n result = np.array(result, dtype=np.float)\n return pd.Series(result)\n\n def deps(self, _genes):\n return [\n self.load_data(),\n ppg.ParameterInvariant(self.cache_name, self.dump_matching_reads_filename),\n ppg.FunctionInvariant(\n self.cache_name + \"_count_reads\",\n self.count_strategy.__class__.count_reads,\n )\n # todo: actually, this should be a declared file\n ]\n\n def calc_data(self):\n bam_file, bam_index_name = self.aligned_lane.get_bam_names()\n return self.count_strategy.count_reads(\n self.interval_strategy,\n self.genome,\n bam_file,\n bam_index_name,\n dump_matching_reads_filename=self.dump_matching_reads_filename,\n )\n\n def load_data(self):\n cf = Path(ppg.util.global_pipegraph.cache_folder) / \"FastTagCounters\"\n cf.mkdir(exist_ok=True)\n return (\n ppg.CachedAttributeLoadingJob(\n cf / self.cache_name, self, \"_data\", self.calc_data\n )\n .depends_on(self.aligned_lane.load())\n .use_cores(-1)\n )\n\n\nclass _FastTagCounterGR(Annotator):\n def __init__(self, aligned_lane, count_strategy, column_name, column_desc):\n if not hasattr(aligned_lane, \"get_bam\"):\n raise ValueError(\"_FastTagCounter only accepts aligned lanes!\")\n self.aligned_lane = aligned_lane\n self.genome = self.aligned_lane.genome\n self.count_strategy = count_strategy\n self.columns = [(column_name % (self.aligned_lane.name,)).strip()]\n self.cache_name = (\n \"FT_%s_%s\" % (count_strategy.name, \"on_gr\")\n + \"_\"\n + hashlib.md5(self.columns[0].encode(\"utf-8\")).hexdigest()\n )\n self.column_properties = {self.columns[0]: {\"description\": column_desc}}\n self.vid = aligned_lane.vid\n self.cores_needed = count_strategy.cores_needed\n self.plot_name = self.aligned_lane.name\n # self.qc_folder = f\"{self.count_strategy.name}_{self.interval_strategy.name}\"\n # self.qc_distribution_scale_y_name = \"raw counts\"\n\n def calc(self, df):\n if ppg.inside_ppg():\n data = self._data\n else:\n data = self.calc_data()\n lookup = 
self.count_strategy.extract_lookup(data)\n result = []\n for idx in df.index:\n result.append(lookup.get(str(idx), 0))\n result = np.array(result, dtype=np.float)\n return pd.Series(result)\n\n def deps(self, gr):\n return [self.load_data(gr)]\n\n def calc_data(self, gr):\n def inner():\n bam_file, bam_index_name = self.aligned_lane.get_bam_names()\n return self.count_strategy.count_reads(\n IntervalStrategyGenomicRegion(gr), self.genome, bam_file, bam_index_name\n )\n\n return inner\n\n def load_data(self, gr):\n cf = gr.cache_dir\n cf.mkdir(exist_ok=True)\n return (\n ppg.CachedAttributeLoadingJob(\n cf / self.cache_name, self, \"_data\", self.calc_data(gr)\n )\n .depends_on(self.aligned_lane.load())\n .depends_on(gr.load())\n .use_cores(-1) # should be count_strategy cores needed, no?\n )\n\n\n#\n# ## Raw tag count annos for analysis usage\n\n\nclass ExonSmartStrandedRust(_FastTagCounter):\n def __init__(self, aligned_lane, dump_matching_reads_filename=None):\n _FastTagCounter.__init__(\n self,\n aligned_lane,\n CounterStrategyStrandedRust(),\n IntervalStrategyExonSmart(),\n \"Exon, protein coding, stranded smart tag count %s\",\n \"Tag count inside exons of protein coding transcripts (all if no protein coding transcripts) exons, correct strand only\",\n dump_matching_reads_filename,\n )\n\n\nclass ExonSmartUnstrandedRust(_FastTagCounter):\n def __init__(self, aligned_lane):\n _FastTagCounter.__init__(\n self,\n aligned_lane,\n CounterStrategyUnstrandedRust(),\n IntervalStrategyExonSmart(),\n \"Exon, protein coding, unstranded smart tag count %s\",\n \"Tag count inside exons of protein coding transcripts (all if no protein coding transcripts) both strands\",\n )\n\n\nclass ExonStrandedRust(_FastTagCounter):\n def __init__(self, aligned_lane, dump_matching_reads_filename=None):\n _FastTagCounter.__init__(\n self,\n aligned_lane,\n CounterStrategyStrandedRust(),\n IntervalStrategyExon(),\n \"Exon, protein coding, stranded tag count %s\",\n \"Tag count inside exons of protein coding transcripts (all if no protein coding transcripts) exons, correct strand only\",\n dump_matching_reads_filename,\n )\n\n\nclass ExonUnstrandedRust(_FastTagCounter):\n def __init__(self, aligned_lane):\n _FastTagCounter.__init__(\n self,\n aligned_lane,\n CounterStrategyUnstrandedRust(),\n IntervalStrategyExon(),\n \"Exon, protein coding, unstranded tag count %s\",\n \"Tag count inside exons of protein coding transcripts (all if no protein coding transcripts) both strands\",\n )\n\n\nclass GeneStrandedRust(_FastTagCounter):\n def __init__(self, aligned_lane):\n _FastTagCounter.__init__(\n self,\n aligned_lane,\n CounterStrategyStrandedRust(),\n IntervalStrategyGene(),\n \"Gene, stranded tag count %s\",\n \"Tag count inside gene body (tss..tes), correct strand only\",\n )\n\n\nclass GeneUnstrandedRust(_FastTagCounter):\n def __init__(self, aligned_lane):\n _FastTagCounter.__init__(\n self,\n aligned_lane,\n CounterStrategyUnstrandedRust(),\n IntervalStrategyGene(),\n \"Gene unstranded tag count %s\",\n \"Tag count inside gene body (tss..tes), both strands\",\n )\n\n\ndef GRUnstrandedRust(aligned_lane):\n return _FastTagCounterGR(\n aligned_lane,\n CounterStrategyUnstrandedRust(),\n \"Tag count %s\",\n \"Tag count inside region, both strands\",\n )\n\n\ndef GRStrandedRust(aligned_lane):\n return _FastTagCounterGR(\n aligned_lane,\n CounterStrategyStrandedRust(),\n \"Tag count %s\",\n \"Tag count inside region, stranded\",\n )\n\n\n# we are keeping the python ones for now as reference 
implementations\nGeneUnstranded = GeneUnstrandedRust\nGeneStranded = GeneStrandedRust\nExonStranded = ExonStrandedRust\nExonUnstranded = ExonUnstrandedRust\nExonSmartStranded = ExonSmartStrandedRust\nExonSmartUnstranded = ExonSmartUnstrandedRust\n\n# ## Normalizing annotators - convert raw tag counts into something normalized\n\n\nclass _NormalizationAnno(Annotator, TagCountCommonQC):\n def __init__(self, base_column_spec):\n from ..util import parse_a_or_c_to_anno, parse_a_or_c_to_column\n\n self.raw_anno = parse_a_or_c_to_anno(base_column_spec)\n self.raw_column = parse_a_or_c_to_column(base_column_spec)\n if self.raw_anno is not None:\n self.genome = self.raw_anno.genome\n self.vid = getattr(self.raw_anno, \"vid\", None)\n self.aligned_lane = getattr(self.raw_anno, \"aligned_lane\", None)\n else:\n self.genome = None\n self.vid = None\n self.aligned_lane = None\n self.columns = [self.raw_column + \" \" + self.name]\n self.cache_name = (\n self.__class__.__name__\n + \"_\"\n + hashlib.md5(self.columns[0].encode(\"utf-8\")).hexdigest()\n )\n if self.raw_anno is not None:\n self.plot_name = getattr(self.raw_anno, \"plot_name\", self.raw_column)\n if hasattr(self.raw_anno, \"count_strategy\"):\n if hasattr(self.raw_anno, \"interval_strategy\"):\n iv_name = self.raw_anno.interval_strategy.name\n else:\n iv_name = \"-\"\n self.qc_folder = f\"normalized_{self.name}_{self.raw_anno.count_strategy.name}_{iv_name}\"\n else:\n self.qc_folder = f\"normalized_{self.name}\"\n else:\n self.plot_name = parse_a_or_c_to_plot_name(base_column_spec)\n self.qc_folder = f\"normalized_{self.name}\"\n self.qc_distribution_scale_y_name = self.name\n\n def dep_annos(self):\n if self.raw_anno is None:\n return []\n else:\n return [self.raw_anno]\n\n\nclass NormalizationCPM(_NormalizationAnno):\n \"\"\"Normalize to 1e6 by taking the sum of all genes\"\"\"\n\n def __init__(self, base_column_spec):\n self.name = \"CPM\"\n self.normalize_to = 1e6\n super().__init__(base_column_spec)\n self.column_properties = {\n self.columns[0]: {\n \"description\": \"Tag count inside protein coding (all if no protein coding transcripts) exons, normalized to 1e6 across all genes\"\n }\n }\n\n def calc(self, df):\n raw_counts = df[self.raw_column]\n total = max(1, float(raw_counts.sum())) # avoid division by 0\n result = raw_counts * (self.normalize_to / total)\n return pd.Series(result)\n\n\nclass NormalizationTPM(_NormalizationAnno):\n \"\"\"Normalize to transcripts per million, ie.\n count / length * (1e6 / (sum_i(count_/length_i)))\n\n \"\"\"\n\n def __init__(self, base_column_spec, interval_strategy=None):\n self.name = \"TPM\"\n self.normalize_to = 1e6\n super().__init__(base_column_spec)\n if self.raw_anno is None: # pragma: no cover\n if interval_strategy is None: # pragma: no cover\n raise ValueError(\n \"TPM normalization needs to know the intervals used. 
Either base of a FastTagCount annotator or pass in an interval strategy\"\n )\n self.interval_strategy = interval_strategy\n else:\n self.interval_strategy = self.raw_anno.interval_strategy\n self.column_properties = {\n self.columns[0]: {\"description\": \"transcripts per million\"}\n }\n\n def calc(self, df):\n raw_counts = df[self.raw_column]\n length_by_gene = self.interval_strategy.get_interval_lengths_by_gene(\n self.genome\n )\n result = np.zeros(raw_counts.shape, float)\n for ii, gene_stable_id in enumerate(df[\"gene_stable_id\"]):\n result[ii] = raw_counts.iloc[ii] / length_by_gene[gene_stable_id]\n total = float(result[~pd.isnull(result)].sum())\n factor = 1e6 / total\n result = result * factor\n return pd.DataFrame({self.columns[0]: result})\n\n\nclass NormalizationFPKM(Annotator):\n def __init__(self, raw_anno):\n raise NotImplementedError(\n \"FPKM is a bad thing to use. It is not supported by mbf\"\n )\n\n\nclass Salmon(Annotator):\n \"\"\"Add salmon gene level estimation calculated on a raw Sample\"\"\"\n\n def __init__(\n self,\n raw_lane,\n prefix=\"Salmon\",\n options={\n # \"--validateMappings\": None, this always get's set by aligners.Salmon\n \"--gcBias\": None,\n \"--seqBias\": None,\n },\n libtype=\"A\",\n accepted_biotypes=None, # set((\"protein_coding\", \"lincRNA\")),\n salmon_version=\"_last_used\",\n ):\n self.raw_lane = raw_lane\n self.options = options.copy()\n self.libtype = libtype\n self.accepted_biotypes = accepted_biotypes\n self.salmon_version = salmon_version\n self.columns = [\n f\"{prefix} TPM {raw_lane.name}\",\n f\"{prefix} NumReads {raw_lane.name}\",\n ]\n self.vid = self.raw_lane.vid\n\n def deps(self, ddf):\n import mbf_externals\n\n return mbf_externals.aligners.Salmon(\n self.accepted_biotypes, version=self.salmon_version\n ).run_quant_on_raw_lane(\n self.raw_lane, ddf.genome, self.libtype, self.options, gene_level=True\n )\n\n def calc_ddf(self, ddf):\n quant_path = Path(self.deps(ddf).job_id).parent / \"quant.genes.sf\"\n in_df = pd.read_csv(quant_path, sep=\"\\t\").set_index(\"Name\")[[\"TPM\", \"NumReads\"]]\n in_df.columns = self.columns\n res = in_df.reindex(ddf.df.gene_stable_id)\n res.index = ddf.df.index\n return res\n\n\nclass TMM(Annotator):\n \"\"\"\n Calculates the TMM normalization from edgeR on some raw counts.\n\n Returns log2-transformed cpms corrected by the TMM-estimated effective\n library sizes. In addition, batch correction using limma might be performed,\n if a dictionary indicatin the batches is given.\n\n Parameters\n ----------\n raw : Dict[str, Annotator]\n Dictionary of raw count annotator for all samples.\n dependencies : List[Job], optional\n List of additional dependencies, by default [].\n samples_to_group : Dict[str, str], optional\n A dictionary sample name to group name, by default None.\n batches. 
: Dict[str, str]\n Dictionary indicating batch effects.\n \"\"\"\n\n def __init__(\n self,\n raw: Dict[str, Annotator],\n dependencies: List[Job] = None,\n samples_to_group: Dict[str, str] = None,\n batches: Dict[str, str] = None,\n suffix: str = \"\",\n ):\n \"\"\"Constructor.\"\"\"\n self.sample_column_lookup = {}\n if batches is not None:\n for sample_name in raw:\n self.sample_column_lookup[\n parse_a_or_c_to_column(raw[sample_name])\n ] = f\"{sample_name}{suffix} TMM (batch removed)\"\n else:\n for sample_name in raw:\n self.sample_column_lookup[\n parse_a_or_c_to_column(raw[sample_name])\n ] = f\"{sample_name}{suffix} TMM\"\n self.columns = list(self.sample_column_lookup.values())\n self.dependencies = []\n if dependencies is not None:\n self.dependencies = dependencies\n self.raw = raw\n self.samples_to_group = samples_to_group\n self.cache_name = hashlib.md5(self.columns[0].encode(\"utf-8\")).hexdigest()\n self.batch = None\n if batches is not None:\n self.batch = [batches[sample_name] for sample_name in raw]\n\n def calc_ddf(self, ddf: DelayedDataFrame) -> DataFrame:\n \"\"\"\n Calculates TMM columns to be added to the ddf instance.\n\n TMM columns are calculated using edgeR with all samples given in self.raw.\n\n Parameters\n ----------\n ddf : DelayedDataFrame\n The DelayedDataFrame instance to be annotated.\n\n Returns\n -------\n DataFrame\n A dataframe containing TMM normalized columns for each\n \"\"\"\n raw_columns = [\n parse_a_or_c_to_column(self.raw[sample_name]) for sample_name in self.raw\n ]\n \n df = ddf.df[raw_columns]\n df_res = self.call_edgeR(df)\n assert (df_res.columns == df.columns).all()\n rename = {}\n before = df_res.columns.copy()\n for col in df_res.columns:\n rename[col] = self.sample_column_lookup[col]\n df_res = df_res.rename(columns=rename, errors='raise')\n if (df_res.columns == before).all():\n # there is a bug in pands 1.3.4 that prevents renaming\n # to work when multiindices / tuple named columns are involved\n # so we have to build it by hand, I suppose\n df_res = pd.DataFrame({v: df_res[k] for (k,v) in rename.items()})\n return df_res\n\n def call_edgeR(self, df_counts: DataFrame) -> DataFrame:\n \"\"\"\n Call to edgeR via r2py to get TMM (trimmed mean of M-values)\n normalization for raw counts.\n\n Prepare the edgeR input in python and call edgeR calcNormFactors via\n r2py. 
The TMM normalized values are returned in a DataFrame which\n is converted back to pandas DataFrame via r2py.\n\n Parameters\n ----------\n df_counts : DataFrame\n The dataframe containing the raw counts.\n\n Returns\n -------\n DataFrame\n A dataframe with TMM values (trimmed mean of M-values).\n \"\"\"\n ro.r(\"library(edgeR)\")\n ro.r(\"library(base)\")\n df_input = df_counts\n columns = df_input.columns\n to_df = {\"lib.size\": df_input.sum(axis=0).values}\n if self.samples_to_group is not None:\n to_df[\"group\"] = [\n self.samples_to_group[sample_name]\n for sample_name in self.samples_to_group\n ]\n if self.batch is not None:\n to_df[\"batch\"] = self.batch\n df_samples = pd.DataFrame(to_df)\n df_samples[\"lib.size\"] = df_samples[\"lib.size\"].astype(int)\n r_counts = mbf_r.convert_dataframe_to_r(df_input)\n r_samples = mbf_r.convert_dataframe_to_r(df_samples)\n y = ro.r(\"DGEList\")(\n counts=r_counts,\n samples=r_samples,\n )\n # apply TMM normalization\n y = ro.r(\"calcNormFactors\")(y) # default is TMM\n logtmm = ro.r(\n \"\"\"function(y){\n cpm(y, log=TRUE, prior.count=5)\n }\"\"\"\n )(\n y\n ) # apparently removeBatchEffects works better on log2-transformed values\n if self.batch is not None:\n batches = np.array(self.batch)\n batches = numpy2ri.py2rpy(batches)\n logtmm = ro.r(\n \"\"\"\n function(logtmm, batch) {\n tmm = removeBatchEffect(logtmm,batch=batch)\n }\n \"\"\"\n )(logtmm=logtmm, batch=batches)\n cpm = ro.r(\"data.frame\")(logtmm)\n df = mbf_r.convert_dataframe_from_r(cpm)\n df = df.reset_index(drop=True)\n df.columns = columns\n return df\n\n def deps(self, ddf) -> List[Job]:\n \"\"\"Return ppg.jobs\"\"\"\n return self.dependencies\n\n def dep_annos(self) -> List[Annotator]:\n \"\"\"Return other annotators\"\"\"\n return [parse_a_or_c_to_anno(x) for x in self.raw.values()]\n", "id": "1729979", "language": "Python", "matching_score": 6.305651664733887, "max_stars_count": 0, "path": "src/mbf_genomics/genes/anno_tag_counts.py" }, { "content": "from ..annotator import Annotator\nimport pandas as pd\n\n\nclass SummitBase(Annotator):\n pass\n\n\nclass SummitMiddle(SummitBase):\n \"\"\"Place a summit right in the center (ie. 
a fake summit\"\"\"\n\n columns = [\"summit middle\"]\n column_properties = {\n columns[0]: {\n \"description\": \"Fake summit, just the center of the region (given relative to start)\"\n }\n }\n\n def calc(self, df):\n res = []\n for dummy_idx, row in df.iterrows():\n res.append((row[\"stop\"] + row[\"start\"]) / 2 - row[\"start\"])\n return pd.Series(res)\n\n\n# from ..genes.anno_tag_counts import GRUnstrandedRust as TagCount\n# from ..genes.anno_tag_counts import GRStrandedRust as TagCountStranded\nfrom ..genes.anno_tag_counts import _NormalizationAnno\n\n\nclass NormalizationCPM(_NormalizationAnno):\n \"\"\"Normalize to 1e6 by taking the sum of all genes\"\"\"\n\n def __init__(self, base_column_spec):\n self.name = \"CPM(lane)\"\n self.normalize_to = 1e6\n super().__init__(base_column_spec)\n self.column_properties = {\n self.columns[0]: {\"description\": \"Tag count normalized to lane tag count\"}\n }\n\n def calc(self, df):\n raw_counts = df[self.raw_column]\n total = max(\n 1, sum((x.mapped for x in self.raw_anno.aligned_lane.get_bam().get_index_statistics()))\n )\n result = raw_counts * (self.normalize_to / total)\n return pd.Series(result)\n", "id": "8859577", "language": "Python", "matching_score": 0.5099238753318787, "max_stars_count": 0, "path": "src/mbf_genomics/regions/annotators.py" }, { "content": "import pandas as pd\nimport csv\nimport re\nimport collections\nfrom pathlib import Path\n\n\ndef fix_chromosome(c):\n c = c[3:]\n if c == \"M\":\n return \"MT\"\n if len(c) <= 2:\n return c\n if \"gl\" in c:\n return c[c.find(\"gl\") :].upper() + \".1\"\n return \"remove\"\n\n\ndef parse_attrs(attrs):\n attrs = re.findall('([^ ]+) \"([^\"]+)\";', attrs)\n return dict(attrs)\n\n\ndef format_attrs(attrs):\n return \" \".join(['%s \"%s\"; ' % x for x in attrs.items()])\n\n\nfor input_filename in Path(__file__).parent.glob(\"ribosomal_genes_*.gtf.gz\"):\n if \"full\" in input_filename.name:\n continue\n print(\"parsing\", input_filename)\n df = pd.read_csv(input_filename, header=None, sep=\"\\t\", comment=\"#\")\n df.columns = [\n \"chr\",\n \"source\", # constant\n \"kind\", # constant\n \"start\", # uscs database data is 0 based...\n \"stop\",\n \"score\",\n \"strand\",\n \"ignored\",\n \"annotation\",\n ]\n df[\"chr\"] = [fix_chromosome(x) for x in df[\"chr\"]]\n\n if (df.kind != \"exon\").any():\n raise ValueError(\"non exon found - unexpected\")\n # ok, this file is all exons, we need to create one gene, one transcript per row basicially,\n # no splicing here,\n result = []\n dedup = collections.Counter()\n for idx, row in df.iterrows():\n attrs = parse_attrs(row[\"annotation\"])\n gene_id = attrs[\"gene_id\"] + \"_\" + str(dedup[attrs[\"gene_id\"]])\n dedup[attrs[\"gene_id\"]] += 1\n gene_name = gene_id\n biotype = \"tRNA\" if \"tRNA\" in gene_id else \"rRNA\"\n attrs = {\n \"gene_id\": gene_id,\n \"gene_name\": gene_name,\n \"gene_source\": \"ucsc_rmsk\",\n \"gene_biotype\": biotype,\n }\n gene = {\n \"chr\": row[\"chr\"],\n \"source\": \"ucsc_rmsk\",\n \"kind\": \"gene\",\n \"start\": row[\"start\"],\n \"stop\": row[\"start\"],\n \"score\": row[\"score\"],\n \"strand\": row[\"strand\"],\n \"ignored\": \".\",\n \"annotation\": format_attrs(attrs),\n }\n result.append(gene)\n\n attrs.update(\n {\n \"transcript_id\": gene_id + \"_tr\",\n \"transcript_name\": gene_id,\n \"transcript_source\": \"ucsc_rmsk\",\n \"transcript_biotype\": biotype,\n }\n )\n transcript = {\n \"chr\": row[\"chr\"],\n \"source\": \"ucsc_rmsk\",\n \"kind\": \"transcript\",\n \"start\": row[\"start\"],\n 
\"stop\": row[\"start\"],\n \"score\": row[\"score\"],\n \"strand\": row[\"strand\"],\n \"ignored\": \".\",\n \"annotation\": format_attrs(attrs),\n }\n result.append(transcript)\n\n attrs.update({\"exon_id\": gene_id + \"_exon\"})\n exon = {\n \"chr\": row[\"chr\"],\n \"source\": \"ucsc_rmsk\",\n \"kind\": \"trarnscript\",\n \"start\": row[\"start\"],\n \"stop\": row[\"start\"],\n \"score\": row[\"score\"],\n \"strand\": row[\"strand\"],\n \"ignored\": \".\",\n \"annotation\": format_attrs(attrs),\n }\n result.append(exon)\n output_df = pd.DataFrame(result)[df.columns]\n output_df.to_csv(\n input_filename.with_name(input_filename.name + \".full.gtf.gz\"),\n sep=\"\\t\",\n header=False,\n quoting=csv.QUOTE_NONE,\n index=False,\n )\n", "id": "10189716", "language": "Python", "matching_score": 1.548567295074463, "max_stars_count": 0, "path": "data/polish_ribosomal.py" }, { "content": "import numpy as np\nimport os\nimport pandas as pd\nimport io\n\nfrom .util import open_file\n\n\ndef wiggle_to_intervals(\n filenameOrFileObject, comment_char=None, chromosome_mangler=None\n):\n \"\"\"read a (variable/fixed step) wiggle file and turn it into a df of {chr, start,stop, score}.\n \"\"\"\n fo = open_file(filenameOrFileObject)\n d = fo.read()\n modes = set()\n if b\"variableStep\" in d:\n modes.add(\"variableStep\")\n if b\"fixedStep\" in d:\n modes.add(\"fixedStep\")\n if b\"Bed mode\" in d:\n modes.add(\"bed\")\n if len(modes) > 1:\n fo.seek(0, os.SEEK_SET)\n return wiggle_to_intervals_slow(\n filenameOrFileObject, comment_char, chromosome_mangler\n )\n lines = np.array(d.strip().split(b\"\\n\"))\n del d\n if comment_char:\n lines = lines[\n ~np.char.startswith(lines, \"track\")\n & ~np.char.startswith(lines, comment_char)\n ]\n else:\n lines = lines[~np.char.startswith(lines, b\"track\")]\n if len(modes) == 0:\n if len(lines[0].split()) == 4:\n modes.add(\"bed\")\n if len(modes) == 0:\n raise ValueError(\n \"Did not know how to handle %s, no mode found\" % filenameOrFileObject\n )\n mode = list(modes)[0]\n if chromosome_mangler is None:\n chromosome_mangler = lambda x: x # NOQA\n if mode == \"bed\":\n s = io.BytesIO(b\"\\n\".join(lines))\n recarray = np.loadtxt(\n s,\n dtype=[\n (\"chr\", \"|S50\"),\n (\"start\", np.int32),\n (\"stop\", np.uint32),\n (\"score\", np.float),\n ],\n )\n res = pd.DataFrame(\n {\n \"chr\": [chromosome_mangler(x) for x in recarray[\"chr\"]],\n \"start\": recarray[\"start\"],\n \"stop\": recarray[\"stop\"],\n \"score\": recarray[\"score\"],\n }\n )\n return res\n elif mode == \"variableStep\":\n borders = np.char.startswith(lines, \"variableStep\")\n # n umpy.array([x.startswith('variableStep') for x in lines],\n # dtype=np.bool)\n border_offsets = np.where(borders)[0]\n dfs_by_chr = []\n for ii in range(0, len(border_offsets) - 1):\n start = border_offsets[ii]\n stop = border_offsets[ii + 1]\n data_lines = lines[start + 1 : stop]\n row = lines[start]\n chr = row[row.find(b\"chrom=\") + len(b\"chrom=\") :]\n chr = chr[: chr.find(\" \")]\n chr = chromosome_mangler(chr)\n if row.find(b\"span\") != -1:\n span = int(row[row.find(b\"span=\") + len(b\"span=\") :])\n else:\n span = 1\n just_one_row = len(data_lines) == 1\n data_stream = io.StringIO(\"\\n\".join(data_lines) + \"\\n\")\n data_lines = np.loadtxt(data_stream)\n if just_one_row:\n data_lines = np.array([data_lines])\n try:\n starts = np.array(data_lines[:, 0], dtype=np.int)\n except IndexError:\n print(row)\n print(data_lines)\n print(data_lines.value)\n raise\n stops = starts + span\n scores = data_lines[:, 1]\n 
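# build one interval row per wiggle record for this chromosome: (chr, start, start + span, score)\n 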
chrs = np.array([chr] * len(scores), dtype=np.object)\n dfs_by_chr.append(\n pd.DataFrame(\n {\"chr\": chrs, \"start\": starts, \"stop\": stops, \"score\": scores}\n )\n )\n # .sort_by(('chr','start'), [True, True]) # we rely on the wiggle being sorte.d..\n return pd.concat(dfs_by_chr)\n elif mode == \"fixedStep\":\n borders = np.char.startswith(lines, \"fixedStep\")\n # borders = np.array([x.startswith('fixedStep') for x in lines], dtype=np.bool)\n border_offsets = np.where(borders)[0]\n dfs_by_chr = []\n for ii in range(0, len(border_offsets) - 1):\n data_lines = lines[start + 1 : stop]\n # parse header line...\n row = lines[start]\n chr = row[row.find(b\"chrom=\") + len(b\"chrom=\") :]\n chr = chr[: chr.find(\" \")]\n chr = chromosome_mangler(chr)\n if row.find(b\"span\") != -1:\n span = int(row[row.find(b\"span=\") + len(b\"span=\") :])\n else:\n raise ValueError(\"No span in fixed step?!\")\n if row.find(b\"start\") != -1:\n start = row[row.find(b\"start=\") + len(b\"start=\") :]\n start = int(start[: start.find(\" \")])\n else:\n raise ValueError(\"No start in fixed step?!\")\n if row.find(b\"step\") != -1:\n step = row[row.find(b\"step=\") + len(b\"step=\") :]\n step = int(step[: step.find(\" \")])\n else:\n raise ValueError(\"No step in fixed step?!\")\n if step != span:\n raise ValueError(\n \"Parser currently only supports step == span, and they were not equal\"\n )\n data_lines = io.StringIO(\"\\n\".join(data_lines))\n data_lines = np.loadtxt(data_lines)\n scores = data_lines\n starts = np.array([start + step * ii for ii in range(0, len(scores))])\n stops = stops + span\n chrs = [chr] * len(scores)\n dfs_by_chr.append(\n pd.DataFrame(\n {\"chr\": chrs, \"start\": starts, \"stop\": stops, \"score\": scores}\n )\n )\n return pd.concat(dfs_by_chr)\n else:\n raise NotImplementedError(\"Unknown Wiggle mode\")\n\n\ndef wiggle_to_intervals_slow(\n filenameOrFileObject, comment_char=None, chromosome_mangler=None\n):\n fo = open_file(filenameOrFileObject)\n result = {}\n mode = None\n chr = None\n span = None\n start = None\n stop = None\n lastScore = None\n data = {\"start\": [], \"stop\": [], \"score\": [], \"chr\": []}\n if chromosome_mangler is None:\n chromosome_mangler = lambda x: x # NOQA\n bytes_so_far = 0\n for row in fo:\n bytes_so_far += len(row)\n if (comment_char and row.startswith(comment_char)) or row.startswith(\"track\"):\n continue\n elif row.startswith(\"variableStep\"):\n mode = \"variable\"\n chr = row[row.find(\"chrom=\") + len(\"chrom=\") :]\n chr = chr[: chr.find(\" \")]\n if chr not in result:\n result[chr] = []\n if row.find(\"span\") != -1:\n span = row[row.find(\"span=\") + len(\"span=\") :]\n span = int(span)\n else:\n span = 1\n start = None\n stop = None\n lastScore = None\n continue\n elif row.startswith(\"fixedStep\"):\n mode = \"fixed\"\n chr = row[row.find(\"chrom=\") + len(\"chrom=\") :]\n chr = chr[: chr.find(\" \")]\n if chr not in result:\n result[chr] = []\n if row.find(\"span\") != -1:\n span = row[row.find(\"span=\") + len(\"span=\") :]\n span = int(span)\n else:\n raise ValueError(\"No span in fixed step?!\")\n if row.find(\"start\") != -1:\n start = row[row.find(\"start=\") + len(\"start=\") :]\n start = start[: start.find(\" \")]\n start = int(start)\n else:\n raise ValueError(\"No start in fixed step?!\")\n if row.find(\"step\") != -1:\n step = row[row.find(\"step=\") + len(\"step=\") :]\n step = step[: step.find(\" \")]\n step = int(step)\n else:\n raise ValueError(\"No step in fixed step?!\")\n if step != span:\n raise ValueError(\n 
\"Parser currently only supports step == span, and they were not equal\"\n )\n lastScore = None\n stop = start\n continue\n\n elif \"Bed format\" in row:\n mode = \"bed\"\n return _wiggle_with_bed_to_intervals(fo, \"\\t\")\n if (\n not mode\n ): # no mode set so far, perhaps set one now. Not 'continue', we need this row!\n if len(row.split(\"\\t\")) == 4:\n mode = \"defined\"\n # TODO: replace with dataframe csv reading in this case, should\n # be a bit faster\n # because the iterator reads in blocks...\n fo.seek(bytes_so_far - len(row), os.SEEK_SET)\n df = _wiggle_with_bed_to_intervals(fo, \"\\t\")\n df.convert_type(\"chr\", chromosome_mangler)\n return df\n elif len(row.split()) == 4:\n fo.seek(bytes_so_far - len(row), os.SEEK_SET)\n df = _wiggle_with_bed_to_intervals(fo, None)\n df.convert_type(\"chr\", chromosome_mangler)\n return df\n else:\n raise ValueError(\n \"no step mode definied in file. add variablestep/fixedstep to %s\"\n % fo.name\n )\n if mode == \"variable\":\n row = row.split(\"\\t\")\n pos = int(row[0])\n score = float(row[1])\n if score != lastScore:\n if start is not None:\n data[\"chr\"].append(chromosome_mangler(chr))\n data[\"start\"].append(start)\n data[\"stop\"].append(stop)\n data[\"score\"].append(lastScore)\n lastScore = score\n start = pos\n stop = pos + span\n else:\n stop = pos + span\n elif mode == \"fixed\":\n score = float(row.strip())\n if score != lastScore:\n if lastScore:\n data[\"chr\"].append(chromosome_mangler(chr))\n data[\"start\"].append(start)\n data[\"stop\"].append(stop)\n data[\"score\"].append(lastScore)\n start = stop + 1\n lastScore = score\n stop += step\n elif mode == \"defined\":\n row = row.split(\"\\t\")\n chr = row[0]\n start = int(row[1])\n stop = int(row[2])\n score = float(row[3])\n data[\"chr\"].append(chr)\n data[\"start\"].append(start)\n data[\"stop\"].append(stop)\n data[\"score\"].append(score)\n if mode == \"variable\" or mode == \"fixed\":\n data[\"chr\"].append(chromosome_mangler(chr))\n data[\"start\"].append(start)\n data[\"stop\"].append(stop)\n data[\"score\"].append(lastScore)\n return pd.DataFrame(data)\n\n\ndef _wiggle_with_bed_to_intervals(file_handle, separator):\n print(\"calling _wiggle_with_bed_to_intervals\")\n res = pd.read_csv.read(file_handle, header=None, sep=separator)\n res.rename_column(\"column0\", \"chr\")\n res.rename_column(\"column1\", \"start\")\n res.rename_column(\"column2\", \"stop\")\n res.rename_column(\"column3\", \"score\")\n return res\n\n pass\n", "id": "2455087", "language": "Python", "matching_score": 2.9878458976745605, "max_stars_count": 0, "path": "src/mbf_fileformats/wiggle.py" }, { "content": "\"\"\"sequence/chipseq formats\"\"\"\nimport io\nfrom .util import open_file, chunkify\nimport numpy\nimport pandas as pd\nimport os\nimport tempfile\nimport subprocess\n\n\ndef normalize_strand(x):\n if x == 1 or x == 0 or x == -1:\n return x\n if x == \"+\":\n return 1\n elif x == \"-\":\n return -1\n else:\n return 0\n\n\nclass BedEntry(object):\n __slots__ = [\"refseq\", \"position\", \"length\", \"strand\", \"score\", \"name\"]\n\n def __init__(self, chr, chrStart, chrEnd, name=None, strand=None, score=None):\n self.refseq = chr\n pos = int(chrStart)\n self.position = pos\n self.length = int(chrEnd) - pos\n if strand:\n self.strand = normalize_strand(strand)\n else:\n self.strand = normalize_strand(self.length > 0)\n self.score = numpy.nan\n if name is None:\n name = \"Noname\"\n self.name = name\n if score is not None:\n self.score = score\n\n def get_read_length(self):\n return 
self.length\n\n def __len__(self):\n return self.length\n\n def __repr__(self):\n return \"BedEntry(%s, %i, %i)\" % (\n repr(self.refseq),\n self.position,\n self.position + self.length,\n )\n\n def __str__(self):\n print(\n \"Bed Entry chr=%s, start=%i, length=%i, strand=%s, score=%s, name=%s\"\n % (\n self.refseq,\n self.position,\n self.length,\n self.strand,\n self.score,\n self.name,\n )\n )\n\n\ndef read_bed(filenameOrFileObject, report_progress=False):\n res = []\n for e in read_bed_iterator(filenameOrFileObject, report_progress):\n res.append(e)\n return res\n\n\ndef read_bed_iterator(filenameOrFileObject, report_progress=False):\n fo = open_file(filenameOrFileObject, \"rb\")\n for row in chunkify(fo, b\"\\n\"):\n if row.startswith(b\"track\"):\n trackInfo = row\n elif row.startswith(b\"#\"): # not really a comment character...\n continue\n else:\n try:\n if not row:\n continue\n row = row.split(b\"\\t\")\n e = BedEntry(row[0], row[1], row[2]) # bed does start at 0\n try:\n e.name = row[3]\n e.score = float(row[4])\n except IndexError:\n pass\n except ValueError:\n pass\n try:\n e.strand = normalize_strand(row[5])\n except IndexError:\n pass\n yield e\n except Exception as e:\n raise ValueError(\"Could not parse row: %s\" % row)\n\n\ndef write_bed_header(file_handle, track_name):\n file_handle.write(\n ('track name=\"%s\" description=\"\" useScore=0\\n' % track_name).encode(\"utf-8\")\n )\n\n\ndef write_bed_entry_short(file_handle, entry, name_to_chromosome_lookup_or_none=None):\n if name_to_chromosome_lookup_or_none:\n chr = name_to_chromosome_lookup_or_none[entry.refseq]\n else:\n chr = entry.refseq\n out = [\n chr, # chromosome\n entry.position, # start\n entry.position + len(entry), # end\n ]\n file_handle.write(\"\\t\".join((str(x) for x in out)) + \"\\n\")\n\n\ndef write_bed_entry_long(file_handle, entry, name_to_chromosome_lookup_or_none=None):\n if name_to_chromosome_lookup_or_none:\n chr = name_to_chromosome_lookup_or_none[entry.refseq]\n else:\n chr = entry.refseq\n out = [\n chr, # chromosome\n entry.position, # start\n entry.position + len(entry), # end\n entry.name,\n \".\" if numpy.isnan(entry.score) else entry.score,\n \"+\" if entry.strand == 1 else \"-\" if entry.strand == -1 else \".\",\n entry.position, # this start\n entry.position + len(entry),\n ]\n file_handle.write(b\"\\t\".join((str(x).encode(\"utf-8\") for x in out)) + b\"\\n\")\n\n\ndef write_bed(\n filenameOrFileObject,\n reads,\n name_to_chromosome_lookup_or_none,\n track_name,\n include_header=True,\n minimal=False,\n):\n fo = open_file(filenameOrFileObject, \"wb\")\n if include_header:\n write_bed_header(fo, track_name)\n if minimal:\n write_method = write_bed_entry_short\n else:\n write_method = write_bed_entry_long\n for read in reads:\n write_method(fo, read, name_to_chromosome_lookup_or_none)\n if fo != filenameOrFileObject:\n fo.close()\n\n\ndef bed_to_bigbed(\n input_bed_filename, output_filename, chromosome_lengths, already_sorted=False\n):\n \"\"\"Convert an existing bed file into bigbed. 
Chromosome lengths is a dictionary\"\"\"\n from mbf_externals.kent import BedToBigBed\n\n algo = BedToBigBed()\n algo.store.unpack_version(algo.name, algo.version)\n\n chrom_sizes_file = tempfile.NamedTemporaryFile(suffix=\".sizes\")\n for chr, length in sorted(chromosome_lengths.items()):\n chrom_sizes_file.write((\"%s\\t%i\\n\" % (chr, length)).encode(\"utf-8\"))\n chrom_sizes_file.flush()\n\n input = open(input_bed_filename, \"rb\")\n if not already_sorted:\n missing_chroms = set()\n out = list()\n chrom_lenghts = chromosome_lengths\n for line in input:\n line = line.strip()\n if line and not line.startswith(b\"#\") and not line.startswith(b\"track\"):\n line = line.split(b\"\\t\")\n try:\n chr = line[0].decode(\"utf-8\")\n start = min(max(0, int(line[1])), chrom_lenghts[chr] - 1)\n stop = min(int(line[2]), chrom_lenghts[chr] - 1)\n strand = None\n name = None\n try:\n name = line[3].decode(\"utf-8\")\n strand = line[5].decode(\"utf-8\")\n except IndexError:\n if name is None:\n name = \".\"\n if strand is None:\n strand = \".\"\n out.append(\n (chr, min(start, stop), max(start, stop), name, 0, strand)\n )\n except KeyError:\n missing_chroms.add(chr)\n if missing_chroms:\n print(\"could not find the following chromosomes %s\" % (missing_chroms,))\n out.sort()\n inputtf = tempfile.NamedTemporaryFile()\n for chr, start, stop, name, score, strand in out:\n inputtf.write(\n (\n \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\" % (chr, start, stop, name, score, strand)\n ).encode(\"utf-8\")\n )\n inputtf.flush()\n inputtf.seek(0, 0)\n input.close()\n else:\n inputtf = input\n in_name = os.path.abspath(inputtf.name)\n cmd = [\n str(algo.path / \"bedToBigBed\"),\n \"-tab\",\n in_name,\n os.path.abspath(chrom_sizes_file.name),\n os.path.abspath(output_filename),\n \"-type=bed6\",\n ]\n print(cmd)\n p = subprocess.Popen(cmd)\n p.communicate()\n inputtf.close()\n chrom_sizes_file.close()\n if p.returncode != 0:\n raise ValueError(\"bedToBigBed returned an error code\")\n\n\ndef write_bigbed(\n input_dataframe_or_list_of_bed_entries, output_filename, chromosome_lengths\n):\n \"\"\"Take either a list of BedEntry objects, a dataframe with the slot names of BedEntry objects, or\n a DataFrame with {chr, start, stop, strand, score, name}.\n Either way, strand needs to be one of -1,0,1\n\n \"\"\"\n if not isinstance(input_dataframe_or_list_of_bed_entries, pd.DataFrame):\n r = {}\n for column in [\"refseq\", \"position\", \"length\", \"strand\", \"score\", \"name\"]:\n r[column] = []\n for e in input_dataframe_or_list_of_bed_entries:\n r[column].append(getattr(e, column))\n df = pd.DataFrame(r)\n else:\n df = input_dataframe_or_list_of_bed_entries\n\n if \"chr\" in df.columns and \"start\" in df.columns and \"stop\" in df.columns:\n df = df.sort_values([\"chr\", \"start\"], ascending=[True, True])\n elif \"refseq\" in df.columns and \"position\" in df.columns and \"length\" in df.columns:\n df = df.rename(columns={\"refseq\": \"chr\", \"position\": \"start\"})\n df = df.assign(stop=df[\"start\"] + df[\"length\"])\n df = df.sort_values([\"chr\", \"start\"], ascending=[True, True])\n else:\n raise ValueError(\n \"This dataframe did not contain the necessary bed columns (either (chr, start, stop) or (refseq, position, length)\"\n )\n if \"strand\" not in df.columns:\n df = df.assign(\"strand\", 0)\n if \"name\" not in df.columns:\n df = df.assign(\"name\", \"Noname\")\n if \"score\" not in df.columns:\n df = df.assign(\"score\", numpy.nan)\n of = tempfile.NamedTemporaryFile(suffix=\".bed\")\n for dummy_idx, row in 
df.iterrows():\n output = \"%s\\t%i\\t%i\\t%s\\t%s\\t%s\\n\" % (\n row[\"chr\"],\n row[\"start\"],\n row[\"stop\"],\n row[\"name\"],\n (\"%f\" % (row[\"score\"],)) if (row[\"score\"] is not None) else \".\",\n (\"+\" if row[\"strand\"] == 1 else \"-\" if row[\"strand\"] == -1 else \"+\"),\n )\n output = output.encode(\"utf-8\")\n of.write(output)\n of.flush()\n bed_to_bigbed(of.name, output_filename, chromosome_lengths)\n of.close()\n\n\ndef read_bigbed(filename, chromosome_lengths, chromosome_mangler=lambda x: x):\n import pyBigWig\n\n bb = pyBigWig.open(filename)\n chr_lengths = chromosome_lengths\n data = {\"chr\": [], \"start\": [], \"stop\": [], \"strand\": [], \"name\": []}\n for chr in chr_lengths:\n it = bb.entries(chromosome_mangler(chr), 0, chr_lengths[chr])\n if it is None: # no such chromosome. Tolerable if it's a contig or such.\n # If none of the chromosome names match,\n # we raise later because of an empty big file.\n continue\n for entry in it:\n data[\"chr\"].append(chr)\n data[\"start\"].append(entry[0])\n data[\"stop\"].append(entry[1])\n more = entry[2].split(\"\\t\")\n strand = more[2]\n data[\"strand\"].append(1 if strand == \"+\" else -1 if strand == \"-\" else 0)\n data[\"name\"].append(more[0])\n bb.close()\n return pd.DataFrame(data)\n", "id": "6675119", "language": "Python", "matching_score": 2.0798184871673584, "max_stars_count": 0, "path": "src/mbf_fileformats/bed.py" }, { "content": "from mbf_externals.util import to_bytes\r\nfrom .util import open_file\r\nfrom .bed import normalize_strand, read_bed\r\n\r\n\r\ndef _mapGFF(row):\r\n res = {\r\n 'seqname': row[0],\r\n 'source': row[1],\r\n 'feature': row[2],\r\n 'start': int(row[3]) if row[3] != '.' else 0,\r\n 'end': int(row[4]),\r\n 'score': row[5],\r\n 'strand': normalize_strand(row[6]),\r\n 'frame': row[7],\r\n 'attributes': {},\r\n 'comment': ''\r\n }\r\n attributes = row[8]\r\n comment = ''\r\n if attributes.find(b'#') != -1:\r\n comment = attributes[attributes.find(b'#') + 1:]\r\n attributes = attributes[:attributes.find(b'#')]\r\n res['comment'] = comment\r\n for x in attributes.split(b';'):\r\n if x.strip():\r\n if b'=' in x:\r\n x = x.split(b\"=\")\r\n else:\r\n x = x.split()\r\n res['attributes'][x[0].decode('utf-8')] = [y.decode('utf-8') for y in x[1:]]\r\n return res\r\n\r\n\r\ndef gffToDict(filename, comment_char=None):\r\n comment_char = to_bytes(comment_char)\r\n o = open_file(filename)\r\n rows = o.readlines()\r\n rows = (x.strip() for x in rows)\r\n rows = (x.split(b\"\\t\") for x in rows if x and (comment_char is None or x[0] != comment_char[0]))\r\n res = [_mapGFF(x) for x in rows]\r\n return res\r\n\r\n\r\ndef dictsToGFF(gffDicts, filename):\r\n rows = []\r\n for entry in gffDicts:\r\n if 'score' in entry:\r\n score = entry['score']\r\n else:\r\n score = '.'\r\n if 'attributes' in entry:\r\n attributes = entry['attributes']\r\n if isinstance(attributes, dict):\r\n attributes_str = []\r\n for key, value in attributes.items():\r\n attributes_str.append(\"%s=%s\" % (key, value))\r\n attributes_str = \";\".join(attributes_str)\r\n else:\r\n attributes_str = ''\r\n if entry['strand'] not in ('+', '-', '.'):\r\n raise ValueError(\"invalid strand: %s\" % entry['strand'])\r\n try:\r\n entry['frame'] = int(entry['frame'])\r\n if entry['frame'] not in (0, 1, 2):\r\n raise ValueError()\r\n except ValueError:\r\n raise ValueError(\"invalid frame: %s \" % entry['frame'])\r\n row = \"\\t\".join((entry['seqname'], entry['source'], entry['feature'], str(int(entry['start'])), str(int(entry['end'])),\r\n 
str(score), entry['strand'], str(entry['frame']), attributes_str))\r\n rows.append(row)\r\n o = open(filename, 'wb')\r\n o.write(\"\\n\".join(rows))\r\n o.close()\r\n\r\n\r\nclass GFF3:\r\n\r\n def escape(self, str):\r\n if str is None:\r\n return '.'\r\n escape = '\"\\t\\n\\r=;'\r\n for k in escape:\r\n str = str.replace(k, '%' + '%2x' % ord(k))\r\n return str\r\n\r\n def format_attributes(self, attributes):\r\n if attributes is None:\r\n return '.'\r\n if isinstance(attributes, dict):\r\n attributes = list(attributes.items())\r\n valid_attributes = [ 'Name', 'Alias', 'Parent', 'Target', 'Gap', 'Derives_from', 'Note', 'Dbxref']\r\n res = []\r\n for id, value in attributes:\r\n # if not id in valid_attributes and id != id.lower(): #lower case names are not reserved\r\n # raise ValueError(\"Not a valid tag: %s\" % id)\r\n res.append('%s=%s' % (self.escape(id), self.escape(value)))\r\n return \";\".join(res)\r\n\r\n\r\n def dump_row(self, file_handle, seqid = None, source = None, type = None, start = None, end = None, score = None, strand = None, phase = None, attributes = None):\r\n file_handle.write(\"\\t\".join((\r\n self.escape(seqid),\r\n self.escape(source),\r\n self.escape(type),\r\n self.escape(start),\r\n self.escape(end),\r\n self.escape(score),\r\n self.escape(strand),\r\n self.escape(phase),\r\n self.format_attributes(attributes))))\r\n file_handle.write(\"\\n\")\r\n\r\n\r\ndef bed_to_gff(input_filename_or_handle, output_filename_or_handle, feature_type, source = None, show_progress= False):\r\n entries = read_bed(input_filename_or_handle, report_progress = show_progress)\r\n output_file_handle = open_file(output_filename_or_handle, 'wb')\r\n gff = GFF3()\r\n for e in entries:\r\n gff.dump_row(output_file_handle, e.refseq, source=source, type=feature_type, start = str(e.position), end = str(e.position + e.length), strand='.')\r\n output_file_handle.close()\r\n", "id": "12519863", "language": "Python", "matching_score": 0.5982263684272766, "max_stars_count": 0, "path": "src/mbf_fileformats/gff.py" }, { "content": "import networkx\nfrom pathlib import Path\nimport sys\nimport subprocess\n\n\ndef browse_graph_ml(filename=None):\n graph = networkx.readwrite.graphml.read_graphml(filename)\n\n header = \"choose a node by typing. 
Ctrl-c to quit\"\n p = subprocess.Popen(\n [\"fzf\", \"--header\", header], stdin=subprocess.PIPE, stdout=subprocess.PIPE\n )\n chosen = p.communicate(\"\\n\".join(graph.nodes).encode(\"utf-8\"))[0].decode(\"utf-8\")\n stack = [chosen]\n\n if chosen:\n while True:\n if chosen.startswith(\"parent: \") or chosen.startswith(\"child: \"):\n chosen = chosen[chosen.find(\" \") + 1 :]\n chosen = chosen.strip()\n lines = []\n for child in reversed(sorted(graph.successors(chosen))):\n lines.append(f\"child: {child}\")\n lines.append(\"\")\n for parent in reversed(sorted(graph.predecessors(chosen))):\n lines.append(f\"parent: {parent}\")\n lines = \"\\n\".join(lines)\n header = f\"Examining: {chosen}\"\n p = subprocess.Popen(\n [\"fzf\", \"--header\", header],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n )\n chosen = p.communicate(lines.encode(\"utf-8\"))[0].decode(\"utf-8\")\n if not chosen:\n if len(stack) > 1:\n chosen = stack.pop()\n chosen = stack.pop()\n else:\n break\n else:\n if not stack or stack[-1] != chosen:\n stack.append(chosen)\n\n\ndef cli_browse_graph_ml():\n try:\n filename = sys.argv[1]\n except IndexError:\n print(\"ppg2_browse_graph_ml <path_to_.graphml>\")\n sys.exit(1)\n browse_graph_ml(filename)\n\n\ndef record_as_generated_file(filename, history):\n import pypipegraph2 as ppg2\n\n if not filename.exists():\n raise FileNotFound(filename)\n\n present = filename in history\n print(\"already present?\", present)\n if present:\n had_input = bool(history[filename][0])\n had_output = bool(history[filename][1])\n print(\"had output\", had_output)\n print(\"had input\", had_input)\n if not present or (not had_output) and (not had_input):\n print(\"ok, fixing,\", filename)\n history[filename] = (\n {},\n {\n str(filename): ppg2.hashers.hash_file(filename)\n },\n )\n else:\n print(\"skipping\", filename)\n\n\ndef cli_record_as_generated_file():\n '''Record <filename> (relative path) as if it was \n done by a filegenerating job - provided it wasn't recorded \n previously, and it had no input.\n\n This should not be necessary in almost all circumstances.\n\n '''\n import pypipegraph2 as ppg2\n if len(sys.argv) <= 1:\n print(\"ppg2_record_as_generated_file <filename>\")\n print(\"Records this file as if it had been generated by the pipegraph\")\n print(\"(only useful if the job in question is not being invalidated\")\n sys.exit(1)\n else:\n g = ppg2.new()\n history = g._load_history()\n\n for filename in sys.argv[1:]:\n record_as_generated_file(Path(filename), history)\n g._save_history(history)\n", "id": "12017929", "language": "Python", "matching_score": 1.2400394678115845, "max_stars_count": 0, "path": "src/pypipegraph2/entry_points.py" }, { "content": "import subprocess\nimport unittest\nimport pytest # noqa:F401\n\n\nclass Flake8TestCase(unittest.TestCase):\n def test_flake8(self):\n p = subprocess.Popen(\"flake8\", stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = p.communicate()\n if p.returncode != 0:\n self.fail(\n \"Flake 8 found issues: %s\\n%s\"\n % (stdout.decode(\"utf-8\"), stderr.decode(\"utf-8\"))\n )\n", "id": "5475249", "language": "Python", "matching_score": 0.012284683994948864, "max_stars_count": 0, "path": "tests/test_flake8.py" }, { "content": "from pathlib import Path\nimport time\nfrom .shared import counter, read\nimport pypipegraph2 as ppg\nimport pytest\n\n\nclass DummyObject:\n pass\n\n\ndef dummy_smfg(files, prefix):\n Path(prefix).mkdir(exist_ok=True, parents=True)\n for f in files:\n f.write_text(\"hello\")\n\n\ndef 
dummy_mfg(files):\n for f in files:\n f.parent.mkdir(exist_ok=True, parents=True)\n f.write_text(\"hello\")\n\n\ndef dummy_fg(of):\n of.parent.mkdir(exist_ok=True, parents=True)\n of.write_text(\"fg\")\n\n\[email protected](\"ppg2_per_test\")\nclass TestsFromTheField:\n def test_issue_20210726a(self, job_trace_log):\n \"\"\"This uncovered a depth first vs breadth first invalidation proagation bug.\n Created with Job_Status.dump_subgraph_for_debug and then heavily pruned\n \"\"\"\n\n job_0 = ppg.FileGeneratingJob(\"J0\", dummy_fg, depend_on_function=False)\n job_2 = ppg.DataLoadingJob(\"J2\", lambda: None, depend_on_function=False)\n job_3 = ppg.DataLoadingJob(\"J3\", lambda: None, depend_on_function=False)\n job_76 = ppg.FileGeneratingJob(\"J76\", dummy_fg, depend_on_function=False)\n\n edges = []\n edges.append((\"J0\", \"J2\"))\n edges.append((\"J2\", \"J3\"))\n edges.append((\"J2\", \"J76\"))\n edges.append((\"J76\", \"J3\"))\n\n for (a, b) in edges:\n if a in ppg.global_pipegraph.jobs and b in ppg.global_pipegraph.jobs:\n ppg.global_pipegraph.jobs[a].depends_on(ppg.global_pipegraph.jobs[b])\n else:\n print(\"unused edge\", a, b)\n\n ppg.run()\n ppg.run(event_timeout=1)\n\n def test_issue_20210726b(self, job_trace_log):\n job_0 = ppg.FileGeneratingJob(\"0\", dummy_fg, depend_on_function=False)\n job_1 = ppg.FunctionInvariant(\"1\", lambda: 55)\n job_2 = ppg.DataLoadingJob(\"2\", lambda: None, depend_on_function=False)\n job_3 = ppg.DataLoadingJob(\"3\", lambda: None, depend_on_function=False)\n job_4 = ppg.FileGeneratingJob(\"4\", dummy_fg, depend_on_function=False)\n job_5 = ppg.SharedMultiFileGeneratingJob(\n \"5\", [\"url.txt\"], dummy_smfg, depend_on_function=False\n )\n job_6 = ppg.FunctionInvariant(\"6\", lambda: 55)\n job_7 = ppg.ParameterInvariant(\"7\", 55)\n job_8 = ppg.SharedMultiFileGeneratingJob(\n \"8\", [\"genes.gtf\"], dummy_smfg, depend_on_function=False\n )\n job_9 = ppg.FunctionInvariant(\"9\", lambda: 55)\n job_10 = ppg.ParameterInvariant(\"10\", 55)\n job_11 = ppg.SharedMultiFileGeneratingJob(\n \"11\",\n [\"genome.fasta\", \"genome.fasta.fai\"],\n dummy_smfg,\n depend_on_function=False,\n )\n job_12 = ppg.FunctionInvariant(\"12\", lambda: 55)\n job_13 = ppg.ParameterInvariant(\"13\", 55)\n job_14 = ppg.SharedMultiFileGeneratingJob(\n \"14\", [\"references.txt\"], dummy_smfg, depend_on_function=False\n )\n job_15 = ppg.FunctionInvariant(\"15\", lambda: 55)\n job_16 = ppg.ParameterInvariant(\"16\", 55)\n job_17 = ppg.SharedMultiFileGeneratingJob(\n \"17\", [\"cdna.fasta\", \"cdna.fasta.fai\"], dummy_smfg, depend_on_function=False\n )\n job_18 = ppg.FunctionInvariant(\"18\", lambda: 55)\n job_19 = ppg.ParameterInvariant(\"19\", 55)\n job_20 = ppg.SharedMultiFileGeneratingJob(\n \"20\", [\"pep.fasta\", \"pep.fasta.fai\"], dummy_smfg, depend_on_function=False\n )\n job_21 = ppg.FunctionInvariant(\"21\", lambda: 55)\n job_22 = ppg.ParameterInvariant(\"22\", 55)\n job_23 = ppg.SharedMultiFileGeneratingJob(\n \"23\", [\"core.sql.gz\"], dummy_smfg, depend_on_function=False\n )\n job_24 = ppg.FunctionInvariant(\"24\", lambda: 55)\n job_25 = ppg.ParameterInvariant(\"25\", 55)\n job_26 = ppg.SharedMultiFileGeneratingJob(\n \"26\", [\"gene.txt.gz\"], dummy_smfg, depend_on_function=False\n )\n job_27 = ppg.FunctionInvariant(\"27\", lambda: 55)\n job_28 = ppg.ParameterInvariant(\"28\", 55)\n job_29 = ppg.SharedMultiFileGeneratingJob(\n \"29\", [\"transcript.txt.gz\"], dummy_smfg, depend_on_function=False\n )\n job_30 = ppg.FunctionInvariant(\"30\", lambda: 55)\n job_31 = 
ppg.ParameterInvariant(\"31\", 55)\n job_32 = ppg.SharedMultiFileGeneratingJob(\n \"32\", [\"translation.txt.gz\"], dummy_smfg, depend_on_function=False\n )\n job_33 = ppg.FunctionInvariant(\"33\", lambda: 55)\n job_34 = ppg.ParameterInvariant(\"34\", 55)\n job_35 = ppg.SharedMultiFileGeneratingJob(\n \"35\", [\"stable_id_event.txt.gz\"], dummy_smfg, depend_on_function=False\n )\n job_36 = ppg.FunctionInvariant(\"36\", lambda: 55)\n job_37 = ppg.ParameterInvariant(\"37\", 55)\n job_38 = ppg.SharedMultiFileGeneratingJob(\n \"38\", [\"external_db.txt.gz\"], dummy_smfg, depend_on_function=False\n )\n job_39 = ppg.FunctionInvariant(\"39\", lambda: 55)\n job_40 = ppg.ParameterInvariant(\"40\", 55)\n job_41 = ppg.SharedMultiFileGeneratingJob(\n \"41\", [\"object_xref.txt.gz\"], dummy_smfg, depend_on_function=False\n )\n job_42 = ppg.FunctionInvariant(\"42\", lambda: 55)\n job_43 = ppg.ParameterInvariant(\"43\", 55)\n job_44 = ppg.SharedMultiFileGeneratingJob(\n \"44\", [\"xref.txt.gz\"], dummy_smfg, depend_on_function=False\n )\n job_45 = ppg.FunctionInvariant(\"45\", lambda: 55)\n job_46 = ppg.ParameterInvariant(\"46\", 55)\n job_47 = ppg.SharedMultiFileGeneratingJob(\n \"47\", [\"alt_allele.txt.gz\"], dummy_smfg, depend_on_function=False\n )\n job_48 = ppg.FunctionInvariant(\"48\", lambda: 55)\n job_49 = ppg.ParameterInvariant(\"49\", 55)\n job_50 = ppg.SharedMultiFileGeneratingJob(\n \"50\", [\"seq_region.txt.gz\"], dummy_smfg, depend_on_function=False\n )\n job_51 = ppg.FunctionInvariant(\"51\", lambda: 55)\n job_52 = ppg.ParameterInvariant(\"52\", 55)\n job_53 = ppg.SharedMultiFileGeneratingJob(\n \"53\", [\"seq_region_attrib.txt.gz\"], dummy_smfg, depend_on_function=False\n )\n job_54 = ppg.FunctionInvariant(\"54\", lambda: 55)\n job_55 = ppg.ParameterInvariant(\"55\", 55)\n job_56 = ppg.SharedMultiFileGeneratingJob(\n \"56\", [\"attrib_type.txt.gz\"], dummy_smfg, depend_on_function=False\n )\n job_57 = ppg.FunctionInvariant(\"57\", lambda: 55)\n job_58 = ppg.ParameterInvariant(\"58\", 55)\n job_59 = ppg.SharedMultiFileGeneratingJob(\n \"59\", [\"external_synonym.txt.gz\"], dummy_smfg, depend_on_function=False\n )\n job_60 = ppg.FunctionInvariant(\"60\", lambda: 55)\n job_61 = ppg.ParameterInvariant(\"61\", 55)\n job_62 = ppg.SharedMultiFileGeneratingJob(\n \"62\", [\"df_genes.msgpack\"], dummy_smfg, depend_on_function=False\n )\n job_63 = ppg.FunctionInvariant(\"63\", lambda: 55)\n Path(\"64\").write_text(\"A\")\n job_64 = ppg.FileInvariant(\"64\")\n job_65 = ppg.ParameterInvariant(\"65\", 55)\n job_66 = ppg.FunctionInvariant(\"66\", lambda: 55)\n job_67 = ppg.SharedMultiFileGeneratingJob(\n \"67\", [\"df_transcripts.msgpack\"], dummy_smfg, depend_on_function=False\n )\n job_68 = ppg.FunctionInvariant(\"68\", lambda: 55)\n job_69 = ppg.ParameterInvariant(\"69\", 55)\n job_70 = ppg.FunctionInvariant(\"70\", lambda: 55)\n job_71 = ppg.FunctionInvariant(\"71\", lambda: 55)\n job_72 = ppg.FunctionInvariant(\"72\", lambda: 55)\n job_73 = ppg.ParameterInvariant(\"73\", 55)\n job_74 = ppg.FunctionInvariant(\"74\", lambda: 55)\n job_75 = ppg.FunctionInvariant(\"75\", lambda: 55)\n job_76 = ppg.FileGeneratingJob(\"76\", dummy_fg, depend_on_function=False)\n job_77 = ppg.FunctionInvariant(\"77\", lambda: 55)\n job_78 = ppg.FunctionInvariant(\"78\", lambda: 55)\n job_79 = ppg.AttributeLoadingJob(\n \"79\", DummyObject(), \"attr_79\", lambda: None, depend_on_function=False\n )\n job_80 = ppg.FileGeneratingJob(\"80\", dummy_fg, depend_on_function=False)\n job_81 = ppg.FileGeneratingJob(\"81\", 
dummy_fg, depend_on_function=False)\n job_82 = ppg.MultiFileGeneratingJob(\n [\"82/Cont-1.bam\", \"82/sentinel.txt\"], dummy_mfg, depend_on_function=False\n )\n job_83 = ppg.TempFileGeneratingJob(\"83\", dummy_fg, depend_on_function=False)\n Path(\"84\").write_text(\"A\")\n job_84 = ppg.FileInvariant(\"84\")\n Path(\"85\").write_text(\"A\")\n job_85 = ppg.FileInvariant(\"85\")\n Path(\"86\").write_text(\"A\")\n job_86 = ppg.FileInvariant(\"86\")\n Path(\"87\").write_text(\"A\")\n job_87 = ppg.FileInvariant(\"87\")\n job_88 = ppg.FunctionInvariant(\"88\", lambda: 55)\n job_89 = ppg.ParameterInvariant(\"89\", 55)\n job_90 = ppg.ParameterInvariant(\"90\", 55)\n job_91 = ppg.SharedMultiFileGeneratingJob(\n \"91\",\n [\n \"SA\",\n \"SAindex\",\n \"chrNameLength.txt\",\n \"exonGeTrInfo.tab\",\n \"exonInfo.tab\",\n \"geneInfo.tab\",\n \"sjdbInfo.txt\",\n \"sjdbList.fromGTF.out.tab\",\n \"sjdbList.out.tab\",\n \"transcriptInfo.tab\",\n ],\n dummy_smfg,\n depend_on_function=False,\n )\n job_92 = ppg.FunctionInvariant(\"92\", lambda: 55)\n job_93 = ppg.ParameterInvariant(\"93\", 55)\n Path(\"94\").write_text(\"A\")\n job_94 = ppg.FileInvariant(\"94\")\n job_95 = ppg.FunctionInvariant(\"95\", lambda: 55)\n job_96 = ppg.FunctionInvariant(\"96\", lambda: 55)\n job_97 = ppg.ParameterInvariant(\"97\", 55)\n job_98 = ppg.FileGeneratingJob(\"98\", dummy_fg, depend_on_function=False)\n job_99 = ppg.FunctionInvariant(\"99\", lambda: 55)\n job_100 = ppg.FunctionInvariant(\"100\", lambda: 55)\n job_101 = ppg.FunctionInvariant(\"101\", lambda: 55)\n job_102 = ppg.ParameterInvariant(\"102\", 55)\n job_103 = ppg.FileGeneratingJob(\"103\", dummy_fg, depend_on_function=False)\n job_104 = ppg.FunctionInvariant(\"104\", lambda: 55)\n job_105 = ppg.FunctionInvariant(\"105\", lambda: 55)\n job_106 = ppg.FunctionInvariant(\"106\", lambda: 55)\n job_107 = ppg.ParameterInvariant(\"107\", 55)\n job_108 = ppg.FunctionInvariant(\"108\", lambda: 55)\n job_109 = ppg.FunctionInvariant(\"109\", lambda: 55)\n edges = []\n edges.append((\"0\", \"1\"))\n edges.append((\"0\", \"2\"))\n edges.append((\"2\", \"3\"))\n edges.append((\"3\", \"4\"))\n edges.append((\"4\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"4\", \"8\"))\n edges.append((\"8\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"8\", \"9\"))\n edges.append((\"8\", \"10\"))\n edges.append((\"4\", \"11\"))\n edges.append((\"11\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"11\", \"12\"))\n edges.append((\"11\", \"13\"))\n edges.append((\"4\", \"14\"))\n edges.append((\"14\", \"11\"))\n edges.append((\"11\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"11\", \"12\"))\n edges.append((\"11\", \"13\"))\n edges.append((\"14\", \"15\"))\n edges.append((\"14\", \"16\"))\n edges.append((\"4\", \"17\"))\n edges.append((\"17\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"17\", \"18\"))\n edges.append((\"17\", \"19\"))\n edges.append((\"4\", \"20\"))\n edges.append((\"20\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"20\", \"21\"))\n edges.append((\"20\", \"22\"))\n edges.append((\"4\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"4\", \"26\"))\n edges.append((\"26\", \"5\"))\n 
edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"26\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"26\", \"27\"))\n edges.append((\"26\", \"28\"))\n edges.append((\"4\", \"29\"))\n edges.append((\"29\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"29\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"29\", \"30\"))\n edges.append((\"29\", \"31\"))\n edges.append((\"4\", \"32\"))\n edges.append((\"32\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"32\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"32\", \"33\"))\n edges.append((\"32\", \"34\"))\n edges.append((\"4\", \"35\"))\n edges.append((\"35\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"35\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"35\", \"36\"))\n edges.append((\"35\", \"37\"))\n edges.append((\"4\", \"38\"))\n edges.append((\"38\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"38\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"38\", \"39\"))\n edges.append((\"38\", \"40\"))\n edges.append((\"4\", \"41\"))\n edges.append((\"41\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"41\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"41\", \"42\"))\n edges.append((\"41\", \"43\"))\n edges.append((\"4\", \"44\"))\n edges.append((\"44\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"44\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"44\", \"45\"))\n edges.append((\"44\", \"46\"))\n edges.append((\"4\", \"47\"))\n edges.append((\"47\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"47\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"47\", \"48\"))\n edges.append((\"47\", \"49\"))\n edges.append((\"4\", \"50\"))\n edges.append((\"50\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"50\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"50\", \"51\"))\n edges.append((\"50\", \"52\"))\n edges.append((\"4\", \"53\"))\n edges.append((\"53\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"53\", 
\"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"53\", \"54\"))\n edges.append((\"53\", \"55\"))\n edges.append((\"4\", \"56\"))\n edges.append((\"56\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"56\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"56\", \"57\"))\n edges.append((\"56\", \"58\"))\n edges.append((\"4\", \"59\"))\n edges.append((\"59\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"59\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"59\", \"60\"))\n edges.append((\"59\", \"61\"))\n edges.append((\"4\", \"62\"))\n edges.append((\"62\", \"8\"))\n edges.append((\"8\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"8\", \"9\"))\n edges.append((\"8\", \"10\"))\n edges.append((\"62\", \"63\"))\n edges.append((\"62\", \"64\"))\n edges.append((\"62\", \"65\"))\n edges.append((\"62\", \"66\"))\n edges.append((\"4\", \"67\"))\n edges.append((\"67\", \"8\"))\n edges.append((\"8\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"8\", \"9\"))\n edges.append((\"8\", \"10\"))\n edges.append((\"67\", \"62\"))\n edges.append((\"62\", \"8\"))\n edges.append((\"8\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"8\", \"9\"))\n edges.append((\"8\", \"10\"))\n edges.append((\"62\", \"63\"))\n edges.append((\"62\", \"64\"))\n edges.append((\"62\", \"65\"))\n edges.append((\"62\", \"66\"))\n edges.append((\"67\", \"64\"))\n edges.append((\"67\", \"68\"))\n edges.append((\"67\", \"69\"))\n edges.append((\"67\", \"70\"))\n edges.append((\"4\", \"71\"))\n edges.append((\"4\", \"72\"))\n edges.append((\"4\", \"73\"))\n edges.append((\"4\", \"74\"))\n edges.append((\"3\", \"75\"))\n edges.append((\"2\", \"76\"))\n edges.append((\"76\", \"3\"))\n edges.append((\"3\", \"4\"))\n edges.append((\"4\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"4\", \"8\"))\n edges.append((\"8\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"8\", \"9\"))\n edges.append((\"8\", \"10\"))\n edges.append((\"4\", \"11\"))\n edges.append((\"11\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"11\", \"12\"))\n edges.append((\"11\", \"13\"))\n edges.append((\"4\", \"14\"))\n edges.append((\"14\", \"11\"))\n edges.append((\"11\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"11\", \"12\"))\n edges.append((\"11\", \"13\"))\n edges.append((\"14\", \"15\"))\n edges.append((\"14\", \"16\"))\n edges.append((\"4\", \"17\"))\n edges.append((\"17\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"17\", \"18\"))\n edges.append((\"17\", \"19\"))\n edges.append((\"4\", \"20\"))\n edges.append((\"20\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"20\", \"21\"))\n edges.append((\"20\", \"22\"))\n edges.append((\"4\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", 
\"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"4\", \"26\"))\n edges.append((\"26\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"26\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"26\", \"27\"))\n edges.append((\"26\", \"28\"))\n edges.append((\"4\", \"29\"))\n edges.append((\"29\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"29\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"29\", \"30\"))\n edges.append((\"29\", \"31\"))\n edges.append((\"4\", \"32\"))\n edges.append((\"32\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"32\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"32\", \"33\"))\n edges.append((\"32\", \"34\"))\n edges.append((\"4\", \"35\"))\n edges.append((\"35\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"35\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"35\", \"36\"))\n edges.append((\"35\", \"37\"))\n edges.append((\"4\", \"38\"))\n edges.append((\"38\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"38\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"38\", \"39\"))\n edges.append((\"38\", \"40\"))\n edges.append((\"4\", \"41\"))\n edges.append((\"41\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"41\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"41\", \"42\"))\n edges.append((\"41\", \"43\"))\n edges.append((\"4\", \"44\"))\n edges.append((\"44\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"44\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"44\", \"45\"))\n edges.append((\"44\", \"46\"))\n edges.append((\"4\", \"47\"))\n edges.append((\"47\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"47\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"47\", \"48\"))\n edges.append((\"47\", \"49\"))\n edges.append((\"4\", \"50\"))\n edges.append((\"50\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"50\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"50\", \"51\"))\n 
edges.append((\"50\", \"52\"))\n edges.append((\"4\", \"53\"))\n edges.append((\"53\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"53\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"53\", \"54\"))\n edges.append((\"53\", \"55\"))\n edges.append((\"4\", \"56\"))\n edges.append((\"56\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"56\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"56\", \"57\"))\n edges.append((\"56\", \"58\"))\n edges.append((\"4\", \"59\"))\n edges.append((\"59\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"59\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"59\", \"60\"))\n edges.append((\"59\", \"61\"))\n edges.append((\"4\", \"62\"))\n edges.append((\"62\", \"8\"))\n edges.append((\"8\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"8\", \"9\"))\n edges.append((\"8\", \"10\"))\n edges.append((\"62\", \"63\"))\n edges.append((\"62\", \"64\"))\n edges.append((\"62\", \"65\"))\n edges.append((\"62\", \"66\"))\n edges.append((\"4\", \"67\"))\n edges.append((\"67\", \"8\"))\n edges.append((\"8\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"8\", \"9\"))\n edges.append((\"8\", \"10\"))\n edges.append((\"67\", \"62\"))\n edges.append((\"62\", \"8\"))\n edges.append((\"8\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"8\", \"9\"))\n edges.append((\"8\", \"10\"))\n edges.append((\"62\", \"63\"))\n edges.append((\"62\", \"64\"))\n edges.append((\"62\", \"65\"))\n edges.append((\"62\", \"66\"))\n edges.append((\"67\", \"64\"))\n edges.append((\"67\", \"68\"))\n edges.append((\"67\", \"69\"))\n edges.append((\"67\", \"70\"))\n edges.append((\"4\", \"71\"))\n edges.append((\"4\", \"72\"))\n edges.append((\"4\", \"73\"))\n edges.append((\"4\", \"74\"))\n edges.append((\"3\", \"75\"))\n edges.append((\"76\", \"77\"))\n edges.append((\"76\", \"78\"))\n edges.append((\"76\", \"79\"))\n edges.append((\"79\", \"80\"))\n edges.append((\"80\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"80\", \"8\"))\n edges.append((\"8\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"8\", \"9\"))\n edges.append((\"8\", \"10\"))\n edges.append((\"80\", \"11\"))\n edges.append((\"11\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"11\", \"12\"))\n edges.append((\"11\", \"13\"))\n edges.append((\"80\", \"14\"))\n edges.append((\"14\", \"11\"))\n edges.append((\"11\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"11\", \"12\"))\n edges.append((\"11\", \"13\"))\n edges.append((\"14\", \"15\"))\n edges.append((\"14\", \"16\"))\n edges.append((\"80\", \"17\"))\n edges.append((\"17\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"17\", \"18\"))\n edges.append((\"17\", \"19\"))\n edges.append((\"80\", \"20\"))\n edges.append((\"20\", 
\"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"20\", \"21\"))\n edges.append((\"20\", \"22\"))\n edges.append((\"80\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"80\", \"26\"))\n edges.append((\"26\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"26\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"26\", \"27\"))\n edges.append((\"26\", \"28\"))\n edges.append((\"80\", \"29\"))\n edges.append((\"29\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"29\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"29\", \"30\"))\n edges.append((\"29\", \"31\"))\n edges.append((\"80\", \"32\"))\n edges.append((\"32\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"32\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"32\", \"33\"))\n edges.append((\"32\", \"34\"))\n edges.append((\"80\", \"35\"))\n edges.append((\"35\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"35\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"35\", \"36\"))\n edges.append((\"35\", \"37\"))\n edges.append((\"80\", \"38\"))\n edges.append((\"38\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"38\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"38\", \"39\"))\n edges.append((\"38\", \"40\"))\n edges.append((\"80\", \"41\"))\n edges.append((\"41\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"41\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"41\", \"42\"))\n edges.append((\"41\", \"43\"))\n edges.append((\"80\", \"44\"))\n edges.append((\"44\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"44\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"44\", \"45\"))\n edges.append((\"44\", \"46\"))\n edges.append((\"80\", \"47\"))\n edges.append((\"47\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"47\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"47\", \"48\"))\n edges.append((\"47\", \"49\"))\n edges.append((\"80\", \"50\"))\n edges.append((\"50\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n 
edges.append((\"50\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"50\", \"51\"))\n edges.append((\"50\", \"52\"))\n edges.append((\"80\", \"53\"))\n edges.append((\"53\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"53\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"53\", \"54\"))\n edges.append((\"53\", \"55\"))\n edges.append((\"80\", \"56\"))\n edges.append((\"56\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"56\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"56\", \"57\"))\n edges.append((\"56\", \"58\"))\n edges.append((\"80\", \"59\"))\n edges.append((\"59\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"59\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"59\", \"60\"))\n edges.append((\"59\", \"61\"))\n edges.append((\"80\", \"81\"))\n edges.append((\"81\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"81\", \"8\"))\n edges.append((\"8\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"8\", \"9\"))\n edges.append((\"8\", \"10\"))\n edges.append((\"81\", \"11\"))\n edges.append((\"11\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"11\", \"12\"))\n edges.append((\"11\", \"13\"))\n edges.append((\"81\", \"14\"))\n edges.append((\"14\", \"11\"))\n edges.append((\"11\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"11\", \"12\"))\n edges.append((\"11\", \"13\"))\n edges.append((\"14\", \"15\"))\n edges.append((\"14\", \"16\"))\n edges.append((\"81\", \"17\"))\n edges.append((\"17\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"17\", \"18\"))\n edges.append((\"17\", \"19\"))\n edges.append((\"81\", \"20\"))\n edges.append((\"20\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"20\", \"21\"))\n edges.append((\"20\", \"22\"))\n edges.append((\"81\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"81\", \"26\"))\n edges.append((\"26\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"26\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"26\", \"27\"))\n edges.append((\"26\", \"28\"))\n edges.append((\"81\", \"29\"))\n edges.append((\"29\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"29\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"29\", \"30\"))\n 
edges.append((\"29\", \"31\"))\n edges.append((\"81\", \"32\"))\n edges.append((\"32\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"32\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"32\", \"33\"))\n edges.append((\"32\", \"34\"))\n edges.append((\"81\", \"35\"))\n edges.append((\"35\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"35\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"35\", \"36\"))\n edges.append((\"35\", \"37\"))\n edges.append((\"81\", \"38\"))\n edges.append((\"38\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"38\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"38\", \"39\"))\n edges.append((\"38\", \"40\"))\n edges.append((\"81\", \"41\"))\n edges.append((\"41\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"41\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"41\", \"42\"))\n edges.append((\"41\", \"43\"))\n edges.append((\"81\", \"44\"))\n edges.append((\"44\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"44\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"44\", \"45\"))\n edges.append((\"44\", \"46\"))\n edges.append((\"81\", \"47\"))\n edges.append((\"47\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"47\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"47\", \"48\"))\n edges.append((\"47\", \"49\"))\n edges.append((\"81\", \"50\"))\n edges.append((\"50\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"50\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"50\", \"51\"))\n edges.append((\"50\", \"52\"))\n edges.append((\"81\", \"53\"))\n edges.append((\"53\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"53\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"53\", \"54\"))\n edges.append((\"53\", \"55\"))\n edges.append((\"81\", \"56\"))\n edges.append((\"56\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"56\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"56\", \"57\"))\n edges.append((\"56\", \"58\"))\n edges.append((\"81\", \"59\"))\n 
edges.append((\"59\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"59\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"59\", \"60\"))\n edges.append((\"59\", \"61\"))\n edges.append((\"81\", \"82\"))\n edges.append((\"82\", \"83\"))\n edges.append((\"83\", \"84\"))\n edges.append((\"83\", \"85\"))\n edges.append((\"83\", \"86\"))\n edges.append((\"83\", \"87\"))\n edges.append((\"83\", \"88\"))\n edges.append((\"83\", \"89\"))\n edges.append((\"83\", \"90\"))\n edges.append((\"82\", \"91\"))\n edges.append((\"91\", \"8\"))\n edges.append((\"8\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"8\", \"9\"))\n edges.append((\"8\", \"10\"))\n edges.append((\"91\", \"11\"))\n edges.append((\"11\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"11\", \"12\"))\n edges.append((\"11\", \"13\"))\n edges.append((\"91\", \"92\"))\n edges.append((\"91\", \"93\"))\n edges.append((\"82\", \"94\"))\n edges.append((\"82\", \"95\"))\n edges.append((\"82\", \"96\"))\n edges.append((\"82\", \"97\"))\n edges.append((\"81\", \"98\"))\n edges.append((\"98\", \"82\"))\n edges.append((\"82\", \"83\"))\n edges.append((\"83\", \"84\"))\n edges.append((\"83\", \"85\"))\n edges.append((\"83\", \"86\"))\n edges.append((\"83\", \"87\"))\n edges.append((\"83\", \"88\"))\n edges.append((\"83\", \"89\"))\n edges.append((\"83\", \"90\"))\n edges.append((\"82\", \"91\"))\n edges.append((\"91\", \"8\"))\n edges.append((\"8\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"8\", \"9\"))\n edges.append((\"8\", \"10\"))\n edges.append((\"91\", \"11\"))\n edges.append((\"11\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"11\", \"12\"))\n edges.append((\"11\", \"13\"))\n edges.append((\"91\", \"92\"))\n edges.append((\"91\", \"93\"))\n edges.append((\"82\", \"94\"))\n edges.append((\"82\", \"95\"))\n edges.append((\"82\", \"96\"))\n edges.append((\"82\", \"97\"))\n edges.append((\"98\", \"99\"))\n edges.append((\"81\", \"100\"))\n edges.append((\"81\", \"101\"))\n edges.append((\"81\", \"102\"))\n edges.append((\"80\", \"103\"))\n edges.append((\"103\", \"81\"))\n edges.append((\"81\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"81\", \"8\"))\n edges.append((\"8\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"8\", \"9\"))\n edges.append((\"8\", \"10\"))\n edges.append((\"81\", \"11\"))\n edges.append((\"11\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"11\", \"12\"))\n edges.append((\"11\", \"13\"))\n edges.append((\"81\", \"14\"))\n edges.append((\"14\", \"11\"))\n edges.append((\"11\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"11\", \"12\"))\n edges.append((\"11\", \"13\"))\n edges.append((\"14\", \"15\"))\n edges.append((\"14\", \"16\"))\n edges.append((\"81\", \"17\"))\n edges.append((\"17\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"17\", \"18\"))\n edges.append((\"17\", \"19\"))\n edges.append((\"81\", \"20\"))\n edges.append((\"20\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"20\", \"21\"))\n edges.append((\"20\", 
\"22\"))\n edges.append((\"81\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"81\", \"26\"))\n edges.append((\"26\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"26\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"26\", \"27\"))\n edges.append((\"26\", \"28\"))\n edges.append((\"81\", \"29\"))\n edges.append((\"29\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"29\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"29\", \"30\"))\n edges.append((\"29\", \"31\"))\n edges.append((\"81\", \"32\"))\n edges.append((\"32\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"32\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"32\", \"33\"))\n edges.append((\"32\", \"34\"))\n edges.append((\"81\", \"35\"))\n edges.append((\"35\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"35\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"35\", \"36\"))\n edges.append((\"35\", \"37\"))\n edges.append((\"81\", \"38\"))\n edges.append((\"38\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"38\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"38\", \"39\"))\n edges.append((\"38\", \"40\"))\n edges.append((\"81\", \"41\"))\n edges.append((\"41\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"41\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"41\", \"42\"))\n edges.append((\"41\", \"43\"))\n edges.append((\"81\", \"44\"))\n edges.append((\"44\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"44\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"44\", \"45\"))\n edges.append((\"44\", \"46\"))\n edges.append((\"81\", \"47\"))\n edges.append((\"47\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"47\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"47\", \"48\"))\n edges.append((\"47\", \"49\"))\n edges.append((\"81\", \"50\"))\n edges.append((\"50\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"50\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n 
edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"50\", \"51\"))\n edges.append((\"50\", \"52\"))\n edges.append((\"81\", \"53\"))\n edges.append((\"53\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"53\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"53\", \"54\"))\n edges.append((\"53\", \"55\"))\n edges.append((\"81\", \"56\"))\n edges.append((\"56\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"56\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"56\", \"57\"))\n edges.append((\"56\", \"58\"))\n edges.append((\"81\", \"59\"))\n edges.append((\"59\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"59\", \"23\"))\n edges.append((\"23\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"23\", \"24\"))\n edges.append((\"23\", \"25\"))\n edges.append((\"59\", \"60\"))\n edges.append((\"59\", \"61\"))\n edges.append((\"81\", \"82\"))\n edges.append((\"82\", \"83\"))\n edges.append((\"83\", \"84\"))\n edges.append((\"83\", \"85\"))\n edges.append((\"83\", \"86\"))\n edges.append((\"83\", \"87\"))\n edges.append((\"83\", \"88\"))\n edges.append((\"83\", \"89\"))\n edges.append((\"83\", \"90\"))\n edges.append((\"82\", \"91\"))\n edges.append((\"91\", \"8\"))\n edges.append((\"8\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"8\", \"9\"))\n edges.append((\"8\", \"10\"))\n edges.append((\"91\", \"11\"))\n edges.append((\"11\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"11\", \"12\"))\n edges.append((\"11\", \"13\"))\n edges.append((\"91\", \"92\"))\n edges.append((\"91\", \"93\"))\n edges.append((\"82\", \"94\"))\n edges.append((\"82\", \"95\"))\n edges.append((\"82\", \"96\"))\n edges.append((\"82\", \"97\"))\n edges.append((\"81\", \"98\"))\n edges.append((\"98\", \"82\"))\n edges.append((\"82\", \"83\"))\n edges.append((\"83\", \"84\"))\n edges.append((\"83\", \"85\"))\n edges.append((\"83\", \"86\"))\n edges.append((\"83\", \"87\"))\n edges.append((\"83\", \"88\"))\n edges.append((\"83\", \"89\"))\n edges.append((\"83\", \"90\"))\n edges.append((\"82\", \"91\"))\n edges.append((\"91\", \"8\"))\n edges.append((\"8\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"8\", \"9\"))\n edges.append((\"8\", \"10\"))\n edges.append((\"91\", \"11\"))\n edges.append((\"11\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"11\", \"12\"))\n edges.append((\"11\", \"13\"))\n edges.append((\"91\", \"92\"))\n edges.append((\"91\", \"93\"))\n edges.append((\"82\", \"94\"))\n edges.append((\"82\", \"95\"))\n edges.append((\"82\", \"96\"))\n edges.append((\"82\", \"97\"))\n edges.append((\"98\", \"99\"))\n edges.append((\"81\", \"100\"))\n edges.append((\"81\", \"101\"))\n edges.append((\"81\", \"102\"))\n edges.append((\"103\", \"104\"))\n edges.append((\"80\", \"105\"))\n edges.append((\"79\", \"106\"))\n edges.append((\"76\", \"107\"))\n edges.append((\"76\", \"108\"))\n edges.append((\"2\", \"109\"))\n for (a, b) in edges:\n if a in ppg.global_pipegraph.jobs and b in 
ppg.global_pipegraph.jobs:\n ppg.global_pipegraph.jobs[a].depends_on(ppg.global_pipegraph.jobs[b])\n\n ppg.run()\n ppg.run()\n edges.append((\"8\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"8\", \"9\"))\n edges.append((\"8\", \"10\"))\n edges.append((\"91\", \"11\"))\n edges.append((\"11\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"11\", \"12\"))\n edges.append((\"11\", \"13\"))\n edges.append((\"91\", \"92\"))\n edges.append((\"91\", \"93\"))\n edges.append((\"82\", \"94\"))\n edges.append((\"82\", \"95\"))\n edges.append((\"82\", \"96\"))\n edges.append((\"82\", \"97\"))\n edges.append((\"81\", \"98\"))\n edges.append((\"98\", \"82\"))\n edges.append((\"82\", \"83\"))\n edges.append((\"83\", \"84\"))\n edges.append((\"83\", \"85\"))\n edges.append((\"83\", \"86\"))\n edges.append((\"83\", \"87\"))\n edges.append((\"83\", \"88\"))\n edges.append((\"83\", \"89\"))\n edges.append((\"83\", \"90\"))\n edges.append((\"82\", \"91\"))\n edges.append((\"91\", \"8\"))\n edges.append((\"8\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"8\", \"9\"))\n edges.append((\"8\", \"10\"))\n edges.append((\"91\", \"11\"))\n edges.append((\"11\", \"5\"))\n edges.append((\"5\", \"6\"))\n edges.append((\"5\", \"7\"))\n edges.append((\"11\", \"12\"))\n edges.append((\"11\", \"13\"))\n edges.append((\"91\", \"92\"))\n edges.append((\"91\", \"93\"))\n edges.append((\"82\", \"94\"))\n edges.append((\"82\", \"95\"))\n edges.append((\"82\", \"96\"))\n edges.append((\"82\", \"97\"))\n edges.append((\"98\", \"99\"))\n edges.append((\"81\", \"100\"))\n edges.append((\"81\", \"101\"))\n edges.append((\"81\", \"102\"))\n edges.append((\"103\", \"104\"))\n edges.append((\"80\", \"105\"))\n edges.append((\"79\", \"106\"))\n edges.append((\"76\", \"107\"))\n edges.append((\"76\", \"108\"))\n edges.append((\"2\", \"109\"))\n for (a, b) in edges:\n if a in ppg.global_pipegraph.jobs and b in ppg.global_pipegraph.jobs:\n ppg.global_pipegraph.jobs[a].depends_on(ppg.global_pipegraph.jobs[b])\n\n ppg.run()\n ppg.run()\n\n def test_20210729(self, job_trace_log):\n def build():\n jobs_by_no = {}\n\n job_0 = ppg.FileGeneratingJob(\"0\", dummy_fg, depend_on_function=False)\n jobs_by_no[\"0\"] = job_0\n\n job_1 = ppg.DataLoadingJob(\"1\", lambda: 55, depend_on_function=False)\n jobs_by_no[\"1\"] = job_1\n\n job_530 = ppg.DataLoadingJob(\"530\", lambda: 55, depend_on_function=False)\n jobs_by_no[\"530\"] = job_530\n\n job_541 = ppg.DataLoadingJob(\"541\", lambda: 55, depend_on_function=False)\n jobs_by_no[\"541\"] = job_541\n\n job_542 = ppg.FileGeneratingJob(\"542\", dummy_fg, depend_on_function=False)\n jobs_by_no[\"542\"] = job_542\n\n edges = [\n (\"0\", \"541\"),\n (\"0\", \"530\"),\n (\"542\", \"530\"),\n (\"542\", \"1\"),\n (\"541\", \"1\"),\n (\"541\", \"542\"),\n ]\n\n ok_edges = []\n for (a, b) in edges:\n if a in jobs_by_no and b in jobs_by_no:\n jobs_by_no[a].depends_on(jobs_by_no[b])\n ok_edges.append((a, b))\n\n ppg.new(allow_short_filenames=True)\n build()\n ppg.run()\n ppg.new(allow_short_filenames=True, log_level=6)\n build()\n ppg.run()\n\n def test_ttcc(self):\n \"\"\"The absolute minimal two terminal jobs, two conditional ones,\n each T depending on each C, cross over\n \"\"\"\n t1 = ppg.FileGeneratingJob(\n \"t1\", lambda of: of.write_text(of.name) and counter(\"ct1\")\n )\n t2 = ppg.FileGeneratingJob(\n \"t2\", lambda of: of.write_text(of.name) and counter(\"ct2\")\n )\n # these do 
not invalidate...\n c1 = ppg.DataLoadingJob(\"c1\", lambda: 1, depend_on_function=False)\n c2 = ppg.DataLoadingJob(\"c2\", lambda: 1, depend_on_function=False)\n\n t1.depends_on(c1, c2)\n t2.depends_on(c1, c2)\n ppg.run()\n assert read(\"t1\") == \"t1\"\n assert read(\"ct1\") == \"1\"\n ppg.run()\n assert read(\"ct1\") == \"1\"\n assert read(\"ct1\") == \"1\"\n\n Path(\"t1\").unlink()\n ppg.run()\n\n assert read(\"t1\") == \"t1\"\n assert read(\"ct1\") == \"2\"\n assert read(\"ct2\") == \"1\"\n\n c1 = ppg.DataLoadingJob(\"c1\", lambda: 2, depend_on_function=False)\n ppg.run()\n # we had no function invariant!\n assert read(\"ct1\") == \"2\"\n assert read(\"ct2\") == \"1\"\n Path(\n \"t2\"\n ).unlink() # but now it runs, and it invalidates, and they *both* need to rerun!\n ppg.run()\n assert read(\"ct1\") == \"3\"\n assert read(\"ct2\") == \"2\"\n\n ppg.run() # no change\n assert read(\"ct1\") == \"3\"\n assert read(\"ct2\") == \"2\"\n\n Path(\"t2\").unlink()\n ppg.run()\n assert read(\"ct2\") == \"3\" # only t2 gets run...\n\n def test_20211001(self, job_trace_log):\n do_fail = False\n\n def fail(): # fail on demand\n if do_fail:\n raise ValueError()\n\n job_3 = ppg.DataLoadingJob(\"3\", lambda: None, depend_on_function=False)\n job_48 = ppg.AttributeLoadingJob(\n \"48\", DummyObject(), \"attr_48\", fail, depend_on_function=False\n )\n job_61 = ppg.FileGeneratingJob(\"61\", dummy_fg, depend_on_function=False)\n job_67 = ppg.JobGeneratingJob(\"67\", lambda: None, depend_on_function=False)\n\n edges = []\n\n edges.append((\"61\", \"48\"))\n edges.append((\"67\", \"48\"))\n edges.append((\"61\", \"3\"))\n\n for (a, b) in edges:\n if a in ppg.global_pipegraph.jobs and b in ppg.global_pipegraph.jobs:\n ppg.global_pipegraph.jobs[a].depends_on(ppg.global_pipegraph.jobs[b])\n\n ppg.run()\n ppg.run()\n do_fail = True\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert (\n ppg.global_pipegraph.last_run_result[\"48\"].outcome\n == ppg.enums.JobOutcome.Failed\n )\n assert (\n ppg.global_pipegraph.last_run_result[\"61\"].outcome\n == ppg.enums.JobOutcome.UpstreamFailed\n )\n assert (\n ppg.global_pipegraph.last_run_result[\"61\"].outcome\n == ppg.enums.JobOutcome.UpstreamFailed\n )\n assert (\n ppg.global_pipegraph.last_run_result[\"3\"].outcome\n == ppg.enums.JobOutcome.Skipped\n )\n\n def test_20211221(self):\n global do_fail\n do_fail = [False]\n # ppg.new(log_level=20)\n gen_20211221(lambda: 55)\n ppg.run()\n assert Path(\"651\").exists()\n ppg.new(log_level=5)\n do_fail[0] = True\n gen_20211221(lambda: 56)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert not Path(\"651\").exists()\n assert (\n ppg.global_pipegraph.last_run_result[\"651\"].outcome\n == ppg.enums.JobOutcome.Failed\n )\n assert (\n ppg.global_pipegraph.last_run_result[\"1079\"].outcome\n == ppg.enums.JobOutcome.UpstreamFailed\n )\n assert (\n ppg.global_pipegraph.last_run_result[\"1096\"].outcome\n == ppg.enums.JobOutcome.UpstreamFailed\n )\n assert (\n ppg.global_pipegraph.last_run_result[\"661\"].outcome\n == ppg.enums.JobOutcome.Skipped\n )\n\n\ndef gen_20211221(func):\n\n global do_fail\n\n def dummy_fg_fail(of):\n global do_fail\n if do_fail[0]:\n raise ValueError()\n of.parent.mkdir(exist_ok=True, parents=True)\n of.write_text(\"fg\")\n\n # debugged job PIGenes_KD5MA closest genes_parent\n # debugged job load_cache/GenomicRegions/H4ac_ISX_specific/calc\n\n job_650 = ppg.DataLoadingJob(\"650\", lambda: 35, depend_on_function=False)\n # debugged job cache/GenomicRegions/H4ac_ISX_specific/calc\n job_651 = 
ppg.FileGeneratingJob(\"651\", dummy_fg_fail, depend_on_function=False)\n job_651.depends_on(ppg.FunctionInvariant(\"shu\", func))\n job_661 = ppg.DataLoadingJob(\"661\", lambda: 35, depend_on_function=False)\n job_1079 = ppg.FileGeneratingJob(\"1079\", dummy_fg, depend_on_function=False)\n job_1096 = ppg.DataLoadingJob(\"1096\", lambda: 35, depend_on_function=False)\n\n cjobs_by_no = {}\n for k, v in locals().items():\n if k.startswith(\"job_\"):\n no = k[k.find(\"_\") + 1 :]\n cjobs_by_no[no] = v\n edges = []\n ea = edges.append\n ea((\"1079\", \"1096\"))\n ea((\"1096\", \"650\"))\n ea((\"1096\", \"661\"))\n ea((\"650\", \"651\"))\n for (a, b) in edges:\n if a in cjobs_by_no and b in cjobs_by_no:\n cjobs_by_no[a].depends_on(cjobs_by_no[b])\n # print(f\"ea(('{a}', '{b}'))\")\n", "id": "9727659", "language": "Python", "matching_score": 3.7248001098632812, "max_stars_count": 0, "path": "tests/tests_from_the_field.py" }, { "content": "import pypipegraph2 as ppg\nfrom .shared import counter, read, write\nfrom pathlib import Path\nimport pytest\n\n\[email protected](\"ppg2_per_test\", \"job_trace_log\")\nclass TestBootstrap:\n \"\"\"Step by step, establish a working executing workflow for the ppg2\"\"\"\n\n def test_smallest(self):\n assert not Path(\"A\").exists()\n job = ppg.FileGeneratingJob(\n \"A\", lambda of: of.write_text(\"Done\"), depend_on_function=False\n )\n ppg.run()\n assert Path(\"A\").read_text() == \"Done\"\n\n def test_smallest_graph(self):\n assert not Path(\"A\").exists()\n\n def func(of):\n counter(\"a\")\n of.write_text(\"A\")\n\n job = ppg.FileGeneratingJob(\"A\", func, depend_on_function=False)\n ppg.run()\n assert Path(\"A\").read_text() == \"A\"\n assert read(\"a\") == \"1\"\n ppg.run()\n assert read(\"a\") == \"1\"\n Path(\"A\").unlink()\n ppg.run()\n assert read(\"a\") == \"2\"\n assert Path(\"A\").read_text() == \"A\"\n\n def test_smallest_with_invariant(self):\n def func(of):\n counter(\"a\")\n of.write_text(\"A\")\n\n job = ppg.FileGeneratingJob(\"A\", func, depend_on_function=False)\n job.depends_on_params(1)\n ppg.run()\n assert read(\"a\") == \"1\"\n assert Path(\"A\").read_text() == \"A\"\n ppg.run()\n assert read(\"a\") == \"1\"\n assert Path(\"A\").read_text() == \"A\"\n ppg.new()\n job = ppg.FileGeneratingJob(\"A\", func, depend_on_function=False)\n job.depends_on_params(2)\n ppg.run()\n assert read(\"a\") == \"2\"\n assert Path(\"A\").read_text() == \"A\"\n\n def test_chain(self):\n jobA = ppg.FileGeneratingJob(\n \"a\", lambda of: counter(\"A\") and write(\"a\", \"a\"), depend_on_function=False\n )\n jobB = ppg.FileGeneratingJob(\n \"b\",\n lambda of: counter(\"B\") and write(\"b\", read(\"a\") + \"b\"),\n depend_on_function=False,\n )\n jobC = ppg.FileGeneratingJob(\n \"c\",\n lambda of: counter(\"C\") and write(\"c\", read(\"b\") + \"c\"),\n depend_on_function=False,\n )\n jobC.depends_on(jobB)\n jobB.depends_on(jobA)\n ppg.run()\n assert read(\"A\") == \"1\"\n assert read(\"B\") == \"1\"\n assert read(\"C\") == \"1\"\n assert read(\"a\") == \"a\"\n assert read(\"b\") == \"ab\"\n assert read(\"c\") == \"abc\"\n ppg.run()\n assert read(\"A\") == \"1\"\n assert read(\"B\") == \"1\"\n assert read(\"C\") == \"1\"\n assert read(\"a\") == \"a\"\n assert read(\"b\") == \"ab\"\n assert read(\"c\") == \"abc\"\n\n Path(\"a\").unlink()\n ppg.run()\n assert read(\"a\") == \"a\"\n assert read(\"b\") == \"ab\"\n assert read(\"c\") == \"abc\"\n assert read(\"A\") == \"2\"\n assert read(\"B\") == \"1\"\n assert read(\"C\") == \"1\"\n\n Path(\"b\").unlink()\n 
ppg.run()\n assert read(\"a\") == \"a\"\n assert read(\"b\") == \"ab\"\n assert read(\"c\") == \"abc\"\n assert read(\"A\") == \"2\"\n assert read(\"B\") == \"2\"\n assert read(\"C\") == \"1\"\n\n Path(\"c\").unlink()\n ppg.run()\n assert read(\"a\") == \"a\"\n assert read(\"b\") == \"ab\"\n assert read(\"c\") == \"abc\"\n assert read(\"A\") == \"2\"\n assert read(\"B\") == \"2\"\n assert read(\"C\") == \"2\"\n\n Path(\"a\").unlink()\n Path(\"b\").unlink()\n ppg.run()\n assert read(\"a\") == \"a\"\n assert read(\"b\") == \"ab\"\n assert read(\"c\") == \"abc\"\n assert read(\"A\") == \"3\"\n assert read(\"B\") == \"3\"\n assert read(\"C\") == \"2\"\n\n def test_two_inputs(self):\n jobA = ppg.FileGeneratingJob(\n \"a\", lambda of: counter(\"A\") and write(of, \"a\"), depend_on_function=False\n )\n jobB = ppg.FileGeneratingJob(\n \"b\", lambda of: counter(\"B\") and write(of, \"b\"), depend_on_function=False\n )\n jobC = ppg.FileGeneratingJob(\n \"c\",\n lambda of: counter(\"C\") and write(of, read(\"a\") + read(\"b\") + \"c\"),\n depend_on_function=False,\n )\n jobC.depends_on(jobA, jobB)\n ppg.run()\n assert read(\"A\") == \"1\"\n assert read(\"B\") == \"1\"\n assert read(\"C\") == \"1\"\n assert read(\"c\") == \"abc\"\n ppg.run()\n assert read(\"A\") == \"1\"\n assert read(\"B\") == \"1\"\n assert read(\"C\") == \"1\"\n assert read(\"c\") == \"abc\"\n Path(\"c\").unlink()\n ppg.run()\n assert read(\"A\") == \"1\"\n assert read(\"B\") == \"1\"\n assert read(\"C\") == \"2\"\n Path(\"a\").unlink()\n ppg.run()\n assert read(\"A\") == \"2\" # rebuild\n assert read(\"B\") == \"1\"\n assert read(\"C\") == \"2\" # not run, since the upstream did not invalidate.\n\n def test_data_loading_without_func_invariant(self):\n out = []\n a = ppg.DataLoadingJob(\n \"a\", lambda: counter(\"A\") and out.append(\"a\"), depend_on_function=False\n )\n b = ppg.FileGeneratingJob(\n \"b\",\n lambda of, out=out: counter(\"B\") and of.write_text(out[0] + \"b\"),\n depend_on_function=False,\n )\n b.depends_on(a)\n ppg.run()\n assert read(\"A\") == \"1\"\n assert read(\"B\") == \"1\"\n assert read(\"b\") == \"ab\"\n ppg.run()\n assert read(\"A\") == \"1\"\n assert read(\"B\") == \"1\"\n assert read(\"b\") == \"ab\"\n ppg.run()\n Path(\"b\").unlink()\n ppg.run()\n assert read(\"A\") == \"2\"\n assert read(\"B\") == \"2\"\n assert read(\"b\") == \"ab\"\n\n def test_data_loading(self):\n out = []\n a = ppg.DataLoadingJob(\n \"a\", lambda: counter(\"A\") and out.append(\"a\"), depend_on_function=False\n )\n b = ppg.FileGeneratingJob(\n \"b\",\n lambda of, out=out: counter(\"B\") and of.write_text(out[0] + \"b\"),\n depend_on_function=True,\n )\n b.depends_on(a)\n ppg.run()\n assert read(\"A\") == \"1\"\n assert read(\"B\") == \"1\"\n assert read(\"b\") == \"ab\"\n ppg.run()\n assert read(\"A\") == \"1\"\n assert read(\"B\") == \"1\"\n assert read(\"b\") == \"ab\"\n ppg.run()\n Path(\"b\").unlink()\n ppg.run()\n assert read(\"A\") == \"2\"\n assert read(\"B\") == \"2\"\n assert read(\"b\") == \"ab\"\n\n def test_two_data_loading_chain(self):\n out = []\n a = ppg.DataLoadingJob(\n \"a\", lambda: counter(\"A\") and out.append(\"a\"), depend_on_function=False\n )\n b = ppg.DataLoadingJob(\n \"b\",\n lambda: counter(\"B\") and out.append(out[0] + \"b\"),\n depend_on_function=False,\n )\n c = ppg.FileGeneratingJob('c',\n lambda of: counter('C') and of.write_text(out[1] + 'c'),\n depend_on_function=False)\n c.depends_on(b)\n b.depends_on(a)\n ppg.run()\n assert read('c') == 'abc'\n assert read('A') == '1'\n assert read('B') == 
'1'\n assert read('C') == '1'\n ppg.run()\n assert read('c') == 'abc'\n assert read('A') == '1'\n assert read('B') == '1'\n assert read('C') == '1'\n Path('c').unlink()\n ppg.run()\n assert read('c') == 'abc'\n assert read('A') == '2'\n assert read('B') == '2'\n assert read('C') == '2'\n", "id": "8463537", "language": "Python", "matching_score": 3.1313765048980713, "max_stars_count": 0, "path": "tests/test_bootstrap.py" }, { "content": "from .shared import counter, read\nfrom pathlib import Path\nimport pypipegraph2 as ppg\nimport pytest\n\n\[email protected](\"ppg2_per_test\")\nclass TestCallSyntax:\n def test_simple(self):\n a = ppg.FileGeneratingJob(\"A\", lambda of: counter(\"a\") and of.write_text(\"A\"))\n b = ppg.FileGeneratingJob(\"B\", lambda of: counter(\"b\") and of.write_text(\"B\"))\n assert a() == [Path(\"A\")]\n assert read(\"A\") == \"A\"\n assert not Path(\"B\").exists()\n assert read(\"a\") == \"1\"\n assert not Path(\"b\").exists()\n b()\n assert read(\"B\") == \"B\"\n assert read(\"a\") == \"1\"\n assert read(\"b\") == \"1\"\n\n def test_downstream(self):\n a = ppg.MultiFileGeneratingJob(\n {\"a\": \"A\"}, lambda of: counter(\"a\") and of[\"a\"].write_text(\"A\")\n )\n b = ppg.FileGeneratingJob(\"B\", lambda of: counter(\"b\") and of.write_text(\"B\"))\n b.depends_on(a)\n assert set(a().keys()) == set([\"a\"])\n assert read(\"A\") == \"A\"\n assert not Path(\"B\").exists()\n assert read(\"a\") == \"1\"\n assert not Path(\"b\").exists()\n b()\n assert read(\"B\") == \"B\"\n assert read(\"a\") == \"1\"\n assert read(\"b\") == \"1\"\n Path(\"A\").unlink()\n b()\n assert read(\"B\") == \"B\"\n assert read(\"a\") == \"2\"\n assert read(\"b\") == \"1\" # was shielded\n\n def test_job_is_pruned(self):\n a = ppg.FileGeneratingJob(\"A\", lambda of: counter(\"a\") and of.write_text(\"A\"))\n b = ppg.FileGeneratingJob(\"B\", lambda of: counter(\"b\") and of.write_text(\"B\"))\n a.prune()\n a()\n assert read(\"A\") == \"A\"\n assert not Path(\"B\").exists()\n assert read(\"a\") == \"1\"\n assert not Path(\"b\").exists()\n b()\n assert read(\"B\") == \"B\"\n assert read(\"a\") == \"1\"\n assert read(\"b\") == \"1\"\n\n def test_plot_job(self):\n import pandas as pd\n import plotnine\n\n def calc():\n return pd.DataFrame({\"X\": list(range(0, 100)), \"Y\": list(range(50, 150))})\n\n def plot(df):\n p = plotnine.ggplot(df)\n p = p + plotnine.geom_point(plotnine.aes(\"X\", \"Y\"))\n return p\n\n of = \"A.png\"\n p, c, t = ppg.PlotJob(of, calc, plot)\n pout = p()\n assert Path(of).exists()\n assert isinstance(pout, plotnine.ggplot)\n ppg.new()\n p, c, t = ppg.PlotJob(of, calc, plot)\n pout = p()\n assert isinstance(pout, plotnine.ggplot)\n", "id": "7679263", "language": "Python", "matching_score": 2.7361369132995605, "max_stars_count": 0, "path": "tests/test_interactive.py" }, { "content": "from pathlib import Path\nimport pytest\nimport pypipegraph2 as ppg\nfrom .shared import write, read\n\n\[email protected](\"ppg2_per_test\")\nclass TestPruning:\n def test_basic_prune(self):\n ppg.FileGeneratingJob(\"A\", lambda of: write(\"A\", \"A\"))\n b = ppg.FileGeneratingJob(\"B\", lambda of: write(\"B\", \"B\"))\n b.prune()\n ppg.run()\n assert Path(\"A\").read_text() == \"A\"\n assert not Path(\"B\").exists()\n\n def test_basic_prune2(self):\n a = ppg.FileGeneratingJob(\"A\", lambda of: write(\"A\", \"A\"))\n b = ppg.FileGeneratingJob(\"B\", lambda of: write(\"B\", \"B\"))\n b.depends_on(a)\n b.prune()\n ppg.run()\n assert Path(\"A\").read_text() == \"A\"\n assert not 
Path(\"B\").exists()\n\n def test_basic_prune3(self):\n a = ppg.FileGeneratingJob(\"A\", lambda of: write(\"A\", \"A\"))\n b = ppg.FileGeneratingJob(\"B\", lambda of: write(\"B\", \"B\"))\n c = ppg.FileGeneratingJob(\"C\", lambda of: write(\"C\", \"C\"))\n d = ppg.FileGeneratingJob(\"D\", lambda of: write(\"D\", \"D\"))\n b.depends_on(a)\n b.prune()\n c.depends_on(b) # that is ok, pruning happens after complete build.\n d.depends_on(a)\n ppg.run()\n assert Path(\"A\").read_text() == \"A\"\n assert Path(\"D\").read_text() == \"D\"\n assert not Path(\"B\").exists()\n assert not Path(\"C\").exists()\n assert c.prune_reason == b.job_id\n ppg.run() # just so we recurse_prune again.\n\n def test_tempfile_not_run_on_prune(self):\n a = ppg.TempFileGeneratingJob(\"A\", lambda of: write(\"A\", \"A\"))\n b = ppg.FileGeneratingJob(\"B\", lambda of: write(\"B\", \"B\" + read(\"A\")))\n b.depends_on(a)\n b.prune()\n ppg.run()\n assert not Path(\"B\").exists()\n assert not Path(\"A\").exists()\n\n def test_tempfile_still_run_if_needed_for_other(self):\n a = ppg.TempFileGeneratingJob(\"A\", lambda of: write(\"A\", \"A\"))\n b = ppg.FileGeneratingJob(\"B\", lambda of: write(\"B\", \"B\" + read(\"A\")))\n c = ppg.FileGeneratingJob(\"C\", lambda of: write(\"C\", \"C\" + read(\"A\")))\n b.depends_on(a)\n c.depends_on(a)\n b.prune()\n ppg.run()\n assert not Path(\"B\").exists()\n assert Path(\"C\").exists()\n assert Path(\"C\").read_text() == \"CA\"\n assert not Path(\"A\").exists()\n\n def test_basic_prune_unprune(self):\n ppg.FileGeneratingJob(\"A\", lambda of: write(\"A\", \"A\"))\n b = ppg.FileGeneratingJob(\"B\", lambda of: write(\"B\", \"B\"))\n b.prune()\n ppg.run()\n assert Path(\"A\").read_text() == \"A\"\n assert not Path(\"B\").exists()\n b.unprune()\n ppg.run()\n assert Path(\"A\").read_text() == \"A\"\n assert read(\"B\") == \"B\"\n", "id": "11255567", "language": "Python", "matching_score": 2.9016425609588623, "max_stars_count": 0, "path": "tests/test_prune.py" }, { "content": "from pathlib import Path\nimport time\nfrom loguru import logger\nimport pytest\nimport pypipegraph2 as ppg\nfrom pypipegraph2.runner import JobOutcome\nfrom .shared import counter, write, read\n\n\[email protected](\"ppg2_per_test\")\nclass TestPypipegraph2:\n def test_very_simple(self):\n assert not Path(\"A\").exists()\n job = ppg.FileGeneratingJob(\"A\", lambda of: of.write_text(\"Done\"))\n ppg.run()\n assert Path(\"A\").read_text() == \"Done\"\n\n def test_very_simple_chain(self):\n assert not Path(\"A\").exists()\n assert not Path(\"B\").exists()\n jobA = ppg.FileGeneratingJob(\"A\", lambda of: of.write_text(\"AAA\"))\n jobB = ppg.FileGeneratingJob(\n \"B\", lambda of: of.write_text(\"BBB\" + Path(\"A\").read_text())\n )\n jobB.depends_on(jobA)\n ppg.run()\n assert Path(\"A\").read_text() == \"AAA\"\n assert Path(\"B\").read_text() == \"BBBAAA\"\n\n def test_very_simple_chain_reverse(self):\n assert not Path(\"A\").exists()\n assert not Path(\"B\").exists()\n jobB = ppg.FileGeneratingJob(\n \"B\", lambda of: of.write_text(\"BBB\" + Path(\"A\").read_text())\n )\n jobA = ppg.FileGeneratingJob(\"A\", lambda of: of.write_text(\"AAA\"))\n jobB.depends_on(jobA)\n ppg.run()\n assert Path(\"A\").read_text() == \"AAA\"\n assert Path(\"B\").read_text() == \"BBBAAA\"\n\n def test_very_simple_chain_rerun(self, job_trace_log):\n assert not Path(\"A\").exists()\n assert not Path(\"B\").exists()\n counter = 0\n jobA = ppg.FileGeneratingJob(\"A\", lambda of: of.write_text(f\"{counter}\"))\n jobB = ppg.FileGeneratingJob(\n \"B\", 
lambda of: of.write_text(\"BBB\" + Path(\"A\").read_text())\n )\n jobB.depends_on(jobA)\n ppg.run()\n assert Path(\"B\").read_text() == \"BBB0\"\n ppg.run()\n assert Path(\"B\").read_text() == \"BBB0\"\n assert Path(\"A\").read_text() == \"0\"\n Path(\"A\").unlink()\n counter = 1\n ppg.run()\n assert Path(\"A\").read_text() == \"1\"\n assert Path(\"B\").read_text() == \"BBB1\"\n\n def test_isolation(self, trace_log):\n assert not Path(\"B\").exists()\n assert not Path(\"C\").exists()\n\n def b(of):\n of.write_text(\"BBB\")\n count = Path(\"counter\").read_text()\n Path(\"outcount\").write_text(count)\n\n jobB = ppg.FileGeneratingJob(\"B\", b)\n jobC = ppg.FileGeneratingJob(\n \"C\", lambda of: of.write_text(\"CCC\" + Path(\"outcount\").read_text())\n )\n jobC.depends_on(jobB)\n Path(\"counter\").write_text(\"0\")\n logger.error(\"Run 1\")\n ppg.run()\n assert Path(\"B\").read_text() == \"BBB\"\n assert Path(\"C\").read_text() == \"CCC0\"\n assert Path(\"outcount\").read_text() == \"0\"\n logger.error(\"Run 2 no rerun\")\n ppg.run()\n assert Path(\"B\").read_text() == \"BBB\"\n assert Path(\"C\").read_text() == \"CCC0\"\n assert Path(\"outcount\").read_text() == \"0\"\n\n Path(\"counter\").write_text(\"1\")\n logger.error(\"Run 3 - no rerun\")\n ppg.run() # since the counter is *not* a dependency...\n assert Path(\"B\").read_text() == \"BBB\"\n assert Path(\"C\").read_text() == \"CCC0\"\n assert Path(\"outcount\").read_text() == \"0\"\n\n Path(\"B\").unlink() # will make it rerun.\n logger.error(\"Run 4 - rerun B but not C\")\n ppg.run()\n assert Path(\"outcount\").read_text() == \"1\"\n # but C was not rerun, since the B output did not change.\n assert Path(\"C\").read_text() == \"CCC0\"\n\n def test_changing_inputs(self):\n ppg.new(run_mode=ppg.RunMode.NOTEBOOK)\n jobA = ppg.FileGeneratingJob(\"A\", lambda of: of.write_text(\"A\"))\n jobB = ppg.FileGeneratingJob(\n \"B\", lambda of: of.write_text(\"B\" + Path(\"A\").read_text())\n )\n assert not Path(\"A\").exists()\n assert not Path(\"B\").exists()\n jobB.depends_on(jobA)\n logger.warning(\"first run\")\n ppg.run()\n assert Path(\"A\").read_text() == \"A\"\n assert Path(\"B\").read_text() == \"BA\"\n jobA = ppg.FileGeneratingJob(\"A\", lambda of: of.write_text(\"c\"))\n logger.warning(\"Change run\")\n ppg.run()\n assert Path(\"A\").read_text() == \"c\"\n assert Path(\"B\").read_text() == \"Bc\"\n\n def test_changing_inputs_when_job_was_temporarily_missing(self):\n jobA = ppg.FileGeneratingJob(\n \"A\", lambda of: counter(\"a\") and of.write_text(\"AAA\")\n )\n jobB = ppg.FileGeneratingJob(\n \"B\", lambda of: of.write_text(\"BBB\" + Path(\"A\").read_text())\n )\n jobB.depends_on(jobA)\n ppg.run()\n assert Path(\"A\").read_text() == \"AAA\"\n assert Path(\"B\").read_text() == \"BBBAAA\"\n assert Path(\"a\").read_text() == \"1\"\n ppg.new()\n assert not \"A\" in ppg.global_pipegraph.jobs\n assert not \"B\" in ppg.global_pipegraph.jobs\n jobA = ppg.FileGeneratingJob(\n \"A\", lambda of: counter(\"a\") and of.write_text(\"AAAA\")\n )\n assert not \"B\" in ppg.global_pipegraph.jobs\n ppg.run()\n assert Path(\"A\").read_text() == \"AAAA\"\n assert Path(\"B\").read_text() == \"BBBAAA\" # not rerun\n assert Path(\"a\").read_text() == \"2\"\n ppg.new()\n jobA = ppg.FileGeneratingJob(\n \"A\", lambda of: counter(\"a\") and of.write_text(\"AAAA\")\n )\n jobB = ppg.FileGeneratingJob(\n \"B\", lambda of: of.write_text(\"BBB\" + Path(\"A\").read_text())\n )\n ppg.run()\n assert Path(\"a\").read_text() == \"2\"\n assert Path(\"B\").read_text() == 
\"BBBAAAA\" # correctly rerun\n\n def test_changing_bound_variables(self):\n ppg.new(run_mode=ppg.RunMode.NOTEBOOK)\n varA = \"hello\"\n jobA = ppg.FileGeneratingJob(\"A\", lambda of, varA=varA: of.write_text(varA))\n ppg.run()\n assert Path(\"A\").read_text() == \"hello\"\n\n varA = \"world\"\n jobA = ppg.FileGeneratingJob(\"A\", lambda of, varA=varA: of.write_text(varA))\n ppg.run()\n assert Path(\"A\").read_text() == \"hello\"\n\n def test_capturing_closures(self):\n ppg.new(run_mode=ppg.RunMode.NOTEBOOK)\n varA = [\"hello\"]\n jobA = ppg.FileGeneratingJob(\"A\", lambda of: of.write_text(str(varA)))\n ppg.run()\n assert Path(\"A\").read_text() == str([\"hello\"])\n\n varA.append(\"world\")\n jobA = ppg.FileGeneratingJob(\"A\", lambda of: of.write_text(str(varA)))\n ppg.run()\n assert Path(\"A\").read_text() == str([\"hello\", \"world\"])\n\n def test_failed_pruning(self):\n def a(of):\n raise ValueError()\n\n jobA = ppg.FileGeneratingJob(\"A\", a)\n jobB = ppg.FileGeneratingJob(\"B\", lambda of: of.write_text(\"B\"))\n jobC = ppg.FileGeneratingJob(\"C\", lambda of: of.write_text(\"C\"))\n jobB.depends_on(jobA)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert Path(\"C\").read_text() == \"C\"\n last = ppg.global_pipegraph.last_run_result\n print(last.keys())\n assert last[\"A\"].outcome == JobOutcome.Failed\n assert last[\"B\"].outcome == JobOutcome.UpstreamFailed\n assert last[\"C\"].outcome == JobOutcome.Success\n assert \"ValueError\" in str(last[\"A\"].error)\n\n def test_multi_file_generating_job(self):\n ppg.new(run_mode=ppg.RunMode.NOTEBOOK)\n\n assert counter(\"X\") == \"0\"\n # make sure the counter function does what it's supposed to\n assert counter(\"X\") == \"1\"\n\n def a(files):\n files[0].write_text(\"A1\")\n files[1].write_text(\"A2\")\n\n jobA = ppg.MultiFileGeneratingJob([\"A1\", \"A2\"], a)\n jobB = ppg.FileGeneratingJob(\n \"B\", lambda of: of.write_text(f\"B{counter('cB')}\"), depend_on_function=False\n )\n jobC = ppg.FileGeneratingJob(\n \"C\", lambda of: of.write_text(f\"C{counter('cC')}\"), depend_on_function=False\n )\n jobD = ppg.FileGeneratingJob(\n \"D\", lambda of: of.write_text(f\"D{counter('cD')}\"), depend_on_function=False\n )\n jobB.depends_on(\"A2\") # todo output must exist!\n jobC.depends_on(jobA)\n ppg.run()\n assert Path(\"A1\").read_text() == \"A1\"\n assert Path(\"A2\").read_text() == \"A2\"\n assert Path(\"B\").read_text() == \"B0\"\n assert Path(\"C\").read_text() == \"C0\"\n assert Path(\"D\").read_text() == \"D0\"\n\n logger.error(\"2nd no op run\")\n ppg.run()\n assert Path(\"A1\").read_text() == \"A1\"\n assert Path(\"A2\").read_text() == \"A2\"\n assert Path(\"B\").read_text() == \"B0\"\n assert Path(\"C\").read_text() == \"C0\"\n assert Path(\"D\").read_text() == \"D0\"\n\n def a(files):\n files[0].write_text(\"A1a\")\n files[1].write_text(\"A2\")\n\n jobA = ppg.MultiFileGeneratingJob([\"A1\", \"A2\"], a)\n logger.error(\"3rd run - run a, run c\")\n ppg.run()\n assert Path(\"A1\").read_text() == \"A1a\"\n assert Path(\"A2\").read_text() == \"A2\"\n assert Path(\"B\").read_text() == \"B0\" # does not get rewritten. 
It depends on A2\n assert (\n Path(\"C\").read_text() == \"C1\"\n ) # c get's rewritten, it depended on all of A\n assert Path(\"D\").read_text() == \"D0\"\n\n def test_tempfile(self, job_trace_log):\n jobA = ppg.TempFileGeneratingJob(\n \"TA\",\n lambda of: of.write_text(\"A\" + counter(\"a\")),\n depend_on_function=False,\n )\n jobB = ppg.FileGeneratingJob(\n \"B\",\n lambda of: of.write_text(\"B\" + counter(\"c\") + Path(\"TA\").read_text()),\n depend_on_function=False,\n )\n jobB.depends_on(jobA)\n ppg.run()\n assert not Path(\"TA\").exists()\n assert Path(\"B\").exists()\n assert Path(\"B\").read_text() == \"B0A0\"\n logger.error(\"Second run - no rerun\")\n ppg.run()\n assert not Path(\"TA\").exists()\n assert Path(\"B\").exists()\n assert Path(\"B\").read_text() == \"B0A0\"\n\n Path(\"B\").unlink()\n logger.error(\"Third run - B output missing\")\n ppg.run()\n assert not Path(\"TA\").exists()\n assert Path(\"B\").exists()\n assert Path(\"B\").read_text() == \"B1A1\"\n\n def test_tempfile_chained_invalidate_leaf(self):\n ppg.new(cores=1, log_level=6)\n jobA = ppg.TempFileGeneratingJob(\n \"TA\", lambda of: of.write_text(\"A\" + counter(\"a\")), depend_on_function=False\n )\n jobB = ppg.TempFileGeneratingJob(\n \"TB\",\n lambda of: of.write_text(\"B\" + counter(\"b\") + Path(\"TA\").read_text()),\n depend_on_function=False,\n )\n jobC = ppg.FileGeneratingJob(\n \"C\",\n lambda of: of.write_text(\"C\" + counter(\"c\") + Path(\"TB\").read_text()),\n depend_on_function=False,\n )\n jobC.depends_on(jobB)\n jobB.depends_on(jobA)\n ppg.util.log_error(\"First run\")\n ppg.run()\n assert not Path(\"TA\").exists()\n assert not Path(\"TB\").exists()\n assert Path(\"C\").read_text() == \"C0B0A0\"\n ppg.util.log_error(\"Second No op run.\")\n ppg.run()\n assert Path(\"C\").read_text() == \"C0B0A0\"\n assert not Path(\"TA\").exists()\n assert not Path(\"TB\").exists()\n\n jobC.depends_on(ppg.FunctionInvariant(lambda: 53, \"lambda_52\"))\n ppg.util.log_error(\"Third run - rerun because of FI\")\n ppg.run()\n assert Path(\"C\").read_text() == \"C1B1A1\"\n assert not Path(\"TA\").exists()\n assert not Path(\"TB\").exists()\n\n def test_tempfile_chained_invalidate_intermediate(self, trace_log):\n jobA = ppg.TempFileGeneratingJob(\n \"TA\", lambda of: of.write_text(\"A\" + counter(\"a\")), depend_on_function=False\n )\n jobB = ppg.TempFileGeneratingJob(\n \"TB\",\n lambda of: of.write_text(\"B\" + counter(\"b\") + Path(\"TA\").read_text()),\n depend_on_function=False,\n )\n jobC = ppg.FileGeneratingJob(\n \"C\",\n lambda of: of.write_text(\"C\" + counter(\"c\") + Path(\"TB\").read_text()),\n depend_on_function=False,\n )\n jobC.depends_on(jobB)\n jobB.depends_on(jobA)\n logger.error(\"First run\")\n ppg.run()\n assert not Path(\"TA\").exists()\n assert not Path(\"TB\").exists()\n assert Path(\"C\").read_text() == \"C0B0A0\"\n logger.error(\"Second No op run.\")\n ppg.run()\n assert Path(\"C\").read_text() == \"C0B0A0\"\n assert not Path(\"TA\").exists()\n assert not Path(\"TB\").exists()\n\n jobB.depends_on(ppg.FunctionInvariant(lambda: 53, \"lambda_52\"))\n logger.error(\"Third run - rerun because of FI\")\n ppg.run()\n assert Path(\"C\").read_text() == \"C1B1A1\"\n assert not Path(\"TA\").exists()\n assert not Path(\"TB\").exists()\n\n def test_just_a_tempfile(self, trace_log):\n jobA = ppg.TempFileGeneratingJob(\n \"TA\", lambda of: of.write_text(\"A\" + counter(\"a\")), depend_on_function=False\n )\n ppg.run()\n assert not Path(\"TA\").exists()\n assert not Path(\"a\").exists()\n\n def 
test_just_chained_tempfile(self, trace_log):\n jobA = ppg.TempFileGeneratingJob(\n \"TA\", lambda of: of.write_text(\"A\" + counter(\"a\"))\n )\n jobB = ppg.TempFileGeneratingJob(\n \"B\", lambda of: of.write_text(\"B\" + counter(\"b\") + Path(\"TA\").read_text())\n )\n jobB.depends_on(jobA)\n ppg.run()\n assert not Path(\"TA\").exists()\n assert not Path(\n \"a\"\n ).exists() # changed with the smarter hull stuff - they don't run for sideeffects-and-giggles\n assert not Path(\"B\").exists()\n assert not Path(\"b\").exists()\n\n def test_just_chained_tempfile_no_invariant(self, trace_log):\n jobA = ppg.TempFileGeneratingJob(\n \"TA\", lambda of: of.write_text(\"A\" + counter(\"a\")), depend_on_function=False\n )\n ppg.run()\n assert not Path(\"TA\").exists()\n assert not Path(\"a\").exists()\n\n def test_just_chained_tempfile3(self, trace_log):\n jobA = ppg.TempFileGeneratingJob(\n \"A\", lambda of: of.write_text(\"A\" + counter(\"a\"))\n )\n jobB = ppg.TempFileGeneratingJob(\n \"B\", lambda of: of.write_text(\"B\" + counter(\"b\") + Path(\"A\").read_text())\n )\n jobC = ppg.TempFileGeneratingJob(\n \"C\", lambda of: of.write_text(\"C\" + counter(\"c\") + Path(\"B\").read_text())\n )\n jobB.depends_on(jobA)\n jobC.depends_on(jobB)\n\n ppg.run()\n assert not Path(\"A\").exists()\n assert not Path(\"a\").exists()\n assert not Path(\"B\").exists()\n assert not Path(\"b\").exists()\n assert not Path(\"C\").exists()\n assert not Path(\"c\").exists()\n\n def test_tempfile_triggered_by_invalidating_final_job(self, trace_log):\n jobA = ppg.TempFileGeneratingJob(\n \"TA\",\n lambda of: of.write_text(\"A\" + counter(\"a\")),\n depend_on_function=False,\n )\n jobB = ppg.TempFileGeneratingJob(\n \"TB\",\n lambda of: of.write_text(\"B\" + counter(\"b\") + Path(\"TA\").read_text()),\n depend_on_function=False,\n )\n jobC = ppg.FileGeneratingJob(\n \"C\",\n lambda of: of.write_text(\"C\" + counter(\"c\") + Path(\"TB\").read_text()),\n depend_on_function=False,\n )\n jobC.depends_on(jobB)\n jobB.depends_on(jobA)\n ppg.run()\n assert Path(\"C\").read_text() == \"C0B0A0\"\n jobC.depends_on(ppg.FunctionInvariant(lambda: 52, \"lambda_52\"))\n ppg.run()\n assert Path(\"C\").read_text() == \"C1B1A1\"\n\n def test_tempfile_triggered_by_invalidating_tempfile(self, trace_log):\n jobA = ppg.TempFileGeneratingJob(\n \"A\",\n lambda of: of.write_text(\"A\" + counter(\"a\")),\n depend_on_function=False,\n )\n jobB = ppg.TempFileGeneratingJob(\n \"B\",\n lambda of: of.write_text(\"B\" + counter(\"b\") + Path(\"A\").read_text()),\n depend_on_function=False,\n )\n jobC = ppg.FileGeneratingJob(\n \"C\",\n lambda of: of.write_text(\"C\" + counter(\"c\") + Path(\"B\").read_text()),\n depend_on_function=False,\n )\n jobC.depends_on(jobB)\n jobB.depends_on(jobA)\n ppg.run()\n assert Path(\"C\").read_text() == \"C0B0A0\"\n jobB.depends_on(ppg.FunctionInvariant(lambda: 52, \"lambda_52\"))\n ppg.run()\n assert Path(\"C\").read_text() == \"C1B1A1\"\n\n def test_last_invalidated_tempfile_isolation(self, trace_log):\n jobA = ppg.TempFileGeneratingJob(\n \"A\",\n lambda of: of.write_text(\"A\" + counter(\"a\")),\n depend_on_function=False,\n )\n jobB = ppg.TempFileGeneratingJob(\n \"B\",\n lambda of: counter(\"b\") and of.write_text(\"B\"),\n depend_on_function=False,\n )\n jobC = ppg.FileGeneratingJob(\n \"C\",\n lambda of: of.write_text(\"C\" + counter(\"c\") + Path(\"B\").read_text()),\n depend_on_function=False,\n )\n jobC.depends_on(jobB)\n jobB.depends_on(jobA)\n ppg.run()\n assert Path(\"C\").read_text() == \"C0B\"\n assert 
Path(\"a\").read_text() == \"1\"\n jobB.depends_on(ppg.FunctionInvariant(lambda: 52, \"lambda_52\"))\n ppg.run()\n assert Path(\"C\").read_text() == \"C0B\"\n assert Path(\"a\").read_text() == \"2\"\n\n def test_depending_on_two_temp_jobs_but_only_one_invalidated(self):\n jobA = ppg.TempFileGeneratingJob(\n \"A\",\n lambda of: of.write_text(\"A\" + counter(\"a\")),\n depend_on_function=False,\n )\n jobB = ppg.TempFileGeneratingJob(\n \"B\",\n lambda of: counter(\"b\") and of.write_text(\"B\"),\n depend_on_function=False,\n )\n jobC = ppg.FileGeneratingJob(\n \"C\",\n lambda of: of.write_text(\n \"C\" + counter(\"c\") + Path(\"B\").read_text() + Path(\"A\").read_text()\n ),\n depend_on_function=False,\n )\n jobC.depends_on(jobB)\n jobC.depends_on(jobA)\n ppg.run()\n assert Path(\"C\").read_text() == \"C0BA0\"\n assert Path(\"a\").read_text() == \"1\"\n\n jobB = ppg.TempFileGeneratingJob(\n \"B\",\n lambda of: counter(\"b\") and of.write_text(\"BB\"),\n depend_on_function=False,\n ) # not changing the function does not trigger a change\n\n ppg.run()\n assert Path(\"C\").read_text() == \"C0BA0\"\n assert Path(\"a\").read_text() == \"1\"\n\n jobB = ppg.TempFileGeneratingJob(\n \"B\",\n lambda of: counter(\"b\") and of.write_text(\"BB\"),\n depend_on_function=True,\n ) # but if you have a function invariant!\n ppg.run()\n\n assert Path(\"C\").read_text() == \"C1BBA1\"\n assert Path(\"a\").read_text() == \"2\"\n\n def test_tempjob_serving_two(self):\n jobA = ppg.TempFileGeneratingJob(\n \"TA\",\n lambda of: of.write_text(\"TA\" + counter(\"a\")),\n depend_on_function=False,\n )\n jobB = ppg.FileGeneratingJob(\n \"B\",\n lambda of: counter(\"b\") and of.write_text(\"B\" + Path(\"TA\").read_text()),\n depend_on_function=False,\n )\n jobC = ppg.FileGeneratingJob(\n \"C\",\n lambda of: of.write_text(\"C\" + counter(\"c\") + Path(\"TA\").read_text()),\n depend_on_function=False,\n )\n jobB.depends_on(jobA)\n jobC.depends_on(jobA)\n ppg.run()\n assert Path(\"B\").read_text() == \"BTA0\"\n assert Path(\"C\").read_text() == \"C0TA0\"\n assert Path(\"a\").read_text() == \"1\"\n ppg.run()\n assert Path(\"B\").read_text() == \"BTA0\"\n assert Path(\"C\").read_text() == \"C0TA0\"\n assert Path(\"a\").read_text() == \"1\"\n Path(\"B\").unlink()\n ppg.run()\n assert Path(\"B\").read_text() == \"BTA1\"\n assert Path(\"C\").read_text() == \"C1TA1\" # TA1 invalidates C when it runs.\n assert Path(\"a\").read_text() == \"2\"\n ppg.run()\n assert Path(\"B\").read_text() == \"BTA1\"\n assert Path(\"C\").read_text() == \"C1TA1\"\n assert Path(\"a\").read_text() == \"2\"\n Path(\"B\").unlink()\n Path(\"C\").unlink()\n ppg.run()\n assert Path(\"B\").read_text() == \"BTA2\"\n assert Path(\"C\").read_text() == \"C2TA2\"\n assert Path(\"a\").read_text() == \"3\"\n\n def test_two_temp_jobs(self, trace_log):\n \"\"\"test_two_temp_jobs\n This tests one of the 'unnecessary' temp job reruns.\n We have these jobs\n Fi:TA -> TA -> C\n ^\n Fi:TB -> TB -> D\n\n (todo: this full argument might have been invalidated\n by the non-transitive-hull changes?)\n\n which means, after the graph rewriting,\n TA and TB depend on each other's FunctionInvariants\n (TempJobs steal the invariants from their downstreams,\n so that whenever the downstream is triggered,\n they are as well, and before hand.)\n\n If now Fi:TB triggers, we must recalculate TB,\n and we also recalculate TA.\n But if TB does not not lead to C and D's invalidation,\n we have recalculated TA unnecessarily.\n\n But I can't figure out a better way to do it.\n Handling 
TempJobs by anything other than graph rewriting has\n proven to be an absolute mess of a conditional event loop that\n I'm not capable of cutting through.\n\n The graph rewriting is elegant and makes the do-the-jobs event loop\n almost trivial. It fails on this particular task though.\n Not that it is given that a back-and-forth graph walking approach\n (ie. when C is triggered, go back and (re)do TA) would be able to\n actually avoid the issue.\n \"\"\"\n ppg.new(run_mode=ppg.RunMode.NOTEBOOK)\n\n jobA = ppg.TempFileGeneratingJob(\n \"TA\", lambda of: counter(\"a\") and of.write_text(\"A\")\n )\n jobB = ppg.TempFileGeneratingJob(\n \"TB\", lambda of: counter(\"b\") and of.write_text(\"B\")\n )\n jobC = ppg.FileGeneratingJob(\n \"C\",\n lambda of: counter(\"c\")\n and of.write_text(\"C\" + Path(\"TA\").read_text() + Path(\"TB\").read_text()),\n )\n jobD = ppg.FileGeneratingJob(\n \"D\", lambda of: counter(\"d\") and of.write_text(\"D\" + Path(\"TB\").read_text())\n )\n jobC.depends_on(jobA, jobB)\n jobD.depends_on(jobB)\n ppg.run()\n assert Path(\"D\").read_text() == \"DB\"\n assert Path(\"C\").read_text() == \"CAB\"\n assert Path(\"a\").read_text() == \"1\"\n assert Path(\"b\").read_text() == \"1\"\n ppg.run()\n assert Path(\"a\").read_text() == \"1\"\n assert Path(\"b\").read_text() == \"1\"\n assert Path(\"c\").read_text() == \"1\"\n assert Path(\"d\").read_text() == \"1\"\n\n # now trigger TB invalidation, but not C (or D) invalidation\n logger.info(\"now change FunctionInvariant:TB\")\n jobB = ppg.TempFileGeneratingJob(\n \"TB\", lambda of: counter(\"b\") and True and of.write_text(\"B\")\n )\n ppg.run()\n assert Path(\"b\").read_text() == \"2\" # we trigger that one\n assert (\n Path(\"a\").read_text() == \"1\"\n ) # the FunctionInvariant:TB was pulled into TA's upstream by the rewrite\n assert Path(\"c\").read_text() == \"1\" # but this one was isolated\n assert Path(\"d\").read_text() == \"1\" # as was this one was isolated\n\n def test_cycles(self):\n jobA = ppg.FileGeneratingJob(\"A\", lambda of: of.write_text(\"Done\"))\n jobB = ppg.FileGeneratingJob(\"B\", lambda of: of.write_text(\"Done\"))\n jobC = ppg.FileGeneratingJob(\"C\", lambda of: of.write_text(\"Done\"))\n jobA.depends_on(jobB)\n with pytest.raises(ppg.exceptions.NotADag):\n jobB.depends_on(jobA) # simple one-step cycles: early !\n jobB.depends_on(jobC.depends_on(jobA))\n # bigger cycles: later\n with pytest.raises(ppg.exceptions.NotADag):\n ppg.run()\n\n def test_jobs_run_in_different_pids(self):\n import os\n\n pid_here = os.getpid()\n a = ppg.FileGeneratingJob(\"A\", lambda of: of.write_text(str(os.getpid())))\n b = ppg.FileGeneratingJob(\"B\", lambda of: of.write_text(str(os.getpid())))\n ppg.run()\n pid_a = Path(\"A\").read_text()\n pid_b = Path(\"B\").read_text()\n assert pid_a != pid_b\n assert pid_a != pid_here\n\n def test_temp_jobs_run_in_different_pids(self):\n import os\n\n pid_here = os.getpid()\n a = ppg.TempFileGeneratingJob(\n \"A\", lambda of: counter(\"A\") and Path(\"a\").write_text(str(os.getpid()))\n )\n b = ppg.TempFileGeneratingJob(\n \"B\", lambda of: counter(\"B\") and Path(\"b\").write_text(str(os.getpid()))\n )\n c = ppg.FileGeneratingJob(\"C\", lambda of: counter(\"C\"))\n c.depends_on(a, b)\n ppg.run()\n pid_a = Path(\"a\").read_text()\n pid_b = Path(\"b\").read_text()\n assert pid_a != pid_b\n assert pid_a != pid_here\n\n def test_temp_job_not_writing_its_file(self):\n import os\n\n pid_here = os.getpid()\n a = ppg.TempFileGeneratingJob(\n \"A\", lambda of: counter(\"A\") and 
Path(\"a\").write_text(str(os.getpid()))\n )\n b = ppg.TempFileGeneratingJob(\n \"B\", lambda of: counter(\"b\") and Path(\"b\").write_text(str(os.getpid()))\n ) # yes, it's planned that it doesn't write B, this exposed a bug\n c = ppg.FileGeneratingJob(\"C\", lambda of: counter(\"C\"))\n c.depends_on(a, b)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n last = ppg.global_pipegraph.last_run_result\n assert last[\"A\"].outcome == JobOutcome.Success\n assert last[\"B\"].outcome == JobOutcome.Failed\n assert last[\"C\"].outcome == JobOutcome.UpstreamFailed\n assert isinstance(last[\"B\"].error.args[0], ppg.JobContractError)\n\n def test_file_gen_when_file_existed_outside_of_graph_depending_on_cached_data_load(\n self,\n ):\n # this exposed a bug when the file was existing\n # the graph would never return.\n o = []\n\n def load(x):\n o.append(x)\n\n load_job, cache_job = ppg.CachedDataLoadingJob(\n \"b\", lambda: \"52\", load, depend_on_function=False\n )\n a = ppg.FileGeneratingJob(\"A\", lambda of: of.write_text(\"a\" + o[0]))\n a.depends_on(load_job)\n Path(\"b\").write_text(\"b\")\n ppg.run()\n assert Path(\"A\").read_text() == \"a52\"\n\n def test_event_timeout_handling(self):\n def doit(of):\n import time\n\n time.sleep(2)\n of.write_text(\"a\")\n\n job = ppg.FileGeneratingJob(\"a\", doit)\n ppg.run(event_timeout=0.1)\n assert Path(\"a\").exists()\n\n def test_catching_catastrophic_execution_message_passing_failures(\n self):\n import pickle\n\n class BadFileGeneratingJob(ppg.FileGeneratingJob):\n \"\"\"A file generating job that does not output_needed()= True\n if it has no history\n \"\"\"\n\n def output_needed(self, runner):\n for fn in self.files:\n if not fn.exists():\n return True\n # other wise we have no history, and the skipping will\n # break the graph execution\n # if str(fn) not in runner.job_outcome[self.job_id].historical_output:\n # return True\n return False\n\n # this exposed a bug when the file was existing\n # the graph would never return.\n o = []\n\n def load(x):\n o.append(x)\n\n old_fg = ppg.FileGeneratingJob\n try:\n ppg.jobs.FileGeneratingJob = BadFileGeneratingJob\n\n load_job, cache_job = ppg.CachedDataLoadingJob(\n \"b\", lambda: \"52\", load, depend_on_function=False\n )\n a = ppg.FileGeneratingJob(\"A\", lambda of: of.write_text(\"a\" + o[0]))\n a.depends_on(load_job)\n load_job2, cache_job2 = ppg.CachedDataLoadingJob(\n \"c\", lambda: \"52\", load, depend_on_function=False\n )\n d = ppg.FileGeneratingJob('D', lambda of: of.write_text('d' + o[-1]))\n d.depends_on(load_job2)\n Path('c').write_text('c')\n # write something sensible\n with open(\"b\", \"wb\") as op:\n pickle.dump('153', op)\n # now this does not get rewritten\n # because of the BadFileGeneratingJob\n assert type(cache_job) == BadFileGeneratingJob\n # with the new execution engine (JobOutcome based)\n # this is no longer an issue\n # at worst, you'll get a pickle failed error if the job dies\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert read('A') == 'a153'\n assert 'UnpicklingError' in str(load_job2.exception)\n finally:\n ppg.jobs.FileGeneratingJob = old_fg\n\n def test_changing_mtime_triggers_recalc_of_hash(self):\n import datetime\n import time\n import os\n\n write(\"A\", \"hello\")\n fi = ppg.FileInvariant(\"A\")\n of = ppg.FileGeneratingJob(\"B\", lambda of: write(of, read(\"A\")))\n of.depends_on(fi)\n info = ppg.run()\n\n assert fi.did_hash_last_run\n del fi.did_hash_last_run # so we detect if it's not run() at all\n ppg.run()\n assert fi.did_hash_last_run is False\n 
date = datetime.datetime(\n year=2020, month=12, day=12, hour=12, minute=12, second=12\n )\n modTime = time.mktime(date.timetuple())\n os.utime(\"A\", (modTime, modTime))\n info = ppg.run()\n assert fi.did_hash_last_run\n # and for good measure, check that B wasn't run\n assert info[\"B\"].outcome is JobOutcome.Skipped\n assert read(\"B\") == \"hello\"\n\n def test_same_mtime_same_size_leads_to_false_negative(self):\n import datetime\n import time\n import os\n\n write(\"A\", \"hello\")\n date = datetime.datetime(\n year=2020, month=12, day=12, hour=12, minute=12, second=12\n )\n modTime = time.mktime(date.timetuple())\n os.utime(\"A\", (modTime, modTime))\n\n fi = ppg.FileInvariant(\"A\")\n of = ppg.FileGeneratingJob(\"B\", lambda of: write(of, read(\"A\")))\n of.depends_on(fi)\n info = ppg.run()\n assert read(\"B\") == \"hello\"\n write(\"A\", \"world\")\n os.utime(\"A\", (modTime, modTime)) # evily\n ppg.run()\n assert not fi.did_hash_last_run\n assert read(\"B\") == \"hello\"\n ppg.run()\n assert not fi.did_hash_last_run\n assert read(\"B\") == \"hello\"\n write(\"A\", \"world\")\n ppg.run()\n assert fi.did_hash_last_run\n assert read(\"B\") == \"world\"\n\n def test_file_invariant(self):\n Path(\"A\").write_text(\"A\")\n jobA = ppg.FileInvariant(\"A\")\n jobB = ppg.FileGeneratingJob(\n \"B\", lambda of: counter(\"b\") and of.write_text(Path(\"A\").read_text())\n )\n jobB.depends_on(jobA)\n ppg.run()\n assert Path(\"B\").read_text() == \"A\"\n assert Path(\"b\").read_text() == \"1\"\n ppg.run()\n assert Path(\"b\").read_text() == \"1\"\n Path(\"A\").write_text(\"AA\")\n ppg.run()\n assert Path(\"b\").read_text() == \"2\"\n assert Path(\"B\").read_text() == \"AA\"\n\n def test_adding_and_removing_variants(self):\n Path(\"A\").write_text(\"A\")\n jobA = ppg.FileInvariant(\"A\")\n jobB = ppg.FileGeneratingJob(\n \"B\", lambda of: counter(\"b\") and of.write_text(Path(\"A\").read_text())\n )\n jobB.depends_on(jobA)\n ppg.run()\n assert Path(\"B\").read_text() == \"A\"\n assert Path(\"b\").read_text() == \"1\"\n ppg.run()\n assert Path(\"b\").read_text() == \"1\"\n Path(\"C\").write_text(\"C\")\n jobC = ppg.FileInvariant(\"C\")\n jobB.depends_on(jobC)\n ppg.run()\n assert Path(\"b\").read_text() == \"2\"\n ppg.new()\n jobA.readd()\n jobB.readd()\n jobC.readd()\n jobB.depends_on(jobA, jobC)\n ppg.run()\n assert Path(\"b\").read_text() == \"2\"\n ppg.new()\n jobA.readd()\n jobB.readd()\n jobC.readd()\n jobB.depends_on(jobA)\n ppg.run()\n assert Path(\"b\").read_text() == \"3\" # hey, we lost one!\n\n def test_function_invariant_binding_parameter(self):\n params = [\"a\"]\n jobB = ppg.FileGeneratingJob(\n \"B\", lambda of: counter(\"b\") and of.write_text(params[0])\n )\n ppg.run()\n assert Path(\"B\").read_text() == \"a\"\n assert Path(\"b\").read_text() == \"1\"\n ppg.run()\n assert Path(\"B\").read_text() == \"a\"\n assert Path(\"b\").read_text() == \"1\"\n\n params[0] = \"b\"\n ppg.run()\n assert Path(\"B\").read_text() == \"b\"\n assert Path(\"b\").read_text() == \"2\"\n ppg.run()\n assert Path(\"B\").read_text() == \"b\"\n assert Path(\"b\").read_text() == \"2\"\n\n def test_parameter_invariant(self):\n ppg.new(run_mode=ppg.RunMode.NOTEBOOK)\n params = [\"a\"]\n jobA = ppg.ParameterInvariant(\"A\", params)\n\n def shu(): # so the functionInvariant does not bind params itself!\n return params[0]\n\n jobB = ppg.FileGeneratingJob(\n \"B\", lambda of: counter(\"b\") and of.write_text(shu())\n )\n jobB.depends_on(jobA)\n ppg.run()\n\n assert Path(\"B\").read_text() == \"a\"\n assert 
Path(\"b\").read_text() == \"1\"\n ppg.run()\n assert Path(\"B\").read_text() == \"a\"\n assert Path(\"b\").read_text() == \"1\"\n\n params[0] = \"b\"\n jobA = ppg.ParameterInvariant(\n \"A\", params\n ) # the parameters get frozen when the job is defined!\n ppg.run()\n assert Path(\"B\").read_text() == \"b\"\n assert Path(\"b\").read_text() == \"2\"\n ppg.run()\n assert Path(\"B\").read_text() == \"b\"\n assert Path(\"b\").read_text() == \"2\"\n\n @pytest.mark.skip # no longer relevant onnce we switched to deephash\n def test_parameter_invariant_needs_hash(self, create_out_dir):\n class NoHash:\n def __hash__(self):\n raise TypeError(\"can't hash this\")\n\n with pytest.raises(TypeError):\n ppg.ParameterInvariant(\"C\", (NoHash(),))\n\n def test_data_loading_job(self):\n ppg.new(run_mode=ppg.RunMode.NOTEBOOK)\n self.store = [] # use attribute to avoid closure binding\n try:\n jobA = ppg.DataLoadingJob(\"A\", lambda: self.store.append(\"A\"))\n jobB = ppg.FileGeneratingJob(\n \"B\", lambda of: counter(\"b\") and of.write_text(self.store[0])\n )\n jobB.depends_on(jobA)\n assert len(self.store) == 0\n ppg.run()\n assert len(self.store) == 1\n assert Path(\"B\").read_text() == \"A\"\n assert Path(\"b\").read_text() == \"1\"\n ppg.run()\n assert len(self.store) == 1\n assert Path(\"b\").read_text() == \"1\"\n jobB.depends_on(ppg.ParameterInvariant(\"C\", \"C\"))\n self.store.clear() # so we can be sure the DataLoadingJob ran agin.\n ppg.run()\n assert len(self.store) == 1\n assert Path(\"b\").read_text() == \"2\"\n assert Path(\"B\").read_text() == \"A\"\n ppg.run()\n\n assert len(self.store) == 1\n assert Path(\"b\").read_text() == \"2\"\n assert Path(\"B\").read_text() == \"A\"\n self.store.clear()\n jobA = ppg.DataLoadingJob(\"A\", lambda: self.store.append(\"B\"))\n ppg.util.log_error(\"final run\")\n ppg.run()\n assert len(self.store) == 1\n assert Path(\"b\").read_text() == \"3\"\n assert Path(\"B\").read_text() == \"B\"\n\n finally:\n del self.store\n\n def test_attribute_loading_job(self):\n ppg.new(run_mode=ppg.RunMode.NOTEBOOK)\n\n class TestRecv:\n def __init__(self):\n self.job = ppg.AttributeLoadingJob(\n \"A\", self, \"a_\", lambda: counter(\"a\") and \"A\"\n )\n\n a = TestRecv()\n jobB = ppg.FileGeneratingJob(\n \"B\", lambda of: counter(\"b\") and of.write_text(a.a_)\n )\n jobB.depends_on(a.job)\n ppg.run()\n assert not hasattr(a, \"a_\")\n assert Path(\"B\").read_text() == \"A\"\n assert Path(\"b\").read_text() == \"1\"\n assert Path(\"a\").read_text() == \"1\"\n ppg.run()\n assert not hasattr(a, \"a_\")\n assert Path(\"B\").read_text() == \"A\"\n assert Path(\"b\").read_text() == \"1\"\n assert Path(\"a\").read_text() == \"1\"\n\n a.job = ppg.AttributeLoadingJob(\"A\", a, \"a_\", lambda: counter(\"a\") and \"B\")\n ppg.run()\n assert Path(\"B\").read_text() == \"B\"\n assert Path(\"b\").read_text() == \"2\"\n assert Path(\"a\").read_text() == \"2\"\n assert not hasattr(a, \"a_\")\n\n def test_cached_attribute_loading_job(self):\n ppg.new(run_mode=ppg.RunMode.NOTEBOOK)\n\n class TestRecv:\n def __init__(self):\n self.job = ppg.CachedAttributeLoadingJob(\n \"A\", self, \"a_\", lambda: counter(\"a\") and \"A\"\n )\n\n a = TestRecv()\n jobB = ppg.FileGeneratingJob(\n \"B\", lambda of: counter(\"b\") and of.write_text(a.a_)\n )\n jobB.depends_on(a.job[0])\n assert not Path(\"A\").exists()\n ppg.run()\n assert not hasattr(a, \"a_\")\n assert Path(\"B\").read_text() == \"A\"\n assert Path(\"b\").read_text() == \"1\"\n assert Path(\"a\").read_text() == \"1\"\n assert 
Path(\"A\").exists()\n ppg.run()\n assert not hasattr(a, \"a_\")\n assert Path(\"B\").read_text() == \"A\"\n assert Path(\"b\").read_text() == \"1\"\n assert Path(\"a\").read_text() == \"1\"\n assert Path(\"A\").exists()\n\n a.job = ppg.CachedAttributeLoadingJob(\n \"A\", a, \"a_\", lambda: counter(\"a\") and \"B\"\n )\n logger.info(\"Run leads to recalc of A, B\")\n ppg.run()\n assert Path(\"B\").read_text() == \"B\"\n assert Path(\"b\").read_text() == \"2\"\n assert Path(\"a\").read_text() == \"2\"\n assert not hasattr(a, \"a_\")\n assert Path(\"A\").exists()\n\n def test_job_generating(self):\n def inner(): # don't keep it inside, or the FunctionInvariant will trigger each time.\n counter(\"a\")\n b = ppg.FileGeneratingJob(\n \"B\", lambda of: counter(\"b\") and of.write_text(\"B\")\n )\n c = ppg.FileGeneratingJob(\n \"C\", lambda of: counter(\"c\") and of.write_text(\"C\" + read(\"B\"))\n )\n c.depends_on(b)\n\n def gen():\n return ppg.JobGeneratingJob(\"A\", inner)\n\n gen()\n ppg.run()\n assert ppg.global_pipegraph.has_edge(\"B\", \"C\")\n assert Path(\"a\").read_text() == \"1\"\n assert Path(\"b\").read_text() == \"1\"\n assert Path(\"c\").read_text() == \"1\"\n assert Path(\"B\").read_text() == \"B\"\n assert Path(\"C\").read_text() == \"CB\"\n\n # no rerun\n ppg.new()\n gen()\n ppg.run()\n assert Path(\"a\").read_text() == \"2\"\n assert Path(\"b\").read_text() == \"1\"\n assert Path(\"c\").read_text() == \"1\"\n assert Path(\"B\").read_text() == \"B\"\n assert Path(\"C\").read_text() == \"CB\"\n ppg.new()\n jobA = gen()\n jobA.depends_on(ppg.ParameterInvariant(\"PA\", \"a\"))\n ppg.run()\n assert Path(\"a\").read_text() == \"3\"\n assert Path(\"b\").read_text() == \"1\" # this does not mean that B get's rerun.\n assert Path(\"c\").read_text() == \"1\"\n assert Path(\"B\").read_text() == \"B\"\n assert Path(\"C\").read_text() == \"CB\"\n\n ppg.new()\n Path(\"B\").unlink()\n gen()\n jobA.depends_on(ppg.ParameterInvariant(\"PA\", \"a\"))\n ppg.run()\n assert Path(\"a\").read_text() == \"4\" # a runs once per ppg.run()\n assert Path(\"b\").read_text() == \"2\" # must rerun b, since file B is missing\n assert Path(\"c\").read_text() == \"1\" # but this one is insulated\n assert Path(\"B\").exists()\n assert Path(\"C\").read_text() == \"CB\"\n\n ppg.new()\n gen()\n ppg.run() # missing ParameterInvariant triggers A to run\n assert (\n Path(\"a\").read_text() == \"5\"\n ) # still no rerun - input to A didn't change!\n assert Path(\"b\").read_text() == \"2\" # this does not mean that B get's rerun.\n assert Path(\"c\").read_text() == \"1\" # this does not mean that B get's rerun.\n assert Path(\"B\").read_text() == \"B\"\n assert Path(\"C\").read_text() == \"CB\"\n\n def test_job_generating_generated_fails_rerun(self):\n local_counter = [0]\n\n def inner():\n counter(\"a\")\n\n def fg(of):\n if counter(\"b\") in (\"0\", \"1\"):\n raise ValueError()\n of.write_text(\"B\")\n\n ppg.FileGeneratingJob(\"B\", fg)\n\n def gen():\n ppg.JobGeneratingJob(\"A\", inner)\n\n gen()\n assert not Path(\"a\").exists()\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert Path(\"a\").read_text() == \"1\"\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert Path(\"a\").read_text() == \"2\" # new .run means rerun this thing\n assert not Path(\"B\").exists()\n ppg.new()\n gen()\n ppg.run()\n assert Path(\"B\").read_text() == \"B\"\n assert (\n Path(\"a\").read_text() == \"3\"\n ) # new pipegraph means we need to rerun the jobgenerating job\n\n def 
test_filegen_not_creating_files_throws_job_contract(self):\n jobA = ppg.FileGeneratingJob(\"A\", lambda of: 55)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert isinstance(\n ppg.global_pipegraph.last_run_result[jobA.job_id].error.args[0],\n ppg.JobContractError,\n )\n\n @pytest.mark.xfail\n def test_undeclared_output_leads_to_job_and_ppg_failure(self):\n raise NotImplementedError()\n\n def test_working_failing_filegen(self):\n ppg.new(run_mode=ppg.RunMode.NOTEBOOK)\n a = ppg.FileGeneratingJob(\n \"A\", lambda of: write(of, \"A\"), depend_on_function=True\n )\n ppg.run()\n write(\"do_raise\", \"True\")\n\n def raiser(of):\n if read(\"do_raise\") == \"True\":\n write(of, \"B\")\n raise ValueError()\n else:\n write(of, \"C\")\n\n a = ppg.FileGeneratingJob(\"A\", raiser)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n write(\"do_raise\", \"False\")\n ppg.run()\n assert read(\"A\") == \"C\"\n\n def test_failing_working_no_function_dep(self):\n write(\"do_raise\", \"True\")\n\n def raiser(of):\n if read(\"do_raise\") == \"True\":\n write(of, \"B\")\n raise ValueError()\n else:\n write(of, \"C\")\n\n ppg.FileGeneratingJob(\"A\", raiser, depend_on_function=False)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert read(\"A\") == \"B\"\n write(\"do_raise\", \"False\")\n ppg.run()\n assert read(\"A\") == \"C\"\n\n def test_failing_plus_job_gen_runs_failing_only_once(self):\n def a(of):\n counter(of)\n counter(\"a\")\n raise ValueError()\n\n def b():\n counter(\"B\")\n ppg.FileGeneratingJob(\"C\", lambda of: counter(of))\n\n jobA = ppg.FileGeneratingJob(\"A\", a)\n ppg.FileGeneratingJob(\"E\", lambda of: counter(of)).depends_on(jobA)\n ppg.JobGeneratingJob(\"B\", b)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert read(\"B\") == \"1\"\n assert read(\"C\") == \"1\"\n assert read(\"A\") == \"1\"\n assert read(\"a\") == \"1\"\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert read(\"A\") == \"1\" # get's unlinked prior to run\n assert read(\"a\") == \"2\" # the real 'run' counter'\n\n def test_actually_multithreading(self):\n # use the Barrier primivite to force it to actually wait for all three threads\n from threading import Barrier\n\n barrier = Barrier(3, timeout=1)\n\n def inner(of):\n def inner2():\n c = barrier.wait()\n write(of, str(c))\n\n return inner2\n\n ppg.new(cores=3)\n a = ppg.DataLoadingJob(\"A\", inner(\"A\"), depend_on_function=False)\n b = ppg.DataLoadingJob(\"B\", inner(\"B\"), depend_on_function=False)\n c = ppg.DataLoadingJob(\"C\", inner(\"C\"), depend_on_function=False)\n d = ppg.JobGeneratingJob(\"D\", lambda: None)\n d.depends_on(a, b, c)\n\n ppg.run()\n seen = set()\n seen.add(read(\"A\"))\n seen.add(read(\"B\"))\n seen.add(read(\"C\"))\n assert seen == set([\"0\", \"1\", \"2\"])\n\n def test_failing_jobs_and_downstreams(self):\n def do_a(of):\n raise ValueError()\n\n a = ppg.FileGeneratingJob(\"A\", do_a)\n b = ppg.FileGeneratingJob(\"B\", lambda of: of.write_text(Path(\"A\").read_text()))\n b.depends_on(a)\n c = ppg.FileGeneratingJob(\"C\", lambda of: write(of, \"C\"))\n c.depends_on(b)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert not Path(\"A\").exists()\n assert not Path(\"B\").exists()\n assert not Path(\"C\").exists()\n\n def test_no_log_dir(self):\n ppg.new(log_dir=None)\n c = ppg.FileGeneratingJob(\"C\", lambda of: write(of, \"C\"))\n ppg.run()\n assert read(\"C\") == \"C\"\n\n def test_job_lying_about_its_outputs(self):\n # tests the job returned the wrong set of outputs detection\n class 
LyingJob(ppg.FileGeneratingJob):\n def run(self, runner, historical_output):\n result = super().run(runner, historical_output)\n result[\"shu\"] = \"sha\"\n return result\n\n a = LyingJob(\"A\", lambda of: counter(\"shu\") and write(of, \"shu\"))\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n error = ppg.global_pipegraph.last_run_result[\"A\"].error\n assert isinstance(error, ppg.JobContractError)\n\n def test_failing_temp_does_not_get_run_twice(self):\n def a(of):\n counter(\"a\")\n raise ValueError()\n\n jobA = ppg.TempFileGeneratingJob(\"A\", a)\n jobB = ppg.FileGeneratingJob(\"B\", lambda of: write(of, \"B\"))\n jobB.depends_on(jobA)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert read(\"a\") == \"1\"\n assert not Path(\"B\").exists()\n\n def test_new_default_params(self):\n ppg.new(cores=50)\n assert ppg.global_pipegraph.cores == 50\n ppg.new()\n assert ppg.global_pipegraph.cores == 50\n ppg.new(cores=ppg.default)\n assert ppg.global_pipegraph.cores == ppg.util.CPUs()\n\n def test_two_job_failing(self):\n def err(of):\n raise ValueError()\n\n ppg.FileGeneratingJob(\"A\", err)\n ppg.FileGeneratingJob(\"B\", err)\n ppg.FileGeneratingJob(\"C\", lambda of: write(of, str(of)))\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert len(ppg.global_pipegraph.do_raise) == 2 # both exceptions\n assert ppg.global_pipegraph.last_run_result[\"A\"].error\n assert ppg.global_pipegraph.last_run_result[\"B\"].error\n assert not ppg.global_pipegraph.last_run_result[\"C\"].error\n\n def test_getting_source_after_chdir(self):\n def inner(something):\n return 552341512412\n\n import os\n\n old = os.getcwd()\n try:\n f = ppg.FunctionInvariant(\"shu\", inner)\n os.chdir(\"/tmp\")\n assert f.get_source_file().is_absolute()\n assert \"552341512412\" in f.get_source_file().read_text()\n finally:\n os.chdir(old)\n\n def test_massive_exception(self):\n should_len = 1024 * 1024\n\n def inner(_):\n raise ValueError(\"x \" * (should_len // 2))\n\n ppg.FileGeneratingJob(\"A\", inner)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n # make sure we captured it all\n assert (\n len(ppg.global_pipegraph.last_run_result[\"A\"].error.args[0].args[0])\n == should_len\n )\n\n def test_depends_on_func(self):\n a = ppg.FileGeneratingJob(\"A\", lambda of: of.write_text(\"A\"))\n\n def inner():\n return 55\n\n f1 = a.depends_on_func(\"mylambda1\", lambda: 55)\n f2 = a.depends_on_func(\"inner\", inner)\n f3 = a.depends_on_func(inner)\n f4 = a.depends_on_func(open) # built in\n assert isinstance(f1.invariant, ppg.FunctionInvariant)\n assert isinstance(f2.invariant, ppg.FunctionInvariant)\n assert isinstance(f3.invariant, ppg.FunctionInvariant)\n assert isinstance(f4.invariant, ppg.FunctionInvariant)\n assert f1.self is a\n assert f2.self is a\n assert f3.self is a\n assert f4.self is a\n assert ppg.global_pipegraph.has_edge(f1.invariant, a)\n assert ppg.global_pipegraph.has_edge(f2.invariant, a)\n assert ppg.global_pipegraph.has_edge(f3.invariant, a)\n assert ppg.global_pipegraph.has_edge(f4.invariant, a)\n with pytest.raises(ValueError):\n a.depends_on_func(\"open\")\n\n def test_partial_running_job(self):\n def a(of):\n counter(\"A\")\n with open(of, \"w\") as op:\n op.write(\"one\\n\")\n raise ValueError()\n op.write(\"two\\n\") # pragma: no cover\n\n job = ppg.FileGeneratingJob(\"a\", a)\n ppg.run(raise_on_job_error=False)\n assert read(\"a\") == \"one\\n\"\n assert read(\"A\") == \"1\"\n ppg.run(raise_on_job_error=False)\n assert read(\"A\") == \"2\"\n\n def 
test_declaring_filegen_with_function_without_parameter_raises_immediatly(self):\n with pytest.raises(TypeError):\n ppg.FileGeneratingJob(\"A\", lambda: None)\n\n def test_multi_file_generating_job_with_dict_file_definition(self):\n def ab(files):\n files[\"a\"].write_text(\"A\")\n files[\"b\"].write_text(\"A\")\n\n a = ppg.MultiFileGeneratingJob({\"a\": \"A\", \"b\": \"B\"}, ab)\n b = ppg.FileGeneratingJob(\"c\", lambda of: of.write_text(a[\"a\"].read_text()))\n b.depends_on(a[\"a\"])\n ppg.run()\n with pytest.raises(ValueError):\n d = ppg.MultiFileGeneratingJob([\"d\"], lambda of: None)\n d[\"shu\"]\n\n def test_no_error_dir(self):\n ppg.new(error_dir=None)\n try:\n ppg.FileGeneratingJob(\"A\", lambda of: of.write_text(\"A\"))\n\n def b(of):\n raise ValueError()\n\n ppg.FileGeneratingJob(\"B\", b)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n finally:\n del ppg._last_new_arguments[\"error_dir\"] # reset to default\n\n def test_no_logs(self):\n ppg.new(log_dir=None)\n try:\n ppg.FileGeneratingJob(\"A\", lambda of: of.write_text(\"A\"))\n\n def b(of):\n raise ValueError()\n\n ppg.FileGeneratingJob(\"B\", b)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n finally:\n del ppg._last_new_arguments[\"log_dir\"] # reset to default\n\n def test_log_retention(self):\n old_timeformat = ppg.graph.time_format\n try:\n ppg.graph.time_format = (\n \"%Y-%m-%d_%H-%M-%S-%f\" # we need subsecond resolution for this test.\n )\n ppg.new(log_retention=1) # so keep 2\n ppg.FileGeneratingJob(\"A\", lambda of: of.write_text(\"A\"))\n assert len(list(ppg.global_pipegraph.log_dir.glob(\"*.log\"))) == 0\n ppg.run()\n assert (\n len(list(ppg.global_pipegraph.log_dir.glob(\"*\"))) == 1 + 1 + 1 # for latest\n ) # runtimes\n ppg.run()\n assert (\n len(list(ppg.global_pipegraph.log_dir.glob(\"*\"))) == 2 + 1 + 1 # for latest\n ) # runtimes\n ppg.new(log_retention=2)\n ppg.run()\n prior = list(ppg.global_pipegraph.log_dir.glob(\"*\"))\n assert (\n len(list(ppg.global_pipegraph.log_dir.glob(\"*\"))) == 3 + 1 + 1 # for latest\n ) # runtimes\n # no new.. 
still new log file please\n ppg.run()\n after = list(ppg.global_pipegraph.log_dir.glob(\"*\"))\n assert (\n len(list(ppg.global_pipegraph.log_dir.glob(\"*\"))) == 3 + 1 + 1 # for latest\n ) # runtimes\n assert set([x.name for x in prior]) != set([x.name for x in after])\n\n finally:\n del ppg._last_new_arguments[\"log_retention\"]\n ppg.graph.time_format = old_timeformat\n\n def test_nested_exception_traceback(self):\n def a():\n try:\n raise ValueError()\n except ValueError as e:\n raise KeyError() from e\n\n jobA = ppg.DataLoadingJob(\"a\", a)\n b = ppg.FileGeneratingJob(\"b\", lambda of: write(\"b\", \"b\"))\n b.depends_on(jobA)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n e = (\n ppg.global_pipegraph.error_dir\n / ppg.global_pipegraph.time_str\n / \"0_exception.txt\"\n ).read_text()\n assert \"KeyError\" in e\n assert \"ValueError\" in e\n assert e.index(\"ValueError\") < e.index(\"KeyError\")\n assert \"cause\" in e\n\n def test_renaming_input_while_invalidating_other(self):\n a = ppg.FileGeneratingJob(\"A\", lambda of: of.write_text(\"A\"))\n b = ppg.FileGeneratingJob(\"B\", lambda of: of.write_text(\"B\"))\n\n def C(of):\n counter(\"C\")\n of.write_text(a.files[0].read_text() + b.files[0].read_text())\n\n c = ppg.FileGeneratingJob(\n \"c\", C, depend_on_function=False\n ) # otherwise renaming the input job will trigger already.\n c.depends_on(a, b)\n ppg.run()\n assert read(\"C\") == \"1\"\n assert read(\"c\") == \"AB\"\n ppg.new()\n a = ppg.FileGeneratingJob(\"A1\", lambda of: of.write_text(\"A\"))\n b = ppg.FileGeneratingJob(\"B\", lambda of: of.write_text(\"B\"))\n c = ppg.FileGeneratingJob(\"c\", C, depend_on_function=False)\n c.depends_on(a, b)\n time.sleep(1) # always change mtime - it is being rewritten, right\n ppg.run() # no rerun, changed name detection.\n assert read(\"C\") == \"1\"\n assert read(\"c\") == \"AB\"\n ppg.new()\n a = ppg.FileGeneratingJob(\"A2\", lambda of: of.write_text(\"A\"))\n b = ppg.FileGeneratingJob(\"B\", lambda of: of.write_text(\"B2\"))\n c = ppg.FileGeneratingJob(\"c\", C, depend_on_function=False)\n c.depends_on(a, b)\n ppg.run() # rerun, not because of the name change, but because b2 changed\n assert read(\"C\") == \"2\"\n assert read(\"c\") == \"AB2\"\n\n def test_renaming_maps_to_muliple(self):\n a = ppg.FileGeneratingJob(\"A\", lambda of: of.write_text(\"A\"))\n b = ppg.FileGeneratingJob(\"B\", lambda of: of.write_text(\"A\"))\n d = ppg.FileGeneratingJob(\"C\", lambda of: counter(\"c\") and of.write_text(\"c\"))\n d.depends_on(a, b)\n ppg.run()\n assert read(\"c\") == \"1\"\n ppg.new()\n a = ppg.FileGeneratingJob(\"A1\", lambda of: of.write_text(\"A\"))\n b = ppg.FileGeneratingJob(\"B\", lambda of: of.write_text(\"A\"))\n d = ppg.FileGeneratingJob(\"C\", lambda of: counter(\"c\") and of.write_text(\"c\"))\n d.depends_on(a, b)\n\n ppg.run()\n assert read(\"c\") == \"2\"\n\n def test_chained_failing_temps(self):\n def a(of):\n of.write_text(\"A\")\n\n def b(of):\n of.write_text(Path(\"A\").read_text())\n raise ValueError()\n\n def c(of):\n of.write_text(Path(\"B\").read_text())\n\n a = ppg.TempFileGeneratingJob(\"A\", a)\n b = ppg.TempFileGeneratingJob(\"B\", b)\n c = ppg.FileGeneratingJob(\"C\", c)\n d = ppg.JobGeneratingJob(\n \"D\",\n lambda: counter(\"D\")\n and ppg.FileGeneratingJob(\"E\", lambda of: of.write_text(\"E\")),\n )\n b.depends_on(a)\n c.depends_on(b)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert read(\"D\") == \"1\"\n\n def test_chained_failing_temps_no_downstream(self):\n def a(of):\n raise ValueError()\n 
of.write_text(\"A\")\n\n def b(of):\n of.write_text(Path(\"A\").read_text())\n\n def c(of):\n of.write_text(Path(\"B\").read_text())\n\n a = ppg.TempFileGeneratingJob(\"A\", a)\n b = ppg.TempFileGeneratingJob(\"B\", b)\n c = ppg.TempFileGeneratingJob(\"C\", c)\n d = ppg.JobGeneratingJob(\n \"D\",\n lambda: counter(\"D\")\n and ppg.FileGeneratingJob(\"E\", lambda of: of.write_text(\"E\")),\n )\n b.depends_on(a)\n c.depends_on(b)\n # with pytest.raises(ppg.JobsFailed):\n ppg.run() # won't raise since the tfs never get run.\n # they don't get run, because we create the 'clone jobs'\n # backwards - and c disappears\n # since it had no downstreams of its own.\n # and then the 2nd level (b) disappears, because\n # it no longer has a downstream\n # and the same happens to a\n assert read(\"D\") == \"1\"\n\n def test_no_source_traceback(self):\n def a(of):\n import pandas\n\n df = pandas.DataFrame()\n df[\"shu\"]\n\n ppg.FileGeneratingJob(\"a\", a)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n e = (\n ppg.global_pipegraph.error_dir\n / ppg.global_pipegraph.time_str\n / \"0_exception.txt\"\n ).read_text()\n assert \"# no source available\" in e\n assert \"KeyError\" in e\n\n def test_redefining_with_different_type(self):\n a = ppg.FileGeneratingJob(\"a\", lambda of: of.write_text(str(of)))\n with pytest.raises(ValueError):\n ppg.JobGeneratingJob(\"a\", lambda: None)\n\n def test_file_gen_job_running_here(self):\n tracker = []\n\n def a(of):\n of.write_text(\"a\")\n tracker.append(\"a\")\n\n ppg.FileGeneratingJob(\"a\", a, resources=ppg.Resources.RunsHere)\n ppg.run()\n assert read(\"a\") == \"a\"\n assert len(tracker) == 1\n\n def test_unlink_on_invalidation(self):\n Path(\"a\").write_text(\"shu\")\n\n def inner(of):\n assert not of.exists()\n of.write_text(\"a\")\n\n a = ppg.FileGeneratingJob(\"a\", inner)\n ppg.run()\n ppg.new()\n\n def inner(of):\n assert not of.exists()\n of.write_text(\"b\")\n\n a = ppg.FileGeneratingJob(\"a\", inner)\n ppg.run()\n\n def test_func_equals_none(self):\n a = ppg.FileGeneratingJob(\n \"a\", lambda of: of.write_text(str(of)), depend_on_function=False\n )\n a.depends_on(ppg.FunctionInvariant(\"myFunc\", None))\n ppg.run()\n ppg.run()\n ppg.FunctionInvariant(\"myFunc\", None) # that one is ok, not a redef\n with pytest.raises(ppg.JobRedefinitionError):\n ppg.FunctionInvariant(\"myFunc\", lambda: 5)\n with pytest.raises(ppg.JobRedefinitionError):\n ppg.FunctionInvariant(\"myFunc\", open)\n ppg.FunctionInvariant(\"build\", open)\n with pytest.raises(ppg.JobRedefinitionError):\n ppg.FunctionInvariant(\"build\", lambda: 5)\n ll = lambda: 5 # noqa:E731\n ppg.FunctionInvariant(\"lamb\", ll)\n ppg.FunctionInvariant(\"lamb\", ll)\n with pytest.raises(ppg.JobRedefinitionError):\n ppg.FunctionInvariant(\"build\", open)\n\n def test_funcinvariant_mixing_function_types_none(self):\n a = ppg.FileGeneratingJob(\n \"A\",\n lambda of: counter(\"a\") and of.write_text(str(of)),\n depend_on_function=False,\n )\n a.depends_on(ppg.FunctionInvariant(\"myFunc\", None))\n ppg.run()\n ppg.run()\n assert read(\"a\") == \"1\"\n\n ppg.new()\n a = ppg.FileGeneratingJob(\n \"A\",\n lambda of: counter(\"a\") and of.write_text(str(of)),\n depend_on_function=False,\n )\n a.depends_on(ppg.FunctionInvariant(\"myFunc\", open))\n ppg.run()\n assert read(\"a\") == \"2\"\n ppg.run()\n assert read(\"a\") == \"2\"\n\n ppg.new()\n a = ppg.FileGeneratingJob(\n \"A\",\n lambda of: counter(\"a\") and of.write_text(str(of)),\n depend_on_function=False,\n )\n a.depends_on(ppg.FunctionInvariant(\"myFunc\", 
lambda: 55))\n ppg.run()\n ppg.run()\n assert read(\"a\") == \"3\"\n\n ppg.new()\n a = ppg.FileGeneratingJob(\n \"A\",\n lambda of: counter(\"a\") and of.write_text(str(of)),\n depend_on_function=False,\n )\n a.depends_on(ppg.FunctionInvariant(\"myFunc\", open))\n ppg.run()\n assert read(\"a\") == \"4\"\n ppg.run()\n assert read(\"a\") == \"4\"\n\n def test_focus_on_these_jobs_and_generating(self, job_trace_log):\n \"\"\"What happens when you focus() on a JobGeneratingJob?\"\"\"\n\n def inner():\n counter(\"b\")\n ppg.FileGeneratingJob(\"A\", lambda of: counter(\"a\") and of.write_text(\"A\"))\n\n c = ppg.FileGeneratingJob(\"c\", lambda of: of.write_text(\"c\"))\n ppg.JobGeneratingJob(\"B\", inner)() # here is the call\n assert read(\"b\") == \"1\"\n assert read(\"a\") == \"1\"\n assert read(\"A\") == \"A\"\n assert not Path(\"c\").exists()\n assert not hasattr(c, \"prune_reason\")\n ppg.run()\n assert read(\"b\") == \"2\"\n assert read(\"a\") == \"1\"\n assert read(\"c\") == \"c\"\n\n def test_focus_on_multiple(self):\n a = ppg.FileGeneratingJob('a', lambda of: of.write_text(of.name))\n b = ppg.FileGeneratingJob('b', lambda of: of.write_text(of.name))\n c = ppg.FileGeneratingJob('c', lambda of: of.write_text(of.name))\n ppg.global_pipegraph.run_for_these([a,b])\n assert read('a') == 'a'\n assert read('b') == 'b'\n assert not Path('c').exists()\n\n def test_fail_but_write(self):\n def fail(of):\n of.write_text(\"A\")\n raise ValueError()\n\n ppg.FileGeneratingJob(\"a\", fail)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert read(\"a\") == \"A\"\n\n def test_failing_job_but_required_again_after_job_generating_job(self):\n def fail(of):\n counter(\"a\")\n raise ValueError()\n\n a = ppg.FileGeneratingJob(\"A\", fail)\n b = ppg.FileGeneratingJob(\n \"B\", lambda of: counter(\"b\") and of.write_text(read(\"a\"))\n )\n b.depends_on(a)\n\n def c():\n counter(\"c\")\n d = ppg.FileGeneratingJob(\n \"d\", lambda of: counter(\"d\") and of.write_text(read(\"a\"))\n )\n d.depends_on(a)\n\n c = ppg.JobGeneratingJob(\"c\", c)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert read(\"a\") == \"1\"\n assert read(\"c\") == \"1\"\n assert not Path(\"b\").exists()\n assert not Path(\"d\").exists()\n\n def test_going_from_file_generating_to_file_invariant_no_retrigger(self):\n a = ppg.FileGeneratingJob(\"a\", lambda of: of.write_text(\"a\"))\n b = ppg.FileGeneratingJob(\n \"b\", lambda of: counter(\"B\") and of.write_text(\"b\" + read(\"a\"))\n )\n b.depends_on(a)\n ppg.run()\n assert read(\"b\") == \"ba\"\n assert read(\"B\") == \"1\"\n ppg.new()\n a = ppg.FileInvariant(\"a\")\n b = ppg.FileGeneratingJob(\n \"b\", lambda of: counter(\"B\") and of.write_text(\"b\" + read(\"a\"))\n )\n b.depends_on(a)\n ppg.run()\n assert read(\"b\") == \"ba\"\n assert read(\"B\") == \"1\"\n ppg.new()\n a = ppg.FileGeneratingJob(\"a\", lambda of: of.write_text(\"a\"))\n b = ppg.FileGeneratingJob(\n \"b\", lambda of: counter(\"B\") and of.write_text(\"b\" + read(\"a\"))\n )\n b.depends_on(a)\n ppg.run()\n assert read(\"b\") == \"ba\"\n assert read(\"B\") == \"1\"\n\n def test_going_from_multi_file_generating_to_file_invariant_no_retrigger(self):\n # this one depends on all files\n a = ppg.MultiFileGeneratingJob(\n [\"a\", \"a1\"], lambda of: of[0].write_text(\"a\") and of[1].write_text(\"a1\")\n )\n b = ppg.FileGeneratingJob(\n \"b\", lambda of: counter(\"B\") and of.write_text(\"b\" + read(\"a\"))\n )\n b.depends_on(a)\n ppg.run()\n assert read(\"b\") == \"ba\"\n assert read(\"B\") == \"1\"\n ppg.new()\n b = 
ppg.FileGeneratingJob(\n \"b\", lambda of: counter(\"B\") and of.write_text(\"b\" + read(\"a\"))\n )\n b.depends_on(ppg.FileInvariant(\"a\"))\n b.depends_on(ppg.FileInvariant(\"a1\"))\n ppg.run()\n assert read(\"b\") == \"ba\"\n assert read(\"B\") == \"1\"\n ppg.new()\n a = ppg.MultiFileGeneratingJob(\n [\"a\", \"a1\"], lambda of: of[0].write_text(\"a\") and of[1].write_text(\"a1\")\n )\n b = ppg.FileGeneratingJob(\n \"b\", lambda of: counter(\"B\") and of.write_text(\"b\" + read(\"a\"))\n )\n b.depends_on(a)\n ppg.run()\n assert read(\"b\") == \"ba\"\n assert read(\"B\") == \"1\"\n\n def test_empty_depends_on_ok(self):\n a = ppg.FileGeneratingJob(\"shu\", lambda of: of.write_text(of.name))\n a.depends_on()\n", "id": "12049388", "language": "Python", "matching_score": 7.408092498779297, "max_stars_count": 0, "path": "tests/test_basics.py" }, { "content": "import os\nimport time\nimport subprocess\nimport sys\nimport pytest\nimport pypipegraph2 as ppg\nfrom pathlib import Path\nfrom .shared import write, read, Dummy, counter\n\n\[email protected](\"ppg2_per_test\", \"create_out_dir\")\nclass TestResourceCoordinator:\n def test_jobs_that_need_all_cores_are_spawned_one_by_one(self):\n # we'll determine this by the start respective end times..\n ppg.new(\n cores=3,\n )\n\n def a(of):\n write(of, \"A\")\n time.sleep(0.1)\n\n def b(of):\n write(of, \"B\")\n time.sleep(0.1)\n\n jobA = ppg.FileGeneratingJob(\"out/A\", a, resources=ppg.Resources.AllCores)\n jobB = ppg.FileGeneratingJob(\"out/B\", b, resources=ppg.Resources.AllCores)\n ppg.run()\n assert read(\"out/A\") == \"A\"\n assert read(\"out/B\") == \"B\"\n if jobA.start_time < jobB.start_time:\n first_job = jobA\n second_job = jobB\n else:\n first_job = jobB\n second_job = jobA\n min_start = min(jobA.start_time, jobB.start_time)\n print(\n \"times\",\n first_job.start_time - min_start,\n first_job.stop_time - min_start,\n second_job.start_time - min_start,\n second_job.stop_time - min_start\n )\n if jobA.start_time is None:\n raise ValueError(\"JobA did not run\")\n assert first_job.stop_time < second_job.start_time\n\n def test_jobs_concurrent_jobs_run_concurrently(self):\n # we'll determine this by the start respective end times..\n import time\n\n ppg.new(\n cores=2,\n )\n jobA = ppg.FileGeneratingJob(\n \"out/A\", lambda of: write(of, \"A\"), resources=ppg.Resources.AllCores\n )\n jobB = ppg.FileGeneratingJob(\n \"out/B\", lambda of: write(of, \"B\"), resources=ppg.Resources.AllCores\n )\n now = time.time()\n ppg.run()\n assert read(\"out/A\") == \"A\"\n assert read(\"out/B\") == \"B\"\n if jobA.start_time < jobB.start_time:\n first_job = jobA\n second_job = jobB\n else:\n first_job = jobB\n second_job = jobA\n print(\n \"times\",\n first_job.start_time - now,\n first_job.stop_time - now,\n second_job.start_time - now,\n second_job.stop_time - now,\n )\n if jobA.start_time is None:\n raise ValueError(\"JobA did not run\")\n assert first_job.stop_time - now > second_job.start_time - now\n\n def test_multiple_all_cores_blocking_single_jobs(self):\n pass # todo\n\n\nclass CantDepickle:\n \"\"\"A class that can't be depickled (throws a type error,\n just like the numpy.maskedarray does occacionally)\"\"\"\n\n def __getstate__(self):\n return {\"shu\": \"5\"}\n\n def __setstate__(self, state):\n print(state)\n raise TypeError(\"I can be pickled, but not unpickled\")\n\n\[email protected](\"ppg2_per_test\")\nclass TestingTheUnexpectedTests:\n def test_job_exiting_python(self):\n def dies(of):\n import sys\n from loguru import logger\n\n 
logger.info(\"Now terminating child python\")\n sys.exit(5)\n\n ppg.FileGeneratingJob(\"out/A\", dies)\n with pytest.raises(ppg.JobsFailed):\n # ppg.util.global_pipegraph.rc.timeout = 1\n ppg.run()\n assert not (Path(\"out/A\").exists())\n error = ppg.global_pipegraph.last_run_result[\"out/A\"].error\n assert isinstance(error.args[0], ppg.JobDied)\n assert error.args[0].args[2] == 5\n\n def test_job_exiting_python_stdout_stderr_logged(self):\n def dies(of):\n import sys\n\n # logging.info(\"Now terminating child python\")\n print(\"hello\")\n sys.stderr.write(\"I am stderr\\n\")\n sys.stdout.flush()\n sys.exit(5)\n\n fg = ppg.FileGeneratingJob(\"out/A\", dies)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n ppg.run()\n assert not (Path(\"out/A\").exists())\n error = ppg.global_pipegraph.last_run_result[\"out/A\"].error\n assert isinstance(error.args[0], ppg.JobDied)\n assert error.args[0].args[2] == 5\n assert fg.stdout == \"hello\\n\"\n assert fg.stderr == \"I am stderr\\n\"\n\n def test_job_getting_killed_python_stdout_stderr_logged(self):\n def dies(of):\n import sys\n import signal\n\n # logging.info(\"Now terminating child python\")\n print(\"hello\")\n sys.stderr.write(\"I am stderr\\n\")\n sys.stdout.flush()\n sys.stderr.flush()\n os.kill(os.getpid(), signal.SIGKILL)\n\n fg = ppg.FileGeneratingJob(\"out/A\", dies)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n ppg.run()\n assert not (Path(\"out/A\").exists())\n error = ppg.global_pipegraph.last_run_result[\"out/A\"].error\n assert isinstance(error.args[0], ppg.JobDied)\n assert error.args[0].args[2] == -9\n assert fg.stdout == \"hello\\n\"\n assert fg.stderr == \"I am stderr\\n\"\n\n def testing_import_does_not_hang(self): # see python issue22853\n old_dir = os.getcwd()\n os.chdir(Path(__file__).parent)\n p = subprocess.Popen(\n [sys.executable, \"_import_does_not_hang.py\"],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n stdout, stderr = p.communicate()\n print(stdout, stderr)\n assert b\"OK\" in stdout\n os.chdir(old_dir)\n\n def test_older_jobs_added_back_to_new_pipegraph(self, create_out_dir):\n a = ppg.FileGeneratingJob(\"out/A\", lambda of: write(of, \"a\"))\n # ppg.run()\n ppg.new()\n b = ppg.FileGeneratingJob(\"out/B\", lambda of: write(of, \"b\"))\n with pytest.raises(KeyError):\n a.depends_on(b)\n with pytest.raises(KeyError):\n b.depends_on(a)\n a.readd()\n a.depends_on(b)\n assert \"out/B\" in ppg.global_pipegraph.jobs\n assert \"out/A\" in ppg.global_pipegraph.jobs\n # with pytest.raises(ppg.PyPipeGraphError):\n\n\[email protected](\"ppg2_per_test\")\nclass TestPathLib:\n def test_multifilegenerating_job_requires_string_filenames(self):\n import pathlib\n\n x = lambda of: 5 # noqa:E731\n ppg.MultiFileGeneratingJob([\"a\"], x)\n ppg.MultiFileGeneratingJob([pathlib.Path(\"a\")], x)\n\n with pytest.raises(TypeError):\n ppg.MultiFileGeneratingJob([0])\n\n with pytest.raises(TypeError):\n ppg.MultiFileGeneratingJob([b\"a\"]) # bytes is not a string type\n\n def test_accepts(self):\n import pathlib\n\n write(\"aaa\", \"hello\")\n write(\"bbb\", \"hello\")\n write(\"ccc\", \"hello\")\n a = ppg.FileInvariant(pathlib.Path(\"aaa\"))\n b = ppg.FileGeneratingJob(\n pathlib.Path(\"b\"),\n lambda of: write(of, \"bb\" + read(\"aaa\") + read(\"bbb\") + read(\"ccc\")),\n )\n b.depends_on(a)\n\n dd = Dummy()\n\n def mf(ofs):\n ofs[0].write_text(\"cc\" + read(\"g\"))\n ofs[1].write_text(\"dd\" + read(\"h\") + dd.attr)\n ofs[2].write_text(\"ee\" + read(\"i\") + read(\"j\") + read(\"k\"))\n\n c = 
ppg.MultiFileGeneratingJob([pathlib.Path(\"c\"), \"d\", pathlib.Path(\"e\")], mf)\n c.depends_on(b)\n d = ppg.FunctionInvariant(pathlib.Path(\"f\"), lambda x: x + 1)\n c.depends_on(d)\n e = ppg.ParameterInvariant(pathlib.Path(\"c\"), \"hello\")\n c.depends_on(e)\n f = ppg.TempFileGeneratingJob(pathlib.Path(\"g\"), lambda of: write(of, \"gg\"))\n c.depends_on(f)\n\n def tmf(ofs):\n ofs[0].write_text(\"hh\")\n ofs[1].write_text(\"ii\")\n\n g = ppg.MultiTempFileGeneratingJob([pathlib.Path(\"h\"), \"i\"], tmf)\n c.depends_on(g)\n\n def tpf(ofs):\n assert len(ofs) == 2\n write(ofs[0], \"jjjj\")\n write(ofs[1], \"kkkk\")\n\n with pytest.raises(ValueError):\n h = ppg.MultiTempFileGeneratingJob(\n [pathlib.Path(\"j\"), \"k\", pathlib.Path(\"k\")], tpf\n )\n h = ppg.MultiTempFileGeneratingJob([pathlib.Path(\"j\"), \"k\"], tpf)\n c.depends_on(h)\n\n i = ppg.CachedDataLoadingJob(\n pathlib.Path(\"l\"), lambda: write(\"l\", \"llll\"), lambda res: res\n )\n with pytest.raises(TypeError):\n c.depends_on(i)\n c.depends_on(i.load)\n\n m = ppg.CachedAttributeLoadingJob(pathlib.Path(\"m\"), dd, \"attr\", lambda: \"55\")\n c.depends_on(m.load)\n ppg.run()\n assert read(\"aaa\") == \"hello\"\n assert read(\"b\") == \"bbhellohellohello\"\n assert read(\"c\") == \"ccgg\"\n assert read(\"d\") == \"ddhh55\"\n assert read(\"e\") == \"eeiijjjjkkkk\"\n assert not (Path(\"g\").exists())\n assert not (Path(\"h\").exists())\n assert not (Path(\"i\").exists())\n assert not (Path(\"j\").exists())\n\n\ndef test_fixture_without_class(ppg2_per_test):\n # just to make sure the ppg2_per_test fixture does what it's supposed to if you're not using a class\n import pathlib\n\n assert \"run/.test_fixture_without_class\" in str(pathlib.Path(\".\").absolute())\n\n\ndef test_job_or_filename(ppg2_per_test):\n a, dep_a = ppg.util.job_or_filename(\"out/A\")\n assert a == Path(\"out/A\")\n assert len(dep_a) == 1\n assert isinstance(dep_a[0], ppg.FileInvariant)\n j = ppg.FileGeneratingJob(\"out/B\", lambda of: None)\n b, dep_b = ppg.util.job_or_filename(j)\n assert b == Path(\"out/B\")\n assert dep_b[0] is j\n assert len(dep_b) == 1\n\n c, dep_c = ppg.util.job_or_filename(None)\n assert c is None\n assert not dep_c\n\n\[email protected]() # todo\ndef test_interactive_import(ppg2_per_test):\n # just so at least the import part of interactive is under coverage\n import pypipegraph2.interactive # noqa:F401\n\n\ndef test_version_is_correct():\n import configparser\n from pathlib import Path\n\n c = configparser.ConfigParser()\n c.read(Path(__file__).parent.parent / \"setup.cfg\")\n version = c[\"metadata\"][\"version\"]\n assert version == ppg.__version__\n\n\ndef test_dataloading_job_changing_cwd(ppg2_per_test):\n from pathlib import Path\n\n os.mkdir(\"shu\")\n\n def load():\n os.chdir(\"shu\")\n Path(\"b\").write_text(\"world\")\n return 55\n\n a = ppg.FileGeneratingJob(\"a\", lambda of: of.write_text(\"hello\"))\n b = ppg.DataLoadingJob(\"b\", load)\n a.depends_on(b)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert isinstance(\n ppg.global_pipegraph.last_run_result[\"b\"].error.args[0], ppg.JobContractError\n )\n\n\ndef test_job_generating_job_changing_cwd(ppg2_per_test):\n from pathlib import Path\n\n os.mkdir(\"shu\")\n\n def load():\n os.chdir(\"shu\")\n Path(\"b\").write_text(\"world\")\n return 55\n\n a = ppg.FileGeneratingJob(\"a\", lambda of: Path(\"a\").write_text(\"hello\"))\n b = ppg.JobGeneratingJob(\"b\", load)\n a.depends_on(b)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert isinstance(\n 
ppg.global_pipegraph.last_run_result[\"b\"].error.args[0], ppg.JobContractError\n )\n\n\ndef test_capturing_locals_when_they_have_throwing_str(ppg2_per_test):\n class NoStr:\n def __str__(self):\n raise ValueError(\"Cant string this\")\n\n def inner(of):\n a = NoStr()\n raise ValueError(\"expected\") # trace check\n\n j = ppg.FileGeneratingJob(\"a\", inner)\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert \"expected\" in str(j.exception)\n assert \"trace check\" in str(j.stack_trace) # we captured teh relevant line\n\n\ndef test_cache_dir(ppg2_per_test):\n ppg.new(cache_dir=\"shu\")\n assert Path(\"shu\").exists()\n ppg.new(cache_dir=None)\n a = ppg.FileGeneratingJob(\"a\", lambda of: of.write_text(\"A\"))\n ppg.run()\n assert Path(\"a\").read_text() == \"A\"\n\n\nclass TestCleanup:\n def test_error_cleanup(self, ppg2_per_test):\n import pypipegraph2 as ppg2\n\n ppg2_per_test.new(log_retention=2)\n assert ppg2.global_pipegraph.log_retention == 2\n\n def fail(of):\n raise ValueError()\n\n job = ppg.FileGeneratingJob(\"A\", fail)\n ec = [] # counters\n lc = []\n with pytest.raises(ppg.JobsFailed):\n ppg.run(print_failures=False)\n ec.append(len(list(ppg.global_pipegraph.error_dir.glob(\"*\"))))\n lc.append(len(list(ppg.global_pipegraph.log_dir.glob(\"*.log\"))))\n with pytest.raises(ppg.JobsFailed):\n ppg.run(print_failures=False)\n ec.append(len(list(ppg.global_pipegraph.error_dir.glob(\"*\"))))\n lc.append(len(list(ppg.global_pipegraph.log_dir.glob(\"*.log\"))))\n with pytest.raises(ppg.JobsFailed):\n ppg.run(print_failures=False)\n # we keep log_retention old ones + the current one\n ec.append(len(list(ppg.global_pipegraph.error_dir.glob(\"*\"))))\n lc.append(len(list(ppg.global_pipegraph.log_dir.glob(\"*.log\"))))\n\n with pytest.raises(ppg.JobsFailed):\n ppg.run(print_failures=False)\n assert ppg.global_pipegraph.log_file.exists()\n ec.append(len(list(ppg.global_pipegraph.error_dir.glob(\"*\"))))\n lc.append(len(list(ppg.global_pipegraph.log_dir.glob(\"*.log\"))))\n assert ec == [\n 1+1,\n 2+1,\n 3+1, # latest\n 3+1,\n ]\n assert lc == [\n 1,\n 2,\n 3, # latest exluded by *.log\n 3,\n ]\n\n\ndef test_exploding_core_lock_captured(ppg2_per_test):\n \"\"\"We had an issue where an exception in acquiring the core lock led to the thread dying\n and the graph runner hanging\n \"\"\"\n a = ppg.FileGeneratingJob(\n \"a\", lambda of: of.write_text(\"a\"), resources=ppg.Resources._RaiseInCoreLock\n )\n with pytest.raises(ppg.FatalGraphException):\n ppg.run()\n assert \"Count == 0\" in str(a.exception)\n\n\ndef test_exploding_resources_to_number(ppg2_per_test):\n \"\"\"We had an issue where an exception in acquiring the core lock led to the thread dying\n and the graph runner hanging\n \"\"\"\n a = ppg.FileGeneratingJob(\n \"a\", lambda of: of.write_text(\"a\"), resources=ppg.Resources._RaiseInToNumber\n )\n with pytest.raises(ppg.FatalGraphException):\n ppg.run()\n assert \"Not a Resource\" in str(a.exception)\n\n\[email protected](\"ppg2_per_test\")\nclass TestModifyDag:\n def test_2_fg_one_dl(self):\n from pypipegraph2.util import log_info\n\n parts = []\n a1 = ppg.FileGeneratingJob(\n \"a1\", lambda of: of.write_text(str(parts)), depend_on_function=False\n )\n a2 = ppg.FileGeneratingJob(\n \"a2\",\n lambda of: of.write_text(Path(\"a1\").read_text() + str(parts)),\n depend_on_function=False,\n )\n a2.depends_on(a1)\n b = ppg.DataLoadingJob(\n \"b\", lambda *args: parts.append(1), depend_on_function=False\n )\n a1.depends_on(b)\n a2.depends_on(b)\n log_info(\"now run\")\n ppg.run()\n 
assert read(\"a1\") == \"[1]\"\n assert read(\"a2\") == \"[1][1]\"\n\n def test_2_fg_one_dl_failing(pself, ppg2_per_test, job_trace_log):\n from pypipegraph2.util import log_info\n\n parts = []\n a1 = ppg.FileGeneratingJob(\n \"a1\", lambda of: of.write_text(\"a1\"), depend_on_function=False\n )\n a2 = ppg.FileGeneratingJob(\n \"a2\",\n lambda of: of.write_text(Path(\"a2\").read_text() + str(parts)),\n depend_on_function=False,\n )\n a2.depends_on(a1)\n b = ppg.DataLoadingJob(\"b\", lambda: _no_such_thing, depend_on_function=False)\n a1.depends_on(b)\n a2.depends_on(b)\n log_info(\"now run\")\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert type(b.exception) is NameError\n assert \"Upstream\" in str(a1.exception)\n assert \"Upstream\" in str(a2.exception)\n assert not Path(\"a1\").exists()\n assert not Path(\"a2\").exists()\n\n def test_2_fg_one_temp(ppg2_per_test):\n from pypipegraph2.util import log_info\n\n parts = []\n a1 = ppg.FileGeneratingJob(\n \"a1\",\n lambda of: of.write_text(Path(\"b\").read_text() + \"a1\"),\n depend_on_function=False,\n )\n a2 = ppg.FileGeneratingJob(\n \"a2\",\n lambda of: of.write_text(\n Path(\"b\").read_text() + Path(\"a1\").read_text() + \"a2\"\n ),\n depend_on_function=False,\n )\n a2.depends_on(a1)\n b = ppg.TempFileGeneratingJob(\n \"b\", lambda of: of.write_text(\"b\"), depend_on_function=False\n )\n a1.depends_on(b)\n a2.depends_on(b)\n\n log_info(\"now run\")\n ppg.run()\n assert read(\"a1\") == \"ba1\"\n assert read(\"a2\") == \"bba1a2\"\n assert not Path(\"b\").exists()\n\n\ndef test_prevent_absolute_paths(ppg2_per_test):\n ppg2_per_test.new(prevent_absolute_paths=True)\n with pytest.raises(ValueError):\n ppg.FileGeneratingJob(\"/tmp/absolute\", lambda of: of.write_text(\"a\"))\n ppg2_per_test.new(prevent_absolute_paths=False)\n ppg.FileGeneratingJob(\"/tmp/absolute\", lambda of: of.write_text(\"a\"))\n ppg2_per_test.new(prevent_absolute_paths=True)\n\n\ndef test_broken_case_from_delayeddataframe(ppg2_per_test):\n out = {}\n\n def store(key, value):\n out[key] = value\n\n a = ppg.CachedDataLoadingJob(\"a\", lambda: \"a\", lambda value: counter('A') and store(\"a\", value))\n event = ppg.CachedDataLoadingJob(\n \"event\", lambda: counter(\"EVENT\") and \"event\", lambda value: store(\"event\", value)\n )\n event2 = ppg.CachedDataLoadingJob(\n \"event2\", lambda: counter(\"EVENT2\") and \"event2\", lambda value: store(\"event2\", value)\n )\n anno_sequence = ppg.CachedDataLoadingJob(\n \"anno_sequence\",\n lambda: counter('ANNO_SEQUENCE') and \"anno_sequence\",\n lambda value: store(\"anno_sequence\", value),\n )\n event_seq = ppg.DataLoadingJob(\n \"event_seq\", lambda: store(\"event_seq\", out[\"event\"] + out[\"anno_sequence\"])\n )\n event2_seq = ppg.DataLoadingJob(\n \"event2_seq\", lambda: store(\"event2_seq\", out[\"event2\"] + out[\"event_seq\"])\n )\n force_load = ppg.JobGeneratingJob(\"force_load\", lambda: None)\n\n anno_sequence.calc.depends_on(a.load)\n anno_sequence.load.depends_on(a.load)\n event.calc.depends_on(a.load)\n\n event_seq.depends_on(event.load)\n event_seq.depends_on(anno_sequence.load)\n\n event2.calc.depends_on(event.load)\n\n event2_seq.depends_on(event_seq, event2.load)\n\n force_load.depends_on(event.load, event2.load, event2_seq, event_seq)\n ppg.run()\n assert out[\"event2_seq\"] == \"event2\" + \"event\" + \"anno_sequence\"\n assert read('EVENT') == '1'\n assert read('EVENT2') == '1'\n assert read('ANNO_SEQUENCE') == '1'\n ppg.run()\n assert read('EVENT') == '1'\n assert read('EVENT2') == '1'\n assert 
read('ANNO_SEQUENCE') == '1'\n", "id": "2432321", "language": "Python", "matching_score": 5.484094619750977, "max_stars_count": 0, "path": "tests/test_other.py" }, { "content": "import pytest\nimport pickle\nfrom pathlib import Path\nimport pypipegraph2 as ppg\n\nfrom .shared import read, write, append, Dummy, counter\n\n\[email protected](\"ppg2_per_test\")\nclass TestCachedDataLoadingJob:\n def test_simple(self):\n o = Dummy()\n\n def calc():\n return \", \".join(str(x) for x in range(0, 100))\n\n def store(value):\n o.a = value\n\n job, cache_job = ppg.CachedDataLoadingJob(\"out/mycalc\", calc, store)\n of = \"out/A\"\n\n def do_write(of):\n write(of, o.a)\n\n ppg.FileGeneratingJob(of, do_write).depends_on(job)\n ppg.run()\n assert read(of) == \", \".join(str(x) for x in range(0, 100))\n\n def test_no_downstream_still_calc(self):\n o = Dummy()\n\n def calc():\n return \", \".join(str(x) for x in range(0, 100))\n\n def store(value):\n o.a = value\n\n ppg.CachedDataLoadingJob(\"out/mycalc\", calc, store)\n # job.ignore_code_changes() #or it would run anyway... hm.\n assert not (Path(\"out/mycalc\").exists())\n ppg.run()\n assert Path(\"out/mycalc\").exists()\n\n def test_passing_non_function_to_calc(self):\n\n with pytest.raises(TypeError):\n ppg.CachedDataLoadingJob(\"out/a\", \"shu\", lambda value: 55)\n\n def test_passing_non_function_to_store(self):\n with pytest.raises(TypeError):\n ppg.CachedDataLoadingJob(\"out/a\", lambda value: 55, \"shu\")\n\n def test_passing_non_string_as_jobid(self):\n with pytest.raises(TypeError):\n ppg.CachedDataLoadingJob(5, lambda: 1, lambda value: 55)\n\n def test_being_generated(self):\n o = Dummy()\n\n def calc():\n return 55\n\n def store(value):\n o.a = value\n\n def dump(of):\n write(\"out/c\", \"c\")\n write(\"out/A\", str(o.a))\n\n def gen():\n load_job, cache_job = ppg.CachedDataLoadingJob(\"out/B\", calc, store)\n dump_job = ppg.FileGeneratingJob(\"out/A\", dump)\n dump_job.depends_on(load_job)\n\n ppg.JobGeneratingJob(\"out/C\", gen)\n ppg.run()\n assert read(\"out/A\") == \"55\"\n assert read(\"out/c\") == \"c\"\n\n def test_being_generated_nested(self):\n o = Dummy()\n\n def calc():\n return 55\n\n def store(value):\n o.a = value\n\n def dump(of):\n write(of, str(o.a))\n\n def gen():\n write(\"out/c\", \"c\")\n calc_job, cache_job = ppg.CachedDataLoadingJob(\"out/B\", calc, store)\n\n def gen2():\n write(\"out/d\", \"d\")\n dump_job = ppg.FileGeneratingJob(\"out/A\", dump)\n dump_job.depends_on(calc_job)\n\n ppg.JobGeneratingJob(\"out/D\", gen2)\n\n ppg.JobGeneratingJob(\"out/C\", gen)\n ppg.run()\n assert read(\"out/c\") == \"c\"\n assert read(\"out/d\") == \"d\"\n assert read(\"out/A\") == \"55\"\n\n def test_cached_dataloading_job_does_not_load_its_preqs_on_cached(self):\n o = Dummy()\n\n def a():\n o.a = \"A\"\n append(\"out/A\", \"A\")\n\n def calc():\n append(\"out/B\", \"B\")\n return o.a * 2\n\n def load(value):\n o.c = value\n append(\"out/Cx\", \"C\") # not C, that's the cached file, you know...\n\n def output(of):\n write(of, o.c)\n\n\n dl = ppg.DataLoadingJob(\"out/A\", a)\n ca, cca = ppg.CachedDataLoadingJob(\"out/C\", calc, load)\n fg = ppg.FileGeneratingJob(\"out/D\", output)\n fg.depends_on(ca)\n cca.depends_on(dl)\n ppg.run()\n assert read(\"out/D\") == \"AA\" # we did write the final result\n assert read(\"out/A\") == \"A\" # ran the dl job\n assert read(\"out/B\") == \"B\" # ran the calc job...\n assert read(\"out/Cx\") == \"C\" # ran the load jobo\n Path(\n \"out/D\"\n ).unlink() # so the filegen and the loadjob of cached 
should rerun...\n ppg.new()\n dl = ppg.DataLoadingJob(\"out/A\", a)\n ca, cca = ppg.CachedDataLoadingJob(\"out/C\", calc, load)\n fg = ppg.FileGeneratingJob(\"out/D\", output)\n fg.depends_on(ca)\n cca.depends_on(dl)\n ppg.run()\n assert read(\"out/D\") == \"AA\" # we did write the final result\n assert read(\"out/A\") == \"A\" # did not run the dl job\n assert read(\"out/B\") == \"B\" # did not run the calc job again\n assert read(\"out/Cx\") == \"CC\" # did run the load job again\n\n def test_name_must_be_str(self):\n with pytest.raises(TypeError):\n ppg.CachedDataLoadingJob(123, lambda: 123, lambda: 5)\n with pytest.raises(TypeError):\n ppg.CachedDataLoadingJob(\"123\", 123, lambda: 5)\n with pytest.raises(TypeError):\n ppg.CachedDataLoadingJob(\"123\", lambda: 5, 123)\n with pytest.raises(TypeError):\n ppg.CachedDataLoadingJob(Path(\"123\"), lambda: 5, 123)\n\n def test_cant_unpickle(self):\n o = Dummy()\n\n def calc():\n return \", \".join(str(x) for x in range(0, 100))\n\n def store(value):\n o.a = value\n\n job, cache_job = ppg.CachedDataLoadingJob(\n \"out/mycalc\", calc, store, depend_on_function=False\n )\n write(\"out/mycalc\", \"no unpickling this\")\n of = \"out/A\"\n\n def do_write(of):\n write(of, o.a)\n\n ppg.FileGeneratingJob(of, do_write).depends_on(job)\n ppg.run() # this does not raise. The file must be generated by the graph!\n # overwriting it again\n write(\"out/mycalc\", \"no unpickling this\")\n Path(of).unlink()\n with pytest.raises(ppg.JobsFailed):\n ppg.run()\n error = ppg.global_pipegraph.last_run_result[job.job_id].error\n assert isinstance(error, ppg.JobError)\n assert isinstance(error.args[0], pickle.UnpicklingError)\n assert \"out/mycalc\" in str(error)\n\n def test_use_cores(self):\n ca, cca = ppg.CachedDataLoadingJob(\"out/C\", lambda: 55, lambda x: None)\n assert cca.resources is ppg.Resources.SingleCore\n assert cca.use_resources(ppg.Resources.AllCores) is cca\n assert cca.resources is ppg.Resources.AllCores\n assert ca.resources is ppg.Resources.SingleCore\n assert ca.use_resources(ppg.Resources.AllCores) is ca\n assert ca.resources is ppg.Resources.AllCores\n\n\[email protected](\"ppg2_per_test\")\nclass TestCachedAttributeJob:\n def test_simple(self):\n o = Dummy()\n\n def calc():\n return \", \".join(str(x) for x in range(0, 100))\n\n job, _cache_job = ppg.CachedAttributeLoadingJob(\"mycalc\", o, \"a\", calc)\n of = \"A\"\n\n def do_write(output_filename):\n write(output_filename, o.a)\n\n ppg.FileGeneratingJob(of, do_write).depends_on(job)\n ppg.run()\n assert read(of) == \", \".join(str(x) for x in range(0, 100))\n\n def test_no_downstream_still_calc(self):\n o = Dummy()\n\n def calc():\n return \", \".join(str(x) for x in range(0, 100))\n\n ppg.CachedAttributeLoadingJob(\"mycalc\", o, \"a\", calc)\n assert not (Path(\"mycalc\").exists())\n ppg.run()\n assert Path(\"mycalc\").exists()\n\n def test_invalidation_redoes_output(self):\n o = Dummy()\n\n def calc():\n return \", \".join(str(x) for x in range(0, 100))\n\n job, cache_job = ppg.CachedAttributeLoadingJob(\"mycalc\", o, \"a\", calc)\n of = \"A\"\n\n def do_write(of):\n write(of, o.a)\n\n ppg.FileGeneratingJob(of, do_write).depends_on(job)\n ppg.run()\n assert read(of) == \", \".join(str(x) for x in range(0, 100))\n\n ppg.new()\n\n def calc2():\n return \", \".join(str(x) for x in range(0, 200))\n\n job, cache_job = ppg.CachedAttributeLoadingJob(\n \"mycalc\", o, \"a\", calc2\n ) # now, jobB should be deleted...\n ppg.FileGeneratingJob(of, do_write).depends_on(job)\n ppg.run()\n assert 
read(of) == \", \".join(str(x) for x in range(0, 200))\n\n def test_invalidation_ignored_does_not_redo_output(self):\n # actually, this test has evolved away from it's original behaviour.\n # adding/remoning a function dependency will always trigger!\n o = Dummy()\n\n def calc():\n counter('1')\n return \", \".join(str(x) for x in range(0, 100))\n\n job, cache_job = ppg.CachedAttributeLoadingJob(\"mycalc\", o, \"a\", calc)\n of = \"A\"\n\n def do_write(of):\n write(of, o.a)\n\n ppg.FileGeneratingJob(of, do_write).depends_on(job)\n ppg.run()\n assert read(of) == \", \".join(str(x) for x in range(0, 100))\n assert read('1') == '1'\n\n ppg.new()\n\n def calc2():\n counter('2')\n return \", \".join(str(x) for x in range(0, 200))\n\n job, cache_job = ppg.CachedAttributeLoadingJob(\n \"mycalc\", o, \"a\", calc2, depend_on_function=False\n )\n ppg.FileGeneratingJob(of, do_write).depends_on(job)\n ppg.run()\n assert read(of) == \", \".join(str(x) for x in range(0, 200)) # removing the dependency triggers\n assert read('2') == '1'\n\n ppg.new()\n job, cache_job = ppg.CachedAttributeLoadingJob(\"mycalc\", o, \"a\", calc2)\n ppg.FileGeneratingJob(of, do_write).depends_on(job)\n ppg.run()\n assert read(of) == \", \".join(\n str(x) for x in range(0, 200)\n ) # The new stuff - you either have an explicit ignore_code_changes in our codebase, or we enforce consistency between code and result\n assert read('2') == '2' # rerun, we regained the func dependency\n\n ppg.run()\n assert read(of) == \", \".join(\n str(x) for x in range(0, 200)\n ) # The new stuff - you either have an explicit ignore_code_changes in our codebase, or we enforce consistency between code and result\n assert read('2') == '2' # no rerun\n\n\n def test_throws_on_non_function_func(self):\n o = Dummy()\n\n with pytest.raises(TypeError):\n ppg.CachedAttributeLoadingJob(\n \"mycalc\", lambda: 5, o, \"a\"\n ) # wrong argument order\n\n def test_calc_depends_on_added_dependencies(self):\n o = Dummy()\n load_attr = ppg.AttributeLoadingJob(\"load_attr\", o, \"o\", lambda: 55)\n\n def calc():\n return o.o\n\n def out(output_filename):\n write(output_filename, str(o.o2))\n\n lj, cj = ppg.CachedAttributeLoadingJob(\"cached_job\", o, \"o2\", calc)\n fg = ppg.FileGeneratingJob(\"A\", out)\n fg.depends_on(lj)\n cj.depends_on(load_attr)\n ppg.run()\n assert read(\"A\") == \"55\"\n\n def test_depends_on_returns_self(self):\n o = Dummy()\n jobA, cache_job = ppg.CachedAttributeLoadingJob(\n \"A\", o, \"shu\", lambda: write(\"out/A\", \"shu\")\n )\n jobB = ppg.FileGeneratingJob(\"B\", lambda of: write(\"out/B\", \"shu\"))\n assert jobA.depends_on(jobB) is jobA\n\n def test_passing_non_function(self):\n o = Dummy()\n\n with pytest.raises(TypeError):\n ppg.CachedAttributeLoadingJob(\"a\", o, \"a\", 55)\n\n def test_passing_non_string_as_jobid(self):\n o = Dummy()\n\n with pytest.raises(TypeError):\n ppg.CachedAttributeLoadingJob(5, o, \"a\", lambda: 55)\n\n def test_no_swapping_attributes_for_one_job(self):\n def cache():\n return list(range(0, 100))\n\n o = Dummy()\n ppg.CachedAttributeLoadingJob(\"A\", o, \"a\", cache)\n\n with pytest.raises(ppg.JobRedefinitionError):\n ppg.CachedAttributeLoadingJob(\"A\", o, \"b\", cache)\n ppg.new(run_mode=ppg.RunMode.NOTEBOOK)\n ppg.CachedAttributeLoadingJob(\"A\", o, \"a\", cache)\n ppg.CachedAttributeLoadingJob(\"A\", o, \"b\", cache)\n\n def test_no_swapping_objects_for_one_job(self):\n def cache():\n return list(range(0, 100))\n\n o = Dummy()\n o2 = Dummy()\n ppg.CachedAttributeLoadingJob(\"A\", o, \"a\", cache)\n 
with pytest.raises(ppg.JobRedefinitionError):\n ppg.CachedAttributeLoadingJob(\"A\", o2, \"a\", cache)\n ppg.new(run_mode=ppg.RunMode.NOTEBOOK)\n ppg.CachedAttributeLoadingJob(\"A\", o, \"a\", cache)\n ppg.CachedAttributeLoadingJob(\"A\", o2, \"a\", cache)\n\n def test_cached_attribute_job_does_not_load_its_preqs_on_cached(self):\n o = Dummy()\n\n def a():\n o.a = \"A\"\n append(\"A\", \"A\")\n\n def calc():\n append(\"B\", \"B\")\n return o.a * 2\n\n def output(output_filename):\n write(\"D\", o.c)\n\n dl = ppg.DataLoadingJob(\"A\", a)\n ca, cache_job = ppg.CachedAttributeLoadingJob(\"C\", o, \"c\", calc)\n fg = ppg.FileGeneratingJob(\"D\", output)\n fg.depends_on(ca)\n cache_job.depends_on(dl)\n ppg.run()\n assert read(\"D\") == \"AA\" # we did write the final result\n assert read(\"A\") == \"A\" # ran the dl job\n assert read(\"B\") == \"B\" # ran the calc job...\n Path(\"D\").unlink() # so the filegen and the loadjob of cached should rerun...\n ppg.new()\n\n dl = ppg.DataLoadingJob(\"A\", a)\n ca, cache_job = ppg.CachedAttributeLoadingJob(\"C\", o, \"c\", calc)\n fg = ppg.FileGeneratingJob(\"D\", output)\n fg.depends_on(ca)\n cache_job.depends_on(dl)\n ppg.run()\n assert read(\"D\") == \"AA\" # we did write the final result\n assert read(\"B\") == \"B\" # did not run the calc job again\n assert read(\"A\") == \"A\" # did not run the dl job\n\n def test_raises_on_non_string_filename(self):\n o = Dummy()\n with pytest.raises(TypeError):\n ppg.CachedAttributeLoadingJob(55, o, \"c\", lambda: 55)\n\n def test_raises_on_non_string_attribute(self):\n o = Dummy()\n with pytest.raises(ValueError):\n ppg.CachedAttributeLoadingJob(\"C\", o, 354, lambda: 55)\n\n def test_callback_must_be_callable(self):\n o = Dummy()\n with pytest.raises(TypeError):\n ppg.CachedAttributeLoadingJob(\"x\", o, \"a\", \"shu\")\n\n def test_name_must_be_str(self):\n o = Dummy()\n with pytest.raises(TypeError):\n ppg.CachedAttributeLoadingJob(123, o, \"a\", lambda: 123)\n", "id": "10878685", "language": "Python", "matching_score": 4.484705924987793, "max_stars_count": 0, "path": "tests/test_cache_jobs.py" }, { "content": "from pathlib import Path\nimport pytest\nimport pypipegraph2 as ppg\nfrom .shared import write, read, append, writeappend, Dummy, counter\n\nshu = None\n\n\[email protected](\"create_out_dir\")\[email protected](\"ppg2_per_test\")\nclass TestJobGeneratingJob:\n def test_basic(self):\n def gen():\n ppg.FileGeneratingJob(\"out/A\", lambda of: write(\"out/A\", \"A\"))\n ppg.FileGeneratingJob(\"out/B\", lambda of: write(\"out/B\", \"B\"))\n ppg.FileGeneratingJob(\"out/C\", lambda of: write(\"out/C\", \"C\"))\n\n ppg.JobGeneratingJob(\"genjob\", gen)\n ppg.run()\n assert read(\"out/A\") == \"A\"\n assert read(\"out/B\") == \"B\"\n assert read(\"out/C\") == \"C\"\n\n def test_injecting_multiple_stages(self):\n def gen():\n def genB():\n def genC():\n ppg.FileGeneratingJob(\"out/D\", lambda of: write(\"out/D\", \"D\"))\n\n ppg.JobGeneratingJob(\"C\", genC)\n\n ppg.JobGeneratingJob(\"B\", genB)\n\n ppg.JobGeneratingJob(\"A\", gen)\n ppg.run()\n assert read(\"out/D\") == \"D\"\n\n def test_generated_job_depending_on_each_other_one_of_them_is_Invariant(self):\n # basic idea. 
You have jobgen A,\n # it not only creates filegenB, but also ParameterDependencyC that A depends on\n # does that work\n def gen():\n jobB = ppg.FileGeneratingJob(\n \"out/B\", lambda of: write(\"out/B\", \"B\"), depend_on_function=False\n )\n jobC = ppg.ParameterInvariant(\"C\", (\"ccc\",))\n jobB.depends_on(jobC)\n\n ppg.JobGeneratingJob(\"A\", gen)\n ppg.run()\n assert read(\"out/B\") == \"B\"\n\n ppg.new()\n\n def gen2():\n jobB = ppg.FileGeneratingJob(\n \"out/B\", lambda of: write(\"out/B\", \"C\"), depend_on_function=False\n )\n jobC = ppg.ParameterInvariant(\"C\", (\"ccc\",))\n jobB.depends_on(jobC)\n\n ppg.JobGeneratingJob(\"A\", gen2)\n ppg.run()\n assert read(\"out/B\") == \"B\" # no rerun\n\n ppg.new()\n\n def gen3():\n counter(\"3\")\n jobB = ppg.FileGeneratingJob(\n \"out/B\", lambda of: write(\"out/B\", \"C\"), depend_on_function=False\n )\n # jobB.ignore_code_changes()\n jobCX = ppg.ParameterInvariant(\"C\", (\"DDD\",))\n jobB.depends_on(jobCX)\n\n ppg.JobGeneratingJob(\"A\", gen3)\n ppg.run()\n assert read(\"out/B\") == \"C\" # did get rerun\n assert read(\"3\") == \"1\" # check that gen3 really ran...\n\n def test_generated_job_depending_on_job_that_cant_have_finished(self):\n # basic idea. You have jobgen A, and filegen B.\n # filegenB depends on jobgenA.\n # jobGenA created C depends on filegenB\n # Perhaps add a filegen D that's independand of jobGenA, but C also deps on D\n def a():\n jobB = ppg.FileGeneratingJob(\"out/B\", lambda of: write(\"out/B\", \"B\"))\n\n def genA():\n jobC = ppg.FileGeneratingJob(\"out/C\", lambda of: write(\"out/C\", \"C\"))\n jobC.depends_on(jobB)\n\n jobA = ppg.JobGeneratingJob(\"A\", genA)\n jobB.depends_on(jobA)\n ppg.run()\n assert read(\"out/B\") == \"B\"\n assert read(\"out/C\") == \"C\"\n\n def b():\n jobB = ppg.FileGeneratingJob(\"out/B\", lambda of: write(\"out/B\", \"B\"))\n jobD = ppg.FileGeneratingJob(\"out/D\", lambda of: write(\"out/D\", \"D\"))\n\n def genA():\n jobC = ppg.FileGeneratingJob(\"out/C\", lambda of: write(\"out/C\", \"C\"))\n jobC.depends_on(jobB)\n jobC.depends_on(jobD)\n\n jobA = ppg.JobGeneratingJob(\"A\", genA)\n jobB.depends_on(jobA)\n ppg.run()\n assert read(\"out/B\") == \"B\"\n assert read(\"out/C\") == \"C\"\n\n a()\n ppg.new()\n b()\n\n def test_generated_job_depending_on_each_other(self):\n # basic idea. You have jobgen A,\n # it not only creates filegenB, but also filegenC that depends on B\n # does that work\n def gen():\n jobB = ppg.FileGeneratingJob(\"out/B\", lambda of: write(\"out/B\", \"B\"))\n jobC = ppg.FileGeneratingJob(\n \"out/C\", lambda of: write(\"out/C\", read(\"out/B\"))\n )\n jobC.depends_on(jobB)\n\n ppg.JobGeneratingJob(\"A\", gen)\n ppg.run()\n assert read(\"out/B\") == \"B\"\n assert read(\"out/C\") == \"B\"\n\n def test_generated_job_depending_on_each_other_one_of_them_is_loading(self):\n # basic idea. 
You have jobgen A,\n # it not only creates filegenB, but also DataloadingC that depends on B\n # does that work\n def gen():\n def load():\n global shu\n shu = \"123\"\n\n def do_write(of):\n global shu\n write(of, shu)\n\n dl = ppg.DataLoadingJob(\"dl\", load)\n jobB = ppg.FileGeneratingJob(\"out/A\", do_write)\n jobB.depends_on(dl)\n\n ppg.JobGeneratingJob(\"gen\", gen)\n ppg.run()\n assert read(\"out/A\") == \"123\"\n\n def test_passing_non_function(self):\n with pytest.raises(TypeError):\n ppg.JobGeneratingJob(\"out/a\", \"shu\")\n\n def test_passing_non_string_as_jobid(self):\n with pytest.raises(TypeError):\n ppg.JobGeneratingJob(5, lambda: 1)\n\n def test_generated_jobs_that_can_not_run_right_away_because_of_dataloading_do_not_crash(\n self,\n ):\n o = Dummy()\n existing_dl = ppg.AttributeLoadingJob(\"a\", o, \"a\", lambda: \"Ashu\")\n\n def gen():\n new_dl = ppg.AttributeLoadingJob(\"b\", o, \"b\", lambda: \"Bshu\")\n fg_a = ppg.FileGeneratingJob(\"out/C\", lambda of: write(\"out/C\", o.a))\n fg_b = ppg.FileGeneratingJob(\"out/D\", lambda of: write(\"out/D\", o.b))\n fg_a.depends_on(existing_dl)\n fg_b.depends_on(new_dl)\n\n ppg.JobGeneratingJob(\"E\", gen)\n ppg.run()\n assert read(\"out/C\") == \"Ashu\"\n assert read(\"out/D\") == \"Bshu\"\n\n def test_filegen_invalidated_jobgen_created_filegen_later_also_invalidated(self):\n a = ppg.FileGeneratingJob(\n \"out/A\", lambda of: writeappend(\"out/A\", \"out/Ac\", \"A\")\n )\n p = ppg.ParameterInvariant(\"p\", \"p\")\n a.depends_on(p)\n\n def gen():\n append(\"out/g\", \"g\")\n c = ppg.FileGeneratingJob(\n \"out/C\", lambda of: writeappend(\"out/C\", \"out/Cx\", \"C\")\n )\n c.depends_on(a)\n\n # difference to ppg1 - gen jobs will not rerun if their inputs did not change!\n ppg.JobGeneratingJob(\"b\", gen).depends_on(a)\n ppg.run()\n assert read(\"out/g\") == \"g\"\n assert read(\"out/A\") == \"A\"\n assert read(\"out/Ac\") == \"A\"\n assert read(\"out/C\") == \"C\"\n assert read(\"out/Cx\") == \"C\"\n\n ppg.new()\n a = ppg.FileGeneratingJob(\n \"out/A\", lambda of: writeappend(\"out/A\", \"out/Ac\", \"A\")\n )\n p = ppg.ParameterInvariant(\"p\", \"p2\")\n a.depends_on(p)\n ppg.JobGeneratingJob(\"b\", gen).depends_on(a)\n ppg.run()\n assert read(\"out/g\") == \"gg\"\n assert read(\"out/Ac\") == \"AA\"\n assert (\n read(\"out/Cx\") == \"C\"\n ) # this is a difference to the pipegraph. 
depending on A\n # does not retrigger just because a's upstream changed.\n # only if a truly changed\n\n ppg.new()\n a = ppg.FileGeneratingJob(\n \"out/A\", lambda of: writeappend(\"out/A\", \"out/Ac\", \"B\")\n )\n p = ppg.ParameterInvariant(\"p\", \"p3\")\n a.depends_on(p)\n ppg.JobGeneratingJob(\"b\", gen).depends_on(a)\n ppg.run()\n assert read(\"out/g\") == \"ggg\"\n assert read(\"out/Ac\") == \"AAB\"\n assert read(\"out/Cx\") == \"CC\"\n\n def test_creating_within_dataload(self):\n \"\"\"This used to be forbidden in ppg1.\n I don't see a reason to forbid it now, the only\n substantial difference is that DataLoadingJobs run when their downstream needs them,\n and JobGeneratingJobs always run\"\"\"\n write_job = ppg.FileGeneratingJob(\"out/A\", lambda of: write(\"out/A\", \"aa\"))\n\n def load():\n ppg.FileGeneratingJob(\"out/B\", lambda of: write(\"out/B\", \"aa\"))\n\n dl = ppg.DataLoadingJob(\"load_data\", load)\n write_job.depends_on(dl)\n # with pytest.raises(ppg.JobsFailed):\n ppg.run()\n assert Path(\"out/B\").exists()\n\n def test_ignored_if_generating_within_filegenerating(self):\n write_job = ppg.FileGeneratingJob(\"out/A\", lambda of: write(\"out/A\", \"aa\"))\n\n def load(_of):\n ppg.FileGeneratingJob(\"out/B\", lambda of: write(\"out/B\", \"aa\"))\n write(\"out/C\", \"c\")\n\n dl = ppg.FileGeneratingJob(\"out/C\", load)\n write_job.depends_on(dl)\n ppg.run()\n assert read(\"out/C\") == \"c\"\n assert not Path(\"out/B\").exists()\n\n def test_invalidation(self):\n def gen():\n ppg.FileGeneratingJob(\"out/D\", lambda of: write(\"out/D\", \"D\"))\n\n ppg.JobGeneratingJob(\"A\", gen)\n ppg.run()\n assert read(\"out/D\") == \"D\"\n ppg.new()\n\n def gen():\n ppg.FileGeneratingJob(\"out/D\", lambda of: write(\"out/D\", \"E\"))\n\n ppg.JobGeneratingJob(\"A\", gen)\n ppg.run()\n assert read(\"out/D\") == \"E\"\n\n def test_invalidation_multiple_stages(self):\n counter = [0]\n\n def count():\n counter[0] += 1\n return str(counter[0])\n\n def gen():\n def genB():\n def genC():\n count()\n ppg.FileGeneratingJob(\"out/D\", lambda of: write(of, \"D\"))\n\n ppg.JobGeneratingJob(\"C\", genC)\n\n ppg.JobGeneratingJob(\"B\", genB)\n\n ppg.JobGeneratingJob(\"A\", gen)\n ppg.run()\n assert read(\"out/D\") == \"D\"\n assert counter[0] == 1\n\n ppg.new()\n ppg.JobGeneratingJob(\"A\", gen)\n ppg.run()\n assert read(\"out/D\") == \"D\"\n assert counter[0] == 2\n\n ppg.new()\n\n def gen():\n def genB():\n def genC():\n count()\n ppg.FileGeneratingJob(\"out/D\", lambda of: write(\"out/D\", \"E\"))\n\n ppg.JobGeneratingJob(\"C\", genC)\n\n ppg.JobGeneratingJob(\"B\", genB)\n\n ppg.JobGeneratingJob(\"A\", gen)\n ppg.run()\n assert read(\"out/D\") == \"E\"\n assert counter[0] == 3\n\n # this test only works if you'd remove the locking around next_job_number\n # *and* add in a small delay to actually trigger it\n # def test_massive_trying_to_hit_jobnumber_conflict(self):\n # ppg.new(cores=4)\n # def fail(of):\n # raise ValueError()\n # def jj(prefix):\n # def inner():\n # Path(prefix).mkdir(exist_ok=True)\n # for i in range(0,100):\n # ppg.FileGeneratingJob(prefix + '/' + str(i), lambda of: of.write_text(prefix))\n # return inner\n # a = ppg.JobGeneratingJob('a', jj('a'))\n # b = ppg.JobGeneratingJob('b', jj('b'))\n # c = ppg.JobGeneratingJob('c', jj('c'))\n # d = ppg.JobGeneratingJob('d', jj('d'))\n # ppg.run()\n\n", "id": "9050805", "language": "Python", "matching_score": 2.697848081588745, "max_stars_count": 0, "path": "tests/test_job_generating_jobs.py" }, { "content": "import pytest\nfrom pathlib 
import Path\nfrom .shared import write, force_load\nimport pypipegraph2 as ppg2\nimport pypipegraph as ppg1\n\n# to push test coverage...\n\n\nclass TestcompatibilityLayer:\n def test_repeated_apply_unapply(self):\n fg1 = ppg1.FileGeneratingJob\n fg2 = ppg2.FileGeneratingJob\n ppg2.replace_ppg1()\n assert ppg1.FileGeneratingJob is not fg1\n assert ppg1.FileGeneratingJob is not fg2 # actually it is wrapped...\n fg1_to_two = ppg1.FileGeneratingJob\n ppg2.unreplace_ppg1()\n assert ppg1.FileGeneratingJob is fg1\n ppg2.replace_ppg1()\n assert ppg1.FileGeneratingJob is fg1_to_two\n ppg2.replace_ppg1()\n assert ppg1.FileGeneratingJob is fg1_to_two\n ppg2.replace_ppg1()\n assert ppg1.FileGeneratingJob is fg1_to_two\n ppg2.unreplace_ppg1()\n assert ppg1.FileGeneratingJob is fg1\n ppg2.unreplace_ppg1()\n assert ppg1.FileGeneratingJob is fg1\n\n\[email protected](\"ppg1_compatibility_test\")\nclass TestcompatibilityLayerMore:\n def test_use_cores_exclusive(self):\n j = ppg1.FileGeneratingJob(\"shu\", lambda of: of.write_text(\"j\"))\n j.cores_needed = -2\n assert j.resources == ppg2.Resources.Exclusive\n assert j.cores_needed == -2\n\n def test_fg_did_not_create_its_file(self):\n j = ppg1.FileGeneratingJob(\"test_file_gen\", lambda: 55) # old school callback\n j2 = ppg1.FileGeneratingJob(\n \"test_file_gen_does\", lambda: Path(\"test_file_gen_does\").write_text(\"A\")\n ) # old school callback\n with pytest.raises(ppg2.JobsFailed):\n ppg2.run()\n assert \"did not create\" in str(j.exception)\n\n def test_multifilegenerating_without_arguments(self):\n j1 = ppg1.MultiFileGeneratingJob(\n [\"out/A\", \"out/B\"], lambda: write(\"out/A\", \"A\") or write(\"out/B\", \"B\")\n )\n j2 = ppg1.MultiFileGeneratingJob([\"out/C\", \"out/D\"], lambda: 55)\n j3 = ppg1.MultiFileGeneratingJob([\"out/G\", \"out/F\"], lambda of: 55)\n with pytest.raises(ppg2.JobsFailed):\n ppg2.run()\n assert \"did not create\" in str(j2.exception)\n assert \"did not create\" in str(j3.exception)\n assert Path(\"out/A\").read_text() == \"A\"\n assert Path(\"out/B\").read_text() == \"B\"\n\n def test_temp_file_with_and_without(self):\n a = ppg1.TempFileGeneratingJob(\"A\", lambda: 55)\n b = ppg1.TempFileGeneratingJob(\"b\", lambda of: 55)\n c = ppg1.MultiTempFileGeneratingJob([\"C\"], lambda: 55)\n d = ppg1.MultiTempFileGeneratingJob([\"D\"], lambda of: 55)\n force_load(a)\n force_load(b)\n force_load(c)\n force_load(d)\n with pytest.raises(ppg2.JobsFailed):\n ppg2.run()\n assert \"did not create\" in str(a.exception)\n assert \"did not create\" in str(b.exception)\n assert \"did not create\" in str(c.exception)\n assert \"did not create\" in str(d.exception)\n\n def test_unsupported(self):\n with pytest.raises(NotImplementedError):\n ppg1.MemMappedDataLoadingJob()\n\n def test_predecessors(self):\n a = ppg1.TempFileGeneratingJob(\"A\", lambda: 55)\n b = ppg1.TempFileGeneratingJob(\"b\", lambda of: 55)\n b.ignore_code_changes()\n b.depends_on(a)\n assert list(b.prerequisites) == [a]\n\n def test_depends_on_file_param_returns_wrapped(self):\n a = ppg1.FileGeneratingJob(\"a\", lambda of: counter(of))\n Path(\"input\").write_text(\"hello\")\n b = a.depends_on_file(\"input\").invariant\n assert isinstance(b, ppg2.ppg1_compatibility.FileInvariant)\n c = a.depends_on_params(\"shu\").invariant\n assert isinstance(c, ppg2.ppg1_compatibility.ParameterInvariant)\n\n def test_callback_adaption_with_default_parameters(self):\n def no_args():\n pass\n\n def all_default_args(a=123, b=234):\n pass\n\n def new_style(of, a=123, b=234):\n pass\n\n def 
new_style2(of):\n pass\n\n a = ppg1.FileGeneratingJob(\"a\", no_args)\n assert hasattr(\n a.generating_function, \"wrapped_function\"\n ) # is a wrapped function\n b = ppg1.FileGeneratingJob(\"b\", new_style)\n assert not hasattr(\n b.generating_function, \"wrapped_function\"\n ) # is a wrapped function\n b1 = ppg1.FileGeneratingJob(\"b1\", new_style)\n assert not hasattr(\n b1.generating_function, \"wrapped_function\"\n ) # is a wrapped function\n\n with pytest.raises(TypeError):\n c = ppg1.FileGeneratingJob(\"c\", all_default_args)\n ppg1.new_pipegraph()\n a = ppg1.MultiFileGeneratingJob([\"a\"], no_args)\n assert hasattr(\n a.generating_function, \"wrapped_function\"\n ) # is a wrapped function\n b = ppg1.MultiFileGeneratingJob([\"b\"], new_style)\n assert not hasattr(\n b.generating_function, \"wrapped_function\"\n ) # is a wrapped function\n c = ppg1.MultiFileGeneratingJob(\n [\"c\"], all_default_args\n ) # mfg never passed [output_files]\n assert hasattr(\n a.generating_function, \"wrapped_function\"\n ) # is a wrapped function\n\n def test_rc_cores_available(self):\n assert ppg1.util.global_pipegraph.rc.cores_available == ppg2.global_pipegraph.cores\n\n def test_ignore_code_changes_changes_both_dag_and_jobs(self):\n a = ppg1.FileGeneratingJob('a', lambda of: of.write_text('a'))\n assert len(ppg2.global_pipegraph.jobs) == 2 # the fg, and teh FI\n assert len(ppg2.global_pipegraph.job_dag) == 2 # the fg, and teh FI\n a.ignore_code_changes()\n assert len(ppg2.global_pipegraph.jobs) == 1 # the fg, and teh FI\n assert len(ppg2.global_pipegraph.job_dag) == 1 # the fg, and teh FI\n", "id": "1811775", "language": "Python", "matching_score": 4.046665668487549, "max_stars_count": 0, "path": "tests/ppg1_compatibility_layer/test_compatibility_layer.py" }, { "content": "from pathlib import Path\nimport types\nimport inspect\nimport os\nimport sys\nimport logging\nimport pypipegraph as ppg1\nimport pypipegraph.testing\nimport pypipegraph.testing.fixtures\nimport pypipegraph2 as ppg2\nimport pypipegraph2.testing\nimport wrapt\nimport importlib\nfrom .util import log_info, log_error, log_warning, log_debug, log_job_trace\n\n\nold_entries = {}\nold_modules = {}\npatched = False\n\nexception_map = {\n \"RuntimeError\": \"JobsFailed\",\n \"JobContractError\": \"JobContractError\",\n \"PyPipeGraphError\": \"FatalGraphException\",\n \"CycleError\": \"NotADag\",\n \"JobDiedException\": \"JobDied\",\n \"RuntimeException\": \"RunFailedInternally\",\n}\n\n\ndef replace_ppg1():\n \"\"\"Turn all ppg1 references into actual ppg2\n objects.\n Best effort, but the commonly used API should be well supported.\n Try to do this before anything imports ppg1.\n \"\"\"\n\n global patched\n if patched:\n return\n for x in dir(ppg1):\n old_entries[x] = getattr(ppg1, x)\n delattr(ppg1, x)\n for module_name, replacement in {\n \"pypipegraph.job\": job,\n \"pypipegraph.testing\": ppg2.testing,\n \"pypipegraph.testing.fixtures\": ppg2.testing.fixtures,\n }.items():\n if not module_name in sys.modules:\n importlib.import_module(module_name)\n\n old_modules[module_name] = sys.modules[module_name]\n sys.modules[module_name] = replacement\n\n # ppg1.__name__ == \"pypipegraph2\"\n # ppg1.__file__ == __file__\n # ppg1.__path__ == __path__\n # ppg1.__loader__ == __loader__\n # ppg1.__spec__ == __spec__\n # ppg1.__version__ == ppg2.__version__\n ppg1.__doc__ == \"\"\"ppg1->2 compatibility layer.\nSupports the commonly used the old ppg1 API\nwith ppg2 objects. 
Aspires to be a drop-in replacement.\n \"\"\"\n\n for old, new in exception_map.items():\n setattr(ppg1, old, getattr(ppg2.exceptions, new))\n\n # invariants\n\n ppg1.ParameterInvariant = ParameterInvariant\n ppg1.FileInvariant = FileInvariant\n ppg1.FileTimeInvariant = FileInvariant\n ppg1.RobustFileChecksumInvariant = FileInvariant\n ppg1.FileChecksumInvariant = FileInvariant\n ppg1.FunctionInvariant = FunctionInvariant\n ppg1.MultiFileInvariant = MultiFileInvariant\n\n ppg1.MultiFileGeneratingJob = MultiFileGeneratingJob\n ppg1.FileGeneratingJob = FileGeneratingJob\n ppg1.CachedAttributeLoadingJob = CachedAttributeLoadingJob\n ppg1.CachedDataLoadingJob = CachedDataLoadingJob\n ppg1.TempFileGeneratingJob = TempFileGeneratingJob\n ppg1.MultiTempFileGeneratingJob = MultiTempFileGeneratingJob\n ppg1.PlotJob = PlotJob\n\n ppg1.Job = ppg2.Job # don't wrap, we need the inheritance\n ppg1.DataLoadingJob = wrap_job(ppg2.DataLoadingJob)\n ppg1.AttributeLoadingJob = wrap_job(ppg2.AttributeLoadingJob)\n ppg1.JobGeneratingJob = wrap_job(ppg2.JobGeneratingJob)\n\n # unsupported\n for k in (\n \"NotebookJob\",\n \"DependencyInjectionJob\",\n \"TempFilePlusGeneratingJob\",\n \"MemMappedDataLoadingJob\",\n \"FinalJob\",\n \"CombinedPlotJob\",\n \"NothingChanged\", # very implementation detail...\n ):\n setattr(ppg1, k, unsupported(k))\n\n # misc\n ppg1.resource_coordinators = ResourceCoordinators\n ppg1.new_pipegraph = new_pipegraph\n ppg1.run_pipegraph = run_pipegraph\n ppg1.util = util\n ppg1.graph = graph\n ppg1.job = job\n ppg1.JobList = ppg2.JobList\n ppg1.ppg_exceptions = ppg_exceptions\n ppg1.inside_ppg = ppg2.inside_ppg\n ppg1.assert_uniqueness_of_object = ppg2.assert_uniqueness_of_object\n ppg1.testing = ppg2.testing\n ppg1.is_ppg2 = True\n ppg1.testing = ppg2.testing\n ppg1.testing.fixtures = ppg2.testing.fixtures\n # todo: list unpatched...\n new_entries = set(dir(ppg1))\n # this was used to find unported code.\n # for k in set(old_entries).difference(new_entries): # pragma: no cover\n # if not k.startswith(\"__\") and k != \"all\":\n # warnings.warn(f\"not yet ppg1-compatibility layer implemented: {k}\")\n patched = True\n\n\ndef unreplace_ppg1():\n \"\"\"Turn ppg1 compatibility layer off, restoring ppg1\n not that well tested, I suppose...\n\n \"\"\"\n global patched\n if not patched:\n return\n for x in dir(ppg1):\n delattr(ppg1, x)\n for k, v in old_entries.items():\n setattr(ppg1, k, v)\n for k, v in old_modules.items():\n sys.modules[k] = v\n ppg1.testing = old_modules[\"pypipegraph.testing\"]\n ppg1.testing.fixtures = old_modules[\"pypipegraph.testing.fixtures\"]\n patched = False\n\n\ndef wrap_job(cls):\n \"\"\"Adapt for ppg1 api idiosyncracies\"\"\"\n return lambda *args, **kwargs: PPG1Adaptor(cls(*args, **kwargs))\n\n\nclass ResourceCoordinators:\n def LocalSystem(max_cores_to_use=ppg2.ALL_CORES, profile=False, interactive=True):\n return (max_cores_to_use, interactive)\n\n\nclass Util:\n @property\n def global_pipegraph(self):\n from . import global_pipegraph\n\n return global_pipegraph\n\n @global_pipegraph.setter\n def global_pipegraph(self, value):\n from . 
import change_global_pipegraph\n\n change_global_pipegraph(value)\n\n @staticmethod\n def checksum_file(filename):\n \"\"\"was used by outside functions\"\"\"\n import stat as stat_module\n import hashlib\n\n file_size = os.stat(filename)[stat_module.ST_SIZE]\n if file_size > 200 * 1024 * 1024: # pragma: no cover\n print(\"Taking md5 of large file\", filename)\n with open(filename, \"rb\") as op:\n block_size = 1024 ** 2 * 10\n block = op.read(block_size)\n _hash = hashlib.md5()\n while block:\n _hash.update(block)\n block = op.read(block_size)\n res = _hash.hexdigest()\n return res\n\n\ndef job_or_filename(\n job_or_filename, invariant_class=None\n): # we want to return the wrapped class\n if invariant_class is None:\n invariant_class = FileInvariant\n return ppg2.util.job_or_filename(job_or_filename, invariant_class)\n\n\nutil = Util()\nutil.job_or_filename = job_or_filename\nutil.inside_ppg = ppg2.inside_ppg\nutil.assert_uniqueness_of_object = ppg2.assert_uniqueness_of_object\nutil.flatten_jobs = ppg2.util.flatten_jobs\nutil.freeze = ppg2.jobs.ParameterInvariant.freeze\n\n\nclass PPGExceptions:\n pass\n\n\nppg_exceptions = PPGExceptions()\nfor old, new in exception_map.items():\n setattr(ppg_exceptions, old, getattr(ppg2.exceptions, new))\n\n\n# earlier on, we had a different pickling scheme,\n# and that's what the files were called.\nif os.path.exists(\".pypipegraph_status_robust\"): # old projects keep their filename\n invariant_status_filename_default = \".pypipegraph_status_robust\" # pragma: no cover\nelif \"/\" in sys.argv[0]: # no script name but an executable?\n invariant_status_filename_default = \".pypipegraph_status_robust\"\nelse:\n # script specific pipegraphs\n invariant_status_filename_default = (\n \".ppg_status_%s\" % sys.argv[0]\n ) # pragma: no cover\n\n\nclass Graph:\n\n invariant_status_filename_default = invariant_status_filename_default\n\n\ngraph = Graph()\n\n\nclass Job:\n _InvariantJob = ppg2.jobs._InvariantMixin\n pass\n\n\njob = Job()\njob.function_to_str = ppg2.FunctionInvariant.function_to_str\n\n\nclass FakeRC:\n @property\n def cores_available(self):\n return ppg2.global_pipegraph.cores\n\n\ndef new_pipegraph(\n resource_coordinator=None,\n quiet=False,\n invariant_status_filename=None,\n dump_graph=True,\n interactive=True,\n cache_folder=\"cache\",\n log_file=None,\n log_level=logging.ERROR,\n):\n cores = ppg2.ALL_CORES\n run_mode = ppg2.RunMode.CONSOLE\n if resource_coordinator:\n cores = resource_coordinator[0]\n interactive = resource_coordinator[1] # rc overrides interactive setting\n if interactive: # otherwise, we read the one passed into the function\n run_mode = ppg2.RunMode.CONSOLE\n else:\n run_mode = ppg2.RunMode.NONINTERACTIVE\n kwargs = {}\n if invariant_status_filename:\n invariant_status_filename = Path(invariant_status_filename)\n kwargs[\"log_dir\"] = invariant_status_filename / \"logs\"\n kwargs[\"error_dir\"] = invariant_status_filename / \"errors\"\n kwargs[\"history_dir\"] = invariant_status_filename / \"history\"\n kwargs[\"run_dir\"] = invariant_status_filename / \"run\"\n kwargs[\"allow_short_filenames\"] = False # as was the default for ppg1\n kwargs[\"prevent_absolute_paths\"] = False # as was the default for ppg1\n\n res = ppg2.new(\n cores=cores,\n run_mode=run_mode,\n log_level=log_level,\n cache_dir=Path(cache_folder),\n **kwargs,\n )\n _add_graph_comp(res)\n return res\n\ndef _add_graph_comp(graph):\n graph.cache_folder = graph.cache_dir # ppg1 compatibility\n graph.rc = FakeRC()\n util.global_pipegraph = graph\n\n\n\ndef 
run_pipegraph(*args, **kwargs):\n \"\"\"Run the current global pipegraph\"\"\"\n if util.global_pipegraph is None:\n raise ValueError(\"You need to call new_pipegraph first\")\n ppg2.run(**kwargs)\n\n\ndef _ignore_code_changes(job):\n job.depend_on_function = False\n if hasattr(job, \"func_invariant\"):\n log_job_trace(f\"ignoring changes for {job.job_id}\")\n util.global_pipegraph.job_dag.remove_edge(job.func_invariant.job_id, job.job_id)\n\n if hasattr(job.func_invariant, 'usage_counter'):\n job.func_invariant.usage_counter -= 1\n if not hasattr(job.func_invariant, 'usage_counter') or job.func_invariant.usage_counter == 0:\n util.global_pipegraph.job_dag.remove_node(job.func_invariant.job_id)\n for k in job.func_invariant.outputs:\n util.global_pipegraph.job_inputs[job.job_id].remove(k)\n del util.global_pipegraph.jobs[job.func_invariant.job_id]\n\n del job.func_invariant\n if hasattr(job, \"lfg\"):\n _ignore_code_changes(job.lfg)\n\n\nclass PPG1AdaptorBase:\n def ignore_code_changes(self):\n _ignore_code_changes(self)\n\n def use_cores(self, value):\n self.cores_needed = value\n return self\n\n @property\n def cores_needed(self):\n res = None\n if self.resources == ppg2.Resources.AllCores:\n res = -1\n elif self.resources == ppg2.Resources.Exclusive:\n res = -2\n else:\n res = 1\n return res\n\n @cores_needed.setter\n def cores_needed(self, value):\n if value == -1 or value > 1:\n self.use_resources(ppg2.Resources.AllCores)\n elif value == -2:\n self.use_resources(ppg2.Resources.Exclusive)\n else: # elif value == 1:\n self.use_resources(ppg2.Resources.SingleCore)\n\n def depends_on(self, *args): # keep the wrapper\n if hasattr(self, \"__wrapped__\"):\n res = self.__wrapped__.depends_on(*args)\n else:\n super().depends_on(*args)\n return self\n\n def depends_on_file(self, filename):\n job = FileInvariant(filename)\n self.depends_on(job)\n return ppg2.jobs.DependsOnInvariant(job, self)\n\n def depends_on_params(self, params):\n job = ParameterInvariant(self.job_id, params)\n self.depends_on(job)\n return ppg2.jobs.DependsOnInvariant(job, self)\n\n @property\n def filenames(self):\n return self.files\n\n @property\n def prerequisites(self):\n return self.upstreams\n\n\nclass PPG1Adaptor(wrapt.ObjectProxy, PPG1AdaptorBase):\n pass\n\n\nclass FileInvariant(PPG1AdaptorBase, ppg2.FileInvariant):\n pass\n\n\nclass FunctionInvariant(PPG1AdaptorBase, ppg2.FunctionInvariant):\n pass\n\n\nclass ParameterInvariant(PPG1AdaptorBase, ppg2.ParameterInvariant):\n pass\n\n\ndef assert_ppg_created():\n if not util.global_pipegraph:\n raise ValueError(\"Must instantiate a pipegraph before creating any Jobs\")\n\n\ndef _first_param_empty(signature):\n \"\"\"Check whether the first argument to this call is\n empty, ie. no with a default value\"\"\"\n try:\n first = next((signature.parameters.items()).__iter__())\n return first[1].default == inspect._empty\n except StopIteration:\n return True\n\n\ndef _wrap_func_if_no_output_file_params(function, accept_all_defaults=False):\n sig = inspect.signature(function)\n if len(sig.parameters) == 0 or not _first_param_empty(sig):\n # no or only default parameters = do it oldstyle.\n if not accept_all_defaults and not _first_param_empty(sig):\n raise TypeError(\n f\"Could not correctly wrap {function}.\\n\"\n f\"{ppg2.FunctionInvariant.function_to_str(function)}\\n\"\n \"It has default parameter that would have been replaced \"\n \"with output_filename in ppg1 already. 
Fix your function arguments\"\n )\n\n def wrapper(of): # pragma: no cover - runs in spawned process\n function()\n if not isinstance(of, list):\n of = [of]\n for a_filename in of:\n if not a_filename.exists():\n raise ppg2.exceptions.JobContractError(\n \"%s did not create its file(s) %s %s\\n.Cwd: %s\"\n % (\n a_filename,\n function.__code__.co_filename,\n function.__code__.co_firstlineno,\n os.path.abspath(os.getcwd()),\n )\n )\n\n wrapper.wrapped_function = function\n func = wrapper\n else:\n func = function\n return func\n\n\nclass FileGeneratingJob(PPG1AdaptorBase, ppg2.FileGeneratingJob):\n def __new__(cls, *args, **kwargs):\n obj = ppg2.FileGeneratingJob.__new__(cls, *args, **kwargs)\n return obj\n\n def __init__(self, output_filename, function, rename_broken=False, empty_ok=False):\n func = _wrap_func_if_no_output_file_params(function)\n super().__init__(output_filename, func, empty_ok=empty_ok)\n\n\nclass MultiFileGeneratingJob(PPG1AdaptorBase, ppg2.MultiFileGeneratingJob):\n def __init__(self, output_filenames, function, rename_broken=False, empty_ok=False):\n func = _wrap_func_if_no_output_file_params(function, accept_all_defaults=True)\n res = super().__init__(output_filenames, func, empty_ok=empty_ok)\n\n\nclass TempFileGeneratingJob(PPG1AdaptorBase, ppg2.TempFileGeneratingJob):\n def __init__(self, output_filename, function, rename_broken=False):\n func = _wrap_func_if_no_output_file_params(function)\n super().__init__(output_filename, func)\n\n\nclass MultiTempFileGeneratingJob(PPG1AdaptorBase, ppg2.MultiTempFileGeneratingJob):\n def __init__(self, output_filenames, function, rename_broken=False):\n func = _wrap_func_if_no_output_file_params(function, accept_all_defaults=True)\n super().__init__(output_filenames, func)\n\n\ndef MultiFileInvariant(filenames):\n # ppg2 already detects when invariants are gained and lost -> no need for\n # special MultiFileInvariant\n res = []\n for f in filenames:\n res.append(ppg2.FileInvariant(f))\n return res\n\n\n# no one inherits from these, so wrapping in a function is ok, I suppose\n# they should have been functions in ppg1.e..\n\n\ndef CachedAttributeLoadingJob(\n cache_filename, target_object, target_attribute, calculating_function\n):\n try:\n job = ppg2.CachedAttributeLoadingJob(\n cache_filename, target_object, target_attribute, calculating_function\n )\n except ppg2.JobRedefinitionError as e:\n raise ppg1.JobContractError(str(e))\n\n return wrap_old_style_lfg_cached_job(job)\n\n\ndef CachedDataLoadingJob(cache_filename, calculating_function, loading_function):\n job = ppg2.CachedDataLoadingJob(\n cache_filename, calculating_function, loading_function\n )\n return wrap_old_style_lfg_cached_job(job)\n\n\ndef PlotJob(\n output_filename,\n calc_function,\n plot_function,\n render_args=None,\n skip_table=False,\n skip_caching=False,\n):\n pj = ppg2.PlotJob(\n output_filename,\n calc_function,\n plot_function,\n render_args=render_args,\n cache_calc=not skip_caching,\n create_table=not skip_table,\n )\n res = pj.plot\n if isinstance(pj.cache, ppg2.jobs.CachedJobTuple):\n res.cache_job = wrap_old_style_lfg_cached_job(pj.cache)\n else:\n res.cache_job = pj.cache\n res.table_job = pj.table\n res = PPG1Adaptor(res)\n\n def depends_on(\n self,\n *other_jobs,\n ):\n # FileGeneratingJob.depends_on(self, other_job) # just like the cached jobs, the plotting does not depend on the loading of prerequisites\n if res.cache_job is None:\n ppg2.Job.depends_on(self, *other_jobs)\n if self.table_job is not None:\n 
self.table_job.depends_on(*other_jobs)\n elif (\n hasattr(self, \"cache_job\") and other_jobs[0] is not self.cache_job\n ): # activate this after we have added the invariants...\n self.cache_job.depends_on(*other_jobs)\n return self\n\n res.depends_on = types.MethodType(depends_on, res)\n\n def ignore_code_changes(self):\n _ignore_code_changes(self)\n if self.cache_job is not None:\n _ignore_code_changes(self.cache_job)\n if self.table_job is not None:\n _ignore_code_changes(self.table_job)\n\n res.ignore_code_changes = types.MethodType(ignore_code_changes, res)\n\n return res\n\n\ndef wrap_old_style_lfg_cached_job(job):\n # adapt new style to old style\n if hasattr(job.load, \"__wrapped__\"): # pragma: no cover\n res = job.load\n res.lfg = job.calc # just assume it's a PPG1Adaptor\n else:\n res = PPG1Adaptor(job.load)\n res.lfg = PPG1Adaptor(job.calc)\n\n def depends_on(self, *args, **kwargs):\n if args and args[0] == self.lfg: # repeated definition, I suppose\n # must not call self.__wrapped__.depends_on - that's a recursion for some reason?\n ppg2.Job.depends_on(self, *args, **kwargs)\n else:\n self.lfg.depends_on(*args, **kwargs)\n return self\n\n res.depends_on = depends_on.__get__(res)\n\n def use_cores(self, cores):\n self.lfg.use_cores(cores)\n return self\n\n res.use_cores = use_cores.__get__(res)\n\n return res\n\n\ndef unsupported(name):\n def inner():\n raise NotImplementedError(f\"ppg2 no longer offers {name}\")\n\n return inner\n", "id": "8426069", "language": "Python", "matching_score": 10.394603729248047, "max_stars_count": 0, "path": "src/pypipegraph2/ppg1_compatibility.py" }, { "content": "# -*- coding: utf-8 -*-\n\n__version__ = '0.197'\n\nfrom .graph import run_pipegraph, new_pipegraph\nfrom .ppg_exceptions import (\n RuntimeError,\n RuntimeException,\n CycleError,\n JobContractError,\n PyPipeGraphError,\n JobDiedException,\n NothingChanged,\n)\nfrom . import util\n\ninside_ppg = util.inside_ppg\n\nfrom .job import (\n Job,\n JobList,\n FileGeneratingJob,\n MultiFileGeneratingJob,\n DataLoadingJob,\n AttributeLoadingJob,\n TempFileGeneratingJob,\n TempFilePlusGeneratingJob,\n CachedAttributeLoadingJob,\n CachedDataLoadingJob,\n PlotJob,\n CombinedPlotJob,\n FunctionInvariant,\n ParameterInvariant,\n FileTimeInvariant,\n FileChecksumInvariant,\n RobustFileChecksumInvariant,\n FileInvariant,\n MultiFileInvariant,\n JobGeneratingJob,\n DependencyInjectionJob,\n FinalJob,\n MemMappedDataLoadingJob,\n MultiTempFileGeneratingJob,\n NotebookJob,\n)\n\nassert_uniqueness_of_object = util.assert_uniqueness_of_object\n\nall = [\n run_pipegraph,\n new_pipegraph,\n RuntimeError,\n RuntimeException,\n CycleError,\n JobContractError,\n PyPipeGraphError,\n JobDiedException,\n NothingChanged,\n Job,\n JobList,\n FileGeneratingJob,\n MultiFileGeneratingJob,\n DataLoadingJob,\n AttributeLoadingJob,\n TempFileGeneratingJob,\n TempFilePlusGeneratingJob,\n CachedAttributeLoadingJob,\n CachedDataLoadingJob,\n PlotJob,\n CombinedPlotJob,\n FunctionInvariant,\n ParameterInvariant,\n FileTimeInvariant,\n FileChecksumInvariant,\n RobustFileChecksumInvariant,\n MultiFileInvariant,\n FileInvariant,\n JobGeneratingJob,\n DependencyInjectionJob,\n FinalJob,\n MemMappedDataLoadingJob,\n util,\n inside_ppg,\n MultiTempFileGeneratingJob,\n NotebookJob,\n]\n", "id": "5756333", "language": "Python", "matching_score": 1.208581566810608, "max_stars_count": 0, "path": "src/pypipegraph/__init__.py" }, { "content": "import sys\nimport pytest\nimport pypipegraph2 as ppg2\nfrom . 
import fixtures\n\n\ndef run():\n \"\"\"a ppg/no ppg aware run wrapper\"\"\"\n if ppg2.inside_ppg():\n ppg2.run()\n else:\n pass\n\n\nrun_pipegraph = run # ppg1 compatibility\n\nfl_count = 0\n\n\ndef force_load(job, prefix=None):\n \"\"\"make sure a dataloadingjob has been loaded (if applicable)\"\"\"\n if ppg2.inside_ppg():\n if not isinstance(job, ppg2.Job):\n if prefix is None:\n global fl_count\n fl_count += 1\n prefix = \"fl_%i\" % fl_count\n else:\n prefix = job.job_id\n return ppg2.JobGeneratingJob(prefix + \"_force_load\", lambda: None).depends_on(\n job\n )\n\n\nclass RaisesDirectOrInsidePipegraph(object):\n \"\"\"Piece of black magic from the depths of _pytest\n that will check whether a piece of code will raise the\n expected expcition (if outside of ppg), or if it will\n raise the exception when the pipegraph is running\n\n Use as a context manager like pytest.raises\"\"\"\n\n def __init__(self, expected_exception, search_message=None):\n self.expected_exception = expected_exception\n self.message = \"DID NOT RAISE {}\".format(expected_exception)\n self.search_message = search_message\n self.excinfo = None\n\n def __enter__(self):\n import _pytest\n\n self.excinfo = object.__new__(_pytest._code.ExceptionInfo)\n return self.excinfo\n\n def __exit__(self, *tp):\n from _pytest.outcomes import fail\n\n if ppg2.inside_ppg():\n with pytest.raises(ppg2.RunFailed) as e:\n run()\n ex = e.value.exceptions[0]\n if isinstance(ex, ppg2.JobError):\n ex = ex.args[0]\n if not isinstance(ex, self.expected_exception):\n raise ValueError(\n f\"Unexpected exception. Expected {self.expected_exception}, found {e.value.exceptions[0]}\"\n )\n\n if self.search_message:\n assert self.search_message in str(e.value.exceptions[0])\n else:\n __tracebackhide__ = True\n if tp[0] is None:\n fail(self.message)\n self.excinfo.__init__(tp)\n suppress_exception = issubclass(self.excinfo.type, self.expected_exception)\n if sys.version_info[0] == 2:\n raise ValueError(\"No python2 support\")\n if suppress_exception:\n return True # seep PEP_0343\n else:\n return False\n", "id": "3653371", "language": "Python", "matching_score": 2.3107593059539795, "max_stars_count": 0, "path": "src/pypipegraph2/testing/__init__.py" }, { "content": "class PPGException(Exception):\n \"\"\"Baseclass for all our exceptions\"\"\"\n\n pass\n\n\nclass FatalGraphException(PPGException):\n \"\"\"Could not finish executing the graph completly\"\"\"\n\n pass\n\n\nclass NotADag(FatalGraphException):\n \"\"\"Your graph is not acyclic\"\"\"\n\n pass\n\n\nclass JobOutputConflict(ValueError):\n \"\"\"Multiple jobs with overlapping (but not identical) outputs were defined\"\"\"\n\n pass\n\n\nclass JobContractError(PPGException):\n \"\"\"A job did not do what it was supposed to do\"\"\"\n\n pass\n\n\nclass JobDied(PPGException):\n pass\n\n\nclass JobRedefinitionError(ValueError):\n pass\n\n\nclass JobEvaluationFailed(PPGException):\n \"\"\"For some reason, we could not decide on whether to execute this job\"\"\"\n\n pass\n\n\nclass RunFailed(FatalGraphException):\n \"\"\"The execution failed outside of the scope of a single job\"\"\"\n pass\n\n\nclass JobsFailed(RunFailed):\n \"\"\"one or more jobs failed\"\"\"\n\n def __init__(self, msg, exceptions):\n super().__init__(msg)\n self.exceptions = exceptions\n\n\nclass RunFailedInternally(RunFailed):\n \"\"\"There is a bug in pypipegraph2\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(\n \"RunFailedInternally: Due to some bug in the graph-running, we could not finish running. 
File a bug report.\",\n *args,\n **kwargs,\n )\n\n\nclass _RunAgain(PPGException):\n \"\"\"Internal to signal rerun\"\"\"\n\n pass\n\n\nclass JobError(PPGException):\n \"\"\"Wrapper around the exceptions thrown by random jobs\"\"\"\n\n def __str__(self):\n return (\n (\"ppg.JobError:\\n\")\n + (f\"{self.args[0].__class__.__name__}: {self.args[0]}\\n\")\n # + (f\"\\tTraceback: {self.args[1]}\\n\")\n + (\"\")\n )\n\n def __repr__(self):\n return str(self)\n\nclass JobCanceled(PPGException):\n pass\n", "id": "3435499", "language": "Python", "matching_score": 0.5738641023635864, "max_stars_count": 0, "path": "src/pypipegraph2/exceptions.py" }, { "content": "from enum import Enum, auto\n\n\nclass ProcessingStatus(Enum):\n Waiting = auto() # not yet ready\n ReadyToRun = auto() # short livened, turns into schedulded very soon\n Schedulded = auto() # it's in the pipeline. Stand by for outcome.\n Done = auto() # grilled and done.\n\n def is_terminal(self):\n return self is ProcessingStatus.Done\n\n\nclass JobOutcome(Enum):\n NotYet = auto() # default\n Success = auto()\n Skipped = auto()\n Failed = auto()\n UpstreamFailed = auto()\n Pruned = auto()\n\n\nclass ShouldRun(Enum):\n Maybe = auto()\n Yes = auto()\n YesAfterValidation = auto()\n No = auto()\n IfInvalidated = auto()\n IfDownstreamNeedsMe = auto()\n IfParentJobRan = auto()\n\n def is_decided(self):\n return self in (ShouldRun.Yes, ShouldRun.No)\n\n def almost_decided(self):\n return self in (ShouldRun.Yes, ShouldRun.No, ShouldRun.IfInvalidated)\n\n\nclass Action(Enum):\n Schedulde = auto()\n GoYes = auto()\n GoNo = auto()\n ShouldNotHappen = auto()\n TakeFromParent = auto()\n RefreshValidationAndTryAgain = auto()\n ConditionalValidated = auto()\n\nclass ValidationState(Enum):\n Unknown = auto()\n Validated = auto()\n Invalidated = auto()\n UpstreamFailed = auto()\n\n def is_terminal(self):\n return self != ValidationState.Unknown\n\n\nclass JobKind(Enum):\n Invariant = auto()\n Output = auto()\n Temp = auto()\n Cleanup = auto()\n Loading = auto()\n JobGenerating = auto()\n\n\nclass RunMode(Enum):\n CONSOLE = 1 # certain redefinitions: FatalGraphException, interactive console, ctrl-c does not work\n NOTEBOOK = 2 # certain redefinitions: warning, no interactive console (todo: gui), control-c,/abort works TODO\n NONINTERACTIVE = (\n 3 # such as testing, redefinitions like console, but no gui, ctrl-c works TODO\n )\n\n def is_strict(self):\n return self is RunMode.CONSOLE or self is RunMode.NONINTERACTIVE\n\n\nclass Resources(Enum):\n SingleCore = \"SingleCore\"\n AllCores = \"AllCores\"\n # MemoryHog = \"MemoryHog\" # todo\n Exclusive = \"Exclusive\"\n # RateLimited = \"RateLimited\" # todo, think web requests\n RunsHere = \"RunsHere\" # in this process\n _RaiseInCoreLock = \"_RaiseInCoreLock =\" # for testing\n _RaiseInToNumber = \"_RaiseInToNumber =\" # for testing\n\n def is_external(self):\n return self in (\n Resources.SingleCore,\n Resources.AllCores,\n Resources.Exclusive,\n ) # pragma: no cover - used by interactive\n\n def to_number(self, max_cores):\n if self is Resources.SingleCore:\n return 1\n elif self is Resources.AllCores:\n return max(max_cores - 1, 1) # never return less than 1 core\n elif self is Resources.Exclusive:\n return max_cores\n elif self is Resources.RunsHere:\n return 1\n elif self is Resources._RaiseInCoreLock:\n return 0 # which the core lock does not like!\n else:\n raise ValueError(\"Not a Resource with a given number of cores\")\n", "id": "7586181", "language": "Python", "matching_score": 1.6665302515029907, 
"max_stars_count": 0, "path": "src/pypipegraph2/enums.py" }, { "content": "from __future__ import print_function\n\n\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2017, <NAME> <<EMAIL>>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\nof the Software, and to permit persons to whom the Software is furnished to do\nso, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nimport time\n\nimport os\nimport traceback\nimport multiprocessing\nimport threading\nimport signal\nimport sys\nimport collections\nimport tempfile\nimport queue\nimport pickle\n\nfrom . import ppg_exceptions\nfrom . import util\n\n\nclass DummyResourceCoordinator:\n \"\"\"For the calculating workers. so it throws exceptions...\"\"\"\n\n\ndef get_memory_available():\n if hasattr(os, \"sysconf\"):\n if \"SC_NPROCESSORS_ONLN\" in os.sysconf_names: # a linux or unix system\n op = open(\"/proc/meminfo\", \"r\")\n d = op.read()\n op.close()\n mem_total = d[d.find(\"MemTotal:\") + len(\"MemTotal:\") :]\n mem_total = mem_total[: mem_total.find(\"kB\")].strip()\n swap_total = d[d.find(\"SwapTotal:\") + len(\"SwapTotal:\") :]\n swap_total = swap_total[: swap_total.find(\"kB\")].strip()\n physical_memory = int(mem_total) * 1024\n swap_memory = int(swap_total) * 1024\n return physical_memory, swap_memory\n else: # pragma: no cover\n # assume it's mac os x\n physical_memory = int(os.popen2(\"sysctl -n hw.memsize\")[1].read())\n swap_memory = (\n physical_memory * 10\n ) # mac os x virtual memory system uses *all* available boot device size, so a heuristic should work well enough\n return physical_memory, swap_memory\n else: # pragma: no cover\n raise ValueError(\n \"get_memory_available() does not know how to get available memory on your system.\"\n )\n\n\ndef signal_handler(signal, frame): # pragma: no cover - interactive\n print('Ctrl-C has been disable. 
Please give command \"abort\"')\n\n\nJobReturnValue = collections.namedtuple(\n \"JobReturnValue\",\n (\n \"worker_id\",\n \"was_ok\",\n \"job_id\",\n \"stdout\",\n \"stderr\",\n \"exception\",\n \"trace\",\n \"new_jobs\",\n \"runtime\",\n ),\n)\n\n\nclass LocalSystem:\n \"\"\"A ResourceCoordinator that uses the current machine,\n up to max_cores_to_use cores of it\n\n It uses multiprocessing and the LocalWorker\n \"\"\"\n\n def __init__(self, max_cores_to_use=util.CPUs(), profile=False, interactive=True):\n self.max_cores_to_use = max_cores_to_use # todo: update to local cpu count...\n self.worker = LocalWorker(self)\n self.cores_available = max_cores_to_use\n self.physical_memory, self.swap_memory = get_memory_available()\n self.timeout = 5\n self.profile = profile\n if (multiprocessing.current_process().name != \"MainProcess\") or (\n util._running_inside_test\n ):\n interactive = False\n self.interactive = interactive\n\n def spawn_workers(self):\n return {\"LocalWorker\": self.worker}\n\n def get_resources(self):\n res = {\n \"LocalWorker\": { # this is always the maximum available - the graph is handling the bookeeping of running jobs\n \"cores\": self.cores_available,\n \"physical_memory\": self.physical_memory,\n \"swap_memory\": self.swap_memory,\n }\n }\n return res\n\n def enter_loop(self):\n os.environ['RAYON_NUM_THREADS'] = \"%i\" % (self.max_cores_to_use,)\n self.spawn_workers()\n if sys.version_info[0] == 2 and sys.version_info[1] < 7: # pragma: no cover\n raise ValueError(\"pypipegraph needs python >=2.7\")\n else:\n self.que = multiprocessing.Queue()\n\n self.pipegraph.logger.debug(\"Entering execution loop\")\n self.pipegraph.start_jobs()\n if self.interactive: # pragma: no cover\n from . import interactive\n\n interactive_thread = threading.Thread(target=interactive.thread_loop)\n interactive_thread.start()\n s = signal.signal(signal.SIGINT, signal_handler) # ignore ctrl-c\n while True:\n self.worker.check_for_dead_jobs() # whether time out or or job was done, let's check this...\n if self.interactive: # pragma: no cover\n self.see_if_output_is_requested()\n try:\n start = time.time()\n r = self.que.get(block=True, timeout=self.timeout)\n stop = time.time()\n self.pipegraph.logger.info(\"Till que.got: %.2f\" % (stop - start))\n\n if r is None and interactive.interpreter.terminated: # pragma: no cover\n # abort was requested\n self.worker.kill_jobs()\n break\n # worker_id, was_ok, job_id_done, stdout, stderr, exception, trace, new_jobs, runtime = (\n # r\n # ) # was there a job done?t\n self.pipegraph.logger.debug(\n \"Job returned: %s, was_ok: %s\" % (r.job_id, r.was_ok)\n )\n job = self.pipegraph.jobs[r.job_id]\n job.stop_time = time.time()\n job.was_done_on.add(r.worker_id)\n job.stdout = r.stdout\n job.stderr = r.stderr\n job.exception = r.exception\n job.trace = r.trace\n job.failed = not r.was_ok\n if job.start_time:\n delta = job.stop_time - job.start_time\n if delta > 5:\n self.pipegraph.logger.warning(\n \"%s runtime: %.2fs (%.2fs w/oque)\"\n % (r.job_id, delta, r.runtime)\n )\n job.runtime = delta\n else:\n job.runtime = -1\n if job.failed:\n try:\n if job.exception.startswith(\"STR\".encode(\"UTF-8\")):\n job.exception = job.exception[3:]\n raise pickle.UnpicklingError(\n \"String Transmission\"\n ) # what an ugly control flow...\n job.exception = pickle.loads(r.exception)\n except (\n pickle.UnpicklingError,\n EOFError,\n TypeError,\n AttributeError,\n ): # some exceptions can't be pickled, so we send a string instead\n pass\n if job.exception:\n 
self.pipegraph.logger.warning(\n \"Job returned with exception: %s\" % job\n )\n self.pipegraph.logger.warning(\n \"Exception: %s\" % repr(r.exception)\n )\n self.pipegraph.logger.warning(\"Trace: %s\" % r.trace)\n if r.new_jobs is not False:\n if not job.modifies_jobgraph(): # pragma: no cover\n job.exception = ValueError(\"This branch should not be reached.\")\n job.failed = True\n else:\n new_jobs = pickle.loads(r.new_jobs)\n self.pipegraph.logger.debug(\n \"We retrieved %i new jobs from %s\" % (len(new_jobs), job)\n )\n self.pipegraph.new_jobs_generated_during_runtime(new_jobs)\n\n more_jobs = self.pipegraph.job_executed(job)\n if (\n not more_jobs\n ): # this means that all jobs are done and there are no longer any more running...\n break\n self.pipegraph.start_jobs()\n\n except (queue.Empty, IOError): # either timeout, or the que failed\n pass\n self.que.close()\n self.que.join_thread() # wait for the que to close\n if self.interactive: # pragma: no cover - interactive\n if not interactive.interpreter.stay:\n interactive.interpreter.terminated = True\n interactive_thread.join()\n signal.signal(signal.SIGINT, s)\n self.pipegraph.logger.debug(\"Leaving loop\")\n\n def see_if_output_is_requested(self): # pragma: no cover - interactive\n import select\n\n try:\n if select.select([sys.stdin], [], [], 0)[0]:\n sys.stdin.read(1) # enter pressed...\n self.pipegraph.print_running_jobs()\n pass\n finally:\n pass\n # termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n\n def abort(self): # pragma: no cover - interactive\n self.que.put(None)\n\n def kill_job(self, job): # pragma: no cover - interactive\n self.worker.kill_job(job)\n\n def get_job_pid(self, job): # pragma: no cover - interactive\n return self.worker.get_job_pid(job)\n\n\nclass LocalWorker:\n def __init__(self, rc):\n self.rc = rc\n self.worker_id = \"LocalWorker\"\n self.process_to_job = {}\n\n def spawn(self, job):\n self.rc.pipegraph.logger.debug(\"spawning %s\", job)\n job.start_time = time.time()\n preq_failed = False\n if not job.is_final_job: # final jobs don't load their (fake) prereqs.\n for preq in job.prerequisites:\n if preq.is_loadable():\n self.rc.pipegraph.logger.debug(\"Now loading %s\", preq)\n preq.start_time = time.time()\n if not self.load_job(preq):\n preq_failed = True\n break\n preq.stop_time = time.time()\n delta = preq.stop_time - preq.start_time\n self.rc.pipegraph.logger.debug(\"load time %s %.2f\", preq, delta)\n if delta > 5:\n self.rc.pipegraph.logger.warning(\n \"%s load runtime: %.2fs\" % (preq.job_id, delta)\n )\n if preq_failed:\n self.rc.que.put(\n JobReturnValue(\n worker_id=self.worker_id,\n was_ok=False,\n job_id=job.job_id,\n stdout=\"\",\n stderr=\"\",\n exception=\"STRPrerequsite failed\".encode(\"UTF-8\"),\n trace=\"\",\n new_jobs=False,\n runtime=-1,\n )\n )\n # time.sleep(0)\n else:\n if job.modifies_jobgraph():\n stdout = tempfile.SpooledTemporaryFile(mode=\"w+\")\n stderr = tempfile.SpooledTemporaryFile(mode=\"w+\")\n self.run_a_job(job, stdout, stderr)\n else:\n stdout = tempfile.TemporaryFile(\n mode=\"w+\"\n ) # no more spooling - it doesn't get passed back\n stderr = tempfile.TemporaryFile(mode=\"w+\")\n stdout.fileno()\n stderr.fileno()\n p = multiprocessing.Process(\n target=self.wrap_run, args=[job, stdout, stderr, False]\n )\n job.stdout_handle = stdout\n job.stderr_handle = stderr\n p.start()\n job.run_info = \"pid = %s\" % (p.pid,)\n job.pid = p.pid\n\n self.process_to_job[p] = job\n\n def load_job(\n self, job\n ): # this executes a load job returns false if an error 
occured\n stdout = tempfile.SpooledTemporaryFile(mode=\"w\")\n stderr = tempfile.SpooledTemporaryFile(mode=\"w\")\n\n old_stdout = sys.stdout\n old_stderr = sys.stderr\n sys.stdout = stdout\n sys.stderr = stderr\n trace = \"\"\n new_jobs = False\n start = time.time()\n try:\n job.load()\n was_ok = True\n exception = None\n except Exception as e:\n trace = traceback.format_exc()\n was_ok = False\n exception = e\n try:\n exception = pickle.dumps(exception)\n except Exception as e: # some exceptions can't be pickled, so we send a string instead\n exception = str(e)\n stop = time.time()\n stdout.seek(0, os.SEEK_SET)\n stdout_text = stdout.read()[-10 * 1024 :]\n stdout.close()\n stderr.seek(0, os.SEEK_SET)\n stderr_text = stderr.read()[-10 * 1024 :]\n stderr.close()\n sys.stdout = old_stdout\n sys.stderr = old_stderr\n if not was_ok:\n self.rc.que.put(\n JobReturnValue(\n worker_id=self.worker_id,\n was_ok=was_ok,\n job_id=job.job_id,\n stdout=stdout_text,\n stderr=stderr_text,\n exception=exception,\n trace=trace,\n new_jobs=new_jobs,\n runtime=stop - start,\n )\n )\n os.chdir(self.rc.pipegraph.chdir)\n return was_ok\n\n def wrap_run(self, job, stdout, stderr, is_local):\n if self.rc.interactive: # pragma: no cover\n signal.signal(signal.SIGINT, signal.SIG_IGN) # ignore ctrl-c\n\n self.run_a_job(job, stdout, stderr, is_local)\n\n def run_a_job(\n self, job, stdout, stderr, is_local=True\n ): # this runs in the spawned processes, except for job.modifies_jobgraph()==True jobs\n\n old_stdout = sys.stdout\n old_stderr = sys.stderr\n sys.stdout = stdout\n sys.stderr = stderr\n trace = \"\"\n new_jobs = False\n util.global_pipegraph.new_jobs = None # ignore jobs created here.\n start = time.time()\n try:\n temp = job.run()\n was_ok = True\n exception = None\n if job.modifies_jobgraph():\n new_jobs = self.prepare_jobs_for_transfer(temp)\n elif temp:\n raise ppg_exceptions.JobContractError(\n \"Job returned a value (which should be new jobs generated here) without having modifies_jobgraph() returning True\"\n )\n except Exception as e:\n print(\"exception in \", job.job_id)\n trace = traceback.format_exc()\n was_ok = False\n exception = e\n try:\n exception = pickle.dumps(e)\n except Exception as e: # some exceptions can't be pickled, so we send a string instead\n try:\n exception = bytes(\"STR\", \"UTF-8\") + bytes(e)\n except TypeError:\n exception = str(e)\n finally:\n stop = time.time()\n try:\n stdout.seek(0, os.SEEK_SET)\n stdout_text = stdout.read()\n stdout.close()\n except ValueError as e: # pragma: no cover - defensive\n if \"I/O operation on closed file\" in str(e):\n stdout_text = (\n \"Stdout could not be captured / io operation on closed file\"\n )\n else:\n raise\n try:\n stderr.seek(0, os.SEEK_SET)\n stderr_text = stderr.read()\n stderr.close()\n except ValueError as e: # pragma: no cover - defensive\n if \"I/O operation on closed file\" in str(e):\n stderr_text = (\n \"stderr could not be captured / io operation on closed file\"\n )\n else:\n raise\n sys.stdout = old_stdout\n sys.stderr = old_stderr\n stop = time.time()\n self.rc.que.put(\n JobReturnValue(\n worker_id=self.worker_id,\n was_ok=was_ok,\n job_id=job.job_id,\n stdout=stdout_text,\n stderr=stderr_text,\n exception=exception,\n trace=trace,\n new_jobs=new_jobs,\n runtime=stop - start,\n )\n )\n if not is_local:\n self.rc.que.close()\n self.rc.que.join_thread()\n\n def prepare_jobs_for_transfer(self, job_dict):\n \"\"\"When traveling back, jobs-dependencies are wrapped as strings - this should\n prevent nasty 
suprises\"\"\"\n # package as strings\n for job in job_dict.values():\n job.prerequisites = [preq.job_id for preq in job.prerequisites]\n job.dependants = [dep.job_id for dep in job.dependants]\n # unpackanging is don in new_jobs_generated_during_runtime\n self.rc.pipegraph.new_jobs_generated_during_runtime(job_dict)\n return pickle.dumps(\n {}\n ) # The LocalWorker does not need to serialize back the jobs, it already is running in the space of the MCP\n\n def check_for_dead_jobs(self):\n remove = []\n for proc in self.process_to_job:\n if not proc.is_alive():\n remove.append(proc)\n if (\n proc.exitcode != 0\n ): # 0 means everything ok, we should have an answer via the que from the job itself...\n job = self.process_to_job[proc]\n job.stdout_handle.flush()\n job.stderr_handle.flush()\n job.stdout_handle.seek(0, os.SEEK_SET)\n job.stderr_handle.seek(0, os.SEEK_SET)\n stdout = job.stdout_handle.read()\n stderr = job.stderr_handle.read()\n job.stdout_handle.close()\n job.stderr_handle.close()\n job.stdout_handle = None\n job.stderr_handle = None\n self.rc.que.put(\n JobReturnValue(\n worker_id=self.worker_id,\n was_ok=False,\n job_id=job.job_id,\n stdout=stdout,\n stderr=stderr,\n exception=pickle.dumps(\n ppg_exceptions.JobDiedException(proc.exitcode)\n ),\n trace=\"\",\n new_jobs=False, # no new jobs\n runtime=-1,\n )\n )\n for proc in remove:\n del self.process_to_job[proc]\n\n def kill_job(self, target_job): # pragma: no cover (needed by interactive)\n for process, job in self.process_to_job.items():\n if job == target_job:\n print(\"Found target job\")\n self.rc.pipegraph.logger.info(\"Killing job on user request: %s\", job)\n process.terminate()\n\n def kill_jobs(self): # pragma: no cover (needed by interactive)\n print(\"Killing %i running children\" % len(self.process_to_job))\n for proc in self.process_to_job:\n proc.terminate()\n\n def get_job_pid(self, target_job): # pragma: no cover (needed by interactive)\n print(target_job)\n print(target_job.run_info)\n print(target_job.pid)\n return target_job.pid\n", "id": "3355407", "language": "Python", "matching_score": 3.059241771697998, "max_stars_count": 4, "path": "src/pypipegraph/resource_coordinators.py" }, { "content": "\"\"\"Borrowed and adapted from 'rich'\n\nCopyright 2020 <NAME>, <NAME>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"\nfrom types import TracebackType\nfrom typing import Dict, List, Type\nfrom traceback import walk_tb\nimport inspect\nimport sys\nimport os\nfrom dataclasses import dataclass, field\nimport textwrap\n\n_load_cwd = os.path.abspath(os.getcwd())\n\n\n@dataclass\nclass Frame:\n filename: str\n lineno: int\n name: str\n locals: Dict[str, str]\n source: str\n\n\n@dataclass\nclass Stack:\n exc_type: str\n exc_value: str\n is_cause: bool = False\n frames: List[Frame] = field(default_factory=list)\n\n\nclass Trace:\n def __init__(\n self,\n exc_type: Type[BaseException],\n exc_value: BaseException,\n traceback: TracebackType,\n ):\n \"\"\"Extract traceback information.\n\n Args:\n exc_type (Type[BaseException]): Exception type.\n exc_value (BaseException): Exception value.\n traceback (TracebackType): Python Traceback object.\n\n Returns:\n Trace: A Trace instance which you can use to construct a `Traceback`.\n \"\"\"\n\n stacks: List[Stack] = []\n is_cause = False\n\n while True:\n stack = Stack(\n exc_type=str(exc_type.__name__),\n exc_value=str(exc_value),\n is_cause=is_cause,\n )\n\n stacks.append(stack)\n append = stack.frames.append\n\n for frame_summary, line_no in walk_tb(traceback):\n try:\n if (\n inspect.getsourcefile(frame_summary) == sys.argv[0]\n ): # current script, not absolute\n filename = os.path.join(\n _load_cwd, sys.argv[0]\n ) # pragma: no cover\n else:\n filename = inspect.getabsfile(frame_summary)\n except Exception: # pragma: no cover\n filename = frame_summary.f_code.co_filename\n if filename and not filename.startswith(\"<\"):\n filename = os.path.abspath(filename) if filename else \"?\"\n try:\n with open(filename, \"rb\") as op:\n source = op.read().decode(\"utf-8\", errors=\"replace\")\n except Exception: # pragma: no cover\n source = \"\"\n # this needs to be 'robust'\n # exceptions here tend to not leave a decent stack trace\n my_locals = {}\n for key, value in frame_summary.f_locals.items():\n try:\n my_locals[key] = str(value)\n except Exception as e:\n my_locals[key] = f\"Could not str() local: {e}\"\n frame = Frame(\n filename=filename,\n lineno=line_no,\n name=frame_summary.f_code.co_name,\n locals=my_locals,\n source=source,\n )\n append(frame)\n\n cause = getattr(exc_value, \"__cause__\", None)\n if cause and cause.__traceback__:\n exc_type = cause.__class__\n exc_value = cause\n traceback = cause.__traceback__\n if traceback:\n is_cause = not getattr(exc_value, \"__suppress_context__\", False)\n stack.is_cause = is_cause\n continue\n\n # No cover, code is reached but coverage doesn't recognize it.\n break # pragma: no cover\n\n self.stacks = stacks[::-1]\n\n\n def __str__(self):\n return self._format_rich_traceback_fallback(False)\n\n def _format_rich_traceback_fallback(self, include_locals=False, include_formating=True):\n \"\"\"Pretty print a traceback.\n\n We don't use rich's own facility, since it is\n not time-bounded / does not cut the output\n \"\"\"\n def bold(x):\n if include_formating:\n return f'[bold]{x}[/bold]'\n else:\n return x\n def red(x):\n if include_formating:\n return f'[red]{x}[/red]'\n else:\n return x\n\n\n if include_locals:\n\n def render_locals(frame):\n out.append(bold(\"Locals\") + \":\")\n scope = frame.locals\n items = sorted(scope.items())\n len_longest_key = 
max((len(x[0]) for x in items))\n for key, value in items:\n v = str(value)\n if len(v) > 1000:\n v = v[:1000] + \"…\"\n v = textwrap.indent(v, \"\\t \" + \" \" * len_longest_key).lstrip()\n out.append(f\"\\t{key.rjust(len_longest_key, ' ')} = {v}\")\n out.append(\"\")\n\n else:\n\n def render_locals(frame):\n pass\n\n first_stack = True\n out = []\n if self is None:\n out = [\"# no traceback was captured\"]\n else:\n for stack in self.stacks:\n if not first_stack:\n out.append(\"\")\n if stack.is_cause:\n out.append(\"The above exception cause to the following one\")\n first_stack = False\n exc_value = str(stack.exc_value)\n if len(exc_value) > 1000:\n exc_value = exc_value[:1000] + \"…\"\n out.append(\n f\"{bold('Exception')}: {red(bold(stack.exc_type) + ' ' + exc_value)}\"\n )\n out.append(\"{bold('Traceback')} (most recent call last):\")\n\n for frame in stack.frames:\n out.append(f'{frame.filename}\":{frame.lineno}, in {frame.name}')\n # if frame.filename.startswith(\"<\"): # pragma: no cover # - interactive, I suppose\n # render_locals(frame)\n # continue\n extra_lines = 3\n if frame.source:\n code = frame.source\n line_range = (\n frame.lineno - extra_lines,\n frame.lineno + extra_lines,\n )\n # leading empty lines get filtered from the output\n # but to actually show the correct section & highlight\n # we need to adjust the line range accordingly.\n code = code.split(\"\\n\")\n for ii, line in zip(\n range(*line_range), code[line_range[0] : line_range[1]]\n ):\n if ii == frame.lineno - 1:\n c = \"> \"\n else:\n c = \" \"\n out.append(f\"\\t{c}{ii} {line}\")\n if frame.locals:\n render_locals(frame)\n continue\n else:\n out.append(\"# no source available\")\n if frame.locals:\n render_locals(frame)\n out.append(\n f\"{bold('Exception')} (repeated from above): {red(bold(stack.exc_type) + ' ' + exc_value)}\"\n )\n return \"\\n\".join(out)\n\n\n", "id": "2918356", "language": "Python", "matching_score": 2.4193456172943115, "max_stars_count": 0, "path": "src/pypipegraph2/ppg_traceback.py" }, { "content": "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2012, <NAME> <<EMAIL>>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\nof the Software, and to permit persons to whom the Software is furnished to do\nso, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\nimport os\nimport stat as stat_module\nimport hashlib\nimport time\nimport pandas as pd\n\nglobal_pipegraph = None\nis_remote = False\n_running_inside_test = False # set from testing.fixtures upon import\n\n# import gc\n# gc.set_debug(gc.DEBUG_UNCOLLECTABLE)\n\n\ndef inside_ppg():\n return global_pipegraph is not None\n\n\ndef output_file_exists(filename):\n \"\"\"Check if a file exists and its size is > 0\"\"\"\n if not file_exists(filename):\n return False\n st = stat(filename)\n if st[stat_module.ST_SIZE] == 0:\n return False\n return True\n\n\ndef assert_uniqueness_of_object(\n object_with_name_attribute, pipeline=None, also_check=None\n):\n \"\"\"Makes certain there is only one object with this class & .name.\n\n This is necesarry so the pipeline jobs assign their data only to the\n objects you're actually working with.\"\"\"\n if pipeline is None:\n pipeline = global_pipegraph\n\n if object_with_name_attribute.name.find(\"/\") != -1:\n raise ValueError(\n \"Names must not contain /, it confuses the directory calculations\"\n )\n typ = object_with_name_attribute.__class__\n if typ not in pipeline.object_uniquifier:\n pipeline.object_uniquifier[typ] = {}\n if object_with_name_attribute.name in pipeline.object_uniquifier[typ]:\n raise ValueError(\n \"Doublicate object: %s, %s\" % (typ, object_with_name_attribute.name)\n )\n if also_check:\n if not isinstance(also_check, list):\n also_check = [also_check]\n for other_typ in also_check:\n if (\n other_typ in pipeline.object_uniquifier\n and object_with_name_attribute.name\n in pipeline.object_uniquifier[other_typ]\n ):\n raise ValueError(\n \"Doublicate object: %s, %s\"\n % (other_typ, object_with_name_attribute.name)\n )\n object_with_name_attribute.unique_id = len(pipeline.object_uniquifier[typ])\n pipeline.object_uniquifier[typ][object_with_name_attribute.name] = True\n\n\ncpu_count = 0\n\n\ndef CPUs():\n \"\"\"\n Detects the number of CPUs on a system. Cribbed from pp.\n \"\"\"\n global cpu_count\n if cpu_count == 0:\n cpu_count = 1 # default\n # Linux, Unix and MacOS:\n if hasattr(os, \"sysconf\"):\n if \"SC_NPROCESSORS_ONLN\" in os.sysconf_names:\n # Linux & Unix:\n ncpus = os.sysconf(\"SC_NPROCESSORS_ONLN\")\n if isinstance(ncpus, int) and ncpus > 0:\n cpu_count = ncpus\n else: # OSX: pragma: no cover\n cpu_count = int(\n os.popen2(\"sysctl -n hw.ncpu\")[1].read()\n ) # pragma: no cover\n # Windows:\n if \"NUMBER_OF_PROCESSORS\" in os.environ: # pragma: no cover\n ncpus = int(os.environ[\"NUMBER_OF_PROCESSORS\"])\n if ncpus > 0:\n cpu_count = ncpus\n return cpu_count\n\n\ndef file_exists(filename):\n return os.path.exists(filename)\n\n\ndef stat(filename):\n stat_cache = global_pipegraph.stat_cache\n if filename not in stat_cache:\n stat_cache[filename] = (os.stat(filename), time.time())\n return stat_cache[filename][0]\n s, t = stat_cache[filename]\n if (time.time() - t) > 1:\n stat_cache[filename] = (os.stat(filename), time.time())\n s, t = stat_cache[filename]\n return s\n\n\ndef job_or_filename(job_or_filename, invariant_class=None):\n \"\"\"Take a filename, or a job. Return filename, dependency-for-that-file\n ie. 
either the job, or a invariant_class (default: FileChecksumInvariant)\"\"\"\n from .job import FileGeneratingJob, FileChecksumInvariant\n\n if invariant_class is None:\n invariant_class = FileChecksumInvariant\n\n if isinstance(job_or_filename, FileGeneratingJob):\n filename = job_or_filename.job_id\n deps = [job_or_filename]\n elif job_or_filename is not None:\n filename = job_or_filename\n deps = [invariant_class(filename)]\n else: # = None\n filename = None\n deps = []\n return filename, deps\n\n\ndef checksum_file(filename):\n file_size = os.stat(filename)[stat_module.ST_SIZE]\n if file_size > 200 * 1024 * 1024: # pragma: no cover\n print(\"Taking md5 of large file\", filename)\n with open(filename, \"rb\") as op:\n block_size = 1024 ** 2 * 10\n block = op.read(block_size)\n _hash = hashlib.md5()\n while block:\n _hash.update(block)\n block = op.read(block_size)\n res = _hash.hexdigest()\n return res\n\n\ndef flatten_jobs(j):\n \"\"\"Take an arbritary deeply nested list of lists of jobs\n and return just the jobs\"\"\"\n from .job import Job\n\n if isinstance(j, Job):\n yield j\n else:\n for sj in j:\n yield from flatten_jobs(sj)\n\n\ndef load_invariant_stati(filename):\n import pickle\n\n result = {}\n with open(filename, \"rb\") as op_in:\n try:\n while True:\n name = pickle.load(op_in)\n value = pickle.load(op_in)\n result[name] = value\n except EOFError:\n pass\n return result\n\n\ndef freeze(obj):\n \"\"\" Turn dicts into tuples of (key,value),\n lists into tuples, and sets\n into frozensets, recursively - usefull\n to get a hash value..\n \"\"\"\n\n try:\n hash(obj)\n return obj\n except TypeError:\n pass\n\n if isinstance(obj, dict):\n frz = tuple(sorted([(k, freeze(obj[k])) for k in obj]))\n return frz\n elif isinstance(obj, (list, tuple)):\n return tuple([freeze(x) for x in obj])\n\n elif isinstance(obj, set):\n return frozenset(obj)\n elif isinstance(obj, pd.DataFrame):\n hashed = pd.util.hash_pandas_object(obj)\n return tuple([freeze(x) for x in hashed])\n else:\n msg = \"Unsupported type: %r\" % type(obj).__name__\n raise TypeError(msg)\n", "id": "1543555", "language": "Python", "matching_score": 4.522797584533691, "max_stars_count": 0, "path": "src/pypipegraph/util.py" }, { "content": "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2012, <NAME> <<EMAIL>>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\nof the Software, and to permit persons to whom the Software is furnished to do\nso, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nimport pytest\nimport pypipegraph as ppg\nfrom .shared import write, assertRaises\n\n\[email protected](\"ppg1_compatibility_test\")\nclass TestCycles:\n def test_simple_cycle(self, ppg1_compatibility_test):\n def inner():\n ppg1_compatibility_test.new_pipegraph()\n jobA = ppg.FileGeneratingJob(\"A\", lambda: write(\"A\", \"A\"))\n jobB = ppg.FileGeneratingJob(\"B\", lambda: write(\"B\", \"A\"))\n jobA.depends_on(jobB)\n jobB.depends_on(jobA)\n # ppg.run_pipegraph()\n\n assertRaises(ppg.CycleError, inner)\n\n def test_indirect_cicle(self, ppg1_compatibility_test):\n ppg1_compatibility_test.new_pipegraph()\n jobA = ppg.FileGeneratingJob(\"A\", lambda: write(\"A\", \"A\"))\n jobB = ppg.FileGeneratingJob(\"B\", lambda: write(\"B\", \"A\"))\n jobC = ppg.FileGeneratingJob(\"C\", lambda: write(\"C\", \"A\"))\n jobC.depends_on(jobB)\n jobB.depends_on(jobA)\n jobA.depends_on(jobC)\n\n def inner():\n ppg.run_pipegraph()\n\n assertRaises(ppg.CycleError, inner)\n\n def test_exceeding_max_cycle(self, ppg1_compatibility_test):\n max_depth = 50\n # this raisess...\n jobs = []\n for x in range(0, max_depth - 1):\n j = ppg.FileGeneratingJob(str(x), lambda: write(str(x), str(x)))\n if jobs:\n j.depends_on(jobs[-1])\n jobs.append(j)\n jobs[0].depends_on(j)\n\n def inner():\n ppg.run_pipegraph()\n\n assertRaises(ppg.CycleError, inner)\n\n ppg1_compatibility_test.new_pipegraph()\n jobs = []\n for x in range(0, max_depth + 100):\n j = ppg.FileGeneratingJob(str(x), lambda: write(str(x), str(x)))\n if jobs:\n j.depends_on(jobs[-1])\n jobs.append(j)\n jobs[0].depends_on(j)\n\n with pytest.raises(ppg.CycleError):\n ppg.run_pipegraph()\n", "id": "2959399", "language": "Python", "matching_score": 4.495553493499756, "max_stars_count": 0, "path": "tests/ppg1_compatibility_layer/test_cycles.py" }, { "content": "import pytest\nimport pypipegraph2 as ppg\nfrom .shared import write\n\n\[email protected](\"ppg2_per_test\")\nclass TestCycles:\n def test_simple_cycle(self):\n with pytest.raises(ppg.exceptions.NotADag):\n jobA = ppg.FileGeneratingJob(\"A\", lambda of: write(\"A\", \"A\"))\n jobB = ppg.FileGeneratingJob(\"B\", lambda of: write(\"B\", \"A\"))\n jobA.depends_on(jobB)\n jobB.depends_on(jobA)\n # ppg.run()\n\n def test_indirect_cicle(self):\n jobA = ppg.FileGeneratingJob(\"A\", lambda of: write(\"A\", \"A\"))\n jobB = ppg.FileGeneratingJob(\"B\", lambda of: write(\"B\", \"A\"))\n jobC = ppg.FileGeneratingJob(\"C\", lambda of: write(\"C\", \"A\"))\n jobC.depends_on(jobB)\n jobB.depends_on(jobA)\n jobA.depends_on(jobC)\n\n with pytest.raises(ppg.exceptions.NotADag):\n ppg.run()\n\n def test_exceeding_max_cycle(self):\n max_depth = 50\n # this raises\n jobs = []\n for x in range(0, max_depth - 1):\n j = ppg.FileGeneratingJob(str(x), lambda of: write(str(x), str(x)))\n if jobs:\n j.depends_on(jobs[-1])\n jobs.append(j)\n jobs[0].depends_on(j)\n\n with pytest.raises(ppg.exceptions.NotADag):\n ppg.run()\n\n ppg.new()\n jobs = []\n for x in range(0, max_depth + 100):\n j = ppg.FileGeneratingJob(str(x), lambda of: write(str(x), str(x)))\n if jobs:\n j.depends_on(jobs[-1])\n jobs.append(j)\n jobs[0].depends_on(j)\n\n with pytest.raises(ppg.exceptions.NotADag):\n ppg.run()\n", "id": "56306", "language": "Python", "matching_score": 1.0392378568649292, 
"max_stars_count": 0, "path": "tests/test_cycles.py" }, { "content": "from pathlib import Path\nimport pytest\nimport pypipegraph2 as ppg\nfrom .shared import write, read\n\n\ndef forget_job_status(invariant_status_filename=None):\n \"\"\"Delete the job status file - usually only useful for testing\"\"\"\n if invariant_status_filename is None:\n invariant_status_filename = ppg.global_pipegraph.get_history_filename()\n try:\n Path(invariant_status_filename).unlink()\n except OSError:\n pass\n\n\ndef destroy_global_pipegraph():\n \"\"\"Free the current global pipegraph - usually only useful for testing\"\"\"\n ppg.global_pipegraph = None\n\n\[email protected](\"ppg2_per_test\")\nclass TestSimple:\n def test_job_creation_before_pipegraph_creation_raises(self):\n destroy_global_pipegraph()\n with pytest.raises(ValueError):\n ppg.FileGeneratingJob(\"A\", lambda: None)\n\n def test_using_after_destruction(self):\n a = ppg.FileGeneratingJob(\"A\", lambda of: None)\n destroy_global_pipegraph()\n with pytest.raises(ValueError):\n a.readd()\n\n def test_run_pipegraph_without_pipegraph_raises(self):\n destroy_global_pipegraph()\n with pytest.raises(ValueError):\n ppg.run()\n\n def test_can_run_twice(self):\n\n ppg.run()\n ppg.run()\n ppg.run()\n\n def test_can_add_jobs_after_run(self):\n\n ppg.new()\n ppg.run()\n ppg.FileGeneratingJob(\"A\", lambda of: write(of, \"A\"))\n ppg.run()\n assert read(\"A\") == \"A\"\n\n def test_non_default_status_filename(self):\n ppg.new(history_dir=\"shu\")\n ppg.FileGeneratingJob(\"A\", lambda of: write(of, \"A\"))\n ppg.run()\n assert Path(\"shu/ppg_history.gz\").exists()\n assert not Path(\".ppg/ppg_history.gz\").exists()\n", "id": "4465971", "language": "Python", "matching_score": 0.49565237760543823, "max_stars_count": 0, "path": "tests/test_simple.py" }, { "content": "import pypipegraph2 as ppg\nimport pytest\n\n\[email protected](\"ppg2_per_test\")\nclass TestBuildInCompabilty:\n def test_invariant_build_in_function(self):\n a = ppg.FunctionInvariant(\"test\", sorted).run(None, None)[\"FItest\"][\"source\"]\n assert a == \"<built-in function sorted>\"\n\n\n_cytohn_func_counter = 0\n\[email protected](\"ppg2_per_test\")\nclass TestCythonCompability:\n\n def source_via_func_invariant(self, name, func):\n global _cytohn_func_counter\n _cytohn_func_counter =+ 1\n r = ppg.FunctionInvariant(name + str(_cytohn_func_counter), func).run(None, None)\n print(r)\n return r[\n \"FI\" + name + str(_cytohn_func_counter)\n ][\"source\"]\n\n def test_just_a_function(self):\n import cython\n\n src = \"\"\"\ndef a():\n '''single line docstring'''\n return 1\n\ndef b():\n '''Multi\n line\n docstring\n '''\n\n\n shu = 55\n\n return shu\n\"\"\"\n func = cython.inline(src)[\"a\"]\n func2 = cython.inline(src)[\"b\"]\n actual = self.source_via_func_invariant(\"a\", func)\n should = \"\"\" def a():\n return 1\"\"\"\n assert actual == should\n\n actual = self.source_via_func_invariant(\"b\", func2)\n should = \"\"\" def b():\n shu = 55\n \n return shu\"\"\"\n assert actual == should\n\n ppg.FunctionInvariant(\"a\", func) # not a redefinition\n with pytest.raises(ppg.JobRedefinitionError):\n ppg.FunctionInvariant(\"a\", func2) # cython vs cython\n with pytest.raises(ppg.JobRedefinitionError):\n ppg.FunctionInvariant(\"a\", lambda: 1) # cython vs python\n ppg.FunctionInvariant(\"b\", lambda: 45)\n with pytest.raises(ppg.JobRedefinitionError):\n ppg.FunctionInvariant(\"b\", func2) # python vs cython\n\n def test_just_a_function_with_docstring(self):\n import cython\n\n src = (\n \"\"\"\ndef 
a():\n ''' a() is used\n to do nothing\n '''\n return 1\n\n\"\"\"\n '''def b():\n \"\"\" b() is used\n to do nothing as well\n \"\"\"\n return 5\n'''\n )\n func = cython.inline(src)[\"a\"]\n actual = self.source_via_func_invariant(\"a\", func)\n should = \"\"\" def a():\n return 1\"\"\"\n assert actual == should\n\n def test_nested_function(self):\n import cython\n\n src = \"\"\"\ndef a():\n def b():\n return 1\n return b\n\ndef c():\n return 5\n\"\"\"\n func = cython.inline(src)[\"a\"]()\n actual = self.source_via_func_invariant(\"a\", func)\n should = \"\"\" def b():\n return 1\"\"\"\n assert actual == should\n\n def test_class(self):\n import cython\n\n src = \"\"\"\nclass A():\n def b(self):\n return 55\n\ndef c():\n return 5\n\"\"\"\n\n func = cython.inline(src)[\"A\"]().b\n actual = self.source_via_func_invariant(\"a\", func)\n should = \"\"\" def b(self):\n return 55\"\"\"\n assert actual == should\n\n def test_class_inner_function(self):\n import cython\n\n src = \"\"\"\nclass A():\n def b(self):\n def c():\n return 55\n return c\n\ndef d():\n return 5\n\"\"\"\n\n func = cython.inline(src)[\"A\"]().b()\n actual = self.source_via_func_invariant(\"a\", func)\n should = \"\"\" def c():\n return 55\"\"\"\n assert actual == should\n", "id": "861050", "language": "Python", "matching_score": 0.6111865043640137, "max_stars_count": 0, "path": "tests/test_function_invariants.py" }, { "content": "import pandas as pd\nimport pytest\nfrom pandas.testing import assert_frame_equal\nimport pypipegraph as ppg\nfrom mbf_genomics.annotator import Constant, Annotator\nfrom mbf_genomics.util import (\n read_pandas,\n freeze,\n parse_a_or_c_to_column,\n parse_a_or_c_to_anno,\n parse_a_or_c_to_plot_name,\n find_annos_from_column,\n)\n\n\ndef test_read_pandas_csv_in_xls(new_pipegraph):\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\"], \"y\": [1, 2.5, 3]})\n df.to_excel(\"shu.xls\", index=False)\n assert_frame_equal(df, read_pandas(\"shu.xls\"))\n df.to_csv(\"shu.xls\", sep=\"\\t\", index=False)\n assert_frame_equal(df, read_pandas(\"shu.xls\"))\n df.to_csv(\"shu.tsv\", sep=\"\\t\", index=False)\n assert_frame_equal(df, read_pandas(\"shu.tsv\"))\n df.to_csv(\"shu.csv\", index=False)\n assert_frame_equal(df, read_pandas(\"shu.csv\"))\n df.to_csv(\"shu.something\", index=False)\n with pytest.raises(ValueError):\n read_pandas(\"shu.something\")\n\n\ndef test_freeze():\n a = {\"a\": [1, 2, 3], \"b\": {\"c\": set([2, 3, 5])}}\n with pytest.raises(TypeError):\n hash(a)\n assert hash(freeze(a))\n assert freeze(a) == freeze(freeze(a))\n\n class Nohash:\n def __hash__(self):\n return NotImplemented\n\n with pytest.raises(TypeError):\n freeze(Nohash())\n\n\nclass PolyConstant(Annotator):\n def __init__(self, column_names, values, plot_name=None):\n self.columns = column_names\n self.value = values\n if plot_name is not None:\n self.plot_name = plot_name\n\n def calc(self, df):\n return pd.DataFrame(\n {k: v for (k, v) in zip(self.columns, self.value)}, index=df.index\n )\n\n\nclass TestAnnotatorParsing:\n def test_to_column(self):\n assert parse_a_or_c_to_column(\"hello\") == \"hello\"\n assert parse_a_or_c_to_column(Constant(\"shu\", 5)) == \"shu\"\n assert parse_a_or_c_to_column(PolyConstant([\"shu\", \"sha\"], [5, 10])) == \"shu\"\n assert (\n parse_a_or_c_to_column((PolyConstant([\"shu\", \"sha\"], [5, 10]), 1)) == \"sha\"\n )\n assert (\n parse_a_or_c_to_column((PolyConstant([\"shu\", \"sha\"], [5, 10]), \"sha\"))\n == \"sha\"\n )\n assert parse_a_or_c_to_column((None, \"shi\")) == \"shi\"\n with 
pytest.raises(KeyError):\n parse_a_or_c_to_column((PolyConstant([\"shu\", \"sha\"], [5, 10]), \"shi\"))\n with pytest.raises(IndexError):\n parse_a_or_c_to_column((PolyConstant([\"shu\", \"sha\"], [5, 10]), 5))\n\n with pytest.raises(ValueError):\n parse_a_or_c_to_column(5)\n with pytest.raises(ValueError):\n parse_a_or_c_to_column((Constant(\"shu\", 5), \"shu\", 3))\n\n def test_to_anno(self):\n assert parse_a_or_c_to_anno(\"hello\") is None\n assert parse_a_or_c_to_anno(Constant(\"shu\", 5)) == Constant(\"shu\", 5)\n assert parse_a_or_c_to_anno(\n PolyConstant([\"shu\", \"sha\"], [5, 10])\n ) == PolyConstant([\"shu\", \"sha\"], [5, 10])\n assert parse_a_or_c_to_anno(\n (PolyConstant([\"shu\", \"sha\"], [5, 10]), 1)\n ) == PolyConstant([\"shu\", \"sha\"], [5, 10])\n assert parse_a_or_c_to_anno(\n (PolyConstant([\"shu\", \"sha\"], [5, 10]), \"sha\")\n ) == PolyConstant([\"shu\", \"sha\"], [5, 10])\n with pytest.raises(KeyError):\n parse_a_or_c_to_anno((PolyConstant([\"shu\", \"sha\"], [5, 10]), \"shi\"))\n with pytest.raises(IndexError):\n parse_a_or_c_to_anno((PolyConstant([\"shu\", \"sha\"], [5, 10]), 5))\n\n with pytest.raises(ValueError):\n parse_a_or_c_to_anno(5)\n with pytest.raises(ValueError):\n parse_a_or_c_to_anno((Constant(\"shu\", 5), \"shu\", 3))\n\n def test_to_plot_name(self):\n assert parse_a_or_c_to_plot_name(\"hello\") == \"hello\"\n assert parse_a_or_c_to_plot_name(Constant(\"shu\", 5)) == \"shu\"\n assert parse_a_or_c_to_plot_name(PolyConstant([\"shu\", \"sha\"], [5, 10])) == \"shu\"\n assert (\n parse_a_or_c_to_plot_name((PolyConstant([\"shu\", \"sha\"], [5, 10]), 1))\n == \"sha\"\n )\n assert (\n parse_a_or_c_to_plot_name((PolyConstant([\"shu\", \"sha\"], [5, 10]), \"sha\"))\n == \"sha\"\n )\n with pytest.raises(KeyError):\n parse_a_or_c_to_plot_name((PolyConstant([\"shu\", \"sha\"], [5, 10]), \"shi\"))\n with pytest.raises(IndexError):\n parse_a_or_c_to_plot_name((PolyConstant([\"shu\", \"sha\"], [5, 10]), 5))\n\n with pytest.raises(ValueError):\n parse_a_or_c_to_plot_name(5)\n with pytest.raises(ValueError):\n parse_a_or_c_to_plot_name((Constant(\"shu\", 5), \"shu\", 3))\n\n assert (\n parse_a_or_c_to_plot_name(PolyConstant([\"shu\", \"sha\"], [5, 10], \"hello\"))\n == \"hello\"\n )\n assert (\n parse_a_or_c_to_plot_name(\n (PolyConstant([\"shu\", \"sha\"], [5, 10], \"hello\"), \"sha\")\n )\n == \"hello\"\n )\n assert (\n parse_a_or_c_to_plot_name(\n (PolyConstant([\"shu\", \"sha\"], [5, 10], \"hello\"), 1)\n )\n == \"hello\"\n )\n\n def test_find_annos_from_column(self, both_ppg_and_no_ppg_no_qc, clear_annotators):\n a = Constant(\"shu\", 5)\n assert find_annos_from_column(\"shu\") == [a]\n assert find_annos_from_column(\"shu\")[0] is a\n with pytest.raises(KeyError):\n find_annos_from_column(\"nosuchcolumn\")\n\n b = PolyConstant([\"shu\"], [10])\n assert find_annos_from_column(\"shu\") == [a, b]\n\n if ppg.inside_ppg():\n both_ppg_and_no_ppg_no_qc.new_pipegraph()\n with pytest.raises(KeyError):\n find_annos_from_column(\"shu\")\n", "id": "5663409", "language": "Python", "matching_score": 3.988191843032837, "max_stars_count": 0, "path": "tests/test_util.py" }, { "content": "import pytest\nimport collections\nfrom pathlib import Path\nimport pandas as pd\nfrom mbf_genomics import DelayedDataFrame\nfrom mbf_genomics.annotator import Constant, Annotator\nimport pypipegraph as ppg\nfrom pypipegraph.testing import run_pipegraph, force_load\nfrom pandas.testing import assert_frame_equal\nfrom mbf_genomics.util import find_annos_from_column\n\n\nclass 
LenAnno(Annotator):\n def __init__(self, name):\n self.columns = [name]\n\n def calc(self, df):\n return pd.DataFrame(\n {self.columns[0]: [\"%s%i\" % (self.columns[0], len(df))] * len(df)}\n )\n\n\[email protected](\"no_pipegraph\")\[email protected](\"clear_annotators\")\nclass Test_DelayedDataFrameDirect:\n def test_create(self):\n test_df = pd.DataFrame({\"A\": [1, 2]})\n\n def load():\n return test_df\n\n a = DelayedDataFrame(\"shu\", load)\n assert_frame_equal(a.df, test_df)\n assert a.non_annotator_columns == \"A\"\n\n def test_create_from_df(self):\n test_df = pd.DataFrame({\"A\": [1, 2]})\n\n a = DelayedDataFrame(\"shu\", test_df)\n assert_frame_equal(a.df, test_df)\n assert a.non_annotator_columns == \"A\"\n\n def test_write(self):\n test_df = pd.DataFrame({\"A\": [1, 2]})\n\n def load():\n return test_df\n\n a = DelayedDataFrame(\"shu\", load, result_dir=\"sha\")\n assert Path(\"sha\").exists()\n assert_frame_equal(a.df, test_df)\n assert a.non_annotator_columns == \"A\"\n fn = a.write()[1]\n assert \"/sha\" in str(fn.parent.absolute())\n assert fn.exists()\n assert_frame_equal(pd.read_csv(fn, sep=\"\\t\"), test_df)\n\n def test_write_excel(self):\n test_df = pd.DataFrame({\"A\": [1, 2]})\n\n def load():\n return test_df\n\n a = DelayedDataFrame(\"shu\", load, result_dir=\"sha\")\n assert Path(\"sha\").exists()\n assert_frame_equal(a.df, test_df)\n assert a.non_annotator_columns == \"A\"\n fn = a.write(\"sha.xls\")[1]\n assert fn.exists()\n assert_frame_equal(pd.read_excel(fn), test_df)\n\n def test_write_excel2(self):\n data = {}\n for i in range(0, 257):\n c = \"A%i\" % i\n d = [1, 1]\n data[c] = d\n test_df = pd.DataFrame(data)\n\n def load():\n return test_df\n\n a = DelayedDataFrame(\"shu\", load, result_dir=\"sha\")\n fn = a.write(\"sha.xls\")[1]\n assert fn.exists()\n assert_frame_equal(pd.read_excel(fn), test_df)\n\n def test_write_mangle(self):\n test_df = pd.DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"]})\n\n def load():\n return test_df\n\n a = DelayedDataFrame(\"shu\", load)\n assert_frame_equal(a.df, test_df)\n assert (a.non_annotator_columns == [\"A\", \"B\"]).all()\n\n def mangle(df):\n df = df.drop(\"A\", axis=1)\n df = df[df.B == \"c\"]\n return df\n\n fn = a.write(\"test.csv\", mangle)[1]\n assert fn.exists()\n assert_frame_equal(pd.read_csv(fn, sep=\"\\t\"), mangle(test_df))\n\n def test_magic(self):\n test_df = pd.DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"]})\n a = DelayedDataFrame(\"shu\", lambda: test_df)\n assert hash(a)\n assert a.name in str(a)\n assert a.name in repr(a)\n\n def test_annotator(self):\n a = DelayedDataFrame(\n \"shu\", lambda: pd.DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"]})\n )\n a += Constant(\"column\", \"value\")\n a.annotate()\n assert \"column\" in a.df.columns\n assert (a.df[\"column\"] == \"value\").all()\n\n def test_add_non_anno(self):\n a = DelayedDataFrame(\n \"shu\", lambda: pd.DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"]})\n )\n with pytest.raises(TypeError):\n a += 5\n\n def test_annotator_wrong_columns(self):\n class WrongConstant(Annotator):\n def __init__(self, column_name, value):\n self.columns = [column_name]\n self.value = value\n\n def calc(self, df):\n return pd.DataFrame({\"shu\": self.value}, index=df.index)\n\n a = DelayedDataFrame(\n \"shu\", lambda: pd.DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"]})\n )\n with pytest.raises(ValueError):\n a += WrongConstant(\"column\", \"value\")\n\n def test_annotator_minimum_columns(self):\n a = DelayedDataFrame(\n \"shu\", lambda: pd.DataFrame({\"A\": [1, 2], 
\"B\": [\"c\", \"d\"]})\n )\n assert \"Direct\" in str(a.load_strategy)\n\n class MissingCalc(Annotator):\n column_names = [\"shu\"]\n\n with pytest.raises(AttributeError):\n a += MissingCalc()\n\n class EmptyColumnNames(Annotator):\n columns = []\n\n def calc(self, df):\n return pd.DataFrame({})\n\n with pytest.raises(IndexError):\n a += EmptyColumnNames()\n\n class EmptyColumnNamesButCacheName(Annotator):\n cache_name = \"shu\"\n columns = []\n\n def calc(self, df):\n return pd.DataFrame({})\n\n with pytest.raises(IndexError):\n a += EmptyColumnNamesButCacheName()\n\n class MissingColumnNames(Annotator):\n def calc(self, df):\n pass\n\n with pytest.raises(AttributeError):\n a += MissingColumnNames()\n\n class NonListColumns(Annotator):\n columns = \"shu\"\n\n def calc(self, df):\n pass\n\n with pytest.raises(ValueError):\n a += NonListColumns()\n\n def test_DynamicColumNames(self):\n a = DelayedDataFrame(\n \"shu\", lambda: pd.DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"]})\n )\n\n class Dynamic(Annotator):\n @property\n def columns(self):\n return [\"a\"]\n\n def calc(self, df):\n return pd.DataFrame({\"a\": [\"x\", \"y\"]})\n\n a += Dynamic()\n a.annotate()\n assert_frame_equal(\n a.df, pd.DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"], \"a\": [\"x\", \"y\"]})\n )\n\n def test_annos_added_only_once(self):\n count = [0]\n\n class CountingConstant(Annotator):\n def __init__(self, column_name, value):\n count[0] += 1\n self.columns = [column_name]\n self.value = value\n\n def calc(self, df):\n return pd.DataFrame({self.columns[0]: self.value}, index=df.index)\n\n a = DelayedDataFrame(\n \"shu\", lambda: pd.DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"]})\n )\n c = CountingConstant(\"hello\", \"c\")\n a += c\n a.annotate()\n assert \"hello\" in a.df.columns\n assert count[0] == 1\n a += c # this get's ignored\n\n def test_annos_same_column_different_anno(self):\n count = [0]\n\n class CountingConstant(Annotator):\n def __init__(self, column_name, value):\n count[0] += 1\n self.columns = [column_name]\n self.value = value\n\n def calc(self, df):\n return pd.DataFrame({self.columns[0]: self.value}, index=df.index)\n\n a = DelayedDataFrame(\n \"shu\", lambda: pd.DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"]})\n )\n c = CountingConstant(\"hello\", \"c\")\n a += c\n a.annotate()\n assert \"hello\" in a.df.columns\n assert count[0] == 1\n c = CountingConstant(\"hello2\", \"c\")\n a += c\n a.annotate()\n assert \"hello2\" in a.df.columns\n assert count[0] == 2\n d = CountingConstant(\"hello2\", \"d\")\n assert c is not d\n with pytest.raises(ValueError):\n a += d\n\n def test_annos_same_column_different_anno2(self):\n class A(Annotator):\n cache_name = \"hello\"\n columns = [\"aa\"]\n\n def calc(self, df):\n return pd.DataFrame({self.columns[0]: \"a\"}, index=df.index)\n\n class B(Annotator):\n cache_name = \"hello2\"\n columns = [\"aa\"]\n\n def calc(self, df):\n return pd.DataFrame({self.columns[0]: \"a\"}, index=df.index)\n\n a = DelayedDataFrame(\n \"shu\", lambda: pd.DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"]})\n )\n a += A()\n with pytest.raises(ValueError):\n a += B()\n\n def test_annos_dependening(self):\n class A(Annotator):\n cache_name = \"hello\"\n columns = [\"aa\"]\n\n def calc(self, df):\n return pd.DataFrame({self.columns[0]: \"a\"}, index=df.index)\n\n class B(Annotator):\n cache_name = \"hello2\"\n columns = [\"ab\"]\n\n def calc(self, df):\n return df[\"aa\"] + \"b\"\n\n def dep_annos(self):\n return [A()]\n\n a = DelayedDataFrame(\n \"shu\", lambda: 
pd.DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"]})\n )\n a += B()\n a.annotate()\n assert \"ab\" in a.df.columns\n assert \"aa\" in a.df.columns\n assert (a.df[\"ab\"] == (a.df[\"aa\"] + \"b\")).all()\n\n def test_annos_dependening_none(self):\n class A(Annotator):\n cache_name = \"hello\"\n columns = [\"aa\"]\n\n def calc(self, df):\n return pd.DataFrame({self.columns[0]: \"a\"}, index=df.index)\n\n class B(Annotator):\n cache_name = \"hello2\"\n columns = [\"ab\"]\n\n def calc(self, df):\n return df[\"aa\"] + \"b\"\n\n def dep_annos(self):\n return [None, A(), None]\n\n a = DelayedDataFrame(\n \"shu\", lambda: pd.DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"]})\n )\n a += B()\n a.annotate()\n assert \"ab\" in a.df.columns\n assert \"aa\" in a.df.columns\n assert (a.df[\"ab\"] == (a.df[\"aa\"] + \"b\")).all()\n\n def test_filtering(self):\n class A(Annotator):\n cache_name = \"A\"\n columns = [\"aa\"]\n\n def calc(self, df):\n return pd.DataFrame({self.columns[0]: \"a\"}, index=df.index)\n\n class B(Annotator):\n cache_name = \"B\"\n columns = [\"ab\"]\n\n def calc(self, df):\n return df[\"aa\"] + \"b\"\n\n def dep_annos(self):\n return [A()]\n\n a = DelayedDataFrame(\n \"shu\", lambda: pd.DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"]})\n )\n a += Constant(\"C\", \"c\")\n assert \"C\" in a.df.columns\n b = a.filter(\"sha\", lambda df: df[\"A\"] == 1)\n assert \"C\" in b.df.columns\n a += A()\n assert \"aa\" in a.df.columns\n assert \"aa\" in b.df.columns\n b += B()\n assert \"ab\" in b.df.columns\n assert not \"ab\" in a.df.columns\n\n def test_filtering2(self):\n counts = collections.Counter()\n\n class A(Annotator):\n cache_name = \"A\"\n columns = [\"aa\"]\n\n def calc(self, df):\n counts[\"A\"] += 1\n return pd.DataFrame({self.columns[0]: \"a\"}, index=df.index)\n\n class B(Annotator):\n cache_name = \"B\"\n columns = [\"ab\"]\n\n def calc(self, df):\n counts[\"B\"] += 1\n return df[\"aa\"] + \"b\"\n\n def dep_annos(self):\n return [A()]\n\n a = DelayedDataFrame(\n \"shu\", lambda: pd.DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"]})\n )\n b = a.filter(\"sha\", lambda df: df[\"A\"] == 1)\n b += B()\n assert \"aa\" in b.df.columns\n assert \"ab\" in b.df.columns\n assert not \"aa\" in a.df.columns\n assert not \"ab\" in a.df.columns\n assert counts[\"A\"] == 1\n a += A()\n assert \"aa\" in a.df.columns\n assert counts[\"A\"] == 2 # no two recalcs\n assert not \"ab\" in a.df.columns\n a += B()\n assert \"ab\" in a.df.columns\n assert counts[\"A\"] == 2 # no two recalcs\n assert counts[\"B\"] == 2 # no two recalcs\n\n def test_filtering_result_dir(self):\n counts = collections.Counter()\n\n class A(Annotator):\n cache_name = \"A\"\n columns = [\"aa\"]\n\n def calc(self, df):\n counts[\"A\"] += 1\n return pd.DataFrame({self.columns[0]: \"a\"}, index=df.index)\n\n a = DelayedDataFrame(\n \"shu\", lambda: pd.DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"]})\n )\n b = a.filter(\"sha\", lambda df: df[\"A\"] == 1, result_dir=\"shu2\")\n assert b.result_dir.absolute() == Path(\"shu2\").absolute()\n\n def test_filtering_on_annotator(self):\n class A(Annotator):\n cache_name = \"A\"\n columns = [\"aa\"]\n\n def calc(self, df):\n return pd.DataFrame(\n {self.columns[0]: ([\"a\", \"b\"] * int(len(df) / 2 + 1))[: len(df)]},\n index=df.index,\n )\n\n a = DelayedDataFrame(\n \"shu\", lambda: pd.DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"]})\n )\n with pytest.raises(KeyError):\n b = a.filter(\"sha\", lambda df: df[\"aa\"] == \"a\")\n b = a.filter(\"sha\", lambda df: df[\"aa\"] == \"a\", 
[A()])\n canno = Constant(\"C\", \"c\")\n a += canno\n b += canno\n assert (b.df[\"A\"] == [1]).all()\n\n def test_multi_level(self):\n a = DelayedDataFrame(\n \"shu\",\n lambda: pd.DataFrame(\n {\"A\": [1, 2, 3], \"B\": [\"a\", \"b\", \"c\"], \"idx\": [\"x\", \"y\", \"z\"]}\n ).set_index(\"idx\"),\n )\n b = a.filter(\"sha\", lambda df: df[\"C\"] == 4, Constant(\"C\", 4))\n a1 = LenAnno(\"count\")\n b += a1\n c = b.filter(\"shc\", lambda df: df[\"A\"] >= 2)\n a2 = LenAnno(\"count2\")\n c += a2\n c.annotate()\n print(c.df)\n assert len(c.df) == 2\n assert (c.df[\"A\"] == [2, 3]).all()\n assert (c.df[\"count\"] == \"count3\").all()\n assert (c.df[\"count2\"] == \"count22\").all()\n\n def test_anno_not_returning_enough_rows_and_no_index_range_index_on_df(self):\n class BrokenAnno(Annotator):\n columns = [\"X\"]\n\n def calc(self, df):\n return pd.DataFrame({\"X\": [1]})\n\n a = DelayedDataFrame(\n \"shu\", lambda: pd.DataFrame({\"A\": [1, 2, 3], \"B\": [\"a\", \"b\", \"c\"]})\n )\n with pytest.raises(ValueError) as excinfo:\n a += BrokenAnno()\n print(str(excinfo))\n assert \"Length and index mismatch \" in str(excinfo.value)\n\n def test_anno_returning_series(self):\n a = DelayedDataFrame(\n \"shu\",\n lambda: pd.DataFrame(\n {\"A\": [1, 2, 3], \"B\": [\"a\", \"b\", \"c\"], \"idx\": [\"x\", \"y\", \"z\"]}\n ).set_index(\"idx\"),\n )\n\n class SeriesAnno(Annotator):\n columns = [\"C\"]\n\n def calc(self, df):\n return pd.Series(list(range(len(df))))\n\n a += SeriesAnno()\n assert (a.df[\"C\"] == [0, 1, 2]).all()\n\n def test_anno_returning_series_but_defined_two_columns(self):\n a = DelayedDataFrame(\n \"shu\",\n lambda: pd.DataFrame(\n {\"A\": [1, 2, 3], \"B\": [\"a\", \"b\", \"c\"], \"idx\": [\"x\", \"y\", \"z\"]}\n ).set_index(\"idx\"),\n )\n\n class SeriesAnno(Annotator):\n columns = [\"C\", \"D\"]\n\n def calc(self, df):\n return pd.Series(list(range(len(df))))\n\n with pytest.raises(ValueError) as excinfo:\n a += SeriesAnno()\n assert \"result was no dataframe\" in str(excinfo)\n\n def test_anno_returning_string(self):\n a = DelayedDataFrame(\n \"shu\",\n lambda: pd.DataFrame(\n {\"A\": [1, 2, 3], \"B\": [\"a\", \"b\", \"c\"], \"idx\": [\"x\", \"y\", \"z\"]}\n ).set_index(\"idx\"),\n )\n\n class SeriesAnno(Annotator):\n columns = [\"C\", \"D\"]\n\n def calc(self, df):\n return \"abc\"\n\n with pytest.raises(ValueError) as excinfo:\n a += SeriesAnno()\n assert \"return non DataFrame\" in str(excinfo)\n\n def test_anno_returing_right_length_but_wrong_start_range_index(self):\n a = DelayedDataFrame(\"shu\", lambda: pd.DataFrame({\"A\": [1, 2, 3]}))\n\n class BadAnno(Annotator):\n columns = [\"X\"]\n\n def calc(self, df):\n return pd.Series([\"a\", \"b\", \"c\"], index=pd.RangeIndex(5, 5 + 3))\n\n with pytest.raises(ValueError) as excinfo:\n a += BadAnno()\n assert \"Index mismatch\" in str(excinfo)\n\n def test_lying_about_columns(self):\n a = DelayedDataFrame(\n \"shu\",\n lambda: pd.DataFrame(\n {\"A\": [1, 2, 3], \"B\": [\"a\", \"b\", \"c\"], \"idx\": [\"x\", \"y\", \"z\"]}\n ).set_index(\"idx\"),\n )\n\n class SeriesAnno(Annotator):\n columns = [\"C\"]\n\n def calc(self, df):\n return pd.DataFrame({\"D\": [0, 1, 2]})\n\n with pytest.raises(ValueError) as excinfo:\n a += SeriesAnno()\n assert \"declared different\" in str(excinfo)\n\n def test_filtering_by_definition_operators(self):\n a = DelayedDataFrame(\"shu\", pd.DataFrame({\"A\": [-1, 0, 1, 2, 3, 4]}))\n assert (a.filter(\"a1\", [(\"A\", \"==\", 0)]).df[\"A\"] == [0]).all()\n assert (a.filter(\"a2\", [(\"A\", \">=\", 
3)]).df[\"A\"] == [3, 4]).all()\n assert (a.filter(\"a3\", [(\"A\", \"<=\", 0)]).df[\"A\"] == [-1, 0]).all()\n assert (a.filter(\"a4\", [(\"A\", \">\", 3)]).df[\"A\"] == [4]).all()\n assert (a.filter(\"a5\", [(\"A\", \"<\", 0)]).df[\"A\"] == [-1]).all()\n assert (a.filter(\"a6\", [(\"A\", \"|>\", 0)]).df[\"A\"] == [-1, 1, 2, 3, 4]).all()\n assert (a.filter(\"a7\", [(\"A\", \"|>=\", 1)]).df[\"A\"] == [-1, 1, 2, 3, 4]).all()\n assert (a.filter(\"a8\", [(\"A\", \"|<\", 2)]).df[\"A\"] == [-1, 0, 1]).all()\n assert (a.filter(\"a9\", [(\"A\", \"|<=\", 2)]).df[\"A\"] == [-1, 0, 1, 2]).all()\n with pytest.raises(ValueError):\n a.filter(\"a10\", [(\"A\", \"xx\", 2)])\n\n\nclass XAnno(Annotator):\n def __init__(self, column_name, values):\n self.columns = [column_name]\n self.values = values\n\n def calc(self, df):\n return pd.DataFrame({self.columns[0]: self.values}, index=df.index)\n\n\[email protected](\"both_ppg_and_no_ppg\")\[email protected](\"clear_annotators\")\nclass Test_DelayedDataFrameBoth:\n def test_filtering_by_definition(self):\n\n a = DelayedDataFrame(\n \"shu\", lambda: pd.DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"]})\n )\n c = XAnno(\"C\", [1, 2])\n a += c\n d = XAnno(\"D\", [4, 5])\n\n # native column\n a1 = a.filter(\"a1\", (\"A\", \"==\", 1))\n # search for the anno\n a2 = a.filter(\"a2\", (\"C\", \"==\", 2))\n # extract the column name from the anno - anno already added\n a4 = a.filter(\"a4\", (d, \"==\", 5))\n # extract the column name from the anno - anno not already added\n a3 = a.filter(\"a3\", (c, \"==\", 1))\n # lookup column to name\n a6 = a.filter(\"a6\", (\"X\", \"==\", 2), column_lookup={\"X\": \"C\"})\n # lookup column to anno\n a7 = a.filter(\"a7\", (\"X\", \"==\", 2), column_lookup={\"X\": c})\n\n if not ppg.inside_ppg():\n e1 = XAnno(\"E\", [6, 7])\n e2 = XAnno(\"E\", [6, 8])\n assert find_annos_from_column(\"E\") == [e1, e2]\n # column name to longer unique\n with pytest.raises(KeyError):\n a.filter(\"a5\", (\"E\", \"==\", 5))\n with pytest.raises(KeyError):\n a.filter(\"a5\", ((c, \"D\"), \"==\", 5))\n force_load(a1.annotate())\n force_load(a2.annotate())\n force_load(a3.annotate())\n force_load(a4.annotate())\n force_load(a6.annotate())\n force_load(a7.annotate())\n run_pipegraph()\n\n assert (a1.df[\"A\"] == [1]).all()\n\n assert (a2.df[\"A\"] == [2]).all()\n\n assert (a3.df[\"A\"] == [1]).all()\n\n assert (a4.df[\"A\"] == [2]).all()\n assert (a6.df[\"A\"] == [2]).all()\n assert (a7.df[\"A\"] == [2]).all()\n\n\[email protected](\"new_pipegraph\")\nclass Test_DelayedDataFramePPG:\n def test_create(self):\n test_df = pd.DataFrame({\"A\": [1, 2]})\n\n def load():\n return test_df\n\n a = DelayedDataFrame(\"shu\", load)\n assert not hasattr(a, \"df\")\n print(\"load is\", a.load())\n force_load(a.load(), False)\n ppg.run_pipegraph()\n assert_frame_equal(a.df, test_df)\n assert a.non_annotator_columns == \"A\"\n\n def test_write(self):\n test_df = pd.DataFrame({\"A\": [1, 2]})\n\n def load():\n return test_df\n\n a = DelayedDataFrame(\"shu\", load)\n fn = a.write()[0]\n ppg.run_pipegraph()\n assert Path(fn.filenames[0]).exists()\n assert_frame_equal(pd.read_csv(fn.filenames[0], sep=\"\\t\"), test_df)\n\n def test_write_mixed_manglers(self):\n test_df = pd.DataFrame({\"A\": [1, 2]})\n\n def load():\n return test_df\n\n a = DelayedDataFrame(\"shu\", load)\n a.write(mangler_function=lambda df: df)\n\n def b(df):\n return df.head()\n\n ok = False\n try:\n a.write(mangler_function=b)\n except Exception as e:\n se = str(type(e))\n if \"JobContractError\" in se: # 
ppg\n ok = True\n elif \"JobRedefinitionError\" in se: # ppg2\n ok = True\n if not ok:\n raise ValueError(\"Did not raise the expected exception\")\n\n def test_annotator_basic(self):\n a = DelayedDataFrame(\n \"shu\", lambda: pd.DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"]})\n )\n a += Constant(\"aa\", \"aa\")\n force_load(a.annotate())\n ppg.run_pipegraph()\n assert (a.df[\"aa\"] == \"aa\").all()\n\n def test_annotator_raising(self):\n a = DelayedDataFrame(\n \"shu\", lambda: pd.DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"]})\n )\n\n class RaiseAnno(Annotator):\n columns = [\"aa\"]\n cache_name = \"empty\"\n\n def calc(self, df):\n raise ValueError(\"hello\")\n\n anno1 = RaiseAnno()\n a += anno1\n force_load(a.annotate())\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n anno_job = a.anno_jobs[RaiseAnno().get_cache_name()]\n assert \"hello\" in str(anno_job.lfg.exception)\n\n def test_annotator_columns_not_list(self):\n class BrokenAnno(Annotator):\n def __init__(\n self,\n ):\n self.columns = \"shu\"\n\n def calc(self, df):\n return pd.DataFrame(\n {self.columns[0]: [\"%s%i\" % (self.columns[0], len(df))] * len(df)}\n )\n\n a = DelayedDataFrame(\n \"shu\", lambda: pd.DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"]})\n )\n a += BrokenAnno()\n lg = a.anno_jobs[BrokenAnno().get_cache_name()]\n force_load(a.annotate())\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n assert \"list\" in str(lg().lfg.exception)\n\n def test_annotator_empty_columns(self):\n a = DelayedDataFrame(\n \"shu\", lambda: pd.DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"]})\n )\n\n class EmptyColumnNames(Annotator):\n columns = []\n cache_name = \"empty\"\n\n def calc(self, df):\n return pd.DataFrame({\"shu\": [1, 2]})\n\n def __repr__(self):\n return \"EmptyColumNames()\"\n\n a += EmptyColumnNames()\n force_load(a.annotate())\n anno_job_cb = a.anno_jobs[EmptyColumnNames().get_cache_name()]\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n assert anno_job_cb() is anno_job_cb()\n assert \"anno.columns was empty\" in repr(anno_job_cb().exception)\n\n def test_annotator_missing_columns(self):\n a = DelayedDataFrame(\n \"shu\", lambda: pd.DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"]})\n )\n\n class MissingColumnNames(Annotator):\n cache_name = \"MissingColumnNames\"\n\n def calc(self, df):\n return pd.DataFrame({})\n\n def __repr__(self):\n return \"MissingColumnNames()\"\n\n a += MissingColumnNames()\n lg = a.anno_jobs[\"MissingColumnNames\"]\n force_load(a.annotate())\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n assert \"AttributeError\" in repr(lg().lfg.exception)\n\n def test_DynamicColumNames(self):\n a = DelayedDataFrame(\n \"shu\", lambda: pd.DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"]})\n )\n\n class Dynamic(Annotator):\n @property\n def columns(self):\n return [\"a\"]\n\n def calc(self, df):\n return pd.DataFrame({\"a\": [\"x\", \"y\"]})\n\n a += Dynamic()\n a.anno_jobs[Dynamic().get_cache_name()]\n force_load(a.annotate())\n ppg.run_pipegraph()\n assert_frame_equal(\n a.df, pd.DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"], \"a\": [\"x\", \"y\"]})\n )\n\n def test_annos_same_column_different_anno(self):\n a = DelayedDataFrame(\n \"shu\", lambda: pd.DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"]})\n )\n c = Constant(\"hello\", \"c\")\n a += c\n c = Constant(\"hello2\", \"c\")\n a += c\n c = Constant(\"hello2\", \"d\")\n with pytest.raises(ValueError):\n a += c\n\n def test_annos_dependening(self):\n class A(Annotator):\n cache_name = 
\"hello\"\n columns = [\"aa\"]\n\n def calc(self, df):\n return pd.DataFrame({self.columns[0]: \"a\"}, index=df.index)\n\n class B(Annotator):\n cache_name = \"hello2\"\n columns = [\"ab\"]\n\n def calc(self, df):\n return df[\"aa\"] + \"b\"\n\n def dep_annos(self):\n return [A()]\n\n a = DelayedDataFrame(\n \"shu\", lambda: pd.DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"]})\n )\n a += B()\n ppg.JobGeneratingJob(\"shu\", lambda: 55).depends_on(a.annotate())\n ppg.run_pipegraph()\n assert \"ab\" in a.df.columns\n assert \"aa\" in a.df.columns\n assert (a.df[\"ab\"] == (a.df[\"aa\"] + \"b\")).all()\n\n def test_filteringA(self):\n ppg.util.global_pipegraph.quiet = False\n\n a = DelayedDataFrame(\n \"shu\", lambda: pd.DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"]})\n )\n b = a.filter(\"sha\", lambda df: df[\"A\"] == 1)\n a += LenAnno(\"C\")\n b.write()\n ppg.run_pipegraph()\n assert \"C\" in b.df.columns\n assert \"C\" in a.df.columns\n assert (b.df[\"C\"] == \"C2\").all()\n assert (a.df[\"C\"] == \"C2\").all()\n\n def test_filteringB(self):\n ppg.util.global_pipegraph.quiet = False\n\n a = DelayedDataFrame(\n \"shu\", lambda: pd.DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"]})\n )\n b = a.filter(\"sha\", lambda df: df[\"A\"] == 1)\n a += LenAnno(\"C\")\n b += LenAnno(\"D\")\n assert not LenAnno(\"D\").get_cache_name() in a.anno_jobs\n b.write()\n ppg.run_pipegraph()\n assert not LenAnno(\"D\").get_cache_name() in a.anno_jobs\n assert \"C\" in b.df.columns\n assert \"C\" in a.df.columns\n assert not \"D\" in a.df.columns\n assert len(a.df) == 2\n assert len(b.df) == 1\n assert (b.df[\"C\"] == \"C2\").all()\n assert (b.df[\"D\"] == \"D1\").all()\n assert (a.df[\"C\"] == \"C2\").all()\n assert not \"D\" in a.df.columns\n\n def test_filteringC(self):\n ppg.util.global_pipegraph.quiet = False\n\n a = DelayedDataFrame(\n \"shu\", lambda: pd.DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"]})\n )\n # a += LenAnno(\"C\")\n b = a.filter(\"sha\", lambda df: df[\"C\"] == 2, LenAnno(\"C\"), set())\n b.write()\n ppg.run_pipegraph()\n assert \"C\" in a.df\n assert \"C\" in b.df\n\n def test_filter_and_clone_without_annos(self):\n ppg.util.global_pipegraph.quiet = False\n\n a = DelayedDataFrame(\n \"shu\", lambda: pd.DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"]})\n )\n # a += LenAnno(\"C\")\n b = a.filter(\"sha\", lambda df: df[\"C\"] == 2, LenAnno(\"C\"), set())\n b.write()\n with pytest.raises(ValueError):\n b.clone_without_annotators(\"shc\", \"hello\")\n c = b.clone_without_annotators(\"shc\", result_dir=\"dir_c\")\n fn = c.write()[1]\n ppg.run_pipegraph()\n assert \"C\" in a.df\n assert \"C\" in b.df\n assert \"C\" not in c.df\n written = pd.read_csv(fn, sep=\"\\t\")\n assert set(c.df.columns) == set(written.columns)\n for col in c.df.columns:\n assert (c.df[col] == written[col]).all()\n\n def test_filtering_on_annotator_missing(self):\n class A(Annotator):\n cache_name = \"A\"\n columns = [\"aa\"]\n\n def calc(self, df):\n return pd.DataFrame(\n {self.columns[0]: ([\"a\", \"b\"] * int(len(df) / 2 + 1))[: len(df)]},\n index=df.index,\n )\n\n a = DelayedDataFrame(\n \"shu\", lambda: pd.DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"]})\n )\n b = a.filter(\"sha\", lambda df: df[\"aaA\"] == \"a\")\n load_job = b.load()\n a.write()\n print(\"run now\")\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n assert \"KeyError\" in repr(load_job.lfg.exception)\n\n def test_forbidden_cache_names(self):\n a = DelayedDataFrame(\n \"shu\", lambda: pd.DataFrame({\"A\": [1, 2], \"B\": [\"c\", 
\"d\"]})\n )\n c1 = Constant(\"c1*\", \"*\")\n c2 = Constant(\"c2/\", \"*\")\n c3 = Constant(\"c3?\", \"*\")\n c4 = Constant(\"c4\" * 100, \"*\")\n with pytest.raises(ValueError):\n a += c1\n with pytest.raises(ValueError):\n a += c2\n with pytest.raises(ValueError):\n a += c3\n with pytest.raises(ValueError):\n a += c4\n\n def test_multi_level(self):\n a = DelayedDataFrame(\n \"shu\",\n lambda: pd.DataFrame(\n {\"A\": [1, 2, 3], \"B\": [\"a\", \"b\", \"c\"], \"idx\": [\"x\", \"y\", \"z\"]}\n ).set_index(\"idx\"),\n )\n b = a.filter(\"sha\", lambda df: df[\"C\"] == 4, Constant(\"C\", 4))\n a1 = LenAnno(\"count\")\n b += a1\n c = b.filter(\"shc\", lambda df: df[\"A\"] >= 2)\n a2 = LenAnno(\"count2\")\n c += a2\n c.write()\n ppg.run_pipegraph()\n assert len(c.df) == 2\n assert (c.df[\"A\"] == [2, 3]).all()\n assert (c.df[\"count\"] == \"count3\").all()\n assert (c.df[\"count2\"] == \"count22\").all()\n\n def test_anno_not_returning_enough_rows_and_no_index(self):\n class BrokenAnno(Annotator):\n columns = [\"X\"]\n\n def calc(self, df):\n return pd.DataFrame({\"X\": [1]})\n\n a = DelayedDataFrame(\n \"shu\",\n lambda: pd.DataFrame(\n {\"A\": [1, 2, 3], \"B\": [\"a\", \"b\", \"c\"], \"idx\": [\"x\", \"y\", \"z\"]}\n ).set_index(\"idx\"),\n )\n a += BrokenAnno()\n lj = a.anno_jobs[\"X\"]\n ppg.JobGeneratingJob(\"shu\", lambda: 55).depends_on(a.annotate())\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n assert \"Length and index mismatch \" in str(lj().exception)\n\n def test_anno_not_returning_enough_rows_and_no_index_range_index_on_df(self):\n class BrokenAnno(Annotator):\n columns = [\"X\"]\n\n def calc(self, df):\n return pd.DataFrame({\"X\": [1]})\n\n a = DelayedDataFrame(\n \"shu\", lambda: pd.DataFrame({\"A\": [1, 2, 3], \"B\": [\"a\", \"b\", \"c\"]})\n )\n a += BrokenAnno()\n lj = a.anno_jobs[\"X\"]\n ppg.JobGeneratingJob(\"shu\", lambda: 55).depends_on(a.annotate())\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n assert \"Length and index mismatch \" in str(lj().exception)\n\n def test_annotator_coliding_with_non_anno_column(self):\n a = DelayedDataFrame(\n \"shu\",\n lambda: pd.DataFrame(\n {\"A\": [1, 2, 3], \"B\": [\"a\", \"b\", \"c\"], \"idx\": [\"x\", \"y\", \"z\"]}\n ).set_index(\"idx\"),\n )\n a += Constant(\"A\", \"aa\")\n lj = a.anno_jobs[\"A\"]\n ppg.JobGeneratingJob(\"shu\", lambda: 55).depends_on(a.annotate())\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n assert \"were already present\" in str(lj().exception)\n\n def test_anno_returning_series(self):\n a = DelayedDataFrame(\n \"shu\",\n lambda: pd.DataFrame(\n {\"A\": [1, 2, 3], \"B\": [\"a\", \"b\", \"c\"], \"idx\": [\"x\", \"y\", \"z\"]}\n ).set_index(\"idx\"),\n )\n\n class SeriesAnno(Annotator):\n columns = [\"C\"]\n\n def calc(self, df):\n return pd.Series(list(range(len(df))))\n\n a += SeriesAnno()\n ppg.JobGeneratingJob(\"shu\", lambda: 55).depends_on(a.annotate())\n ppg.run_pipegraph()\n assert (a.df[\"C\"] == [0, 1, 2]).all()\n\n def test_anno_returning_series_but_defined_two_columns(self):\n a = DelayedDataFrame(\n \"shu\",\n lambda: pd.DataFrame(\n {\"A\": [1, 2, 3], \"B\": [\"a\", \"b\", \"c\"], \"idx\": [\"x\", \"y\", \"z\"]}\n ).set_index(\"idx\"),\n )\n\n class SeriesAnno(Annotator):\n columns = [\"C\", \"D\"]\n\n def calc(self, df):\n return pd.Series(list(range(len(df))))\n\n a += SeriesAnno()\n lj = a.anno_jobs[\"C\"]\n ppg.JobGeneratingJob(\"shu\", lambda: 55).depends_on(a.annotate())\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n assert 
\"result was no dataframe\" in str(lj().lfg.exception)\n\n def test_anno_returning_string(self):\n a = DelayedDataFrame(\n \"shu\",\n lambda: pd.DataFrame(\n {\"A\": [1, 2, 3], \"B\": [\"a\", \"b\", \"c\"], \"idx\": [\"x\", \"y\", \"z\"]}\n ).set_index(\"idx\"),\n )\n\n class SeriesAnno(Annotator):\n columns = [\"C\", \"D\"]\n\n def calc(self, df):\n return \"abc\"\n\n a += SeriesAnno()\n lj = a.anno_jobs[\"C\"]\n ppg.JobGeneratingJob(\"shu\", lambda: 55).depends_on(a.annotate())\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n assert \"result was no dataframe\" in str(lj().lfg.exception)\n\n def test_lying_about_columns(self):\n a = DelayedDataFrame(\n \"shu\",\n lambda: pd.DataFrame(\n {\"A\": [1, 2, 3], \"B\": [\"a\", \"b\", \"c\"], \"idx\": [\"x\", \"y\", \"z\"]}\n ).set_index(\"idx\"),\n )\n\n class SeriesAnno(Annotator):\n columns = [\"C\"]\n\n def calc(self, df):\n return pd.DataFrame({\"D\": [0, 1, 2]})\n\n a += SeriesAnno()\n lj = a.anno_jobs[\"C\"]\n ppg.JobGeneratingJob(\"shu\", lambda: 55).depends_on(a.annotate())\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n assert \"declared different \" in str(lj().exception)\n\n def test_annotator_depending_on_actual_jobs(self):\n def wf():\n Path(\"fileA\").write_text(\"hello\")\n\n class TestAnno(Annotator):\n columns = [\"C\"]\n\n def calc(self, df):\n prefix = Path(\"fileA\").read_text()\n return pd.Series([prefix] * len(df))\n\n def deps(self, ddf):\n return [ppg.FileGeneratingJob(\"fileA\", wf)]\n\n a = DelayedDataFrame(\n \"shu\", lambda: pd.DataFrame({\"A\": [1, 2, 3], \"B\": [\"a\", \"b\", \"c\"]})\n )\n a.add_annotator(TestAnno())\n a.write()\n ppg.run_pipegraph()\n assert (a.df[\"C\"] == \"hello\").all()\n\n def test_nested_anno_dependencies(self):\n class Nested(Annotator):\n columns = [\"b\"]\n\n def calc(self, df):\n return pd.Series([10] * len(df))\n\n def dep_annos(self):\n return [Constant(\"Nestedconst\", 5)]\n\n class Nesting(Annotator):\n columns = [\"a\"]\n\n def calc(self, df):\n return pd.Series([15] * len(df))\n\n def dep_annos(self):\n return [Constant(\"Nestingconst\", 5), Nested()]\n\n anno = Nesting()\n a = DelayedDataFrame(\n \"shu\", lambda: pd.DataFrame({\"A\": [1, 2, 3], \"B\": [\"a\", \"b\", \"c\"]})\n )\n a += anno\n a.write()\n ppg.run_pipegraph()\n assert (a.df[\"a\"] == 15).all()\n assert (a.df[\"b\"] == 10).all()\n assert (a.df[\"Nestedconst\"] == 5).all()\n assert (a.df[\"Nestingconst\"] == 5).all()\n\n def test_adding_in_job_generating_raises(self):\n a = DelayedDataFrame(\n \"shu\", lambda: pd.DataFrame({\"A\": [1, 2, 3], \"B\": [\"a\", \"b\", \"c\"]})\n )\n\n def gen():\n a.add_annotator(Constant(\"shu\", 5))\n\n job = ppg.JobGeneratingJob(\"x\", gen)\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n assert isinstance(job.exception, ppg.JobContractError)\n\n def test_anno_returing_right_length_but_wrong_start_range_index(self):\n a = DelayedDataFrame(\"shu\", lambda: pd.DataFrame({\"A\": [1, 2, 3]}))\n\n class BadAnno(Annotator):\n columns = [\"X\"]\n\n def calc(self, df):\n return pd.Series([\"a\", \"b\", \"c\"], index=pd.RangeIndex(5, 5 + 3))\n\n a += BadAnno()\n force_load(a.annotate())\n lj = a.anno_jobs[\"X\"]\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n assert \"Index mismatch\" in str(lj().exception)\n", "id": "8694021", "language": "Python", "matching_score": 5.408649921417236, "max_stars_count": 0, "path": "tests/test_delayed_dataframe.py" }, { "content": "from pathlib import Path\nimport pandas as pd\nimport numpy as 
np\nimport pypipegraph as ppg\nimport threading\n\nfrom .annotator import Annotator\nfrom mbf_externals.util import lazy_method\nfrom mbf_genomics.util import (\n parse_a_or_c_to_column,\n parse_a_or_c_to_anno,\n find_annos_from_column,\n)\n\n\nclass DelayedDataFrame(object):\n \"\"\"Base class for DataFrame + annotators style of classes.\n\n An annotator is an object that can calculate additional columns for a DataFrame.\n\n This is a dual object - it can be used in a pipegraph - in which\n case it .annotate() returns jobs, and calculatin is 'smart & lazy'\n or without a global pipegraph in which case loading and += annotator evaluation\n happens immediatly (though it still chases the dep_anno chain defined by the annotators)\n\n Annotators may be annotated to any child (created by .filter),\n and in the ppg case, they will be evaluated on the top most parent that\n actually needs them.\n\n \"\"\"\n\n def __init__(self, name, loading_function, dependencies=[], result_dir=None):\n # assert_uniqueness_of_object is taking core of by the load_strategy\n self.name = name\n if result_dir:\n self.result_dir = Path(result_dir)\n else:\n self.result_dir = Path(\"results\") / self.__class__.__name__ / self.name\n self.result_dir.mkdir(parents=True, exist_ok=True)\n if isinstance(loading_function, pd.DataFrame):\n # don't you just love lambda variable binding?\n loading_function = (\n lambda loading_function=loading_function: loading_function\n )\n\n if not ppg.inside_ppg():\n self.load_strategy = Load_Direct(self, loading_function)\n else:\n self.load_strategy = Load_PPG(self, loading_function, dependencies)\n self.column_to_annotators = {}\n self.annotators = {}\n self.parent = None\n self.children = []\n # this prevents writing the same file with two different mangler functions\n # but still allows you to call write() in ppg settings multiple times\n # if different parts need to ensure it's being written out\n self.mangler_dict = {self.get_table_filename(): None}\n self.load()\n\n # magic\n def __hash__(self):\n return hash(\"DelayedDataFrame\" + self.name)\n\n def __str__(self):\n return \"DelayedDataFrame(%s)\" % self.name\n\n def __repr__(self):\n return \"DelayedDataFrame(%s)\" % self.name\n\n def load(self):\n return self.load_strategy.load()\n\n def __iadd__(self, other):\n \"\"\"Add and return self\"\"\"\n if isinstance(other, Annotator):\n if ppg.inside_ppg():\n if not self.has_annotator(other):\n self.load_strategy.add_annotator(other)\n elif self.get_annotator(other.get_cache_name()) is not other:\n raise ValueError(\n \"trying to add different annotators with identical cache_names\\n%s\\n%s\"\n % (other, self.get_annotator(other.get_cache_name()))\n )\n else:\n self.load_strategy.add_annotator(other)\n\n return self\n else:\n return NotImplemented\n\n def add_annotator(self, anno):\n \"\"\"Sugar for\n self += anno\n return self.anno_jobs[anno.get_cache_name()]\n \"\"\"\n self += anno\n if self.load_strategy.build_deps: # pragma: no branch\n return self.anno_jobs[anno.get_cache_name()]\n\n def has_annotator(self, anno):\n return anno.get_cache_name() in self.annotators\n\n def get_annotator(self, cache_name):\n return self.annotators[cache_name]\n\n @property\n def root(self):\n x = self\n while x.parent is not None:\n x = x.parent\n return x\n\n def annotate(self):\n \"\"\"Job: create plotjobs and dummy annotation job.\n\n This is all fairly convoluted.\n If you require a particular column, don't depend on this, but on the result of add_annotator().\n This is only the master job that 
jobs that require *all* columns (table dump...) depend on.\n \"\"\"\n return self.load_strategy.annotate()\n\n def filter(\n self,\n new_name,\n df_filter_function,\n annotators=[],\n dependencies=None,\n column_lookup=None,\n **kwargs,\n ):\n \"\"\"Filter an ddf to a new one called new_name.\n\n Paramaters\n -----------\n df_filter_function: function|list_of_filters\n function: take a df, return a valid index\n list_of_filters: see DelayedDataFrame.definition_to_function\n\n annotators:\n annotators used by your filter function.\n Leave empty in the case of list_of_filters\n\n dependencies:\n list of ppg.Jobs\n\n column_lookup: offer abbreviations to the column definitions in list_of_filters\n\n \"\"\"\n\n def load():\n idx_or_bools = df_filter_function(self.df)\n if isinstance(idx_or_bools, pd.Index):\n res = self.df.loc[idx_or_bools][self.non_annotator_columns]\n res = res.assign(parent_row=idx_or_bools)\n else:\n res = self.df[idx_or_bools][self.non_annotator_columns]\n res = res.assign(parent_row=self.df.index[idx_or_bools])\n return res\n\n if dependencies is None:\n dependencies = []\n elif not isinstance(dependencies, list): # pragma: no cover\n dependencies = list(dependencies)\n if isinstance(df_filter_function, (list, tuple)):\n if isinstance(df_filter_function, (tuple)):\n df_filter_function = [df_filter_function]\n df_filter_function, annotators = self.definition_to_function(\n df_filter_function, column_lookup if column_lookup is not None else {}\n )\n\n else:\n if isinstance(annotators, Annotator):\n annotators = [annotators]\n if self.load_strategy.build_deps:\n dependencies.append(\n ppg.ParameterInvariant(\n self.__class__.__name__ + \"_\" + new_name + \"_parent\", self.name\n )\n )\n dependencies.append(\n ppg.FunctionInvariant(\n self.__class__.__name__ + \"_\" + new_name + \"_filter\",\n df_filter_function,\n )\n )\n if hasattr(df_filter_function, \"_dependency_params\"):\n dependencies.append(\n ppg.ParameterInvariant(\n self.__class__.__name__ + \"_\" + new_name + \"_filter\",\n df_filter_function._dependency_params,\n )\n )\n\n dependencies.append(self.load())\n for anno in annotators:\n self += anno\n dependencies.append(self.anno_jobs[anno.get_cache_name()])\n\n else:\n for anno in annotators:\n self += anno\n\n result = self._new_for_filtering(new_name, load, dependencies, **kwargs)\n result.parent = self\n result.filter_annos = annotators\n for anno in self.annotators.values():\n result += anno\n\n self.children.append(result)\n return result\n\n def definition_to_function(self, definition, column_lookup): # noqa: C901\n \"\"\"\n Create a filter function from a tuple of\n (column_definition, operator, threshold)\n\n Operators are strings '>', '<', '==', '>=', '<=', 'isin'\n They may be prefixed by '|' which means 'take absolute first'\n\n Example:\n genes.filter('2x', [\n ('FDR', '<=', 0.05) # a name from column_lookup\n ('log2FC', '|>', 1), # absolute by prefixing operator\n ...\n (anno, '>=', 50),\n ((anno, 1), '>=', 50), # for the second column of the annotator\n ((anno, 'columnX'), '>=', 50), # for the second column of the annotator\n ('annotator_columnX', '=>' 50), # search for an annotator with that column. 
Use if exactly one, complain otherwise\n\n\n returns: a df_filter_func, [annotators]\n\n \"\"\"\n functors = []\n annotators = []\n for column_name, op, threshold in definition:\n anno = None\n if hasattr(self, \"df\") and column_name in self.df.columns:\n # we can't check for non-annotator columns on filter\n # definition in ppg ddfs that have not been loaded yet.\n # oh well, no worse than passing in a function with an\n # invalid column name\n # exception in the 'we have a df and column is not in it is\n # below\n anno = None\n else:\n if column_name in column_lookup:\n column_name = column_lookup[column_name]\n try:\n anno = parse_a_or_c_to_anno(column_name)\n column_name = parse_a_or_c_to_column(column_name)\n except (ValueError, KeyError):\n anno = None\n if anno is None:\n try:\n annos = find_annos_from_column(column_name)\n except KeyError:\n annos = []\n if len(annos) == 1:\n anno = annos[0]\n elif len(annos) > 1:\n raise KeyError(\n \"Column (%s) was present in multiple annotators: %s.\\n Pass in anno or (anno, column)\"\n % (column_name, annos)\n )\n\n if anno is None:\n if hasattr(self, \"df\"):\n if not column_name in self.df.columns:\n raise KeyError(\n f\"unknown column {column_name}\",\n \"available\",\n sorted(\n set(list(self.df.columns) + list(column_lookup))\n ),\n )\n else: # I guess, then the filter job fails later.\n pass\n if op == \"==\":\n f = (\n lambda df, column_name=column_name, threshold=threshold: df[\n column_name\n ]\n == threshold\n ) # noqa: E03\n elif op == \">\":\n f = (\n lambda df, column_name=column_name, threshold=threshold: df[\n column_name\n ]\n > threshold\n ) # noqa: E03\n elif op == \"<\":\n f = (\n lambda df, column_name=column_name, threshold=threshold: df[\n column_name\n ]\n < threshold\n ) # noqa: E03\n elif op == \">=\":\n f = (\n lambda df, column_name=column_name, threshold=threshold: df[\n column_name\n ]\n >= threshold\n ) # noqa: E03\n elif op == \"<=\":\n f = (\n lambda df, column_name=column_name, threshold=threshold: df[\n column_name\n ]\n <= threshold\n ) # noqa: E03\n elif op == \"|>\":\n f = (\n lambda df, column_name=column_name, threshold=threshold: df[\n column_name\n ].abs()\n > threshold # noqa: E03\n )\n elif op == \"|<\":\n f = (\n lambda df, column_name=column_name, threshold=threshold: df[\n column_name\n ].abs()\n < threshold\n ) # noqa: E03\n elif op == \"|>=\":\n f = (\n lambda df, column_name=column_name, threshold=threshold: df[\n column_name\n ].abs()\n >= threshold\n ) # noqa: E03\n elif op == \"|<=\":\n f = (\n lambda df, column_name=column_name, threshold=threshold: df[\n column_name\n ].abs()\n <= threshold\n ) # noqa: E03\n elif op == \"isin\":\n f = lambda df, column_name=column_name, chosen_set=threshold: df[ # noqa: E731\n column_name\n ].isin(\n set(chosen_set)\n )\n else:\n raise ValueError(f\"invalid operator {op}\")\n functors.append(f)\n if anno is not None:\n annotators.append(anno)\n\n def filter_func(df):\n keep = np.ones(len(df), bool)\n for f in functors:\n keep &= f(df)\n return keep\n\n filter_func._dependency_params = (definition, column_lookup)\n return filter_func, annotators\n\n def _new_for_filtering(self, new_name, load_func, deps, **kwargs):\n if not \"result_dir\" in kwargs:\n kwargs[\"result_dir\"] = self.result_dir / new_name\n return self.__class__(new_name, load_func, deps, **kwargs)\n\n def clone_without_annotators(\n self, new_name, non_annotator_columns_to_copy=None, result_dir=None\n ):\n \"\"\"Create a clone of this DelayedDataFrame without any of the old annotators 
attached\"\"\"\n if isinstance(non_annotator_columns_to_copy, str):\n raise ValueError(\n \"non_annotator_columns_to_copy must be an iterable - maybe you were trying to set result_dir?\"\n )\n\n def load():\n return self.df[self.non_annotator_columns]\n\n deps = [self.load()]\n result = self.__class__(new_name, load, deps, result_dir)\n return result\n\n def get_table_filename(self):\n return self.result_dir / (self.name + \".tsv\")\n\n def mangle_df_for_write(self, df):\n return df\n\n def write(self, output_filename=None, mangler_function=None, float_format=\"%4g\"):\n \"\"\"Job: Store the internal DataFrame (df) in a table.\n To sort, filter, remove columns, etc before output,\n pass in a mangler_function (takes df, returns df)\n\n Retruns a (Job, Path) tuple - job is None if outside ppg\n \"\"\"\n output_filename = self.pathify(\n output_filename, self.get_table_filename().absolute()\n ).relative_to(Path(\".\").absolute())\n\n def write(output_filename):\n if mangler_function:\n df = mangler_function(self.df.copy())\n else:\n df = self.mangle_df_for_write(self.df)\n if str(output_filename).endswith(\".xls\") or str(output_filename).endswith(\n \".xlsx\"\n ):\n try:\n df.to_excel(output_filename, index=False, float_format=float_format)\n except (ValueError):\n df.to_csv(\n output_filename,\n sep=\"\\t\",\n index=False,\n float_format=float_format,\n )\n else:\n df.to_csv(\n output_filename,\n sep=\"\\t\",\n index=False,\n encoding=\"utf-8\",\n float_format=float_format,\n )\n\n if self.load_strategy.build_deps:\n deps = [\n self.annotate(),\n ppg.FunctionInvariant(\n str(output_filename) + \"_mangler\", mangler_function\n ),\n ppg.ParameterInvariant(str(output_filename), float_format),\n ]\n else:\n deps = []\n return self.load_strategy.generate_file(output_filename, write, deps)\n\n def plot(self, output_filename, plot_func, calc_func=None, annotators=None):\n output_filename = self.pathify(output_filename).relative_to(\n Path(\".\").absolute()\n )\n\n def do_plot(output_filename):\n df = self.df\n if calc_func is not None:\n df = calc_func(df)\n p = plot_func(df)\n if hasattr(p, \"pd\"):\n p = p.pd\n p.save(output_filename, verbose=False)\n\n if self.load_strategy.build_deps:\n deps = [\n ppg.FunctionInvariant(\n output_filename.with_name(output_filename.name + \"_plot_func\"),\n plot_func,\n )\n ]\n if annotators is None:\n deps.append(self.annotate())\n else:\n deps.extend([self.add_annotator(x) for x in annotators])\n else:\n deps = []\n if annotators is not None:\n for anno in annotators:\n self += anno\n return self.load_strategy.generate_file(output_filename, do_plot, deps)\n\n def pathify(self, output_filename, default=None):\n \"\"\"Turn output_filename into a Path. 
If it's a relative path, treat\n it as relative to self.result_dir,\n if it's absolute, take it is at is\n \"\"\"\n if output_filename is None:\n output_filename = default\n output_filename = Path(output_filename)\n if not output_filename.is_absolute() and not \"/\" in str(output_filename):\n output_filename = self.result_dir / output_filename\n return output_filename.absolute()\n\n\ndef _combine_annotator_df_and_old_df(a_df, ddf_df):\n if len(a_df) == len(ddf_df):\n if isinstance(a_df.index, pd.RangeIndex) and a_df.index.start == 0:\n # assume it is in order\n a_df.index = ddf_df.index\n else:\n raise ValueError(\n \"Length and index mismatch - annotator did not return enough rows\"\n )\n new_df = pd.concat([ddf_df, a_df], axis=1)\n if len(new_df) != len(ddf_df):\n raise ValueError(\n \"Index mismatch between DataFrame and Annotator result concatenating added %i rows- \"\n % (len(new_df) - len(ddf_df))\n + \"Annotator must return either a DF with a compatible index \"\n \"or one with a RangeIndex(0,len(df))\"\n )\n return new_df\n\n\nclass Load_Direct:\n def __init__(self, ddf, loading_function):\n self.ddf = ddf\n self.loading_function = loading_function\n self.build_deps = False\n\n def load(self):\n if not hasattr(self.ddf, \"df\"):\n self.ddf.df = self.loading_function()\n self.ddf.non_annotator_columns = self.ddf.df.columns\n\n def generate_file(self, filename, write_callback, dependencies, empty_ok=False):\n write_callback(filename)\n return None, Path(filename).absolute()\n\n def add_annotator(self, anno):\n if anno.get_cache_name() in self.ddf.annotators:\n if self.ddf.annotators[anno.get_cache_name()] != anno:\n raise ValueError(\n \"Trying to add two different annotators with same cache name: %s and %s\"\n % (anno, self.ddf.annotators[anno.get_cache_name()])\n )\n return\n self.ddf.annotators[anno.get_cache_name()] = anno\n for d in anno.dep_annos():\n if d is not None:\n self.ddf += d\n s_should = set(anno.columns)\n if not s_should:\n raise IndexError(\"Empty columns\")\n if (\n self.ddf.parent is not None\n and anno.get_cache_name() in self.ddf.parent.annotators\n ):\n a_df = self.ddf.parent.df.loc[self.ddf.df.index]\n a_df = a_df[s_should]\n else:\n if not isinstance(anno.columns, list):\n raise ValueError(\"Columns was not a list\")\n if hasattr(anno, \"calc_ddf\"):\n a_df = anno.calc_ddf(self.ddf)\n else:\n a_df = anno.calc(self.ddf.df)\n if isinstance(a_df, pd.Series) and len(s_should) == 1:\n a_df = pd.DataFrame({next(iter(s_should)): a_df})\n elif not isinstance(a_df, pd.DataFrame):\n raise ValueError(\n \"Annotator return non DataFrame result (nor a Series and len(anno.columns) == 1)\"\n )\n s_actual = set(a_df.columns)\n if s_should != s_actual:\n raise ValueError(\n \"Annotator declared different columns from those actualy calculated: %s\"\n % (s_should.symmetric_difference(s_actual))\n )\n for k in s_actual:\n if k in self.ddf.df.columns:\n raise ValueError(\"Same column form two annotators\", k)\n self.ddf.df = _combine_annotator_df_and_old_df(a_df, self.ddf.df)\n\n for c in self.ddf.children:\n c += anno\n\n def annotate(self): # a noop\n return None\n\n\nclass Load_PPG:\n def __init__(self, ddf, loading_function, deps):\n ppg.assert_uniqueness_of_object(ddf)\n ddf.cache_dir = (\n Path(ppg.util.global_pipegraph.cache_folder)\n / ddf.__class__.__name__\n / ddf.name\n )\n ddf.cache_dir.mkdir(parents=True, exist_ok=True)\n self.ddf = ddf\n self.ddf.anno_jobs = {}\n\n self.loading_function = loading_function\n self.deps = deps\n self.build_deps = True\n 
self.tree_fixed = False\n self.lock = threading.Lock()\n\n def add_annotator(self, anno):\n if ppg.util.global_pipegraph.running:\n raise ppg.JobContractError(\n \"Can not add_annotator in a running pipegraph\"\n \" - the annotator structure get's fixed when a \"\n \"pipegraph is run, you can't add to it in e.g. a \"\n \"JobGeneratingJob\"\n )\n cache_name = anno.get_cache_name()\n forbidden_chars = \"/\", \"?\", \"*\"\n if any((x in cache_name for x in forbidden_chars)) or len(cache_name) > 60:\n raise ValueError(\n \"annotator.column_names[0] not suitable as a cache_name (was %s), add cache_name property\"\n % repr(cache_name)\n )\n if not cache_name in self.ddf.annotators:\n # if not hasattr(anno, \"columns\"): # handled by get_cache_name\n # raise AttributeError(\"no columns property on annotator %s\" % repr(anno))\n self.ddf.annotators[cache_name] = anno\n self.ddf.anno_jobs[cache_name] = self.get_anno_dependency_callback(anno)\n for c in self.ddf.children:\n c += anno\n return self.ddf.anno_jobs[cache_name]\n\n @lazy_method\n def load(self):\n def load_func(df):\n self.ddf.df = df\n self.ddf.non_annotator_columns = self.ddf.df.columns\n\n job = ppg.CachedDataLoadingJob(\n self.ddf.cache_dir / \"calc\", self.loading_function, load_func\n )\n job.depends_on(self.deps).depends_on(\n ppg.FunctionInvariant(\n self.ddf.__class__.__name__ + \"_\" + self.ddf.name + \"_load\",\n self.loading_function,\n )\n )\n return job\n\n def generate_file(self, filename, write_callback, dependencies, empty_ok=False):\n return (\n ppg.FileGeneratingJob(filename, write_callback, empty_ok=empty_ok)\n .depends_on(dependencies)\n .depends_on(self.load()),\n Path(filename),\n )\n\n def get_anno_dependency_callback(self, anno):\n if anno.get_cache_name() in self.ddf.anno_jobs:\n raise NotImplementedError(\"Should have checked before\") # pragma: no cover\n\n def gen():\n self.ddf.root.load_strategy.fix_anno_tree()\n return self.ddf.anno_jobs[anno.get_cache_name()]\n\n return gen\n\n def fix_anno_tree(self):\n if self.ddf.parent is not None: # pragma: no branch\n raise NotImplementedError(\n \"Should only be called on the root\"\n ) # pragma: no cover\n if self.tree_fixed:\n return\n self.tree_fixed = True\n\n def recursivly_add_annos(deps, a):\n new = a.dep_annos()\n for n in new:\n if n is None:\n continue\n if n not in deps:\n recursivly_add_annos(deps, n)\n deps.add(n)\n\n def descend_and_add_annos(node):\n annos_here = set(node.ddf.annotators.values())\n deps = set()\n for a in annos_here:\n recursivly_add_annos(deps, a)\n annos_here.update(deps)\n for anno in annos_here:\n node.add_annotator(anno)\n for c in node.ddf.children:\n c.load_strategy.add_annotator(anno)\n for (\n c\n ) in (\n node.ddf.children\n ): # they might have annos that the parent did not have\n descend_and_add_annos(c.load_strategy)\n\n def descend_and_jobify(node):\n for anno in node.ddf.annotators.values():\n if (\n node.ddf.parent is not None\n and anno.get_cache_name() in node.ddf.parent.annotators\n ):\n node.ddf.anno_jobs[anno.get_cache_name()] = node._anno_load(anno)\n else:\n # this is the top most node with this annotator\n node.ddf.anno_jobs[\n anno.get_cache_name()\n ] = node._anno_cache_and_calc(anno)\n if hasattr(anno, \"register_qc\"):\n anno.register_qc(node.ddf)\n for c in node.ddf.children:\n descend_and_jobify(c.load_strategy)\n\n descend_and_add_annos(self)\n for anno in self.ddf.annotators.values():\n job = self._anno_cache_and_calc(anno)\n self.ddf.anno_jobs[anno.get_cache_name()] = job\n # for c in 
self.ddf.children:\n descend_and_jobify(self)\n\n def _anno_load(self, anno):\n def load():\n\n # ppg2.util.log_error(\n # f\"retreiving for {self.ddf.name} from {self.ddf.parent.name} {anno.columns} - available {self.ddf.parent.df.columns} {id(self.ddf.parent.df)}\"\n # )\n new_cols = self.ddf.parent.df[anno.columns].reindex(self.ddf.df.index)\n with self.lock:\n self.ddf.df = pd.concat(\n [\n self.ddf.df,\n new_cols,\n ],\n axis=1,\n )\n if hasattr(ppg, \"is_ppg2\"):\n import pypipegraph2 as ppg2\n\n my_hash = ppg2.hashers.hash_bytes(new_cols.values.tobytes())\n return ppg2.ValuePlusHash(None, my_hash)\n\n job = ppg.DataLoadingJob(self.ddf.cache_dir / anno.get_cache_name(), load)\n job.depends_on(\n ppg.FunctionInvariant(\n self.ddf.cache_dir / (anno.get_cache_name() + \"_funcv\"), anno.calc\n ),\n self.ddf.parent.anno_jobs[anno.get_cache_name()],\n self.ddf.load(),\n )\n return job\n\n def _anno_cache_and_calc(self, anno):\n def calc():\n if not isinstance(anno.columns, list):\n raise ValueError(\"Columns was not a list\")\n\n if hasattr(anno, \"calc_ddf\"):\n df = anno.calc_ddf(self.ddf)\n else:\n df = anno.calc(self.ddf.df)\n if isinstance(df, pd.Series) and len(anno.columns) == 1:\n df = pd.DataFrame({anno.columns[0]: df})\n if not isinstance(df, pd.DataFrame):\n raise ValueError(\n \"result was no dataframe (or series and len(anno.columns) == 1)\"\n )\n return df\n\n def load(df):\n s_should = set(anno.columns)\n if not len(s_should):\n raise ValueError(\"anno.columns was empty\")\n s_actual = set(df.columns)\n if s_should != s_actual:\n raise ValueError(\n \"Annotator declared different columns from those actualy calculated: %s\"\n % (s_should.symmetric_difference(s_actual))\n )\n if set(df.columns).intersection(self.ddf.df.columns):\n raise ValueError(\n \"Annotator created columns that were already present.\",\n self.ddf.name,\n anno.get_cache_name(),\n set(df.columns).intersection(self.ddf.df.columns),\n )\n\n # old_id = id(self.ddf.df)\n with self.lock:\n self.ddf.df = _combine_annotator_df_and_old_df(df, self.ddf.df)\n # ppg2.util.log_error(\n # f\"added to {self.ddf.name} {df.columns} {self.ddf.df.columns} {id(self.ddf.df)} {old_id}\"\n # )\n\n (self.ddf.cache_dir / anno.__class__.__name__).mkdir(exist_ok=True)\n job = ppg.CachedDataLoadingJob(\n self.ddf.cache_dir / anno.__class__.__name__ / anno.get_cache_name(),\n calc,\n load,\n )\n\n ppg.Job.depends_on(\n job, self.load()\n ) # both the load and the calc needs our ddf.df\n job.depends_on(\n self.load(),\n ppg.FunctionInvariant(\n self.ddf.cache_dir / (anno.get_cache_name() + \"_calc_func\"),\n anno.calc if hasattr(anno, \"calc\") else anno.calc_ddf,\n ),\n )\n for d in anno.dep_annos():\n if d is not None:\n job.depends_on(self.ddf.anno_jobs[d.get_cache_name()])\n job.depends_on(anno.deps(self.ddf))\n job.lfg.cores_needed = getattr(anno, \"cores_needed\", 1)\n return job\n\n def annotate(self):\n res = lambda: list(self.ddf.anno_jobs.values()) # noqa: E731\n res.job_id = self.ddf.name + \"_annotate_callback\"\n return res\n", "id": "4875372", "language": "Python", "matching_score": 3.9572508335113525, "max_stars_count": 0, "path": "src/mbf_genomics/delayeddataframe.py" }, { "content": "from abc import ABC\nfrom typing import List\nimport hashlib\nimport pandas as pd\nimport pypipegraph as ppg\nfrom .util import freeze\nfrom pathlib import Path\nimport numpy as np\n\nannotator_singletons = {\"lookup\": []}\n\n\nclass Annotator(ABC):\n def __new__(cls, *args, **kwargs):\n cn = cls.__name__\n if ppg.inside_ppg():\n if not 
hasattr(ppg.util.global_pipegraph, \"_annotator_singleton_dict\"):\n ppg.util.global_pipegraph._annotator_singleton_dict = {\"lookup\": []}\n singleton_dict = ppg.util.global_pipegraph._annotator_singleton_dict\n else:\n singleton_dict = annotator_singletons\n if not cn in singleton_dict:\n singleton_dict[cn] = {}\n key = {}\n for ii in range(0, len(args)):\n key[\"arg_%i\" % ii] = args[ii]\n key.update(kwargs)\n for k, v in key.items():\n key[k] = freeze(v)\n key = tuple(sorted(key.items()))\n if not key in singleton_dict[cn]:\n singleton_dict[cn][key] = object.__new__(cls)\n singleton_dict[\"lookup\"].append(singleton_dict[cn][key])\n\n return singleton_dict[cn][key]\n\n def __hash__(self):\n return hash(self.get_cache_name())\n\n def __str__(self):\n return \"Annotator %s\" % self.columns[0]\n\n def __repr__(self):\n return \"Annotator(%s)\" % self.columns[0]\n\n def __freeze__(self):\n return \"Annotator(%s)\" % self.columns[0]\n\n def get_cache_name(self):\n if hasattr(self, \"cache_name\"):\n return self.cache_name\n else:\n return self.columns[0]\n\n def calc(self, df):\n raise NotImplementedError() # pragma: no cover\n\n def deps(self, ddf):\n \"\"\"Return ppg.jobs\"\"\"\n return []\n\n def dep_annos(self):\n \"\"\"Return other annotators\"\"\"\n return []\n\n\nclass Constant(Annotator):\n def __init__(self, column_name, value):\n self.columns = [column_name]\n self.value = value\n\n def calc(self, df):\n return pd.DataFrame({self.columns[0]: self.value}, index=df.index)\n\n\nclass FromFile(Annotator):\n def __init__(\n self,\n tablepath: Path,\n columns_to_add: List[str],\n index_column_table: str = \"gene_stable_id\",\n index_column_genes: str = \"gene_stable_id\",\n fill_value: float = None,\n is_tsv: bool = False,\n ):\n \"\"\"\n Adds arbitrary columns from a table.\n\n This requires that both the table and the ddf have a common column on\n which we can index.\n\n Parameters\n ----------\n tablepath : Path\n Path to table with additional columns.\n columns_to_add : List[str]\n List of columns to append.\n index_column_table : str, optional\n Index column in table, by default \"gene_stable_id\".\n index_column_genes : str, optional\n Index column in ddf to append to, by default \"gene_stable_id\".\n fill_value : float, optonal\n Value to fill for missing rows, defaults to np.NaN.\n is_tsv : bool\n If the input file is a .tsv file regardless of the suffix.\n \"\"\"\n self.tablepath = tablepath\n self.columns = columns_to_add\n self.index_column_table = index_column_table\n self.index_column_genes = index_column_genes\n self.fill = fill_value if fill_value is not None else np.NaN\n self.is_tsv = is_tsv\n\n def parse(self):\n if ((self.tablepath.suffix == \".xls\") or (self.tablepath.suffix == \".xlsx\")) and not self.is_tsv:\n return pd.read_excel(self.tablepath)\n else:\n return pd.read_csv(self.tablepath, sep=\"\\t\")\n\n def get_cache_name(self):\n suffix = f\"{self.tablepath.name}_{self.columns[0]}\".encode(\"utf-8\")\n return f\"FromFile_{hashlib.md5(suffix).hexdigest()}\"\n\n def calc_ddf(self, ddf):\n \"\"\"Calculates the ddf to append.\"\"\"\n df_copy = ddf.df.copy()\n if self.index_column_genes not in df_copy.columns:\n raise ValueError(\n f\"Column {self.index_column_genes} not found in ddf index, found was:\\n{[str(x) for x in df_copy.columns]}.\"\n )\n df_in = self.parse()\n if self.index_column_table not in df_in.columns:\n raise ValueError(\n f\"Column {self.index_column_table} not found in table, found was:\\n{[str(x) for x in df_in.columns]}.\"\n )\n for column in 
self.columns:\n if column not in df_in.columns:\n raise ValueError(\n f\"Column {column} not found in table, found was:\\n{[str(x) for x in df_in.columns]}.\"\n )\n df_copy.index = df_copy[self.index_column_genes]\n df_in.index = df_in[self.index_column_table]\n df_in = df_in.reindex(df_copy.index, fill_value=self.fill)\n df_in = df_in[self.columns]\n df_in.index = ddf.df.index\n return df_in\n\n def deps(self, ddf):\n \"\"\"Return ppg.jobs\"\"\"\n return ppg.FileInvariant(self.tablepath)\n", "id": "112894", "language": "Python", "matching_score": 3.16766619682312, "max_stars_count": 0, "path": "src/mbf_genomics/annotator.py" }, { "content": "import pandas as pd\n\n\ndef read_pandas(filename):\n filename = str(filename)\n if filename.endswith(\".xls\") or filename.endswith(\".xlsx\"):\n from xlrd import XLRDError\n\n try:\n filein = pd.read_excel(filename)\n except XLRDError:\n filein = pd.read_csv(filename, sep=\"\\t\")\n except ValueError as e:\n if \"Excel file format cannot be determined\" in str(e):\n filein = pd.read_csv(filename, sep=\"\\t\")\n else:\n raise\n return filein\n\n elif filename.endswith(\".tsv\"):\n return pd.read_csv(filename, sep=\"\\t\")\n elif filename.endswith(\".csv\"):\n return pd.read_csv(filename)\n else:\n raise ValueError(\"Unknown filetype: %s\" % filename)\n\n\ndef freeze(obj):\n \"\"\" Turn dicts into frozendict,\n lists into tuples, and sets\n into frozensets, recursively - usefull\n to get a hash value..\n \"\"\"\n # TODO: combine with ppg.util.freeze\n if hasattr(obj, \"__freeze__\"):\n return obj.__freeze__()\n else:\n import pypipegraph as ppg\n\n return ppg.util.freeze(obj)\n\n\ndef parse_a_or_c(ac):\n \"\"\"parse an annotator/column combo into a tuple\n anno, column\n\n Input may be\n a str -> None, column name\n an annotator -> anno, anno.columns[0]\n an (annotator, str) tuple -> anno, str\n an (annotator, int(i)) tuple -> anno, annotator.columns[i]\n \"\"\"\n from mbf_genomics.annotator import Annotator\n\n if isinstance(ac, str):\n return (None, ac)\n elif isinstance(ac, Annotator):\n return ac, ac.columns[0]\n elif isinstance(ac, tuple) and len(ac) == 2 and isinstance(ac[0], Annotator):\n if isinstance(ac[1], int):\n return ac[0], ac[0].columns[ac[1]]\n else:\n if not ac[1] in ac[0].columns:\n raise KeyError(\n \"Invalid column name, %s -annotator had %s\", (ac[1], ac[0].columns)\n )\n return ac\n elif isinstance(ac, tuple) and len(ac) == 2 and ac[0] is None:\n return ac\n else:\n raise ValueError(\"parse_a_or_c could not parse %s\" % (ac,))\n\n\ndef parse_a_or_c_to_column(k):\n \"\"\"Parse an annotator + column spec to the column name.\n See parse_a_or_c\n \"\"\"\n return parse_a_or_c(k)[1]\n\n\ndef parse_a_or_c_to_anno(k):\n \"\"\"Parse an annotator + column spec to the annotator (or None)\n See parse_a_or_c\n \"\"\"\n return parse_a_or_c(k)[0]\n\n\ndef parse_a_or_c_to_plot_name(k, default=None):\n \"\"\"Parse an annotator + column spec to a plot name\n See parse_a_or_c_to_column\n\n Defaults to column name if no plot_name is defined on annotator\n\n \"\"\"\n ac = parse_a_or_c(k)\n if ac[0] is None:\n return k\n return getattr(ac[0], \"plot_name\", default if default is not None else ac[1])\n\n\ndef find_annos_from_column(k):\n from . 
import annotator\n import pypipegraph as ppg\n\n if ppg.inside_ppg():\n if not hasattr(ppg.util.global_pipegraph, \"_annotator_singleton_dict\"):\n ppg.util.global_pipegraph._annotator_singleton_dict = {}\n singleton_dict = ppg.util.global_pipegraph._annotator_singleton_dict\n else:\n singleton_dict = annotator.annotator_singletons\n\n res = []\n for anno in singleton_dict[\"lookup\"]:\n if k in anno.columns:\n res.append(anno)\n if res:\n return res\n else:\n raise KeyError(\"No anno for column '%s' found\" % (k,))\n", "id": "5009634", "language": "Python", "matching_score": 2.1240718364715576, "max_stars_count": 0, "path": "src/mbf_genomics/util.py" }, { "content": "import pytest\n\n\[email protected]\ndef clear_annotators(request):\n \"\"\"Clear the annotator singleton instance cache\n which is only used if no ppg is in play\"\"\"\n import mbf_genomics.annotator\n\n mbf_genomics.annotator.annotator_singletons.clear()\n mbf_genomics.annotator.annotator_singletons[\"lookup\"] = []\n", "id": "10106987", "language": "Python", "matching_score": 0.13432921469211578, "max_stars_count": 0, "path": "src/mbf_genomics/testing/fixtures.py" }, { "content": "from . import convert # for export\nfrom . import plots # for export...\nfrom .regions import GenomicRegions, region_registry\nfrom .regions_from import (\n GenomicRegions_BinnedGenome,\n GenomicRegions_Common,\n GenomicRegions_CommonInAtLeastX,\n GenomicRegions_Difference,\n GenomicRegions_Intersection,\n GenomicRegions_FromBed,\n GenomicRegions_FromBigBed,\n GenomicRegions_FromGFF,\n GenomicRegions_FromPartec,\n GenomicRegions_FromTable,\n GenomicRegions_FromWig,\n GenomicRegions_Invert,\n GenomicRegions_Overlapping,\n GenomicRegions_Union,\n GenomicRegions_Windows,\n GenomicRegions_FilterRemoveOverlapping,\n GenomicRegions_FilterToOverlapping,\n)\n# from . import annotators\n\n\n__all__ = [\n \"convert\",\n \"GenomicRegions\",\n \"GenomicRegions_BinnedGenome\",\n \"GenomicRegions_Common\",\n \"GenomicRegions_CommonInAtLeastX\",\n \"GenomicRegions_Difference\",\n \"GenomicRegions_FromBed\",\n \"GenomicRegions_FromBigBed\",\n \"GenomicRegions_FromGFF\",\n \"GenomicRegions_FromPartec\",\n \"GenomicRegions_FromTable\",\n \"GenomicRegions_FromWig\",\n \"GenomicRegions_Invert\",\n \"GenomicRegions_Overlapping\",\n \"GenomicRegions_Union\",\n \"GenomicRegions_Intersection\",\n \"GenomicRegions_Windows\",\n \"plots\",\n \"region_registry\",\n \"GenomicRegions_FilterRemoveOverlapping\",\n \"GenomicRegions_FilterToOverlapping\",\n]\n", "id": "2951179", "language": "Python", "matching_score": 1.552728295326233, "max_stars_count": 0, "path": "src/mbf_genomics/regions/__init__.py" }, { "content": "from .genes import Genes\nfrom .genes_from import (\n FromDifference,\n FromIntersection,\n FromAny,\n FromAll,\n FromNone,\n FromFile,\n FromFileOfTranscripts,\n FromBiotypes,\n FromNames,\n)\nfrom . import anno_tag_counts\nfrom . 
import annotators\n\n\n__all__ = [\n \"Genes\",\n \"FromDifference\",\n \"FromIntersection\",\n \"FromAny\",\n \"FromAll\",\n \"FromNone\",\n \"FromFile\",\n \"FromFileOfTranscripts\",\n \"FromBiotypes\",\n \"FromNames\",\n \"anno_tag_counts\",\n \"annotators\",\n]\n", "id": "8275880", "language": "Python", "matching_score": 1.2655977010726929, "max_stars_count": 0, "path": "src/mbf_genomics/genes/__init__.py" }, { "content": "from mbf_genomes import HardCodedGenome\n\n\ndef MockGenome( # noqa: C901\n df_genes, df_transcripts=None, chr_lengths=None, df_genes_meta=None\n): # noqa: C901\n if chr_lengths is None:\n chr_lengths = {\n \"1\": 100_000,\n \"2\": 200_000,\n \"3\": 300_000,\n \"4\": 400_000,\n \"5\": 500_000,\n }\n\n df_genes = df_genes.rename(columns={\"stable_id\": \"gene_stable_id\"})\n if not \"start\" in df_genes.columns:\n starts = []\n stops = []\n if not \"strand\" in df_genes:\n tes_larger = df_genes[\"tes\"] > df_genes[\"tss\"]\n strand = tes_larger.replace({True: 1, False: -1})\n df_genes = df_genes.assign(strand=strand)\n for idx, row in df_genes.iterrows():\n starts.append(min(row[\"tss\"], row[\"tes\"]))\n stops.append(max(row[\"tss\"], row[\"tes\"]))\n df_genes = df_genes.assign(start=starts, stop=stops)\n if not \"biotype\" in df_genes.columns:\n df_genes = df_genes.assign(biotype=\"protein_coding\")\n if not \"name\" in df_genes.columns:\n df_genes = df_genes.assign(name=df_genes.gene_stable_id)\n df_genes = df_genes.sort_values([\"chr\", \"start\"])\n df_genes = df_genes.set_index(\"gene_stable_id\")\n if not df_genes.index.is_unique: # pragma: no cover\n raise ValueError(\"gene_stable_ids not unique\")\n if df_transcripts is not None and len(df_transcripts):\n if not \"transcript_stable_id\" in df_transcripts.columns:\n print(df_transcripts.columns)\n df_transcripts = df_transcripts.assign(\n transcript_stable_id=df_transcripts[\"name\"]\n )\n if not \"biotype\" in df_transcripts.columns:\n df_transcripts = df_transcripts.assign(biotype=\"protein_coding\")\n if not \"name\" in df_transcripts.columns:\n df_transcripts = df_transcripts.assign(\n name=df_transcripts.transcript_stable_id\n )\n if \"exons\" in df_transcripts.columns:\n if len(df_transcripts[\"exons\"].iloc[0]) == 3: # pragma: no cover\n df_transcripts = df_transcripts.assign(\n exons=[(x[0], x[1]) for x in df_transcripts[\"exons\"]]\n )\n df_transcripts = df_transcripts.assign(\n exon_stable_ids=[\n \"exon_%s_%i\" % (idx, ii)\n for (ii, idx) in enumerate(df_transcripts[\"exons\"])\n ]\n )\n stops = []\n if not \"strand\" in df_transcripts: # pragma: no cover\n df_transcripts = df_transcripts.assign(strand=1)\n if not \"tss\" in df_transcripts: # pragma: no branch\n tss = []\n tes = []\n for tr, row in df_transcripts.iterrows():\n if row[\"strand\"] == 1:\n tss.append(min((x[0] for x in row[\"exons\"])))\n tes.append(max((x[1] for x in row[\"exons\"])))\n else:\n tss.append(max((x[1] for x in row[\"exons\"])))\n tes.append(min((x[0] for x in row[\"exons\"])))\n df_transcripts = df_transcripts.assign(tss=tss, tes=tes)\n if not \"start\" in df_transcripts:\n starts = []\n stops = []\n for idx, row in df_transcripts.iterrows():\n starts.append(min(row[\"tss\"], row[\"tes\"]))\n stops.append(max(row[\"tss\"], row[\"tes\"]))\n df_transcripts = df_transcripts.assign(start=starts, stop=stops)\n df_transcripts = df_transcripts.set_index(\"transcript_stable_id\")\n if not df_transcripts.index.is_unique: # pragma: no cover\n raise ValueError(\"transcript_stable_ids not unique\")\n result = 
HardCodedGenome(\"dummy\", chr_lengths, df_genes, df_transcripts, None)\n result.sanity_check_genes(df_genes)\n if df_transcripts is not None and len(df_transcripts):\n result.sanity_check_transcripts(df_transcripts)\n if df_genes_meta is not None:\n result.df_genes_meta = df_genes_meta\n return result\n", "id": "4677236", "language": "Python", "matching_score": 2.409230947494507, "max_stars_count": 0, "path": "src/mbf_genomics/testing/__init__.py" }, { "content": "import pytest\nimport numpy as np\nfrom mbf_genomes import HardCodedGenome\nimport pandas as pd\n\ndefault_chr_lengths = {\n \"1\": 100_000,\n \"2\": 200_000,\n \"3\": 300_000,\n \"4\": 400_000,\n \"5\": 500_000,\n}\n\n\ndef DummyGenome(df_genes, df_transcripts=None):\n\n df_genes = df_genes.rename(columns={\"stable_id\": \"gene_stable_id\"})\n if not \"start\" in df_genes.columns:\n starts = []\n stops = []\n for idx, row in df_genes.iterrows():\n if row[\"strand\"] == 1:\n starts.append(row[\"tss\"])\n stops.append(row[\"tes\"])\n else:\n starts.append(row[\"tes\"])\n stops.append(row[\"tss\"])\n df_genes = df_genes.assign(start=starts, stop=stops)\n if not \"biotype\" in df_genes.columns:\n df_genes = df_genes.assign(biotype=\"protein_coding\")\n if not \"name\" in df_genes.columns:\n df_genes = df_genes.assign(name=df_genes.index)\n df_genes = df_genes.sort_values([\"chr\", \"start\"])\n df_genes = df_genes.set_index(\"gene_stable_id\")\n if df_transcripts is not None:\n if not \"biotype\" in df_transcripts.columns:\n df_transcripts = df_transcripts.assign(biotype=\"protein_coding\")\n if not \"name\" in df_transcripts.columns:\n df_transcripts = df_transcripts.assign(name=df_transcripts.index)\n if \"exons\" in df_transcripts.columns:\n if len(df_transcripts[\"exons\"].iloc[0]) == 3:\n df_transcripts = df_transcripts.assign(\n exons=[(x[0], x[1]) for x in df_transcripts[\"exons\"]]\n )\n df_transcripts = df_transcripts.assign(\n exon_stable_ids=[\n \"exon_%s_%i\" % (idx, ii)\n for (ii, idx) in enumerate(df_transcripts[\"exons\"])\n ]\n )\n df_transcripts = df_transcripts.set_index(\"transcript_stable_id\")\n return HardCodedGenome(\"dummy\", default_chr_lengths, df_genes, df_transcripts, None)\n\n\ndef test_transcript_get_introns():\n genome = DummyGenome(\n pd.DataFrame(\n [\n {\n \"stable_id\": \"fake1\",\n \"chr\": \"1\",\n \"strand\": 1,\n \"tss\": 3000,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake2\",\n \"chr\": \"1\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake3\",\n \"chr\": \"2\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n ]\n ),\n # {transcript_stable_id, gene_stable_id, strand, start, end, exons},\n df_transcripts=pd.DataFrame(\n {\n \"transcript_stable_id\": [\"trans1a\", \"trans1b\", \"trans2\", \"trans3\"],\n \"gene_stable_id\": [\"fake1\", \"fake1\", \"fake2\", \"fake3\"],\n \"chr\": [\"1\", \"1\", \"1\", \"2\"],\n \"strand\": [1, 1, -1, -1],\n \"start\": [3100, 3000, 4900, 4900],\n \"stop\": [4900, 4000, 5400, 5400],\n \"exons\": [\n [(3100, 4900)],\n [(3000, 3500), (3750, 4000)],\n [(4900, 5000), (5100, 5400)],\n [(4900, 5000), (5100, 5200), (5222, 5400)],\n ],\n }\n ),\n )\n assert genome.transcripts[\"trans1a\"].introns == [(3000, 3100)]\n assert genome.transcripts[\"trans1b\"].introns == [(3500, 3750), (4000, 4900)]\n assert genome.transcripts[\"trans2\"].introns == [(5000, 5100)]\n assert genome.transcripts[\"trans3\"].introns == [(5000, 5100), (5200, 
5222)]\n\n\ndef test_gene_get_introns_merging():\n genome = DummyGenome(\n pd.DataFrame(\n [\n {\n \"stable_id\": \"fake1\",\n \"chr\": \"1\",\n \"strand\": 1,\n \"tss\": 3000,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake2\",\n \"chr\": \"1\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n {\n \"stable_id\": \"fake3\",\n \"chr\": \"2\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n },\n ]\n ),\n # {transcript_stable_id, gene_stable_id, strand, start, end, exons},\n df_transcripts=pd.DataFrame(\n {\n \"transcript_stable_id\": [\"trans1a\", \"trans1b\", \"trans2\", \"trans3\"],\n \"gene_stable_id\": [\"fake1\", \"fake1\", \"fake2\", \"fake3\"],\n \"chr\": [\"1\", \"1\", \"1\", \"2\"],\n \"strand\": [1, 1, -1, -1],\n \"start\": [3100, 3000, 4900, 4900],\n \"stop\": [4900, 4000, 5400, 5400],\n \"exons\": [\n [(3100, 4800)],\n [(3000, 3500), (3750, 4000)],\n [(4900, 5000), (5100, 5400)],\n [(4900, 5000), (5100, 5200), (5222, 5400)],\n ],\n }\n ),\n )\n print(genome.genes[\"fake1\"].introns_strict)\n\n def as_list(introns):\n return [list(introns[0]), list(introns[1])]\n\n assert as_list(genome.genes[\"fake1\"].introns_strict) == [[4800], [4900]]\n # assert genome.transcripts[\"trans1b\"].introns == [(3500, 3750), (4000, 4900)]\n assert as_list(genome.genes[\"fake2\"].introns_strict) == [[5000], [5100]]\n assert as_list(genome.genes[\"fake3\"].introns_strict) == [[5000, 5200], [5100, 5222]]\n assert as_list(genome.genes[\"fake1\"].introns_all) == [\n [3000, 4800, 3500, 4000],\n [3100, 4900, 3750, 4900],\n ]\n\n\ndef test_intronify_more_complex():\n transcript = {\n \"chr\": \"2R\",\n \"exons\": [\n (14_243_005, 14_244_766),\n (14_177_040, 14_177_355),\n (14_176_065, 14_176_232),\n (14_175_632, 14_175_893),\n (14_172_742, 14_175_244),\n (14_172_109, 14_172_226),\n (14_170_836, 14_172_015),\n (14_169_750, 14_170_749),\n (14_169_470, 14_169_683),\n (14_169_134, 14_169_402),\n (14_167_751, 14_169_018),\n (14_166_570, 14_167_681),\n ],\n \"gene_stable_id\": \"FBgn0010575\",\n \"start\": 14_166_570,\n \"stop\": 14_244_766,\n \"strand\": -1,\n \"transcript_stable_id\": \"FBtr0301547\",\n }\n gene = {\n \"biotype\": \"protein_coding\",\n \"chr\": \"2R\",\n \"description\": \"CG5580 [Source:FlyBase;GeneId:FBgn0010575]\",\n \"name\": \"sbb\",\n \"stable_id\": \"FBgn0010575\",\n \"strand\": -1,\n \"tes\": 14_166_570,\n \"tss\": 14_244_766,\n }\n genome = DummyGenome(pd.DataFrame([gene]), pd.DataFrame([transcript]))\n g = genome.transcripts[\"FBtr0301547\"]\n assert g.gene_stable_id == \"FBgn0010575\"\n introns = g.introns\n assert (\n np.array(introns)\n == [\n (14_167_681, 14_167_751),\n (14_169_018, 14_169_134),\n (14_169_402, 14_169_470),\n (14_169_683, 14_169_750),\n (14_170_749, 14_170_836),\n (14_172_015, 14_172_109),\n (14_172_226, 14_172_742),\n (14_175_244, 14_175_632),\n (14_175_893, 14_176_065),\n (14_176_232, 14_177_040),\n (14_177_355, 14_243_005),\n ]\n ).all()\n assert genome.get_chromosome_lengths() == default_chr_lengths\n assert genome.job_genes() is None\n\n\ndef test_intron_intervals_raises_on_inverted():\n transcript = {\n \"chr\": \"2R\",\n \"exons\": [\n (14_243_005, 14_244_766),\n (14_177_040, 14_177_355),\n (14_176_065, 14_176_232),\n (14_175_632, 14_175_893),\n (14_172_742, 14_175_244),\n (14_172_109, 14_172_226),\n (14_172_015, 14_170_836), # inverted\n (14_169_750, 14_170_749),\n (14_169_470, 14_169_683),\n (14_169_134, 14_169_402),\n (14_167_751, 14_169_018),\n 
(14_166_570, 14_167_681),\n ],\n \"gene_stable_id\": \"FBgn0010575\",\n \"start\": 14_166_570,\n \"stop\": 14_244_766,\n \"strand\": -1,\n \"transcript_stable_id\": \"FBtr0301547\",\n }\n gene = {\n \"biotype\": \"protein_coding\",\n \"chr\": \"2R\",\n \"description\": \"CG5580 [Source:FlyBase;GeneId:FBgn0010575]\",\n \"name\": \"sbb\",\n \"stable_id\": \"FBgn0010575\",\n \"strand\": -1,\n \"tes\": 14_166_570,\n \"tss\": 14_244_766,\n }\n genome = DummyGenome(pd.DataFrame([gene]), pd.DataFrame([transcript]))\n g = genome.transcripts[\"FBtr0301547\"]\n with pytest.raises(ValueError):\n g.introns\n\n\ndef test_get_gene_introns():\n genome = DummyGenome(\n pd.DataFrame(\n [\n {\n \"stable_id\": \"fake1\",\n \"chr\": \"1\",\n \"strand\": 1,\n \"tss\": 3000,\n \"tes\": 4900,\n \"description\": \"bla\",\n \"name\": \"bla1\",\n },\n {\n \"stable_id\": \"fake2\",\n \"chr\": \"1\",\n \"strand\": -1,\n \"tss\": 5500,\n \"tes\": 4900,\n \"description\": \"bla\",\n \"name\": \"bla2\",\n },\n {\n \"stable_id\": \"fake3\",\n \"chr\": \"2\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n \"name\": \"bla3\",\n },\n ]\n ),\n # {transcript_stable_id, gene_stable_id, strand, start, end, exons},\n df_transcripts=pd.DataFrame(\n {\n \"transcript_stable_id\": [\"trans1a\", \"trans1b\", \"trans2\", \"trans3\"],\n \"gene_stable_id\": [\"fake1\", \"fake1\", \"fake2\", \"fake3\"],\n \"chr\": [\"1\", \"1\", \"1\", \"2\"],\n \"strand\": [1, 1, -1, -1],\n \"start\": [3100, 3000, 4910, 4900],\n \"stop\": [4900, 4000, 5400, 5400],\n \"exons\": [\n [(3100, 4900)],\n [(3000, 3500), (3300, 3330), (3750, 4000)],\n [(4910, 5000), (5100, 5400)],\n [(4900, 5400)],\n ],\n }\n ),\n )\n one = genome.genes[\"fake1\"].introns_strict\n assert len(one[0]) == 0\n\n two = genome.genes[\"fake2\"].introns_strict\n\n def as_list(introns):\n return [list(introns[0]), list(introns[1])]\n\n assert as_list(two) == [[4900, 5000, 5400], [4910, 5100, 5500]]\n\n\ndef test_get_gene_exons_merged():\n genome = DummyGenome(\n pd.DataFrame(\n [\n {\n \"stable_id\": \"fake1\",\n \"chr\": \"1\",\n \"strand\": 1,\n \"tss\": 3000,\n \"tes\": 4900,\n \"description\": \"bla\",\n \"name\": \"bla1\",\n },\n {\n \"stable_id\": \"fake2\",\n \"chr\": \"1\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n \"name\": \"bla2\",\n },\n {\n \"stable_id\": \"fake3\",\n \"chr\": \"2\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n \"name\": \"bla3\",\n },\n {\n \"stable_id\": \"fake4\",\n \"chr\": \"2\",\n \"strand\": -1,\n \"tss\": 6400,\n \"tes\": 5900,\n \"description\": \"bla\",\n \"name\": \"bla3\",\n },\n ]\n ),\n # {transcript_stable_id, gene_stable_id, strand, start, end, exons},\n df_transcripts=pd.DataFrame(\n {\n \"transcript_stable_id\": [\n \"trans1a\",\n \"trans1b\",\n \"trans1c\",\n \"trans2\",\n \"trans3\",\n ],\n \"gene_stable_id\": [\"fake1\", \"fake1\", \"fake1\", \"fake2\", \"fake3\"],\n \"chr\": [\"1\", \"1\", \"1\", \"1\", \"2\"],\n \"strand\": [1, 1, 1, -1, -1],\n \"start\": [3100, 3000, 4850, 4910, 4900],\n \"stop\": [4900, 4000, 4950, 5400, 5400],\n \"exons\": [\n [(3100, 4900)],\n [(3000, 3500), (3300, 3330), (3750, 4000)],\n [(4850, 4950)],\n [(4910, 5000), (5100, 5400)],\n [(4900, 5400)],\n ],\n }\n ),\n )\n g = genome.genes[\"fake1\"]\n one = g.exons_merged\n assert (one[0] == [3000]).all()\n assert (one[1] == [4950]).all()\n g = genome.genes[\"fake2\"]\n two = g.exons_merged\n assert (two[0] == [4910, 5100]).all()\n assert (two[1] == 
[5000, 5400]).all()\n four = genome.genes[\"fake4\"].exons_merged\n assert len(four[0]) == 0\n\n\ndef test_get_gene_exons_protein_coding_merged():\n genome = DummyGenome(\n pd.DataFrame(\n [\n {\n \"stable_id\": \"fake1\",\n \"chr\": \"1\",\n \"strand\": 1,\n \"tss\": 3000,\n \"tes\": 4900,\n \"description\": \"bla\",\n \"name\": \"bla1\",\n \"biotype\": \"protein_coding\",\n },\n {\n \"stable_id\": \"fake2\",\n \"chr\": \"1\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n \"name\": \"bla2\",\n \"biotype\": \"protein_coding\",\n },\n {\n \"stable_id\": \"fake3\",\n \"chr\": \"2\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n \"name\": \"bla3\",\n \"biotype\": \"lincRNA\",\n },\n {\n \"stable_id\": \"fake4\",\n \"chr\": \"3\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n \"name\": \"bla3\",\n \"biotype\": \"noncoding_rna\",\n },\n ]\n ),\n # {transcript_stable_id, gene_stable_id, strand, start, end, exons},\n df_transcripts=pd.DataFrame(\n {\n \"transcript_stable_id\": [\n \"trans1a\",\n \"trans1b\",\n \"trans2\",\n \"trans3\",\n \"trans4\",\n ],\n \"biotype\": [\n \"protein_coding\",\n \"whatever\",\n \"protein_coding\",\n \"lincRNA\",\n \"non_coding_rna\",\n ],\n \"gene_stable_id\": [\"fake1\", \"fake1\", \"fake2\", \"fake3\", \"fake4\"],\n \"chr\": [\"1\", \"1\", \"1\", \"2\", \"3\"],\n \"strand\": [1, 1, -1, -1, -1],\n \"start\": [3100, 3000, 4910, 4900, 4900],\n \"stop\": [3200, 4000, 5400, 5400, 5400],\n \"exons\": [\n [(3100, 3200)],\n [(3000, 3500), (3300, 3330), (3750, 4000)],\n [(4910, 5000), (5100, 5400)],\n [(4900, 5400)],\n [],\n ],\n }\n ),\n )\n g = genome.genes[\"fake1\"]\n one = g.exons_protein_coding_merged\n assert (one[0] == [3100]).all()\n assert (one[1] == [3200]).all()\n\n g = genome.genes[\"fake2\"]\n two = g.exons_protein_coding_merged\n assert (two[0] == [4910, 5100]).all()\n assert (two[1] == [5000, 5400]).all()\n\n g = genome.genes[\"fake3\"]\n three = g.exons_protein_coding_merged\n assert (three[0] == [4900]).all()\n assert (three[1] == [5400]).all()\n four = genome.genes[\"fake4\"].exons_protein_coding_merged\n assert len(four[0]) == 0\n\n\ndef test_get_gene_exons_protein_coding_overlapping():\n genome = DummyGenome(\n pd.DataFrame(\n [\n {\n \"stable_id\": \"fake1\",\n \"chr\": \"1\",\n \"strand\": 1,\n \"tss\": 3000,\n \"tes\": 4900,\n \"description\": \"bla\",\n \"name\": \"bla1\",\n \"biotype\": \"protein_coding\",\n },\n {\n \"stable_id\": \"fake2\",\n \"chr\": \"1\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n \"name\": \"bla2\",\n \"biotype\": \"protein_coding\",\n },\n {\n \"stable_id\": \"fake3\",\n \"chr\": \"2\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n \"name\": \"bla3\",\n \"biotype\": \"lincRNA\",\n },\n {\n \"stable_id\": \"fake4\",\n \"chr\": \"3\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n \"name\": \"bla3\",\n \"biotype\": \"noncoding_rna\",\n },\n ]\n ),\n # {transcript_stable_id, gene_stable_id, strand, start, end, exons},\n df_transcripts=pd.DataFrame(\n {\n \"transcript_stable_id\": [\n \"trans1a\",\n \"trans1b\",\n \"trans2a\",\n \"trans2b\",\n \"trans3\",\n \"trans4\",\n ],\n \"biotype\": [\n \"protein_coding\",\n \"protein_coding\",\n \"protein_coding\",\n \"non_coding\",\n \"lincRNA\",\n \"non_coding_rna\",\n ],\n \"gene_stable_id\": [\n \"fake1\",\n \"fake1\",\n \"fake2\",\n \"fake2\",\n \"fake3\",\n 
\"fake4\",\n ],\n \"chr\": [\"1\", \"1\", \"1\", \"2\", \"2\", \"3\"],\n \"strand\": [1, 1, -1, -1, -1, -1],\n \"start\": [3100, 3000, 4910, 4950, 4900, 4900],\n \"stop\": [3200, 4000, 5400, 5300, 5400, 5400],\n \"exons\": [\n [(3100, 3200)],\n [(3000, 3500), (3300, 3330), (3750, 4000)],\n [(4910, 5000), (5100, 5400)],\n [(4950, 5300)],\n [(4900, 5400)],\n [],\n ],\n }\n ),\n )\n g = genome.genes[\"fake1\"]\n one = g.exons_protein_coding_overlapping\n print(one)\n assert (one[0] == [3000, 3100, 3300, 3750]).all()\n assert (one[1] == [3500, 3200, 3330, 4000]).all()\n\n g = genome.genes[\"fake2\"]\n two = g.exons_protein_coding_overlapping\n assert (two[0] == [4910, 5100]).all()\n assert (two[1] == [5000, 5400]).all()\n\n g = genome.genes[\"fake3\"]\n three = g.exons_protein_coding_overlapping\n assert (three[0] == [4900]).all()\n assert (three[1] == [5400]).all()\n four = genome.genes[\"fake4\"].exons_protein_coding_overlapping\n assert len(four[0]) == 0\n\n\ndef test_gene_exons_overlapping():\n genome = DummyGenome(\n pd.DataFrame(\n [\n {\n \"stable_id\": \"fake1\",\n \"chr\": \"1\",\n \"strand\": 1,\n \"tss\": 3000,\n \"tes\": 4900,\n \"description\": \"bla\",\n \"name\": \"bla1\",\n },\n {\n \"stable_id\": \"fake2\",\n \"chr\": \"1\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n \"name\": \"bla2\",\n },\n {\n \"stable_id\": \"fake3\",\n \"chr\": \"2\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n \"name\": \"bla3\",\n },\n {\n \"stable_id\": \"fake4\",\n \"chr\": \"2\",\n \"strand\": -1,\n \"tss\": 6400,\n \"tes\": 5900,\n \"description\": \"bla\",\n \"name\": \"bla3\",\n },\n ]\n ),\n # {transcript_stable_id, gene_stable_id, strand, start, end, exons},\n df_transcripts=pd.DataFrame(\n {\n \"transcript_stable_id\": [\"trans1a\", \"trans1b\", \"trans2\", \"trans3\"],\n \"gene_stable_id\": [\"fake1\", \"fake1\", \"fake2\", \"fake3\"],\n \"chr\": [\"1\", \"1\", \"1\", \"2\"],\n \"strand\": [1, 1, -1, -1],\n \"start\": [3100, 3000, 4910, 4900],\n \"stop\": [4900, 4000, 5400, 5400],\n \"exons\": [\n [(3100, 4900)],\n [(3000, 3500), (3300, 3330), (3750, 4000)],\n [(4910, 5000), (5100, 5400)],\n [(4900, 5400)],\n ],\n }\n ),\n )\n one = genome.genes[\"fake1\"].exons_overlapping\n print(one)\n assert (one[0] == [3000, 3100, 3300, 3750]).all()\n assert (one[1] == [3500, 4900, 3330, 4000]).all()\n assert len(genome.genes[\"fake4\"].exons_overlapping[0]) == 0\n\n\ndef test_gene_tss_tes():\n genome = DummyGenome(\n pd.DataFrame(\n [\n {\n \"stable_id\": \"fake1\",\n \"chr\": \"1\",\n \"strand\": 1,\n \"tss\": 3000,\n \"tes\": 4900,\n \"description\": \"bla\",\n \"name\": \"bla1\",\n },\n {\n \"stable_id\": \"fake2\",\n \"chr\": \"1\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n \"name\": \"bla2\",\n },\n {\n \"stable_id\": \"fake3\",\n \"chr\": \"2\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n \"name\": \"bla3\",\n },\n {\n \"stable_id\": \"fake4\",\n \"chr\": \"2\",\n \"strand\": -1,\n \"tss\": 6400,\n \"tes\": 5900,\n \"description\": \"bla\",\n \"name\": \"bla3\",\n },\n ]\n ),\n pd.DataFrame(\n {\n \"transcript_stable_id\": [\"trans1a\", \"trans1b\", \"trans2\", \"trans3\"],\n \"gene_stable_id\": [\"fake1\", \"fake1\", \"fake2\", \"fake3\"],\n \"chr\": [\"1\", \"1\", \"1\", \"2\"],\n \"strand\": [1, 1, -1, -1],\n \"start\": [3100, 3000, 4910, 4900],\n \"stop\": [4900, 4000, 5400, 5400],\n \"exons\": [\n [(3100, 4900)],\n [(3000, 3500), (3300, 
3330), (3750, 4000)],\n [(4910, 5000), (5100, 5400)],\n [(4900, 5400)],\n ],\n }\n ),\n )\n assert genome.genes[\"fake1\"].tss == 3000\n assert genome.genes[\"fake1\"].tes == 4900\n assert genome.genes[\"fake2\"].tss == 5400\n assert genome.genes[\"fake2\"].tes == 4900\n\n\ndef test_name_to_gene_id():\n genome = DummyGenome(\n pd.DataFrame(\n [\n {\n \"gene_stable_id\": \"fake1\",\n \"chr\": \"1\",\n \"strand\": 1,\n \"tss\": 3000,\n \"tes\": 4900,\n \"description\": \"bla\",\n \"name\": \"bla1\",\n },\n {\n \"gene_stable_id\": \"fake2\",\n \"chr\": \"1\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n \"name\": \"bla2\",\n },\n {\n \"gene_stable_id\": \"fake3\",\n \"chr\": \"2\",\n \"strand\": -1,\n \"tss\": 5400,\n \"tes\": 4900,\n \"description\": \"bla\",\n \"name\": \"bla3\",\n },\n {\n \"gene_stable_id\": \"fake4\",\n \"chr\": \"2\",\n \"strand\": -1,\n \"tss\": 6400,\n \"tes\": 5900,\n \"description\": \"bla\",\n \"name\": \"bla4\",\n },\n ]\n )\n )\n assert genome.name_to_gene_ids(\"bla1\") == set([\"fake1\"])\n assert genome.name_to_gene_ids(\"bla2\") == set([\"fake2\"])\n assert genome.name_to_gene_ids(\"bla3\") == set([\"fake3\"])\n assert genome.name_to_gene_ids(\"bla4\") == set([\"fake4\"])\n\n\ndef test_get_reads_in_exon():\n import mbf_sampledata\n import pysam\n\n genome = mbf_sampledata.get_human_22_fake_genome()\n bam = pysam.Samfile(\n mbf_sampledata.get_sample_path(\"mbf_align/rnaseq_spliced_chr22.bam\")\n )\n g = genome.genes[\"ENSG00000128228\"]\n reads = g.get_reads_in_exons(bam)\n assert reads\n start = 21642302 - 1\n stop = 21644299\n for r in reads:\n ov = r.get_overlap(start, stop)\n assert ov > 0\n\n\ndef test_transcript_coordinate_translation_fwd():\n import mbf_sampledata\n\n genome = mbf_sampledata.get_human_22_fake_genome()\n tr = genome.transcripts[\"ENST00000455558\"]\n map = tr.coordinate_translations\n assert len(map) == 657\n # first exon\n for ii in range(0, 47):\n assert (25565107 - 1 + ii) in map\n\n reference = [\n (25565107, 25565153, 47),\n (25604377, 25604453, 77),\n (25661576, 25661677, 102),\n (25663630, 25663704, 75),\n (25667739, 25667800, 62),\n (25672296, 25672347, 52),\n (25674437, 25674528, 92),\n (25678816, 25678915, 100),\n (25684519, 25684568, 50),\n ]\n offset = 0\n should = []\n for genome_start, genome_stop, length in reference:\n for ii in range(offset, offset + length):\n should.append(genome_start - 1 + ii - offset)\n offset += length\n ok = map == should\n assert ok # pytest spends an eternity formating the diff otherwise on failure..\n\n assert map[-1] == (25684568 - 1)\n\n\ndef test_transcript_coordinate_translation_reverse():\n import mbf_sampledata\n\n genome = mbf_sampledata.get_human_22_fake_genome()\n tr = genome.transcripts[\"ENST00000420242\"]\n map = tr.coordinate_translations\n assert len(map) == 525\n\n print(tr.exons)\n assert map[0] == (26512496 - 1)\n assert map[102] == (26512394 - 1)\n assert map[103] == (26512175 - 1)\n for ii in range(77):\n assert map[103 + ii] == (26512175 - 1 - ii)\n assert map[103 + 77] == (26510692 - 1)\n assert map[103 + 77 + 76 - 1] == (26510617 - 1)\n assert map[-1] == (26506878 - 1)\n assert len(set(map)) == len(map)\n", "id": "9994827", "language": "Python", "matching_score": 3.0610196590423584, "max_stars_count": 0, "path": "tests/test_gene.py" }, { "content": "import attr\nimport numpy as np\nfrom mbf_nested_intervals import IntervalSet\nfrom .common import reverse_complement\n\n\[email protected](slots=True)\nclass Gene:\n gene_stable_id = 
attr.ib()\n name = attr.ib()\n chr = attr.ib()\n start = attr.ib()\n stop = attr.ib()\n strand = attr.ib()\n biotype = attr.ib()\n transcripts = attr.ib()\n genome = attr.ib()\n\n @property\n def tss(self):\n return self.start if self.strand == 1 else self.stop\n\n @property\n def tes(self):\n return self.start if self.strand != 1 else self.stop\n\n @property\n def introns_strict(self):\n \"\"\"Get truly intronic regions - ie. not covered by any exon for this gene\n result is a tuple of np arrays, (starts, stops)\n By definition, the introns are disjoint\n \"\"\"\n gene_start = self.start\n gene_stop = self.stop\n exons = []\n for tr in self.transcripts:\n try:\n exons.extend(tr.exons)\n except TypeError: # pragma: no cover\n raise ValueError(f\"No exons defined for {tr.transcript_stable_id}\")\n return IntervalSet.from_tuples(exons).invert(gene_start, gene_stop).to_numpy()\n\n @property\n def introns_all(self):\n \"\"\"Get intronic regions - ie. an intron in any of the transcripts.\n May contain repetitions and overlaps and is not sorted!\n \"\"\"\n gene_start = self.start\n gene_stop = self.stop\n introns = [], []\n for tr in self.transcripts:\n try:\n starts, stops = (\n IntervalSet.from_tuples(tr.exons)\n .invert(gene_start, gene_stop)\n .to_numpy()\n )\n except TypeError: # pragma: no cover\n raise ValueError(f\"No exons defined for {tr.transcript_stable_id}\")\n introns[0].extend(starts)\n introns[1].extend(stops)\n return introns\n\n @property\n def _exons(self):\n \"\"\"Common code for exons_merged and exons_overlapping\"\"\"\n exons = []\n for tr in self.transcripts:\n try:\n exons.extend(tr.exons)\n except TypeError: # pragma: no cover\n raise ValueError(f\"No exons defined for {tr.transcript_stable_id}\")\n return exons\n\n @property\n def exons_merged(self):\n \"\"\"Get the merged exon regions for a gene given by gene_stable_id\n result is a tuple of np arrays, (starts, stops)\n \"\"\"\n return IntervalSet.from_tuples(self._exons).merge_hull().to_numpy()\n\n @property\n def exons_overlapping(self):\n \"\"\"Get the overlapping exon regions for a gene given by gene_stable_id\n result is a tuple of np arrays, (starts, stops)\n not sorted\n \"\"\"\n return self._reformat_exons(self._exons)\n\n def _reformat_exons(self, exons):\n \"\"\"Turn exons [(start, stop), ...] 
into [[start, ...], [stop, ...]\n \"\"\"\n exons.sort()\n return np.array([x[0] for x in exons]), np.array([x[1] for x in exons])\n\n @property\n def _exons_protein_coding(self):\n \"\"\"common code for the exons_protein_coding_* propertys\"\"\"\n exons = []\n for tr in self.transcripts:\n if tr.biotype == \"protein_coding\":\n exons.extend(tr.exons)\n return exons\n\n @property\n def exons_protein_coding_merged(self):\n \"\"\"Get the merged exon regions for a gene , only for protein coding exons.\n Empty result on non protein coding genes\n result is a a tuple of np arrays, (starts, stops)\n \"\"\"\n return (\n IntervalSet.from_tuples(self._exons_protein_coding).merge_hull().to_numpy()\n )\n\n @property\n def exons_protein_coding_overlapping(self):\n \"\"\"Get the overlapping exon regions for a gene, only for protein coding transcripts.\n Empty result on non protein coding genes\n\n Result is a DataFrame{chr, strand, start, stop}\n\n We test biotype on transcripts, not on genes,\n because for example polymorphismic_pseudogenes can have protein coding variants.\n \"\"\"\n return self._reformat_exons(self._exons_protein_coding)\n\n def get_reads_in_exons(self, bam, dedup=\"gene\"):\n \"\"\"Return a list of all the reads in the bam that\n have an overlap with the exon regions\n\n dedup may be:\n 'gene' - a given read may only count once in this gene (default)\n False - no dedup (but each alignment only once even if it spans multiple exons)\n 'primary_only': - only return reads with tag HI==1\n \"\"\"\n result = {}\n if dedup == \"gene\":\n for start, stop in zip(*self.exons_merged):\n for r in bam.fetch(self.chr, start, stop):\n if r.get_overlap(start, stop) > 0:\n result[r.query_name] = r\n elif dedup is False:\n for start, stop in zip(*self.exons_merged):\n for r in bam.fetch(self.chr, start, stop):\n if r.get_overlap(start, stop) > 0:\n # which amounts to no-dedud except if the aligner is\n # *really* buggy\n result[len(result)] = r\n elif dedup == \"primary_only\":\n for start, stop in zip(*self.exons_merged):\n for r in bam.fetch(self.chr, start, stop):\n if r.get_overlap(start, stop) > 0 and r.get_tag(\"HI\") == 1:\n # which amounts to no-dedud except if the aligner is\n # *really* buggy\n result[r.query_name, r.pos, r.cigarstring] = r\n else:\n raise ValueError(\"Invalid dedup value\")\n\n return list(result.values())\n\n\[email protected](slots=True)\nclass Transcript:\n transcript_stable_id = attr.ib()\n gene_stable_id = attr.ib()\n name = attr.ib()\n chr = attr.ib()\n start = attr.ib()\n stop = attr.ib()\n strand = attr.ib()\n biotype = attr.ib()\n exons = attr.ib()\n exon_stable_ids = attr.ib()\n gene = attr.ib()\n genome = attr.ib()\n\n @property\n def exons_tuples(self):\n return [(start, stop) for (start, stop) in self.exons]\n\n @property\n def introns(self):\n \"\"\"Return [(start, stop),...] for all introns in the transcript\n Order is in genomic order.\n Intron is defined as everything inside tss..tes that is not an exon,\n so if a gene, by any reason would extend beyond it's exons,\n that region would also be covered.\n \"\"\"\n gene_start = self.gene.start\n gene_stop = self.gene.stop\n exons = sorted(self.exons_tuples)\n return IntervalSet.from_tuples(exons).invert(gene_start, gene_stop).to_tuples()\n\n @property\n def cdna(self):\n \"\"\"Get the cdna sequence as defined by cdna.fasta\"\"\"\n return self.genome.get_cdna_sequence(self.transcript_stable_id)\n\n @property\n def mrna(self):\n \"\"\"The mRNA sequence after splicing.\n (forward strand - ie. 
ATG is ATG)\n\n unlike cdna, this is build dynamically from the genome_sequence\n and exon definition and is available for non-protein coding transcripts\n \"\"\"\n seq = \"\".join(\n [\n self.genome.get_genome_sequence(self.chr, start, stop)\n for (start, stop) in self.exons\n ]\n )\n if self.strand == -1:\n seq = reverse_complement(seq)\n return seq\n\n @property\n def coordinate_translations(self):\n \"\"\"Return a [genomic_coordinates]\n the transcript coordinate relative to the 5' start\n is the index\n \"\"\"\n result = []\n tr_pos = 0\n if self.strand == 1:\n for exon_start, exon_stop in self.exons:\n for ii in range(exon_start, exon_stop):\n # result.append((tr_pos, ii))\n result.append(ii)\n tr_pos += 1\n else:\n for exon_start, exon_stop in reversed(self.exons):\n for ii in range(exon_stop, exon_start, -1):\n # result.append((tr_pos, ii - 1))\n result.append(ii - 1)\n tr_pos += 1\n return result\n", "id": "5083659", "language": "Python", "matching_score": 2.163175106048584, "max_stars_count": 0, "path": "src/mbf_genomes/gene.py" }, { "content": "from .mbf_nested_intervals import * # noqa:F401\nfrom mbf_nested_intervals import IntervalSet\nimport pandas as pd\nimport itertools\n\n__version__ = '0.2.8'\n\n\ndef _df_to_tup(df):\n joined = []\n for ii, (chr, start, stop, strand) in enumerate(\n zip(df[\"chr\"], df[\"start\"], df[\"stop\"], df[\"strand\"])\n ):\n joined.append(((chr, strand), start, stop, ii))\n joined.sort(key=lambda tup: tup[0])\n return joined\n\n\ndef _df_to_tup_no_strand(df):\n joined = []\n for ii, (chr, start, stop) in enumerate(zip(df[\"chr\"], df[\"start\"], df[\"stop\"])):\n joined.append((chr, start, stop, ii))\n joined.sort(key=lambda tup: tup[0])\n return joined\n\n\ndef merge_df_intervals(df, iv_func=lambda iv: iv.merge_hull()):\n \"\"\"take a DataFrame {chr, start, end, *} and merge overlapping intervals.\n * is from the last entry.\n\n\n \"\"\"\n if not \"strand\" in df.columns:\n df = df.assign(strand=1)\n strand_added = True\n else:\n strand_added = False\n joined = _df_to_tup(df)\n\n out = []\n for chr_strand, sub_group in itertools.groupby(joined, lambda tup: tup[0]):\n args = [x[1:] for x in sub_group]\n iv = IntervalSet.from_tuples_with_id(args)\n new_order = iv_func(iv).to_tuples_last_id()\n new_df = df.iloc[[x[2] for x in new_order]].copy()\n new_df.loc[:, \"start\"] = [x[0] for x in new_order]\n new_df.loc[:, \"stop\"] = [x[1] for x in new_order]\n out.append(new_df)\n res = pd.concat(out)\n if strand_added:\n res = res.drop(\"strand\", axis=1)\n return res.sort_values([\"chr\", \"start\"])\n\n\ndef merge_df_intervals_with_callback(df, callback):\n \"\"\"take a {chr, start, end, *} dataframe and merge overlapping intervals, calling callback for group larger than one..\"\"\"\n if not \"strand\" in df:\n df = df.assign(strand=1)\n strand_added = True\n else:\n strand_added = False\n joined = _df_to_tup(df)\n result = []\n for chr, sub_group in itertools.groupby(joined, lambda tup: tup[0]):\n args = [x[1:] for x in sub_group]\n iv = IntervalSet.from_tuples_with_id(args)\n subsets = iv.merge_hull().to_tuples_with_id()\n for s in subsets:\n sub_df = df.iloc[list(s[2])].copy()\n sub_df.at[:, \"start\"] = s[0]\n sub_df.at[:, \"stop\"] = s[1]\n row_data = callback(sub_df)\n if not isinstance(\n row_data, dict\n ): # and not (isinstance(row_data, pd.core.series.Series) and len(row_data.shape) == 1):\n print(\"type\", type(row_data))\n # print 'len(shape)', len(row_data.shape)\n print(callback)\n raise ValueError(\n \"Merge_function returned something 
other than dict (writing to the pandas series directly is very slow, call to_dict() on it, then modify it.)\"\n )\n if set(row_data.keys()) != set(df.columns):\n raise ValueError(\n \"Merge_function return wrong columns. Expected %s, was %s\"\n % (df.columns, list(row_data.keys()))\n )\n row_data[\"start\"] = s[0]\n row_data[\"stop\"] = s[1]\n\n result.append(row_data)\n res = pd.DataFrame(result).sort_values([\"chr\", \"start\"])\n if strand_added:\n res = res.drop(\"strand\", axis=1)\n return res\n", "id": "2654618", "language": "Python", "matching_score": 1.950887680053711, "max_stars_count": 0, "path": "mbf_nested_intervals/__init__.py" }, { "content": "from .delayeddataframe import DelayedDataFrame # noqa:F401\nfrom . import annotator # noqa: F401\nfrom . import regions\nfrom . import genes\n\n__version__ = '0.4'\n\n\n_all__ = [DelayedDataFrame, annotator, regions, genes, __version__]\n", "id": "8761228", "language": "Python", "matching_score": 0.3226946294307709, "max_stars_count": 0, "path": "src/mbf_genomics/__init__.py" }, { "content": "import pandas as pd\nimport numpy as np\n\n\ndef read_prism_tables(filename):\n try:\n import BeautifulSoup as bs\n import HTMLParser\n\n html_decode = HTMLParser.unescape\n except ImportError:\n import bs4 as bs\n\n # import html\n html_decode = lambda x: x # bs4 already does this\n with open(filename) as op:\n x = bs.BeautifulSoup(op.read(), \"lxml\")\n result = []\n for t in x.findAll(\"table\"):\n titles = [html_decode(title.text) for title in t.findAll(\"title\")]\n columns = []\n max_length = 0\n for subcolumn in t.findAll(\"subcolumn\"):\n c = []\n float_count = 0\n for d in subcolumn.findAll(\"d\"):\n dt = html_decode(d.text)\n if dt == \"\":\n dt = np.nan\n try:\n dt = float(dt)\n float_count += 1\n except ValueError:\n if dt.count(\",\") == 1 and dt.count(\".\") == 0:\n try:\n dt = float(dt.replace(\",\", \".\"))\n float_count += 1\n except ValueError:\n pass\n c.append(dt)\n if float_count <= 5:\n c = [\"\" if isinstance(x, float) and np.isnan(x) else x for x in c]\n columns.append(c)\n max_length = max(max_length, len(c))\n for c in columns:\n while len(c) < max_length:\n c.append(np.nan)\n df = pd.DataFrame(dict(zip(titles, columns)))[titles]\n result.append(df)\n return result\n", "id": "3991058", "language": "Python", "matching_score": 0.5659295916557312, "max_stars_count": 0, "path": "src/mbf_fileformats/prism.py" }, { "content": "import unittest\nimport sys\nsys.path.append('../pysam')\nimport _marsnpdiff\nimport math\nimport numpy as np\nimport numexpr\nfrom __init__ import find_snps\n\n# retrieved from http://www.molecularecologist.com/next-gen-table-3c-2014/\nread_error_prob = 0.001\nll_99 = math.log(1 - read_error_prob)\nll_003 = math.log(read_error_prob / 3)\nll_005 = math.log(read_error_prob / 2)\nll_495 = math.log((1 - read_error_prob) / 2)\nll_25 = math.log(0.25)\n\n\ndef _logLikelihood2(count_a, count_c, count_g, count_t):\n res = np.zeros((11, count_a.shape[0]), dtype=np.float)\n count_a__ll_003 = count_a * ll_003\n count_c__ll_003 = count_c * ll_003\n count_g__ll_003 = count_g * ll_003\n count_t__ll_003 = count_t * ll_003\n count_a__ll_005 = count_a * ll_005\n count_c__ll_005 = count_c * ll_005\n count_g__ll_005 = count_g * ll_005\n count_t__ll_005 = count_t * ll_005\n count_a__ll_495 = count_a * ll_495\n count_c__ll_495 = count_c * ll_495\n count_g__ll_495 = count_g * ll_495\n count_t__ll_495 = count_t * ll_495\n\n d = {'count_a': count_a, 'count_c': count_c, 'count_g': count_g, 'count_t': count_t,\n 'count_a__ll_003': 
count_a__ll_003,\n 'count_c__ll_003': count_c__ll_003,\n 'count_g__ll_003': count_g__ll_003, 'count_t__ll_003': count_t__ll_003, 'count_a__ll_005': count_a__ll_005, 'count_c__ll_005': count_c__ll_005, 'count_g__ll_005': count_g__ll_005, 'count_t__ll_005': count_t__ll_005, 'count_a__ll_495': count_a__ll_495, 'count_c__ll_495': count_c__ll_495, 'count_g__ll_495': count_g__ll_495, 'count_t__ll_495': count_t__ll_495,\n 'll_99': ll_99, 'll_003': ll_003, 'll_005': ll_005, 'll_495': ll_495, 'll_25': ll_25}\n res[0, :] = numexpr.evaluate(\n \"(count_a * ll_99 + count_c__ll_003 + count_g__ll_003 + count_t__ll_003)\", d) # 'AA'), 0\n res[1, :] = numexpr.evaluate(\n \"(count_a__ll_495 + count_c__ll_495 + count_g__ll_005 + count_t__ll_005)\", d) # 'AC'),1\n res[2, :] = numexpr.evaluate(\n \"(count_a__ll_495 + count_c__ll_005 + count_g__ll_495 + count_t__ll_005)\", d) # 'AG'),2\n res[3, :] = numexpr.evaluate(\n \"(count_a__ll_495 + count_c__ll_005 + count_g__ll_005 + count_t__ll_495)\", d) # 'AT'),3\n res[4, :] = numexpr.evaluate(\n \"(count_c * ll_99 + count_a__ll_003 + count_g__ll_003 + count_t__ll_003)\", d) # 'CC'), 4\n res[5, :] = numexpr.evaluate(\n \"(count_a__ll_005 + count_c__ll_495 + count_g__ll_495 + count_t__ll_005)\", d) # 'CG'),5\n res[6, :] = numexpr.evaluate(\n \"(count_a__ll_005 + count_c__ll_495 + count_g__ll_005 + count_t__ll_495)\", d) # 'CT'),6\n res[7, :] = numexpr.evaluate(\n \"(count_g * ll_99 + count_a__ll_003 + count_c__ll_003 + count_t__ll_003)\", d) # 'GG'), 7\n res[8, :] = numexpr.evaluate(\n \"(count_a__ll_005 + count_c__ll_005 + count_g__ll_495 + count_t__ll_495)\", d) # 'GT'), 8\n res[9, :] = numexpr.evaluate(\n \"(count_t * ll_99 + count_a__ll_003 + count_c__ll_003 + count_g__ll_003)\", d) # 'TT'), 9\n res[10, :] = numexpr.evaluate(\n \"(count_a * ll_25 + count_c * ll_25 + count_g * ll_25 + count_t * ll_25)\", d) # 'NN'), 10\n return res\n\n\nclass LLTests(unittest.TestCase):\n\n def test_ll(self):\n count_a = [100, 0, 0, 100, 25]\n count_c = [0, 200, 0, 0, 25]\n count_g = [0, 0, 100, 0, 25]\n count_t = [0, 0, 0, 100, 25]\n count_a = np.array(count_a, dtype=np.float32)\n count_c = np.array(count_c, dtype=np.float32)\n count_g = np.array(count_g, dtype=np.float32)\n count_t = np.array(count_t, dtype=np.float32)\n ll = _marsnpdiff.logLikelihood(count_a, count_c, count_g, count_t)\n should = _logLikelihood2(count_a, count_c, count_g, count_t)\n should = should.astype(np.float32)\n self.assertEqual(len(ll), 11)\n self.assertEqual(len(ll[0]), 5)\n for p in xrange(5):\n # print p\n for i in xrange(0, 11):\n # print i,\n # if abs(ll[i][p]- should[i][p]) > 0.0001:\n # print '!!!',\n # print \"%.15f\" % ll[i][p],\"%.15f\" % should[i][p]\n # #print \"%.15f\" % round(ll[i][p]-should[i][p], 3)\n self.assertAlmostEquals(ll[i][p], should[i][p], 3)\n\n self.assertAlmostEqual(ll[0][0], 100 * ll_99)\n self.assertAlmostEqual(ll[4][1], 200 * ll_99)\n self.assertAlmostEqual(ll[7][2], 100 * ll_99)\n self.assertAlmostEqual(ll[3][3], 100 * ll_495 * 2, 4)\n\n def test_llMax(self):\n input = [\n [-1, 0, 0, -1, -1, -5], # j0\n [-2, 0, 0, -0.01, -2.1, -6], # 1\n [-4, 0, 0, -1, -0.5, -7], # 2\n [-5, 0, 0, -1, -1, -1], # 3\n [-1, 0, 0, -1, -1, -5], # 4\n [-2, 0, 0, -1, -1, -100], # 5\n [-2, 0, 0, -1, -1, -12], # 6\n [-2, 0, 0, -1, -1, -23], # 7\n [-2, 0, 0, -1, -1, -1.3], # 8\n [-2, 0, 10, -1, -1, -1.11], # 9\n [-2, 0, 0, -1, -1, -1.0001], # 10\n ]\n input = [np.array(x, dtype=np.float32) for x in input]\n valueMax, argMax = _marsnpdiff.llMax(input)\n self.assertTrue((np.array([0, 0, 9, 1, 2, 3]) 
== argMax).all())\n self.assertFalse(\n (np.abs(np.array([-1, 0, 10, -0.01, -0.5, -1]) - valueMax) > 0.0001).any())\n\n def test_singular_instances(self):\n def make_counts(pos):\n counts = []\n for i in xrange(4):\n if i == pos:\n counts.append(np.array([51], np.float32))\n else:\n counts.append(np.array([1], np.float32))\n return counts\n # make sure the 'reference' is identical\n ll_aa = _logLikelihood2(*make_counts(0))[0]\n ll_cc = _logLikelihood2(*make_counts(1))[4]\n ll_gg = _logLikelihood2(*make_counts(2))[7]\n ll_tt = _logLikelihood2(*make_counts(3))[9]\n self.assertEqual(ll_aa, ll_cc)\n self.assertEqual(ll_aa, ll_gg)\n self.assertEqual(ll_aa, ll_tt)\n self.assertEqual(ll_cc, ll_gg)\n self.assertEqual(ll_cc, ll_tt)\n self.assertEqual(ll_gg, ll_tt)\n # make sure the implementation is identical\n ma_ll_aa = _marsnpdiff.logLikelihood(*make_counts(0))[0]\n ma_ll_cc = _marsnpdiff.logLikelihood(*make_counts(1))[4]\n ma_ll_gg = _marsnpdiff.logLikelihood(*make_counts(2))[7]\n ma_ll_tt = _marsnpdiff.logLikelihood(*make_counts(3))[9]\n self.assertEqual(ma_ll_aa, ma_ll_cc)\n self.assertEqual(ma_ll_aa, ma_ll_gg)\n self.assertEqual(ma_ll_aa, ma_ll_tt)\n self.assertEqual(ma_ll_cc, ma_ll_gg)\n self.assertEqual(ma_ll_cc, ma_ll_tt)\n self.assertEqual(ma_ll_gg, ma_ll_tt)\n\n # that's enough, the rest must be the same\n self.assertAlmostEqual(ma_ll_aa, ll_aa, 4)\n\n def test_all_combinations(self):\n for a in [0, 51]:\n for c in [0, 51]:\n for g in [0, 51]:\n for t in [0, 51]:\n counts = [\n np.array([a], np.float32),\n np.array([c], np.float32),\n np.array([g], np.float32),\n np.array([t], np.float32),\n ]\n ref = _logLikelihood2(*counts)\n impl = _marsnpdiff.logLikelihood(*counts)\n for ii in xrange(0, 11):\n self.assertAlmostEqual(ref[ii], impl[ii], 4)\n\n def test_problematic(self):\n counts_a = [\n np.array([0., 51.], dtype=np.float32),\n np.array([0., 0.], dtype=np.float32),\n np.array([51., 0.], dtype=np.float32),\n np.array([0., 0.], dtype=np.float32),\n ]\n counts_b = [\n np.array([0., 0.], dtype=np.float32),\n np.array([0., 0.], dtype=np.float32),\n np.array([0., 0.], dtype=np.float32),\n np.array([51., 51.], dtype=np.float32),\n ]\n res = _marsnpdiff.score_coverage_differences(counts_a, counts_b)\n score = res[3]\n self.assertEqual(score[0], score[1])\n\n def test_simple(self):\n coverage_a = (\n np.array([100, 0, 0, 25], dtype=np.float32),\n np.array([0, 100, 0, 25], dtype=np.float32),\n np.array([0, 0, 100, 25], dtype=np.float32),\n np.array([0, 0, 100, 25], dtype=np.float32),\n\n )\n coverage_b = (\n np.array([0, 0, 100, 50], dtype=np.float32),\n np.array([100, 0, 0, 50], dtype=np.float32),\n np.array([0, 0, 0, 0], dtype=np.float32),\n np.array([0, 0, 100, 0], dtype=np.float32),\n )\n candidates, ccov_a, ccov_b, scores = _marsnpdiff.score_coverage_differences(\n coverage_a, coverage_b)\n self.assertTrue((candidates == [0, 2, 3]).all())\n self.assertTrue((ccov_a[0] == [100, 0, 25]).all())\n self.assertTrue((ccov_a[1] == [0, 0, 25]).all())\n self.assertTrue((ccov_a[2] == [0, 100, 25]).all())\n self.assertTrue((ccov_a[3] == [0, 100, 25]).all())\n\n self.assertTrue((ccov_b[0] == [0, 100, 50]).all())\n self.assertTrue((ccov_b[1] == [100, 0, 50]).all())\n self.assertTrue((ccov_b[2] == [0, 0, 0]).all())\n self.assertTrue((ccov_b[3] == [0, 100, 0]).all())\n\n\nclass BamTests(unittest.TestCase):\n\n def test_simple(self):\n res = find_snps(\n ['sample_data/sample_a.bam', ],\n ['sample_data/sample_b.bam'],\n {'1': int(1e6)},\n ll_threshold=0,\n chunk_size=100)\n self.assertEqual(2, len(res))\n 
self.assertTrue((res['pos'] == [10556, 10568]).all())\n self.assertTrue(((res['score'] - [10556, 10568]) < 0.0001).all())\n self.assertTrue((res['pos'] == [10556, 10568]).all())\n self.assertEqual(res['score'][0], res['score'][1])\n\n def test_simple_twice(self):\n res = find_snps(\n ['sample_data/sample_a.bam', 'sample_data/sample_a.bam'],\n ['sample_data/sample_b.bam'],\n {'1': int(1e6)},\n ll_threshold=0,\n chunk_size=100)\n self.assertEqual(2, len(res))\n self.assertTrue((res['pos'] == [10556, 10568]).all())\n self.assertTrue(((res['score'] - [10556, 10568]) < 0.0001).all())\n self.assertTrue((res['pos'] == [10556, 10568]).all())\n self.assertEqual(res['score'][0], res['score'][1])\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "8496917", "language": "Python", "matching_score": 2.5856330394744873, "max_stars_count": 0, "path": "tests.py" }, { "content": "\"\"\"\nMarSNPDiff is a very simple and fast differential SNP caller.\nIt examines two sets of bam files and gets you a list of most likely\nsingle nucleotide polymorphisms. It does not cover indels, fusions etc.\n\nThe basic idea is to calculate coverage for each nucleotide at each position,\nand calculate a log likelihood for each haplotype.\nA sequencer error rate of 1% is assumed for the LL calculation.\n\nThis is done for both lanes (A and B), then positions where the maximum LL haplotype\ndiffers are scored as follows.\nll_differing = maxLL_A + maxLL_B\nll_same_haplotype_from_A = maxLL_A + ll_B(argMaxLL_A)\nll_same_haplotype_from_B = maxLL_B + ll_A(argMaxLL_B)\nscore = ll_differing - max(ll_same_haplotype_from_A, ll_same_haplotype_from_B)\n\nThe more positive the score, the better the SNP.\nResult is a pandas.DataFrame with chr, pos, score and coverage information.\n\"\"\"\n\nimport pysam\nimport pandas\nimport _marsnpdiff\nimport multiprocessing\nimport random\n\n\ndef find_differing_snps_from_vector(coverage_a, coverage_b, ll_threshold = 50):\n positions, candidate_coverage_a, candidate_coverage_b, scores, haplotypeA, haplotypeB = _marsnpdiff.score_coverage_differences(coverage_a, coverage_b)\n ok = scores >= ll_threshold\n candidate_coverage_a[0] = candidate_coverage_a[0][ok]\n candidate_coverage_a[1] = candidate_coverage_a[1][ok]\n candidate_coverage_a[2] = candidate_coverage_a[2][ok]\n candidate_coverage_a[3] = candidate_coverage_a[3][ok]\n candidate_coverage_b[0] = candidate_coverage_b[0][ok]\n candidate_coverage_b[1] = candidate_coverage_b[1][ok]\n candidate_coverage_b[2] = candidate_coverage_b[2][ok]\n candidate_coverage_b[3] = candidate_coverage_b[3][ok]\n\n return {\n 'positions': positions[ok],\n 'coverageA': candidate_coverage_a,\n 'coverageB': candidate_coverage_b,\n 'scores': scores[ok],\n 'haplotypeA': haplotypeA[ok],\n 'haplotypeB': haplotypeB[ok],\n }\n\n\ndef count_coverage_multiple(samfiles, chr, start, stop, quality_threshold):\n \"\"\"Count and add the coverage of multiple bam files\"\"\"\n coverage = list(_marsnpdiff.count_coverage(samfiles[0], chr, start, stop, quality_threshold))\n for sf in samfiles[1:]:\n cov2 = _marsnpdiff.count_coverage(sf, chr, start, stop, quality_threshold)\n coverage[0] += cov2[0]\n coverage[1] += cov2[1]\n coverage[2] += cov2[2]\n coverage[3] += cov2[3]\n return tuple(coverage)\n\n\ndef find_differing_snps(sam_filenames_a, sam_filenames_b, chr, start, stop, quality_threshold = 15, ll_threshold = 50):\n \"\"\"Find actual differences between two sets of bam files.\n @samfiles_a (and _b) may be a single pysam.AlignmentFile/SamFile, or lists of such - in this case the coverage 
in each set is added together\n @quality_threshold controls which reads/bases are actually considered.\n @ll_threshold: Log likelihood difference must be above this value for the SNP to be reported\n \"\"\"\n if hasattr(sam_filenames_a, 'fetch'):\n sam_filenames_a = [sam_filenames_a]\n if hasattr(sam_filenames_b, 'fetch'):\n sam_filenames_b = [sam_filenames_b]\n samfiles_a = [pysam.Samfile(x) for x in sam_filenames_a]\n samfiles_b = [pysam.Samfile(x) for x in sam_filenames_b]\n coverage_a = count_coverage_multiple(samfiles_a, chr, start, stop, quality_threshold)\n coverage_b = count_coverage_multiple(samfiles_b, chr, start, stop, quality_threshold)\n res = find_differing_snps_from_vector(coverage_a, coverage_b, ll_threshold)\n res['positions'] += start\n res['chr'] = chr\n return res\n\n\ndef iter_chromosome_chunks(chromosome_lengths, chunk_size = 5e6):\n \"\"\"Cut all chromosomes into mangable chunks\"\"\"\n chunks = []\n chunk_size = int(chunk_size)\n for chr in chromosome_lengths:\n for start in xrange(0, chromosome_lengths[chr], chunk_size):\n chunks.append((chr, start, start + chunk_size))\n random.shuffle(chunks)\n return chunks\n\n\ndef _map_find_differing_snps(args):\n \"\"\"Adapter sinnce pool.map only passes one argument\"\"\"\n return find_differing_snps(*args)\n\n\ndef find_snps(\n bam_filenames_a,\n bam_filenames_b,\n chromosome_lengths,\n quality_threshold = 15,\n ll_threshold = 50, cores_to_use = 4,\n chunk_size = 5e6):\n\n p = multiprocessing.Pool(processes=cores_to_use)\n all_snps_found = p.map(_map_find_differing_snps,\n [[bam_filenames_a, bam_filenames_b, chr, start, stop, quality_threshold, ll_threshold] for (chr, start, stop) in iter_chromosome_chunks(chromosome_lengths, chunk_size)])\n res = {'chr': [], 'pos': [], 'score': [],\n 'A_A': [],\n 'A_C': [],\n 'A_G': [],\n 'A_T': [],\n 'B_A': [],\n 'B_C': [],\n 'B_G': [],\n 'B_T': [], \n 'haplotypeA': [],\n 'haplotypeB': [],\n }\n for found in all_snps_found:\n res['chr'].extend([found['chr']] * len(found['positions']))\n res['pos'].extend(found['positions'])\n res['score'].extend(found['scores'])\n res['A_A'].extend(found['coverageA'][0])\n res['A_C'].extend(found['coverageA'][1])\n res['A_G'].extend(found['coverageA'][2])\n res['A_T'].extend(found['coverageA'][3])\n res['B_A'].extend(found['coverageB'][0])\n res['B_C'].extend(found['coverageB'][1])\n res['B_G'].extend(found['coverageB'][2])\n res['B_T'].extend(found['coverageB'][3])\n res['haplotypeA'].extend([_marsnpdiff.llPosToHaplotype[x] for x in found['haplotypeA']])\n res['haplotypeB'].extend([_marsnpdiff.llPosToHaplotype[x] for x in found['haplotypeB']])\n p.close()\n p.join()\n return pandas.DataFrame(res)[['chr', 'pos', 'score', 'A_A', 'B_A', 'A_C', 'B_C',\n 'A_G', 'B_G', 'A_T', 'B_T', 'haplotypeA', 'haplotypeB']]\n\n\nif __name__ == '__main__':\n # TODO: implement command line interface\n print \"no command line interface has been implemented so far\"\n pass\n", "id": "7904037", "language": "Python", "matching_score": 1.8136876821517944, "max_stars_count": 0, "path": "__init__.py" }, { "content": "# and import pypipegraph as ppg\n# import time\n# from pathlib import Path\n# from .. 
import find_code_path\nfrom .externals import ExternalAlgorithm\nfrom .util import download_zip_and_turn_into_tar_gzip\nfrom pathlib import Path\nimport pypipegraph as ppg\n\n\nclass Snpdiffrs(ExternalAlgorithm):\n @property\n def name(self):\n return \"snpdiffrs\"\n\n def run_n_to_n(\n self,\n output_directory,\n sample_to_bams: dict,\n min_score=None,\n quality_threshold=None,\n filter_homo_polymer_threshold=None,\n chromosomes=None,\n ):\n output_directory = Path(output_directory)\n output_directory.mkdir(exist_ok=True, parents=True)\n for name, bams in sample_to_bams.items():\n if not isinstance(bams, list):\n raise ValueError(\"Sample-bams must be a list per sample\")\n\n def write_toml(output_filename):\n toml = [f\"output_dir = '{str(output_directory)}'\"]\n if min_score:\n toml.append(f\"min_score = {min_score:.2f}\")\n if quality_threshold:\n toml.append(f\"quality_threshold = {int(quality_threshold)}\")\n if filter_homo_polymer_threshold:\n toml.append(\n f\"filter_homo_polymer_threshold = {int(filter_homo_polymer_threshold)}\"\n )\n if chromosomes:\n toml.append(f\"chromosomes = {chromosomes}\")\n toml.append(\"[samples]\")\n for sample_name, bams in sample_to_bams.items():\n toml.append(f\"\\t{sample_name} = {[str(x) for x in bams]}\")\n Path(output_filename).write_text(\"\\n\".join(toml))\n\n prep_job = ppg.FileGeneratingJob(\n output_directory / \"input.toml\", write_toml\n ).depends_on(\n ppg.ParameterInvariant(\n output_directory / \"input.toml\",\n (\n sample_to_bams,\n min_score,\n quality_threshold,\n filter_homo_polymer_threshold,\n chromosomes,\n ),\n )\n )\n res = self.run(output_directory, [str(output_directory / \"input.toml\")])\n res.depends_on(prep_job)\n\n def build_cmd(self, output_directory, ncores, arguments): # pragma: no cover\n return [str(self.path / \"snpdiffrs\")] + arguments\n\n @property\n def multi_core(self): # pragma: no cover\n return True\n\n def get_latest_version(self):\n return \"0.1.1\"\n\n def fetch_version(self, version, target_filename): # pragma: no cover\n import tempfile\n from pathlib import Path\n import subprocess\n\n download_zip_and_turn_into_tar_gzip(\n \"https://github.com/TyberiusPrime/snpdiffrs/releases/download/%s/snpdiffrs_linux_%s.zip\"\n % (version, version),\n target_filename,\n [\"snpdiffrs\"],\n )\n", "id": "1133151", "language": "Python", "matching_score": 3.465609312057495, "max_stars_count": 0, "path": "src/mbf_externals/snpdiffrs.py" }, { "content": "from pathlib import Path\nimport time\nimport subprocess\nimport os\nimport stat\nfrom abc import ABC, abstractmethod\nimport pypipegraph as ppg\nfrom .util import lazy_property, sort_versions\n\n_global_store = None\n\n\ndef change_global_store(new_store):\n global _global_store\n _global_store = new_store\n\n\ndef get_global_store():\n return _global_store\n\n\nclass DownloadDiscrepancyException(ValueError):\n pass\n\n\ndef reproducible_tar(target_tar, folder, cwd):\n \"\"\"Create tars that look the same every time.\"\"\"\n # see http://h2.jaguarpaw.co.uk/posts/reproducible-tar/\n\n target_tar = str(target_tar)\n folder = str(folder)\n\n cmd = [\n \"tar\",\n \"--format=posix\",\n \"--pax-option=exthdr.name=%d/PaxHeaders/%f,delete=atime,delete=ctime,delete=mtime\",\n \"--mtime=1970-01-01 00:00:00Z\",\n \"--sort=name\",\n \"--numeric-owner\",\n \"--owner=0\",\n \"--group=0\",\n \"--mode=go+rwX,u+rwX\",\n \"-cvf\",\n target_tar,\n folder,\n ]\n subprocess.check_call(cmd, cwd=cwd)\n\n\nclass ExternalAlgorithm(ABC):\n \"\"\"Together with an ExternalAlgorithmStore (or the 
global one),\n ExternalAlgorithm encapsulates a callable algorithm such as a high throughput aligner.\n \"\"\"\n\n def __init__(self, version=\"_last_used\", store=None, **kwargs):\n \"\"\"\n Parameters\n ----------\n version: str\n either one of the available versions from the store,\n _latest (always the latest!) or\n _last_used - the last used one, or the newest if this is the first time\n (stored '.mbf_external_versions' )\n\n \"\"\"\n super().__init__(**kwargs)\n if store is None:\n store = _global_store\n self.store = store\n\n if version == \"_last_used\":\n actual_version = self._last_used_version\n if actual_version is None:\n actual_version = \"_latest\"\n else:\n actual_version = version\n if actual_version == \"_latest\":\n self.version = self.get_latest_version()\n self._fetch_and_check(self.version)\n elif actual_version == \"_fetching\": # pragma: no cover\n self.version = \"_fetching\"\n else:\n if actual_version in store.get_available_versions(self.name):\n self.version = actual_version\n else:\n self._fetch_and_check(actual_version)\n self.version = actual_version\n self._store_used_version()\n self.path = store.get_unpacked_path(self.name, self.version)\n\n @lazy_property\n def _last_used_version(self):\n try:\n lines = Path(\".mbf_external_versions\").read_text().strip().split(\"\\n\")\n for l in lines:\n if l.strip():\n name, version = l.split(\"==\")\n if name == self.name:\n return version\n except OSError:\n pass\n return None\n\n def _store_used_version(self):\n last_used = self._last_used_version\n if (\n last_used is None\n or sort_versions([last_used, self.version])[0] == last_used\n ):\n try:\n p = Path(\".mbf_external_versions\")\n lines = p.read_text().strip().split(\"\\n\")\n lines = [x for x in lines if not x.startswith(self.name + \"==\")]\n except OSError:\n lines = []\n lines.append(f\"{self.name}=={self.version}\")\n p.write_text(\"\\n\".join(lines) + \"\\n\")\n\n @property\n @abstractmethod\n def name(self):\n pass # pragma: no cover\n\n @abstractmethod\n def build_cmd(self, output_directory, ncores, arguments):\n pass # pragma: no cover\n\n @property\n def multi_core(self):\n return False\n\n def run(\n self,\n output_directory,\n arguments=None,\n cwd=None,\n call_afterwards=None,\n additional_files_created=None,\n ):\n \"\"\"Return a job that runs the algorithm and puts the\n results in output_directory.\n Note that assigning different output_directories to different\n versions is your problem.\n \"\"\"\n output_directory = Path(output_directory)\n output_directory.mkdir(parents=True, exist_ok=True)\n sentinel = output_directory / \"sentinel.txt\"\n filenames = [sentinel]\n if additional_files_created:\n if isinstance(additional_files_created, (str, Path)):\n additional_files_created = [additional_files_created]\n filenames.extend(additional_files_created)\n\n job = ppg.MultiFileGeneratingJob(\n filenames,\n self.get_run_func(\n output_directory, arguments, cwd=cwd, call_afterwards=call_afterwards\n ),\n ).depends_on(\n ppg.FileChecksumInvariant(\n self.store.get_zip_file_path(self.name, self.version)\n ),\n ppg.FunctionInvariant(str(sentinel) + \"_call_afterwards\", call_afterwards),\n )\n job.ignore_code_changes()\n job.depends_on(\n ppg.FunctionInvariant(\n job.job_id + \"_build_cmd_func\", self.__class__.build_cmd\n )\n )\n if self.multi_core:\n job.cores_needed = -1\n return job\n\n def get_run_func(self, output_directory, arguments, cwd=None, call_afterwards=None):\n def do_run():\n self.store.unpack_version(self.name, self.version)\n sentinel = 
output_directory / \"sentinel.txt\"\n stdout = output_directory / \"stdout.txt\"\n stderr = output_directory / \"stderr.txt\"\n cmd_out = output_directory / \"cmd.txt\"\n\n op_stdout = open(stdout, \"wb\")\n op_stderr = open(stderr, \"wb\")\n cmd = [\n str(x)\n for x in self.build_cmd(\n output_directory,\n ppg.util.global_pipegraph.rc.cores_available\n if self.multi_core\n else 1,\n arguments,\n )\n ]\n cmd_out.write_text(\" \".join(cmd))\n start_time = time.time()\n print(\" \".join(cmd))\n p = subprocess.Popen(cmd, stdout=op_stdout, stderr=op_stderr, cwd=cwd)\n p.communicate()\n op_stdout.close()\n op_stderr.close()\n ok = self.check_success(\n p.returncode, stdout.read_bytes(), stderr.read_bytes()\n )\n if ok is True:\n runtime = time.time() - start_time\n sentinel.write_text(\n f\"run time: {runtime:.2f} seconds\\nreturn code: {p.returncode}\"\n )\n if call_afterwards is not None:\n call_afterwards()\n else:\n raise ValueError(\n f\"{self.name} run failed. Error was: {ok}. Cmd was: {cmd}\"\n )\n\n return do_run\n\n def check_success(self, return_code, stdout, stderr):\n if return_code == 0:\n return True\n else:\n return f\"Return code != 0: {return_code}\"\n\n def _fetch_and_check(self, version):\n if self.store.no_downloads:\n print(\"WARNING: Downloads disabled for this store\")\n return\n target_filename = self.store.get_zip_file_path(self.name, version).absolute()\n if target_filename.exists():\n return\n self.fetch_version(version, target_filename)\n try:\n checksum = ppg.util.checksum_file(target_filename)\n except OSError: # pragma: no cover\n raise ValueError(\"Algorithm did not download correctly\")\n md5_file = target_filename.with_name(target_filename.name + \".md5sum\")\n st = os.stat(target_filename)\n with open(md5_file, \"wb\") as op:\n op.write(checksum.encode(\"utf-8\"))\n os.utime(md5_file, (st[stat.ST_MTIME], st[stat.ST_MTIME]))\n self._check_hash_against_others(target_filename, checksum)\n\n def _check_hash_against_others(self, target_filename, checksum):\n \"\"\"See if another machine has downloaded the file and synced it's mbf_store.\n If so, look at it's hash. If it differs, throw an Exception\"\"\"\n search_path = self.store.zip_path.absolute().parent.parent.parent\n print(search_path)\n search_key = \"**/\" + self.store.zip_path.name + \"/\" + target_filename.name\n by_hash = {checksum: [target_filename]}\n for found in search_path.glob(search_key):\n print(\"found\", found)\n if found != target_filename:\n cs = ppg.util.checksum_file(found)\n if not cs in by_hash:\n by_hash[cs] = []\n by_hash[cs].append(found)\n if len(by_hash) > 1:\n import pprint\n\n pprint.pprint(by_hash)\n raise DownloadDiscrepancyException(\n f\"Found multiple different {target_filename.name} with different md5sum. 
Investigate and fix (possibly using reproducible_tar), please.\"\n )\n\n def fetch_version(self, version, target_filename): # pragma: no cover\n # overwrite this in the downstream algorithms\n raise NotImplementedError()\n\n\nclass ExternalAlgorithmStore:\n def __init__(self, zip_path, unpack_path, no_downloads=False):\n self.zip_path = Path(zip_path)\n self.unpack_path = Path(unpack_path)\n self.no_downloads = no_downloads\n self._version_cache = {}\n\n def get_available_versions(self, algorithm_name):\n if (\n algorithm_name not in self._version_cache\n or not self._version_cache[algorithm_name]\n ):\n glob = f\"{algorithm_name}__*.tar.gz\"\n matching = list(self.zip_path.glob(glob))\n versions = [x.stem[x.stem.find(\"__\") + 2 : -4] for x in matching]\n self._version_cache[algorithm_name] = sort_versions(versions)\n return self._version_cache[algorithm_name]\n\n def unpack_version(self, algorithm_name, version):\n if version not in self.get_available_versions(algorithm_name):\n raise ValueError(f\"No such version {algorithm_name} {version}\")\n target_path = self.get_unpacked_path(algorithm_name, version)\n sentinel = target_path / \"unpack_done.txt\"\n if sentinel.exists():\n return\n target_path.mkdir(parents=True, exist_ok=True)\n gzip_path = self.get_zip_file_path(algorithm_name, version)\n subprocess.check_call([\"tar\", \"-xf\", gzip_path], cwd=target_path)\n sentinel.write_text(\"Done\")\n\n def get_unpacked_path(self, algorithm_name, version):\n return self.unpack_path / algorithm_name / version\n\n def get_zip_file_path(self, algorithm_name, version):\n return self.zip_path / (algorithm_name + \"__\" + version + \".tar.gz\")\n", "id": "9429130", "language": "Python", "matching_score": 3.1130380630493164, "max_stars_count": 0, "path": "src/mbf_externals/externals.py" }, { "content": "import functools\nfrom pathlib import Path\n\n\nclass lazy_property(object):\n \"\"\"\n meant to be used for lazy evaluation of an object attribute.\n property should represent non-mutable data, as it replaces itself.\n \"\"\"\n\n def __init__(self, fget):\n self.fget = fget\n\n # copy the getter function's docstring and other attributes\n functools.update_wrapper(self, fget)\n\n def __get__(self, obj, cls):\n # if obj is None: # this was in the original recipe, but I don't see\n # when it would be called?\n # return self\n\n value = self.fget(obj)\n setattr(obj, self.fget.__name__, value)\n return value\n\n\ndef lazy_method(func):\n \"\"\"\n meant to be used for lazy evaluation of an object attribute.\n property should represent non-mutable data, as it replaces itself.\n \"\"\"\n cache_name = \"_cached_\" + func.__name__\n\n def inner(self):\n if not hasattr(self, cache_name):\n setattr(self, cache_name, func(self))\n return getattr(self, cache_name)\n\n return inner\n\n\ndef sort_versions(versions):\n \"\"\"Sort versions, from natsort manual:\n Sorts like this:\n ['1.1', '1.2', '1.2alpha', '1.2beta1', '1.2beta2', '1.2rc1', '1.2.1', '1.3']\n \"\"\"\n import natsort\n\n return natsort.natsorted(\n versions,\n key=lambda x: x.replace(\".\", \"~\")\n if not isinstance(x, tuple)\n else x[0].replace(\".\", \"~\"),\n )\n\n\nclass Version:\n def __init__(self, version):\n self.version = version\n\n def __str__(self):\n return self.version\n\n def __repr__(self):\n return 'Version(\"%s\")' % (self.version,)\n\n def __eq__(self, other_version):\n if isinstance(other_version, Version):\n other = other_version.version\n else:\n other = other_version\n return self.version == other\n\n def __lt__(self, 
other_version):\n if isinstance(other_version, Version):\n other = other_version.version\n else:\n other = other_version\n s = sort_versions([self.version, other])\n return s[0] == self.version and not self.version == other\n\n def __le__(self, other_version):\n return (self == other_version) or (self < other_version)\n\n def __gt__(self, other_version):\n if isinstance(other_version, Version):\n other = other_version.version\n else:\n other = other_version\n s = sort_versions([self.version, other])\n return s[1] == self.version and not self.version == other\n\n def __ge__(self, other_version):\n return (self == other_version) or (self > other_version)\n\n\nclass UpstreamChangedError(ValueError):\n pass\n\n\ndef download_file(url, file_object):\n \"\"\"Download an url\"\"\"\n if isinstance(file_object, (str, Path)):\n raise ValueError(\"download_file needs a file-object not a name\")\n\n try:\n if url.startswith(\"ftp\"):\n return download_ftp(url, file_object)\n else:\n return download_http(url, file_object)\n except Exception as e:\n raise ValueError(\"Could not download %s, exception: %s\" % (repr(url), e))\n\n\ndef download_http(url, file_object):\n \"\"\"Download a file from http\"\"\"\n import requests\n import shutil\n\n r = requests.get(url, stream=True)\n if r.status_code != 200:\n raise ValueError(\"HTTP Error return: %i fetching %s\" % (r.status_code, url))\n r.raw.decode_content = True\n shutil.copyfileobj(r.raw, file_object)\n\n\ndef download_ftp(url, file_object):\n \"\"\"Download a file from ftp\"\"\"\n import ftplib\n import urllib\n\n schema, host, path, parameters, query, fragment = urllib.parse.urlparse(url)\n with ftplib.FTP(host) as ftp:\n try:\n ftp.login(\"anonymous\", \"\")\n if \"\\n\" in path: # pragma: no cover\n raise ValueError(\"New line in path: %s\" % (repr(path),))\n if path.endswith(\"/\"):\n ftp.retrbinary(\"LIST \" + path, file_object.write)\n else:\n ftp.retrbinary(\"RETR \" + path, file_object.write)\n except ftplib.Error as e:\n raise ValueError(\"Error retrieving urls %s: %s\" % (url, e))\n\n\ndef get_page(url):\n \"\"\"Download a web page (http/ftp) into a string\"\"\"\n from io import BytesIO\n\n tf = BytesIO()\n download_file(url, tf)\n tf.seek(0, 0)\n return tf.read().decode(\"utf-8\")\n\n\ndef download_file_and_gunzip(url, unzipped_filename):\n import shutil\n import gzip\n import tempfile\n\n tf = tempfile.NamedTemporaryFile(suffix=\".gz\")\n download_file(url, tf)\n tf.flush()\n\n with gzip.GzipFile(tf.name, \"rb\") as gz_in:\n with open(unzipped_filename, \"wb\") as op:\n shutil.copyfileobj(gz_in, op)\n\n\ndef download_file_and_gzip(url, gzipped_filename):\n import shutil\n import gzip\n import tempfile\n\n gzipped_filename = str(gzipped_filename)\n if not gzipped_filename.endswith(\".gz\"): # pragma: no cover\n raise ValueError(\"output filename did not end with .gz\")\n\n with tempfile.NamedTemporaryFile(suffix=\"\") as tf:\n with gzip.GzipFile(tf.name, \"wb\") as gf:\n download_file(url, gf)\n shutil.copy(tf.name, gzipped_filename)\n\n\ndef write_md5_sum(filepath):\n \"\"\"Create filepath.md5sum with the md5 hexdigest\"\"\"\n from pypipegraph.util import checksum_file\n\n md5sum = checksum_file(filepath)\n (filepath.with_name(filepath.name + \".md5sum\")).write_text(md5sum)\n\n\ndef to_string(s, encoding=\"utf-8\"):\n if isinstance(s, str):\n return s\n else:\n return s.decode(encoding)\n\n\ndef to_bytes(x, encoding=\"utf-8\"):\n \"\"\" In python3: str -> bytes. 
Bytes stay bytes\"\"\"\n if isinstance(x, bytes):\n return x\n else:\n return x.encode(encoding)\n\n\ndef chmod(filename, mode):\n \"\"\"Chmod if possible - otherwise try to steal the file and chmod then\"\"\"\n import os\n import shutil\n\n try:\n os.chmod(filename, mode)\n except OSError as e: # pragma: no cover\n if (\n str(e).find(\"Operation not permitted\") == -1\n and str(e).find(\"Permission denied\") == -1\n ):\n raise\n else: # steal ownership and set the permissions...\n t = filename + \".temp\"\n shutil.copyfile(filename, t)\n try:\n os.chmod(t, mode)\n except OSError:\n pass\n os.unlink(filename)\n shutil.move(t, filename)\n\n\ndef download_zip_and_turn_into_tar_gzip(url, target_filename, chmod_x_files=[]):\n \"\"\"Download a zip archive and turn it into the correct tar.gzip\n \"\"\"\n import tempfile\n import subprocess\n from .externals import reproducible_tar\n\n if isinstance(chmod_x_files, str): # pragma: no cover\n chmod_x_files = [chmod_x_files]\n\n with tempfile.TemporaryDirectory() as tmpdir:\n tmpdir = Path(tmpdir)\n with (tmpdir / \"source.zip\").open(\"wb\") as zip_file:\n download_file(url, zip_file)\n import zipfile\n\n with zipfile.ZipFile(zip_file.name, \"r\") as zip_ref:\n zip_ref.extractall(tmpdir / \"target\")\n for fn in chmod_x_files:\n subprocess.check_call(\n [\"chmod\", \"+x\", str(tmpdir.absolute() / \"target\" / fn)]\n )\n reproducible_tar(target_filename.absolute(), \"./\", cwd=tmpdir / \"target\")\n\n\ndef download_mercurial_update_and_zip(url, changeset, target_filename):\n \"\"\"Download a mercurial repo, update it to a specific changeset, and tar it up\"\"\"\n import tempfile\n import subprocess\n from .externals import reproducible_tar\n\n with tempfile.TemporaryDirectory() as tmpdir:\n tmpdir = Path(tmpdir)\n subprocess.check_call([\"hg\", \"clone\", url, str(tmpdir.absolute())])\n subprocess.check_call([\"hg\", \"up\", \"-r\", changeset], cwd=tmpdir)\n reproducible_tar(target_filename.absolute(), \"./\", cwd=tmpdir)\n\n\ndef download_tar_bz2_and_turn_into_tar_gzip(url, target_filename, version, chmod_x_files=[], make=True):\n \"\"\"Download a tar.bz2 archive and turn it into the correct tar.gzip\n \"\"\"\n\n import tempfile\n import subprocess\n from .externals import reproducible_tar\n import tarfile\n\n if isinstance(chmod_x_files, str): # pragma: no cover\n chmod_x_files = [chmod_x_files]\n\n with tempfile.TemporaryDirectory() as tmpdir:\n tmpdir = Path(tmpdir)\n with (tmpdir / \"source.tar.bz2\").open(\"wb\") as archive_file:\n download_file(url, archive_file)\n\n with tarfile.open(archive_file.name, \"r\") as zip_ref:\n zip_ref.extractall(tmpdir / \"target\")\n for fn in chmod_x_files:\n subprocess.check_call(\n [\"chmod\", \"+x\", str(tmpdir.absolute() / \"target\" / fn)]\n )\n if make:\n import os\n wd = [x for x in (tmpdir.absolute() / \"target\").iterdir()][0]\n subprocess.check_call([\"make\"], cwd=wd)\n reproducible_tar(target_filename.absolute(), \"./\", cwd=tmpdir / \"target\")\n", "id": "8918121", "language": "Python", "matching_score": 2.480375289916992, "max_stars_count": 0, "path": "src/mbf_externals/util.py" }, { "content": "from mbf_fileformats.util import chunkify, open_file\n\n\ndef test_open_file():\n import gzip\n import tempfile\n import bz2\n\n tf = tempfile.TemporaryFile()\n assert open_file(tf) is tf\n\n tf2 = tempfile.NamedTemporaryFile(suffix=\".gz\", mode=\"w\")\n g = gzip.GzipFile(tf2.name, \"w\")\n g.write(b\"hello\")\n g.close()\n tf2.flush()\n assert open_file(tf2.name).read() == b\"hello\"\n\n tf3 = 
tempfile.NamedTemporaryFile(suffix=\".bz2\", mode=\"w\")\n b = bz2.BZ2File(tf3.name, \"w\")\n b.write(b\"world\")\n b.close()\n tf3.flush()\n assert open_file(tf3.name).read() == b\"world\"\n\n\ndef test_chunkify():\n import tempfile\n\n tf = tempfile.TemporaryFile(\"w+\")\n tf.write(\"hello world\")\n tf.flush()\n tf.seek(0, 0)\n c = list(chunkify(tf, \" \", 2))\n assert c == [\"hello\", \"world\"]\n", "id": "9166681", "language": "Python", "matching_score": 0.4659850001335144, "max_stars_count": 0, "path": "tests/test_util.py" }, { "content": "\"\"\"Tests for the various combinations you can evoke\nour cython based fastq reader with.\"\"\"\nimport os\nimport pytest\nimport pypipegraph as ppg\nfrom pathlib import Path\n\nfrom mbf_align import fastq2\nfrom mbf_sampledata import get_sample_data\n\n\[email protected](\"new_pipegraph\")\nclass TestFastqS:\n def test_straight_copy(self):\n test = b\"\"\"@SEQ_ID_1\nAATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_2\nTATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_2\nAAAATGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\n@SEQ_ID_2\nCATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC\n\"\"\"\n try:\n with open(\"test_straight_copy.fastq\", \"wb\") as op:\n op.write(test)\n x = fastq2.Straight()\n x.generate_aligner_input(\n \"test_straight_copy.out.fastq\", [\"test_straight_copy.fastq\"], False\n )\n with open(\"test_straight_copy.out.fastq\", \"rb\") as op:\n was = op.read()\n assert test == was\n\n finally:\n if os.path.exists(\"test_straight_copy.out.fastq\"):\n os.unlink(\"test_straight_copy.out.fastq\")\n\n def test_straight_reverse(self):\n test = b\"\"\"@SEQ_ID_1\nAATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_2\nTATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_2\nAAAATGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\n@SEQ_ID_2\nCATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCA\n\"\"\"\n should = b\"\"\"@SEQ_ID_1\nAAACTGTGAGTTGAACAAATGGATTTACTATTTGATCGATACTGCTTTGAACCCCAAATT\n+\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcA\n@SEQ_ID_2\nAAACTGTGAGTTGAACAAATGGATTTACTATTTGATCGATACTGCTTTGAACCCCAAATA\n+\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_2\nAAACTGTGAGTTGAACAAATGGATTTACTATTTGATCGATACTGCTTTGAACCCCATTTT\n+\nBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\n@SEQ_ID_2\nAAACTGTGAGTTGAACAAATGGATTTACTATTTGATCGATACTGCTTTGAACCCCAAATG\n+\nACCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC\n\"\"\"\n try:\n with open(\"test_straight_reverse.fastq\", \"wb\") as op:\n op.write(test)\n x = fastq2.Straight()\n x.generate_aligner_input(\n \"test_straight_reverse.out.fastq\", [\"test_straight_reverse.fastq\"], True\n )\n with open(\"test_straight_reverse.out.fastq\", \"rb\") as op:\n was = op.read()\n assert should == was\n\n finally:\n if os.path.exists(\"test_straight_reverse.out.fastq\"):\n os.unlink(\"test_straight_reverse.out.fastq\")\n\n def 
test_filter(self):\n test = b\"\"\"@SEQ_ID_1\nAATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_2\nTATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_3\nAAAATGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\n@SEQ_ID_4\nCATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCA\n\"\"\"\n should = b\"\"\"@SEQ_ID_1\nAATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_3\nAAAATGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\n\"\"\"\n\n try:\n with open(\"test_filter.fastq\", \"wb\") as op:\n op.write(test)\n\n def f(seq, qual, name):\n print(seq, qual, name)\n result = seq.startswith(b\"A\")\n return result\n\n x = fastq2.Filtered(f)\n x.generate_aligner_input(\n \"test_filter.out.fastq\", [\"test_filter.fastq\"], False\n )\n with open(\"test_filter.out.fastq\", \"rb\") as op:\n was = op.read()\n assert should == was\n\n finally:\n if os.path.exists(\"test_filter.out.fastq\"):\n os.unlink(\"test_filter.out.fastq\")\n\n def test_quality_filter_bool(self):\n test = b\"\"\"@SEQ_ID_1\nAATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_2\nTATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_3\nAAAATGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\n@SEQ_ID_4\nCATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCA\n\"\"\"\n should = b\"\"\"@SEQ_ID_1\nAATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_3\nAAAATGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\n\"\"\"\n\n try:\n with open(\"test_quality_filter_bool.fastq\", \"wb\") as op:\n op.write(test)\n\n def f(qual, seq):\n return seq.startswith(b\"A\")\n\n x = fastq2.QualityFilter(f)\n x.generate_aligner_input(\n \"test_quality_filter_bool.out.fastq\",\n [\"test_quality_filter_bool.fastq\"],\n False,\n )\n with open(\"test_quality_filter_bool.out.fastq\", \"rb\") as op:\n was = op.read()\n assert should == was\n\n finally:\n if os.path.exists(\"test_quality_filter_bool.out.fastq\"):\n os.unlink(\"test_quality_filter_bool.out.fastq\")\n\n def test_quality_filter_positive_integer(self):\n test = b\"\"\"@SEQ_ID_1\nAATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_2\nTATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_3\nAAAATGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\n@SEQ_ID_4\nCATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCA\n\"\"\"\n should = b\"\"\"@SEQ_ID_1\nAATTT\n+\nAcAAA\n@SEQ_ID_3\nAAAAT\n+\nBBBBB\n\"\"\"\n\n try:\n 
with open(\"test_quality_filter_positive_integer.fastq\", \"wb\") as op:\n op.write(test)\n\n def f(qual, seq):\n if seq.startswith(b\"A\"): # keep the first 5 bases\n return 5\n else:\n return False\n\n x = fastq2.QualityFilter(f)\n x.generate_aligner_input(\n \"test_quality_filter_positive_integer.out.fastq\",\n [\"test_quality_filter_positive_integer.fastq\"],\n False,\n )\n with open(\"test_quality_filter_positive_integer.out.fastq\", \"rb\") as op:\n was = op.read()\n assert should == was\n\n finally:\n if os.path.exists(\"test_quality_filter_positive_integer.out.fastq\"):\n os.unlink(\"test_quality_filter_positive_integer.out.fastq\")\n\n def test_quality_filter_bool_cut_3(self):\n test = b\"\"\"@SEQ_ID_1\nAATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_2\nTATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_3\nAAAATGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\n@SEQ_ID_4\nCATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCA\n\"\"\"\n should = b\"\"\"@SEQ_ID_1\nAGTTT\n+\nAAAAA\n@SEQ_ID_3\nAGTTT\n+\nBBBBB\n\"\"\"\n\n try:\n with open(\"test_quality_filter_bool_cut_3.fastq\", \"wb\") as op:\n op.write(test)\n\n def f(qual, seq):\n if seq.startswith(b\"A\"):\n return -5 # keep the last |-5| bases\n else:\n return False\n\n x = fastq2.QualityFilter(f)\n x.generate_aligner_input(\n \"test_quality_filter_bool_cut_3.out.fastq\",\n [\"test_quality_filter_bool_cut_3.fastq\"],\n False,\n )\n with open(\"test_quality_filter_bool_cut_3.out.fastq\", \"rb\") as op:\n was = op.read()\n assert should == was\n\n finally:\n if os.path.exists(\"test_quality_filter_bool_cut_3.out.fastq\"):\n os.unlink(\"test_quality_filter_bool_cut_3.out.fastq\")\n\n def test_quality_filter_tuple(self):\n test = b\"\"\"@SEQ_ID_1\nAATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nAcAAAAghAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_2\nTATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_3\nAAAATGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBcBBBBB\n@SEQ_ID_4\nCATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCA\n\"\"\"\n should = b\"\"\"@SEQ_ID_1\nGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCAC\n+\nghAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_3\nGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCAC\n+\nBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBc\n\"\"\"\n\n try:\n with open(\"test_quality_filter_tuple.fastq\", \"wb\") as op:\n op.write(test)\n\n def f(qual, seq):\n if seq.startswith(b\"A\"):\n return (6, -5)\n else:\n return False\n\n x = fastq2.QualityFilter(f)\n x.generate_aligner_input(\n \"test_quality_filter_tuple.out.fastq\",\n [\"test_quality_filter_tuple.fastq\"],\n False,\n )\n with open(\"test_quality_filter_tuple.out.fastq\", \"rb\") as op:\n was = op.read()\n assert should == was\n\n finally:\n if os.path.exists(\"test_quality_filter_tuple.out.fastq\"):\n os.unlink(\"test_quality_filter_tuple.out.fastq\")\n\n def test_cutadapt(self):\n test = 
b\"\"\"@SEQ_ID_1\nAATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nAcAAAAghAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_2\nTACCCGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n\"\"\"\n should = b\"\"\"@SEQ_ID_1\nTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n\"\"\"\n\n try:\n with open(\"test_cutadapt.fastq\", \"wb\") as op:\n op.write(test)\n\n x = fastq2.CutAdapt(\"AATTTGGGG\", None, False, maximal_error_rate=1)\n x.generate_aligner_input(\n \"test_cutadapt.out.fastq\", [\"test_cutadapt.fastq\"], False\n )\n with open(\"test_cutadapt.out.fastq\", \"rb\") as op:\n was = op.read()\n print(\"should\")\n print(should)\n print(\"was\")\n print(was)\n assert should == was\n\n finally:\n if os.path.exists(\"test_cutadapt.out.fastq\"):\n os.unlink(\"test_cutadapt.out.fastq\")\n\n def test_cutadapt_both_ends(self):\n test = b\"\"\"@SEQ_ID_1\nAATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nAcAAAAghAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_2\nTACCCGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n\"\"\"\n should = b\"\"\"@SEQ_ID_1\nTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAAC\n+\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n\"\"\"\n\n try:\n with open(\"test_cutadapt.fastq\", \"wb\") as op:\n op.write(test)\n\n x = fastq2.CutAdapt(\"AATTTGGGG\", \"TCAC\", False, maximal_error_rate=1)\n x.generate_aligner_input(\n \"test_cutadapt.out.fastq\", [\"test_cutadapt.fastq\"], False\n )\n with open(\"test_cutadapt.out.fastq\", \"rb\") as op:\n was = op.read()\n print(\"should\")\n print(should)\n print(\"was\")\n print(was)\n assert should == was\n\n finally:\n if os.path.exists(\"test_cutadapt.out.fastq\"):\n os.unlink(\"test_cutadapt.out.fastq\")\n\n def test_cutadapt_both_ends_keep(self):\n test = b\"\"\"@SEQ_ID_1\nAATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nAcAAAAghAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_2\nAATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACAAAAAGTTT\n+\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n\"\"\"\n should = b\"\"\"@SEQ_ID_1\nTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAAC\n+\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_2\nTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACAAAAAGTTT\n+\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n\"\"\"\n\n try:\n with open(\"test_cutadapt.fastq\", \"wb\") as op:\n op.write(test)\n\n x = fastq2.CutAdapt(\"AATTTGGGG\", \"TCACAGTTT\", True, maximal_error_rate=1)\n x.generate_aligner_input(\n \"test_cutadapt.out.fastq\", [\"test_cutadapt.fastq\"], False\n )\n with open(\"test_cutadapt.out.fastq\", \"rb\") as op:\n was = op.read()\n print(\"should\")\n print(should)\n print(\"was\")\n print(was)\n assert should == was\n\n finally:\n if os.path.exists(\"test_cutadapt.out.fastq\"):\n os.unlink(\"test_cutadapt.out.fastq\")\n\n def test_cutadapt_both_ends_no_keep(self):\n test = b\"\"\"@SEQ_ID_1\nAATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nAcAAAAghAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_3\nAATTTGGGGAAAATCACAGTTT\n+\nAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_2\nAATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACAAAAAGTTT\n+\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n\"\"\"\n should = 
b\"\"\"@SEQ_ID_1\nTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAAC\n+\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n\"\"\"\n\n try:\n with open(\"test_cutadapt.fastq\", \"wb\") as op:\n op.write(test)\n\n x = fastq2.CutAdapt(\"AATTTGGGG\", \"TCACAGTTT\", False, maximal_error_rate=1)\n x.generate_aligner_input(\n \"test_cutadapt.out.fastq\", [\"test_cutadapt.fastq\"], False\n )\n with open(\"test_cutadapt.out.fastq\", \"rb\") as op:\n was = op.read()\n print(\"should\")\n print(should)\n print(\"was\")\n print(was)\n assert should == was\n\n finally:\n if os.path.exists(\"test_cutadapt.out.fastq\"):\n os.unlink(\"test_cutadapt.out.fastq\")\n\n def test_cutadapt_only_end(self):\n test = b\"\"\"@SEQ_ID_1\nAATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nAcAAAAghAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_2\nTACCCGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n\"\"\"\n should = b\"\"\"@SEQ_ID_1\nAATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAAC\n+\nAcAAAAghAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_2\nTACCCGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAAC\n+\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n\"\"\"\n\n try:\n with open(\"test_cutadapt.fastq\", \"wb\") as op:\n op.write(test)\n\n x = fastq2.CutAdapt(None, \"TCAC\", False, maximal_error_rate=1)\n x.generate_aligner_input(\n \"test_cutadapt.out.fastq\", [\"test_cutadapt.fastq\"], False\n )\n with open(\"test_cutadapt.out.fastq\", \"rb\") as op:\n was = op.read()\n print(\"should\")\n print(should)\n print(\"was\")\n print(was)\n assert should == was\n\n finally:\n if os.path.exists(\"test_cutadapt.out.fastq\"):\n os.unlink(\"test_cutadapt.out.fastq\")\n\n def test_cutadapt_mismatch_straight_keep(self):\n test = b\"\"\"@SEQ_ID_1\nAATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nAcAAAAghAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_2\nTACCCGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n\"\"\"\n should = b\"\"\"@SEQ_ID_1\nTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAG\n+\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_2\nTACCCGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAG\n+\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n\"\"\"\n\n try:\n with open(\"test_cutadapt.fastq\", \"wb\") as op:\n op.write(test)\n\n x = fastq2.CutAdapt(\"AATTTGGGT\", -3, True, maximal_error_rate=1)\n x.generate_aligner_input(\n \"test_cutadapt.out.fastq\", [\"test_cutadapt.fastq\"], False\n )\n with open(\"test_cutadapt.out.fastq\", \"rb\") as op:\n was = op.read()\n print(\"should\")\n print(should)\n print(\"was\")\n print(was)\n assert should == was\n\n finally:\n if os.path.exists(\"test_cutadapt.out.fastq\"):\n os.unlink(\"test_cutadapt.out.fastq\")\n\n\ndef test_filtered_paired(new_pipegraph):\n import gzip\n\n r1_name_found = [False]\n r1_qual_found = [False]\n r2_name_found = [False]\n r2_qual_found = [False]\n\n def f(seq1, qual1, name1, seq2, qual2, name2):\n if name1 == b\"HWI-C00113:209:HJCNTBCX2:2:2206:9418:13942 1:N:0:ATCACG\":\n r1_name_found[0] = True\n if qual1 == b\"DDDDDIIIIIIIIIIIIIIIIIIIIII/<FHHIII<<CHHGHHIHHIIIIH\":\n r1_qual_found[0] = True\n if name2 == b\"HWI-C00113:209:HJCNTBCX2:2:2206:10802:17968 1:N:0:ATCACG\":\n r2_name_found[0] = True\n if qual2 == b\"DDDDDIIIIIIIIIIIIIIII1<FHHI/<GHIIII/<EHIIHII/<DHIID\":\n r2_qual_found[0] = True\n return seq1.startswith(b\"G\") and 
seq2.startswith(b\"G\")\n\n x = fastq2.Paired_Filtered(f)\n of1 = \"output_R1.fastq\"\n of2 = \"output_R2.fastq\"\n tf1 = open(\"input_R1_.fastq\", \"wb\")\n tf2 = open(\"input_R2_.fastq\", \"wb\")\n with gzip.GzipFile(\n get_sample_data(Path(\"mbf_align/sample_b\") / \"a_R1_.fastq.gz\"), \"rb\"\n ) as op_in:\n tf1.write(op_in.read())\n tf1.flush()\n with gzip.GzipFile(\n get_sample_data(Path(\"mbf_align/sample_b\") / \"a_R2_.fastq.gz\"), \"rb\"\n ) as op_in:\n tf2.write(op_in.read())\n tf2.flush()\n\n x.generate_aligner_input_paired(of1, of2, [(tf1.name, tf2.name)], False)\n assert r1_name_found[0]\n assert r2_name_found[0]\n assert r1_qual_found[0]\n assert r2_qual_found[0]\n actual1 = Path(of1).read_text()\n actual2 = Path(of2).read_text()\n assert actual1.count(\"\\n\") == 4\n assert \"@HWI-C00113:209:HJCNTBCX2:2:2206:9559:13855 1:N:0:ATCACG\" in actual1\n assert \"GCCCAATGTTCGAAATTGCTATTCTACGACAAGGTGCCAGATCTCATCTGA\" in actual1\n assert actual2.count(\"\\n\") == 4\n assert \"@HWI-C00113:209:HJCNTBCX2:2:2206:11052:17798 1:N:0:ATCACG\" in actual2\n assert \"GTCGGTCCTGAGAGATGGGCGGGCGCCGTTCCGAAAGTACGGGCGATGGCC\" in actual2\n\n\ndef test_filtered_depends_on_function_invariant(new_pipegraph):\n def f(seq1, qual1, name1, seq2, qual2, name2):\n return True\n\n x = fastq2.Paired_Filtered(f)\n deps = x.get_dependencies([\"test_R1_.fastq\", \"test_R2_.fastq\"])\n assert isinstance(deps[0], ppg.FunctionInvariant)\n assert \"test_R1_.fastq\" in deps[0].job_id\n assert deps[0].function is f\n\n\ndef test_filtered_paired_depends_on_function_invariant(new_pipegraph):\n def f(name, seq, qual):\n return True\n\n x = fastq2.Filtered(f)\n deps = x.get_dependencies(\"test.fastq\")\n assert isinstance(deps[0], ppg.FunctionInvariant)\n assert \"test.fastq\" in deps[0].job_id\n assert deps[0].function is f\n\n\ndef test_quality_filter_depends_on_function_invariant(new_pipegraph):\n def f(qual, seq):\n return True\n\n x = fastq2.QualityFilter(f)\n deps = x.get_dependencies(\"test.fastq\")\n assert isinstance(deps[0], ppg.FunctionInvariant)\n assert deps[0].function is f\n\n\ndef test_read_creator_must_be_fastq_right_now(new_pipegraph):\n with pytest.raises(ValueError):\n fastq2.Straight().generate_aligner_input(\n \"test.fastq\",\n [str(get_sample_data(Path(\"mbf_align/sample_a)/a.fastq\")))],\n False,\n \"fail\",\n )\n with pytest.raises(ValueError):\n fastq2.Filtered(lambda seq, qual, name: True).generate_aligner_input(\n \"test.fastq\",\n [str(get_sample_data(Path(\"mbf_align/sample_a)/a.fastq\")))],\n False,\n \"fail\",\n )\n with pytest.raises(ValueError):\n fastq2.QualityFilter(lambda qual, seq: True).generate_aligner_input(\n \"test.fastq\",\n [str(get_sample_data(Path(\"mbf_align/sample_a)/a.fastq\")))],\n False,\n \"fail\",\n )\n\n\ndef test_quality_raises_on_0_return(new_pipegraph):\n with pytest.raises(ValueError):\n fastq2.QualityFilter(lambda qual, seq: 0).generate_aligner_input(\n \"test.fastq\",\n [str(get_sample_data(Path(\"mbf_align/sample_a/a.fastq\")))],\n False,\n )\n\n\ndef test_cutadapt_three_prime_deprecated():\n with pytest.raises(DeprecationWarning):\n fastq2.CutAdaptThreePrime()\n\n\ndef test_cutadapt_raises_on_negative_adapter_sequences():\n with pytest.raises(ValueError):\n fastq2.CutAdapt(-1, 5, True)\n with pytest.raises(ValueError):\n fastq2.CutAdapt(1, -5, True)\n\n\ndef test_umi_extract(new_pipegraph):\n test = b\"\"\"@SEQ_ID_1 
123\nAATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_2\nTATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_2\nAAAATGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\n@SEQ_ID_2\nCATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC\n\"\"\"\n should = b\"\"\"@SEQ_ID_1_AAT 123\nTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_2_TAT\nTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_2_AAA\nATGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\n@SEQ_ID_2_CAT\nTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC\n\"\"\"\n\n try:\n with open(\"test_straight_copy.fastq\", \"wb\") as op:\n op.write(test)\n x = fastq2.UMIExtract(3)\n assert len(x.get_dependencies(\"test_straight_copy.out.fastq\")) == 1\n x.generate_aligner_input(\n \"test_straight_copy.out.fastq\", [\"test_straight_copy.fastq\"], False\n )\n with open(\"test_straight_copy.out.fastq\", \"rb\") as op:\n was = op.read()\n assert should == was\n\n finally:\n if os.path.exists(\"test_straight_copy.out.fastq\"):\n os.unlink(\"test_straight_copy.out.fastq\")\n\n\ndef test_quantseq_fwd(new_pipegraph):\n test = b\"\"\"@SEQ_ID_1\nAATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGT\n@SEQ_ID_2 1:N:0:7\nTATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_2\nAAAATGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\n@SEQ_ID_2\nCATTTGGGGTTCAAAGCAGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTTT\n+\nCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC\n\"\"\"\n should = b\"\"\"@SEQ_ID_1_AATTTG\nGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTT\n+\nGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACG\n@SEQ_ID_2_TATTTG 1:N:0:7\nGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTT\n+\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n@SEQ_ID_2_AAAATG\nGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTT\n+\nBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\n@SEQ_ID_2_CATTTG\nGTATCGATCAAATAGTAAATCCATTTGTTCAACTCACAGTT\n+\nCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC\n\"\"\"\n\n try:\n with open(\"test_straight_copy.fastq\", \"wb\") as op:\n op.write(test)\n x = fastq2.QuantSeqFWD()\n assert len(x.get_dependencies(\"test_straight_copy.out.fastq\")) == 1\n x.generate_aligner_input(\n \"test_straight_copy.out.fastq\", [\"test_straight_copy.fastq\"], False\n )\n with open(\"test_straight_copy.out.fastq\", \"rb\") as op:\n was = op.read()\n assert should == was\n\n finally:\n if os.path.exists(\"test_straight_copy.out.fastq\"):\n os.unlink(\"test_straight_copy.out.fastq\")\n", "id": "3079994", "language": "Python", "matching_score": 3.764543056488037, "max_stars_count": 0, "path": "tests/test_fastq2.py" }, { "content": "import pytest\nimport gzip\nfrom pathlib import Path\nfrom pypipegraph import FileGeneratingJob, MultiFileGeneratingJob\nimport requests_mock\nimport pypipegraph as ppg\n\nfrom 
mbf_align import (\n FASTQsFromFile,\n FASTQsFromFiles,\n FASTQsFromFolder,\n FASTQsFromJob,\n FASTQsFromURLs,\n FASTQsFromAccession,\n FASTQsFromPrefix,\n build_fastq_strategy,\n FASTQsFromMRNAs,\n FASTQsJoin\n)\nfrom mbf_align import Sample\nfrom mbf_align import PairingError\nfrom mbf_align import fastq2\nfrom mbf_align._common import read_fastq_iterator\nfrom mbf_sampledata import get_sample_data\nimport attr\n\n\ndef test_FASTQsFromFile():\n fn = Path(\n get_sample_data(Path(\"mbf_align/sample_a\") / \"..\" / \"sample_a\" / \"a.fastq\")\n )\n o = FASTQsFromFile(fn)\n assert o() == [(fn.resolve(),)]\n\n\ndef test_FASTQsFromFileRaisesOnMissing():\n fn = get_sample_data(Path(\"mbf_align/sample_a\") / \"a.fastq.nosuchfile\")\n with pytest.raises(IOError):\n FASTQsFromFile(fn)\n\n\ndef test_FASTQsFromFilePaired():\n fn = Path(get_sample_data(Path(\"mbf_align/sample_b\") / \"a_R1_.fastq.gz\"))\n fn2 = Path(\n get_sample_data(\n Path(\"mbf_align/sample_b\") / \"..\" / \"sample_b\" / \"a_R2_.fastq.gz\"\n )\n )\n o = FASTQsFromFile(fn, fn2)\n assert o() == [(fn.resolve(), fn2.resolve())]\n\n\ndef test_FASTQsFromFilePairedMissingR2():\n fn = get_sample_data(Path(\"mbf_align/sample_b\") / \"a_R1_.fastq.gz\")\n fn2 = get_sample_data(Path(\"mbf_align/sample_b\") / \"a_R2_.fastq.gz.nosuchfile\")\n with pytest.raises(IOError):\n FASTQsFromFile(fn, fn2)\n\n\ndef test_FASTQsFromFilesPaired():\n fn = Path(get_sample_data(Path(\"mbf_align/sample_b\") / \"a_R1_.fastq.gz\"))\n fn2 = Path(\n get_sample_data(\n Path(\"mbf_align/sample_b\") / \"..\" / \"sample_b\" / \"a_R2_.fastq.gz\"\n )\n )\n o = FASTQsFromFiles([fn, fn2])\n assert o() == [(fn.resolve(), fn2.resolve())]\n\n\ndef test_FASTQsFromFilesPaired_build_strategy():\n fn = Path(get_sample_data(Path(\"mbf_align/sample_b\") / \"a_R1_.fastq.gz\"))\n fn2 = Path(\n get_sample_data(\n Path(\"mbf_align/sample_b\") / \"..\" / \"sample_b\" / \"a_R2_.fastq.gz\"\n )\n )\n o = build_fastq_strategy([fn, fn2])\n assert o() == [(fn.resolve(), fn2.resolve())]\n\n\ndef test_FASTQsFromFolder():\n folder = Path(get_sample_data(Path(\"mbf_align/sample_a\")))\n o = FASTQsFromFolder(folder)\n import pprint\n\n pprint.pprint(o())\n assert o() == [\n ((folder / \"a.fastq\").resolve(),),\n ((folder / \"b.fastq.gz\").resolve(),),\n ]\n\n\ndef test_fastqs_join():\n fn = Path(get_sample_data(Path(\"mbf_align/sample_b\") / \"a_R1_.fastq.gz\"))\n fn2 = Path(\n get_sample_data(\n Path(\"mbf_align/sample_b\") / \"..\" / \"sample_b\" / \"a_R2_.fastq.gz\"\n )\n )\n a = FASTQsFromFiles([fn, fn2])\n b = FASTQsFromFile(fn)\n c = FASTQsFromFile(fn2)\n d = FASTQsJoin([a, b, c])\n o = d()\n assert o == [(fn.resolve(), fn2.resolve()), (fn.resolve(),), (fn2.resolve(),)]\n\n\ndef test_FASTQsFromFolder_raises_on_non_existing():\n with pytest.raises(IOError):\n FASTQsFromFolder(\"shu\")\n\n\ndef test_FASTQsFromFolder_raises_on_no_fastqs():\n with pytest.raises(ValueError):\n FASTQsFromFolder(get_sample_data(Path(\"mbf_align/sample_f\")))\n\n\ndef test_FASTQsFromFolder_raises_on_not_a_folder():\n with pytest.raises(ValueError):\n FASTQsFromFolder(get_sample_data(Path(\"mbf_align/sample_a\") / \"a.fastq\"))\n\n\ndef test_FASTQsFromFolderPaired():\n folder = Path(get_sample_data(Path(\"mbf_align/sample_b\")))\n o = FASTQsFromFolder(folder)\n assert o() == [\n ((folder / \"a_R1_.fastq.gz\").resolve(), (folder / \"a_R2_.fastq.gz\").resolve())\n ]\n\n\ndef test_FASTQsFromFolderR2_but_missing_any_r1():\n folder = get_sample_data(Path(\"mbf_align/sample_c\"))\n o = FASTQsFromFolder(folder)\n with 
pytest.raises(ValueError):\n o()\n\n\ndef test_FASTQsFromFolder_pairing_files_fails():\n folder = get_sample_data(Path(\"mbf_align/sample_d\"))\n o = FASTQsFromFolder(folder)\n with pytest.raises(ValueError):\n o()\n\n\ndef test_FASTQsFromFolder_pairing_files_fails2():\n folder = get_sample_data(Path(\"mbf_align/sample_e\"))\n o = FASTQsFromFolder(folder)\n with pytest.raises(ValueError):\n o()\n\n\ndef test_FASTQsFromPrefix():\n fn1 = Path(get_sample_data(Path(\"mbf_align/sample_d\") / \"a_R1_.fastq.gz\"))\n fn2 = Path(get_sample_data(Path(\"mbf_align/sample_d\") / \"a_R2_.fastq.gz\"))\n fn_prefix = Path(get_sample_data(Path(\"mbf_align/sample_d\") / \"a\"))\n o = FASTQsFromPrefix(fn_prefix)\n str(o)\n assert o() == [(fn1.resolve(), fn2.resolve())]\n\n\ndef test_FASTQsFromPrefix_raises_on_non_existant():\n fn_prefix = Path(\"/shu/sha\")\n with pytest.raises(IOError):\n FASTQsFromPrefix(fn_prefix)\n\n\ndef test_FASTQsFromPrefix_raises_on_non_found():\n fn_prefix = Path(get_sample_data(Path(\"mbf_align/sample_d\") / \"x\"))\n with pytest.raises(ValueError):\n FASTQsFromPrefix(fn_prefix)\n\n\[email protected](\"new_pipegraph_no_qc\")\nclass TestSamples:\n def test_FASTQsFromJob(self):\n job = FileGeneratingJob(\"test.fastq.gz\", lambda of: None)\n o = FASTQsFromJob(job)\n assert o() == [(Path(\"test.fastq.gz\").resolve(),)]\n\n def test_FASTQsFromJob_R1_ok(self):\n job = FileGeneratingJob(\"test_R1_.fastq.gz\", lambda of: None)\n o = FASTQsFromJob(job)\n assert o() == [(Path(\"test_R1_.fastq.gz\").resolve(),)]\n\n def test_FASTQsFromJob_Multiple_Unpaired(self):\n job = MultiFileGeneratingJob(\n [\"test.fastq.gz\", \"test2.fastq.gz\"], lambda of: None\n )\n o = FASTQsFromJob(job)\n assert o() == [\n (Path(\"test.fastq.gz\").resolve(),),\n (Path(\"test2.fastq.gz\").resolve(),),\n ]\n\n def test_FASTQsFromJob_Multiple_Unpaired_R1(self):\n job = MultiFileGeneratingJob(\n [\"test_R1_.fastq.gz\", \"test2_R1_.fastq.gz\"], lambda of: None\n )\n o = FASTQsFromJob(job)\n assert o() == [\n # 2 sorts before _\n (Path(\"test2_R1_.fastq.gz\").resolve(),),\n (Path(\"test_R1_.fastq.gz\").resolve(),),\n ]\n\n def test_FASTQsFromJob_Multiple_Paired(self):\n job = MultiFileGeneratingJob(\n [\n \"test_R1_.fastq.gz\",\n \"test2_R1_.fastq.gz\",\n \"test_R2_.fastq.gz\",\n \"test2_R2_.fastq.gz\",\n ],\n lambda of: None,\n )\n o = FASTQsFromJob(job)\n assert set(o()) == set(\n [\n # 2 sorts before _\n (\n Path(\"test2_R1_.fastq.gz\").resolve(),\n Path(\"test2_R2_.fastq.gz\").resolve(),\n ),\n (\n Path(\"test_R1_.fastq.gz\").resolve(),\n Path(\"test_R2_.fastq.gz\").resolve(),\n ),\n ]\n )\n\n def test_build_fastq_strategy(self):\n # single filename\n assert build_fastq_strategy(\n get_sample_data(\n Path(\"mbf_align/sample_b\") / \"..\" / \"sample_b\" / \"a_R1_.fastq.gz\"\n )\n )() == [\n (\n (\n Path(get_sample_data(Path(\"mbf_align/sample_b\") / \"a_R1_.fastq.gz\"))\n ).resolve(),\n )\n ]\n assert build_fastq_strategy(\n str(\n get_sample_data(\n Path(\"mbf_align/sample_b\") / \"..\" / \"sample_b\" / \"a_R1_.fastq.gz\"\n )\n )\n )() == [\n (\n (\n Path(get_sample_data(Path(\"mbf_align/sample_b\") / \"a_R1_.fastq.gz\"))\n ).resolve(),\n )\n ]\n # multiple files - end up being paired!\n assert build_fastq_strategy(\n [\n get_sample_data(\n Path(\"mbf_align/sample_b\") / \"..\" / \"sample_b\" / \"a_R1_.fastq.gz\"\n ),\n get_sample_data(\n Path(\"mbf_align/sample_b\") / \"..\" / \"sample_b\" / \"a_R2_.fastq.gz\"\n ),\n ]\n )() == [\n (\n Path(\n get_sample_data(Path(\"mbf_align/sample_b\") / \"a_R1_.fastq.gz\")\n 
).resolve(),\n Path(\n get_sample_data(Path(\"mbf_align/sample_b\") / \"a_R2_.fastq.gz\")\n ).resolve(),\n )\n ]\n # folder\n assert build_fastq_strategy(\n get_sample_data(Path(\"mbf_align/sample_b\") / \"..\" / \"sample_b\")\n )() == [\n (\n Path(\n get_sample_data(Path(\"mbf_align/sample_b\") / \"a_R1_.fastq.gz\")\n ).resolve(),\n Path(\n get_sample_data(Path(\"mbf_align/sample_b\") / \"a_R2_.fastq.gz\")\n ).resolve(),\n )\n ]\n # job\n assert isinstance(\n build_fastq_strategy(FileGeneratingJob(\"test.fastq\", lambda of: None)),\n FASTQsFromJob,\n )\n\n # pass through\n fn = get_sample_data(Path(\"mbf_align/sample_a\") / \"..\" / \"sample_a\" / \"a.fastq\")\n o = FASTQsFromFile(fn)\n assert build_fastq_strategy(o) is o\n\n with pytest.raises(ValueError):\n build_fastq_strategy(55)\n\n def test_lane(self):\n\n lane = Sample(\n \"Sample_a\", get_sample_data(Path(\"mbf_align/sample_a\")), False, vid=\"VA000\"\n )\n assert lane.vid == \"VA000\"\n temp_job = lane.prepare_input()\n real_job = lane.save_input()\n ppg.run_pipegraph()\n assert not Path(temp_job.filenames[0]).exists()\n assert Path(real_job.filenames[0]).exists()\n with gzip.GzipFile(real_job.filenames[0], \"r\") as op:\n lines = op.readlines()\n assert len(lines) == 20 + 20\n\n def test_paired_modes(self):\n with pytest.raises(PairingError):\n lane = Sample(\n \"Sample_a\",\n get_sample_data(Path(\"mbf_align/sample_b\")),\n False,\n vid=\"VA000\",\n )\n lane.prepare_input()\n\n def test_lane_paired_straight(self):\n\n lane = Sample(\n \"Sample_a\",\n get_sample_data(Path(\"mbf_align/sample_b\")),\n False,\n vid=\"VA000\",\n pairing=\"paired\",\n )\n assert lane.vid == \"VA000\"\n temp_job = lane.prepare_input()\n real_job = lane.save_input()\n ppg.run_pipegraph()\n assert not Path(temp_job.filenames[0]).exists()\n assert not Path(temp_job.filenames[1]).exists()\n assert Path(real_job.filenames[0]).exists()\n assert Path(real_job.filenames[1]).exists()\n assert \"_R1_\" in real_job.filenames[0]\n assert \"_R2_\" in real_job.filenames[1]\n assert \".fastq.gz\" in real_job.filenames[0]\n assert \".fastq.gz\" in real_job.filenames[1]\n\n for input_fn, output_fn in zip(\n [\n (get_sample_data(Path(\"mbf_align/sample_b\") / \"a_R1_.fastq.gz\")),\n (get_sample_data(Path(\"mbf_align/sample_b\") / \"a_R2_.fastq.gz\")),\n ],\n real_job.filenames,\n ):\n with gzip.GzipFile(output_fn, \"r\") as op:\n actual = op.read()\n with gzip.GzipFile(input_fn, \"r\") as op:\n should = op.read()\n assert actual == should\n\n def test_lane_paired_filtered(self):\n\n lane = Sample(\n \"Sample_a\",\n get_sample_data(Path(\"mbf_align/sample_b\")),\n False,\n vid=\"VA000\",\n pairing=\"paired\",\n fastq_processor=fastq2.Paired_Filtered(lambda *args: True),\n )\n assert lane.vid == \"VA000\"\n temp_job = lane.prepare_input()\n real_job = lane.save_input()\n ppg.run_pipegraph()\n assert not Path(temp_job.filenames[0]).exists()\n assert not Path(temp_job.filenames[1]).exists()\n assert Path(real_job.filenames[0]).exists()\n assert Path(real_job.filenames[1]).exists()\n assert \"_R1_\" in real_job.filenames[0]\n assert \"_R2_\" in real_job.filenames[1]\n assert \".fastq.gz\" in real_job.filenames[0]\n assert \".fastq.gz\" in real_job.filenames[1]\n\n for input_fn, output_fn in zip(\n [\n (get_sample_data(Path(\"mbf_align/sample_b\") / \"a_R1_.fastq.gz\")),\n (get_sample_data(Path(\"mbf_align/sample_b\") / \"a_R2_.fastq.gz\")),\n ],\n real_job.filenames,\n ):\n with gzip.GzipFile(output_fn, \"r\") as op:\n actual = op.read()\n with gzip.GzipFile(input_fn, 
\"r\") as op:\n should = op.read()\n assert actual == should\n\n def test_lane_paired_as_single(self):\n\n lane = Sample(\n \"Sample_a\",\n get_sample_data(Path(\"mbf_align/sample_b\")),\n False,\n vid=\"VA000\",\n pairing=\"paired_as_single\",\n )\n assert lane.vid == \"VA000\"\n temp_job = lane.prepare_input()\n real_job = lane.save_input()\n ppg.run_pipegraph()\n assert not Path(temp_job.filenames[0]).exists()\n assert len(temp_job.filenames) == 1\n assert Path(real_job.filenames[0]).exists()\n assert len(real_job.filenames) == 1\n assert not \"_R1_\" in real_job.filenames[0]\n assert \".fastq.gz\" in real_job.filenames[0]\n\n should = b\"\"\n for input_fn in [\n (get_sample_data(Path(\"mbf_align/sample_b\") / \"a_R1_.fastq.gz\")),\n (get_sample_data(Path(\"mbf_align/sample_b\") / \"a_R2_.fastq.gz\")),\n ]:\n with gzip.GzipFile(input_fn, \"r\") as op:\n should += op.read()\n with gzip.GzipFile(real_job.filenames[0], \"r\") as op:\n actual = op.read()\n assert actual == should\n\n def test_lane_paired_missing_R2(self):\n\n lane = Sample(\n \"Sample_a\",\n get_sample_data(Path(\"mbf_align/sample_a\")),\n False,\n vid=\"VA000\",\n pairing=\"paired\",\n )\n with pytest.raises(PairingError):\n lane.prepare_input()\n\n def test_lane_paired_only_first(self):\n\n lane = Sample(\n \"Sample_a\",\n get_sample_data(Path(\"mbf_align/sample_b\")),\n False,\n vid=\"VA000\",\n pairing=\"only_first\",\n )\n assert lane.vid == \"VA000\"\n temp_job = lane.prepare_input()\n real_job = lane.save_input()\n ppg.run_pipegraph()\n assert not Path(temp_job.filenames[0]).exists()\n assert len(temp_job.filenames) == 1\n assert Path(real_job.filenames[0]).exists()\n assert len(real_job.filenames) == 1\n assert not \"_R1_\" in real_job.filenames[0]\n assert \".fastq.gz\" in real_job.filenames[0]\n\n should = b\"\"\n for input_fn in [\n (get_sample_data(Path(\"mbf_align/sample_b\") / \"a_R1_.fastq.gz\"))\n ]:\n with gzip.GzipFile(input_fn, \"r\") as op:\n should += op.read()\n with gzip.GzipFile(real_job.filenames[0], \"r\") as op:\n actual = op.read()\n assert actual == should\n\n def test_lane_paired_only_second(self):\n\n lane = Sample(\n \"Sample_a\",\n get_sample_data(Path(\"mbf_align/sample_b\")),\n False,\n vid=\"VA000\",\n pairing=\"only_second\",\n )\n assert lane.vid == \"VA000\"\n temp_job = lane.prepare_input()\n real_job = lane.save_input()\n ppg.run_pipegraph()\n assert not Path(temp_job.filenames[0]).exists()\n assert len(temp_job.filenames) == 1\n assert Path(real_job.filenames[0]).exists()\n assert len(real_job.filenames) == 1\n assert not \"_R1_\" in real_job.filenames[0]\n assert \".fastq.gz\" in real_job.filenames[0]\n\n should = b\"\"\n for input_fn in [\n (get_sample_data(Path(\"mbf_align/sample_b\") / \"a_R2_.fastq.gz\"))\n ]:\n with gzip.GzipFile(input_fn, \"r\") as op:\n should += op.read()\n with gzip.GzipFile(real_job.filenames[0], \"r\") as op:\n actual = op.read()\n assert actual == should\n\n def test_pairing_invalid_value(self):\n with pytest.raises(ValueError):\n Sample(\n \"Sample_a\",\n get_sample_data(Path(\"mbf_align/sample_a\")),\n False,\n pairing=\"do_what_you_want\",\n )\n with pytest.raises(ValueError):\n Sample(\n \"Sample_a\",\n get_sample_data(Path(\"mbf_align/sample_a\")),\n False,\n pairing=False,\n )\n with pytest.raises(ValueError):\n Sample(\n \"Sample_a\",\n get_sample_data(Path(\"mbf_align/sample_a\")),\n False,\n pairing=None,\n )\n with pytest.raises(ValueError):\n Sample(\n \"Sample_a\",\n get_sample_data(Path(\"mbf_align/sample_a\")),\n False,\n pairing=[5],\n 
)\n\n def test_lane_raises_on_pe_as_se(self):\n lane = Sample(\"Sample_a\", get_sample_data(Path(\"mbf_align/sample_b\")), False)\n with pytest.raises(PairingError):\n lane.prepare_input()\n\n def test_lane_with_job_generating_fastq(self):\n def gen_fastq(fn):\n with open(fn, \"wb\") as op:\n op.write(b\"@shu\\nAGTC\\n+\\nasdf\")\n\n job = FileGeneratingJob(\"input.fastq\", gen_fastq)\n\n lane = Sample(\"Sample_a\", job, False, vid=\"VA000\")\n assert lane.vid == \"VA000\"\n temp_job = lane.prepare_input()\n assert job in temp_job.prerequisites\n real_job = lane.save_input()\n ppg.run_pipegraph()\n assert not Path(temp_job.filenames[0]).exists()\n assert Path(real_job.filenames[0]).exists()\n with gzip.GzipFile(real_job.filenames[0], \"r\") as op:\n lines = op.readlines()\n assert len(lines) == 4\n\n def test_align(self, local_store):\n import json\n import gzip\n\n class FakeGenome:\n name = \"FakeGenome\"\n\n def download_genome(self):\n return []\n\n def job_genes(self):\n return []\n\n def job_transcripts(self):\n return []\n\n def build_index(self, aligner, fasta_to_use=None, gtf_to_use=None):\n job = ppg.FileGeneratingJob(\n \"fake_index\", lambda: Path(\"fake_index\").write_text(\"hello\")\n )\n job.output_path = \"fake_index\"\n return job\n\n class FakeAligner:\n name = \"FakeAligner\"\n version = \"0.1\"\n\n def align_job(\n self,\n input_fastq,\n paired_end_filename,\n index_basename,\n output_bam_filename,\n parameters,\n ):\n def align():\n with open(output_bam_filename, \"w\") as op:\n json.dump(\n [\n open(input_fastq).read(200),\n open(paired_end_filename).read(200)\n if paired_end_filename\n else \"\",\n index_basename,\n str(parameters),\n ],\n op,\n )\n with open(str(output_bam_filename) + \".bai\", \"w\") as op:\n op.write(\"Done\")\n\n job = ppg.MultiFileGeneratingJob(\n [output_bam_filename, str(output_bam_filename) + \".bai\"], align\n )\n job.depends_on_params(\"\")\n return job\n\n aligner = FakeAligner()\n lane = Sample(\n \"Sample_a\",\n get_sample_data(Path(\"mbf_align/sample_b\")),\n False,\n vid=\"VA000\",\n pairing=\"paired\",\n )\n genome = FakeGenome()\n params = {\"shu\": 123}\n aligned_lane = lane.align(aligner, genome, params)\n ppg.run_pipegraph()\n assert Path(\"fake_index\").exists()\n assert Path(\"fake_index\").read_text() == \"hello\"\n assert aligned_lane.load()[0].filenames[0].endswith(lane.name + \".bam\")\n assert aligned_lane.load()[0].filenames[1].endswith(lane.name + \".bam.bai\")\n assert Path(aligned_lane.load()[0].filenames[0]).exists()\n with open(aligned_lane.load()[0].filenames[0]) as op:\n actual = json.load(op)\n with gzip.GzipFile(\n get_sample_data(Path(\"mbf_align/sample_b\") / \"a_R1_.fastq.gz\")\n ) as op:\n should_0 = op.read(200).decode(\"utf-8\")\n with gzip.GzipFile(\n get_sample_data(Path(\"mbf_align/sample_b\") / \"a_R2_.fastq.gz\")\n ) as op:\n should_1 = op.read(200).decode(\"utf-8\")\n\n assert actual[0] == should_0\n assert actual[1] == should_1\n assert actual[2] == \"fake_index\"\n assert actual[3] == str(params)\n\n def test_align_parameterDependencyChecking(self, local_store):\n class FakeGenome:\n name = \"FakeGenome\"\n\n def build_index(self, aligner, fasta_to_use=None, gtf_to_use=None):\n job = ppg.FileGeneratingJob(\n \"fake_index\", lambda: Path(\"fake_index\").write_text(\"hello\")\n )\n job.output_path = \"fake_index\"\n return job\n\n class FakeAligner:\n name = \"FakeAligner\"\n version = \"0.1\"\n\n def align_job(\n self,\n input_fastq,\n paired_end_filename,\n index_basename,\n output_bam_filename,\n 
parameters,\n ):\n job = ppg.MultiFileGeneratingJob(\n [output_bam_filename, str(output_bam_filename) + \".bai\"], lambda: 5\n )\n # job.depends_on_params(\"\") # that's the line we check\n return job\n\n aligner = FakeAligner()\n lane = Sample(\n \"Sample_a\",\n get_sample_data(Path(\"mbf_align/sample_b\")),\n False,\n vid=\"VA000\",\n pairing=\"paired\",\n )\n genome = FakeGenome()\n params = {\"shu\": 123}\n with pytest.raises(ppg.JobContractError):\n lane.align(aligner, genome, params)\n\n def test_from_url(self):\n import requests_mock\n\n with requests_mock.mock() as m:\n url = \"https://www.imt.uni-marburg.de/sample.fastq.gz\"\n m.get(url, text=\"hello_world\")\n o = FASTQsFromURLs(url)\n ppg.run_pipegraph()\n assert len(o.target_files) == 1\n assert len(o.dependencies) == 2\n assert Path(o.dependencies[0].filenames[0]).read_text() == \"hello_world\"\n assert Path(o.dependencies[0].filenames[1]).read_text() == url\n assert o() == [(Path(o.dependencies[0].filenames[0]).absolute(),)]\n\n def test_from_url_paired(self):\n import requests_mock\n\n with requests_mock.mock() as m:\n url1 = \"https://www.imt.uni-marburg.de/sample_R1_.fastq.gz\"\n url2 = \"https://www.imt.uni-marburg.de/sample_R2_.fastq.gz\"\n m.get(url1, text=\"hello_world1\")\n m.get(url2, text=\"hello_world2\")\n o = FASTQsFromURLs([url1, url2])\n ppg.run_pipegraph()\n assert len(o.target_files) == 2\n assert len(o.dependencies) == 3\n assert Path(o.dependencies[0].filenames[0]).read_text() == \"hello_world1\"\n assert \"_R1_.fastq.gz\" in o.dependencies[0].filenames[0]\n assert Path(o.dependencies[0].filenames[1]).read_text() == url1\n assert Path(o.dependencies[1].filenames[0]).read_text() == \"hello_world2\"\n assert Path(o.dependencies[1].filenames[1]).read_text() == url2\n assert o() == [\n (\n Path(o.dependencies[0].filenames[0]).absolute(),\n Path(o.dependencies[1].filenames[0]).absolute(),\n )\n ]\n\n def test_from_url_detects_404(self):\n\n with requests_mock.mock() as m:\n url = \"https://www.imt.uni-marburg.de/sample.fastq.gz\"\n m.get(url, text=\"hello_world\", status_code=404)\n o = FASTQsFromURLs(url)\n o.download_files()\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n assert \"Error return\" in str(o.dependencies[0].exception)\n assert url in str(o.dependencies[0].exception)\n\n def test_fastqs_from_err(self):\n with requests_mock.mock() as m:\n m.get(\n \"http://www.ebi.ac.uk/ena/data/warehouse/filereport?accession=ERR2223563&result=read_run&fields=run_accession,fastq_ftp,fastq_md5,fastq_bytes\",\n text=\"\"\"run_accession\tfastq_ftp\tfastq_md5\tfastq_bytes\nERR2223563\tftp.sra.ebi.ac.uk/vol1/fastq/ERR222/003/ERR2223563/ERR2223563_1.fastq.gz;ftp.sra.ebi.ac.uk/vol1/fastq/ERR222/003/ERR2223563/ERR2223563_2.fastq.gz\t0e29c053bcd31072c8bed9eddece1cec;5d848b65379c195fe158a5d7324b4a18\t1170312089;1246298835\"\"\",\n )\n o = FASTQsFromAccession(\"ERR2223563\")\n print(o.urls)\n assert (\n o.urls[0]\n == \"http://ftp.sra.ebi.ac.uk/vol1/fastq/ERR222/003/ERR2223563/ERR2223563_1.fastq.gz\"\n )\n assert (\n o.urls[1]\n == \"http://ftp.sra.ebi.ac.uk/vol1/fastq/ERR222/003/ERR2223563/ERR2223563_2.fastq.gz\"\n )\n\n def test_fastqs_from_mrnas(self):\n @attr.s\n class Transcript:\n transcript_stable_id = attr.ib()\n mrna = attr.ib()\n\n class FakeGenome:\n name = \"FakeGenome\"\n\n def download_genome(self):\n return []\n\n def job_genes(self):\n return []\n\n def job_transcripts(self):\n return []\n\n transcripts = {\n \"tr1\": Transcript(\"gene1\", \"AGTC\"),\n \"tr2\": Transcript(\"gene1\", \"ACCA\"),\n 
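# --- Editor's note: parsing sketch, not part of the original test module. ---
# FASTQsFromAccession (test_fastqs_from_err above) queries the ENA filereport
# endpoint and receives a small TSV such as the mocked text. A stdlib-only
# restatement of turning that TSV into downloadable http:// URLs - written for
# illustration only, it is not the class's actual implementation:

def filereport_tsv_to_urls(tsv_text):
    """Return http:// URLs for every fastq listed in an ENA filereport TSV."""
    lines = [line.split("\t") for line in tsv_text.strip().splitlines()]
    header, rows = lines[0], lines[1:]
    ftp_column = header.index("fastq_ftp")
    urls = []
    for row in rows:
        # fastq_ftp holds one or more ';'-separated ftp host/path entries
        urls.extend("http://" + entry for entry in row[ftp_column].split(";"))
    return urls

# With the mocked response above this yields the two ERR2223563_{1,2}.fastq.gz
# URLs asserted in the test.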
}\n\n o = FASTQsFromMRNAs([\"tr1\", \"tr2\"], FakeGenome(), 2)\n target = o()[0][0]\n ppg.run_pipegraph()\n assert Path(target).exists()\n with open(target) as r:\n seqs, names, quals = zip(*read_fastq_iterator(r))\n assert list(seqs) == [\"AG\", \"GT\", \"TC\", \"AC\", \"CC\", \"CA\"]\n\n\[email protected](\"new_pipegraph\")\nclass TestSamplesQC:\n def test_fastqc(self):\n from mbf_qualitycontrol import get_qc_jobs\n\n lane = Sample(\n \"Sample_a\", get_sample_data(Path(\"mbf_align/sample_a\")), False, vid=\"VA000\"\n )\n qc_jobs = list(get_qc_jobs())\n assert len(qc_jobs) == 1\n assert \"results/lanes/Sample_a/FASTQC/sentinel.txt\" in qc_jobs[0].filenames\n assert lane.prepare_input() in qc_jobs[0].prerequisites\n", "id": "848543", "language": "Python", "matching_score": 3.872351884841919, "max_stars_count": 0, "path": "tests/test_samples.py" }, { "content": "import pypipegraph as ppg\nfrom pathlib import Path\nfrom .strategies import build_fastq_strategy\nfrom . import fastq2\nfrom .exceptions import PairingError\n\n\nclass Sample:\n def __init__(\n self,\n sample_name,\n input_strategy,\n reverse_reads,\n fastq_processor=fastq2.Straight(),\n pairing=\"single\",\n vid=None,\n ):\n \"\"\"A sequenced sample, represented by one or more fastq files\n\n Paramaters\n ----------\n sample_name: str\n name of sample - must be unique\n input_strategy: varied\n see build_fastq_strategy\n reverse_reads: bool\n whether to reverse the reads before processing\n fastq_processor: fastq2.*\n Preprocessing strategy\n pairing: 'auto', 'single', 'paired', 'only_first', 'only_second', 'paired_as_first'\n default: 'auto'\n 'auto' -> discover pairing from presence of R1/R2 files (-> 'single' or 'paired')\n 'single' -> single end sequencing\n 'paired -> 'paired end' sequencing\n 'only_first -> 'paired end' sequencing, but take only R1 reads\n 'only_second' -> 'paired end' sequencing, but take only R2 reads\n 'paired_as_single' -> treat each fragment as an independent read\n vid: str\n sample identification number\n \"\"\"\n self.name = sample_name\n ppg.assert_uniqueness_of_object(self)\n\n self.input_strategy = build_fastq_strategy(input_strategy)\n self.reverse_reads = reverse_reads\n self.fastq_processor = fastq_processor\n self.vid = vid\n accepted_pairing_values = (\n 'auto',\n \"single\",\n \"paired\",\n \"only_first\",\n \"only_second\",\n \"paired_as_single\",\n )\n if not pairing in accepted_pairing_values:\n raise ValueError(\n f\"pairing was not in accepted values: {accepted_pairing_values}\"\n )\n if pairing == 'auto':\n if self.input_strategy.is_paired:\n pairing = 'paired'\n else:\n pairing = 'single'\n self.pairing = pairing\n self.is_paired = self.pairing == \"paired\"\n self.cache_dir = (\n Path(ppg.util.global_pipegraph.cache_folder) / \"lanes\" / self.name\n )\n self.result_dir = Path(\"results\") / \"lanes\" / self.name\n self.cache_dir.mkdir(parents=True, exist_ok=True)\n self.result_dir.mkdir(parents=True, exist_ok=True)\n self.register_qc()\n\n def get_aligner_input_filenames(self):\n if self.is_paired:\n return (\n self.cache_dir / \"input_R1_.fastq\",\n self.cache_dir / \"input_R2_.fastq\",\n )\n else:\n return (self.cache_dir / \"input.fastq\",)\n\n def prepare_input(self):\n # input_strategy returns a list of\n # paired fastq files\n # ie. 
[('A_R1_.fastq1', 'A_R2.fastq', ...), ...]\n\n input_pairs = self.input_strategy()\n any_r2 = any([len(x) > 1 for x in input_pairs])\n # Single end - works from flat list\n if self.pairing == \"single\":\n if any_r2:\n raise PairingError(\n f\"{self.name}: paired end lane defined as single end - you need to change the pairing parameter\"\n )\n input_filenames = [str(f[0]) for f in input_pairs]\n elif self.pairing == \"paired_as_single\":\n input_filenames = [str(f) for fl in input_pairs for f in fl]\n elif self.pairing == \"only_first\":\n input_filenames = [str(f[0]) for f in input_pairs]\n elif self.pairing == \"only_second\":\n input_filenames = [str(f[1]) for f in input_pairs]\n elif self.pairing == \"paired\":\n if not any_r2:\n raise PairingError(f\"Paired end lane, but no R2 reads found. Found files: {input_pairs}\")\n input_filenames = [\n (str(f[0]), str(f[1])) for f in input_pairs\n ] # throwing away all later...\n else:\n raise PairingError(\"unknown pairing\") # pragma: no cover\n if self.pairing == \"paired\":\n flat_input_filenames = [f for fl in input_pairs for f in fl]\n else:\n flat_input_filenames = input_filenames\n\n if hasattr(self.input_strategy, \"dependencies\"):\n deps = self.input_strategy.dependencies\n else:\n deps = [ppg.FileChecksumInvariant(f) for f in flat_input_filenames]\n output_filenames = self.get_aligner_input_filenames()\n\n if self.pairing == \"paired\":\n if hasattr(self.fastq_processor, \"generate_aligner_input_paired\"):\n\n def prep_aligner_input():\n import shutil\n\n self.fastq_processor.generate_aligner_input_paired(\n str(output_filenames[0]) + \".temp\",\n str(output_filenames[1]) + \".temp\",\n input_filenames,\n self.reverse_reads,\n )\n shutil.move(str(output_filenames[0]) + \".temp\", output_filenames[0])\n shutil.move(str(output_filenames[1]) + \".temp\", output_filenames[1])\n\n job = ppg.MultiTempFileGeneratingJob(\n output_filenames, prep_aligner_input\n )\n job.depends_on(\n self.fastq_processor.get_dependencies(\n [str(x) for x in output_filenames]\n )\n )\n else:\n\n def prep_aligner_input_r1():\n import shutil\n\n self.fastq_processor.generate_aligner_input(\n str(output_filenames[0]) + \".temp\",\n [x[0] for x in input_filenames],\n self.reverse_reads,\n )\n shutil.move(str(output_filenames[0]) + \".temp\", output_filenames[0])\n\n def prep_aligner_input_r2():\n import shutil\n\n self.fastq_processor.generate_aligner_input(\n str(output_filenames[1]) + \".temp\",\n [x[1] for x in input_filenames],\n self.reverse_reads,\n )\n shutil.move(str(output_filenames[1]) + \".temp\", output_filenames[1])\n\n jobR1 = ppg.TempFileGeneratingJob(\n output_filenames[0], prep_aligner_input_r1\n )\n jobR2 = ppg.TempFileGeneratingJob(\n output_filenames[1], prep_aligner_input_r2\n )\n\n jobR1.depends_on(\n self.fastq_processor.get_dependencies(str(output_filenames[0]))\n )\n jobR2.depends_on(\n self.fastq_processor.get_dependencies(str(output_filenames[1]))\n )\n job = ppg.JobList([jobR1, jobR2])\n # needed by downstream code.\n job.filenames = [output_filenames[0], output_filenames[1]]\n else:\n\n def prep_aligner_input(output_filename):\n import shutil\n\n self.fastq_processor.generate_aligner_input(\n str(output_filename) + \".temp\", input_filenames, self.reverse_reads\n )\n shutil.move(str(output_filename) + \".temp\", output_filename)\n\n job = ppg.TempFileGeneratingJob(output_filenames[0], prep_aligner_input)\n job.depends_on(\n self.fastq_processor.get_dependencies(str(output_filenames[0]))\n )\n\n job.depends_on(\n deps,\n 
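# --- Editor's note: illustration only, not part of the original module. ---
# The branches above reduce the (R1, R2) tuples returned by the input strategy
# to what is actually handed to the aligner. The same selection rules, restated
# as a dependency-free helper (simplified: only the unknown-mode error is kept):

def _select_reads(input_pairs, pairing):
    if pairing in ("single", "only_first"):
        return [str(p[0]) for p in input_pairs]
    if pairing == "only_second":
        return [str(p[1]) for p in input_pairs]
    if pairing == "paired_as_single":
        return [str(f) for pair in input_pairs for f in pair]
    if pairing == "paired":
        return [(str(p[0]), str(p[1])) for p in input_pairs]
    raise ValueError(f"unknown pairing: {pairing}")

# e.g. _select_reads([("a_R1_.fastq.gz", "a_R2_.fastq.gz")], "paired_as_single")
#      -> ["a_R1_.fastq.gz", "a_R2_.fastq.gz"]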
ppg.ParameterInvariant(\n self.name + \"input_files\",\n tuple(sorted(input_filenames))\n + (self.reverse_reads, self.fastq_processor.__class__.__name__),\n ),\n )\n return job\n\n def save_input(self):\n \"\"\"Store the filtered input also in filename for later reference\"\"\"\n import gzip\n\n temp_job = self.prepare_input()\n output_dir = self.result_dir / \"aligner_input\"\n output_dir.mkdir(exist_ok=True)\n output_names = [output_dir / (Path(x).name + \".gz\") for x in temp_job.filenames]\n pairs = zip(temp_job.filenames, output_names)\n\n def do_store():\n block_size = 10 * 1024 * 1024\n for input_filename, output_filename in pairs:\n op = open(input_filename, \"rb\")\n op_out = gzip.GzipFile(output_filename, \"wb\")\n f = op.read(block_size)\n while f:\n op_out.write(f)\n f = op.read(block_size)\n op_out.close()\n op.close()\n\n return ppg.MultiFileGeneratingJob(output_names, do_store).depends_on(temp_job)\n\n def align(self, aligner, genome, aligner_parameters, name=None):\n from .lanes import AlignedSample\n\n output_dir = (\n Path(\"results\")\n / \"aligned\"\n / (\"%s_%s\" % (aligner.name, aligner.version))\n / genome.name\n / self.name\n )\n output_dir.mkdir(parents=True, exist_ok=True)\n output_filename = output_dir / (self.name + \".bam\")\n input_job = self.prepare_input()\n index_job = genome.build_index(aligner)\n alignment_job = aligner.align_job(\n input_job.filenames[0],\n input_job.filenames[1] if self.is_paired else None,\n index_job.output_path\n if hasattr(index_job, \"output_path\")\n else index_job.filenames[0],\n output_filename,\n aligner_parameters if aligner_parameters else {},\n )\n alignment_job.depends_on(\n input_job,\n index_job,\n # ppg.ParameterInvariant(output_filename, aligner_parameters), # that's the aligner's job.\n )\n for j in alignment_job.prerequisites:\n if isinstance(j, ppg.ParameterInvariant):\n break\n else:\n raise ppg.JobContractError(\n \"aligner (%s).align_job should have added a parameter invariant for aligner parameters\"\n % aligner\n )\n return AlignedSample(\n f\"{self.name if name is None else name}_{aligner.name}\",\n alignment_job,\n genome,\n self.is_paired,\n self.vid,\n output_dir,\n aligner=aligner,\n )\n\n def register_qc(self):\n from mbf_qualitycontrol import qc_disabled\n\n if not qc_disabled():\n self.register_qc_fastqc()\n\n def register_qc_fastqc(self):\n from mbf_externals import FASTQC\n from mbf_qualitycontrol import register_qc\n\n a = FASTQC()\n output_dir = self.result_dir / \"FASTQC\"\n temp_job = self.prepare_input()\n if hasattr(temp_job, 'filenames'):\n filenames = temp_job.filenames\n else:\n filenames = []\n for j in temp_job: # is actually joblist\n filenames.extend(j.filenames)\n\n job = a.run(output_dir, filenames)\n return register_qc(job.depends_on(temp_job))\n", "id": "8624679", "language": "Python", "matching_score": 2.216404676437378, "max_stars_count": 0, "path": "src/mbf_align/raw.py" }, { "content": "class PairingError(ValueError):\n pass\n", "id": "5112298", "language": "Python", "matching_score": 0.0638963058590889, "max_stars_count": 0, "path": "src/mbf_align/exceptions.py" }, { "content": "import re\nimport multiprocessing\nimport time\nimport shutil\nfrom pathlib import Path\n\n\ndef run_tests(modules, anysnake, config, report_only=False):\n all_modules = discover_modules(anysnake.paths[\"code\"])\n if not modules:\n modules = all_modules\n else:\n for k in modules:\n if k not in all_modules:\n raise ValueError(\"module not found\", k)\n print(\"run tests on\", modules)\n output_dir = 
Path(config[\"base\"].get(\"test_result_dir\", \"test_results\"))\n output_dir.mkdir(exist_ok=True)\n (output_dir / \"html\").mkdir(exist_ok=True)\n error_dir = output_dir / \"with_errors\"\n if error_dir.exists():\n shutil.rmtree(error_dir)\n error_dir.mkdir()\n print(\"output results to\", output_dir)\n if not report_only:\n multiplex_tests(modules, output_dir, anysnake, config)\n report_tests(modules, output_dir)\n\n\ndef discover_modules(code_path):\n res = []\n for d in Path(code_path).glob(\"*\"):\n if d.is_dir():\n conf_test_path = d / \"tests\" / \"conftest.py\"\n if conf_test_path.exists():\n res.append(d.name)\n return res\n\n\ndef multiplex_tests(modules, output_dir, anysnake, config):\n cmds = [\n (\n f\"cd /project/code/{module} && pytest --junitxml=/project/{output_dir}/{module}.log --html=/project/{output_dir}/html/{module}.html\",\n anysnake,\n config,\n ii,\n )\n for ii, module in enumerate(modules)\n ]\n p = multiprocessing.Pool(multiprocessing.cpu_count())\n results = p.map(run_single_test, cmds)\n p.close()\n p.join()\n output = \"\"\n for m, results in zip(modules, results):\n if isinstance(results, tuple):\n results = results[0].decode(\"utf-8\") + \"\\n\" + results[1].decode(\"utf-8\")\n else:\n results.decode(\"utf-8\")\n print(m, results)\n output += f\"Module: {m}\\n{results}\\n\\n\\n\"\n (output_dir / \"test_results.txt\").write_text(output.replace(\"\\r\\n\", \"\\n\"))\n print(\"Test results written to %s\" % ((output_dir / \"test_results.txt\",)))\n\n\ndef report_tests(modules, output_dir):\n for m in modules:\n html_filename = output_dir / \"html\" / (m + \".html\")\n if contained_errors(html_filename):\n target = (output_dir / \"with_errors\" / (m + \".html\"))\n target.symlink_to(html_filename.absolute())\n any_errors = True\n if any_errors:\n target = target = (output_dir / \"with_errors\" / 'assets')\n target.symlink_to((output_dir / \"html\" / 'assets').absolute(), True)\n\n\n\n\ndef run_single_test(args):\n cmd, anysnake, config, ii = args\n from .cli import home_files, get_volumes_config\n\n time.sleep(0.01 * ii)\n return anysnake.run_non_interactive(\n cmd,\n allow_writes=False,\n home_files=home_files,\n volumes_ro=get_volumes_config(config, \"additional_volumes_ro\"),\n volumes_rw=get_volumes_config(config, \"additional_volumes_rw\"),\n )\n\n\ndef contained_errors(html_filename):\n source = html_filename.read_text()\n failed = re.findall(r\">(\\d+) failed\", source)\n errors = re.findall(r\">(\\d+) errors\", source)\n unexpected_passes = re.findall(r\">(\\d+) unexpected passes\", source)\n combined = failed + errors + unexpected_passes\n combined = [int(x) for x in combined if int(x) > 0]\n return bool(combined)\n", "id": "10840082", "language": "Python", "matching_score": 2.5885722637176514, "max_stars_count": 0, "path": "src/mbf_anysnake/testing.py" }, { "content": "# -*- coding: future_fstrings -*-\nimport os\nimport tempfile\nimport click\nimport click_completion\nimport sys\n\nclick_completion.init()\n\nfrom pathlib import Path\nfrom mbf_anysnake import parse_requirements, parsed_to_anysnake\nimport subprocess\nfrom .util import get_next_free_port\n\n\nconfig_file = \"anysnake.toml\"\nhome_files = [\".hgrc\", \".git-credentials\", \".gitconfig\"]\nhome_dirs = [\n \".config/fish\",\n \".config/matplotlib\",\n \".cache/matplotlib\",\n \".jupyter\",\n \".local/share/fish\",\n \".local/share/jupyter\",\n \".ipython\",\n]\n\n\[email protected]()\ndef main():\n pass\n\n\ndef get_anysnake():\n parsed = parse_requirements(config_file)\n return 
parsed_to_anysnake(parsed), parsed\n\n\ndef get_volumes_config(config, key2):\n \"\"\"Extract a volumes config from the config if present.\n\n Representation is a dictionary,\n target_path: source_path\n \"\"\"\n result = {}\n for key1 in [\"global_run\", \"run\"]:\n if key1 in config and key2 in config[key1]:\n for (f, t) in config[key1][key2]: # from / to\n result[t] = Path(f).expanduser().absolute()\n return result\n\n\[email protected]()\[email protected](\"--do-time\", default=False, is_flag=True)\ndef build(do_time=False):\n \"\"\"Build everything if necessary - from docker to local venv from project.setup\n Outputs full docker_image:tag\n \"\"\"\n d, _ = get_anysnake()\n d.ensure(do_time)\n print(d.docker_image)\n return d\n\n\[email protected]()\ndef rebuild():\n \"\"\"for each locally cloned package in code,\n call python setup.py install\n \"\"\"\n d, config = get_anysnake()\n d.rebuild()\n\n\[email protected]()\[email protected](\"packages\", nargs=-1, required=True)\ndef remove_pip(packages):\n \"\"\"Remove pip modules, from anysnake.toml.\n If they're installed, remove their installation\n If they're editable, remove their code/folders as well\"\"\"\n import shutil\n import tomlkit\n\n d, config = get_anysnake()\n local_config = tomlkit.loads(Path(\"anysnake.toml\").read_text())\n write_toml = False\n for p in packages:\n if p in local_config.get(\"python\"):\n del local_config[\"python\"][p]\n write_toml = True\n path = d.paths[\"code_clones\"] / p\n if path.exists():\n if click.confirm(f\"really remove {path}?)\"):\n shutil.rmtree(str(path))\n lib_path = (\n d.paths[\"code_venv\"]\n / \"lib\"\n / (\"python\" + d.major_python_version)\n / \"site-packages\"\n )\n print(p + \"*\")\n for f in lib_path.glob(p + \"*\"):\n print(f)\n\n if write_toml:\n import time\n\n backup_filename = \"anysnake.toml.%s\" % time.strftime(\"%Y-%M-%d-%H-%M\")\n print(\"writing new anysnake.toml - old one in %s\" % backup_filename)\n shutil.copy(\"anysnake.toml\", backup_filename)\n with open(\"anysnake.toml\", \"w\") as op:\n op.write(tomlkit.dumps(local_config))\n\n\[email protected]()\ndef rebuild_global_venv():\n raise ValueError(\"todo\")\n\n\[email protected]()\[email protected](\n \"--no-build/--build\",\n default=False,\n help=\"don't perform build if things are missing\",\n)\[email protected](\n \"--allow-writes/--no-allow-writes\", default=False, help=\"mount all volumes rw\"\n)\[email protected](\n \"--include-perf/--no-include-perf\",\n default=False,\n help=\"include perf tool for profiling\",\n)\ndef shell(no_build=False, allow_writes=False, include_perf=False):\n \"\"\"Run a shell with everything mapped (build if necessary)\"\"\"\n\n d, config = get_anysnake()\n if not no_build:\n d.ensure()\n else:\n d.ensure_just_docker()\n cmd = \"\"\"\nif [ -f \"/usr/bin/fish\" ];\nthen\n /usr/bin/fish\nelse\n /bin/bash\nfi\n \"\"\"\n if include_perf:\n cmd = (\n \"sudo apt-get update;\\nsudo apt-get install -y linux-tools-common linux-tools-generic linux-tools-`uname -r`\\n\"\n + cmd\n )\n d.mode = \"shell\"\n print(\n d.run(\n cmd,\n allow_writes=allow_writes,\n home_files=home_files,\n home_dirs=home_dirs,\n volumes_ro=get_volumes_config(config, \"additional_volumes_ro\"),\n volumes_rw=get_volumes_config(config, \"additional_volumes_rw\"),\n )\n )\n\n\[email protected]()\[email protected](\"--no-build/--build\", default=False)\[email protected](\"--pre/--no-pre\", default=True, help=\"run pre_run_inside/outside\")\[email protected](\"--post/--no-post\", default=True, help=\"run 
post_run_inside/outside\")\[email protected](\"cmd\", nargs=-1)\ndef run(cmd, no_build=False, pre=True, post=True):\n \"\"\"Run a command\"\"\"\n import subprocess\n\n d, config = get_anysnake()\n if not no_build:\n d.ensure()\n else:\n d.ensure_just_docker()\n\n pre_run_outside = config.get(\"run\", {}).get(\"pre_run_outside\", False)\n pre_run_inside = config.get(\"run\", {}).get(\"pre_run_inside\", False)\n if pre and pre_run_outside:\n subprocess.Popen(pre_run_outside, shell=True).communicate()\n cmd = \"\\n\" + \" \".join(cmd) + \"\\n\"\n if pre and pre_run_inside:\n cmd = pre_run_inside + cmd\n post_run_outside = config.get(\"run\", {}).get(\"post_run_outside\", False)\n post_run_inside = config.get(\"run\", {}).get(\"post_run_inside\", False)\n if post and post_run_inside:\n cmd += post_run_inside\n d.mode = \"run\"\n print(\n d.run(\n cmd,\n allow_writes=False,\n home_files=home_files,\n home_dirs=home_dirs,\n volumes_ro=get_volumes_config(config, \"additional_volumes_ro\"),\n volumes_rw=get_volumes_config(config, \"additional_volumes_rw\"),\n )\n )\n if post and post_run_outside:\n subprocess.Popen(post_run_outside, shell=True).communicate()\n\n\ndef check_if_nb_extensions_are_activated():\n \"\"\"Check if the nb extensions are activated\"\"\"\n try:\n d = Path(\"~/.jupyter/jupyter_notebook_config.json\").expanduser().read_text()\n return '\"jupyter_nbextensions_configurator\": true' in d\n except IOError:\n return False\n\n\[email protected]()\[email protected](\"--no-build/--build\", default=False)\ndef jupyter(no_build=False):\n \"\"\"Run a jupyter with everything mapped (build if necessary)\"\"\"\n\n d, config = get_anysnake()\n if not no_build:\n d.ensure()\n else:\n d.ensure_just_docker()\n host_port = get_next_free_port(8888)\n print(\"Starting notebook at %i\" % host_port)\n nbextensions_not_activated = not check_if_nb_extensions_are_activated()\n if not \"jupyter_contrib_nbextensions\" in d.global_python_packages:\n d.global_python_packages[\"jupyter_contrib_nbextensions\"] = \"\"\n\n jupyter_r_kernel = \"\"\n if d.R_version and d.R_version >= \"3.6\":\n # this is shit.\n # it will enable this by writing to home/.local/share/jupyter\n # which will be shared by all anysnake runs\n # independent of actual R version (and we need R 3.6 / bioconductor 3.10\n # to actually have an R-kernel available.\n # oh well, it's still much easier and less insane than the other variants\n # of having a dockfill if jupyter get's installed and R > ...\n if not Path(\"~/.local/share/jupyter/kernels/ir\").expanduser().exists():\n jupyter_r_kernel = \"echo 'IRkernel::installspec()' | R --no-save --quiet\\n\"\n\n d.mode = \"jupyter\"\n d.run(\n (\n \"\"\"\n jupyter contrib nbextension install --user --symlink\n jupyter nbextensions_configurator enable --user\n \"\"\"\n if nbextensions_not_activated\n else \"\"\n )\n + jupyter_r_kernel\n + config.get(\"jupyter\", {}).get(\"pre_run_inside\", \"\")\n + \"\"\"jupyter notebook --ip=0.0.0.0 --no-browser\\n\"\"\"\n + config.get(\"jupyter\", {}).get(\"post_run_inside\", \"\"),\n home_files=home_files,\n home_dirs=home_dirs,\n volumes_ro=get_volumes_config(config, \"additional_volumes_ro\"),\n volumes_rw=get_volumes_config(config, \"additional_volumes_rw\"),\n ports=[(host_port, 8888)],\n )\n\n\[email protected]()\[email protected](\"--no-build/--build\", default=False)\[email protected](\"regexps\", nargs=-1)\ndef instant_browser(regexps, no_build=False):\n \"\"\"Run an instant_browser with everything mapped (build if necessary).\"\"\"\n host_port = 
get_next_free_port(8888)\n print(\"Starting instant_browser at %i\" % host_port)\n d, config = get_anysnake()\n if not no_build:\n d.ensure()\n else:\n d.ensure_just_docker()\n\n d.mode = \"instant_browser\"\n d.run(\n \"instant_browser \"\n + \" \".join(\n regexps,\n ),\n home_files=home_files,\n home_dirs=home_dirs,\n volumes_ro=get_volumes_config(config, \"additional_volumes_ro\"),\n volumes_rw=get_volumes_config(config, \"additional_volumes_rw\"),\n ports=[(host_port, 8888)],\n )\n\n\[email protected]()\ndef docker_tag():\n \"\"\"return the currently used docker_tag\n for integration purposes\"\"\"\n d, config = get_anysnake()\n print(d.docker_image)\n\n\[email protected]()\[email protected](\"--no-build/--build\", default=False)\ndef ssh(no_build=False):\n \"\"\"Run an sshd with everything mapped (build if necessary),\n using your authorized_keys keys from ~/.ssh\n\n You might want to use additional_volumes_ro to map in\n some host keys (\n \"/etc/ssh/ssh_host_ecdsa_key\",\n \"/etc/ssh/ssh_host_ed25519_key\",\n \"/etc/ssh/ssh_host_rsa_key\",\n ).\n\n \"\"\"\n\n d, config = get_anysnake()\n if not no_build:\n d.ensure()\n else:\n d.ensure_just_docker()\n host_port = get_next_free_port(8822)\n print(\"Starting sshd at %i\" % host_port)\n if not \".vscode-remote\" in home_dirs:\n home_dirs.append(\".vscode-remote\")\n home_files.append(\".ssh/authorized_keys\")\n\n tf = tempfile.NamedTemporaryFile(mode=\"w\", suffix=\".env\")\n tf.write(\n \"\\n\".join(\n [\n f\"{key}={value}\"\n for (key, value) in d.get_environment_variables({}).items()\n ]\n )\n )\n tf.flush()\n\n volumes_ro = get_volumes_config(config, \"additional_volumes_ro\")\n volumes_ro[Path(tf.name)] = Path(d.paths[\"home_inside_docker\"]) / \".ssh/environment\"\n import pprint\n\n pprint.pprint(volumes_ro)\n d.run(\n f\"\"\"\n echo \"now starting ssh server\"\n echo \"Port 8822\\nPermitUserEnvironment yes\\n\" >/tmp/sshd_config\n sudo /usr/sbin/sshd -D -f /tmp/sshd_config\n #/usr/bin/fish\n \"\"\",\n home_files=home_files,\n home_dirs=home_dirs,\n volumes_ro=volumes_ro,\n volumes_rw=get_volumes_config(config, \"additional_volumes_rw\"),\n ports=[(host_port, 8822)],\n )\n\n\[email protected]()\[email protected](\"modules\", nargs=-1)\[email protected](\"--report-only/--no-report-only\", default=False)\ndef test(modules, report_only):\n \"\"\"Run pytest on all (or a subset) modules that were in the code path and had a tests/conftest.py\"\"\"\n from . 
import testing\n\n d, config = get_anysnake()\n d.ensure()\n testing.run_tests(modules, d, config, report_only)\n\n\[email protected]()\ndef show_config():\n \"\"\"Print the config as it is actually used\"\"\"\n d, parsed = get_anysnake()\n d.pprint()\n print(\"\")\n print(\"Additional volumes\")\n print(\" RO\")\n for outside, inside in get_volumes_config(parsed, \"additional_volumes_ro\").items():\n print(f\" {outside} -> {inside}\")\n print(\" RW\")\n for outside, inside in get_volumes_config(parsed, \"additional_volumes_rw\").items():\n print(f\" {outside} -> {inside}\")\n print(\"\")\n print(\"Config files used:\", parsed[\"used_files\"])\n\n\[email protected]()\ndef show_paths():\n \"\"\"Print the config as it is actually used\"\"\"\n d, parsed = get_anysnake()\n import pprint\n\n print(\"paths detected\")\n pprint.pprint(d.paths)\n\n\[email protected]()\ndef default_config():\n \"\"\"Print a default config\"\"\"\n p = Path(\"anysnake.toml\")\n print(\n \"\"\"[base]\n# docker image to use/build\n# use image:tag for full spec\n# or just 'image' for auto_build from mbf_anysnake\n# docker specs\ndocker_image=\"mbf_anysnake_18.04\"\n\n# optional global config to import\n# global_config=\"/etc/anysnake.tompl\"\n\n# python version to use\npython=\"3.7.2\"\n\n# project_name = folder name of anysnake.toml by default, overwrite here\n# project_name=\"example\"\n\n# bioconductor version to use, R version and CRAN dates are derived from this\n# (optional) \nbioconductor=\"3.8\"\n\n# cran options are 'minimal' (just what's needed from bioconductor) and 'full'\n# (everything)\ncran=\"full\"\n\n# rpy2 version to use.\n# rpy2_version = \"3.2.0\"\n\n\n# where to store the installations\n# python, R, global virtual enviromnments, bioconductor, cran\nstorage_path=\"/var/lib/anysnake\"\n\n# local venv, editable libraries\ncode_path=\"code\"\n\n# install all bioconductor packages whether they need experimental or annotation\n# data or not.\n# bioconductor_whitelist=[\"_full_\"]\n# or install selected packages otherwise omited like this\n# bioconductor_whitelist=[\"chimera\"]\n\n# include rust (if you use bioconductor, rust 1.30.0 will be added automatically)\n# rust = [\"1.30.0\", \"nigthly-2019-03-20\"]\n\n[run]\n# additional folders to map into docker\nadditional_volumes_ro = [['/opt', '/opt']]\nadditional_volumes_rw = [['/home/some_user/.hgrc', '/home/u1000/.hgrc']]\npre_run_outside = \\\"\"\"\n echo \"bash script, runs outside of the continer before 'run'\"\n\\\"\"\"\n\npre_run_inside = \\\"\"\"\n echo \"bash script, runs inside of the continer before 'run' (ie. 
after pre_run_outside)\"\n\\\"\"\"\npost_run_inside = \"echo 'bash script running inside container after run cmd'\"\npost_run_outside = \"echo 'bash script running outside container after run cmd'\"\n\n# python packages installed into global storage\n[global_python]\njupyter=\"\"\n\n# python packages installed locally\n[python]\npandas=\">=0.23\"\n# an editable library\ndppd=\"@git+https://github.com/TyberiusPrime/dppd\"\n# github integration\ndppd_plotine=\"@gh/TyberiusPrime/dppd\"\n\n# additional @something urls for [python]\n# [pip_regexps]\n# @mbf/something ->\n# \"@mbf/(.+)\"=\"@hg+https://mysite.com/hg/\\\\1\"\n# or just @mbf with 'smart' substitiution.\n# @mbf\"=[\"@hg+https://mysite.com/hg/\\\\1\"@\n\n# environmental variables inside the container\n[env]\nINSIDE_ANYSNAKE=\"yes\"\n\n\n\"\"\"\n )\n\n\ndef merge_dicts(a, b, path=None):\n \"merges b into a\"\n if path is None:\n path = []\n for key in b:\n if key in a:\n if isinstance(a[key], dict) and isinstance(b[key], dict):\n merge_dicts(a[key], b[key], path + [str(key)])\n elif a[key] == b[key]:\n pass # same leaf value\n else:\n raise Exception(\"Conflict at %s\" % \".\".join(path + [str(key)]))\n else:\n a[key] = b[key]\n return a\n\n\[email protected]()\ndef freeze():\n \"\"\"Output installed packages in anysnake.toml format\"\"\"\n import tomlkit\n\n d, parsed = get_anysnake()\n output = {}\n for s in d.strategies:\n if hasattr(s, \"freeze\"):\n merge_dicts(output, s.freeze())\n print(tomlkit.dumps(output))\n\n\[email protected]()\ndef version():\n import mbf_anysnake\n\n print(\"mbf_anysnake version %s\" % mbf_anysnake.__version__)\n\n\[email protected]()\[email protected](\n \"-i\", \"--case-insensitive/--no-case-insensitive\", help=\"Case insensitive completion\"\n)\[email protected](\n \"shell\",\n required=False,\n type=click_completion.DocumentedChoice(click_completion.core.shells),\n)\ndef show_completion(shell, case_insensitive):\n \"\"\"Show the click-completion-command completion code\n ie. 
what you need to add to your shell configuration.\n \"\"\"\n extra_env = (\n {\"_CLICK_COMPLETION_COMMAND_CASE_INSENSITIVE_COMPLETE\": \"ON\"}\n if case_insensitive\n else {}\n )\n click.echo(click_completion.core.get_code(shell, extra_env=extra_env))\n\n\ndef extract_ports_from_docker_inspect(info):\n host_inside = {}\n for inside, port_info in info.get(\"NetworkSettings\").get(\"Ports\", {}).items():\n if \"/\" in inside:\n inside = inside[: inside.find(\"/\")]\n host = port_info[0].get(\"HostPort\", \"????\")\n host_inside[host] = inside\n return host_inside\n\n\ndef parse_env(entries):\n \"\"\"entries look like X=abc\"\"\"\n res = {}\n for e in entries:\n if \"=\" in e:\n e = e.split(\"=\", 2)\n res[e[0]] = e[1]\n return res\n\n\[email protected]()\ndef attach():\n \"\"\"attach to anysnake docker running from this folder.\n Will prompt if there are multiple available\"\"\"\n print(\"Which docker would you like to attach to\")\n chosen = select_running_container()\n if chosen is None:\n print(\"No container to attach found\")\n sys.exit(0)\n else:\n print(\"attach shell in \", chosen[0])\n cmd = [\"docker\", 'attach', chosen[1]]\n p = subprocess.Popen(cmd)\n p.communicate()\n sys.exit(0)\n\n\n\[email protected]()\ndef enter():\n \"\"\"exec a fish shell in anysnake docker running from this folder.\n Will prompt if there are multiple available\n \"\"\"\n print(\"Choose container to exec a shell\")\n chosen = select_running_container()\n if chosen is None:\n print(\"No container to enter found\")\n sys.exit(0)\n else:\n print(\"exec shell in \", chosen[0])\n cmd = [\"docker\", \"exec\", \"-it\", chosen[1], \"fish\"]\n p = subprocess.Popen(cmd)\n p.communicate()\n sys.exit(0)\n\n\ndef select_running_container():\n \"\"\"Let the user choose a running container (if multiple), or return the\n only one (name, docker_id).\n Returns none if none are present\n \"\"\"\n import json\n import datetime\n import math\n\n cwd = str(Path(\".\").absolute())\n d, parsed = get_anysnake()\n lines = subprocess.check_output([\"docker\", \"ps\"]).decode(\"utf-8\").split(\"\\n\")\n candidates = []\n for l in lines:\n if d.docker_image in l:\n docker_id = l[: l.find(\" \")]\n info = json.loads(\n subprocess.check_output([\"docker\", \"inspect\", docker_id]).decode(\n \"utf-8\"\n )\n )[0]\n env = parse_env(info.get(\"Config\", {}).get(\"Env\", {}))\n found = False\n found = env.get(\"ANYSNAKE_PROJECT_PATH\", \"\") == cwd\n mode = env.get(\"ANYSNAKE_MODE\", \"??\")\n if found:\n # if mode in ('run','??', 'jupyter'):\n ports = extract_ports_from_docker_inspect(info)\n user = env.get(\"ANYSNAKE_USER\", \"??\")\n start_time = info.get('State', {}).get('StartedAt','')\n if not start_time:\n start_time = datetime.datetime.now()\n else:\n start_time = datetime.datetime.strptime(start_time[:start_time.rfind('.')], \"%Y-%m-%dT%H:%M:%S\")\n\n candidates.append(\n (user, docker_id, info.get(\"Name\", \"/?\")[1:], mode, ports, start_time)\n )\n if len(candidates) == 0:\n return None\n else:\n print(\"Pick one\")\n print(\"Number\",'owner','mode','uptime(hours)','ports', sep=\"\\t\")\n candidates = sorted(candidates, key = lambda x: x[-1]) # sort by runtime\n for (ii, (user, docker_id, name, mode, ports, start_time)) in enumerate(candidates):\n delta = datetime.datetime.now() - start_time\n hours = delta.seconds / 3600\n ts = f\"{math.floor(hours):02}:{math.ceil(60 * (hours - math.floor(hours))):02}\"\n print(ii, user, name, mode, \n ts,\n ports if ports else \"\",\n sep=\"\\t\")\n chosen = sys.stdin.readline().strip()\n 
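# --- Editor's note: illustration only, not part of the original module. ---
# The uptime column printed above is derived from docker inspect's
# State.StartedAt value: strip the fractional seconds, parse the timestamp,
# format the elapsed time as HH:MM. Pulled out into a standalone function
# purely for readability (the CLI keeps doing this inline):

def format_uptime(started_at, now=None):
    import datetime
    import math

    # docker reports e.g. "2019-05-01T12:34:56.789Z" - drop the fraction
    started = datetime.datetime.strptime(
        started_at[: started_at.rfind(".")], "%Y-%m-%dT%H:%M:%S"
    )
    now = now or datetime.datetime.now()
    hours = (now - started).seconds / 3600
    return f"{math.floor(hours):02}:{math.ceil(60 * (hours - math.floor(hours))):02}"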
chosen = int(chosen)\n candidates = [candidates[chosen]]\n if candidates:\n return candidates[0][2], candidates[0][1]\n\n # print(d.docker_image)\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "10043837", "language": "Python", "matching_score": 3.821887493133545, "max_stars_count": 0, "path": "src/mbf_anysnake/cli.py" }, { "content": "# -*- coding: future_fstrings -*-\nimport re\nimport requests\nimport subprocess\nimport time\nimport shutil\nimport time\nfrom pathlib import Path\n\nre_github = r\"[A-Za-z0-9-]+\\/[A-Za-z0-9]+\"\n\n\ndef combine_volumes(ro=[], rw=[]):\n d = dict()\n for (what, mode) in [(ro, \"ro\"), (rw, \"rw\")]:\n if isinstance(what, dict):\n what = [what]\n for dd in what:\n for target, source in dd.items():\n if isinstance(target, dict):\n raise ValueError(\"fix me\")\n elif isinstance(target, tuple):\n raise ValueError(\"fix me\")\n\n source = str(Path(source).absolute())\n d[target] = source, mode\n return d\n\n\ndef find_storage_path_from_other_machine(anysnake, postfix, check_func=None):\n \"\"\"Find a usable storage path for this if it was already done by another machine\n and storage_per_hostname is set. \n Otherwise return the local storage_path / postfix\n \"\"\"\n if check_func is None:\n check_func = lambda x: x.exists()\n search_path = anysnake.paths[\"storage\"].parent.parent\n docker_image = Path(anysnake.paths[\"storage\"].name)\n result = anysnake.paths[\"storage\"] / postfix\n postfix = docker_image / postfix\n if not result.exists():\n if anysnake.storage_per_hostname:\n for d in search_path.glob(\"*\"):\n if d.is_dir():\n if check_func(d / postfix):\n result = d / postfix\n break\n return result\n\n\ndef download_file(url, filename):\n \"\"\"Download a file with requests if the target does not exist yet\"\"\"\n if not Path(filename).exists():\n print(\"downloading\", url, filename)\n r = requests.get(url, stream=True)\n if r.status_code != 200:\n raise ValueError(f\"Error return on {url} {r.status_code}\")\n start = time.time()\n count = 0\n with open(str(filename) + \"_temp\", \"wb\") as op:\n for block in r.iter_content(1024 * 1024):\n op.write(block)\n count += len(block)\n shutil.move(str(filename) + \"_temp\", str(filename))\n stop = time.time()\n print(\"Rate: %.2f MB/s\" % ((count / 1024 / 1024 / (stop - start))))\n\n\ndef dict_to_toml(d):\n import tomlkit\n\n toml = tomlkit.document()\n toml.add(tomlkit.comment(\"Autogenertod by anysnake\"))\n for key, sub_d in d.items():\n table = tomlkit.table()\n for k, v in sub_d.items():\n table.add(k, v)\n toml.add(key, table)\n return toml\n\n\ndef get_next_free_port(start_at):\n import socket\n\n try_next = True\n port = start_at\n while try_next:\n try:\n s = socket.socket()\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n s.bind((\"localhost\", port))\n s.close()\n try_next = False\n except socket.error:\n port += 1\n if port > start_at + 100:\n raise ValueError(\"No empty port found within search range\")\n return port\n\n\ndef clone_repo(url, name, target_path, log_file):\n print(f\"]\\tCloning {name} to {target_path} from {url}\")\n if url.startswith(\"@\"):\n url = url[1:]\n if re.match(re_github, url):\n method = \"git\"\n url = \"https://github.com/\" + url\n elif url.startswith(\"git+\"):\n method = \"git\"\n url = url[4:]\n elif url.startswith(\"hg+\"):\n method = \"hg\"\n url = url[3:]\n else:\n raise ValueError(\n \"Could not parse url / must be git+http(s) / hg+https, or github path\"\n )\n if method == \"git\":\n try:\n subprocess.check_call(\n [\"git\", \"clone\", url, 
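# --- Editor's note: usage illustration, not part of the original module. ---
# get_next_free_port() (defined above) probes candidate ports by binding a
# short-lived SO_REUSEADDR socket and walking upwards through at most ~100
# ports. The jupyter/ssh/instant_browser commands in cli.py use it exactly
# like this:

def demo_next_free_port():
    port = get_next_free_port(8888)  # first bindable port >= 8888
    print("would start the service on port", port)
    # The port is only probed, not reserved: there is a small window between
    # this call and the service actually binding it.
    return port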
str(target_path)],\n stdout=log_file,\n stderr=log_file,\n )\n except subprocess.CalledProcessError:\n import shutil\n\n shutil.rmtree(target_path)\n raise\n elif method == \"hg\":\n try:\n subprocess.check_call(\n [\"hg\", \"clone\", url, str(target_path)], stdout=log_file, stderr=log_file\n )\n except subprocess.CalledProcessError:\n import shutil\n\n if target_path.exists():\n shutil.rmtree(target_path)\n raise\n", "id": "2258484", "language": "Python", "matching_score": 1.4244141578674316, "max_stars_count": 0, "path": "src/mbf_anysnake/util.py" }, { "content": "# *- coding: future_fstrings -*-\n\nimport requests\nfrom pathlib import Path\nimport re\nfrom .util import find_storage_path_from_other_machine, download_file\n\n\nclass DockFill_Bioconductor:\n def __init__(self, anysnake, dockfill_r):\n self.anysnake = anysnake\n self.dockfill_r = dockfill_r\n self.paths = self.anysnake.paths\n self.bioconductor_version = anysnake.bioconductor_version\n self.bioconductor_whitelist = anysnake.bioconductor_whitelist\n self.cran_mode = anysnake.cran_mode\n\n self.done_string = (\n \"done:\" + self.cran_mode + \":\" + \":\".join(self.bioconductor_whitelist)\n )\n bc_path = find_storage_path_from_other_machine(\n self.anysnake,\n Path(\"bioconductor\") / self.bioconductor_version,\n self.is_done,\n )\n self.paths.update(\n {\n \"storage_bioconductor\": bc_path,\n \"docker_storage_bioconductor\": \"/anysnake/bioconductor\",\n \"storage_bioconductor_download\": (\n self.paths[\"storage\"]\n / \"bioconductor_download\"\n / self.bioconductor_version\n ),\n \"docker_storage_bioconductor_download\": (\n str(Path(\"/anysnake/bioconductor_download\"))\n ),\n \"log_bioconductor\": (\n self.paths[\"log_storage\"]\n / f\"anysnake.bioconductor.{self.bioconductor_version}.log\"\n ),\n \"log_bioconductor.todo\": (\n self.paths[\"log_storage\"]\n / f\"anysnake.bioconductor.{self.bioconductor_version}.todo.log\"\n ),\n \"project_bioconductor\": self.paths['code'] / 'venv' / 'bioconductor'/ self.bioconductor_version,\n \"docker_project_bioconductor\": Path('/project') / 'code' / 'venv' / 'bioconductor'/ self.bioconductor_version,\n }\n )\n self.volumes = {\n self.paths[\"docker_storage_bioconductor\"]: self.paths[\n \"storage_bioconductor\"\n ],\n self.paths[\"docker_storage_bioconductor_download\"]: self.paths[\n \"storage_bioconductor_download\"\n ],\n }\n self.env = {\n \"R_LIBS_SITE\": \"/anysnake/bioconductor\",\n \"R_LIBS\": self.paths['docker_project_bioconductor']\n }\n\n def is_done(self, path):\n done_file = path / \"done.sentinel\"\n return done_file.exists() and done_file.read_text() == self.done_string\n\n def pprint(self):\n print(f\" Bioconductor version={self.bioconductor_version}\")\n\n @staticmethod\n def fetch_bioconductor_release_information():\n import maya\n\n url = \"https://bioconductor.org/about/release-announcements/\"\n bc = requests.get(url).text\n tbody = bc[\n bc.find(\"<tbody>\") : bc.find(\"</tbody>\")\n ] # at least for now it's the first table on the page\n if not \">3.8<\" in tbody:\n raise ValueError(\n \"Bioconductor relase page layout changed - update fetch_bioconductor_release_information()\"\n )\n try:\n info = {} # release -> {'date': , 'r_major_version':\n for block in tbody.split(\"</tr>\"):\n if not block.strip():\n continue\n if block.count(\"<td style\") != 4:\n print(block.count(\"<td style\"))\n raise ValueError(\n \"Bioconductor relase page layout changed - update fetch_bioconductor_release_information() - too few elements?\"\n )\n tds = block.split(\"<td 
style\")[1:]\n bc_version = re.findall(\"\\d+\\.\\d+\", tds[0])[0]\n release_date = tds[1][tds[1].find('\">') + 2 :]\n release_date = release_date[: release_date.find(\"<\")]\n package_count = re.findall(\">(\\d+)<\", tds[2])[0]\n r_version = re.findall(\">(\\d+\\.\\d+)<\", tds[3])[0]\n\n release_date = maya.parse(release_date)\n release_date = release_date.rfc3339()\n release_date = release_date[: release_date.find(\"T\")]\n\n info[bc_version] = {\n \"date\": release_date,\n \"r_major_version\": r_version,\n \"pckg_count\": int(package_count),\n }\n except:\n print(\n \"Bioconductor relase page layout changed - update fetch_bioconductor_release_information()\"\n )\n raise\n\n return info\n\n @classmethod\n def bioconductor_relase_information(cls, anysnake):\n \"\"\"Fetch the information, annotate it with a viable minor release,\n and cache the results.\n\n Sideeffect: inside one storeage, R does not get minor releases\n with out a change in Bioconductor Version.\n\n Guess you can overwrite R_version in your configuration file.\n \"\"\"\n import tomlkit\n\n anysnake.paths.update(\n {\n \"storage_bioconductor_release_info\": (\n anysnake.paths[\"storage\"]\n / \"bioconductor_release_info\"\n / anysnake.bioconductor_version\n )\n }\n )\n cache_file = anysnake.paths[\"storage_bioconductor_release_info\"]\n if not cache_file.exists():\n cache_file.parent.mkdir(exist_ok=True, parents=True)\n all_info = cls.fetch_bioconductor_release_information()\n if not anysnake.bioconductor_version in all_info:\n raise ValueError(\n f\"Could not find bioconductor {anysnake.bioconductor_version} - check https://bioconductor.org/about/release-announcements/\"\n )\n info = all_info[anysnake.bioconductor_version]\n major = info[\"r_major_version\"]\n url = anysnake.cran_mirror + \"src/base/R-\" + major[0]\n r = requests.get(url).text\n available = re.findall(\"R-(\" + major + r\"\\.\\d+).tar.gz\", r)\n matching = [x for x in available if x.startswith(major)]\n by_minor = [(re.findall(r\"\\d+.\\d+.(\\d+)\", x), x) for x in matching]\n by_minor.sort()\n chosen = by_minor[-1][1]\n info[\"r_version\"] = chosen\n cache_file.write_text(tomlkit.dumps(info))\n raw = cache_file.read_text()\n return tomlkit.loads(raw)\n\n @classmethod\n def find_r_from_bioconductor(cls, anysnake):\n return cls.bioconductor_relase_information(anysnake)[\"r_version\"]\n\n def check_r_bioconductor_match(self):\n info = self.get_bioconductor_release_information()\n major = info[\"r_major_version\"]\n if not self.anysnake.R_version.startswith(major):\n raise ValueError(\n f\"bioconductor {self.bioconductor_version} requires R {major}.*, but you requested {self.R_version}\"\n )\n\n def ensure(self):\n done_file = self.paths[\"storage_bioconductor\"] / \"done.sentinel\"\n should = self.done_string\n self.paths['project_bioconductor'].mkdir(exist_ok=True, parents=True)\n if not done_file.exists() or done_file.read_text() != should:\n info = self.bioconductor_relase_information(self.anysnake)\n # bioconductor can really only be reliably installed with the CRAN\n # packages against which it was developed\n # arguably, that's an illdefined problem\n # but we'll go with \"should've worked at the release date at least\"\n # for now\n # Microsoft's snapshotted cran mirror to the rescue\n\n mran_url = f\"https://cran.microsoft.com/snapshot/{info['date']}/\"\n\n urls = {\n \"software\": f\"https://bioconductor.org/packages/{self.bioconductor_version}/bioc/\",\n \"annotation\": 
f\"https://bioconductor.org/packages/{self.bioconductor_version}/data/annotation/\",\n \"experiment\": f\"https://bioconductor.org/packages/{self.bioconductor_version}/data/experiment/\",\n \"cran\": mran_url,\n }\n for k, url in urls.items():\n cache_path = self.paths[\"storage_bioconductor_download\"] / (\n k + \".PACKAGES\"\n )\n if not cache_path.exists():\n cache_path.parent.mkdir(exist_ok=True, parents=True)\n download_file(url + \"src/contrib/PACKAGES\", cache_path)\n\n bash_script = f\"\"\"\n{self.paths['docker_storage_python']}/bin/virtualenv /tmp/venv\nsource /tmp/venv/bin/activate\npip install pypipegraph requests==2.20.0 future-fstrings packaging numpy\nexport PATH=$PATH:/anysnake/cargo/bin\necho \"cargo?\"\necho `which cargo`\npython {self.paths['docker_storage_bioconductor']}/_inside_dockfill_bioconductor.py\n\"\"\"\n env = {\"URL_%s\" % k.upper(): v for (k, v) in urls.items()}\n env[\"BIOCONDUCTOR_VERSION\"] = self.bioconductor_version\n env[\"BIOCONDUCTOR_WHITELIST\"] = \":\".join(self.bioconductor_whitelist)\n env[\"CRAN_MODE\"] = self.cran_mode\n env[\n \"RUSTUP_TOOLCHAIN\"\n ] = \"1.30.0\" # Todo: combine with the one in parser.py\n volumes = {\n self.paths[\"docker_storage_python\"]: self.paths[\"storage_python\"],\n self.paths[\"docker_storage_venv\"]: self.paths[\"storage_venv\"],\n self.paths[\"docker_storage_r\"]: self.paths[\"storage_r\"],\n self.paths[\"docker_storage_bioconductor\"]\n / \"_inside_dockfill_bioconductor.py\": Path(__file__).parent\n / \"_inside_dockfill_bioconductor.py\",\n self.paths[\"docker_storage_bioconductor_download\"]: self.paths[\n \"storage_bioconductor_download\"\n ],\n self.paths[\"docker_storage_bioconductor\"]: self.paths[\n \"storage_bioconductor\"\n ],\n self.paths[\"docker_storage_rustup\"]: self.paths[\"storage_rustup\"],\n self.paths[\"docker_storage_cargo\"]: self.paths[\"storage_cargo\"],\n }\n print(\"calling bioconductor install docker\")\n self.anysnake._run_docker(\n bash_script,\n {\"volumes\": volumes, \"environment\": env},\n \"log_bioconductor\",\n root=True,\n )\n if not self.is_done(self.paths[\"storage_bioconductor\"]):\n print(\n f\"bioconductor install failed, check {self.paths['log_bioconductor']}\"\n )\n else:\n print(\"bioconductor install done\")\n return True\n return False\n\n def freeze(self):\n return {\n \"base\": {\n \"bioconductor_version\": self.bioconductor_version,\n \"bioconductor_whitelist\": self.bioconductor_whitelist,\n \"cran\": self.cran_mode,\n }\n }\n", "id": "9652634", "language": "Python", "matching_score": 3.374704599380493, "max_stars_count": 0, "path": "src/mbf_anysnake/dockfill_bioconductor.py" }, { "content": "# -*- coding: future_fstrings -*-\nfrom .util import combine_volumes, find_storage_path_from_other_machine, download_file\nimport re\nfrom pathlib import Path\n\n\nclass DockFill_Rust:\n def __init__(self, anysnake, rust_versions, cargo_install):\n self.anysnake = anysnake\n self.rust_versions = rust_versions\n for v in rust_versions:\n if v.startswith(\"nigthly\") and not re.match(r\"nigthly-\\d{4}-\\d\\d-\\d\\d\", v):\n raise ValueError(\n \"Rust nigthly versions must be dated e.g. nigthly-2019-03-20\"\n )\n elif v.startswith(\"stable\"):\n raise ValueError(\n \"stable is auto updating - use a definied version (e.g. 
1.30.0) instead\"\n )\n self.paths = self.anysnake.paths\n self.paths.update(\n {\n # this does not use the find_storage_path_from_other_machine\n # because rustup will place the binaries in the cargo/bin path\n # but the cargo stuff needs to be per machine because \n # the downloads happen there.\n \"storage_rustup\": self.paths['per_user'] / 'rustup_home', \n \"docker_storage_rustup\": Path(\"/anysnake/rustup_home\"),\n #\"storage_cargo\": self.paths[\"storage\"] / \"rust_cargo\",\n \"storage_cargo\": self.paths[\"per_user\"] / \"rust_cargo\",\n \"docker_storage_cargo\": Path(\"/anysnake/cargo\"),\n \"log_rust\": (self.paths[\"log_storage\"] / f\"anysnake.rust.log\"),\n }\n )\n self.volumes = {\n self.paths[\"docker_storage_rustup\"]: self.paths[\"storage_rustup\"]\n }\n self.rw_volumes = {\n self.paths[\"docker_storage_cargo\"]: self.paths[\"storage_cargo\"]\n }\n self.env = {\n \"RUSTUP_HOME\": self.paths[\"docker_storage_rustup\"],\n \"CARGO_HOME\": self.paths[\"docker_storage_cargo\"],\n \"RUSTUP_TOOLCHAIN\": self.rust_versions[0],\n }\n self.shell_path = str(self.paths[\"docker_storage_cargo\"] / \"bin\")\n\n def pprint(self):\n print(f\" Rust versions={self.rust_versions}\")\n\n def ensure(self):\n self.paths[\"storage_rustup\"].mkdir(exist_ok=True)\n self.paths[\"storage_cargo\"].mkdir(exist_ok=True)\n installed_versions = self.get_installed_rust_versions()\n missing = set(self.rust_versions).difference(installed_versions)\n if missing:\n print(\"installing rust versions \", missing)\n download_file(\n \"https://sh.rustup.rs\", self.paths[\"storage_rustup\"] / \"rustup.sh\"\n )\n env = {\n \"RUSTUP_HOME\": self.paths[\"docker_storage_rustup\"],\n \"CARGO_HOME\": self.paths[\"docker_storage_cargo\"],\n }\n cmd = f\"\"\"\n sudo -E -u {self.anysnake.get_login_username()} sh $RUSTUP_HOME/rustup.sh -y --default-toolchain none\n sudo -E -u {self.anysnake.get_login_username()} mkdir -p $RUSTUP_HOME/anysnake\n export PATH=$PATH:$CARGO_HOME/bin\n echo \"rustup default {self.rust_versions[0]}\"\n sudo -E -u {self.anysnake.get_login_username()} $CARGO_HOME/bin/rustup default {self.rust_versions[0]}\n \"\"\"\n for version in self.rust_versions:\n if not version in installed_versions:\n if version.startswith('nightly'):\n force = '--force'\n else:\n force = ''\n cmd += f\"sudo -E -u {self.anysnake.get_login_username()} $CARGO_HOME/bin/rustup toolchain install {version} {force}&& $CARGO_HOME/bin/cargo && touch $RUSTUP_HOME/anysnake/{version}.done\\n\"\n volumes = {\n self.paths[\"docker_storage_rustup\"]: self.paths[\"storage_rustup\"],\n self.paths[\"docker_storage_cargo\"]: self.paths[\"storage_cargo\"],\n }\n self.anysnake._run_docker(\n cmd, {\"volumes\": volumes, \"environment\": env}, \"log_rust\", root=True\n )\n installed_now = self.get_installed_rust_versions()\n if missing.difference(installed_now):\n raise ValueError(f\"rust install failed, check {self.paths['log_rust']}\")\n else:\n print(\"rust install done\")\n return True\n return False\n\n def get_installed_rust_versions(self):\n result = set()\n p = self.paths[\"storage_rustup\"] / \"anysnake\"\n if p.exists():\n for d in p.glob(\"*.done\"):\n v = d.name[:-5]\n result.add(v)\n return result\n", "id": "6826595", "language": "Python", "matching_score": 1.9769643545150757, "max_stars_count": 0, "path": "src/mbf_anysnake/dockfill_rust.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n Use setup.cfg to configure your project.\n\n This file was generated with PyScaffold 3.1.\n PyScaffold helps you to put up 
the scaffold of your new Python project.\n Learn more under: https://pyscaffold.org/\n\"\"\"\nimport sys\n\nfrom pkg_resources import require, VersionConflict\nfrom setuptools import setup\n\nfrom setuptools_rust import Binding, RustExtension\n\ntry:\n require(\"setuptools>=38.3\")\nexcept VersionConflict:\n print(\"Error: version of setuptools is too old (<38.3)!\")\n sys.exit(1)\n\n# import subprocess\n# toolchains = subprocess.check_output([\"rustup\", \"toolchain\", \"list\"]).decode('utf-8').split(\"\\n\")\n# nightlys = sorted([x[:x.find(' ') if x.find(' ') != -1 else None] for x in toolchains if x.startswith('nightly')])\n# import os\n# os.environ['RUSTUP_TOOLCHAIN'] = nightlys[0] # use the oldest one...\n# print(\"USING\", os.environ['RUSTUP_TOOLCHAIN'])\n\n\nif __name__ == \"__main__\":\n setup(\n rust_extensions=[\n RustExtension(\"mbf_bam.mbf_bam\", binding=Binding.PyO3, debug=False)\n ],\n )\n", "id": "5221993", "language": "Python", "matching_score": 0.992434024810791, "max_stars_count": 0, "path": "setup.py" }, { "content": "from pkg_resources import get_distribution, DistributionNotFound\n\ntry:\n __version__ = get_distribution(__name__).version\nexcept DistributionNotFound: # pragma: no cover\n # package is not installed\n __version__ = \"unknown\"\n pass\n\ntry: # we need to ignore the import error (module not build) for poetry to be able to determine the version\n from .mbf_bam import *\n from pathlib import Path\n import pypipegraph as ppg\n import pysam\n import tempfile\nexcept ImportError:\n pass\n\n\ndef reheader_and_rename_chromosomes(in_bam_file, out_bam_file, replacements):\n with pysam.Samfile(in_bam_file) as f:\n h = str(f.header)\n org_header = h\n for a, b in replacements.items():\n h = h.replace(f\"SN:{a}\", f\"SN:{b}\")\n if h == org_header:\n raise ValueError(\"No replacement happened\")\n tf = tempfile.NamedTemporaryFile()\n tf.write(h.encode(\"utf-8\"))\n tf.flush()\n out_bam_file.write_text(\"\") # must be there for save_stdout to work..\n pysam.reheader(\n tf.name,\n str(in_bam_file.absolute()),\n save_stdout=str(out_bam_file.absolute()).encode(\"utf-8\"),\n )\n pysam.index(str(out_bam_file))\n\n\ndef job_reheader_and_rename_chromosomes(input_bam_path, output_bam_path, replacements):\n input_path_bam = Path(input_bam_path)\n output_bam_path = Path(output_bam_path)\n\n def do_replace(replacements=replacements):\n reheader_and_rename_chromosomes(input_bam_path, output_bam_path, replacements)\n\n output_bam_path.parent.mkdir(exist_ok=True, parents=True)\n return ppg.MultiFileGeneratingJob(\n [output_bam_path, output_bam_path.with_suffix(\".bam.bai\")], do_replace\n ).depends_on(\n ppg.FileInvariant(input_bam_path),\n ppg.FunctionInvariant(\n \"mbf_bam.reheader_and_rename_chromosomes\", reheader_and_rename_chromosomes\n ),\n )\n\n__version__ = '0.1.8'\n", "id": "3476202", "language": "Python", "matching_score": 2.393953800201416, "max_stars_count": 0, "path": "mbf_bam/__init__.py" }, { "content": "from .anysnake import Anysnake\nfrom .parser import parse_requirements, parsed_to_anysnake\n\nfrom pkg_resources import get_distribution, DistributionNotFound\ntry:\n __version__ = get_distribution(__name__).version\nexcept DistributionNotFound:\n # package is not installed\n pass\n\n__all__ = [Anysnake, parse_requirements, parsed_to_anysnake, __version__]\n", "id": "4109251", "language": "Python", "matching_score": 0.2621033191680908, "max_stars_count": 0, "path": "src/mbf_anysnake/__init__.py" }, { "content": "from .util import clone_repo\n\n\nclass 
DockFill_Clone:\n \"\"\"Just clone arbitrary repos and do nothing with them\"\"\"\n\n def __init__(self, anysnake):\n self.anysnake = anysnake\n self.paths = self.anysnake.paths\n\n self.paths.update(\n {\n \"storage_clones\": self.paths[\"storage\"] / \"clones\",\n \"code_clones\": self.paths[\"code\"] / \"clones\",\n \"docker_storage_clones\": \"/anysnake/clones\",\n \"docker_code_clones\": \"/anysnake/code_clones\",\n }\n )\n self.volumes = {\n anysnake.paths[\"docker_storage_clones\"]: anysnake.paths[\"storage_clones\"],\n anysnake.paths[\"docker_code_clones\"]: anysnake.paths[\"code_clones\"],\n }\n self.paths['storage_clones'].mkdir(exist_ok=True)\n self.paths['code_clones'].mkdir(exist_ok=True)\n\n def pprint(self):\n print(\" Global cloned repos\")\n for entry in self.anysnake.global_clones.items():\n print(\" {}\".format(entry))\n print(\" Locally cloned repos\")\n for entry in self.anysnake.local_clones.items():\n print(\" {}\".format(entry))\n\n def ensure(self):\n cloned = False\n with (self.paths[\"storage_clones\"] / \"log.txt\").open(\"w\") as log_file:\n for name, source in self.anysnake.global_clones.items():\n cloned |= self.clone(name, source, self.paths[\"storage_clones\"], log_file)\n with (self.paths[\"code_clones\"] / \"log.txt\").open(\"w\") as log_file:\n for name, source in self.anysnake.local_clones.items():\n cloned |= self.clone(name, source, self.paths[\"code_clones\"], log_file)\n return cloned\n\n def clone(self, name, source, target_path, log_file):\n if not (target_path / name).exists():\n clone_repo(source, name, target_path / name, log_file)\n return True\n return False\n", "id": "8047912", "language": "Python", "matching_score": 0.45708709955215454, "max_stars_count": 0, "path": "src/mbf_anysnake/dockfill_clone.py" }, { "content": "from abc import ABC\n\nfrom mbf_fileformats.util import open_file, chunkify\n\n\ndef iter_fasta(filenameOrFileLikeObject, keyFunc=None, block_size=None):\n \"\"\"An iterator over a fasta file (raw or gzipped).\n Yields tupples of key, sequence (bytes!) 
on each iteration\n \"\"\"\n o = open_file(filenameOrFileLikeObject)\n key = \"\"\n for chunk in chunkify(o, b\"\\n>\", block_size=block_size):\n key = chunk[: chunk.find(b\"\\n\")].strip()\n if key.startswith(b\">\"):\n key = key[1:]\n if keyFunc:\n key = keyFunc(key)\n if chunk.find(b\"\\n\") != -1:\n seq = chunk[chunk.find(b\"\\n\") + 1 :].replace(b\"\\r\", b\"\").replace(b\"\\n\", b\"\")\n else:\n raise ValueError(\"Should not be reached\") # pragma: no cover\n # seq = b\"\"\n yield (key, seq)\n return\n\n\ndef wrappedIterator(width):\n def inner(text):\n i = 0\n length = len(text)\n while i < length:\n yield text[i : i + width]\n i += width\n\n return inner\n\n\nrc_table = str.maketrans(\"agctAGCT\", \"tcgaTCGA\")\niupac_forward = \"ACGTRYMKSWBDHVN\"\niupac_reverse = \"TGCAYRKMSWVHDBN\"\niupac_forward += iupac_forward.lower()\niupac_reverse += iupac_reverse.lower()\niupac_rc_table = str.maketrans(\n iupac_forward + iupac_forward.upper(), iupac_reverse + iupac_reverse.upper()\n)\n\n\ndef reverse_complement(s):\n \"\"\"return complementary, reversed sequence to x (keeping case)\"\"\"\n return s.translate(rc_table)[::-1]\n\n\ndef reverse_complement_iupac(s):\n \"\"\"return complementary, reversed sequence to x (keeping case)\"\"\"\n return s.translate(iupac_rc_table)[::-1]\n\n\nuniversal_genenetic_code = {\n \"ATA\": \"I\",\n \"ATC\": \"I\",\n \"ATT\": \"I\",\n \"ATG\": \"M\",\n \"ACA\": \"T\",\n \"ACC\": \"T\",\n \"ACG\": \"T\",\n \"ACT\": \"T\",\n \"AAC\": \"N\",\n \"AAT\": \"N\",\n \"AAA\": \"K\",\n \"AAG\": \"K\",\n \"AGC\": \"S\",\n \"AGT\": \"S\",\n \"AGA\": \"R\",\n \"AGG\": \"R\",\n \"CTA\": \"L\",\n \"CTC\": \"L\",\n \"CTG\": \"L\",\n \"CTT\": \"L\",\n \"CCA\": \"P\",\n \"CCC\": \"P\",\n \"CCG\": \"P\",\n \"CCT\": \"P\",\n \"CAC\": \"H\",\n \"CAT\": \"H\",\n \"CAA\": \"Q\",\n \"CAG\": \"Q\",\n \"CGA\": \"R\",\n \"CGC\": \"R\",\n \"CGG\": \"R\",\n \"CGT\": \"R\",\n \"GTA\": \"V\",\n \"GTC\": \"V\",\n \"GTG\": \"V\",\n \"GTT\": \"V\",\n \"GCA\": \"A\",\n \"GCC\": \"A\",\n \"GCG\": \"A\",\n \"GCT\": \"A\",\n \"GAC\": \"D\",\n \"GAT\": \"D\",\n \"GAA\": \"E\",\n \"GAG\": \"E\",\n \"GGA\": \"G\",\n \"GGC\": \"G\",\n \"GGG\": \"G\",\n \"GGT\": \"G\",\n \"TCA\": \"S\",\n \"TCC\": \"S\",\n \"TCG\": \"S\",\n \"TCT\": \"S\",\n \"TTC\": \"F\",\n \"TTT\": \"F\",\n \"TTA\": \"L\",\n \"TTG\": \"L\",\n \"TAC\": \"Y\",\n \"TAT\": \"Y\",\n \"TAA\": \"*\",\n \"TAG\": \"*\",\n \"TGC\": \"C\",\n \"TGT\": \"C\",\n \"TGA\": \"*\",\n \"TGG\": \"W\",\n}\n\n\nclass GeneticCode(ABC):\n @classmethod\n def translate_dna(cls, sequence, raise_on_non_multiple_of_three=True):\n if raise_on_non_multiple_of_three and len(sequence) % 3 != 0:\n raise ValueError(\"len(sequence) was not a multiple of 3\")\n genetic_code = cls.genetic_code\n proteinseq = \"\"\n sequence = sequence.upper()\n if sequence[:3] in cls.start_codons:\n proteinseq += \"M\"\n else:\n proteinseq += genetic_code[sequence[:3]]\n for n in range(3, len(sequence), 3):\n proteinseq += genetic_code[sequence[n : n + 3]]\n return proteinseq\n\n @classmethod\n def translate_dna_till_stop(cls, sequence, genetic_code=None):\n genetic_code = cls.genetic_code\n proteinseq = \"\"\n sequence = sequence.upper()\n sequence = sequence.upper()\n if sequence[:3] in cls.start_codons:\n proteinseq += \"M\"\n else:\n proteinseq += genetic_code[sequence[:3]]\n for n in range(3, len(sequence), 3): # pragma: no branch\n try:\n codon = sequence[n : n + 3]\n x = genetic_code[codon]\n proteinseq += x\n if x == \"*\":\n break\n except KeyError:\n if len(codon) < 3:\n raise 
ValueError(\"No stop codon found\")\n else:\n raise NotImplementedError( # pragma: no cover\n \"Incomplete genetic code?, codon %s not found\" % codon\n )\n return proteinseq\n\n\nclass EukaryoticCode(GeneticCode):\n \"\"\"Genetic code for eukaryotes\"\"\"\n\n genetic_code = universal_genenetic_code\n start_codons = [\"ATG\"]\n\n\nclass ProkaryoticCode(GeneticCode):\n \"\"\"Genetic code for prokaryotes - e.g. E. coli\"\"\"\n\n genetic_code = universal_genenetic_code\n # for e coli, from wikipedia) - 83% AUG (3542/4284), 14% (612) GUG, 3% (103) UUG[7] and one or two others (e.g., an AUU and possibly a CUG\n start_codons = [\"ATG\", \"GTG\", \"TTG\", \" ATT\", \"CTG\"]\n\n\ndef df_to_rows(df, columns_to_choose=None):\n \"\"\"Turn a DataFrame into named tuples\n index -> {columnA: X, columnB: Y}\n You can then use that much faster than\n accessing df.loc[] repeatedly\n right now (2019-03-25) for whatever reason\n \"\"\"\n if columns_to_choose is None: # pragma: no branch\n pass # pragma: no cover\n else:\n df = df[columns_to_choose]\n if df.index.duplicated().any(): # pragma: no cover\n raise ValueError(\"df_to_rows needs a unique index\")\n result = {}\n for row in df.itertuples():\n result[row[0]] = row\n return result\n", "id": "5504645", "language": "Python", "matching_score": 4.171039581298828, "max_stars_count": 0, "path": "src/mbf_genomes/common.py" }, { "content": "import pytest\nfrom mbf_genomes.common import (\n reverse_complement,\n iter_fasta,\n EukaryoticCode,\n reverse_complement_iupac,\n)\nfrom mbf_sampledata import get_sample_data\n\n\ndef test_reverse_complement():\n assert reverse_complement(\"AGTC\") == \"GACT\"\n assert reverse_complement(\"agtc\") == \"gact\"\n assert reverse_complement(\"Agtc\") == \"gacT\"\n assert reverse_complement(\"Ngtc\") == \"gacN\"\n\n\ndef test_iter_fasta():\n fn = get_sample_data(\"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.dna.toplevel.fa.gz\")\n a = list(iter_fasta(fn))\n assert len(a[0][1]) == 159662\n b = list(iter_fasta(fn, block_size=10))\n assert a == b\n\n\ndef test_translate_raises_on_non_multiple_of_three():\n with pytest.raises(ValueError):\n EukaryoticCode.translate_dna(\"AA\")\n\n\ndef test_translate_non_start():\n assert EukaryoticCode.translate_dna(\"TTG\") == \"L\"\n\n\ndef test_translate_till_stop():\n assert EukaryoticCode.translate_dna_till_stop(\"ATGTTGTAATTG\") == \"ML*\"\n\n\ndef test_translate_till_stop_non_start_start():\n assert EukaryoticCode.translate_dna_till_stop(\"TTGTTGTAATTG\") == \"LL*\"\n\n\ndef test_translate_till_stop_not_three():\n with pytest.raises(ValueError):\n EukaryoticCode.translate_dna_till_stop(\"TTGTTGTA\")\n\n\ndef test_reverse_complement_iupac():\n assert reverse_complement_iupac(\"AGTC\") == \"GACT\"\n assert reverse_complement_iupac(\"agtc\") == \"gact\"\n assert reverse_complement_iupac(\"Agtc\") == \"gacT\"\n assert reverse_complement_iupac(\"Ngtc\") == \"gacN\"\n assert reverse_complement_iupac(\"R\") == \"Y\"\n", "id": "4408147", "language": "Python", "matching_score": 3.439364433288574, "max_stars_count": 0, "path": "tests/test_common.py" }, { "content": "import pytest\nfrom pathlib import Path\nimport pypipegraph as ppg\nfrom mbf_genomes import FileBasedGenome, InteractiveFileBasedGenome\nfrom mbf_genomes.common import iter_fasta, ProkaryoticCode\nfrom mbf_externals.util import UpstreamChangedError\nfrom pandas.testing import assert_frame_equal\nfrom mbf_sampledata import get_sample_data\n\n\[email protected](\"new_pipegraph\")\nclass TestFilebased:\n def 
test_fasta_and_gtf_indexing(self):\n g = FileBasedGenome(\n \"Candidatus_carsonella\",\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.dna.toplevel.fa.gz\"\n ),\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.42.gtf.gz\"\n ),\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.cdna.all.fa.gz\"\n ),\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.pep.all.fa.gz\"\n ),\n )\n g.download_genome()\n g.job_transcripts()\n g.job_genes()\n with pytest.raises(ValueError):\n g.df_transcripts\n with pytest.raises(ValueError):\n g.df_genes\n ppg.run_pipegraph()\n assert g.find_file(\"genome.fasta\").exists()\n assert g.find_prebuild(\"genome.fasta\") == g.genome_fasta_dependencies\n assert g.find_file(\"genome.fasta\").with_suffix(\".fasta.fai\").exists()\n for should_file, actual_file in [\n (\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.dna.toplevel.fa.gz\"\n ),\n g.find_file(\"genome.fasta\"),\n ),\n (\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.cdna.all.fa.gz\"\n ),\n g.find_file(\"cdna.fasta\"),\n ),\n ]:\n should = dict(iter_fasta(should_file))\n should = {k[: k.find(b\" \")]: v for (k, v) in should.items()}\n actual = dict(iter_fasta(actual_file))\n if should != actual:\n assert len(should) == len(actual)\n assert set(should.keys()) == set(actual.keys())\n assert False == should_file # noqa:E712\n tf = g.df_transcripts\n assert \"BAF35033\" in tf.index\n assert not hasattr(g, \"_transcripts\")\n assert tf.loc[\"BAF35033\"].exons == ((1313, 2816),)\n\n gf = g.df_genes\n assert len(gf) == 246\n # transcript_stable_ids is tuples, this genome has only one transcript\n # per gene\n assert set([len(x) for x in gf.transcript_stable_ids]) == set([1])\n\n assert g.find_file(\"pep.fasta\").exists()\n assert g.find_prebuild(\"pep.fasta\") == g.protein_fasta_dependencies\n assert g.find_file(\"pep.fasta\").with_suffix(\".fasta.fai\").exists()\n assert (\n g.get_protein_sequence(\"BAF35037\")\n == \"MFKFINRFLNLKKRYFYIFLINFFYFFNKCNFIKKKKIYKKIITKKFENYLLKLIIQKYAK\"\n )\n\n def test_build_index(self):\n from mbf_externals.aligners.subread import Subread\n\n g = FileBasedGenome(\n \"Candidatus_carsonella\",\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.dna.toplevel.fa.gz\"\n ),\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.42.gtf.gz\"\n ),\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.cdna.all.fa.gz\"\n ),\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.pep.all.fa.gz\"\n ),\n )\n g.download_genome()\n subread = Subread(version=\"1.6.3\")\n index = g.build_index(subread)\n ppg.run_pipegraph()\n assert len(g.df_transcripts) > 0\n assert len(g.get_gtf()) > 0\n assert len(g.df_genes) > 0\n assert len(g.df_proteins) > 0\n assert (Path(index.filenames[0]).parent / \"subread_index.reads\").exists()\n\n def test_cdna_creation(self):\n g = FileBasedGenome(\n \"Candidatus_carsonella\",\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.dna.toplevel.fa.gz\"\n ),\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.42.gtf.gz\"\n ),\n None,\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.pep.all.fa.gz\"\n ),\n )\n g.download_genome()\n g.job_transcripts()\n ppg.run_pipegraph()\n assert g.find_file(\"genome.fasta\").exists()\n assert 
g.find_file(\"genome.fasta\").with_suffix(\".fasta.fai\").exists()\n tf = g.df_transcripts\n assert \"BAF35033\" in tf.index\n assert tf.loc[\"BAF35033\"].exons == ((1313, 2816),)\n\n should = dict(\n iter_fasta(\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.cdna.all.fa.gz\"\n )\n )\n )\n should = {k[: k.find(b\" \")]: v for (k, v) in should.items()}\n actual = dict(iter_fasta(g.find_file(\"cdna.fasta\")))\n if actual != should:\n assert not set(should.keys()).difference(\n set(actual.keys())\n ) # they are all here, we just have more (tRNA...)\n for k in should:\n assert actual[k] == should[k]\n\n def test_empty_gtf_and_cdna_and_protein(self):\n from mbf_externals.aligners.subread import Subread\n\n g = FileBasedGenome(\n \"Candidatus_carsonella\",\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.dna.toplevel.fa.gz\"\n ),\n None,\n None,\n )\n g.download_genome()\n assert g.gtf_filename is None\n assert g.cdna_fasta_filename is None\n g.job_transcripts()\n g.job_genes()\n g.job_proteins()\n subread = Subread(version=\"1.6.3\")\n index = g.build_index(subread)\n ppg.run_pipegraph()\n assert len(g.df_transcripts) == 0\n assert len(g.get_gtf()) == 0\n assert len(g.df_genes) == 0\n assert len(g.df_proteins) == 0\n assert (Path(index.filenames[0]).parent / \"subread_index.reads\").exists()\n\n def test_protein_creation(self):\n g = FileBasedGenome(\n \"Candidatus_carsonella\",\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.dna.toplevel.fa.gz\"\n ),\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.42.gtf.gz\"\n ),\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.cdna.all.fa.gz\"\n ),\n None,\n ProkaryoticCode(),\n )\n g.download_genome()\n g.job_transcripts()\n ppg.run_pipegraph()\n\n should = dict(\n iter_fasta(\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.pep.all.fa.gz\"\n )\n )\n )\n should = {k[: k.find(b\" \")]: v for (k, v) in should.items()}\n actual = dict(iter_fasta(g.find_file(\"pep.fasta\")))\n if actual != should:\n assert not set(should.keys()).difference(\n set(actual.keys())\n ) # they are all here, we just have more (tRNA...)\n for k in should:\n if actual[k] != should[k]:\n print(k)\n print(len(actual[k]))\n print(len(should[k]))\n\n print(actual[k])\n print(should[k])\n # print(g.get_cds_sequence(k.decode('utf-8')))\n # else:\n # print('ok', k)\n # assert actual[k] == should[k]\n assert False\n\n def test_job_creating_fasta(self, new_pipegraph):\n new_pipegraph.quiet = False\n\n def gen_fasta():\n import shutil\n\n shutil.copy(\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.dna.toplevel.fa.gz\"\n ),\n \"shu.fasta.gz\",\n )\n\n fasta_job = ppg.FileGeneratingJob(\"shu.fasta.gz\", gen_fasta)\n g = FileBasedGenome(\n \"Candidatus_carsonella\",\n fasta_job,\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.42.gtf.gz\"\n ),\n None,\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.pep.all.fa.gz\"\n ),\n )\n g.download_genome()\n ppg.run_pipegraph()\n assert (\n g.get_cdna_sequence(\"BAF35032\")\n == \"ATGAATACTATATTTTCAAGAATAACACCATTAGGAAATGGTACGTTATGTGTTATAAGAAT\"\n \"TTCTGGAAAAAATGTAAAATTTTTAATACAAAAAATTGTAAAAAAAAATATAAAAGAAAAAATAG\"\n \"CTACTTTTTCTAAATTATTTTTAGATAAAGAATGTGTAGATTATGCAATGATTATTTTTTTTAAA\"\n \"AAACCAAATACGTTCACTGGAGAAGATATAATCGAATTTCATATTCACAATAATGAAACTATTGT\"\n 
\"AAAAAAAATAATTAATTATTTATTATTAAATAAAGCAAGATTTGCAAAAGCTGGCGAATTTTTAG\"\n \"AAAGACGATATTTAAATGGAAAAATTTCTTTAATAGAATGCGAATTAATAAATAATAAAATTTTA\"\n \"TATGATAATGAAAATATGTTTCAATTAACAAAAAATTCTGAAAAAAAAATATTTTTATGTATAAT\"\n \"TAAAAATTTAAAATTTAAAATAAATTCTTTAATAATTTGTATTGAAATCGCAAATTTTAATTTTA\"\n \"GTTTTTTTTTTTTTAATGATTTTTTATTTATAAAATATACATTTAAAAAACTATTAAAACTTTTA\"\n \"AAAATATTAATTGATAAAATAACTGTTATAAATTATTTAAAAAAGAATTTCACAATAATGATATT\"\n \"AGGTAGAAGAAATGTAGGAAAGTCTACTTTATTTAATAAAATATGTGCACAATATGACTCGATTG\"\n \"TAACTAATATTCCTGGTACTACAAAAAATATTATATCAAAAAAAATAAAAATTTTATCTAAAAAA\"\n \"ATAAAAATGATGGATACAGCAGGATTAAAAATTAGAACTAAAAATTTAATTGAAAAAATTGGAAT\"\n \"TATTAAAAATATAAATAAAATTTATCAAGGAAATTTAATTTTGTATATGATTGATAAATTTAATA\"\n \"TTAAAAATATATTTTTTAACATTCCAATAGATTTTATTGATAAAATTAAATTAAATGAATTAATA\"\n \"ATTTTAGTTAACAAATCAGATATTTTAGGAAAAGAAGAAGGAGTTTTTAAAATAAAAAATATATT\"\n \"AATAATTTTAATTTCTTCTAAAAATGGAACTTTTATAAAAAATTTAAAATGTTTTATTAATAAAA\"\n \"TCGTTGATAATAAAGATTTTTCTAAAAATAATTATTCTGATGTTAAAATTCTATTTAATAAATTT\"\n \"TCTTTTTTTTATAAAGAATTTTCATGTAACTATGATTTAGTGTTATCAAAATTAATTGATTTTCA\"\n \"AAAAAATATATTTAAATTAACAGGAAATTTTACTAATAAAAAAATAATAAATTCTTGTTTTAGAA\"\n \"ATTTTTGTATTGGTAAATGA\"\n )\n\n def test_multiple_fasta_files(self, new_pipegraph):\n import tempfile\n\n tf = tempfile.NamedTemporaryFile(suffix=\".fasta\")\n tf.write(b\">Extra\\nAGTC\")\n tf.flush()\n g = FileBasedGenome(\n \"Candidatus_carsonella\",\n [\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.dna.toplevel.fa.gz\"\n ),\n tf.name,\n ],\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.42.gtf.gz\"\n ),\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.cdna.all.fa.gz\"\n ),\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.pep.all.fa.gz\"\n ),\n )\n g.download_genome()\n ppg.run_pipegraph()\n assert g.get_genome_sequence(\"Extra\", 0, 4) == \"AGTC\"\n assert g.get_chromosome_lengths() == {\"Extra\": 4, \"Chromosome\": 159662}\n\n # test that changing the fasta leads to an explosion\n new_pipegraph.new_pipegraph()\n tf.seek(0, 0)\n tf.write(b\">Extra\\nAGTCA\")\n tf.flush()\n g = FileBasedGenome(\n \"Candidatus_carsonella\",\n [\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.dna.toplevel.fa.gz\"\n ),\n tf.name,\n ],\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.42.gtf.gz\"\n ),\n None,\n )\n g.download_genome()\n with pytest.raises(UpstreamChangedError):\n ppg.run_pipegraph()\n\n def test_get_gtf_using_additional_gtf(self):\n g = FileBasedGenome(\n \"Candidatus_carsonella\",\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.dna.toplevel.fa.gz\"\n ),\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.42.gtf.gz\"\n ),\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.cdna.all.fa.gz\"\n ),\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.pep.all.fa.gz\"\n ),\n )\n g.get_additional_gene_gtfs = lambda: [\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.42.additional.gtf.gz\"\n )\n ]\n g.download_genome()\n j = g.job_genes()\n for x in j.prerequisites:\n if hasattr(x, \"filenames\"):\n print(x, x.filenames)\n if (\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.42.additional.gtf.gz\"\n )\n ) in x.filenames:\n break\n else:\n assert False # wrong preqs\n ppg.run_pipegraph()\n assert \"TEST1_001\" in g.df_genes.index\n\n def 
test_genes_unique_check(self):\n g = FileBasedGenome(\n \"Candidatus_carsonella\",\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.dna.toplevel.fa.gz\"\n ),\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.42.gtf.gz\"\n ),\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.cdna.all.fa.gz\"\n ),\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.pep.all.fa.gz\"\n ),\n )\n g.get_additional_gene_gtfs = lambda: [\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.42.gtf.gz\"\n )\n ]\n g.download_genome()\n job = g.job_genes()\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n assert \"gene_stable_ids were not unique\" in str(job.exception)\n\n def test_transcripts_unique_check(self):\n g = FileBasedGenome(\n \"Candidatus_carsonella\",\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.dna.toplevel.fa.gz\"\n ),\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.42.gtf.gz\"\n ),\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.cdna.all.fa.gz\"\n ),\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.pep.all.fa.gz\"\n ),\n )\n g.get_additional_gene_gtfs = lambda: [\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.42.more_transcripts.gtf.gz\"\n )\n ]\n g.download_genome()\n job = g.job_transcripts()\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n assert \"transcript_stable_ids were not unique\" in str(job.exception)\n\n def test_genes_wrong_start_stop_order(self):\n g = FileBasedGenome(\n \"Candidatus_carsonella\",\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.dna.toplevel.fa.gz\"\n ),\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.42.broken.gtf.gz\"\n ),\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.cdna.all.fa.gz\"\n ),\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.pep.all.fa.gz\"\n ),\n )\n job = g.job_genes()\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n assert \"start > stop\" in str(job.exception)\n\n def test_transcript_wrong_order(self):\n g = FileBasedGenome(\n \"Candidatus_carsonella\",\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.dna.toplevel.fa.gz\"\n ),\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.transcript_wrong_order.gtf.gz\"\n ),\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.cdna.all.fa.gz\"\n ),\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.pep.all.fa.gz\"\n ),\n )\n job = g.job_transcripts()\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n assert \"start > stop\" in str(job.exception)\n\n def test_transcript_exon_outside_transcript(self):\n g = FileBasedGenome(\n \"Candidatus_carsonella\",\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.dna.toplevel.fa.gz\"\n ),\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.transcript_exon_outside.gtf.gz\"\n ),\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.cdna.all.fa.gz\"\n ),\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.pep.all.fa.gz\"\n ),\n )\n job = g.job_transcripts()\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n assert \"Exon 
outside of transcript\" in str(job.exception)\n assert isinstance(job.exception, ValueError)\n\n def test_transcript_transcript_outside_gene(self):\n g = FileBasedGenome(\n \"Candidatus_carsonella\",\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.dna.toplevel.fa.gz\"\n ),\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.transcript_outside_gene.gtf.gz\"\n ),\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.cdna.all.fa.gz\"\n ),\n get_sample_data(\n \"mbf_genomes/Candidatus_carsonella_ruddii_pv.ASM1036v1.pep.all.fa.gz\"\n ),\n )\n job = g.job_transcripts()\n with pytest.raises(ppg.RuntimeError):\n ppg.run_pipegraph()\n assert \"Transcript outside of gene\" in str(job.exception)\n assert isinstance(job.exception, ValueError)\n\n def test_example_genome_and_interactive(self, new_pipegraph):\n from mbf_sampledata import get_Candidatus_carsonella_ruddii_pv\n\n g = get_Candidatus_carsonella_ruddii_pv()\n g.download_genome()\n g.job_genes()\n ppg.run_pipegraph()\n assert g.get_chromosome_lengths()\n new_pipegraph.new_pipegraph()\n Path(g.cache_dir, \"lookup\", \"df_genes.msgpack\").unlink() # so we rerun this\n ia = InteractiveFileBasedGenome(\n \"shu\",\n g.find_file(\"genome.fasta\"),\n g.find_file(\"cdna.fasta\"),\n g.find_file(\"pep.fasta\"),\n g.find_file(\"genes.gtf\"),\n g.cache_dir,\n )\n assert ia.get_chromosome_lengths() == g.get_chromosome_lengths()\n ia.job_genes()\n ppg.run_pipegraph()\n assert_frame_equal(ia.df_genes, g.df_genes)\n ppg.util.global_pipegraph = None\n Path(g.cache_dir, \"lookup\", \"df_genes.msgpack\").unlink() # so we rerun this\n ia2 = InteractiveFileBasedGenome(\n \"shu\",\n g.find_file(\"genome.fasta\"),\n g.find_file(\"cdna.fasta\"),\n g.find_file(\"pep.fasta\"),\n g.find_file(\"genes.gtf\"),\n g.cache_dir,\n )\n ia.job_genes()\n assert ia2.get_chromosome_lengths() == g.get_chromosome_lengths()\n assert_frame_equal(ia2.df_genes, g.df_genes)\n\n def test_get_true_chromosomes(self):\n from mbf_sampledata import get_Candidatus_carsonella_ruddii_pv\n\n g = get_Candidatus_carsonella_ruddii_pv()\n ppg.run_pipegraph()\n assert set(g.get_chromosome_lengths()) == set(g.get_true_chromosomes())\n", "id": "1375635", "language": "Python", "matching_score": 3.8427295684814453, "max_stars_count": 0, "path": "tests/test_filebased.py" }, { "content": "import pypipegraph as ppg\nfrom pathlib import Path\nfrom mbf_sampledata import get_Candidatus_carsonella_ruddii_pv\nfrom mbf_genomes import InteractiveFileBasedGenome\nfrom mbf_genomes import HardCodedGenome # noqa: F401\nfrom pypipegraph.testing import ( # noqa: F401\n RaisesDirectOrInsidePipegraph,\n run_pipegraph,\n force_load,\n)\nfrom mbf_genomics.testing import MockGenome # noqa: F401\n\nppg_genome = None\n\n\ndef get_genome(name='get_genome_genome'):\n global ppg_genome\n cache_dir = Path(__file__).parent / \"run\" / \"genome_cache\"\n if ppg_genome is None or ppg_genome.name != name:\n old_pipegraph = ppg.util.global_pipegraph\n ppg.new_pipegraph()\n g = get_Candidatus_carsonella_ruddii_pv(\n name, cache_dir=cache_dir # , ignore_code_changes=True\n )\n g.download_genome()\n # g.job_genes()\n # g.job_transcripts()\n ppg_genome = g\n ppg.run_pipegraph()\n ppg.util.global_pipegraph = old_pipegraph\n return InteractiveFileBasedGenome(\n name,\n ppg_genome._filename_lookups[\"genome.fasta\"],\n ppg_genome._filename_lookups[\"cdna.fasta\"],\n ppg_genome._filename_lookups[\"proteins.fasta\"],\n ppg_genome._filename_lookups[\"genes.gtf\"],\n 
ppg_genome.cache_dir,\n )\n\n\ndef get_genome_chr_length(chr_lengths=None, genome=None):\n if not genome:\n genome = get_genome()\n # raise ValueError(\"pass in a genome\")\n if chr_lengths is None:\n chr_lengths = {\n \"1\": 100_000,\n \"2\": 200_000,\n \"3\": 300_000,\n \"4\": 400_000,\n \"5\": 500_000,\n }\n if not isinstance(chr_lengths, dict):\n raise TypeError()\n genome = genome\n genome.get_chromosome_lengths = lambda: chr_lengths\n return genome\n\n\ndef inside_ppg():\n return ppg.util.inside_ppg()\n", "id": "135610", "language": "Python", "matching_score": 1.989424467086792, "max_stars_count": 0, "path": "tests/shared.py" }, { "content": "from pathlib import Path\nfrom .base import GenomeBase, HardCodedGenome\nfrom .ensembl import EnsemblGenome\nfrom .filebased import FileBasedGenome, InteractiveFileBasedGenome\n\ndata_path = Path(__file__).parent.parent.parent / \"data\"\n__version__ = '0.4'\n\ndef Homo_sapiens(rev):\n return EnsemblGenome('Homo_sapiens', rev)\ndef Mus_musculus(rev):\n return EnsemblGenome('Mus_musculus', rev)\n\n\n__all__ = [\n \"GenomeBase\",\n \"EnsemblGenome\",\n \"FileBasedGenome\",\n \"InteractiveFileBasedGenome\",\n \"data_path\",\n \"HardCodedGenome\",\n \"Homo_sapiens\",\n \"Mus_musculus\",\n]\n", "id": "9669195", "language": "Python", "matching_score": 1.6689643859863281, "max_stars_count": 0, "path": "src/mbf_genomes/__init__.py" }, { "content": "from . import ddf\n\n__all__ = [ddf]\n\n__version__ = '0.2'", "id": "3399994", "language": "Python", "matching_score": 0.22233429551124573, "max_stars_count": 0, "path": "src/mbf_heatmap/__init__.py" } ]
2.588572
Matgen-project
[ { "content": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Apr 23 20:02:39 2018\r\n\r\n@author: <NAME>\r\n\"\"\"\r\nPotentialDict = {'PotLDA': {'H': 'H.LDA.UPF',\r\n 'Li': 'Li.LDA.UPF',\r\n 'Be': 'Be.LDA.UPF',\r\n 'B': 'B.LDA.UPF',\r\n 'C': 'C.pz-vbc.UPF',\r\n 'N': 'N.LDA.UPF',\r\n 'O': 'O.LDA.UPF',\r\n 'F': 'F.LDA.UPF',\r\n 'Na': 'Na.LDA.UPF',\r\n 'Mg': 'Mg.LDA.UPF',\r\n 'Al': 'Al.pz-vbc.UPF',\r\n 'Si': 'Si.LDA.UPF',\r\n 'P': 'P.LDA.UPF',\r\n 'S': 'S.LDA.UPF',\r\n 'Cl': 'Cl.pz-bhs.UPF',\r\n 'K': 'K.LDA.UPF',\r\n 'Ca': 'Ca.pz-n-vbc.UPF',\r\n 'Ti': 'Ti.LDA.UPF',\r\n 'Mn': 'Mn.LDA.UPF',\r\n 'Fe': 'Fe.LDA.UPF',\r\n 'Cu': 'Cu.LDA.UPF',\r\n 'Zn': 'Zn.LDA.UPF',\r\n 'Ga': 'Ga.LDA.UPF',\r\n 'Ge': 'Ge.pz-bhs.UPF',\r\n 'As': 'As.pz-bhs.UPF',\r\n 'Se': 'Se.pz-bhs.UPF',\r\n 'Br': 'Br.LDA.UPF',\r\n 'In': 'In.pz-bhs.UPF',\r\n 'Sn': 'Sn.pz-bhs.UPF',\r\n 'I': 'I.LDA.UPF'},\r\n 'PotPBE': {'H': 'H.PBE.UPF',\r\n 'Li': 'Li.PBE.UPF',\r\n 'Be': 'Be.PBE.UPF',\r\n 'B': 'B.PBE.UPF',\r\n 'C': 'C.PBE.UPF',\r\n 'N': 'N.PBE.UPF',\r\n 'O': 'O.PBE.UPF',\r\n 'F': 'F.PBE.UPF',\r\n 'Na': 'Na.PBE.UPF',\r\n 'Mg': 'Mg.PBE.UPF',\r\n 'Al': 'Al.PBE.UPF',\r\n 'Si': 'Si.PBE.UPF',\r\n 'P': 'P.PBE.UPF',\r\n 'S': 'S.PBE.UPF',\r\n 'Cl': 'Cl.PBE.UPF',\r\n 'K': 'K.PBE.UPF',\r\n 'Ti': 'Ti.PBE.UPF',\r\n 'Mn': 'Mn.PBE.UPF',\r\n 'Fe': 'Fe.PBE.UPF',\r\n 'Cu': 'Cu.PBE.UPF',\r\n 'Zn': 'Zn.pbe.UPF',\r\n 'Ga': 'Ga.PBE.UPF',\r\n 'Br': 'Br.PBE.UPF',\r\n 'I': 'I.PBE.UPF'},\r\n 'PotSG15': {'Ag': 'Ag_ONCV_PBE-1.0.upf',\r\n 'Al': 'Al_ONCV_PBE-1.0.upf',\r\n 'Ar': 'Ar_ONCV_PBE-1.0.upf',\r\n 'As': 'As_ONCV_PBE-1.0.upf',\r\n 'Au': 'Au_ONCV_PBE-1.0.upf',\r\n 'B': 'B_ONCV_PBE-1.0.upf',\r\n 'Ba': 'Ba_ONCV_PBE-1.0.upf',\r\n 'Be': 'Be_ONCV_PBE-1.0.upf',\r\n 'Bi': 'Bi_ONCV_PBE-1.0.upf',\r\n 'Br': 'Br_ONCV_PBE-1.0.upf',\r\n 'C': 'C_ONCV_PBE-1.0.upf',\r\n 'Ca': 'Ca_ONCV_PBE-1.0.upf',\r\n 'Cd': 'Cd_ONCV_PBE-1.0.upf',\r\n 'Cl': 'Cl_ONCV_PBE-1.0.upf',\r\n 'Co': 'Co_ONCV_PBE-1.0.upf',\r\n 'Cr': 'Cr_ONCV_PBE-1.0.upf',\r\n 'Cs': 'Cs_ONCV_PBE-1.0.upf',\r\n 'Cu': 'Cu_ONCV_PBE-1.0.upf',\r\n 'F': 'F_ONCV_PBE-1.0.upf',\r\n 'Fe': 'Fe_ONCV_PBE-1.0.upf',\r\n 'Ga': 'Ga_ONCV_PBE-1.0.upf',\r\n 'Ge': 'Ge_ONCV_PBE-1.0.upf',\r\n 'H': 'H_ONCV_PBE-1.0.upf',\r\n 'He': 'He_ONCV_PBE-1.0.upf',\r\n 'Hf': 'Hf_ONCV_PBE-1.0.upf',\r\n 'Hg': 'Hg_ONCV_PBE-1.0.upf',\r\n 'I': 'I_ONCV_PBE-1.0.upf',\r\n 'In': 'In_ONCV_PBE-1.0.upf',\r\n 'Ir': 'Ir_ONCV_PBE-1.0.upf',\r\n 'K': 'K_ONCV_PBE-1.0.upf',\r\n 'Kr': 'Kr_ONCV_PBE-1.0.upf',\r\n 'La': 'La_ONCV_PBE-1.0.upf',\r\n 'Li': 'Li_ONCV_PBE-1.0.upf',\r\n 'Mg': 'Mg_ONCV_PBE-1.0.upf',\r\n 'Mn': 'Mn_ONCV_PBE-1.0.upf',\r\n 'Mo': 'Mo_ONCV_PBE-1.0.upf',\r\n 'N': 'N_ONCV_PBE-1.0.upf',\r\n 'Na': 'Na_ONCV_PBE-1.0.upf',\r\n 'Nb': 'Nb_ONCV_PBE-1.0.upf',\r\n 'Ne': 'Ne_ONCV_PBE-1.0.upf',\r\n 'Ni': 'Ni_ONCV_PBE-1.0.upf',\r\n 'O': 'O_ONCV_PBE-1.0.upf',\r\n 'Os': 'Os_ONCV_PBE-1.0.upf',\r\n 'P': 'P_ONCV_PBE-1.0.upf',\r\n 'Pb': 'Pb_ONCV_PBE-1.0.upf',\r\n 'Pd': 'Pd_ONCV_PBE-1.0.upf',\r\n 'Pt': 'Pt_ONCV_PBE-1.0.upf',\r\n 'Rb': 'Rb_ONCV_PBE-1.0.upf',\r\n 'Re': 'Re_ONCV_PBE-1.0.upf',\r\n 'Rh': 'Rh_ONCV_PBE-1.0.upf',\r\n 'Ru': 'Ru_ONCV_PBE-1.0.upf',\r\n 'S': 'S_ONCV_PBE-1.0.upf',\r\n 'Sb': 'Sb_ONCV_PBE-1.0.upf',\r\n 'Sc': 'Sc_ONCV_PBE-1.0.upf',\r\n 'Se': 'Se_ONCV_PBE-1.0.upf',\r\n 'Si': 'Si_ONCV_PBE-1.0.upf',\r\n 'Sn': 'Sn_ONCV_PBE-1.0.upf',\r\n 'Sr': 'Sr_ONCV_PBE-1.0.upf',\r\n 'Ta': 'Ta_ONCV_PBE-1.0.upf',\r\n 'Tc': 'Tc_ONCV_PBE-1.0.upf',\r\n 'Te': 'Te_ONCV_PBE-1.0.upf',\r\n 'Ti': 'Ti_ONCV_PBE-1.0.upf',\r\n 'Tl': 'Tl_ONCV_PBE-1.0.upf',\r\n 'V': 'V_ONCV_PBE-1.0.upf',\r\n 
'W': 'W_ONCV_PBE-1.0.upf',\r\n 'Xe': 'Xe_ONCV_PBE-1.0.upf',\r\n 'Y': 'Y_ONCV_PBE-1.0.upf',\r\n 'Zn': 'Zn_ONCV_PBE-1.0.upf',\r\n 'Zr': 'Zr_ONCV_PBE-1.0.upf'}}\r\n", "id": "899672", "language": "Python", "matching_score": 3.2658286094665527, "max_stars_count": 0, "path": "abacus/potential.py" }, { "content": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Apr 23 20:02:39 2018\r\n\r\n@author: <NAME>\r\n\"\"\"\r\n\r\n\"\"\"\r\nPotential of LDA\r\n\"\"\"\r\npotential_lda = {'H':'H.LDA.UPF',\r\n 'Li':'Li.LDA.UPF',\r\n 'Be':'Be.LDA.UPF',\r\n 'B':'B.LDA.UPF',\r\n 'C':'C.pz-vbc.UPF',\r\n 'N':'N.LDA.UPF',\r\n 'O':'O.LDA.UPF',\r\n 'F':'F.LDA.UPF',\r\n 'Na':'Na.LDA.UPF',\r\n 'Mg':'Mg.LDA.UPF',\r\n 'Al':'Al.pz-vbc.UPF',\r\n 'Si':'Si.LDA.UPF',\r\n 'P':'P.LDA.UPF',\r\n 'S':'S.LDA.UPF',\r\n 'Cl':'Cl.pz-bhs.UPF',\r\n 'K':'K.LDA.UPF',\r\n 'Ca':'Ca.pz-n-vbc.UPF',\r\n 'Ti':'Ti.LDA.UPF',\r\n 'Mn':'Mn.LDA.UPF',\r\n 'Fe':'Fe.LDA.UPF',\r\n 'Cu':'Cu.LDA.UPF',\r\n 'Zn':'Zn.LDA.UPF',\r\n 'Ga':'Ga.LDA.UPF',\r\n 'Ge':'Ge.pz-bhs.UPF',\r\n 'As':'As.pz-bhs.UPF',\r\n 'Se':'Se.pz-bhs.UPF',\r\n 'Br':'Br.LDA.UPF',\r\n 'In':'In.pz-bhs.UPF',\r\n 'Sn':'Sn.pz-bhs.UPF',\r\n 'I':'I.LDA.UPF'}\r\n\r\n\"\"\"\r\nPotential of PBE\r\n\"\"\"\r\npotential_pbe = {'H':'H.PBE.UPF',\r\n 'Li':'Li.PBE.UPF',\r\n 'Be':'Be.PBE.UPF',\r\n 'B':'B.PBE.UPF',\r\n 'C':'C.PBE.UPF',\r\n 'N':'N.PBE.UPF',\r\n 'O':'O.PBE.UPF',\r\n 'F':'F.PBE.UPF',\r\n 'Na':'Na.PBE.UPF',\r\n 'Mg':'Mg.PBE.UPF',\r\n 'Al':'Al.PBE.UPF',\r\n 'Si':'Si.PBE.UPF',\r\n 'P':'P.PBE.UPF',\r\n 'S':'S.PBE.UPF',\r\n 'Cl':'Cl.PBE.UPF',\r\n 'K':'K.PBE.UPF',\r\n 'Ti':'Ti.PBE.UPF',\r\n 'Mn':'Mn.PBE.UPF',\r\n 'Fe':'Fe.PBE.UPF',\r\n 'Cu':'Cu.PBE.UPF',\r\n 'Zn':'Zn.pbe.UPF',\r\n 'Ga':'Ga.PBE.UPF',\r\n 'Br':'Br.PBE.UPF',\r\n 'I':'I.PBE.UPF'}\r\n\r\n\"\"\"\r\nPotential of SG15\r\n\"\"\"\r\npotential_pbe_sg15={'Ag':'Ag_ONCV_PBE-1.0.upf',\r\n 'Al':'Al_ONCV_PBE-1.0.upf',\r\n 'Ar':'Ar_ONCV_PBE-1.0.upf',\r\n 'As':'As_ONCV_PBE-1.0.upf',\r\n 'Au':'Au_ONCV_PBE-1.0.upf',\r\n 'B':'B_ONCV_PBE-1.0.upf',\r\n 'Ba':'Ba_ONCV_PBE-1.0.upf',\r\n 'Be':'Be_ONCV_PBE-1.0.upf',\r\n 'Bi':'Bi_ONCV_PBE-1.0.upf',\r\n 'Br':'Br_ONCV_PBE-1.0.upf',\r\n 'C':'C_ONCV_PBE-1.0.upf',\r\n 'Ca':'Ca_ONCV_PBE-1.0.upf',\r\n 'Cd':'Cd_ONCV_PBE-1.0.upf',\r\n 'Cl':'Cl_ONCV_PBE-1.0.upf',\r\n 'Co':'Co_ONCV_PBE-1.0.upf',\r\n 'Cr':'Cr_ONCV_PBE-1.0.upf',\r\n 'Cs':'Cs_ONCV_PBE-1.0.upf',\r\n 'Cu':'Cu_ONCV_PBE-1.0.upf',\r\n 'F':'F_ONCV_PBE-1.0.upf',\r\n 'Fe':'Fe_ONCV_PBE-1.0.upf',\r\n 'Ga':'Ga_ONCV_PBE-1.0.upf',\r\n 'Ge':'Ge_ONCV_PBE-1.0.upf',\r\n 'H':'H_ONCV_PBE-1.0.upf',\r\n 'He':'He_ONCV_PBE-1.0.upf',\r\n 'Hf':'Hf_ONCV_PBE-1.0.upf',\r\n 'Hg':'Hg_ONCV_PBE-1.0.upf',\r\n 'I':'I_ONCV_PBE-1.0.upf',\r\n 'In':'In_ONCV_PBE-1.0.upf',\r\n 'Ir':'Ir_ONCV_PBE-1.0.upf',\r\n 'K':'K_ONCV_PBE-1.0.upf',\r\n 'Kr':'Kr_ONCV_PBE-1.0.upf',\r\n 'La':'La_ONCV_PBE-1.0.upf',\r\n 'Li':'Li_ONCV_PBE-1.0.upf',\r\n 'Mg':'Mg_ONCV_PBE-1.0.upf',\r\n 'Mn':'Mn_ONCV_PBE-1.0.upf',\r\n 'Mo':'Mo_ONCV_PBE-1.0.upf',\r\n 'N':'N_ONCV_PBE-1.0.upf',\r\n 'Na':'Na_ONCV_PBE-1.0.upf',\r\n 'Nb':'Nb_ONCV_PBE-1.0.upf',\r\n 'Ne':'Ne_ONCV_PBE-1.0.upf',\r\n 'Ni':'Ni_ONCV_PBE-1.0.upf',\r\n 'O':'O_ONCV_PBE-1.0.upf',\r\n 'Os':'Os_ONCV_PBE-1.0.upf',\r\n 'P':'P_ONCV_PBE-1.0.upf',\r\n 'Pb':'Pb_ONCV_PBE-1.0.upf',\r\n 'Pd':'Pd_ONCV_PBE-1.0.upf',\r\n 'Pt':'Pt_ONCV_PBE-1.0.upf',\r\n 'Rb':'Rb_ONCV_PBE-1.0.upf',\r\n 'Re':'Re_ONCV_PBE-1.0.upf',\r\n 'Rh':'Rh_ONCV_PBE-1.0.upf',\r\n 'Ru':'Ru_ONCV_PBE-1.0.upf',\r\n 'S':'S_ONCV_PBE-1.0.upf',\r\n 'Sb':'Sb_ONCV_PBE-1.0.upf',\r\n 'Sc':'Sc_ONCV_PBE-1.0.upf',\r\n 
'Se':'Se_ONCV_PBE-1.0.upf',\r\n 'Si':'Si_ONCV_PBE-1.0.upf',\r\n 'Sn':'Sn_ONCV_PBE-1.0.upf',\r\n 'Sr':'Sr_ONCV_PBE-1.0.upf',\r\n 'Ta':'Ta_ONCV_PBE-1.0.upf',\r\n 'Tc':'Tc_ONCV_PBE-1.0.upf',\r\n 'Te':'Te_ONCV_PBE-1.0.upf',\r\n 'Ti':'Ti_ONCV_PBE-1.0.upf',\r\n 'Tl':'Tl_ONCV_PBE-1.0.upf',\r\n 'V':'V_ONCV_PBE-1.0.upf',\r\n 'W':'W_ONCV_PBE-1.0.upf',\r\n 'Xe':'Xe_ONCV_PBE-1.0.upf',\r\n 'Y':'Y_ONCV_PBE-1.0.upf',\r\n 'Zn':'Zn_ONCV_PBE-1.0.upf',\r\n 'Zr':'Zr_ONCV_PBE-1.0.upf'}\r\n\r\n", "id": "1472571", "language": "Python", "matching_score": 0.2459140419960022, "max_stars_count": 0, "path": "abacus/potential_set.py" }, { "content": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Apr 23 20:07:08 2018\r\n\r\n@author: <NAME>\r\n\"\"\"\r\nBasisDict = {'LDAmin': {'H': 'H_lda_5.0au_50Ry_2s1p',\r\n 'Li': 'Li_lda_8.0au_50Ry_2s2p1d',\r\n 'Be': 'Be_lda_8.0au_50Ry_2s2p1d',\r\n 'B': 'B_lda_7.0au_50Ry_2s2p1d',\r\n 'C': 'C_pz-vbc_6.0au_50Ry_2s2d1p',\r\n 'N': 'N_lda_5.0au_50Ry_2s2p1d',\r\n 'O': 'O_lda_5.0au_50Ry_2s2p1d',\r\n 'F': 'F_lda_5.0au_50Ry_2s2p1d',\r\n 'Na': 'Na_lda_9.0au_50Ry_2s2p1d',\r\n 'Mg': 'Mg_lda_8.0au_50Ry_2s2p1d',\r\n 'Al': 'Al_pz-vbc_9.0au_50Ry_2s2d1p',\r\n 'Si': 'Si_lda_8.0au_50Ry_2s2p1d',\r\n 'P': 'P_lda_7.0au_50Ry_2s2p1d',\r\n 'S': 'S_lda_6.0au_50Ry_2s2p1d',\r\n 'Cl': 'Cl_pz-bhs_6.0au_50Ry_2s2d1p',\r\n 'K': 'K_lda_10.0au_50Ry_2s2p1d',\r\n 'Ca': 'Ca_pz-n-vbc_10.0au_50Ry_2s2p1d',\r\n 'Ti': 'Ti_lda_8.0au_100Ry_2s2p2d',\r\n 'Mn': 'Mn_lda_7.0au_100Ry_2s2p2d',\r\n 'Fe': 'Fe_lda_7.0au_100Ry_2s2p2d',\r\n 'Cu': 'Cu_lda_6.0au_100Ry_2s2p2d',\r\n 'Zn': 'Zn_lda_6.0au_120Ry_2s2p2d',\r\n 'Ga': 'Ga_lda_7.0au_100Ry_2s2p2d',\r\n 'Ge': 'Ge_pz-bhs_8.0au_50Ry_2s2d1p',\r\n 'As': 'As_pz-bhs_7.0au_50Ry_2s2p1d',\r\n 'Se': 'Se_pz-bhs_7.0au_50Ry_2s2p1d',\r\n 'Br': 'Br_lda_6.0au_50Ry_2s2p1d',\r\n 'In': 'In_pz-bhs_8.0au_50Ry_2s2p1d',\r\n 'Sn': 'Sn_pz-bhs_8.0au_16Ry_2s2p1d',\r\n 'I': 'I_lda_6.0au_50Ry_2s2p2d'},\r\n 'LDAmid': {'H': 'H_lda_6.0au_50Ry_2s1p',\r\n 'Li': 'Li_lda_10.0au_50Ry_2s2p1d',\r\n 'Be': 'Be_lda_9.0au_50Ry_2s2p1d',\r\n 'B': 'B_lda_8.0au_50Ry_2s2p1d',\r\n 'C': 'C_pz-vbc_7.0au_50Ry_2s2d1p',\r\n 'N': 'N_lda_6.0au_50Ry_2s2p1d',\r\n 'O': 'O_lda_6.0au_50Ry_2s2p1d',\r\n 'F': 'F_lda_6.0au_50Ry_2s2p1d',\r\n 'Na': 'Na_lda_10.0au_50Ry_2s2p1d',\r\n 'Mg': 'Mg_lda_10.0au_50Ry_2s2p1d',\r\n 'Al': 'Al_pz-vbc_10.0au_50Ry_2s2d1p',\r\n 'Si': 'Si_lda_9.0au_50Ry_2s2p1d',\r\n 'P': 'P_lda_8.0au_50Ry_2s2p1d',\r\n 'S': 'S_lda_7.0au_50Ry_2s2p1d',\r\n 'Cl': 'Cl_pz-bhs_7.0au_50Ry_2s2d1p',\r\n 'K': 'K_lda_11.0au_50Ry_2s2p1d',\r\n 'Ca': 'Ca_pz-n-vbc_11.0au_50Ry_2s2p1d',\r\n 'Ti': 'Ti_lda_10.0au_100Ry_2s2p2d',\r\n 'Mn': 'Mn_lda_9.0au_100Ry_2s2p2d',\r\n 'Fe': 'Fe_lda_8.0au_100Ry_2s2p2d',\r\n 'Cu': 'Cu_lda_7.0au_100Ry_2s2p2d',\r\n 'Zn': 'Zn_lda_8.0au_120Ry_2s2p2d',\r\n 'Ga': 'Ga_lda_8.0au_100Ry_2s2p2d',\r\n 'Ge': 'Ge_pz-bhs_9.0au_50Ry_2s2d1p',\r\n 'As': 'As_pz-bhs_8.0au_50Ry_2s2p1d',\r\n 'Se': 'Se_pz-bhs_8.0au_50Ry_2s2p1d',\r\n 'Br': 'Br_lda_7.0au_50Ry_2s2p1d',\r\n 'In': 'In_pz-bhs_10.0au_50Ry_2s2p1d',\r\n 'Sn': 'Sn_pz-bhs_9.0au_16Ry_2s2p1d',\r\n 'I': 'I_lda_7.0au_50Ry_2s2p2d'},\r\n 'LDAact': {'H': 'H_lda_8.0au_50Ry_2s1p',\r\n 'Li': 'Li_lda_12.0au_50Ry_2s2p1d',\r\n 'Be': 'Be_lda_10.0au_50Ry_2s2p1d',\r\n 'B': 'B_lda_9.0au_50Ry_2s2p1d',\r\n 'C': 'C_pz-vbc_8.0au_50Ry_2s2d1p',\r\n 'N': 'N_lda_7.0au_50Ry_2s2p1d',\r\n 'O': 'O_lda_7.0au_50Ry_2s2p1d',\r\n 'F': 'F_lda_7.0au_50Ry_2s2p1d',\r\n 'Na': 'Na_lda_12.0au_50Ry_2s2p1d',\r\n 'Mg': 'Mg_lda_12.0au_50Ry_2s2p1d',\r\n 'Al': 'Al_pz-vbc_11.0au_50Ry_2s2d1p',\r\n 'Si': 
'Si_lda_10.0au_50Ry_2s2p1d',\r\n 'P': 'P_lda_9.0au_50Ry_2s2p1d',\r\n 'S': 'S_lda_8.0au_50Ry_2s2p1d',\r\n 'Cl': 'Cl_pz-bhs_8.0au_50Ry_2s2d1p',\r\n 'K': 'K_lda_12.0au_50Ry_2s2p1d',\r\n 'Ca': 'Ca_pz-n-vbc_12.0au_50Ry_2s2p1d',\r\n 'Ti': 'Ti_lda_11.0au_100Ry_2s2p2d',\r\n 'Mn': 'Mn_lda_10.0au_100Ry_2s2p2d',\r\n 'Fe': 'Fe_lda_9.0au_100Ry_2s2p2d',\r\n 'Cu': 'Cu_lda_8.0au_100Ry_2s2p2d',\r\n 'Zn': 'Zn_lda_9.0au_120Ry_2s2p2d',\r\n 'Ga': 'Ga_lda_9.0au_100Ry_2s2p2d',\r\n 'Ge': 'Ge_pz-bhs_10.0au_50Ry_2s2d1p',\r\n 'As': 'As_pz-bhs_9.0au_50Ry_2s2p1d',\r\n 'Se': 'Se_pz-bhs_9.0au_50Ry_2s2p1d',\r\n 'Br': 'Br_lda_8.0au_50Ry_2s2p1d',\r\n 'In': 'In_pz-bhs_11.0au_50Ry_2s2p1d',\r\n 'Sn': 'Sn_pz-bhs_10.0au_16Ry_2s2p1d',\r\n 'I': 'I_lda_8.0au_50Ry_2s2p2d'},\r\n 'PBEmin': {'H': 'H_pbe_5.0au_50Ry_2s1p',\r\n 'Li': 'Li_pbe_8.0au_50Ry_2s2p1d',\r\n 'Be': 'Be_pbe_8.0au_50Ry_2s2p1d',\r\n 'B': 'B_pbe_7.0au_50Ry_2s2p1d',\r\n 'C': 'C_pbe_6.0au_50Ry_2s2d1p',\r\n 'N': 'N_pbe_5.0au_50Ry_2s2p1d',\r\n 'O': 'O_pbe_5.0au_50Ry_2s2p1d',\r\n 'F': 'F_pbe_5.0au_50Ry_2s2p1d',\r\n 'Na': 'Na_pbe_9.0au_50Ry_2s2p1d',\r\n 'Mg': 'Mg_pbe_8.0au_50Ry_2s2p1d',\r\n 'Al': 'Al_pbe_9.0au_50Ry_2s2d1p',\r\n 'Si': 'Si_pbe_8.0au_50Ry_2s2p1d',\r\n 'P': 'P_pbe_7.0au_50Ry_2s2p1d',\r\n 'S': 'S_pbe_6.0au_50Ry_2s2p1d',\r\n 'Cl': 'Cl_pbe_6.0au_50Ry_2s2d1p',\r\n 'K': 'K_pbe_10.0au_50Ry_2s2p1d',\r\n 'Ti': 'Ti_pbe_8.0au_100Ry_2s2p2d',\r\n 'Mn': 'Mn_pbe_7.0au_100Ry_2s2p2d',\r\n 'Fe': 'Fe_pbe_7.0au_100Ry_2s2p2d',\r\n 'Cu': 'Cu_pbe_6.0au_100Ry_2s2p2d',\r\n 'Zn': 'Zn_pbe_6.0au_120Ry_2s2p2d',\r\n 'Ga': 'Ga_pbe_7.0au_100Ry_2s2p2d',\r\n 'Br': 'Br_pbe_6.0au_50Ry_2s2p1d',\r\n 'I': 'I_pbe_6.0au_50Ry_2s2p2d'},\r\n 'PBEmid': {'H': 'H_pbe_6.0au_50Ry_2s1p',\r\n 'Li': 'Li_pbe_10.0au_50Ry_2s2p1d',\r\n 'Be': 'Be_pbe_9.0au_50Ry_2s2p1d',\r\n 'B': 'B_pbe_8.0au_50Ry_2s2p1d',\r\n 'C': 'C_pbe_7.0au_50Ry_2s2d1p',\r\n 'N': 'N_pbe_6.0au_50Ry_2s2p1d',\r\n 'O': 'O_6.0au_50Ry_2s2p1d',\r\n 'F': 'F_pbe_6.0au_50Ry_2s2p1d',\r\n 'Na': 'Na_pbe_10.0au_50Ry_2s2p1d',\r\n 'Mg': 'Mg_pbe_10.0au_50Ry_2s2p1d',\r\n 'Al': 'Al_pbe_10.0au_50Ry_2s2d1p',\r\n 'Si': 'Si_pbe_9.0au_50Ry_2s2p1d',\r\n 'P': 'P_8.0au_50Ry_2s2p1d',\r\n 'S': 'S_pbe_7.0au_50Ry_2s2p1d',\r\n 'Cl': 'Cl_pbe_7.0au_50Ry_2s2d1p',\r\n 'K': 'K_pbe_11.0au_50Ry_2s2p1d',\r\n 'Ti': 'Ti_pbe_10.0au_100Ry_2s2p2d',\r\n 'Mn': 'Mn_pbe_9.0au_100Ry_2s2p2d',\r\n 'Fe': 'Fe_pbe_8.0au_100Ry_2s2p2d',\r\n 'Cu': 'Cu_pbe_7.0au_100Ry_2s2p2d',\r\n 'Zn': 'Zn_pbe_8.0au_120Ry_2s2p2d',\r\n 'Ga': 'Ga_pbe_8.0au_100Ry_2s2p2d',\r\n 'Br': 'Br_pbe_7.0au_50Ry_2s2p1d',\r\n 'I': 'I_pbe_7.0au_50Ry_2s2p2d'},\r\n 'PBEact': {'H': 'H_pbe_8.0au_50Ry_2s1p',\r\n 'Li': 'Li_pbe_12.0au_50Ry_2s2p1d',\r\n 'Be': 'Be_pbe_10.0au_50Ry_2s2p1d',\r\n 'B': 'B_pbe_9.0au_50Ry_2s2p1d',\r\n 'C': 'C_pbe_8.0au_50Ry_2s2d1p',\r\n 'N': 'N_pbe_7.0au_50Ry_2s2p1d',\r\n 'O': 'O_pbe_7.0au_50Ry_2s2p1d',\r\n 'F': 'F_pbe_7.0au_50Ry_2s2p1d',\r\n 'Na': 'Na_pbe_12.0au_50Ry_2s2p1d',\r\n 'Mg': 'Mg_pbe_12.0au_50Ry_2s2p1d',\r\n 'Al': 'Al_pbe_11.0au_50Ry_2s2d1p',\r\n 'Si': 'Si_pbe_10.0au_50Ry_2s2p1d',\r\n 'P': 'P_pbe_9.0au_50Ry_2s2p1d',\r\n 'S': 'S_pbe_8.0au_50Ry_2s2p1d',\r\n 'Cl': 'Cl_pbe_8.0au_50Ry_2s2d1p',\r\n 'K': 'K_pbe_12.0au_50Ry_2s2p1d',\r\n 'Ti': 'Ti_pbe_11.0au_100Ry_2s2p2d',\r\n 'Mn': 'Mn_pbe_10.0au_100Ry_2s2p2d',\r\n 'Fe': 'Fe_pbe_9.0au_100Ry_2s2p2d',\r\n 'Cu': 'Cu_pbe_8.0au_100Ry_2s2p2d',\r\n 'Zn': 'Zn_pbe_9.0au_120Ry_2s2p2d',\r\n 'Ga': 'Ga_pbe_9.0au_100Ry_2s2p2d',\r\n 'Br': 'Br_pbe_8.0au_50Ry_2s2p1d',\r\n 'I': 'I_pbe_8.0au_50Ry_2s2p2d'},\r\n 'SG15std': {'Ag': 'Ag_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'Al': 
'Al_gga_9au_60Ry_4s4p1d.orb',\r\n 'Ar': 'Ar_gga_7au_60Ry_2s2p1d.orb',\r\n 'As': 'As_gga_8au_60Ry_2s2p1d.orb',\r\n 'Au': 'Au_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'Ba': 'Ba_gga_11au_60Ry_4s2p2d.orb',\r\n 'Be': 'Be_gga_8au_60Ry_4s1p.orb',\r\n 'Bi': 'Bi_gga_9au_60Ry_2s2p2d.orb',\r\n 'Br': 'Br_gga_8au_60Ry_2s2p1d.orb',\r\n 'B': 'B_gga_8au_60Ry_2s2p1d.orb',\r\n 'Ca': 'Ca_gga_9au_60Ry_4s2p1d.orb',\r\n 'Cd': 'Cd_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'Cl': 'Cl_gga_8au_60Ry_2s2p1d.orb',\r\n 'Co': 'Co_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'Cr': 'Cr_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'Cs': 'Cs_gga_11au_60Ry_4s2p1d.orb',\r\n 'Cu': 'Cu_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'C': 'C_gga_8au_60Ry_2s2p1d.orb',\r\n 'Fe': 'Fe_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'F': 'F_gga_7au_60Ry_2s2p1d.orb',\r\n 'Ga': 'Ga_gga_9au_60Ry_2s2p2d.orb',\r\n 'Ge': 'Ge_gga_8au_60Ry_2s2p2d.orb',\r\n 'He': 'He_gga_6au_60Ry_2s1p.orb',\r\n 'Hf': 'Hf_gga_10au_60Ry_4s2p2d2f.orb',\r\n 'Hg': 'Hg_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'H': 'H_gga_8au_60Ry_2s1p.orb',\r\n 'In': 'In_gga_9au_60Ry_2s2p2d.orb',\r\n 'Ir': 'Ir_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'I': 'I_gga_8au_60Ry_2s2p2d.orb',\r\n 'Kr': 'Kr_gga_7au_60Ry_2s2p1d.orb',\r\n 'K': 'K_gga_9au_60Ry_4s2p1d.orb',\r\n 'Li': 'Li_gga_9au_60Ry_4s1p.orb',\r\n 'Mg': 'Mg_gga_9au_60Ry_4s2p1d.orb',\r\n 'Mn': 'Mn_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'Mo': 'Mo_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'Na': 'Na_gga_10au_60Ry_4s2p1d.orb',\r\n 'Nb': 'Nb_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'Ne': 'Ne_gga_6au_60Ry_2s2p1d.orb',\r\n 'Ni': 'Ni_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'N': 'N_gga_8au_60Ry_2s2p1d.orb',\r\n 'Os': 'Os_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'O': 'O_gga_7au_60Ry_2s2p1d.orb',\r\n 'Pb': 'Pb_gga_9au_60Ry_2s2p2d.orb',\r\n 'Pd': 'Pd_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'Pt': 'Pt_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'P': 'P_gga_8au_60Ry_2s2p1d.orb',\r\n 'Rb': 'Rb_gga_10au_60Ry_4s2p1d.orb',\r\n 'Re': 'Re_gga_10au_60Ry_4s2p2d1f.orb',\r\n 'Rh': 'Rh_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'Ru': 'Ru_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'Sb': 'Sb_gga_9au_60Ry_2s2p2d.orb',\r\n 'Sc': 'Sc_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'Se': 'Se_gga_8au_60Ry_2s2p1d.orb',\r\n 'Si': 'Si_gga_8au_60Ry_2s2p1d.orb',\r\n 'Sn': 'Sn_gga_9au_60Ry_2s2p2d.orb',\r\n 'Sr': 'Sr_gga_10au_60Ry_4s2p1d.orb',\r\n 'S': 'S_gga_8au_60Ry_2s2p1d.orb',\r\n 'Ta': 'Ta_gga_10au_60Ry_4s2p2d2f.orb',\r\n 'Tc': 'Tc_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'Te': 'Te_gga_9au_60Ry_2s2p2d.orb',\r\n 'Ti': 'Ti_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'Tl': 'Tl_gga_9au_60Ry_2s2p2d.orb',\r\n 'V': 'V_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'W': 'W_gga_10au_60Ry_4s2p2d2f.orb',\r\n 'Xe': 'Xe_gga_8au_60Ry_2s2p2d.orb',\r\n 'Y': 'Y_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'Zn': 'Zn_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'Zr': 'Zr_gga_9au_60Ry_4s2p2d1f.orb'},\r\n 'SG15act': {'Ag': 'Ag_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'Al': 'Al_gga_9au_100Ry_4s4p1d.orb',\r\n 'Ar': 'Ar_gga_7au_100Ry_2s2p1d.orb',\r\n 'As': 'As_gga_8au_100Ry_2s2p1d.orb',\r\n 'Au': 'Au_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'B': 'B_gga_8au_100Ry_2s2p1d.orb',\r\n 'Ba': 'Ba_gga_11au_100Ry_4s2p2d.orb',\r\n 'Be': 'Be_gga_8au_100Ry_4s1p.orb',\r\n 'Bi': 'Bi_gga_9au_100Ry_2s2p2d.orb',\r\n 'Br': 'Br_gga_8au_100Ry_2s2p1d.orb',\r\n 'C': 'C_gga_8au_100Ry_2s2p1d.orb',\r\n 'Ca': 'Ca_gga_9au_100Ry_4s2p1d.orb',\r\n 'Cd': 'Cd_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'Cl': 'Cl_gga_8au_100Ry_2s2p1d.orb',\r\n 'Co': 'Co_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'Cr': 'Cr_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'Cs': 'Cs_gga_11au_100Ry_4s2p1d.orb',\r\n 'Cu': 'Cu_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'F': 'F_gga_7au_100Ry_2s2p1d.orb',\r\n 'Fe': 'Fe_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'Ga': 
'Ga_gga_9au_100Ry_2s2p2d.orb',\r\n 'Ge': 'Ge_gga_8au_100Ry_2s2p2d.orb',\r\n 'H': 'H_gga_8au_100Ry_2s1p.orb',\r\n 'He': 'He_gga_6au_100Ry_2s1p.orb',\r\n 'Hf': 'Hf_gga_10au_100Ry_4s2p2d2f.orb',\r\n 'Hg': 'Hg_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'I': 'I_gga_8au_100Ry_2s2p2d.orb',\r\n 'In': 'In_gga_9au_100Ry_2s2p2d.orb',\r\n 'Ir': 'Ir_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'K': 'K_gga_9au_100Ry_4s2p1d.orb',\r\n 'Kr': 'Kr_gga_7au_100Ry_2s2p1d.orb',\r\n 'Li': 'Li_gga_9au_100Ry_4s1p.orb',\r\n 'Mg': 'Mg_gga_9au_100Ry_4s2p1d.orb',\r\n 'Mn': 'Mn_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'Mo': 'Mo_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'N': 'N_gga_8au_100Ry_2s2p1d.orb',\r\n 'Na': 'Na_gga_9au_100Ry_4s2p1d.orb',\r\n 'Nb': 'Nb_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'Ne': 'Ne_gga_6au_100Ry_2s2p1d.orb',\r\n 'Ni': 'Ni_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'O': 'O_gga_7au_100Ry_2s2p1d.orb',\r\n 'Os': 'Os_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'P': 'P_gga_8au_100Ry_2s2p1d.orb',\r\n 'Pb': 'Pb_gga_9au_100Ry_2s2p2d.orb',\r\n 'Pd': 'Pd_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'Pt': 'Pt_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'Rb': 'Rb_gga_10au_100Ry_4s2p1d.orb',\r\n 'Re': 'Re_gga_10au_100Ry_4s2p2d1f.orb',\r\n 'Rh': 'Rh_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'Ru': 'Ru_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'S': 'S_gga_8au_100Ry_2s2p1d.orb',\r\n 'Sb': 'Sb_gga_9au_100Ry_2s2p2d.orb',\r\n 'Sc': 'Sc_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'Se': 'Se_gga_8au_100Ry_2s2p1d.orb',\r\n 'Si': 'Si_gga_8au_100Ry_2s2p1d.orb',\r\n 'Sn': 'Sn_gga_9au_100Ry_2s2p2d.orb',\r\n 'Sr': 'Sr_gga_10au_100Ry_4s2p1d.orb',\r\n 'Ta': 'Ta_gga_10au_100Ry_4s2p2d2f.orb',\r\n 'Tc': 'Tc_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'Te': 'Te_gga_9au_100Ry_2s2p2d.orb',\r\n 'Ti': 'Ti_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'Tl': 'Tl_gga_9au_100Ry_2s2p2d.orb',\r\n 'V': 'V_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'W': 'W_gga_10au_100Ry_4s2p2d2f.orb',\r\n 'Xe': 'Xe_gga_7au_100Ry_2s2p2d.orb',\r\n 'Y': 'Y_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'Zn': 'Zn_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'Zr': 'Zr_gga_9au_100Ry_4s2p2d1f.orb'},\r\n 'SG15tzdp': {'Ag': 'Ag_gga_9au_100Ry_5s3p3d2f.orb',\r\n 'Al': 'Al_gga_9au_100Ry_5s5p2d.orb',\r\n 'Ar': 'Ar_gga_8au_100Ry_3s3p2d.orb',\r\n 'As': 'As_gga_8au_100Ry_3s3p2d.orb',\r\n 'Au': 'Au_gga_9au_100Ry_5s3p3d2f.orb',\r\n 'Ba': 'Ba_gga_11au_100Ry_5s3p3d1f.orb',\r\n 'Be': 'Be_gga_8au_100Ry_5s2p.orb',\r\n 'Bi': 'Bi_gga_9au_100Ry_3s3p3d1f.orb',\r\n 'Br': 'Br_gga_8au_100Ry_3s3p2d.orb',\r\n 'B': 'B_gga_8au_100Ry_3s3p2d.orb',\r\n 'Ca': 'Ca_gga_9au_100Ry_5s3p2d.orb',\r\n 'Cd': 'Cd_gga_9au_100Ry_5s3p3d2f.orb',\r\n 'Cl': 'Cl_gga_8au_100Ry_3s3p2d.orb',\r\n 'Co': 'Co_gga_9au_100Ry_5s3p3d2f.orb',\r\n 'Cr': 'Cr_gga_9au_100Ry_5s3p3d2f.orb',\r\n 'Cs': 'Cs_gga_11au_100Ry_5s3p2d.orb',\r\n 'Cu': 'Cu_gga_9au_100Ry_5s3p3d2f.orb',\r\n 'C': 'C_gga_8au_100Ry_3s3p2d.orb',\r\n 'Fe': 'Fe_gga_9au_100Ry_5s3p3d2f.orb',\r\n 'F': 'F_gga_8au_100Ry_3s3p2d.orb',\r\n 'Ga': 'Ga_gga_9au_100Ry_3s3p3d1f.orb',\r\n 'Ge': 'Ge_gga_8au_100Ry_3s3p3d1f.orb',\r\n 'He': 'He_gga_8au_100Ry_3s2p.orb',\r\n 'Hf': 'Hf_gga_10au_100Ry_5s3p3d3f1g.orb',\r\n 'Hg': 'Hg_gga_9au_100Ry_5s3p3d2f.orb',\r\n 'H': 'H_gga_8au_100Ry_3s2p.orb',\r\n 'In': 'In_gga_10au_100Ry_3s3p3d1f.orb',\r\n 'Ir': 'Ir_gga_9au_100Ry_5s3p3d2f.orb',\r\n 'I': 'I_gga_8au_100Ry_3s3p3d1f.orb',\r\n 'Kr': 'Kr_gga_8au_100Ry_3s3p2d.orb',\r\n 'K': 'K_gga_9au_100Ry_5s3p2d.orb',\r\n 'Li': 'Li_gga_9au_100Ry_5s2p.orb',\r\n 'Mg': 'Mg_gga_9au_100Ry_5s3p2d.orb',\r\n 'Mn': 'Mn_gga_9au_100Ry_5s3p3d2f.orb',\r\n 'Mo': 'Mo_gga_9au_100Ry_5s3p3d2f.orb',\r\n 'Na': 'Na_gga_9au_100Ry_5s3p2d.orb',\r\n 'Nb': 'Nb_gga_10au_100Ry_5s3p3d2f.orb',\r\n 'Ne': 
'Ne_gga_8au_100Ry_3s3p2d.orb',\r\n 'Ni': 'Ni_gga_9au_100Ry_5s3p3d2f.orb',\r\n 'N': 'N_gga_8au_100Ry_3s3p2d.orb',\r\n 'Os': 'Os_gga_9au_100Ry_5s3p3d2f.orb',\r\n 'O': 'O_gga_8au_100Ry_3s3p2d.orb',\r\n 'Pb': 'Pb_gga_10au_100Ry_3s3p3d1f.orb',\r\n 'Pd': 'Pd_gga_9au_100Ry_5s3p3d2f.orb',\r\n 'Pt': 'Pt_gga_9au_100Ry_5s3p3d2f.orb',\r\n 'P': 'P_gga_8au_100Ry_3s3p2d.orb',\r\n 'Rb': 'Rb_gga_10au_100Ry_5s3p2d.orb',\r\n 'Re': 'Re_gga_10au_100Ry_5s3p3d2f.orb',\r\n 'Rh': 'Rh_gga_9au_100Ry_5s3p3d2f.orb',\r\n 'Ru': 'Ru_gga_9au_100Ry_5s3p3d2f.orb',\r\n 'Sb': 'Sb_gga_9au_100Ry_3s3p3d1f.orb',\r\n 'Sc': 'Sc_gga_10au_100Ry_5s3p3d2f.orb',\r\n 'Se': 'Se_gga_8au_100Ry_3s3p2d.orb',\r\n 'Si': 'Si_gga_8au_100Ry_3s3p2d.orb',\r\n 'Sn': 'Sn_gga_10au_100Ry_3s3p3d1f.orb',\r\n 'Sr': 'Sr_gga_10au_100Ry_5s3p2d.orb',\r\n 'S': 'S_gga_8au_100Ry_3s3p2d.orb',\r\n 'Ta': 'Ta_gga_10au_100Ry_5s3p3d3f1g.orb',\r\n 'Tc': 'Tc_gga_10au_100Ry_5s3p3d2f.orb',\r\n 'Te': 'Te_gga_9au_100Ry_3s3p3d1f.orb',\r\n 'Ti': 'Ti_gga_9au_100Ry_5s3p3d2f.orb',\r\n 'Tl': 'Tl_gga_10au_100Ry_3s3p3d1f.orb',\r\n 'V': 'V_gga_9au_100Ry_5s3p3d2f.orb',\r\n 'W': 'W_gga_10au_100Ry_5s3p3d3f1g.orb',\r\n 'Xe': 'Xe_gga_8au_100Ry_3s3p3d1f.orb',\r\n 'Y': 'Y_gga_10au_100Ry_5s3p3d2f.orb',\r\n 'Zn': 'Zn_gga_9au_100Ry_5s3p3d2f.orb',\r\n 'Zr': 'Zr_gga_9au_100Ry_5s3p3d2f.orb'}\r\n }\r\n", "id": "12533584", "language": "Python", "matching_score": 4.645752906799316, "max_stars_count": 0, "path": "abacus/basis.py" }, { "content": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Apr 23 20:07:08 2018\r\n\r\n@author: <NAME>\r\n\"\"\"\r\n\r\nbasis_dic={ 'basis_lda minimal':\r\n {'H':'H_lda_5.0au_50Ry_2s1p',\r\n 'Li':'Li_lda_8.0au_50Ry_2s2p1d',\r\n 'Be':'Be_lda_8.0au_50Ry_2s2p1d',\r\n 'B':'B_lda_7.0au_50Ry_2s2p1d',\r\n 'C':'C_pz-vbc_6.0au_50Ry_2s2d1p',\r\n 'N':'N_lda_5.0au_50Ry_2s2p1d',\r\n 'O':'O_lda_5.0au_50Ry_2s2p1d',\r\n 'F':'F_lda_5.0au_50Ry_2s2p1d',\r\n 'Na':'Na_lda_9.0au_50Ry_2s2p1d',\r\n 'Mg':'Mg_lda_8.0au_50Ry_2s2p1d',\r\n 'Al':'Al_pz-vbc_9.0au_50Ry_2s2d1p',\r\n 'Si':'Si_lda_8.0au_50Ry_2s2p1d',\r\n 'P':'P_lda_7.0au_50Ry_2s2p1d',\r\n 'S':'S_lda_6.0au_50Ry_2s2p1d',\r\n 'Cl':'Cl_pz-bhs_6.0au_50Ry_2s2d1p',\r\n 'K':'K_lda_10.0au_50Ry_2s2p1d',\r\n 'Ca':'Ca_pz-n-vbc_10.0au_50Ry_2s2p1d',\r\n 'Ti':'Ti_lda_8.0au_100Ry_2s2p2d',\r\n 'Mn':'Mn_lda_7.0au_100Ry_2s2p2d',\r\n 'Fe':'Fe_lda_7.0au_100Ry_2s2p2d',\r\n 'Cu':'Cu_lda_6.0au_100Ry_2s2p2d',\r\n 'Zn':'Zn_lda_6.0au_120Ry_2s2p2d',\r\n 'Ga':'Ga_lda_7.0au_100Ry_2s2p2d',\r\n 'Ge':'Ge_pz-bhs_8.0au_50Ry_2s2d1p',\r\n 'As':'As_pz-bhs_7.0au_50Ry_2s2p1d',\r\n 'Se':'Se_pz-bhs_7.0au_50Ry_2s2p1d',\r\n 'Br':'Br_lda_6.0au_50Ry_2s2p1d',\r\n 'In':'In_pz-bhs_8.0au_50Ry_2s2p1d',\r\n 'Sn':'Sn_pz-bhs_8.0au_16Ry_2s2p1d',\r\n 'I':'I_lda_6.0au_50Ry_2s2p2d'\r\n },\r\n #\r\n 'basis_lda medium':\r\n {'H':'H_lda_6.0au_50Ry_2s1p',\r\n 'Li':'Li_lda_10.0au_50Ry_2s2p1d',\r\n 'Be':'Be_lda_9.0au_50Ry_2s2p1d',\r\n 'B':'B_lda_8.0au_50Ry_2s2p1d',\r\n 'C':'C_pz-vbc_7.0au_50Ry_2s2d1p',\r\n 'N':'N_lda_6.0au_50Ry_2s2p1d',\r\n 'O':'O_lda_6.0au_50Ry_2s2p1d',\r\n 'F':'F_lda_6.0au_50Ry_2s2p1d',\r\n 'Na':'Na_lda_10.0au_50Ry_2s2p1d',\r\n 'Mg':'Mg_lda_10.0au_50Ry_2s2p1d',\r\n 'Al':'Al_pz-vbc_10.0au_50Ry_2s2d1p',\r\n 'Si':'Si_lda_9.0au_50Ry_2s2p1d',\r\n 'P':'P_lda_8.0au_50Ry_2s2p1d',\r\n 'S':'S_lda_7.0au_50Ry_2s2p1d',\r\n 'Cl':'Cl_pz-bhs_7.0au_50Ry_2s2d1p',\r\n 'K':'K_lda_11.0au_50Ry_2s2p1d',\r\n 'Ca':'Ca_pz-n-vbc_11.0au_50Ry_2s2p1d',\r\n 'Ti':'Ti_lda_10.0au_100Ry_2s2p2d',\r\n 'Mn':'Mn_lda_9.0au_100Ry_2s2p2d',\r\n 'Fe':'Fe_lda_8.0au_100Ry_2s2p2d',\r\n 'Cu':'Cu_lda_7.0au_100Ry_2s2p2d',\r\n 
'Zn':'Zn_lda_8.0au_120Ry_2s2p2d',\r\n 'Ga':'Ga_lda_8.0au_100Ry_2s2p2d',\r\n 'Ge':'Ge_pz-bhs_9.0au_50Ry_2s2d1p',\r\n 'As':'As_pz-bhs_8.0au_50Ry_2s2p1d',\r\n 'Se':'Se_pz-bhs_8.0au_50Ry_2s2p1d',\r\n 'Br':'Br_lda_7.0au_50Ry_2s2p1d',\r\n 'In':'In_pz-bhs_10.0au_50Ry_2s2p1d',\r\n 'Sn':'Sn_pz-bhs_9.0au_16Ry_2s2p1d',\r\n 'I':'I_lda_7.0au_50Ry_2s2p2d'\r\n },\r\n #\r\n 'basis_lda accurate':\r\n {'H':'H_lda_8.0au_50Ry_2s1p',\r\n 'Li':'Li_lda_12.0au_50Ry_2s2p1d',\r\n 'Be':'Be_lda_10.0au_50Ry_2s2p1d',\r\n 'B':'B_lda_9.0au_50Ry_2s2p1d',\r\n 'C':'C_pz-vbc_8.0au_50Ry_2s2d1p',\r\n 'N':'N_lda_7.0au_50Ry_2s2p1d',\r\n 'O':'O_lda_7.0au_50Ry_2s2p1d',\r\n 'F':'F_lda_7.0au_50Ry_2s2p1d',\r\n 'Na':'Na_lda_12.0au_50Ry_2s2p1d',\r\n 'Mg':'Mg_lda_12.0au_50Ry_2s2p1d',\r\n 'Al':'Al_pz-vbc_11.0au_50Ry_2s2d1p',\r\n 'Si':'Si_lda_10.0au_50Ry_2s2p1d',\r\n 'P':'P_lda_9.0au_50Ry_2s2p1d',\r\n 'S':'S_lda_8.0au_50Ry_2s2p1d',\r\n 'Cl':'Cl_pz-bhs_8.0au_50Ry_2s2d1p',\r\n 'K':'K_lda_12.0au_50Ry_2s2p1d',\r\n 'Ca':'Ca_pz-n-vbc_12.0au_50Ry_2s2p1d',\r\n 'Ti':'Ti_lda_11.0au_100Ry_2s2p2d',\r\n 'Mn':'Mn_lda_10.0au_100Ry_2s2p2d',\r\n 'Fe':'Fe_lda_9.0au_100Ry_2s2p2d',\r\n 'Cu':'Cu_lda_8.0au_100Ry_2s2p2d',\r\n 'Zn':'Zn_lda_9.0au_120Ry_2s2p2d',\r\n 'Ga':'Ga_lda_9.0au_100Ry_2s2p2d',\r\n 'Ge':'Ge_pz-bhs_10.0au_50Ry_2s2d1p',\r\n 'As':'As_pz-bhs_9.0au_50Ry_2s2p1d',\r\n 'Se':'Se_pz-bhs_9.0au_50Ry_2s2p1d',\r\n 'Br':'Br_lda_8.0au_50Ry_2s2p1d',\r\n 'In':'In_pz-bhs_11.0au_50Ry_2s2p1d',\r\n 'Sn':'Sn_pz-bhs_10.0au_16Ry_2s2p1d',\r\n 'I':'I_lda_8.0au_50Ry_2s2p2d'\r\n }\r\n ,\r\n\r\n 'basis_pbe minimal':\r\n {'H':'H_pbe_5.0au_50Ry_2s1p',\r\n 'Li':'Li_pbe_8.0au_50Ry_2s2p1d',\r\n 'Be':'Be_pbe_8.0au_50Ry_2s2p1d',\r\n 'B':'B_pbe_7.0au_50Ry_2s2p1d',\r\n 'C':'C_pbe_6.0au_50Ry_2s2d1p',\r\n 'N':'N_pbe_5.0au_50Ry_2s2p1d',\r\n 'O':'O_pbe_5.0au_50Ry_2s2p1d',\r\n 'F':'F_pbe_5.0au_50Ry_2s2p1d',\r\n 'Na':'Na_pbe_9.0au_50Ry_2s2p1d',\r\n 'Mg':'Mg_pbe_8.0au_50Ry_2s2p1d',\r\n 'Al':'Al_pbe_9.0au_50Ry_2s2d1p',\r\n 'Si':'Si_pbe_8.0au_50Ry_2s2p1d',\r\n 'P':'P_pbe_7.0au_50Ry_2s2p1d',\r\n 'S':'S_pbe_6.0au_50Ry_2s2p1d',\r\n 'Cl':'Cl_pbe_6.0au_50Ry_2s2d1p',\r\n 'K':'K_pbe_10.0au_50Ry_2s2p1d',\r\n 'Ti':'Ti_pbe_8.0au_100Ry_2s2p2d',\r\n 'Mn':'Mn_pbe_7.0au_100Ry_2s2p2d',\r\n 'Fe':'Fe_pbe_7.0au_100Ry_2s2p2d',\r\n 'Cu':'Cu_pbe_6.0au_100Ry_2s2p2d',\r\n 'Zn':'Zn_pbe_6.0au_120Ry_2s2p2d',\r\n 'Ga':'Ga_pbe_7.0au_100Ry_2s2p2d',\r\n 'Br':'Br_pbe_6.0au_50Ry_2s2p1d',\r\n 'I':'I_pbe_6.0au_50Ry_2s2p2d'\r\n },\r\n #\r\n 'basis_pbe medium':\r\n {'H':'H_pbe_6.0au_50Ry_2s1p',\r\n 'Li':'Li_pbe_10.0au_50Ry_2s2p1d',\r\n 'Be':'Be_pbe_9.0au_50Ry_2s2p1d',\r\n 'B':'B_pbe_8.0au_50Ry_2s2p1d',\r\n 'C':'C_pbe_7.0au_50Ry_2s2d1p',\r\n 'N':'N_pbe_6.0au_50Ry_2s2p1d',\r\n 'O':'O_6.0au_50Ry_2s2p1d',\r\n 'F':'F_pbe_6.0au_50Ry_2s2p1d',\r\n 'Na':'Na_pbe_10.0au_50Ry_2s2p1d',\r\n 'Mg':'Mg_pbe_10.0au_50Ry_2s2p1d',\r\n 'Al':'Al_pbe_10.0au_50Ry_2s2d1p',\r\n 'Si':'Si_pbe_9.0au_50Ry_2s2p1d',\r\n 'P':'P_8.0au_50Ry_2s2p1d',\r\n 'S':'S_pbe_7.0au_50Ry_2s2p1d',\r\n 'Cl':'Cl_pbe_7.0au_50Ry_2s2d1p',\r\n 'K':'K_pbe_11.0au_50Ry_2s2p1d',\r\n 'Ti':'Ti_pbe_10.0au_100Ry_2s2p2d',\r\n 'Mn':'Mn_pbe_9.0au_100Ry_2s2p2d',\r\n 'Fe':'Fe_pbe_8.0au_100Ry_2s2p2d',\r\n 'Cu':'Cu_pbe_7.0au_100Ry_2s2p2d',\r\n 'Zn':'Zn_pbe_8.0au_120Ry_2s2p2d',\r\n 'Ga':'Ga_pbe_8.0au_100Ry_2s2p2d',\r\n 'Br':'Br_pbe_7.0au_50Ry_2s2p1d',\r\n 'I':'I_pbe_7.0au_50Ry_2s2p2d'\r\n },\r\n #\r\n 'basis_pbe accurate':\r\n {'H':'H_pbe_8.0au_50Ry_2s1p',\r\n 'Li':'Li_pbe_12.0au_50Ry_2s2p1d',\r\n 'Be':'Be_pbe_10.0au_50Ry_2s2p1d',\r\n 'B':'B_pbe_9.0au_50Ry_2s2p1d',\r\n 
'C':'C_pbe_8.0au_50Ry_2s2d1p',\r\n 'N':'N_pbe_7.0au_50Ry_2s2p1d',\r\n 'O':'O_pbe_7.0au_50Ry_2s2p1d',\r\n 'F':'F_pbe_7.0au_50Ry_2s2p1d',\r\n 'Na':'Na_pbe_12.0au_50Ry_2s2p1d',\r\n 'Mg':'Mg_pbe_12.0au_50Ry_2s2p1d',\r\n 'Al':'Al_pbe_11.0au_50Ry_2s2d1p',\r\n 'Si':'Si_pbe_10.0au_50Ry_2s2p1d',\r\n 'P':'P_pbe_9.0au_50Ry_2s2p1d',\r\n 'S':'S_pbe_8.0au_50Ry_2s2p1d',\r\n 'Cl':'Cl_pbe_8.0au_50Ry_2s2d1p',\r\n 'K':'K_pbe_12.0au_50Ry_2s2p1d',\r\n 'Ti':'Ti_pbe_11.0au_100Ry_2s2p2d',\r\n 'Mn':'Mn_pbe_10.0au_100Ry_2s2p2d',\r\n 'Fe':'Fe_pbe_9.0au_100Ry_2s2p2d',\r\n 'Cu':'Cu_pbe_8.0au_100Ry_2s2p2d',\r\n 'Zn':'Zn_pbe_9.0au_120Ry_2s2p2d',\r\n 'Ga':'Ga_pbe_9.0au_100Ry_2s2p2d',\r\n 'Br':'Br_pbe_8.0au_50Ry_2s2p1d',\r\n 'I':'I_pbe_8.0au_50Ry_2s2p2d'\r\n }\r\n ,\r\n\r\n 'basis_pbe_sg15 accurate':\r\n {'Ag':'Ag_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'Al':'Al_gga_9au_100Ry_4s4p1d.orb',\r\n 'Ar':'Ar_gga_7au_100Ry_2s2p1d.orb',\r\n 'As':'As_gga_8au_100Ry_2s2p1d.orb',\r\n 'Au':'Au_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'B':'B_gga_8au_100Ry_2s2p1d.orb',\r\n 'Ba':'Ba_gga_11au_100Ry_4s2p2d.orb',\r\n 'Be':'Be_gga_8au_100Ry_4s1p.orb',\r\n 'Bi':'Bi_gga_9au_100Ry_2s2p2d.orb',\r\n 'Br':'Br_gga_8au_100Ry_2s2p1d.orb',\r\n 'C':'C_gga_8au_100Ry_2s2p1d.orb',\r\n 'Ca':'Ca_gga_9au_100Ry_4s2p1d.orb',\r\n 'Cd':'Cd_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'Cl':'Cl_gga_8au_100Ry_2s2p1d.orb',\r\n 'Co':'Co_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'Cr':'Cr_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'Cs':'Cs_gga_11au_100Ry_4s2p1d.orb',\r\n 'Cu':'Cu_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'F':'F_gga_7au_100Ry_2s2p1d.orb',\r\n 'Fe':'Fe_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'Ga':'Ga_gga_9au_100Ry_2s2p2d.orb',\r\n 'Ge':'Ge_gga_8au_100Ry_2s2p2d.orb',\r\n 'H':'H_gga_8au_100Ry_2s1p.orb',\r\n 'He':'He_gga_6au_100Ry_2s1p.orb',\r\n 'Hf':'Hf_gga_10au_100Ry_4s2p2d2f.orb',\r\n 'Hg':'Hg_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'I':'I_gga_8au_100Ry_2s2p2d.orb',\r\n 'In':'In_gga_9au_100Ry_2s2p2d.orb',\r\n 'Ir':'Ir_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'K':'K_gga_9au_100Ry_4s2p1d.orb',\r\n 'Kr':'Kr_gga_7au_100Ry_2s2p1d.orb',\r\n 'Li':'Li_gga_9au_100Ry_4s1p.orb',\r\n 'Mg':'Mg_gga_9au_100Ry_4s2p1d.orb',\r\n 'Mn':'Mn_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'Mo':'Mo_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'N':'N_gga_8au_100Ry_2s2p1d.orb',\r\n 'Na':'Na_gga_9au_100Ry_4s2p1d.orb',\r\n 'Nb':'Nb_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'Ne':'Ne_gga_6au_100Ry_2s2p1d.orb',\r\n 'Ni':'Ni_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'O':'O_gga_7au_100Ry_2s2p1d.orb',\r\n 'Os':'Os_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'P':'P_gga_8au_100Ry_2s2p1d.orb',\r\n 'Pb':'Pb_gga_9au_100Ry_2s2p2d.orb',\r\n 'Pd':'Pd_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'Pt':'Pt_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'Rb':'Rb_gga_10au_100Ry_4s2p1d.orb',\r\n 'Re':'Re_gga_10au_100Ry_4s2p2d1f.orb',\r\n 'Rh':'Rh_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'Ru':'Ru_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'S':'S_gga_8au_100Ry_2s2p1d.orb',\r\n 'Sb':'Sb_gga_9au_100Ry_2s2p2d.orb',\r\n 'Sc':'Sc_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'Se':'Se_gga_8au_100Ry_2s2p1d.orb',\r\n 'Si':'Si_gga_8au_100Ry_2s2p1d.orb',\r\n 'Sn':'Sn_gga_9au_100Ry_2s2p2d.orb',\r\n 'Sr':'Sr_gga_10au_100Ry_4s2p1d.orb',\r\n 'Ta':'Ta_gga_10au_100Ry_4s2p2d2f.orb',\r\n 'Tc':'Tc_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'Te':'Te_gga_9au_100Ry_2s2p2d.orb',\r\n 'Ti':'Ti_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'Tl':'Tl_gga_9au_100Ry_2s2p2d.orb',\r\n 'V':'V_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'W':'W_gga_10au_100Ry_4s2p2d2f.orb',\r\n 'Xe':'Xe_gga_7au_100Ry_2s2p2d.orb',\r\n 'Y':'Y_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'Zn':'Zn_gga_9au_100Ry_4s2p2d1f.orb',\r\n 'Zr':'Zr_gga_9au_100Ry_4s2p2d1f.orb'}\r\n ,\r\n 
'basis_pbe_sg15 standard':\r\n {'Ag':'Ag_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'Al':'Al_gga_9au_60Ry_4s4p1d.orb',\r\n 'Ar':'Ar_gga_7au_60Ry_2s2p1d.orb',\r\n 'As':'As_gga_8au_60Ry_2s2p1d.orb',\r\n 'Au':'Au_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'B':'B_gga_8au_60Ry_2s2p1d.orb',\r\n 'Ba':'Ba_gga_11au_60Ry_4s2p2d.orb',\r\n 'Be':'Be_gga_8au_60Ry_4s1p.orb',\r\n 'Bi':'Bi_gga_9au_60Ry_2s2p2d.orb',\r\n 'Br':'Br_gga_8au_60Ry_2s2p1d.orb',\r\n 'C':'C_gga_8au_60Ry_2s2p1d.orb',\r\n 'Ca':'Ca_gga_9au_60Ry_4s2p1d.orb',\r\n 'Cd':'Cd_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'Cl':'Cl_gga_8au_60Ry_2s2p1d.orb',\r\n 'Co':'Co_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'Cr':'Cr_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'Cs':'Cs_gga_11au_60Ry_4s2p1d.orb',\r\n 'Cu':'Cu_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'F':'F_gga_7au_60Ry_2s2p1d.orb',\r\n 'Fe':'Fe_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'Ga':'Ga_gga_9au_60Ry_2s2p2d.orb',\r\n 'Ge':'Ge_gga_8au_60Ry_2s2p2d.orb',\r\n 'H':'H_gga_8au_60Ry_2s1p.orb',\r\n 'He':'He_gga_6au_60Ry_2s1p.orb',\r\n 'Hf':'Hf_gga_10au_60Ry_4s2p2d2f.orb',\r\n 'Hg':'Hg_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'I':'I_gga_8au_60Ry_2s2p2d.orb',\r\n 'In':'In_gga_9au_60Ry_2s2p2d.orb',\r\n 'Ir':'Ir_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'K':'K_gga_9au_60Ry_4s2p1d.orb',\r\n 'Kr':'Kr_gga_7au_60Ry_2s2p1d.orb',\r\n 'Li':'Li_gga_9au_60Ry_4s1p.orb',\r\n 'Mg':'Mg_gga_9au_60Ry_4s2p1d.orb',\r\n 'Mn':'Mn_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'Mo':'Mo_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'N':'N_gga_8au_60Ry_2s2p1d.orb',\r\n 'Na':'Na_gga_10au_60Ry_4s2p1d.orb',\r\n 'Nb':'Nb_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'Ne':'Ne_gga_6au_60Ry_2s2p1d.orb',\r\n 'Ni':'Ni_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'O':'O_gga_7au_60Ry_2s2p1d.orb',\r\n 'Os':'Os_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'P':'P_gga_8au_60Ry_2s2p1d.orb',\r\n 'Pb':'Pb_gga_9au_60Ry_2s2p2d.orb',\r\n 'Pd':'Pd_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'Pt':'Pt_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'Rb':'Rb_gga_10au_60Ry_4s2p1d.orb',\r\n 'Re':'Re_gga_10au_60Ry_4s2p2d1f.orb',\r\n 'Rh':'Rh_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'Ru':'Ru_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'S':'S_gga_8au_60Ry_2s2p1d.orb',\r\n 'Sb':'Sb_gga_9au_60Ry_2s2p2d.orb',\r\n 'Sc':'Sc_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'Se':'Se_gga_8au_60Ry_2s2p1d.orb',\r\n 'Si':'Si_gga_8au_60Ry_2s2p1d.orb',\r\n 'Sn':'Sn_gga_9au_60Ry_2s2p2d.orb',\r\n 'Sr':'Sr_gga_10au_60Ry_4s2p1d.orb',\r\n 'Ta':'Ta_gga_10au_60Ry_4s2p2d2f.orb',\r\n 'Tc':'Tc_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'Te':'Te_gga_9au_60Ry_2s2p2d.orb',\r\n 'Ti':'Ti_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'Tl':'Tl_gga_9au_60Ry_2s2p2d.orb',\r\n 'V':'V_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'W':'W_gga_10au_60Ry_4s2p2d2f.orb',\r\n 'Xe':'Xe_gga_8au_60Ry_2s2p2d.orb',\r\n 'Y':'Y_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'Zn':'Zn_gga_9au_60Ry_4s2p2d1f.orb',\r\n 'Zr':'Zr_gga_9au_60Ry_4s2p2d1f.orb'}\r\n }\r\n", "id": "654715", "language": "Python", "matching_score": 0.7186469435691833, "max_stars_count": 0, "path": "abacus/basis_set.py" }, { "content": "#!/usr/bin/env python \n# -*- coding: utf-8 -*-\n\n#PBS -N job_name\n#PBS -o job.log\n#PBS -e job.err\n#PBS -q gold5120\n#PBS -l nodes=1:ppn=12\n#PBS -l walltime=22:00:00\n\nimport os\n#os.system(\"module load intelmpi/2018.1.163\")\n#os.system(\"export OMP_NUM_THREADS=1\")\n#os.system(\"cd $PBS_O_WORKDIR\")\nfrom ase import Atoms,Atom\n\nfrom ase.calculators.abacus.abacus_out import *\nfrom ase.calculators.abacus.create_input import *\n\nfrom ase.io import write\nfrom ase.optimize import MDMin\nfrom ase.neb import NEB \n#import matplotlib.pyplot as plt \nfrom ase.neb import NEBTools\n\nos.environ['ASE_ABACUS_COMMAND']=\"mpirun -machinefile $PBS_NODEFILE -np 12 
/home/shenzx/software/abacus/abacus_v2.0/bin/ABACUS.mpi.2.0>>PREFIX.log\"\n#Create a structure\nal110_cell = (4.0614,2.8718,2.8718)\nal110_positions = [(0.0,0.0,0.0),\n (2.0307,1.4359,-1.4359)]\ninitial = Atoms('Al2',\n positions=al110_positions,\n cell=al110_cell,\n pbc=(1,1,0))\n#initial *= (2, 2, 1)\ninitial.append(Atom('C', (2.0307, 1.4359, 4.3077)))\ninitial.center(vacuum=4.0, axis=2)\n\nfinal = initial.copy()\nfinal.positions[-1][1] += 2.8718\nfinal.positions[-1][0] += 4.0614\n\n# Construct a list of images:\nimages = [initial]\nfor i in range(3):\n images.append(initial.copy())\nimages.append(final)\nprint('Create images successfully')\n\nfor image in images:\n # Let all images use an abacus calculator:\n image.set_calculator(Abacus(\n label = \"/home/shenzx/project/python_20190718/ase_20191211/example/neb/AlC/neb\",\n atoms=image,\n pseudo_dir = \"/home/shenzx/software/abacus/SG15_ONCV_PBE_1.0\",\n potential_name = \"potential_pbe_sg15\" , \n basis_dir = \"/home/shenzx/software/abacus/Orb_DZP_E100_Standard_v1.0\" ,\n basis_name = [ \"Al_gga_9au_100Ry_4s4p1d.orb\", \"C_gga_8au_100Ry_2s2p1d.orb\" ] ,\n calculation='scf',\n ntype=2,\n nbands=20,\n ecutwfc=50,\n dr2=\"1.0e-6\",\n niter=100,\n force=1,\n smearing='gaussian',\n sigma=0.02,\n mixing_type='pulay-kerker',\n ks_solver = 'genelpa',\n mixing_beta=0.4,\n basis_type='lcao',\n atom_file='STRU',\n gamma_only = 0,\n knumber = 0, \n kmode = 'Gamma', \n kpts = [ 1, 1, 1, 0, 0, 0]\n ))\n\n# Create a Nudged Elastic Band:\nneb = NEB(images)\n# Make a starting guess for the minimum energy path (a straight line\n# from the initial to the final state):\nneb.interpolate()\n# Relax the NEB path:\nminimizer = MDMin(neb)\nprint('Relax the NEB path, please wait!!!')\nminimizer.run(fmax=0.05)\n# Write the path to a trajectory:\nwrite('neb.traj', images)\nprint('Write the path to a trajectory successfully')\n\n#images = read('neb.traj@:')\n#view(images1)\n#view(images2)\n#view(images)\n\nnebtools = NEBTools(images)\n\n# Get the calculated barrier and the energy change of the reaction.\nEf, dE = nebtools.get_barrier()\nprint(Ef,' ',dE)\n# Get the barrier without any interpolation between highest images.\nEf, dE = nebtools.get_barrier(fit=False)\nprint(Ef,' ',dE)\n# Get the actual maximum force at this point in the simulation.\nmax_force = nebtools.get_fmax()\nprint('max_force: ',max_force)\n# Create a figure like that coming from ASE-GUI.\n#print('Create a figure,wait !!!')\n#fig = nebtools.plot_band()\n#fig.savefig('diffusion-barrier.png')\n\n# Create a figure with custom parameters.\n#fig = plt.figure(figsize=(5.5, 4.0))\n#ax = fig.add_axes((0.15, 0.15, 0.8, 0.75))\n#nebtools.plot_band(ax)\n#fig.savefig('diffusion-barrier.png')\n#print('Create a figure successfully')\n", "id": "7823133", "language": "Python", "matching_score": 7.921552658081055, "max_stars_count": 0, "path": "abacus/example/neb/ase_abacus_neb.py" }, { "content": "#!/usr/bin/env python \n# -*- coding: utf-8 -*-\n\n#PBS -N job_name\n#PBS -o job.log\n#PBS -e job.err\n#PBS -q gold5120\n#PBS -l nodes=1:ppn=8\n#PBS -l walltime=22:00:00\n\nimport os\nos.system(\"module load intelmpi/2018.1.163\")\nos.system(\"export OMP_NUM_THREADS=1\")\nos.system(\"cd $PBS_O_WORKDIR\")\nimport ase\nimport ase.io as aio\nimport ase.build as abd\nimport ase.optimize as aopt\nimport ase.visualize as av\nimport ase.constraints as ac\nfrom ase import Atoms\nfrom ase.units import Ry\n\nfrom ase.calculators.abacus.abacus_out import *\nfrom ase.calculators.abacus.create_input import 
*\n\n#os.environ['ASE_ABACUS_COMMAND'] = \"mpijob /opt/ABACUS/1.0.1dev_intel-mpi-mkl-2017_genELPA/bin/ABACUS>>PREFIX.log\"\nos.environ['ASE_ABACUS_COMMAND']=\"mpirun -machinefile $PBS_NODEFILE -np 8 /home/shenzx/software/abacus/abacus_v2.0/bin/ABACUS.mpi.2.0 >> PREFIX.log\"\n\nbulk = aio.read( \"/home/shenzx/project/python_20190718/ase_20191211/example/property/SiC_mp-8062_conventional_standard.cif\",\n format='cif') \n\ncalc = Abacus( label = \"/home/shenzx/project/python_20190718/ase_20191211/example/property/SiC/test\",\n atoms = bulk,\n pseudo_dir = \"/home/shenzx/software/abacus/SG15_ONCV_PBE_1.0\",\n potential_name = \"potential_pbe_sg15\" , \n basis_dir = \"/home/shenzx/software/abacus/Orb_DZP_E100_Standard_v1.0\" ,\n basis_name = [ \"Si_gga_8au_100Ry_2s2p1d.orb\", \"C_gga_8au_100Ry_2s2p1d.orb\" ] , \n niter = 1000, \n dr2 = \"5.0e-7\", \n ecutwfc = 100, \n calculation = \"scf\",\n nspin = 1,\n force = 1,\n ks_solver = 'genelpa', \n basis_type = 'lcao', \n gamma_only = 0, \n knumber = 0, \n kmode = 'Gamma', \n kpts = [ 3, 3, 3, 0, 0, 0], \n ) \n\nbulk.set_calculator(calc)\n\nprint(\"Potential Energy: eV\")\nprint(bulk.get_potential_energy())\nprint(\"Force:\")\nprint(bulk.get_forces())\nprint(\"Fermi Level:\")\nprint(calc.get_fermi_level())\n\nquit()\n", "id": "2183935", "language": "Python", "matching_score": 2.4283084869384766, "max_stars_count": 0, "path": "abacus/example/property/ase_abacus.py" }, { "content": "#!/usr/env/python3\n\nimport sys\nimport os\nimport json\nfrom pathlib import Path\nfrom monty.os import cd\nfrom subprocess import getoutput\nfrom time import sleep\n\nfrom ase.calculators.abacus.create_input import AbacusInput\n\nHELPER = r\"/WORK/nscc-gz_material_1/ICSD_vasp/abacus_calc/scripts/abacus_helper/\"\n\ndef yield_stru(path):\n for i in path.rglob(\"*\"):\n if len(i.parts) == len(path.parts) + 1:\n yield i\n\ndef _write_kpath(path):\n with cd(HELPER):\n kpath_dat = getoutput(f\"python main.py -t kpath -s {path/path.name} -n 20\")\n kpd = json.loads(kpath_dat)\n with cd(path):\n with open(\"KPT\", \"w\") as f:\n for _, v in eval(kpath_dat).items():\n f.write(v)\n\ndef _write_input(path):\n with cd(path):\n nbands = getoutput(\"grep NBANDS SCF_OUT.ABACUS/running_scf.log |awk '{print $NF}' |head -n 1\")\n ntype = getoutput(\"ls *.orb |wc -l\")\n input = AbacusInput()\n input.set(atom_file=path.name,\n ntype=int(ntype),\n nbands=int(nbands),\n kpoint_file='KPT',\n pseudo_dir='./',\n calculation='nscf',\n nspin=2,\n ecutwfc=60,\n niter=50,\n dr2=1.0e-9,\n ethr=1.0e-7,\n start_charge=\"file\",\n out_band=1,\n smearing='gaussian',\n sigma=0.02,\n ks_solver='genelpa',\n basis_type='lcao',\n mixing_type='pulay',\n mixing_beta=0.4,\n gamma_only=0)\n input.write_input_input()\n os.system('yhbatch -N 1 abacus.sh') \n sleep(1)\n\n\ndef make_band_inputs(scf):\n with cd(scf):\n os.system(\"mv *.out ./OUT.ABACUS/\")\n os.system(\"mv OUT.ABACUS SCF_OUT.ABACUS\")\n os.system(\"mv INPUT SCF_INPUT\")\n os.system(\"mv KPT SCF_KPT\")\n _write_kpath(scf)\n _write_input(scf)\n\n\nif __name__ == \"__main__\":\n args = sys.argv\n\n scf = Path(args[1])\n for s in yield_stru(scf):\n s = s.absolute()\n try:\n make_band_inputs(s)\n except:\n os.system(f\"echo {s} >> err.log\") \n \n", "id": "7320446", "language": "Python", "matching_score": 7.539890289306641, "max_stars_count": 0, "path": "get_band.py" }, { "content": "#!/usr/env/python3\n\nimport sys\nimport os\nimport json\nfrom pathlib import Path\nfrom monty.os import cd\nfrom subprocess import getoutput\nfrom time import 
sleep\n\nfrom ase.calculators.abacus.create_input import AbacusInput\nfrom math import ceil, pi\nimport numpy as np\nfrom ase.io import read\nfrom ase.io.abacus import write_input_stru\n\n\nHELPER = r\"/WORK/nscc-gz_material_1/ICSD_vasp/abacus_calc/scripts/abacus_helper/\"\n\ndef yield_stru(path):\n for i in path.rglob(\"*\"):\n if len(i.parts) == len(path.parts) + 1:\n yield i\n\ndef _write_input(path):\n with cd(path):\n os.system(\"cp SCF_KPT KPT\")\n ntype = getoutput(\"ls *.orb |wc -l\")\n input = AbacusInput()\n input.set(atom_file=path.name,\n ntype=int(ntype),\n kpoint_file='KPT',\n pseudo_dir='./',\n calculation='nscf',\n nspin=2,\n ecutwfc=60,\n niter=50,\n dr2=1.0e-9,\n ethr=1.0e-7,\n start_charge=\"file\",\n out_dos=1,\n smearing='gaussian',\n sigma=0.02,\n ks_solver='genelpa',\n basis_type='lcao',\n mixing_type='pulay',\n mixing_beta=0.4,\n gamma_only=0)\n input.write_input_input()\n os.system('yhbatch -N 1 abacus.sh') \n sleep(1)\n\n\ndef make_band_inputs(scf):\n with cd(scf):\n os.system(\"mv *.out ./OUT.ABACUS/\")\n os.system(\"mv OUT.ABACUS BAND_OUT.ABACUS\")\n os.system(\"mv INPUT BAND_INPUT\")\n os.system(\"mv KPT BAND_KPT\")\n _write_input(scf)\n\n\nif __name__ == \"__main__\":\n args = sys.argv\n\n scf = Path(args[1])\n for s in yield_stru(scf):\n s = s.absolute()\n #try:\n make_band_inputs(s)\n #except:\n # os.system(f\"echo {s} >> err_dos.log\") \n \n", "id": "974910", "language": "Python", "matching_score": 5.148013114929199, "max_stars_count": 0, "path": "get_dos.py" }, { "content": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# 导入模块\r\nimport os,sys\r\nfrom math import ceil, pi\r\nimport numpy as np\r\nfrom ase.io import read\r\nfrom ase.io.abacus import write_input_stru\r\nfrom ase.calculators.abacus.create_input import AbacusInput\r\n\r\n\r\n# 指定结构文件、赝势库、轨道库路径,以及K点密度相关的KSPACING值\r\n#STRUPATH='/WORK/nscc-gz_material_1/ICSD_vasp/abacus/abacus/benchmark/modify'\r\n#STRUPATH='./test'\r\nSTRUPATH='/WORK/nscc-gz_material_1/ICSD_vasp/abacus/abacus/AbacusHighThroughput/test'\r\nPOTPATH='/WORK/nscc-gz_material_1/ICSD_vasp/abacus/abacus/AbacusHighThroughput/PotSG15/'\r\nORBPATH='/WORK/nscc-gz_material_1/ICSD_vasp/abacus/abacus/AbacusHighThroughput/OrbSG15std/'\r\nKSPACING=0.13\r\n#CALC_PATH = r'/WORK/nscc-gz_material_1/ICSD_vasp/abacus/abacus/calc_benchmark'\r\n#CALC_PATH = r'/WORK/nscc-gz_material_1/ICSD_vasp/abacus/abacus/compare/abacus'\r\nCALC_PATH = r'/WORK/nscc-gz_material_1/ICSD_vasp/abacus/abacus/AbacusHighThroughput/test2'\r\nBASH_DIR = r'/WORK/nscc-gz_material_1/ICSD_vasp/abacus/abacus/AbacusHighThroughput'\r\n\r\nfor root, dirs, files in os.walk(top=STRUPATH, topdown=True):\r\n for file in files:\r\n # 读取vasp格式的结构文件,获取结构元素种类,结构文件名字\r\n stru = read(os.path.join(root, file), format='vasp')\r\n ntype = len(set(stru.get_chemical_symbols()))\r\n atom_file = file.split('.')[0]\r\n \r\n # 根据结构文件名字创建abacus运行文件夹\r\n cpth = os.path.join(CALC_PATH, atom_file)\r\n print(cpth) \r\n if not os.path.exists(cpth):\r\n os.makedirs(cpth)\r\n os.chdir(cpth)\r\n \r\n # 在abacus运行文件夹下创建INPUT\r\n input = AbacusInput()\r\n input.set(atom_file=atom_file,\r\n ntype=ntype,\r\n kpoint_file='KPT',\r\n pseudo_dir='./',\r\n calculation='scf',\r\n nspin=2,\r\n ecutwfc=60,\r\n dr2=1e-06,\r\n ks_solver='genelpa',\r\n niter=100,\r\n basis_type='lcao',\r\n smearing='gauss',\r\n sigma=0.002,\r\n mixing_type='pulay',\r\n mixing_beta=0.4,\r\n symmetry=1,\r\n gamma_only=0)\r\n input.write_input_input()\r\n \r\n # 在abacus运行文件夹下创建KPT\r\n Kpoints = [int(ceil(2 * pi / KSPACING * 
np.linalg.norm(stru.get_reciprocal_cell()[i]))) for\r\n i in range(3)]\r\n Kpoints += [0, 0, 0]\r\n input.set(knumber=0,\r\n kmode='Gamma',\r\n kpts=Kpoints)\r\n input.write_input_kpt()\r\n\r\n # 在abacus运行文件夹下创建abacus结构文件(与vasp结构文件同名),\r\n # 并复制相应元素赝势和轨道到运行文件夹下\r\n write_input_stru(stru=stru,\r\n pseudo_dir=POTPATH,\r\n basis_dir=ORBPATH,\r\n potential_name='PotSG15',\r\n basis_name='SG15std',\r\n coordinates_type='Direct',\r\n spin=2,\r\n filename=atom_file)\r\n \r\n # 复制abacus运行脚本并提交到服务器\r\n os.system('cp {}/abacus.sh {}'.format(BASH_DIR,CALC_PATH))\r\n os.system('yhbatch -N 1 '+CALC_PATH+'/abacus.sh')\r\n # os.chdir(CALC_PATH)\r\n", "id": "2462470", "language": "Python", "matching_score": 1.996698021888733, "max_stars_count": 0, "path": "calc.py" }, { "content": "#!/usr/env/python3\n\n\nfrom pathlib import Path\nimport shutil\n\ndef support():\n pfile = r\"./abacus.pot\"\n ofile = r\"./abacus.orb\"\n def read(file):\n su = set()\n with open(file, \"r\") as s:\n for i in s:\n su.add(i.strip(\"\\n\"))\n return su\n psu = read(pfile)\n osu = read(ofile)\n print(\"Pot support: \", len(psu))\n print(\"Orb support: \", len(osu))\n return psu & osu\n\ndef yield_stru():\n file = Path(r\"/WORK/nscc-gz_material_1/ICSD_vasp/abacus_calc/matgen_scf/simple_substance/stru_from_matgen\")\n for stru in file.rglob(\"*.vasp\"):\n yield stru\n\n\ndef get_e(stru):\n with open(stru, \"r\") as f:\n for idx, line in enumerate(f):\n if idx == 5:\n elements = line.strip('\\n').split()\n return elements\n\ndef in_support(es, sue):\n for i in es:\n if i not in sue:\n return False\n return True\n\n\nif __name__ == \"__main__\":\n sue = support()\n print(sue)\n print(\"total support: \", len(sue))\n sim = set()\n for stru in yield_stru():\n ye = get_e(stru)\n print(ye) \n if len(ye) == 1:\n sim.add(ye[0])\n if in_support(ye, sue):\n shutil.copy(stru, r\"/WORK/nscc-gz_material_1/ICSD_vasp/abacus_calc/matgen_scf/simple_substance/run\")\n print(sim)\n print(len(sim))\n \n\n\n\n\n", "id": "1195124", "language": "Python", "matching_score": 3.0602939128875732, "max_stars_count": 0, "path": "screen.py" }, { "content": "#!/usr/env/python3\n\nimport json\n\nwith open(r\"abacus_simple.json\", \"r\") as f:\n si = json.load(f)\n\n\ndef support():\n pfile = r\"../../abacus.pot\"\n ofile = r\"../../abacus.orb\"\n def read(file):\n su = set()\n with open(file, \"r\") as s:\n for i in s:\n su.add(i.strip(\"\\n\"))\n return su\n psu = read(pfile)\n osu = read(ofile)\n print(\"Pot support: \", len(psu))\n print(\"Orb support: \", len(osu))\n return psu & osu\n\nalls = support()\nfor k in alls:\n if k not in list(si.keys()):\n print(k)\n \n", "id": "11536236", "language": "Python", "matching_score": 0.01853722706437111, "max_stars_count": 0, "path": "postprocess/find_miss.py" }, { "content": "#!/usr/env/python3\n\n\nimport pymongo\n\nCOUNT = 0\n\ndef get_db(db):\n addr = \"192.168.3.11:10102\"\n client = pymongo.MongoClient(addr)\n db = client[db]\n return db\n\ndef get_id_from(col):\n global COUNT\n for item in col.find().batch_size(2):\n if \"icsd_id\" in item.keys() and \"oqmd_id\" in item.keys() and \"cod_id\" in item.keys():\n COUNT += 1\n print(f\"skip: {COUNT}\")\n continue\n if \"icsd_id\" in item.keys() and item[\"icsd_id\"] != -1:\n yield item, {\"icsd_id\": item[\"icsd_id\"]}, \"icsd\"\n elif \"oqmd_id\" in item.keys() and item[\"oqmd_id\"] != -1:\n yield item, {\"oqmd_id\": item[\"oqmd_id\"]}, \"oqmd\"\n elif \"cod_id\" in item.keys() and item[\"cod_id\"] != -1:\n yield item, {\"cod_id\": item[\"cod_id\"]}, \"cod\"\ndef 
get_nil(name):\n return {\"icsd\": {\"cod_id\": -1, \"oqmd_id\": -1}, \"cod\": {\"icsd_id\": -1, \"oqmd_id\": -1}, \"oqmd\": {\"icsd_id\": -1, \"cod_id\": -1}}.get(name)\n\n\nif __name__ == \"__main__\":\n import sys\n\n args = sys.argv\n colname = args[1]\n\n org_col = get_db(\"dft_data\")[colname]\n for stru, lid, db in get_id_from(org_col):\n qs = get_nil(db)\n #stru.update(qs)\n #exist = org_col.find_one(lid)\n #org_col.update_one(exist, {'$set': stru})\n org_col.update(lid, {'$set': qs})\n u = org_col.find_one(lid)\n print(u[\"icsd_id\"], u[\"cod_id\"], u[\"oqmd_id\"])\n", "id": "6443811", "language": "Python", "matching_score": 3.2329726219177246, "max_stars_count": 0, "path": "postprocess/modify_id.py" }, { "content": "#!/usr/env/python3\n\n\nimport pymongo\n\ndef get_db(db):\n addr = \"172.16.17.32:10102\"\n client = pymongo.MongoClient(addr)\n db = client[db]\n return db\n\ndef get_id_from_abacus(col):\n for item in col.find():\n if \"icsd_id\" in item.keys():\n yield item, {\"icsd_id\": item[\"icsd_id\"]}, \"icsd\"\n elif \"oqmd_id\" in item.keys():\n yield item, {\"oqmd_id\": item[\"oqmd_id\"]}, \"oqmd\"\n elif \"cod_id\" in item.keys():\n yield item, {\"cod_id\": item[\"cod_id\"]}, \"cod\"\ndef get_nil(name):\n return {\"icsd\": {\"cod_id\": -1, \"oqmd_id\": -1}, \"cod\": {\"icsd_id\": -1, \"oqmd_id\": -1}, \"oqmd\": {\"icsd_id\": -1, \"cod_id\": -1}}.get(name)\n\n\nif __name__ == \"__main__\":\n abacus_stru = get_db(\"abacus_data\")\n dft_stru = get_db(\"dft_data\")\n for stru, lid, db in get_id_from_abacus(abacus_stru[\"stru\"]):\n qs = get_nil(db)\n stru.update(qs)\n print(stru)\n exist = abacus_stru[\"stru\"].find_one(lid)\n abacus_stru[\"stru\"].update_one(exist, {'$set': stru})\n \n", "id": "2784794", "language": "Python", "matching_score": 3.3622944355010986, "max_stars_count": 0, "path": "postprocess/modify.py" }, { "content": "#!/usr/env/python3\n\n\nimport pymongo\n\ndef get_db(db):\n addr = \"172.16.31.10:10102\"\n client = pymongo.MongoClient(addr)\n db = client[db]\n return db\n\ndef get_id_from_abacus(col):\n for item in col.find():\n if \"icsd_id\" in item.keys():\n yield item, {\"icsd_id\": item[\"icsd_id\"]}\n elif \"oqmd_id\" in item.keys():\n yield item, {\"oqmd_id\": item[\"oqmd_id\"]}\n elif \"cod_id\" in item.keys():\n yield item, {\"cod_id\": item[\"cod_id\"]}\n\ndef seek_matid_in_dft(col, key):\n return {\"matid\": col.find_one(key).get(\"matid\")}\n\ndef seek_sim_in_dft(col, key):\n if col.find_one(key).get(\"same_file\") is None:\n val = {\"same_file\": int(-1)}\n else:\n val = {\"same_file\": int(col.find_one(key).get(\"same_file\"))}\n return val\n\nif __name__ == \"__main__\":\n abacus_stru = get_db(\"abacus_data\")\n dft_stru = get_db(\"dft_data\")\n for stru, lid in get_id_from_abacus(abacus_stru[\"cif\"]):\n mid = seek_matid_in_dft(dft_stru[\"sp\"], lid)\n #sim = seek_sim_in_dft(dft_stru[\"sp\"], lid)\n #stru.update(sim)\n stru.update(mid)\n exist = abacus_stru[\"cif\"].find_one(lid)\n abacus_stru[\"cif\"].update_one(exist, {'$set': stru})\n \n", "id": "5268491", "language": "Python", "matching_score": 0.597636878490448, "max_stars_count": 0, "path": "postprocess/add_sth.py" }, { "content": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\nimport os\r\nimport math\r\nimport shutil\r\nimport readcif\r\n\r\n''' \r\nCreate an initial calculation folder and all simulation input file templates\r\n(Helium void Fraction; Adsorption; Henry coefficient;) and calculate the unitcell value;\r\nCIFPATH \r\neg: python init_calculation.py 
~/MOFs/cif\r\n'''\r\n\r\ndef seek_mof_cif(file_url):\r\n f_list = os.listdir(file_url)\r\n cifs = [os.path.splitext(i)[0] for i in f_list if os.path.splitext(i)[1] == '.cif']\r\n\r\n return cifs\r\n\r\ndef init_calc_space(file_url):\r\n cifs = seek_mof_cif(file_url)\r\n init_calc_space_list = []\r\n essential_thing_extension = '.cif'\r\n for essential_thing in cifs:\r\n src_file = file_url + os.sep + essential_thing + essential_thing_extension\r\n des_path = file_url + os.sep + essential_thing + os.sep\r\n des_file = des_path + essential_thing + essential_thing_extension\r\n if not os.path.exists(des_path):\r\n os.mkdir(des_path)\r\n shutil.copy(src_file,des_file)\r\n init_calc_space_list.append(des_path)\r\n\r\n return init_calc_space_list\r\n\r\ndef babel_cif(essential_cif):\r\n mof_file = essential_cif\r\n calc_cif = mof_file\r\n #babel_mof_file = \"babel_\" + mof_file\r\n #babelcmd = \"babel \" + mof_file + \" \" + babel_mof_file\r\n '''\r\n try:\r\n os.system(babelcmd)\r\n os.remove(essential_cif)\r\n calc_cif = babel_mof_file\r\n except:\r\n print(\"babel conversion error!\")\r\n calc_cif = mof_file\r\n with open(\"~/MOFslog\",\"a+\") as bb_error:\r\n bb_error.writelines(essential_cif+\"\\n\")\r\n '''\r\n return calc_cif\r\n\r\ndef simulation_cell_value(essential_cif):\r\n cif_data = readcif.read_cif_file(essential_cif)\r\n lattice = readcif.get_lattice(cif_data)\r\n a, b, c = lattice[0], lattice[1], lattice[2]\r\n av, bv, cv = 0, 0, 0\r\n for i in range(3):\r\n av += a[i] ** 2\r\n bv += b[i] ** 2\r\n cv += c[i] ** 2\r\n av, bv, cv = math.sqrt(av), math.sqrt(bv), math.sqrt(cv)\r\n # print(av,bv,cv)\r\n #ra, rb, rc = 26 / av, 26 / bv, 26 / cv\r\n ra, rb, rc = 20 / av, 20 / bv, 20 / cv\r\n l = [ra, rb, rc]\r\n # print(l)\r\n #for num in range(3):\r\n # if abs(l[num] - round(l[num])) < 0.5:\r\n # l[num] += 1\r\n # la, lb, lc = int(l[0]), int(l[1]), int(l[2])\r\n def _floor(x):\r\n a = math.floor(x)\r\n if a == 0:\r\n a = 1\r\n return int(x)\r\n la, lb, lc = list(map(_floor, l))\r\n if la == 0:\r\n la = 1\r\n if lb == 0:\r\n lb = 1\r\n if lc == 0:\r\n lc = 1\r\n \r\n length = str(la) + \" \" + str(lb) + \" \" + str(lc)\r\n\r\n return length\r\n\r\ndef init_mof_ppcalc(file_url,calculation_mode,section):\r\n homepath = os.path.expanduser('~')\r\n os.chdir(file_url)\r\n ppcalc_dir_list = [\"HeliumVF\",\"Adsorption\",\"HenryC\"]\r\n if calculation_mode == \"hvf\":\r\n joblist_path = homepath + \"/MOFs/work/MOF_VoidFraction/\"+ calculation_mode + \"_\" + section + os.sep\r\n if not os.path.exists(joblist_path):\r\n os.makedirs(joblist_path)\r\n ppath = ppcalc_dir_list[0]\r\n elif calculation_mode == \"ads\":\r\n joblist_path = homepath + \"/MOFs/work/MOF_Adsorption/\"+ calculation_mode + \"_\" + section + os.sep\r\n if not os.path.exists(joblist_path):\r\n os.makedirs(joblist_path)\r\n ppath = ppcalc_dir_list[1]\r\n elif calculation_mode == \"hc\":\r\n joblist_path = homepath + \"/MOFs/work/MOF_HenryC/\"+ calculation_mode + \"_\" + section + os.sep\r\n if not os.path.exists(joblist_path):\r\n os.makedirs(joblist_path)\r\n ppath = ppcalc_dir_list[2]\r\n else:\r\n raise NotSupport\r\n init_ppath = file_url + os.sep + ppath\r\n if not os.path.exists(init_ppath):\r\n os.makedirs(init_ppath)\r\n essential_cif = \"\".join(seek_mof_cif(file_url))\r\n essential_cif_extension = '.cif'\r\n cif_file = essential_cif + essential_cif_extension\r\n src_file = file_url + cif_file\r\n des_path = file_url + ppath + os.sep\r\n des_file = des_path + cif_file\r\n shutil.copyfile(src_file, des_file)\r\n 
os.chdir(des_path)\r\n length = simulation_cell_value(cif_file)\r\n submit1_sh = [\"#!/bin/sh\\n\",\r\n \"cd \"+ des_path + \"\\n\", \r\n \"export RASPA_DIR=/WORK/nscc-gz_material_1/MOFs/sf_box/raspa2/src\\n\",\r\n \"#export RASPA_DIR=/WORK/nscc-gz_material_5/Apps/raspa2/src\\n\",\r\n \"$RASPA_DIR/bin/simulate\\n\"\r\n ]\r\n if calculation_mode == \"hvf\":\r\n s_hvf_in = [\"SimulationType MonteCarlo\\n\",\r\n \"NumberOfCycles 1000\\n\",\r\n \"PrintEvery 1000\\n\",\r\n \"PrintPropertiesEvery 1000\\n\",\r\n \"\\n\",\r\n \"Forcefield UFF4MOFs\\n\",\r\n \"ChargeFromChargeEquilibration yes\\n\",\r\n \"CutOff 9.8\\n\",\r\n \"\\n\",\r\n \"Framework 0\\n\",\r\n \"FrameworkName\" + \" \" + str(essential_cif) + \"\\n\",\r\n \"UnitCells\" + \" \" + length + \"\\n\",\r\n \"ExternalTemperature 273.0\\n\",\r\n \"\\n\",\r\n \"Component 0 MoleculeName \" + \"helium\" + \"\\n\",\r\n \" MoleculeDefinition TraPPE\\n\",\r\n \" WidomProbability 1.0\\n\",\r\n \" CreateNumberOfMolecules 0\\n\"\r\n ]\r\n\r\n sub_file = \"hvf\" + str(essential_cif) + \".sh\"\r\n with open(\"./simulation.input\", \"w\") as f_hvf:\r\n f_hvf.writelines(s_hvf_in)\r\n os.chdir(joblist_path)\r\n with open(r\"./\" + sub_file, \"w\") as f_sub:\r\n f_sub.writelines(submit1_sh)\r\n \r\n elif calculation_mode == \"ads\":\r\n s_adp_in = [\"SimulationType MonteCarlo\\n\",\r\n \"NumberOfCycles 10000\\n\",\r\n \"NumberOfInitializationCycles 1000\\n\"\r\n \"PrintEvery 1000\\n\",\r\n \"PrintPropertiesEvery 1000\\n\",\r\n \"\\n\",\r\n \"Forcefield UFF4MOFs\\n\",\r\n \"ChargeFromChargeEquilibration yes\\n\",\r\n \"CutOff 9.8\\n\",\r\n \"\\n\",\r\n \"Framework 0\\n\",\r\n \"FrameworkName\" + \" \" + str(essential_cif) + \"\\n\",\r\n \"UnitCells\" + \" \" + length + \"\\n\",\r\n \"ExternalTemperature 273.0\\n\",\r\n \"HeliumVoidFraction\" + \" hvf_value\" + \"\\n\",\r\n \"ExternalPressure pressure_value\\n\",\r\n \r\n \"\\n\",\r\n \"Component 0 MoleculeName CO2\\n\",\r\n \" MoleculeDefinition TraPPE\\n\",\r\n \" TranslationProbability 0.5\\n\",\r\n \" RotationProbability 0.5\\n\",\r\n \" ReinsertionProbability 0.5\\n\",\r\n \" SwapProbability 1.0\\n\",\r\n \" CreateNumberOfMolecules 0\\n\"\r\n ]\r\n #sub_file = \"ads\" + str(essential_cif) + \".sh\"\r\n with open(\"./simulation.input\", \"w\") as f_ads:\r\n f_ads.writelines(s_adp_in)\r\n #os.chdir(adsorption_joblist_path)\r\n #with open(r\"./\" + sub_file, \"w\") as f_sub:\r\n # f_sub.writelines(submit_sh)\r\n \r\n elif calculation_mode == \"hc\":\r\n s_hc_in = [\"SimulationType MonteCarlo\\n\",\r\n \"NumberOfCycles 10000\\n\",\r\n \"NumberOfInitializationCycles 0\\n\"\r\n \"PrintEvery 1000\\n\",\r\n \"PrintPropertiesEvery 1000\\n\",\r\n \"\\n\",\r\n \"Forcefield UFF4MOFs\\n\",\r\n \"\\n\",\r\n \"Framework 0\\n\",\r\n \"FrameworkName\" + \" \" + str(essential_cif) + \"\\n\",\r\n \"RemoveAtomNumberCodeFromLabel yes\\n\"\r\n \"UnitCells\" + \" \" + length + \"\\n\",\r\n \"ExternalTemperature 77.0\\n\",\r\n \"\\n\",\r\n \"Component 0 MoleculeName N2\\n\",\r\n \" MoleculeDefinition TraPPE\\n\",\r\n \" IdealRosenbluthValue 1.0\\n\",\r\n \" WidomProbability 1.0\\n\",\r\n \" CreateNumberOfMolecules 0\\n\"\r\n ]\r\n sub_file = \"hc\" + str(essential_cif) + \".sh\"\r\n with open(\"./simulation.input\", \"w\") as f_ads:\r\n f_ads.writelines(s_hc_in)\r\n os.chdir(joblist_path)\r\n with open(r\"./\" + sub_file, \"w\") as f_sub:\r\n f_sub.writelines(submit1_sh)\r\n \r\n return joblist_path\r\n\r\nif __name__ == '__main__':\r\n import sys\r\n args = sys.argv\r\n file_url = args[1]\r\n calculation_mode = args[2]\r\n 
part = args[3]\r\n work_dir = init_calc_space(file_url)\r\n for mofpath in work_dir:\r\n essential_cif = seek_mof_cif(mofpath)\r\n os.chdir(mofpath)\r\n calc_cif = babel_cif(\"\".join(essential_cif) + \".cif\")\r\n init_mof_ppcalc(mofpath,calculation_mode,part)\r\n", "id": "7029446", "language": "Python", "matching_score": 9.120766639709473, "max_stars_count": 0, "path": "init_calculation.py" }, { "content": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @Author : SenGao\r\nimport os\r\nimport math\r\nimport shutil\r\nimport readcif\r\n\r\n''' \r\nCreate an initial calculation folder and all simulation input file templates\r\n(Helium void Fraction; Adsorption; Henry coefficient;) and calculate the unitcell value;\r\nCIFPATH \r\neg: python MOFs_step1.py /WORK/nscc-gz_material_5/MOF/test/ref_try/mofdb/cif\r\n'''\r\n\r\ndef find_cif(filepath):\r\n f_list = os.listdir(filepath)\r\n cifs = [os.path.splitext(i)[0] for i in f_list if os.path.splitext(i)[1] == '.cif']\r\n\r\n return cifs\r\n\r\ndef calc_directory(filepath):\r\n cifs = find_cif(filepath)\r\n mof_file_dir = []\r\n for mof_name in cifs:\r\n src_file = filepath + \"/\" + mof_name + \".cif\"\r\n des_path = filepath + \"/\" + mof_name + \"/\"\r\n des_file = des_path + mof_name + \".cif\"\r\n os.chdir(filepath)\r\n if not os.path.exists(des_path):\r\n os.mkdir(des_path)\r\n shutil.move(src_file,des_file)\r\n mof_file_dir.append(des_path)\r\n\r\n return mof_file_dir\r\n\r\ndef babel_cif(cifname):\r\n mof_file = cifname\r\n calc_cif = mof_file\r\n #babel_mof_file = \"babel_\" + mof_file\r\n #babelcmd = \"babel \" + mof_file + \" \" + babel_mof_file\r\n '''\r\n try:\r\n os.system(babelcmd)\r\n os.remove(cifname)\r\n calc_cif = babel_mof_file\r\n except:\r\n print(\"babel conversion error!\")\r\n calc_cif = mof_file\r\n with open(\"~/MOFslog\",\"a+\") as bb_error:\r\n bb_error.writelines(cifname+\"\\n\")\r\n '''\r\n return calc_cif\r\n\r\ndef unitcell_value(filename):\r\n cif_data = readcif.read_cif_file(filename)\r\n lattice = readcif.get_lattice(cif_data)\r\n a, b, c = lattice[0], lattice[1], lattice[2]\r\n av, bv, cv = 0, 0, 0\r\n for i in range(3):\r\n av += a[i] ** 2\r\n bv += b[i] ** 2\r\n cv += c[i] ** 2\r\n av, bv, cv = math.sqrt(av), math.sqrt(bv), math.sqrt(cv)\r\n # print(av,bv,cv)\r\n ra, rb, rc = 26 / av, 26 / bv, 26 / cv\r\n l = [ra, rb, rc]\r\n # print(l)\r\n for num in range(3):\r\n if abs(l[num] - round(l[num])) < 0.5:\r\n l[num] += 1\r\n la, lb, lc = int(l[0]), int(l[1]), int(l[2])\r\n length = str(la) + \" \" + str(lb) + \" \" + str(lc)\r\n\r\n return length\r\n\r\ndef calc_propery(filepath,mode,part):\r\n os.chdir(filepath)\r\n calc_list = [\"HeliumVF\",\"Adsorption\",\"HenryC\"]\r\n homepath = os.path.expanduser('~')\r\n if mode == \"hvf\":\r\n joblist_path = homepath + \"/MOFs/work/\"+ mode + \"_\" + part+\"/MOF_VF/Joblist/VoidFraction\"\r\n if not os.path.exists(joblist_path):\r\n os.makedirs(joblist_path)\r\n elif mode == \"ads\":\r\n joblist_path = homepath + \"/MOFs/work/\"+ mode + \"_\" + part+\"/MOF_ADS/Joblist/Adsorption\"\r\n if not os.path.exists(joblist_path):\r\n os.makedirs(joblist_path)\r\n elif mode == \"hc\":\r\n joblist_path = homepath + \"/MOFs/work/\"+ mode + \"_\" + part+\"/MOF_HC/Joblist/HenryC\"\r\n if not os.path.exists(joblist_path):\r\n os.makedirs(joblist_path)\r\n #heliumvf_job_path_list,henryc_job_path_list = [],[]\r\n for path in calc_list:\r\n os.mkdir(filepath+\"/\"+path)\r\n filename = \"\".join(find_cif(filepath))\r\n cif_file = \"\".join(filename)+\".cif\"\r\n des_path = filepath 
+ path + \"/\"\r\n src_file = filepath + cif_file\r\n des_file = des_path + cif_file\r\n shutil.copyfile(src_file, des_file)\r\n os.chdir(des_path)\r\n length = unitcell_value(cif_file)\r\n submit1_sh = [\"#!/bin/sh\\n\",\r\n \"cd \"+des_path+\"\\n\", \r\n \"export RASPA_DIR=/WORK/nscc-gz_material_1/MOFs/sf_box/raspa2/src\\n\",\r\n \"$RASPA_DIR/bin/simulate\\n\"\r\n ]\r\n if (mode == \"hvf\") and (path == \"HeliumVF\"):\r\n s_hvf_in = [\"SimulationType MonteCarlo\\n\",\r\n \"NumberOfCycles 10000\\n\",\r\n \"PrintEvery 1000\\n\",\r\n \"PrintPropertiesEvery 1000\\n\",\r\n \"\\n\",\r\n \"Forcefield UFF4MOFs\\n\",\r\n \"\\n\",\r\n \"Framework 0\\n\",\r\n \"FrameworkName\" + \" \" + str(filename) + \"\\n\",\r\n \"UnitCells\" + \" \" + length + \"\\n\",\r\n \"ExternalTemperature 298.0\\n\",\r\n \"\\n\",\r\n \"Component 0 MoleculeName \" + \"helium\" + \"\\n\",\r\n \" MoleculeDefinition TraPPE\\n\",\r\n \" WidomProbability 1.0\\n\",\r\n \" CreateNumberOfMolecules 0\\n\"\r\n ]\r\n\r\n sub_file = \"hvf\" + str(filename) + \".sh\"\r\n with open(\"./simulation.input\", \"w\") as f_hvf:\r\n f_hvf.writelines(s_hvf_in)\r\n os.chdir(joblist_path)\r\n with open(r\"./\" + sub_file, \"w\") as f_sub:\r\n f_sub.writelines(submit1_sh)\r\n \r\n elif (mode == \"ads\") and (path == \"Adsorption\"):\r\n s_adp_in = [\"SimulationType MonteCarlo\\n\",\r\n \"NumberOfCycles 10000\\n\",\r\n \"NumberOfInitializationCycles 5000\\n\"\r\n \"PrintEvery 1000\\n\",\r\n \"PrintPropertiesEvery 1000\\n\",\r\n \"\\n\",\r\n \"Forcefield UFF4MOFs\\n\",\r\n \"ChargeFromChargeEquilibration yes\\n\",\r\n \"CutOff 12.8\\n\",\r\n \"\\n\",\r\n \"Framework 0\\n\",\r\n \"FrameworkName\" + \" \" + str(filename) + \"\\n\",\r\n \"UnitCells\" + \" \" + length + \"\\n\",\r\n \"ExternalTemperature 77.0\\n\",\r\n \"HeliumVoidFraction\" + \" hvf_value\" + \"\\n\",\r\n \"ExternalPressure pressure_value\\n\",\r\n \r\n \"\\n\",\r\n \"Component 0 MoleculeName N2\\n\",\r\n \" TranslationProbability 0.5\\n\",\r\n \" RotationProbability 0.5\\n\",\r\n \" ReinsertionProbability 0.5\\n\",\r\n \" SwapProbability 1.0\\n\",\r\n \" CreateNumberOfMolecules 0\\n\"\r\n ]\r\n #sub_file = \"ads\" + str(filename) + \".sh\"\r\n with open(\"./simulation.input\", \"w\") as f_ads:\r\n f_ads.writelines(s_adp_in)\r\n #os.chdir(adsorption_joblist_path)\r\n #with open(r\"./\" + sub_file, \"w\") as f_sub:\r\n # f_sub.writelines(submit_sh)\r\n \r\n elif (mode) == \"hc\" and (path == \"HenryC\"):\r\n s_hc_in = [\"SimulationType MonteCarlo\\n\",\r\n \"NumberOfCycles 10000\\n\",\r\n \"NumberOfInitializationCycles 0\\n\"\r\n \"PrintEvery 1000\\n\",\r\n \"PrintPropertiesEvery 1000\\n\",\r\n \"\\n\",\r\n \"Forcefield UFF4MOFs\\n\",\r\n \"\\n\",\r\n \"Framework 0\\n\",\r\n \"FrameworkName\" + \" \" + str(filename) + \"\\n\",\r\n \"RemoveAtomNumberCodeFromLabel yes\\n\"\r\n \"UnitCells\" + \" \" + length + \"\\n\",\r\n \"ExternalTemperature 77.0\\n\",\r\n \"\\n\",\r\n \"Component 0 MoleculeName N2\\n\",\r\n \" MoleculeDefinition TraPPE\\n\",\r\n \" IdealRosenbluthValue 1.0\\n\",\r\n \" WidomProbability 1.0\\n\",\r\n \" CreateNumberOfMolecules 0\\n\"\r\n ]\r\n sub_file = \"hc\" + str(filename) + \".sh\"\r\n with open(\"./simulation.input\", \"w\") as f_ads:\r\n f_ads.writelines(s_hc_in)\r\n os.chdir(joblist_path)\r\n with open(r\"./\" + sub_file, \"w\") as f_sub:\r\n f_sub.writelines(submit1_sh)\r\n \r\n return joblist_path\r\n\r\nif __name__ == '__main__':\r\n import sys\r\n args = sys.argv\r\n filepath = args[1]\r\n mode = args[2]\r\n part = args[3]\r\n work_dir = 
calc_directory(filepath)\r\n for mofpath in work_dir:\r\n filename = find_cif(mofpath)\r\n #print(filename) \r\n os.chdir(mofpath)\r\n calc_cif = babel_cif(\"\".join(filename)+\".cif\")\r\n #print(calc_cif)\r\n calc_propery(mofpath,mode,part)\r\n", "id": "12629182", "language": "Python", "matching_score": 3.3704581260681152, "max_stars_count": 0, "path": "result_exract/MOFs_step1.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport re\nimport shutil\nimport init_calculation\nimport subprocess\n\n''' \n\nExtract Helium void Fraction from before calculation and perform Adsorption calculation\n: CIFPATH + INDIVIDUAL CIF DIRECTORY\neg: python MOFs_step2.py /WORK/nscc-gz_material_5/MOF/test/ref_try/mofdb/cif/ABAFUH.MOF_subset\n\n'''\n\ndef read_outfile_name(single_mof_calcf):\n output_file_path = single_mof_calcf + \"/HeliumVF/Output/System_0/\"\n mof_id = single_mof_calcf.split(\"/\")[-1]\n homepath = os.path.expanduser('~')\n print('the path is ', output_file_path)\n try:\n outfile_name = os.listdir(output_file_path)\n output_file = output_file_path+\"\".join(outfile_name)\n #print(output_file)\n #pat = \"Rosenbluth factor new: (\\d+.\\d+)\"\n pat = \"Rosenbluth factor new: (\\d+.\\d+|\\d+)\"\n kw = 'Rosenbluth factor new'\n kwr = subprocess.getoutput(\"grep \\'Rosenbluth factor new\\' {}\".format(output_file))\n #hvf_value = re.findall(pat,open(output_file).read())[-1]\n #hvf_value_list = re.findall(pat,open(output_file).read())\n print(kwr)\n hvf_value_list = re.findall(pat, kwr)\n print(hvf_value_list)\n if len(hvf_value_list) != 0:\n hvf_value = hvf_value_list[-1]\n else:\n hvf_value = None\n except FileNotFoundError:\n print(mof_id,\" no hvf value,skipping\") \n with open(homepath + \"/MOFs/submit_state/adsorption_without_hvf.txt\",\"a+\") as f:\n f.writelines(mof_id)\n hvf_value = None\n\n return hvf_value\n\ndef modify_simulation_input(single_mof_calcf):\n hvf_value = read_outfile_name(single_mof_calcf)\n pat = \"hvf_value\"\n ads_path = single_mof_calcf + \"/Adsorption\"\n modify_file = ads_path + \"/simulation.input\"\n new_simulation = single_mof_calcf + \"/Adsorption/newsimulation.input\"\n open(new_simulation, 'w').write(re.sub(pat, str(hvf_value), open(modify_file).read()))\n os.system('mv '+ new_simulation + ' ' + modify_file)\n\n return ads_path\n\ndef apply_pressure(single_mof_calcf,mode,part):\n #pressure = [\"5e7\",\"1e4\",\"5e4\",\"1e5\",\"5e5\",\"1e6\",\"1.5e6\",\"2e7\"]\n #N2\n #pressure = [\"2e2\", \"4e4\", \"1e5\"]\n #CH4\t\n #pressure = [\"5e4\", \"5e5\", \"100e5\"]\n #CO2\n pressure = [\"5e4\"]\n #pressure = [\"1e4\", \"5e5\", \"2e7\"]\n #pressure = [\"2e2\",\"5e3\",\"1e4\",\"2e4\",\"4e4\",\"6e4\",\"8e4\",\"1e5\"]\n diff_p_path = modify_simulation_input(single_mof_calcf)\n pat = \"pressure_value\"\n mol = \"CO2\"\n #mol = \"methane\"\n #mol = \"N2\"\n #print(diff_p_path)\n files = os.listdir(diff_p_path)\n #newin = \"./new.input\"\n #open(\"./simulation.input\", 'w').write(re.sub(\"N2\", mol, open(newin).read()))\n #os.system('mv new.input simulation.input')\n homepath = os.path.expanduser('~')\n adsorption_joblist_path = homepath + \"/MOFs/work/MOF_Adsorption/\"+ mode + \"_\" + part + os.sep\n for p in pressure:\n p_path = diff_p_path + os.sep + p + os.sep\n if not os.path.exists(p_path):\n os.mkdir(p_path)\n for i in files:\n ifile = diff_p_path + os.sep + i\n if os.path.isfile(ifile):\n shutil.copyfile(ifile, p_path + i)\n old_input_file = p_path + \"/simulation.input\"\n new_input_file = p_path + \"/ap_simulation.input\"\n 
new2_input_file = p_path + \"CO2.input\"\n cifname = init_calculation.seek_mof_cif(p_path)\n open(new_input_file, 'w').write(re.sub(pat, p, open(old_input_file).read()))\n open(new2_input_file, 'w').write(re.sub(\"N2\", mol, open(new_input_file).read()))\n os.system('mv ' + new2_input_file + ' ' + old_input_file)\n os.remove(new_input_file)\n #homepath = os.path.expanduser('~')\n if not os.path.exists(adsorption_joblist_path):\n os.makedirs(adsorption_joblist_path)\n submit2_sh = [\"#!/bin/sh\\n\",\n \"cd \"+ p_path +\"\\n\", \n \"export RASPA_DIR=/WORK/nscc-gz_material_1/MOFs/sf_box/raspa2/src\\n\",\n \"#export RASPA_DIR=/WORK/nscc-gz_material_5/Apps/raspa2/src\\n\",\n \"$RASPA_DIR/bin/simulate\\n\"\n ]\n os.chdir(adsorption_joblist_path)\n sub_file = \"ads\" + \"\".join(cifname) +\"_\"+ p + \".sh\"\n with open(r\"./\" + sub_file, \"w\") as f_sub:\n f_sub.writelines(submit2_sh)\n\n return adsorption_joblist_path\n\nif __name__ == '__main__':\n import sys\n args = sys.argv\n single_mof_calcf = args[1]\n mode = args[2]\n part = args[3]\n apply_pressure(single_mof_calcf,mode,part)\n", "id": "5686537", "language": "Python", "matching_score": 5.364809513092041, "max_stars_count": 0, "path": "adsorption_calculation.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport re\nimport shutil\nimport MOFs_step1\n\n''' \n\nExtract Helium void Fraction from before calculation and perform Adsorption calculation\n: CIFPATH + INDIVIDUAL CIF DIRECTORY\neg: python MOFs_step2.py /WORK/nscc-gz_material_5/MOF/test/ref_try/mofdb/cif\n\n'''\n\ndef find_path(calc_dir):\n filepath_list = os.listdir(calc_dir) \n \n return filepath_list\ndef read_output(filepath):\n global wrong_calc_filepath\n homepath = os.path.expanduser('~')\n wrong_calc_filepath = homepath + \"/MOFs/wrong_hc_calc/\" \n if not os.path.exists(wrong_calc_filepath):\n os.makedirs(wrong_calc_filepath)\n output_file_path = filepath +\"/HeliumVF/Output/System_0/\"\n try:\n ofn = os.listdir(output_file_path)\n output_file = output_file_path+\"\".join(ofn)\n check_result = check_calc_if_finished(output_file)\n if check_result is not True:\n cifname = filepath.split(\"/\")[-1] + \".cif\"\n shutil.copyfile(filepath + \"/\" + cifname ,wrong_calc_filepath + cifname)\n hc_value = None\n else:\n #pat = \"Rosenbluth factor new: (\\d+.\\d+)\"\n pat = \" Average Henry coefficient: (.*?) 
[+]\"\n try:\n hc_value = re.findall(pat,open(output_file).read())[-1]\n except:\n print(\"The outputfile has no henryc value\")\n if not os.path.exists(wrong_calc_filepath):\n os.makedirs(wrong_calc_filepath)\n cifname = filepath.split(\"/\")[-1] + \".cif\"\n srcfile = filepath + \"/\" + cifname\n desfile = wrong_calc_filepath + cifname\n shutil.copyfile(srcfile ,desfile)\n hc_value = None\n except FileNotFoundError:\n #homepath = os.path.expanduser('~')\n print(\"failed to find outputfile,moving cif file..\")\n cifname = filepath.split(\"/\")[-1] + \".cif\"\n srcfile = filepath + \"/\" + cifname\n #print(srcfile)\n desfile = wrong_calc_filepath + cifname\n #print(desfile)\n shutil.copyfile(srcfile ,desfile)\n hc_value = None\n\n return hc_value\n'''\ndef filter_wrongcalc_cif(filename):\n homepath = os.path.expanduser('~')\n wrong_calc_filepath = homepath + \"/MOFs/Wrong_calc/\" \n output_file_path = filepath +\"/HeliumVF/Output/System_0/\"\n try:\n ofn = os.listdir(output_file_path)\n output_file = output_file_path+\"\".join(ofn)\n except: \n if not os.path.exists(wrong_calc_filepath):\n os.makedirs(wrong_calc_filepath)\n if not os.path.exists(output_file):\n cifname = filename.split(\"/\")[-1] + \".cif\"\n srcfile = filename + \"/\" + cifname\n print(srcfile)\n desfile = wrong_calc_filepath + cifname\n print(desfile)\n shutil.copyfile(srcfile ,desfile) \n \n #if check_calc_if_finished(filename) is not True:\n # shutil.copyfile(filename,wrong_calc_filepath+filename.split(\"/\")[-1]) \n'''\ndef check_calc_if_finished(filename):\n check_pat_start = re.compile(\"Starting simulation\")\n check_pat_end = re.compile(\"Simulation finished\")\n with open(filename,\"r\") as f_check:\n data = f_check.read()\n #print(data)\n check_result = False\n mofname = str(filename.split(\"/\")[-1])\n if check_pat_start.findall(data):\n print(mofname + \" was submission sucessful\")\n if check_pat_end.findall(data):\n print(mofname + \" was calculated\")\n check_result = True\n if check_result is not True:\n print(mofname + \" calculation was not finished\")\n with open(\"./unfinished_mof_job\",\"a+\") as f_check:\n f_check.writelines(mofname + \"\\n\" + filename + \"\\n\" )\n return check_result\n\ndef modify_input(filepath):\n hc_value = read_output(filepath)\n pat = \"hc_value\"\n ads_path = filepath + \"/Adsorption\"\n modify_file = ads_path + \"/simulation.input\"\n new_simulation = filepath + \"/Adsorption/newsimulation.input\"\n open(new_simulation, 'w').write(re.sub(pat, str(hc_value), open(modify_file).read()))\n os.system('mv '+ new_simulation + ' ' + modify_file)\n\n return ads_path\n\ndef apply_pressure(filepath):\n pressure = [\"2e2\",\"5e3\",\"1e4\",\"2e4\",\"4e4\",\"6e4\",\"8e4\",\"1e5\"]\n diff_p_path = modify_input(filepath)\n pat = \"pressure_value\"\n #print(diff_p_path)\n files = os.listdir(diff_p_path)\n homepath = os.path.expanduser('~')\n adsorption_joblist_path = homepath + \"/MOF_WORK/Joblist/Adsorption/\"\n for p in pressure:\n p_path = diff_p_path + \"/\" + p + \"/\"\n os.mkdir(p_path)\n for i in files:\n shutil.copyfile(diff_p_path + \"/\" + i, p_path + i)\n old_input_file = p_path + \"/simulation.input\"\n new_input_file = p_path + \"/ap_simulation.input\"\n cifname = MOFs_step1.find_cif(p_path)\n#P\n open(new_input_file, 'w').write(re.sub(pat, p, open(old_input_file).read()))\n os.system('mv ' + new_input_file + ' ' + old_input_file)\n #homepath = os.path.expanduser('~')\n #adsorption_joblist_path = homepath + \"/MOF_WORK/Joblist/Adsorption/\"\n if not 
os.path.exists(adsorption_joblist_path):\n os.makedirs(adsorption_joblist_path)\n submit2_sh = [\"#!/bin/sh\\n\",\n \"cd \"+ p_path +\"\\n\", \n \"export RASPA_DIR=/WORK/nscc-gz_material_5/Apps/raspa2/src\\n\",\n \"$RASPA_DIR/bin/simulate\\n\"\n ]\n os.chdir(adsorption_joblist_path)\n sub_file = \"ads\" + str(cifname) +\"_\"+ p + \".sh\"\n with open(r\"./\" + sub_file, \"w\") as f_sub:\n f_sub.writelines(submit2_sh)\n\n return adsorption_joblist_path\n\nif __name__ == '__main__':\n import sys\n args = sys.argv\n filepath = args[1]\n workpath_list = find_path(filepath)\n for mof_name in workpath_list:\n mof_dir = filepath + \"/\" + mof_name\n print(mof_dir)\n hc_value = read_output(mof_dir) \n if hc_value is not None:\n print(\"Writing calculation results.... \" + mof_name )\n with open(\"./henry_coefficient_result.txt\",\"a+\") as f1:\n f1.writelines(str(mof_name) +\" \"*3 + format(str(hc_value),\">s\")+ \"\\n\")\n else:\n with open(\"./wrong_result.txt\",\"a+\") as f2:\n f2.writelines(mof_name + \"\\n\")\n continue\n #apply_pressure(pressure_path)\n", "id": "5469940", "language": "Python", "matching_score": 2.46586012840271, "max_stars_count": 0, "path": "result_exract/hc.py" }, { "content": "#!/usr/bin/env python\nimport os\nimport re\n\ndef iter_files(root_dir):\n outputfile = []\n for root,dirs,files in os.walk(root_dir):\n for file in files:\n file_name = os.path.join(root,file)\n outfile_extension = \".data\"\n path,tmpfilename = os.path.split(file_name)\n filename,extension = os.path.splitext(tmpfilename)\n if extension == outfile_extension:\n outputfile.append(file_name)\n\n return outputfile\n\ndef confirm_generate(filepath):\n generate = False\n calc_p_list = [\"1e4\", \"1e5\", \"2e2\", \"2e4\", \"4e4\", \"5e3\", \"6e4\", \"8e4\"]\n for i in calc_p_list:\n avp_path = filepath + os.sep + \"Adsorption/\" + i + os.sep + \"Output/System_0/\"\n result_path = filepath + os.sep + \"Adsorption/\" + i + os.sep + \"Output\"\n try:\n outfile = avp_path + \"\".join(os.listdir(avp_path))\n except FileNotFoundError:\n generate = False\n else:\n generate = True\n\n return generate\n\ndef check_exists_files(filepath):\n calc = False\n with open(filepath,\"r\") as f:\n data = f.read()\n #start_check = re.compile(r'Starting simulation')\n start_check = 'Starting simulation'\n #end_check = re.compile(r'Simulation finished,')\n end_check = 'Simulation finished,'\n #if ( start_check.findall(data) and end_check.findall(data) ) is not None:\n if end_check in data:\n calc = True\n else:\n calc = False\n\n return calc\n\nif __name__ == '__main__':\n import sys\n args = sys.argv\n mofdir = args[1]\n mof_result = os.listdir(mofdir)\n nonf,noncalc = [],[]\n for mof in mof_result:\n mofpath = mofdir + os.sep + mof\n mofoutfile = iter_files(mofpath)\n generate = confirm_generate(mofpath)\n if generate:\n for outfile in mofoutfile:\n calc = check_exists_files(outfile)\n if calc:\n continue\n else:\n noncalc.append(mof)\n print(\"Calculation Not Finished \", mof)\n with open(\"./nonf\",\"a+\") as fnfile:\n fnfile.writelines(mof + \"\\n\")\n else:\n nonf.append(mof)\n print(\"No Outfile found \", mof)\n with open(\"./noncalc\",\"a+\") as fncfile:\n fncfile.writelines(mof + \"\\n\")\n print(\"Calc Not finished \",len(noncalc))\n print(\"Not calc \",len(nonf))\n\n", "id": "12131785", "language": "Python", "matching_score": 5.415889739990234, "max_stars_count": 0, "path": "result_exract/findfile.py" }, { "content": "#!/usr/bin/env python\nimport os\nimport re\nimport json\nimport shutil\n\ndef 
iter_files(root_dir):\n outputfile = []\n for root,dirs,files in os.walk(root_dir):\n for file in files:\n file_name = os.path.join(root,file)\n outfile_extension = \".data\"\n path,tmpfilename = os.path.split(file_name)\n #print(tmpfilename)\n filename,extension = os.path.splitext(tmpfilename)\n ads = str(file_name).split(\"/\")[-5]\n if extension == outfile_extension and ads == \"Adsorption\":\n #print(filename)\n outputfile.append(file_name)\n\n return outputfile\n\ndef confirm_generate(filepath):\n generate = False\n #calc_p_list = [\"1e4\", \"1e5\", \"2e2\", \"2e4\", \"4e4\", \"5e3\", \"6e4\", \"8e4\"]\n calc_p_list = [\"5e3\",\"1e4\",\"5e4\",\"1e5\",\"5e5\",\"1e6\",\"1.5e6\",\"2e7\"]\n for i in calc_p_list:\n avp_path = filepath + os.sep + \"Adsorption/\" + i + os.sep + \"Output/System_0/\"\n result_path = filepath + os.sep + \"Adsorption/\" + i + os.sep + \"Output\"\n try:\n outfile = avp_path + \"\".join(os.listdir(avp_path))\n except FileNotFoundError:\n generate = False\n files = os.listdir(filepath)\n #cifpath = \"/\".join(str(filepath).split(\"/\")[0:-2]) \n cifile =\"\".join([i for i in files if os.path.splitext(i)[1] == '.cif'])\n srcfile = filepath + os.sep + cifile\n #print(srcfile)\n #despath = \"/WORK/nscc-gz_material_1/MOFs/NotFinishedCalc/Adsorption/N2/NOTcalc/\"\n despath = \"/WORK/nscc-gz_material_1/MOFs/NotFinishedCalc/Adsorption/CO2/NOTcalc/\"\n if not os.path.exists(despath):\n os.makedirs(despath)\n desfile = despath + cifile \n #print(desfile)\n shutil.copy(srcfile,desfile)\n else:\n generate = True\n\n return generate\n\ndef check_exists_files(filepath):\n calc = False\n with open(filepath,\"r\") as f:\n data = f.read()\n #start_check = re.compile(r'Starting simulation')\n start_check = 'Starting simulation'\n #end_check = re.compile(r'Simulation finished,')\n end_check = 'Simulation finished,'\n #if ( start_check.findall(data) and end_check.findall(data) ) is not None:\n if end_check in data:\n calc = True\n else:\n calc = False\n cifpath = \"/\".join(str(filepath).split(\"/\")[0:-3]) \n files = os.listdir(cifpath)\n cifile = [i for i in files if os.path.splitext(i)[1] == '.cif'] \n srcfile = cifpath + os.sep + \"\".join(cifile)\n #print(srcfile)\n #despath = \"/WORK/nscc-gz_material_1/MOFs/NotFinishedCalc/Adsorption/N2/NOTfinished/\"\n despath = \"/WORK/nscc-gz_material_1/MOFs/NotFinishedCalc/Adsorption/CO2/NOTfinished/\"\n if not os.path.exists(despath):\n os.makedirs(despath)\n desfile = despath + \"\".join(cifile) \n #print(desfile)\n shutil.copyfile(srcfile,desfile)\n calc = False\n\n return calc\n\ndef get_result(file):\n try:\n with open(file,\"r\") as resultf:\n info = resultf.read()\n #resultpat = re.compile(r\"\\s+[A]\\w{6}\\s\\w{7}\\s\\w{8}\\s[[].*?[]]\\s+(-?\\d+.\\d+)\\s[+]\")\n #resultpat = re.compile(r\"\\s+Average loading absolute [[]milligram/gram framework[]]\\s+(-?\\d+.\\d+)\")\n resultpat = re.compile(r\"\\s+Average loading absolute [[]mol/kg framework[]]\\s+(-?\\d+.\\d+)\")\n adsresult = resultpat.findall(info)\n #print(adsresult)\n except FileNotFoundError:\n pass \n\n return adsresult\n\ndef return_data(mofpath):\n noncalc, nonf, result_list = [], [], []\n adsdata = {}\n #data = {'name':None,'N2_Adsorption':None}\n data = {'name':None,'CO2_Adsorption':None}\n mofname = os.path.split(mofpath)[1]\n mofoutfile = iter_files(mofpath)\n #print(mofoutfile)\n mofoutfile.sort()\n generate = confirm_generate(mofpath)\n if generate:\n data['name']= mofname.split(\"_\")[0]\n for outfile in mofoutfile:\n calc = check_exists_files(outfile)\n 
tmpoutfile_name = os.path.split(outfile)[1]\n outname = os.path.splitext(tmpoutfile_name)[0] \n if calc:\n result = \"\".join(get_result(outfile))\n underp = str(outname).split(\"_\")[-1]\n if underp == '1.5e+06':\n underp = 1500000\n elif underp == '1e+06':\n underp = 1000000\n elif underp == '2e+07':\n underp = 20000000\n elif underp == '5e+07':\n underp = 50000000\n if len(result) != 0:\n value = float(result)\n print(\"now exract \",mofname)\n adsdata[underp] = value\n else:\n adsdata[underp] = -999\n else:\n noncalc.append(mof)\n underp = str(outname).split(\"_\")[-1]\n if underp == '1.5e+06':\n underp = 1500000\n elif underp == '1e+06':\n underp = 1000000\n elif underp == '2e+07':\n underp = 20000000\n elif underp == '5e+07':\n underp = 50000000\n adsdata[underp] = -999\n #print(\"Calculation Not Finished \", mof)\n with open(\"./nonf\", \"a+\") as fnfile:\n fnfile.writelines(mof + \" \" + str(underp) + \"\\n\")\n data['CO2_Adsorption'] = adsdata\n #data['CO2_Adsorption'] = adsdata\n else:\n data['name']= mofname.split(\"_\")[0]\n nonf.append(mof)\n #print(\"No Outfile found \", mof)\n with open(\"./noncalc\", \"a+\") as fncfile:\n fncfile.writelines(mof + \"\\n\")\n\n return data\n\nif __name__ == '__main__':\n import sys\n args = sys.argv\n mofdir = args[1]\n molecule = args[2]\n mof_result = os.listdir(mofdir)\n result_list = []\n for mof in mof_result:\n mofpath = mofdir + os.sep + mof\n data = return_data(mofpath)\n result_list.append(data)\n jsondata = json.dumps(result_list)\n #print(jsondata)\n with open(\"/WORK/nscc-gz_material_1/MOFs/script/calc_result_data/Adsorption_data/Adsorption_\" + molecule,\"a+\") as adsf:\n json.dump(result_list, adsf, sort_keys=False, indent=4, separators=(',', ':'))\n", "id": "3744945", "language": "Python", "matching_score": 5.160965919494629, "max_stars_count": 0, "path": "result_exract/co2.py" }, { "content": "#!/usr/bin/env python\nimport os\nimport re\nimport shutil\n\ndef iter_files(root_dir):\n outputfile = []\n for root,dirs,files in os.walk(root_dir):\n for file in files:\n file_name = os.path.join(root,file)\n outfile_extension = \".data\"\n path,tmpfilename = os.path.split(file_name)\n #print(tmpfilename)\n filename,extension = os.path.splitext(tmpfilename)\n ads = str(file_name).split(\"/\")[-5]\n if extension == outfile_extension and ads == \"Adsorption\":\n #print(filename)\n outputfile.append(file_name)\n\n return outputfile\n\ndef confirm_generate(filepath):\n a,b =0,0\n generate = False\n #calc_p_list = [\"1e4\", \"1e5\", \"2e2\", \"2e4\", \"4e4\", \"5e3\", \"6e4\", \"8e4\"]\n #calc_p_list = [\"5e3\",\"1e4\",\"5e4\",\"1e5\",\"5e5\",\"1e6\",\"1.5e6\",\"2e7\"]\n calc_p_list = [\"1.5e6\",\"1e4\",\"1e5\",\"1e6\",\"2e7\",\"5e4\",\"5e5\",\"5e7\"]\n for i in calc_p_list:\n avp_path = filepath + os.sep + \"Adsorption/\" + i + os.sep + \"Output/System_0/\"\n result_path = filepath + os.sep + \"Adsorption/\" + i + os.sep + \"Output\"\n try:\n outfile = avp_path + \"\".join(os.listdir(avp_path))\n except FileNotFoundError:\n src = filepath + os.sep + \"Adsorption/\" + i \n generate = False\n des = '/WORK/nscc-gz_material_1/MOFs/jobCheck/notcalc' + os.sep + filepath.split('/')[-1] \n if not os.path.exists(des):\n os.makedirs(des)\n #mvcmd = 'cp -r ' + src + ' ' + des\n mvcmd = 'mv ' + src + ' ' + des\n a+=1\n print('now move uncalc job...\\n' + src + ' to \\n' + des )\n os.system(mvcmd)\n else:\n generate = True\n if not check_exists_files(outfile):\n src = filepath + os.sep + \"Adsorption/\" + i \n des = 
'/WORK/nscc-gz_material_1/MOFs/jobCheck/notfinished' + os.sep + filepath.split('/')[-1] \n if not os.path.exists(des):\n os.makedirs(des)\n #mvcmd = 'cp -r ' + src + ' ' + des\n mvcmd = 'mv ' + src + ' ' + des\n os.system(mvcmd)\n print('now move unfinished job...\\n' + src + ' to \\n' + des )\n else:\n continue \n\ndef check_exists_files(filepath):\n calc = False\n with open(filepath,\"r\") as f:\n data = f.read()\n #start_check = re.compile(r'Starting simulation')\n start_check = 'Starting simulation'\n #end_check = re.compile(r'Simulation finished,')\n end_check = 'Simulation finished,'\n #if ( start_check.findall(data) and end_check.findall(data) ) is not None:\n if end_check in data:\n calc = True\n else:\n calc = False\n return calc\n\n\nif __name__ == '__main__':\n import sys\n args = sys.argv\n mofdir = args[1]\n mof_result = os.listdir(mofdir)\n for mof in mof_result:\n mofpath = mofdir + os.sep + mof\n confirm_generate(mofpath)\n", "id": "5129391", "language": "Python", "matching_score": 1.0007258653640747, "max_stars_count": 0, "path": "check_job/pnc.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys\nimport random\nimport os\nimport node\nimport init_calculation\nimport adsorption_calculation\n\n\nargs = sys.argv\nfile_url = args[1]\ncalculation_mode = args[2]\npart = args[3]\nhomepath = os.path.expanduser('~')\nprint(\"Extracting Helium Void Fraction values\")\nwork_dir = init_calculation.init_calc_space(file_url)\nfor mofpath in work_dir:\n essential_cif = init_calculation.seek_mof_cif(mofpath)\n os.chdir(mofpath)\n calc_cif = init_calculation.babel_cif(\"\".join(essential_cif) + \".cif\")\n init_calculation.init_mof_ppcalc(mofpath,calculation_mode,part)\nmofs = [i for i in os.listdir(file_url) if not str(i).split(\".\")[-1] == \"cif\"]\n#mofs = os.listdir(file_url)\nfor mof in mofs:\n filepath = file_url + os.sep + \"\".join(mof)\n try:\n shpath = adsorption_calculation.apply_pressure(filepath,calculation_mode,part)\n print(shpath)\n except FileNotFoundError:\n print(mof,\" Helium void Fraction was not found\")\n continue \n#submit job\nprint(\"creating the batch scripts...\")\nnode.creat_workdir(homepath,calculation_mode,part)\n#node.split_job(mode,shpath,part)\nprint(\"Start submitting mof \"+ calculation_mode +\" calculation work...\")\nsubmission = node.submit_job(calculation_mode,shpath,part)\nif submission is True:\n os.chdir(file_url)\n print(\"now remove excess cif file...\")\n os.system(\"rm *.cif\")\n exit(0)\n", "id": "10620771", "language": "Python", "matching_score": 4.446262359619141, "max_stars_count": 0, "path": "submit_adsorption_calculation.py" }, { "content": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\nimport sys\r\nimport random\r\nimport os\r\nimport node\r\nimport init_calculation\r\nimport adsorption_calculation\r\n\r\nargs = sys.argv\r\nfilepath = args[1]\r\nmode = args[2]\r\npart = args[3]\r\n# prepare init_calculation\r\nprint(\"Creating the calculation directory...\")\r\nwork_dir = init_calculation.init_calc_space(filepath)\r\nfor mofpath in work_dir:\r\n filename = init_calculation.seek_mof_cif(mofpath)\r\n os.chdir(mofpath)\r\n shpath = init_calculation.init_mof_ppcalc(mofpath,mode,part)\r\n# submit init_calculation\r\nhomepath = os.path.expanduser('~')\r\nprint(\"creating the batch scripts...\")\r\nnode.creat_workdir(homepath,mode,part)\r\nnode.split_job(mode,shpath,part)\r\nprint(\"Start submitting mof \"+ mode + \" calculation work...\")\r\n#yhbatch_id = node.submit_job(mode,shpath)\r\nsubmission = 
node.submit_job(mode,shpath,part)\r\nif submission is True:\r\n exit(0)\r\n", "id": "3399894", "language": "Python", "matching_score": 2.514350414276123, "max_stars_count": 0, "path": "submit_init_calculation.py" }, { "content": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\nimport os\r\nimport time\r\nimport random\r\nimport string\r\nimport init_calculation\r\n\r\n'''\r\nCreate batch job submission script path and script file;\r\neg: python .py /WORK/nscc-gz_material_5/MOF_WORK/Joblist/HeliumVF\r\n'''\r\n\r\ndef creat_workdir(homepath,mode,part):\r\n submit_workdir = homepath + \"/MOFs/work/\"+ mode+\"_\"+ part+\"/Submit_sh/\"\r\n if not os.path.exists(submit_workdir):\r\n os.makedirs(submit_workdir)\r\n if mode == \"hvf\":\r\n sh_path = homepath + \"/MOFs/work/MOF_VoidFraction/\"+ mode + \"_\" + part +\"/Joblist/\"\r\n if not os.path.exists(sh_path):\r\n os.makedirs(sh_path)\r\n elif mode == \"ads\":\r\n sh_path = homepath + \"/MOFs/work/MOF_Adsorption/\"+ mode + \"_\" + part +\"/Joblist/\"\r\n if not os.path.exists(sh_path):\r\n os.makedirs(sh_path)\r\n elif mode == \"hc\":\r\n sh_path = homepath + \"/MOFs/work/MOF_HenryC/\"+ mode + \"_\" + part +\"/Joblist/\"\r\n if not os.path.exists(sh_path):\r\n os.makedirs(sh_path)\r\n\r\n return sh_path,submit_workdir\r\n\r\n\r\ndef split_job(mode,shpath,part):\r\n homepath = os.path.expanduser('~')\r\n shnum = 24\r\n batchdir = creat_workdir(homepath,mode,part)[0]\r\n filepath = shpath\r\n total_sh = os.listdir(filepath)\r\n total_sh.sort()\r\n total_num = len(total_sh)\r\n batch_name = [total_sh[i:i + shnum] for i in range(0, total_num, shnum)]\r\n batchnum = len(batch_name)\r\n batchlist =[]\r\n for run_i in range(batchnum):\r\n id_num = random.randint(0,50000)\r\n id_str = string.ascii_lowercase \r\n r_str = random.choice(id_str)\r\n codename = \"run_mof\"+str(r_str)+str(id_num)+\"_\"+ str(run_i) + \".sh\"\r\n batchlist.append(batchdir + codename)\r\n with open(batchdir + \"/\" + codename, \"w\") as fm:\r\n fm.writelines(\"#!/bin/bash\\n\")\r\n for item in batch_name[run_i]:\r\n #print(item)\r\n if item in batch_name[run_i][0:-1]:\r\n fm.writelines(\"bash \" + filepath +\"/\" + item + \" &\\n\")\r\n if item == batch_name[run_i][-1]:\r\n #print(\"This is the last\", item)\r\n fm.writelines(\"bash \" + filepath +\"/\"+ item + \" &\\nwait\")\r\n return batchlist\r\n\r\ndef submit_job(mode,shpath,part):\r\n homepath = os.path.expanduser('~')\r\n submit_path = creat_workdir(homepath,mode,part)[1]\r\n if mode == \"hvf\":\r\n submit_dir = submit_path + \"/hvf_submit\"\r\n if not os.path.exists(submit_dir):\r\n os.makedirs(submit_dir)\r\n elif mode == \"ads\":\r\n submit_dir = submit_path + \"/ads_submit\"\r\n if not os.path.exists(submit_dir):\r\n os.makedirs(submit_dir)\r\n elif mode == \"hc\":\r\n submit_dir = submit_path + \"/hc_submit\"\r\n if not os.path.exists(submit_dir):\r\n os.makedirs(submit_dir)\r\n batch_list = split_job(mode,shpath,part)\r\n os.chdir(submit_dir)\r\n yhbatch_id = []\r\n job_total = len(batch_list)\r\n counter = 0\r\n for batch in batch_list:\r\n batchname = \"\".join(batch)\r\n #os.system(\"yhbatch -N 1 -p nsfc \" + batchname)\r\n #os.system(\"yhbatch -N 1 -p MEM_128 \" + batchname)\r\n os.system(\"yhbatch -N 1 \" + batchname)\r\n job_num_cmd = \"squeue | wc | awk \\'{print $1}\\'\"\r\n job_num = int(\"\".join(os.popen(job_num_cmd).readlines())) - 1\r\n batchcmd = batchname.split(\"/\")[-1]\r\n id_cmd = \"yhacct --name \" + batchcmd + \" | awk '{print $1;}' | sed -n \\\"3, 1p\\\"\"\r\n time.sleep(2)\r\n job_id = 
\"\".join(os.popen(id_cmd).readlines())\r\n yhbatch_id.append(job_id)\r\n time.sleep(2)\r\n while job_num > 200: # Node limit(i): can submit i+1 job\r\n print(\"Reach Node limit...waiting\")\r\n job_num = int(\"\".join(os.popen(job_num_cmd).readlines())) - 1\r\n time.sleep(10)\r\n while check_job(batchcmd) is not True:\r\n print(\"check \"+job_id+\"job is submitted...\")\r\n time.sleep(2)\r\n job_total -= 1\r\n counter +=1\r\n print(str(job_id)+\" Successfully submitted, remain \"+str(job_total))\r\n with open(\"./sucesscalc.txt\",\"a+\") as f_work:\r\n f_work.writelines(batchname+\" \"+ str(counter) +\"\\n\")\r\n submission = False\r\n if job_total == 0:\r\n print(\"Job submission completed!\")\r\n submission = True\r\n\r\n return submission\r\n\r\ndef check_job(batchname):\r\n id_cmd = \"yhacct --name \" + batchname.replace(\"\\n\",\"\") +\" \"+\"| awk '{print $1;}' | sed -n \\\"3, 1p\\\"\"\r\n job_id = \"\".join(os.popen(id_cmd)).replace(\"\\n\",\"\")\r\n state_cmd = \"yhacct -j \"+job_id.replace(\"\\n\",\"\")+\"| awk '{print $6;}'| sed -n \\\"3, 1p\\\"\"\r\n job_state = \"\".join(os.popen(state_cmd)).replace(\"\\n\",\"\")\r\n if job_state == \"RUNNING\":\r\n slurm = True\r\n elif job_state == \"COMPLETED\":\r\n with open(\"./Named_duplicate.txt\",\"a+\") as f1:\r\n f1.writelines(batchname+\"\\n\")\r\n slurm = True\r\n elif job_state == \"CANCELLED+\":\r\n with open(\"./Submit_failed.txt\",\"a+\") as f2:\r\n f2.writelines(batchname+\"\\n\")\r\n slurm = True\r\n else:\r\n slurm = False\r\n\r\n return slurm\r\n\r\ndef check_finished_job(batch_id):\r\n state_cmd = \"yhacct -j \"+ batch_id.replace(\"\\n\",\"\") +\" | awk '{print $6;}'| sed -n \\\"3, 1p\\\"\"\r\n job_state = \"\".join(os.popen(state_cmd)).replace(\"\\n\",\"\")\r\n if job_state == \"COMPLETED\":\r\n slurm = True\r\n else:\r\n slurm = False\r\n\r\n return slurm\r\n\r\n\r\nif __name__ == '__main__':\r\n import sys\r\n args = sys.argv\r\n filepath = args[1]\r\n mode = args[2]\r\n part = args[3]\r\n homepath = os.path.expanduser('~')\r\n creat_workdir(homepath,mode,part)\r\n #split_job(mode,filepath)\r\n submit_job(mode,filepath)\r\n", "id": "6214588", "language": "Python", "matching_score": 2.5057690143585205, "max_stars_count": 0, "path": "node.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys\nimport os\nimport time\n\nargs = sys.argv\nbatchpath = args[1]\nsection = args[2]\nos.chdir(batchpath)\nsubmitpath = \"/WORK/nscc-gz_material_1/MOFs/resub/\" + section\nif not os.path.exists(submitpath):\n os.makedirs(submitpath) \nbatch_list = [i for i in os.listdir(batchpath)]\nprint(\"total \",len(batch_list))\nnotsubcounter = 0\nfor sub in batch_list:\n yhcheckcmd = 'yhacct --name ' + sub + \" | awk '{print $6;}' | sed -n \\\"3, 1p\\\"\"\n sub_state = str(\"\".join(os.popen(yhcheckcmd).read()).replace(\"\\n\",\"\")).replace(\" \",\"\")\n if sub_state == \"RNNING\" or sub_state == \"COMPLETED\":\n print(\"already submitted \" ,sub)\n continue\n else:\n notsubcounter += 1\n print(\"now submitting unfinished job \", sub )\n os.chdir(submitpath)\n os.system(\"yhbatch -N 1 \" + batchpath + os.sep + sub )\n time.sleep(10)\nprint(notsubcounter)\nexit(0)\n", "id": "3277539", "language": "Python", "matching_score": 0.7117015719413757, "max_stars_count": 0, "path": "resubmit_uncalcjob.py" }, { "content": "#!/usr/bin/env python\nimport os\nimport time\nimport string\n\npartlist = list(string.ascii_uppercase)\n\nfor section in partlist:\n #cmd = 'nohup python resubmit_uncalcjob.py 
/WORK/nscc-gz_material_1/MOFs/work/MOF_Adsorption/ads_co2_'+section+'/Joblist/ADS CO2_'+section+' &'\n    print('now starting to extract calculation result ',section )\n    #cmd = 'nohup python pick_adsorption_result_v1.py /WORK/nscc-gz_material_1/MOFs/data/result_N2/result_ads_N2_77k/part_'+ section + ' N2 '+' &'\n    #cmd = 'nohup python pick_adsorption_result_v1.py /WORK/nscc-gz_material_1/MOFs/data/supplement/N2_77K/calc/part_'+ section + ' N2sup '+' &'\n    #cmd = 'nohup yhrun -N 1 python pick_adsorption_result_v1.py /WORK/nscc-gz_material_1/MOFs/data/supplement/N2_77K/calc/part_'+ section + ' N2sup &'\n    #cmd = 'nohup yhrun -N 1 python co2.py /WORK/nscc-gz_material_1/MOFs/data/result_co2/part_'+ section + ' CO2 &'\n    cmd = 'nohup yhrun -N 1 python co2.py /WORK/nscc-gz_material_1/MOFs/data/supplement/CO2_sup/calc/part_'+ section + ' CO2sup &'\n    os.system(cmd)\n    print(cmd)\n    time.sleep(1)\n\nexit(0)\n", "id": "4156432", "language": "Python", "matching_score": 4.830286979675293, "max_stars_count": 0, "path": "result_exract/autoexract.py" }, { "content": "#!/usr/bin/env python\nimport os\nimport time\n#sublist = ['B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L',\\\n#           'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U',\\\n#           'V', 'W', 'X', 'Y', 'Z']\n\nsublist = ['M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\n\nfor section in sublist:\n    #cmd = 'nohup python resubmit_uncalcjob.py /WORK/nscc-gz_material_1/MOFs/work/MOF_Adsorption/ads_co2_'+section+'/Joblist/ADS CO2_'+section+' &'\n    #cmd = 'nohup python submit_adsorption_calculation.py /WORK/nscc-gz_material_1/MOFs/data/supplement/N2_77K/calc/part_'+ section + ' ads sup_n2_77k_split16'+ section + ' &'\n    cmd = 'nohup yhrun -N 1 python submit_init_calculation.py /WORK/nscc-gz_material_1/MOFs/data/result_hvf_77k/part_'+ section + ' hvf n2_77k_' + section + ' &'\n    os.system(cmd)\n    time.sleep(5)\n\nexit(0)\n\n", "id": "12416594", "language": "Python", "matching_score": 3.4222209453582764, "max_stars_count": 0, "path": "autosub.py" }, { "content": "#!/usr/bin/env python\nimport os\nimport time\nimport string\n\npartlist = list(string.ascii_uppercase)\n\nfor section in partlist:\n    print('now starting to split calculation result ',section )\n    #cmd = 'nohup yhrun -N 1 python pnc.py /WORK/nscc-gz_material_1/MOFs/data/result_co2/part_'+ section +' &'\n    #cmd = 'nohup yhrun -N 1 python pnc.py /WORK/nscc-gz_material_1/MOFs/data/supplement/CO2_sup/calc/part_'+ section +' &'\n    cmd = 'nohup yhrun -N 1 python pnc.py /WORK/nscc-gz_material_1/MOFs/data/supplement/CO2_sup/calc/part_'+ section +' &'\n    os.system(cmd)\n    print(cmd)\n    time.sleep(0.5)\n\nexit(0)\n", "id": "6920787", "language": "Python", "matching_score": 0.31656649708747864, "max_stars_count": 0, "path": "check_job/automv.py" }, { "content": "#!/usr/bin/env python\n\nimport sys\nfrom pathlib import Path\nimport os\n\n\ndef yield_null(file):\n    with open(file, \"r\") as f:\n        for line in f:\n            if \"NULL\" in line:\n                name = line.split()[0] \n                yield name\n\n\n\nif __name__ == \"__main__\":\n    args = sys.argv\n    scf = Path(\"/WORK/nscc-gz_material_1/ICSD_vasp/abacus_calc/matgen_scf\")\n    r = args[1]\n    for i in yield_null(r):\n        stru = scf / i\n        if stru.exists():\n            os.system(\"rm -rf {}\".format(stru))\n", "id": "5190882", "language": "Python", "matching_score": 1.4947627782821655, "max_stars_count": 0, "path": "clean_null.py" }, { "content": "#!/usr/bin/env python\n\nimport os\nimport os.path as osp\nfrom monty.os import cd\nfrom pathlib import Path\nimport sys\n\n\ndef _yield(path):\n    yield from (osp.join(path, i) for 
i in os.listdir(path))\n\n\ndef _mk_cls(path, cls_num):\n lst = list(_yield(path))\n clst = [lst[i:i + cls_num] for i in range(0, len(lst), cls_num)]\n with cd(path):\n for idx, ist in enumerate(clst):\n cls_name = f\"class_{idx + 1}\"\n print(cls_name)\n os.makedirs(cls_name)\n for stru in ist:\n _, name = osp.split(stru)\n print(name)\n os.system(\"mv {} {}/\".format(name, cls_name))\n\nif __name__ == \"__main__\":\n args = sys.argv\n\n root = args[1]\n _mk_cls(root, 70)\n", "id": "12670575", "language": "Python", "matching_score": 0.051126524806022644, "max_stars_count": 0, "path": "make_class.py" }, { "content": "#!/usr/bin/python\n'''\nProgram name:\nStructure Prototype Analysis Package (SPAP)\n\nDescription:\nSPAP can analyze symmetry and compare similarity of a large number of atomic\nstructures. Typically, SPAP can process structures predicted by CALYPSO\n(www.calypso.cn). We use spglib to analyze symmetry. Coordination\nCharacterization Function (CCF) is used to measure structural similarity. If\nyou use this program and method in your research, please read and cite the\nfollowing publication:\nJ. Phys. Condens. Matter 2017, 29, 165901.\n\nAuthor:\nDr. <NAME>\n\nEmail:\n<EMAIL> / <EMAIL>\n\nDependency:\nThis program uses numpy, spglib, and ase (https://wiki.fysik.dtu.dk/ase/). You\ncan install them by one command: pip install numpy spglib ase\n\nUsage:\nSPAP can run at four different modes.\n1. To analyze CALYPSO structure prediction results, set i_mode=1. Run spap.py\n in results directory generated by CALYPSO.\n2. To calculate symmetry and compare similarity of a bunch of structures, set\n i_mode=2. Put structure files in struc directory. The files should be\n named *.cif, *.vasp (VASP format), or any name and format ase can read\n automatically.\n3. To read and analyze structures optimized by VASP, set i_mode=3. Define\n work_dir. It is the path where you put the optimized structures.\n4. To calculate symmetry and compare similarity of a list of structures, set\n i_mode=4. Assign a list of Atoms objects to structure_list.\nYou could customize other parameters at the end of this script.\n\nOutput:\nI try to organize the output files similar to those generated by cak.py. So\nthat it's easier for user to get familiar with this program.\nAnalysis_Output.dat lists the information about the analyzed structures.\nHowever we provide more information such as density, formula unit, and volume.\ndistance.dat stores the distances between structures and some other attributes.\nanalyzed_structures.db is in Atomic Simulation Environment (ASE) database\nformat for atoms. You can easily read and analyze these structures through\nASE. You can also add and store properties you are interested in. This\nfunctionality is very useful for screening out good functional materials. SPAP can\nalso write structures in cif and VASP format in dir_* directory. 
Technically\nspeaking, SPAP can easily write any structure format supported by ASE.\n'''\n\nimport os\nimport shutil\nimport argparse\nimport numpy as np\nfrom ase import Atoms\nfrom ase.db import connect\nfrom ase.visualize import view\nfrom ase.io import write, read\nfrom ase.data import atomic_numbers\nfrom spglib import standardize_cell, get_spacegroup\nfrom .ccf import cal_inter_atomic_d, d2ccf, struc2ccf, cal_ccf_d, element_tag\n\n# from spap.ccf import cal_inter_atomic_d, d2ccf, struc2ccf, cal_ccf_d, element_tag\n\ntry:\n import pickle\n import matplotlib.pyplot as plt\nexcept:\n pass\n\n\ndef run_spap(symprec=0.1, e_range=0.4, total_struc=None, l_comp=True, threshold=None, r_cut_off=None, extend_r=1.0,\n ilat=2, ccf_step=0.02, l_db=True, l_cif=False, l_poscar=False, lprm=False, l_view=False, work_dir='./',\n structure_list=[], i_mode=1, lplt=False, ftype='CCF', apw=60.0, readf='XDATCAR', index=':', nsm=False,\n nfu=False):\n '''\n This function starts all the calculations.\n :param type:\n :param lplt:\n :param symprec: float\n This precision is used to analyze symmetry of atomic structures.\n :param e_range: float\n To define an energy range in which structures will be analyzed.\n :param total_struc: int\n This number of structures will be analyzed.\n :param l_comp: bool\n Whether to compare similarity of structures.\n :param threshold: float\n Threshold for similar/dissimilar boundary.\n :param r_cut_off: float\n Inter-atomic distances within this cut off radius will contribute to\n CCF.\n :param extend_r: float\n CCF will be calculated in the range of (0,r_cut_off+extend_r).\n :param ilat: int\n This parameter controls which method will be used to deal with lattice\n for comparing structural similarity.\n 0 don't change lattice;\n 1 equal particle number density;\n 2 try equal particle number density and equal lattice.\n :param ccf_step: float\n Length of step for calculating CCF.\n :param l_db: bool\n Whether to write structures into ase (https://wiki.fysik.dtu.dk/ase/)\n database file.\n :param l_cif: bool\n Whether to write structures into cif files.\n :param l_poscar: bool\n Whether to write structures into files in VASP format.\n :param l_view: bool\n Whether to display the structures.\n :param work_dir: str\n Set working directory.\n :param structure_list: list of Atoms objects.\n Assign a list of Atoms objects to structure_list when using i_mode=4.\n :param i_mode: int\n Different functionality of SPAP.\n 1 analyze CALYPSO prediction results;\n 2 calculate symmetry and similarity of structures in struc directory;\n 3 read and analyze structures optimized by VASP;\n 4 calculate symmetry and similarity of structures in structure_list.\n 8 read and analyze structures optimized by ABACUS;\n :return:\n '''\n print('Welcome using Structure Prototype Analysis Package (SPAP). The Coordination\\n'\n 'Characterization Function (CCF) is used to assess structural similarity. If\\n'\n 'you use this program and method in your research, please read and cite the\\n'\n 'following publication:\\n'\n '<NAME>. Condens. 
Matter 2017, 29, 165901.\\n')\n low_symm_er = 0.06\n min_e = 0.0\n max_e = 0.0\n e_list = []\n pbc = [True, True, True]\n dir_name = 'dir_' + str(symprec)\n debug = False\n if i_mode == 5 or i_mode == 7:\n l_comp = False\n if r_cut_off == None:\n r_cut_off = 9.0\n ccf_range = r_cut_off + extend_r\n r_vector = np.linspace(0.0, ccf_range, int(ccf_range / ccf_step) + 2)\n # os.chdir('D:\\\\share\\\\wks\\\\1_example\\\\results2')\n # os.chdir('D:\\\\share\\\\wks\\\\Examples\\\\1_example\\\\results')\n # os.chdir('D:\\\\share\\\\wks\\\\2mg\\\\results')\n os.chdir(work_dir)\n chemical_symbols = ''\n prediction_method = 'Unknown'\n if i_mode == 1:\n prediction_method = 'CALYPSO'\n cal_p = 'Not collected\\n'\n pseudopotential = 'Not collected\\n'\n pressure = 0.0 # in GPa\n\n if i_mode == 1 or i_mode == 6:\n with open('../input.dat', 'r') as input:\n prediction_parameters = input.readlines()\n input.closed\n for line in prediction_parameters:\n if (line.lstrip(' '))[0] == '#':\n continue\n elif 'NameOfAtoms' in line:\n # chemical_symbols = (line[line.find('=') + 1:-1]).split(' ')\n chemical_symbols = [symbol for symbol in (line[line.find('=') + 1:-1]).split(' ') if symbol != '']\n n_species = len(chemical_symbols)\n elif 'ICode' in line:\n if int(line[line.find('=') + 1:-1]) == 1:\n calculator = 'VASP'\n elif int(line[line.find('=') + 1:-1]) == 7:\n calculator = 'Gaussian'\n else:\n calculator = 'Unknown'\n elif 'NumberOfLocalOptim' in line:\n nolo = (line[line.find('=') + 1:-1]).replace(' ', '')\n elif 'Cluster' in line:\n if (line[line.find('=') + 1:-1].lstrip())[0] == 'T':\n pbc = [False, False, False]\n dir_name = 'dir_origin'\n if l_comp:\n ilat = 0\n if threshold == None:\n threshold = 0.035\n elif '2D' in line:\n if (line[line.find('=') + 1:-1].lstrip())[0] == 'T':\n pbc = [True, True, False]\n if l_comp:\n ilat = 0\n if threshold == None:\n threshold = 0.06\n # print('Not supported yet.')\n # exit()\n elif 'VSC' in line:\n if (line[line.find('=') + 1:-1].lstrip())[0] == 'T':\n print('Not supported yet.')\n exit()\n elif 'LSurface' in line:\n if (line[line.find('=') + 1:-1].lstrip())[0] == 'T':\n print('Not supported yet.')\n exit()\n if calculator == 'VASP':\n if os.path.exists('../INCAR_' + nolo):\n with open('../INCAR_' + nolo, 'r') as incar:\n cal_p = incar.readlines()\n incar.closed\n for line in cal_p:\n if (line.lstrip(' '))[0] == '#':\n pass\n elif 'PSTRESS' in line:\n pressure = float(line[line.find('=') + 1:-1]) / 10\n if os.path.exists('../POTCAR'):\n with open('../POTCAR', 'r') as potcar:\n # potcar_lines=potcar.readlines()\n switch = False\n first_line = True\n for line in potcar:\n if first_line:\n pseudopotential = (line.lstrip()).rstrip()\n # temp_n=1\n # if n_species==1:\n # break\n first_line = False\n elif line[:4] == ' End':\n switch = True\n elif switch:\n pseudopotential += ' || ' + (line.lstrip()).rstrip()\n # temp_n+=1\n # if temp_n==n_species:\n # break\n switch = False\n potcar.closed\n with open('struct.dat', 'r') as struct:\n struct_lines = struct.readlines()\n struct.closed\n n_structure = 0\n\n print('Reading energy')\n for i, line in enumerate(struct_lines):\n if 'Energy=' in line:\n n_structure = n_structure + 1\n e_list.append([i, float(line[8:]), n_structure])\n if total_struc == None or total_struc < 1:\n min_e = min([x[1] for x in e_list])\n max_e = min_e + e_range\n e_list = [e for e in e_list if (e[1] < max_e) and (610612509.0 - e[1] > 0.1)]\n else:\n e_list = [e for e in e_list if 610612509.0 - e[1] > 0.1]\n e_list.sort(key=lambda x: x[1], 
reverse=False)\n if total_struc != None and total_struc > 0:\n e_list = e_list[:total_struc]\n total_struc = len(e_list)\n\n print('Reading structure')\n ill = []\n for ii in range(total_struc):\n element_numbers = (struct_lines[e_list[ii][0] + 3][9:-1]).split(' ')\n element_numbers = [n for n in element_numbers if n != '']\n chemical_formula = ''\n for j, symbol in enumerate(chemical_symbols):\n chemical_formula = chemical_formula + symbol + element_numbers[j]\n # cell=[[float(struct_lines[e_list[ii][0]+6+k][j-16:j]) for j in [17,33,49]] for k in [0,1,2]]\n # number_of_atom=sum([int(n) for n in element_numbers])\n # positions=[[float(struct_lines[e_list[ii][0]+13+k][j-12:j]) for j in [13,25,37]] for k in range(sum([int(n) for n in element_numbers]))]\n try:\n structure_list.append(Atoms(\n chemical_formula,\n cell=[[float(struct_lines[e_list[ii][0] + 6 + k][j - 16:j]) for j in [17, 33, 49]] for k in\n [0, 1, 2]],\n scaled_positions=[[float(struct_lines[e_list[ii][0] + 13 + k][j - 12:j]) for j in [13, 25, 37]] for\n k in range(sum([int(n) for n in element_numbers]))], pbc=pbc))\n except:\n ill.append(ii)\n print('Warning: structure in line {} in struct.dat was discarded.'.format(e_list[ii][0]))\n # structure_list[-1].e = e_list[ii][1]\n # structure_list[-1].n_structure = e_list[ii][2]\n e_list = [e_list[x] for x in range(total_struc) if not x in ill]\n total_struc = len(e_list)\n # structure_list.sort(key=lambda x:x.e,reverse=False)\n if i_mode == 6:\n return [[structure_list[i], e_list[i][1]] for i in range(total_struc)]\n elif i_mode == 2:\n print('Reading structure')\n total_struc = 0\n for root, dirs, files in os.walk('struc', topdown=True):\n for name in files:\n try:\n structure_list.append(read(os.path.join(root, name)))\n structure_list[-1].fnm = work_dir+os.path.join(root, name)[1:]\n total_struc += 1\n # print(name+'\\n')\n except:\n print('Cann\\'t read this file: ' + os.path.join(root, name))\n\n elif i_mode == 3:\n calculator = 'VASP'\n print('Reading structure')\n i = 0\n for root, dirs, files in os.walk('.', topdown=True):\n for name in files:\n if i == 0 and ('INCAR' in name):\n if calculator == 'VASP':\n with open(os.path.join(root, name), 'r') as incar:\n cal_p = incar.readlines()\n # incar.closed\n for line in cal_p:\n if (line.lstrip(' '))[0] == '#':\n pass\n elif 'PSTRESS' in line:\n pressure = float(line[line.find('=') + 1:-1]) / 10\n if os.path.exists(os.path.join(root, 'POTCAR')):\n with open(os.path.join(root, 'POTCAR'), 'r') as potcar:\n # potcar_lines=potcar.readlines()\n switch = False\n first_line = True\n for line in potcar:\n if first_line:\n pseudopotential = (line.lstrip()).rstrip()\n first_line = False\n elif line[:4] == ' End':\n switch = True\n elif switch:\n pseudopotential += ' || ' + (line.lstrip()).rstrip()\n switch = False\n # potcar.closed\n if 'OUTCAR' in name:\n try:\n structure_list.append(read(os.path.join(root, name), format='vasp-out'))\n structure_list[-1].fnm = work_dir+os.path.join(root, name)[1:]\n i += 1\n if debug:\n e_list.append(\n [0, structure_list[-1].calc.results['energy'] / len(structure_list[-1].numbers),\n i, os.path.join(root, name)])\n else:\n e_list.append(\n [0, structure_list[-1].calc.results['energy'] / len(structure_list[-1].numbers),\n i])\n # Be careful!!! 
Energy is changed!!!\n structure_list[-1].calc.results['energy'] = e_list[-1][1]\n except:\n print('Cann\\'t read this file: ' + os.path.join(root, name))\n if total_struc == None or total_struc < 1:\n min_e = min([x[1] for x in e_list])\n max_e = min_e + e_range\n e_list = [e for e in e_list if e[1] < max_e]\n structure_list = [s for s in structure_list if s.calc.results['energy'] < max_e]\n e_list.sort(key=lambda x: x[1], reverse=False)\n structure_list.sort(key=lambda x: x.calc.results['energy'], reverse=False)\n if total_struc != None and total_struc > 0:\n e_list = e_list[:total_struc]\n structure_list = structure_list[:total_struc]\n total_struc = len(e_list)\n elif i_mode == 4:\n total_struc = len(structure_list)\n elif i_mode == 5 or i_mode == 7:\n print('Reading structures')\n if i_mode == 5:\n structure_list = read(readf, index=index)\n averccf = {}\n total_struc = len(structure_list)\n if total_struc > 0:\n elet = element_tag(structure_list[0].numbers, irt=2)\n for i, struc in enumerate(structure_list):\n struc.ccf = struc2ccf(struc, r_cut_off, r_vector, apw, ftype)\n if i_mode == 7:\n # plt_ccf(struc.ccf, r_vector, ftype,False)\n pass\n if i_mode == 5:\n for key in struc.ccf.keys():\n if i == 0:\n averccf[key] = struc.ccf[key]\n else:\n averccf[key] = averccf[key] + struc.ccf[key]\n if i_mode == 7:\n return [struc.ccf for struc in structure_list]\n for i, key in enumerate(averccf.keys()):\n averccf[key] = averccf[key] / total_struc\n if i == 0:\n sumccf = averccf[key]\n else:\n sumccf = sumccf + averccf[key]\n convccf = {}\n for key in averccf.keys():\n for i, rankn in enumerate([int(x) for x in key.split('_')]):\n for itm in elet.items():\n if itm[1][0] == rankn:\n if i == 0:\n newkey = itm[1][1]\n break\n else:\n newkey += '-' + itm[1][1]\n convccf[newkey] = averccf[key]\n break\n convccf['Total'] = sumccf\n ccff = open('ccf.pickle', 'wb')\n pickle.dump(convccf, ccff)\n ccff.close()\n rvf = open('rvf.pickle', 'wb')\n pickle.dump(r_vector, rvf)\n rvf.close()\n keyl = []\n keyl.append('r')\n keyl += [x for x in convccf.keys()]\n ccfd = open('ccf.csv', 'w')\n ccfd.write('r')\n for x in keyl[1:]:\n ccfd.write(',{}'.format(x))\n for i in range(len(r_vector)):\n ccfd.write('\\n')\n for x in keyl:\n if x == 'r':\n ccfd.write('{}'.format(r_vector[i]))\n else:\n ccfd.write(',{}'.format(convccf[x][i]))\n ccfd.close()\n plt_ccf(convccf, r_vector, ftype)\n return convccf\n \n # Add by shenzx 20200530\n elif (i_mode == 8):\n\n # Read structure and energy\n from .GetEnergyStru import ReadAbacus\n structure_list = ReadAbacus()\n total_struc = len(structure_list)\n e_list = [[0, \n structure_list[i].Energy,\n i + 1] for i in range(total_struc)]\n\n if total_struc == None or total_struc < 1:\n min_e = min([x[1] for x in e_list])\n max_e = min_e + e_range\n e_list = [e for e in e_list if e[1] < max_e]\n structure_list = [s for s in structure_list if s.Energy < max_e]\n e_list.sort(key=lambda x: x[1], reverse=False)\n structure_list.sort(key=lambda x: x.Energy, reverse=False)\n if total_struc != None and total_struc > 0:\n e_list = e_list[:total_struc]\n structure_list = structure_list[:total_struc]\n total_struc = len(e_list)\n \n if pbc == [True, True, True] or pbc == [True, True, False]:\n print('Analyzing symmetry')\n if r_cut_off == None:\n if pbc == [True, True, True]:\n r_cut_off = 9.0\n else:\n r_cut_off = 6.0\n # ccf_range = r_cut_off + extend_r\n # r_vector = np.linspace(0.0, ccf_range, int(ccf_range / ccf_step) + 2)\n elif r_cut_off == None:\n r_cut_off = 9.0\n ccf_range = r_cut_off 
+ extend_r\n r_vector = np.linspace(0.0, ccf_range, int(ccf_range / ccf_step) + 2)\n space_g_l = []\n for structure in structure_list:\n if pbc == [True, True, True] or pbc == [True, True, False]:\n structure.conventional_cell = standardize_cell(\n (structure.cell, structure.get_scaled_positions(wrap=True), structure.numbers), symprec=symprec)\n if structure.conventional_cell == None:\n structure.conventional_cell = structure\n structure.space_group = 'NULL(0)'\n space_g_l.append(1)\n else:\n structure.conventional_cell = Atoms(cell=structure.conventional_cell[0],\n scaled_positions=structure.conventional_cell[1],\n numbers=structure.conventional_cell[2], pbc=pbc)\n structure.space_group = get_spacegroup(\n (structure.cell, structure.get_scaled_positions(wrap=True), structure.numbers),\n symprec=symprec).replace(' ', '')\n space_g_l.append(int(get_spg_n(structure.space_group)))\n elif pbc == [False, False, False]:\n structure.conventional_cell = structure\n structure.space_group = 'P1(1)'\n space_g_l.append(1)\n structure.conventional_cell.n_atom = len(structure.conventional_cell.numbers)\n if l_cif or l_poscar:\n if os.path.exists(dir_name):\n for root, dirs, files in os.walk(dir_name, topdown=False):\n for name in files:\n os.remove(os.path.join(root, name))\n for name in dirs:\n os.rmdir(os.path.join(root, name))\n else:\n os.mkdir(dir_name)\n for root, dirs, files in os.walk('./', topdown=True):\n for name in dirs:\n if name[:4] == 'dir_' and name != dir_name:\n shutil.rmtree(name)\n break\n if lplt:\n for i, structure in enumerate(structure_list):\n if i != -1:\n # temp_struc=read('77_1.cif')\n # temp_struc.ccf=struc2ccf(temp_struc,r_cut_off,r_vector)\n # structure.conventional_cell.ccf=struc2ccf(structure.conventional_cell,r_cut_off,r_vector)\n # show2ccf(structure.conventional_cell.ccf,temp_struc.ccf,r_vector)\n # plt_ccf(structure.conventional_cell.ccf,r_vector)\n ccf = struc2ccf(structure, r_cut_off, r_vector)\n plt_ccf(ccf, r_vector)\n return None\n\n if l_comp:\n print('Comparing similarity')\n if threshold == None:\n threshold = 0.06\n struc_d = classify_structures([x.conventional_cell for x in structure_list],\n space_g_l, threshold, r_cut_off, ilat, r_vector, nsm, nfu)\n d_f = open('distance.dat', 'w')\n if i_mode == 1:\n d_f.write('{:>11}{:>14}{:>15} {:>13} {:>12} {:>13}\\n'\n .format('No.', 'Enthalpy', symprec, 'Prototype ID', 'Distance', 'Formula unit'))\n elif i_mode == 2 or i_mode == 4:\n d_f.write('{:>5}{:>15} {:>13} {:>12} {:>13}\\n'\n .format('No.', symprec, 'Prototype ID', 'Distance', 'Formula unit'))\n elif i_mode == 3 or i_mode == 8: # Add by shenzx 20200530\n d_f.write('{:>11}{:>14}{:>15} {:>13} {:>12} {:>13}\\n'\n .format('No.', 'Energy', symprec, 'Prototype ID', 'Distance', 'Formula unit'))\n else:\n struc_d = [[-2, 0.0] for i in range(total_struc)]\n\n print('Writing out put files')\n if i_mode == 2 or i_mode == 3 or i_mode == 8: # Add by shenzx 20200530\n fstcn = open('structure_source.dat', 'w')\n for i, struct in enumerate(structure_list):\n fstcn.write('{:<6} {}\\n'.format(i + 1, struct.fnm))\n fstcn.close()\n with open('Analysis_Output.dat', 'w') as anal:\n if i_mode == 1:\n # format_a1='{:>11}{:>14}{:>15} {:>10} {:>12} {:>10}\\n'\n # content_a1=('No.', 'Enthalpy', symprec, 'Density', 'Formula unit', 'Volume')\n anal.write('{:>11}{:>14}{:>15} {:>10} {:>12} {:>10}\\n'.format('No.', 'Enthalpy', symprec, 'Density',\n 'Formula unit', 'Volume'))\n elif i_mode == 2 or i_mode == 4:\n # format_a1='{:>11}{:>15} {:>10} {:>12} {:>10}\\n'\n # content_a1=('No.', 
symprec, 'Density', 'Formula unit', 'Volume')\n anal.write('{:>5}{:>15} {:>10} {:>12} {:>10}\\n'.format('No.', symprec, 'Density', 'Formula unit', 'Volume'))\n elif i_mode == 3 or i_mode == 8: # Add by shenzx 20200530\n anal.write('{:>11}{:>14}{:>15} {:>10} {:>12} {:>10}\\n'.format('No.', 'Energy', symprec, 'Density',\n 'Formula unit', 'Volume'))\n # anal.write(format_a1.format((content_a1)))\n if i_mode == 1 or i_mode == 3 or i_mode == 8: # Add by shenzx 20200530\n format_a = '{:>4} ({:>4}){:>14.5f}{:>15} {:>10.5f} {:>12} {:>10.3f}\\n'\n elif i_mode == 2 or i_mode == 4:\n format_a = '{:>5}{:>15} {:>10.5f} {:>12} {:>10.3f}\\n'\n for i, id_d in enumerate(struc_d):\n if id_d[0] == -2:\n temp = nele_ctype_fu(count_atoms(structure_list[i].conventional_cell.numbers))\n if i_mode == 1 or i_mode == 3 or i_mode == 8: # Add by shenzx 20200530\n anal.write(format_a\n .format(i + 1, e_list[i][2], e_list[i][1], structure_list[i].space_group,\n structure_list[i].conventional_cell.n_atom /\n structure_list[i].conventional_cell.get_volume(), temp[2],\n structure_list[i].conventional_cell.get_volume()))\n elif i_mode == 2 or i_mode == 4:\n anal.write(format_a\n .format(i + 1, structure_list[i].space_group,\n structure_list[i].conventional_cell.n_atom /\n structure_list[i].conventional_cell.get_volume(), temp[2],\n structure_list[i].conventional_cell.get_volume()))\n if l_comp:\n if i_mode == 1 or i_mode == 3 or i_mode == 8: # Add by shenzx 20200530\n d_f.write('{:>4} ({:>4}){:>14.5f}{:>15} {:>13} {:>12.4e} {:>13}\\n'\n .format(i + 1, e_list[i][2], e_list[i][1], structure_list[i].space_group, i + 1,\n id_d[1], temp[2]))\n elif i_mode == 2 or i_mode == 4:\n d_f.write('{:>5}{:>15} {:>13} {:>12.4e} {:>13}\\n'\n .format(i + 1, structure_list[i].space_group, i + 1, id_d[1], temp[2]))\n for j, id_d2 in enumerate(struc_d[i + 1:]):\n if id_d2[0] == i:\n k = i + j + 1\n temp = nele_ctype_fu(count_atoms(structure_list[k].conventional_cell.numbers))\n if i_mode == 1 or i_mode == 3 or i_mode == 8: # Add by shenzx 20200530\n d_f.write('{:>4} ({:>4}){:>14.5f}{:>15} {:>13} {:>12.4e} {:>13}\\n'\n .format(k + 1, e_list[k][2], e_list[k][1], structure_list[k].space_group,\n i + 1, id_d2[1], temp[2]))\n elif i_mode == 2 or i_mode == 4:\n d_f.write('{:>5}{:>15} {:>13} {:>12.4e} {:>13}\\n'\n .format(k + 1, structure_list[k].space_group, i + 1, id_d2[1], temp[2]))\n d_f.write('\\n')\n anal.closed\n if l_comp:\n d_f.close()\n left_id = [i for i in range(total_struc) if struc_d[i][0] == -2]\n if l_db and total_struc > 0:\n # max_ls_e=e_list[0][1]+low_symm_er\n with connect('analyzed_structures.db', append=False) as db:\n data = {}\n data['spap_parameters'] = {\n 'e_range': e_range, 'l_comp': l_comp, 'threshold': threshold, 'symprec': symprec, 'ilat': ilat,\n 'r_cut_off': r_cut_off, 'extend_r': extend_r, 'ccf_step': ccf_step, 'total_struc': total_struc,\n 'l_view': l_view, 'l_cif': l_cif, 'l_poscar': l_poscar, 'l_db': l_db}\n data['pseudopotential'] = pseudopotential\n if i_mode == 1:\n data['incar'] = ''\n for line in cal_p:\n data['incar'] = data['incar'] + line\n data['prediction_parameters'] = ''\n for line in prediction_parameters:\n data['prediction_parameters'] += line\n db.write(structure_list[left_id[0]].conventional_cell, relaxed=True, enthalpy=e_list[left_id[0]][1],\n space_group=structure_list[left_id[0]].space_group, pressure=pressure,\n prediction_method=prediction_method, experimental=False, opt_code=calculator,\n data=data)\n elif i_mode == 2 or i_mode == 4:\n db.write(structure_list[left_id[0]].conventional_cell,\n 
space_group=structure_list[left_id[0]].space_group, data=data)\n elif i_mode == 3:\n data['incar'] = ''\n for line in cal_p:\n data['incar'] = data['incar'] + line\n db.write(structure_list[left_id[0]].conventional_cell, relaxed=True, e_per_a=e_list[left_id[0]][1],\n space_group=structure_list[left_id[0]].space_group, pressure=pressure,\n prediction_method=prediction_method, experimental=False, opt_code=calculator,\n data=data)\n\n # Add by shenzx 20200530 --- need modified\n elif i_mode == 8:\n data['input'] = ''\n for line in cal_p:\n data['incar'] = data['incar'] + line\n db.write(structure_list[left_id[0]].conventional_cell, relaxed=True, e_per_a=e_list[left_id[0]][1],\n space_group=structure_list[left_id[0]].space_group, pressure=pressure,\n prediction_method=prediction_method, experimental=False, opt_code=calculator,\n data=data)\n\n if total_struc > 1:\n # for i in [j for j in left_id[1:] if space_g_l[j]>2 or e_list[j][1]<max_ls_e]:\n # for i in range(total_struc):\n for i in left_id[1:]:\n if i_mode == 1:\n db.write(structure_list[i].conventional_cell, relaxed=True, enthalpy=e_list[i][1],\n space_group=structure_list[i].space_group, pressure=pressure,\n experimental=False, opt_code=calculator)\n elif i_mode == 2 or i_mode == 4:\n db.write(structure_list[left_id[0]].conventional_cell,\n space_group=structure_list[left_id[0]].space_group)\n elif i_mode == 3:\n db.write(structure_list[i].conventional_cell, relaxed=True, e_per_a=e_list[i][1],\n space_group=structure_list[i].space_group, pressure=pressure,\n experimental=False, opt_code=calculator)\n # Add by shenzx 20200530 --- need modified\n elif i_mode == 8:\n db.write(structure_list[i].conventional_cell, relaxed=True, e_per_a=e_list[i][1],\n space_group=structure_list[i].space_group, pressure=pressure,\n experimental=False, opt_code=calculator)\n if l_poscar:\n ctat = count_atoms(structure_list[left_id[0]].numbers, 2)\n for i in left_id:\n if lprm:\n prmc = standardize_cell(\n (structure_list[i].cell, structure_list[i].get_scaled_positions(wrap=True), structure_list[i].numbers),\n symprec=symprec, to_primitive=True)\n if prmc == None:\n prmc = structure_list[i]\n else:\n prmc = Atoms(cell=prmc[0], scaled_positions=prmc[1], numbers=prmc[2], pbc=pbc)\n if l_cif:\n write(dir_name + '/' + str(i + 1) + '_' + get_spg_n(structure_list[i].space_group) + '.cif',\n structure_list[i].conventional_cell)\n if lprm:\n write(dir_name + '/' + str(i + 1) + '_' + get_spg_n(structure_list[i].space_group) + '_p.cif',\n prmc)\n if l_poscar:\n write_struc(structure_list[i].conventional_cell, ctat,\n dir_name + '/UCell_' + str(i + 1) + '_' + get_spg_n(structure_list[i].space_group) + '.vasp',\n structure_list[i].space_group)\n if lprm:\n write_struc(prmc, ctat, dir_name + '/PCell_' + str(i + 1) + '_' + get_spg_n(\n structure_list[i].space_group) + '.vasp', structure_list[i].space_group)\n if l_view:\n view([structure_list[i].conventional_cell for i in left_id])\n n_left = len(left_id)\n if n_left != 0:\n print('Multiplicity: {:6.3f}'.format(total_struc / n_left))\n print('Calculation succeeded')\n # return [[structure_list[i],e_list[i][1]] for i in left_id]\n # return structure_list,[e[1] for e in e_list]\n\n\ndef write_struc(struc, ctat, strucn, tag='generated by BDM'):\n poscar = open(strucn, 'w')\n poscar.write(tag + '\\n1.0\\n')\n for v in struc.cell:\n poscar.write('{:>13.7f}{:>13.7f}{:>13.7f}\\n'.format(v[0], v[1], v[2]))\n smbd = getcf(struc.numbers, ctat, 2)\n ele_n = ''\n for smb in smbd.keys():\n poscar.write('{:4}'.format(smb))\n ele_n += ' 
{:>3}'.format(smbd[smb])\n poscar.write('\\n' + ele_n + '\\nDirect')\n scaled_pos = struc.get_scaled_positions(wrap=True)\n for n in ctat.keys():\n for j, pos in enumerate(scaled_pos):\n if struc.numbers[j] == n:\n poscar.write('\\n{:>10.7f} {:>10.7f} {:>10.7f}'.format(pos[0], pos[1], pos[2]))\n poscar.close()\n\n\ndef getcf(numbers, ctat, irt=1):\n cf = ''\n smbd = {}\n for key in ctat.keys():\n for eles in atomic_numbers.keys():\n if atomic_numbers[eles] == key:\n cf += eles\n break\n ict = np.sum(numbers == key)\n smbd[eles] = ict\n if ict != 1:\n cf += str(ict)\n if irt == 1:\n return cf\n elif irt == 2:\n return smbd\n\n\ndef get_spg_n(spg):\n return spg[spg.index('(') + 1:-1]\n\n\ndef classify_structures(structures, space_groups, threshold, r_cut_off, ilat, r_vector, nsm=False, nfu=False):\n n = len(structures)\n struc_d = [[-1, 0.0] for i in range(n)]\n volume_dict = {}\n\n # for i in range(n):\n # structures[i].n_atom = len(structures[i].numbers)\n for i in range(n):\n if struc_d[i][0] == -1:\n if ilat != 0 and (not structures[i].n_atom in volume_dict):\n volume_dict[structures[i].n_atom] = structures[i].get_volume()\n id_list = [i] + [x for x in range(i + 1, n) if\n (struc_d[x][0] == -1) and ((space_groups[i] == space_groups[x]) or nsm) and\n ((structures[i].n_atom == structures[x].n_atom) or nfu)]\n if ilat != 0:\n cal_struc_d(structures, id_list, struc_d, space_groups[i], threshold, r_cut_off,\n volume_dict[structures[i].n_atom] / structures[i].n_atom, ilat, r_vector)\n else:\n cal_struc_d(structures, id_list, struc_d, space_groups[i], threshold, r_cut_off,\n 100.0, ilat, r_vector)\n return struc_d\n\n\ndef cal_struc_d(structures, id_list, struc_d, spg_n, threshold, r_cut_off, volume, ilat, r_vector):\n struc_d[id_list[0]][0] = -2\n prototype_id = [id_list[0]]\n # temp_c=[]\n if spg_n > 15 and spg_n < 195 and ilat == 2:\n l_same_cell = True\n else:\n l_same_cell = False\n if len(id_list) != 1:\n if ilat == 0 or volume == structures[id_list[0]].get_volume() / structures[id_list[0]].n_atom:\n structures[id_list[0]].ccf = struc2ccf(structures[id_list[0]], r_cut_off, r_vector)\n # if l_same_cell:\n # temp_c=structures[id_list[0]].cell\n else:\n # temp_c=structures[id_list[0]].cell * (volume / structures[id_list[0]].get_volume()) ** (1.0 / 3.0)\n structures[id_list[0]].ccf = \\\n struc2ccf(Atoms(\n cell=structures[id_list[0]].cell * (\n volume / structures[id_list[0]].get_volume() * structures[id_list[0]].n_atom) ** (\n 1.0 / 3.0),\n scaled_positions=structures[id_list[0]].get_scaled_positions(wrap=True),\n numbers=structures[id_list[0]].numbers, pbc=structures[0].pbc), r_cut_off, r_vector)\n # volume = structures[id_list[0]].get_volume()\n for i in id_list[1:]:\n if ilat == 0:\n structures[i].ccf = struc2ccf(structures[i], r_cut_off, r_vector)\n else:\n scaled_positions = structures[i].get_scaled_positions(wrap=True)\n structures[i].ccf = struc2ccf(\n Atoms(cell=structures[i].cell * (volume / structures[i].get_volume() * structures[i].n_atom) ** (\n 1.0 / 3.0),\n scaled_positions=scaled_positions, numbers=structures[i].numbers, pbc=structures[0].pbc),\n r_cut_off, r_vector)\n # if i == 57:\n # ccf_file = open('ccf.dat', 'wb')\n # pickle.dump(structures[i].ccf,ccf_file)\n # ccf_file.close()\n # rvf=open('rvf.dat','wb')\n # pickle.dump(r_vector,rvf)\n # rvf.close()\n # write('out.cif',Atoms(cell=structures[i].cell * (volume / structures[i].get_volume()) ** (1.0 / 3.0),\n # scaled_positions=scaled_positions, numbers=structures[i].numbers,pbc=structures[0].pbc))\n # 
print(Atoms(cell=structures[i].cell * (volume / structures[i].get_volume()) ** (1.0 / 3.0),\n # scaled_positions=scaled_positions, numbers=structures[i].numbers,pbc=structures[0].pbc).get_volume())\n for j in [prototype_id[-1 - j2] for j2 in range(len(prototype_id))]:\n struc_d[i][1] = cal_ccf_d(structures[j].ccf, structures[i].ccf)\n if struc_d[i][1] < threshold:\n struc_d[i][0] = j\n break\n elif l_same_cell:\n struc_d[i][1] = cal_ccf_d(\n structures[j].ccf, struc2ccf(Atoms(\n cell=structures[j].cell * (volume / structures[j].get_volume() * structures[j].n_atom) ** (\n 1.0 / 3.0),\n scaled_positions=scaled_positions, numbers=structures[i].numbers,\n pbc=structures[0].pbc), r_cut_off, r_vector))\n if struc_d[i][1] < threshold:\n struc_d[i][0] = j\n break\n # Mark this structure as a new prototype.\n if struc_d[i][0] == -1:\n struc_d[i] = [-2, 0.0]\n prototype_id.append(i)\n\n\ndef count_atoms(numbers, imd=1):\n ctype = {}\n for i in numbers:\n if i in ctype:\n ctype[i] += 1\n else:\n ctype[i] = 1\n if imd == 1:\n return sorted(ctype.values())\n elif imd == 2:\n return ctype\n\n\ndef nele_ctype_fu(natom):\n if len(natom) == 0:\n return 0, '0', 0\n elif natom[0] == 0:\n return 0, '0', 0\n gcd = natom[0]\n lctype = len(natom)\n if lctype == 1:\n return 1, '1', gcd\n for i in natom[1:]:\n n1 = gcd\n n2 = i\n while True:\n gcd = n2 % n1\n if gcd == 0:\n gcd = n1\n break\n elif gcd == 1:\n return lctype, strctype(natom), 1\n else:\n n2 = n1\n n1 = gcd\n return lctype, strctype([int(float(i) / gcd + 0.5) for i in natom]), gcd\n\n\ndef strctype(ctype):\n sctype = str(ctype[0])\n if len(ctype) == 1:\n return sctype\n for i in ctype[1:]:\n sctype = sctype + '_' + str(i)\n return sctype\n\n\ndef plt_ccf(ccf, r_vector, ftype, ltt=True):\n stair = 0.0\n plt.title(ftype)\n hd = []\n lb = []\n if ltt:\n hd.append(plt.plot(r_vector, ccf['Total'], 'g-', linewidth=2))\n stair = max(0.3, 1.2 * max(ccf['Total']))\n for key in ccf.keys():\n if key != 'Total':\n hd.append(plt.plot(r_vector, ccf[key] + stair, 'g-', linewidth=2))\n # plt.legend(hd,key,loc='upper right')\n lb.append(key)\n # stair += 1.5 * max(ccf[key])\n stair += max(0.3, 1.2 * max(ccf[key]))\n plt.grid(True)\n # plt.legend(handles=hd,labels=lb,loc='best')\n plt.show()\n\n\ndef show2ccf(ccf1, ccf2, r_vector):\n stair = 0.0\n diff_ccf = {}\n for key in ccf1:\n diff_ccf[key] = ccf1[key] - ccf2[key]\n plt.plot(r_vector, diff_ccf[key] + stair, 'g-', linewidth=2)\n stair += 1.1 * max(diff_ccf[key])\n\n # plt.plot(r_vector, ccf1[key] + stair, 'g-', linewidth=2)\n # stair+=1.1*max(ccf1[key])\n # plt.plot(r_vector, ccf2[key] + stair, 'g-', linewidth=2)\n # stair += 1.1 * max(ccf2[key])\n plt.grid(True)\n plt.show()\n\n\ndef start_cli():\n helpl = '''\nthis parameter controls which method will be used to deal with lattice for comparing structural similarity\n0 don't change lattice\n1 equal particle number density\n2 try equal particle number density and equal lattice (default: %(default)s)\n'''.strip()\n parser = argparse.ArgumentParser(\n description='SPAP can analyze symmetry and compare similarity of a large number of atomic structures. '\n 'Typically, SPAP can process structures predicted by CALYPSO (www.calypso.cn).'\n # 'Coordination Characterization Function (CCF) is used to measure structural '\n # 'similarity. If you use this program and method in your research, please read and cite the '\n # 'following publication: \\nJ. Phys. Condens. 
Matter 2017, 29, 165901.'\n )\n parser.add_argument('-t', '--tolerance', '--symprec', type=float, default=0.1, dest='symprec',\n help='this precision is used to analyze symmetry of atomic structures (default: %(default)s)')\n parser.add_argument('-e', '--e_range', type=float, default=0.4,\n help='define an energy range in which structures will be analyzed (default: %(default)s)')\n parser.add_argument('-n', '--total_struc', type=int, default=None,\n help='this number of structures will be analyzed (default: %(default)s)')\n parser.add_argument('-a', action='store_true', help='process all the structures')\n parser.add_argument('--nc', '--n_comp', action='store_true',\n help='not to compare similarity of structures (default: %(default)s)')\n parser.add_argument('--th', '--threshold', type=float, default=None, dest='threshold',\n help='threshold for similar/dissimilar boundary (default: %(default)s)')\n parser.add_argument('-r', '--r_cut_off', type=float, default=None,\n help='inter-atomic distances within this cut off radius will contribute to CCF '\n '(default: %(default)s Angstrom)')\n parser.add_argument('-l', '--ilat', type=int, choices=[0, 1, 2], default=2,\n help=helpl\n # 'this parameter controls which method will be used to '\n # 'deal with lattice for comparing structural similarity\\n'\n # '0 don\\'t change lattice\\n'\n # '1 equal particle number density\\n'\n # '2 try equal particle number density and equal lattice (default: %(default)s)'\n )\n parser.add_argument('--nd', '--no_db', action='store_true',\n help='not to write structures into ase (https://wiki.fysik.dtu.dk/ase/) database file '\n '(default: %(default)s)')\n parser.add_argument('--cif', '--l_cif', action='store_true', dest='l_cif',\n help='write structures into cif files (default: %(default)s)')\n parser.add_argument('--pos', '--vasp', '--l_poscar', action='store_true', dest='l_poscar',\n help='write structures into files in VASP format (default: %(default)s)')\n parser.add_argument('-d', '--l_view', action='store_true', help='display the structures (default: %(default)s)')\n parser.add_argument('-w', '--work_dir', type=str, default='./', help='set working directory (default: %(default)s)')\n parser.add_argument('-i', '--i_mode', type=int, choices=[1, 2, 3], default=1,\n help='different functionality of SPAP: \\n1 analyze CALYPSO prediction results; \\n2 calculate '\n 'symmetry and similarity of structures in struc directory; \\n3 read and analyze '\n 'structures optimized by VASP (default: %(default)s)')\n parser.add_argument('-v', '--version', action='version', version='SPAP: 1.0.2')\n args = parser.parse_args()\n if args.a:\n args.total_struc = 99999999\n run_spap(\n symprec=args.symprec,\n e_range=args.e_range,\n # e_range=0.3,\n total_struc=args.total_struc,\n # threshold=0.05,\n threshold=args.threshold,\n # r_cut_off=6.0,\n r_cut_off=args.r_cut_off,\n # extend_r=1.0,\n # ilat=2,\n ilat=args.ilat,\n # ccf_step=0.02,\n # l_comp=False,\n l_comp=not args.nc,\n # l_db=True,\n l_db=not args.nd,\n # l_cif=True,\n l_cif=args.l_cif,\n # l_poscar=True,\n l_poscar=args.l_poscar,\n # work_dir='./example/results',\n # work_dir='C:\\\\Users\\\\null\\\\Documents\\\\share\\\\wks\\\\Examples\\\\1_example\\\\results',\n # work_dir='./results',\n work_dir=args.work_dir,\n # i_mode=1,\n i_mode=args.i_mode,\n # l_view=True,\n l_view=args.l_view,\n )\n\n\nif __name__ == '__main__':\n start_cli()\n", "id": "1578437", "language": "Python", "matching_score": 6.008157253265381, "max_stars_count": 0, "path": "abacus/spap.py" }, { 
"content": "'''\r\nThis module uses Crystal Structure Prototype Database (CSPD) to generate a\r\nlist of crystal structures for the system defined by user. These structures\r\ncould be used for machine learning, high-throughput calculations, and\r\nstructure prediction for materials design. They are also a very good\r\nsource of structures for fitting empirical potentials. The function\r\natomic_structure_generator could return a list of ASE\r\n(https://wiki.fysik.dtu.dk/ase/) Atoms object. You could use any calculator\r\nsupported by ASE for further calculations, such as structure optimization.\r\n\r\nThis program is developed at SUNCAT Center for Interface Science and\r\nCatalysis, SLAC National Accelerator Laboratory, Stanford University. This\r\nwork is funded by SUNCAT center Toyota Research Institute.\r\n\r\nThe methodology of the program is introduced in the article [Ch<NAME>,\r\net al. \"Construction of crystal structure prototype database: methods and\r\napplications.\" Journal of Physics: Condensed Matter 29.16 (2017): 165901].\r\nPlease cite it if you use the program for your research.\r\n\r\nFeel free to contact the author (information is listed below) whenever you\r\nrun into bugs, or you want some features to be added to the program. Please\r\nlet the author know if you want to add the module to your project or make\r\nchanges to it.\r\n\r\nAuthor:\r\n<NAME>, Ph.D. in Condensed Matter Physics\r\nPostdoctoral Scholar\r\nSUNCAT Center for Interface Science and Catalysis, SLAC, Stanford University\r\nEmail: <EMAIL> / <EMAIL>\r\nQQ: 812758366\r\n\r\nNovember 7, 2018\r\n'''\r\n\r\nimport os\r\nimport math\r\nimport random\r\nimport numpy as np\r\nfrom ase import Atoms\r\nfrom ase.io import write\r\nfrom ase.db import connect\r\nfrom ase.data import covalent_radii, atomic_numbers\r\nfrom spglib import standardize_cell, get_spacegroup\r\n\r\n\r\ndef atomic_structure_generator(symbols, fu=None, ndensity=None, volume=None,\r\n mindis=None, nstr=None, maxatomn=None,\r\n cspd_file=None, lw=None, format=None, clean=None,\r\n sgn=None, to_primitive=None):\r\n \"\"\"\r\n This function will read crystal structure prototypes from CSPD.db file\r\n and return a list of ASE Atoms object for the symbols defined by users.\r\n It could also write the output structures into any file format supported\r\n by ASE.\r\n\r\n The symbols is the only parameter required to define. The function has\r\n very justified default values for the rest of the parameters. You could\r\n also customize them according to your own understanding of your system.\r\n\r\n :param symbols: str (formula) or list of str\r\n Can be a string formula, a list of symbols or a list of\r\n Atom objects. Examples: 'H2O', 'COPt12', ['H', 'H', 'O'],\r\n [Atom('Ne', (x, y, z)), ...]. Same as the symbols in ASE Atoms class\r\n (https://wiki.fysik.dtu.dk/ase/ase/atoms.html). This parameter is\r\n passed to the Atoms class.\r\n :param fu: list of int\r\n Range of formula unit. The symbols is multiplied by every formula\r\n unit in this range. The length of this list has to be 2.\r\n :param ndensity: float\r\n Total number of atoms divided by volume. It controls how dense\r\n the atoms stack.\r\n :param volume: float\r\n Average volume of the structure per symbols. If ndensity is\r\n defined, volume will be ignored. I would strongly recommend to use\r\n ndensity rather than volume.\r\n :param mindis: list of lists of float\r\n Minimum inter-atomic distances. The dimension is nxn for the\r\n structure has number of n type of element. 
For example, we could\r\n define it as [[1.7, 1.4],[1.4, 1.2]] for binary compound.\r\n mindis[0][1] defines the minimum distance between element 1 with\r\n element 2.\r\n :param nstr: int\r\n This number of structures will be returned. Less of structures might\r\n be returned when there's not enough qualified structures in the\r\n database.\r\n :param maxatomn: int\r\n Maximum number of atoms in the structure.\r\n :param cspd_file: str\r\n Path and file name of CSPD.db.\r\n :param lw: logical\r\n Whether to write the structures into files. The structures are put in\r\n structure_folder.\r\n :param format: str\r\n Used to specify the file-format. Same as the format in ase.io.write()\r\n function. Check out the supported file-format at their website\r\n (https://wiki.fysik.dtu.dk/ase/ase/io/io.html).\r\n :param sgn: list of int\r\n Range of space group sequential number as given in the International\r\n Tables for Crystallography.\r\n :return: list of ASE Atoms object\r\n \"\"\"\r\n random.seed(a=27173)\r\n strulist = []\r\n newstrulist = []\r\n locmindis = {}\r\n rdr = 0.61\r\n scale_ndensity = 2.22\r\n structure_folder = 'structure_folder'\r\n lwf = False\r\n\r\n if fu == None:\r\n fu = [2, 8]\r\n if nstr == None:\r\n nstr = 1600\r\n if maxatomn == None:\r\n maxatomn = 60\r\n if cspd_file == None:\r\n cspd_file = '~/CSPD.db'\r\n if lw == None:\r\n lw = False\r\n if format == None:\r\n format = 'cif'\r\n if clean == None:\r\n clean = True\r\n if sgn == None:\r\n sgn = [1, 230]\r\n if to_primitive == None:\r\n to_primitive = False\r\n\r\n tmpstru = Atoms(symbols)\r\n ctat = count_atoms(tmpstru.numbers, 2)\r\n intctype = count_atoms(tmpstru.numbers)\r\n atomnn = unify_an(tmpstru.numbers)\r\n nele, strctype, gcd = nele_ctype_fu(intctype)\r\n # intctype=[int(float(i)/gcd+0.5) for i in intctype]\r\n fulist = [i * gcd for i in range(fu[0], fu[1] + 1)]\r\n if format == 'db':\r\n dbw = connect(structure_folder + '/' + getcf(tmpstru.numbers, ctat) + '.db', append=False)\r\n if volume:\r\n volume = float(volume)\r\n if ndensity:\r\n ndensity = float(ndensity)\r\n if mindis:\r\n for i, an in enumerate(atomnn.keys()):\r\n for j, an2 in enumerate(atomnn.keys()):\r\n locmindis[str(an) + '_' + str(an2)] = mindis[i][j]\r\n elif not (ndensity or volume):\r\n for an in atomnn.keys():\r\n for an2 in atomnn.keys():\r\n locmindis[str(an) + '_' + str(an2)] = (covalent_radii[an] + covalent_radii[an2]) * rdr\r\n if not ndensity:\r\n if volume:\r\n ndensity = len(tmpstru.numbers) / volume\r\n elif mindis:\r\n ndensity = scale_ndensity * sum(atomnn.values()) / (4 / 3.0 * math.pi / ((2 * rdr) ** 3) / 0.34 * sum(\r\n [atomnn[sym] * mindis[i][i] ** 3 for i, sym in enumerate(atomnn.keys())]))\r\n else:\r\n ndensity = scale_ndensity * sum(atomnn.values()) / (\r\n 4 / 3.0 * math.pi / 0.34 * sum([atomnn[sym] * covalent_radii[sym] ** 3 for sym in atomnn.keys()]))\r\n if (ndensity or volume) and (not mindis):\r\n for an in atomnn.keys():\r\n for an2 in atomnn.keys():\r\n locmindis[str(an) + '_' + str(an2)] = (covalent_radii[an] + covalent_radii[an2]) * rdr\r\n # Need to improve!!! 
calculate locmindis according to ndensity\r\n\r\n db = connect(cspd_file)\r\n for row in db.select('ctype=_' + strctype):\r\n if row.lfocp and sgn[0] <= row.sgn <= sgn[1]:\r\n tempstru = row.toatoms()\r\n if to_primitive:\r\n pcell = standardize_cell(\r\n (tempstru.cell, tempstru.get_scaled_positions(), tempstru.numbers),\r\n to_primitive=True, symprec=0.01)\r\n # sgsn=get_spacegroup((strulist[struid].cell,strulist[struid].get_scaled_positions(),strulist[struid].numbers),symprec=0.01)\r\n # sgsn2=get_spacegroup(pcell,symprec=0.01)\r\n if pcell:\r\n tempstru = Atoms(cell=pcell[0], scaled_positions=pcell[1], numbers=pcell[2], pbc=True)\r\n natoms = len(tempstru.numbers)\r\n if natoms <= maxatomn:\r\n if to_primitive:\r\n final_fu = int(row.fu / row.natoms * natoms + 0.5)\r\n else:\r\n final_fu = row.fu\r\n for i in fulist:\r\n if final_fu == i:\r\n strulist.append(tempstru)\r\n strulist[-1].sgn = row.sgn\r\n strulist[-1].dname = row.dname\r\n strulist[-1].oid = row.oid\r\n # Or construct a list of rows and convert part of them to atoms object.\r\n break\r\n if lw:\r\n if os.path.exists(structure_folder):\r\n if clean:\r\n for tpfn in os.listdir(structure_folder):\r\n path_file = os.path.join(structure_folder, tpfn)\r\n if os.path.isfile(path_file):\r\n os.remove(path_file)\r\n else:\r\n os.makedirs(structure_folder)\r\n isucc = 0\r\n ifail = 0\r\n for i in range(len(strulist)):\r\n struid = int(random.random() * len(strulist))\r\n tempstru = strulist[struid]\r\n tempstru.set_cell(\r\n tempstru.cell * (len(tempstru.numbers) / tempstru.get_volume() / ndensity) ** (1.0 / 3),\r\n scale_atoms=True)\r\n tempstru.set_atomic_numbers(subst_ele(tempstru.numbers, atomnn))\r\n # Add break points will change the random number!!! Wired!!!\r\n if checkdis(tempstru, locmindis):\r\n newstrulist.append(tempstru)\r\n isucc += 1\r\n else:\r\n if lwf:\r\n write(structure_folder + '/' + str(ifail + 1) + '_failed_' + getcf(tempstru.numbers, ctat)\r\n + '_{}'.format(tempstru.sgn) + '.cif'\r\n , tempstru)\r\n del strulist[struid]\r\n ifail += 1\r\n continue\r\n if lw:\r\n suffix = ''\r\n if format == 'cif':\r\n suffix = '.cif'\r\n elif format == 'vasp':\r\n suffix = '.vasp'\r\n if (format == 'db'):\r\n dbw.write(newstrulist[-1], sgn=newstrulist[-1].sgn, dname=newstrulist[-1].dname,\r\n oid=newstrulist[-1].oid)\r\n elif format == 'vasp':\r\n write_struc(newstrulist[-1], ctat,\r\n structure_folder + '/' + str(isucc) + '_' + getcf(newstrulist[-1].numbers,\r\n ctat) + '_{}'.format(\r\n newstrulist[-1].sgn) + suffix)\r\n else:\r\n write(structure_folder + '/' + str(isucc) + '_' + getcf(newstrulist[-1].numbers, ctat)\r\n + '_{}'.format(newstrulist[-1].sgn) + suffix\r\n , newstrulist[-1], format=format)\r\n print('Chemical Formula: {:9} Space Group: {:4d}'.format(getcf(newstrulist[-1].numbers, ctat),\r\n newstrulist[-1].sgn))\r\n # Start add by shenzx 20200518\r\n \r\n # LogFile = open('StructureLog.dat','a')\r\n # LogFile.write('Chemical Formula: {:9} Space Group: {:4d}'.format(getcf(newstrulist[-1].numbers, ctat),\r\n # newstrulist[-1].sgn))\r\n # End\r\n if isucc == nstr:\r\n break\r\n del strulist[struid]\r\n print('{} structures generated\\n{} physically unjustified structures are filtered out'.format(isucc, ifail))\r\n return newstrulist\r\n\r\n\r\ndef write_struc(struc, ctat, strucn):\r\n poscar = open(strucn, 'w')\r\n poscar.write('initial structure\\n' + '1.0\\n')\r\n for v in struc.cell:\r\n poscar.write('{:>13.7f}{:>13.7f}{:>13.7f}\\n'.format(v[0], v[1], v[2]))\r\n smbd=getcf(struc.numbers,ctat,2)\r\n 
ele_n=''\r\n for smb in smbd.keys():\r\n poscar.write('{:4}'.format(smb))\r\n ele_n += ' {:>3}'.format(smbd[smb])\r\n poscar.write('\\n' + ele_n+'\\nDirect')\r\n scaled_pos =struc.get_scaled_positions(wrap=True)\r\n for n in ctat.keys():\r\n for j, pos in enumerate(scaled_pos):\r\n if struc.numbers[j] == n:\r\n poscar.write('\\n{:>8.5f} {:>8.5f} {:>8.5f}'.format(pos[0], pos[1], pos[2]))\r\n poscar.close()\r\n\r\n\r\ndef getcf(numbers, ctat, irt=1):\r\n cf = ''\r\n smbd = {}\r\n for key in ctat.keys():\r\n for eles in atomic_numbers.keys():\r\n if atomic_numbers[eles] == key:\r\n cf += eles\r\n break\r\n ict = np.sum(numbers == key)\r\n smbd[eles] = ict\r\n if ict != 1:\r\n cf += str(ict)\r\n if irt == 1:\r\n return cf\r\n elif irt==2:\r\n return smbd\r\n\r\n\r\ndef variable_stoichiometry_generator(symbols, stoichiometry, clean=False, fu=None,\r\n mindis=None, nstr=None, maxatomn=None,\r\n cspd_file=None, lw=None, format=None,\r\n sgn=None, to_primitive=None):\r\n tpstru = Atoms(symbols)\r\n smbl = []\r\n for smb in tpstru.get_chemical_symbols():\r\n if not (smb in smbl):\r\n smbl.append(smb)\r\n for stc in stoichiometry:\r\n cc = ''\r\n for i, n in enumerate(stc):\r\n cc += smbl[i] + '{}'.format(n)\r\n atomic_structure_generator(\r\n symbols=cc, fu=fu, mindis=mindis, nstr=nstr, maxatomn=maxatomn,\r\n cspd_file=cspd_file, lw=lw, format=format, clean=clean, sgn=sgn, to_primitive=to_primitive)\r\n pass\r\n\r\n\r\ndef checkdis(atoms, dis):\r\n '''\r\n This function checks whether the inter-atomic distances is justified or\r\n not, and it's very time consuming for large structure!!! There's no\r\n documentation for the rest of the code. So, good luck and enjoy.\r\n\r\n :param atoms:\r\n :param dis:\r\n :return:\r\n '''\r\n squdis = {}\r\n for key in dis.keys():\r\n squdis[key] = dis[key] ** 2\r\n irange = cell_range(atoms.cell, max(dis.values()))\r\n natoms = len(atoms.numbers)\r\n strnum = [str(i) for i in atoms.numbers]\r\n transa = -irange[0] * atoms.cell[0]\r\n for ia in range(-irange[0] + 1, irange[0] + 1):\r\n transa = np.row_stack((transa, ia * atoms.cell[0]))\r\n transb = -irange[1] * atoms.cell[1]\r\n for ib in range(-irange[1] + 1, irange[1] + 1):\r\n transb = np.row_stack((transb, ib * atoms.cell[1]))\r\n transc = -irange[2] * atoms.cell[2]\r\n for ic in range(-irange[2] + 1, irange[2] + 1):\r\n transc = np.row_stack((transc, ic * atoms.cell[2]))\r\n for i1 in range(natoms):\r\n for i2 in range(i1, natoms):\r\n vct = atoms.positions[i2] - atoms.positions[i1]\r\n tpsqudis = squdis[strnum[i2] + '_' + strnum[i1]]\r\n for ia in range(-irange[0], irange[0] + 1):\r\n for ib in range(-irange[1], irange[1] + 1):\r\n for ic in range(-irange[2], irange[2] + 1):\r\n if i1 == i2 and ia == 0 and ib == 0 and ic == 0:\r\n continue\r\n if np.sum(np.square(\r\n vct - transc[ic + irange[2]] - transb[ib + irange[1]] - transa[\r\n ia + irange[0]])) < tpsqudis:\r\n return False\r\n return True\r\n\r\n\r\ndef cell_range(cell, rcut):\r\n recipc_no2pi = Atoms(cell=cell).get_reciprocal_cell()\r\n return [int(rcut * ((np.sum(recipc_no2pi[i] ** 2)) ** 0.5) + 1.0e-6) + 1 for i in range(3)]\r\n\r\n\r\ndef subst_ele(numbers, atomnn):\r\n locatomnn = atomnn.copy()\r\n origatomnn = unify_an(numbers)\r\n for key in origatomnn.keys():\r\n for key2 in locatomnn.keys():\r\n if origatomnn[key] == locatomnn[key2]:\r\n origatomnn[key] = key2\r\n del locatomnn[key2]\r\n break\r\n tmpn = []\r\n for key in numbers:\r\n tmpn.append(origatomnn[key])\r\n return tmpn\r\n\r\n\r\ndef count_atoms(numbers, irt=1):\r\n ctype = {}\r\n 
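# --- Editor's note: illustrative sketch, not part of the original abacus/cspd.py. ---
# variable_stoichiometry_generator() builds one composition string per stoichiometry
# row (element order follows the first appearance in `symbols`), so for
# symbols='LiSn' the rows below expand to 'Li1Sn1' and 'Li4Sn10' before being passed
# on to atomic_structure_generator().  Import path and argument values are assumptions.
from abacus.cspd import variable_stoichiometry_generator

variable_stoichiometry_generator(
    symbols='LiSn',
    stoichiometry=[[1, 1], [4, 10]],
    nstr=5,
    cspd_file='CSPD.db',    # assumed local database path
    lw=True,
    format='cif')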
for i in numbers:\r\n if i in ctype:\r\n ctype[i] += 1\r\n else:\r\n ctype[i] = 1\r\n if irt == 1:\r\n return sorted(ctype.values())\r\n elif irt==2:\r\n return ctype\r\n\r\n\r\ndef unify_an(numbers):\r\n '''\r\n Stores the composition type in a dictionary.\r\n :param numbers:\r\n :return:\r\n '''\r\n atmnn = {}\r\n for i in numbers:\r\n if i in atmnn:\r\n atmnn[i] += 1\r\n else:\r\n atmnn[i] = 1\r\n natom = sorted(atmnn.values())\r\n if len(natom) == 1:\r\n for symb in atmnn.keys():\r\n atmnn[symb] = 1\r\n return atmnn\r\n gcd = natom[0]\r\n for i in natom[1:]:\r\n n1 = gcd\r\n n2 = i\r\n while True:\r\n gcd = n2 % n1\r\n if gcd == 0:\r\n gcd = n1\r\n break\r\n elif gcd == 1:\r\n return atmnn\r\n else:\r\n n2 = n1\r\n n1 = gcd\r\n for symb in atmnn.keys():\r\n atmnn[symb] = int(float(atmnn[symb]) / gcd + 0.5)\r\n return atmnn\r\n\r\n\r\ndef nele_ctype_fu(natom):\r\n if len(natom) == 0:\r\n return 0, '0', 0\r\n elif natom[0] == 0:\r\n return 0, '0', 0\r\n gcd = natom[0]\r\n lctype = len(natom)\r\n if lctype == 1:\r\n return 1, '1', gcd\r\n for i in natom[1:]:\r\n n1 = gcd\r\n n2 = i\r\n while True:\r\n gcd = n2 % n1\r\n if gcd == 0:\r\n gcd = n1\r\n break\r\n elif gcd == 1:\r\n return lctype, strctype(natom), 1\r\n else:\r\n n2 = n1\r\n n1 = gcd\r\n return lctype, strctype([int(float(i) / gcd + 0.5) for i in natom]), gcd\r\n\r\n\r\ndef strctype(ctype):\r\n sctype = str(ctype[0])\r\n if len(ctype) == 1:\r\n return sctype\r\n for i in ctype[1:]:\r\n sctype = sctype + '_' + str(i)\r\n return sctype\r\n", "id": "6038875", "language": "Python", "matching_score": 2.626849412918091, "max_stars_count": 0, "path": "abacus/cspd.py" }, { "content": "'''\nAuthor:\nDr. <NAME>\n\nEmail:\n<EMAIL> / <EMAIL>\n'''\nimport numpy as np\nfrom ase import Atoms\nfrom math import sqrt, pi, exp\nfrom ase.data import atomic_numbers\n# from numba import jit\n\n\ndef struc2ccf(struc, r_cut_off, r_vector, apw=60.0, ftype='CCF'):\n rho = len(struc.numbers) / struc.get_volume()\n ccf = d2ccf(cal_inter_atomic_d(struc, r_cut_off), r_cut_off, r_vector, apw, ftype, rho)\n nspec = get_nspec(struc)\n lccf=len(r_vector)\n for i in range(1, nspec + 1):\n for j in range(i, nspec + 1):\n pairt = str(i) + '_' + str(j)\n if not pairt in ccf:\n ccf[pairt] = np.zeros(lccf,float)\n return ccf\n\n\ndef cal_ccf_d(ccf1, ccf2):\n ccf_wf1 = {}\n lenth = len(ccf1)\n for key in ccf1.keys():\n ccf_wf1[key] = np.sum(ccf1[key])\n sum_ccf_wf = sum(ccf_wf1.values())\n if abs(sum_ccf_wf) < 1.0e-20:\n for key in ccf_wf1.keys():\n ccf_wf1[key] = 1.0 / lenth\n else:\n for key in ccf_wf1.keys():\n ccf_wf1[key] = ccf_wf1[key] / sum_ccf_wf\n ccf_wf2 = {}\n for key in ccf2.keys():\n ccf_wf2[key] = np.sum(ccf2[key])\n sum_ccf_wf = sum(ccf_wf2.values())\n if abs(sum_ccf_wf) < 1.0e-20:\n for key in ccf_wf2.keys():\n ccf_wf2[key] = 1.0 / lenth\n else:\n for key in ccf_wf2.keys():\n ccf_wf2[key] = ccf_wf2[key] / sum_ccf_wf\n for key in ccf_wf1.keys():\n ccf_wf1[key] = (ccf_wf1[key] + ccf_wf2[key]) / 2.0\n struc_d = 0.0\n for key in ccf1.keys():\n struc_d += ccf_wf1[key] * pearson_cc(ccf1[key], ccf2[key])\n return 1 - struc_d\n\n\n# @jit(nopython=True)\ndef cal_inter_atomic_d(struc, r_cut_off):\n distances = {}\n # square_rcut=r_cut_off*r_cut_off\n i_range = cell_range(struc.cell, struc.pbc, r_cut_off)\n natoms = len(struc.numbers)\n ele_tag = element_tag(struc.numbers)\n # square_delta=0.01\n d_delta = 0.001\n # d_delta2=0.001\n # n_atom_dict=count_atoms_dict(struc.numbers)\n pair_tags = []\n l_d_empty = int(r_cut_off / d_delta + 0.5) + 1\n # 
d_empty=np.zeros(l_d_empty,dtype=int)\n # d_empty = l_d_empty * [0]\n\n n_species = len(ele_tag)\n for i1 in range(n_species):\n for i2 in range(n_species):\n if i1 < i2:\n pair_tags.append(str(i1 + 1) + '_' + str(i2 + 1))\n else:\n pair_tags.append(str(i2 + 1) + '_' + str(i1 + 1))\n # distances[pair_tags[-1]] = d_empty.copy()\n # distances[pair_tags[-1]] = d_empty[:]\n distances[pair_tags[-1]] = l_d_empty * [0]\n pair_tags = [[x for x in pair_tags[i * n_species:(i + 1) * n_species]] for i in range(n_species)]\n transa = -i_range[0] * struc.cell[0]\n for ia in range(-i_range[0] + 1, i_range[0] + 1):\n transa = np.row_stack((transa, ia * struc.cell[0]))\n transb = -i_range[1] * struc.cell[1]\n for ib in range(-i_range[1] + 1, i_range[1] + 1):\n transb = np.row_stack((transb, ib * struc.cell[1]))\n transc = -i_range[2] * struc.cell[2]\n for ic in range(-i_range[2] + 1, i_range[2] + 1):\n transc = np.row_stack((transc, ic * struc.cell[2]))\n # temp_d = d_empty.copy()\n # temp_d = d_empty[:]\n temp_d = l_d_empty * [0]\n for ia in range(-i_range[0], i_range[0] + 1):\n for ib in range(-i_range[1], i_range[1] + 1):\n for ic in range(-i_range[2], i_range[2] + 1):\n for i1 in range(natoms):\n for i2 in range(i1 + 1, natoms):\n d = sqrt(np.sum(\n np.square(struc.positions[i1] - struc.positions[i2] + transc[ic + i_range[2]] +\n transb[ib + i_range[1]] + transa[ia + i_range[0]])))\n if d < r_cut_off:\n distances[pair_tags[ele_tag[struc.numbers[i1]] - 1][ele_tag[struc.numbers[i2]] - 1]][\n -int((r_cut_off - d) / d_delta + 0.5) - 1] += 1\n for ic in range(1, i_range[2] + 1):\n d = sqrt(np.sum(np.square(transc[ic + i_range[2]] + transb[ib + i_range[1]] + transa[ia + i_range[0]])))\n if d < r_cut_off:\n temp_d[-int((r_cut_off - d) / d_delta + 0.5) - 1] += 1\n for ib in range(1, i_range[1] + 1):\n d = sqrt(np.sum(np.square(transb[ib + i_range[1]] + transa[ia + i_range[0]])))\n if d < r_cut_off:\n temp_d[-int((r_cut_off - d) / d_delta + 0.5) - 1] += 1\n for ia in range(1, i_range[0] + 1):\n d = sqrt(np.sum(np.square(transa[ia + i_range[0]])))\n if d < r_cut_off:\n temp_d[-int((r_cut_off - d) / d_delta + 0.5) - 1] += 1\n for ele_n, n in count_atoms_dict(struc.numbers).items():\n for i, n_pair in enumerate(temp_d):\n if n_pair != 0:\n distances[pair_tags[ele_tag[ele_n] - 1][ele_tag[ele_n] - 1]][i] += n_pair * n\n final_d = {}\n for key1 in distances.keys():\n final_d[key1] = {}\n for i, n_pair in enumerate(distances[key1]):\n if n_pair != 0:\n final_d[key1][r_cut_off - (l_d_empty - i - 1) * d_delta] = float(n_pair) / natoms\n return final_d\n\n\n# @jit()\ndef d2ccf(distances, r_cut_off, r_vector, a=60.0, ftype='CCF', rho=1.0):\n ccf = {}\n if ftype == 'RDF':\n norm = 2.0 * pi * rho\n for key1 in distances.keys():\n for key2 in distances[key1].keys():\n if key1 in ccf:\n if ftype == 'CCF':\n ccf[key1] = ccf[key1] + gaussian_f(distances[key1][key2] * weight_f(key2, r_cut_off), key2,\n r_vector, a)\n elif ftype == 'RDF':\n ccf[key1] = ccf[key1] + gaussian_f(distances[key1][key2] / norm / key2 ** 2, key2, r_vector, a)\n else:\n if ftype == 'CCF':\n ccf[key1] = gaussian_f(distances[key1][key2] * weight_f(key2, r_cut_off), key2, r_vector, a)\n elif ftype == 'RDF':\n ccf[key1] = gaussian_f(distances[key1][key2] / norm / key2 ** 2, key2, r_vector, a)\n return ccf\n\n\ndef weight_f(r, r_cut_off):\n tail_r = 0.5\n if r < r_cut_off - tail_r:\n return 1.0\n else:\n return exp(-3.0 * (r - r_cut_off + tail_r) / tail_r)\n\n\n# @jit(nopython=True)\ndef pearson_cc(x, y):\n # average1=np.mean(ccf1)\n # average2=np.mean(ccf2)\n 
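# --- Editor's note: illustrative comparison sketch, not part of the original abacus/ccf.py. ---
# struc2ccf() turns an ASE Atoms object into a dictionary of per-pair correlation
# curves, and cal_ccf_d() reduces two such dictionaries to a single distance
# (0.0 for identical fingerprints).  The cutoff, sampling grid and the two test
# cells below are arbitrary example values, and the import path is an assumption.
import numpy as np
from ase.build import bulk
from abacus.ccf import struc2ccf, cal_ccf_d   # assumed import path

r_cut = 9.0
r_grid = np.linspace(0.0, r_cut, 900)         # sampling grid for the Gaussians
fcc_cu = bulk('Cu', 'fcc', a=3.6)
bcc_cu = bulk('Cu', 'bcc', a=2.9)
dist = cal_ccf_d(struc2ccf(fcc_cu, r_cut, r_grid),
                 struc2ccf(bcc_cu, r_cut, r_grid))
print('CCF distance between fcc and bcc Cu: %.3f' % dist)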
# Send in mean(x) might be more efficient. Because we calculated sum(x) before.\n local1 = x - np.mean(x)\n local2 = y - np.mean(y)\n # sum1=np.sum(np.multiply(local1,local2))\n sum2 = np.sum(np.square(local1))\n sum3 = np.sum(np.square(local2))\n if sum2 + sum3 == 0.0:\n return 1.0\n elif (sum2 == 0.0) or (sum3 == 0.0):\n return 0.0\n return np.sum(np.multiply(local1, local2)) / sqrt(sum2 * sum3)\n\n\n# @jit\ndef gaussian_f(weight_f, b, x, a=60.0):\n # a = 60.0\n # a=290\n # return weight_f * sqrt(a / pi) * np.exp(-a * (x - b)**2)\n return weight_f * sqrt(a / pi) * np.exp(-a * np.square(x - b))\n\n\ndef element_tag(numbers, irt=1):\n ele_n = {}\n ele_tag = {}\n for i in numbers:\n if i in ele_n:\n ele_n[i] += 1\n else:\n ele_n[i] = 1\n sotn = sorted(ele_n.items(), key=lambda x: x[1])\n for i, x in enumerate(sotn):\n if irt == 1:\n ele_tag[x[0]] = i + 1\n elif irt == 2:\n for symb in atomic_numbers.keys():\n if atomic_numbers[symb] == x[0]:\n break\n ele_tag[x[0]] = [i + 1, symb]\n # Worth to try something like below\n # ele_tag[x[0]] = i\n return ele_tag\n\n\ndef get_nspec(struc):\n eles = {}\n for i in struc.numbers:\n if not i in eles:\n eles[i] = 1\n return len(eles)\n\n\ndef cell_range(cell, pbc, rcut):\n recipc_no2pi = Atoms(cell=cell).get_reciprocal_cell()\n i_range = []\n for i in range(3):\n if pbc[i] == True:\n i_range.append(int(rcut * ((np.sum(recipc_no2pi[i] ** 2)) ** 0.5) + 1.0e-6) + 1)\n # i_range.append(int(rcut * ((np.sum(recipc_no2pi[i] ** 2)) ** 0.5)) + 1)\n else:\n i_range.append(0)\n return i_range\n # return [int(rcut * ((np.sum(recipc_no2pi[i] ** 2)) ** 0.5)) + 1 for i in range(3)]\n\n\ndef count_atoms_dict(numbers):\n ctype = {}\n for i in numbers:\n if i in ctype:\n ctype[i] += 1\n else:\n ctype[i] = 1\n return ctype\n", "id": "5823121", "language": "Python", "matching_score": 1.372937798500061, "max_stars_count": 0, "path": "abacus/ccf.py" }, { "content": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jun 13 10:31:30 2018\r\n\r\n@author: shenzx\r\n\"\"\"\r\nfrom os.path import join, basename, exists\r\nfrom ase import Atoms\r\nimport numpy as np\r\nimport shutil\r\nfrom ase.calculators.abacus.potential import PotentialDict\r\nfrom ase.calculators.abacus.basis import BasisDict\r\n# from abacus.potential import PotentialDict\r\n# from abacus.basis import BasisDict\r\n# import sys\r\n# sys.path.append(\"E:\\Git\\project\\ase-abacus\\ase-abacus\")\r\n\r\n\r\ndef potential_list():\r\n return list(PotentialDict.keys())\r\n\r\n\r\ndef basis_list():\r\n return list(BasisDict.keys())\r\n\r\n\r\ndef judge_exist_stru(stru=None):\r\n if stru is None:\r\n return False\r\n else:\r\n return True\r\n\r\n\r\ndef read_ase_stru(stru=None, coordinates_type=\"Cartesian\"):\r\n if judge_exist_stru(stru):\r\n atoms_list = []\r\n atoms_position = []\r\n atoms_masses = []\r\n atoms_magnetism = []\r\n atoms_all = stru.get_chemical_symbols()\r\n\r\n # sort atoms according to atoms\r\n for atoms_all_name in atoms_all:\r\n temp = True\r\n for atoms_list_name in atoms_list:\r\n if atoms_all_name == atoms_list_name:\r\n temp = False\r\n break\r\n\r\n if temp:\r\n atoms_list.append(atoms_all_name)\r\n\r\n for atoms_list_name in atoms_list:\r\n atoms_position.append([])\r\n atoms_masses.append([])\r\n atoms_magnetism.append([])\r\n\r\n # get position, masses, magnetism from ase atoms\r\n if coordinates_type == 'Cartesian':\r\n for i in range(len(atoms_list)):\r\n for j in range(len(atoms_all)):\r\n if atoms_all[j] == atoms_list[i]:\r\n atoms_position[i].append(list(\r\n 
stru.get_positions()[j]))\r\n atoms_masses[i] = stru.get_masses()[j]\r\n atoms_magnetism[i] = stru.get_initial_magnetic_moments()[j]\r\n # update 20201230\r\n\r\n elif coordinates_type == 'Direct':\r\n for i in range(len(atoms_list)):\r\n for j in range(len(atoms_all)):\r\n if atoms_all[j] == atoms_list[i]:\r\n atoms_position[i].append(list(\r\n stru.get_scaled_positions()[j]))\r\n atoms_masses[i] = stru.get_masses()[j]\r\n atoms_magnetism[i] = stru.get_initial_magnetic_moments()[j]\r\n # update 20201230\r\n else:\r\n raise ValueError(\"'coordinates_type' is ERROR,\"\r\n \"please set to 'Cartesian' or 'Direct'\")\r\n\r\n return atoms_list, atoms_masses, atoms_position, atoms_magnetism\r\n\r\n\r\ndef set_potential(atoms_list=None, pseudo_dir=\"./\", potential_name=None):\r\n if atoms_list is None:\r\n print(\" Please set right 'atoms_list' \")\r\n\r\n else:\r\n potential = []\r\n if potential_name is None:\r\n for atoms_list_name in atoms_list:\r\n potential.append(\r\n join(pseudo_dir,\r\n PotentialDict['PotLDA'][atoms_list_name]))\r\n\r\n elif type(potential_name) == str:\r\n PotList = potential_list()\r\n if potential_name in PotList:\r\n for atoms_list_name in atoms_list:\r\n potential.append(join(\r\n pseudo_dir,\r\n PotentialDict[potential_name][atoms_list_name]))\r\n else:\r\n raise ValueError(\"'potential_name' is ERROR\")\r\n \"\"\"\r\n if potential_name == 'PotLDA':\r\n for atoms_list_name in atoms_list:\r\n potential.append(\r\n join(pseudo_dir,\r\n PotentialDict['PotLDA'][atoms_list_name]))\r\n\r\n elif potential_name == 'PotPBE':\r\n for atoms_list_name in atoms_list:\r\n potential.append(join(\r\n pseudo_dir,\r\n potential.PotPBE[atoms_list_name]))\r\n\r\n elif potential_name == 'PotPBESG15':\r\n for atoms_list_name in atoms_list:\r\n potential.append(join(\r\n pseudo_dir,\r\n potential.PotPBESG15[atoms_list_name]))\r\n\r\n else:\r\n raise ValueError(\"'potential_name' is ERROR\")\r\n \"\"\"\r\n\r\n elif type(potential_name) == list:\r\n ele_name = {}\r\n for i in potential_name:\r\n with open(join(pseudo_dir, i), 'r') as f:\r\n lines = f.readlines()\r\n\r\n for line in lines:\r\n line = line.replace('=', ' = ')\r\n line = line.replace('\"', ' \" ')\r\n data = line.split()\r\n if len(data) == 0:\r\n continue\r\n\r\n elif data[0] == 'element':\r\n ele_name[data[3]] = i\r\n break\r\n\r\n elif len(data) == 2 and data[1] == 'Element':\r\n ele_name[data[0]] = i\r\n break\r\n\r\n else:\r\n continue\r\n\r\n for atoms_list_name in atoms_list:\r\n potential.append(join(pseudo_dir,\r\n ele_name[atoms_list_name]))\r\n\r\n else:\r\n raise ValueError(\"Please sure what you do!!! 
\")\r\n\r\n return potential\r\n\r\n\r\ndef set_basis(atoms_list=None, basis_dir=\"./\", basis_name=None):\r\n if atoms_list is None:\r\n print(\" Please set right 'atoms_list'\")\r\n\r\n else:\r\n basis = []\r\n if basis_name is None:\r\n for atoms_list_name in atoms_list:\r\n basis.append(join(\r\n basis_dir,\r\n BasisDict['LDAmin'][atoms_list_name]))\r\n\r\n elif type(basis_name) == str:\r\n BasisList = basis_list()\r\n if basis_name in BasisList:\r\n for atoms_list_name in atoms_list:\r\n basis.append(join(\r\n basis_dir,\r\n BasisDict[basis_name][atoms_list_name]))\r\n else:\r\n raise ValueError(\"'basis_name' is ERROR\")\r\n\r\n elif type(basis_name) == list:\r\n ele_name = {}\r\n for i in basis_name:\r\n with open(join(basis_dir, i), 'r') as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n data = line.split()\r\n if len(data) == 0:\r\n continue\r\n elif data[0] == 'Element':\r\n ele_name[data[1]] = i\r\n break\r\n else:\r\n continue\r\n for atoms_list_name in atoms_list:\r\n basis.append(join(basis_dir, ele_name[atoms_list_name]))\r\n\r\n else:\r\n raise ValueError(\"Please sure what you do!!!\")\r\n \r\n return basis\r\n\r\n\r\ndef write_input_stru_core(stru=None,\r\n directory=\"./\",\r\n filename=\"STRU\",\r\n potential=None,\r\n pseudo_dir=\"./\",\r\n basis=None,\r\n basis_dir=\"./\",\r\n coordinates_type=\"Cartesian\",\r\n atoms_list=None,\r\n atoms_position=None, \r\n atoms_masses=None,\r\n atoms_magnetism=None,\r\n fix=1):\r\n if not judge_exist_stru(stru):\r\n return \"No input structure!\"\r\n\r\n elif (atoms_list is None):\r\n return \"Please set right atoms list\"\r\n elif(atoms_position is None):\r\n return \"Please set right atoms position\"\r\n elif(atoms_masses is None):\r\n return \"Please set right atoms masses\"\r\n elif(atoms_magnetism is None):\r\n return \"Please set right atoms magnetism\"\r\n else:\r\n with open(join(directory, filename), 'w') as f:\r\n f.write('ATOMIC_SPECIES\\n')\r\n for i in range(len(atoms_list)):\r\n if(not exists(basename(potential[i]))):\r\n shutil.copyfile(potential[i],\r\n directory+\"/\"+basename(potential[i]))\r\n temp1 = ' ' * (4-len(atoms_list[i]))\r\n temp2 = ' ' * (14-len(str(atoms_masses[i])))\r\n atomic_species = (atoms_list[i] + temp1\r\n + str(atoms_masses[i]) + temp2\r\n + basename(potential[i]))\r\n\r\n f.write(atomic_species)\r\n f.write('\\n')\r\n\r\n f.write('\\n')\r\n f.write('NUMERICAL_ORBITAL\\n')\r\n for i in range(len(atoms_list)):\r\n if(not exists(basename(basis[i]))):\r\n shutil.copyfile(basis[i],\r\n directory+\"/\"+basename(basis[i]))\r\n f.write(basename(basis[i]))\r\n f.write('\\n')\r\n\r\n f.write('\\n')\r\n f.write('LATTICE_CONSTANT\\n')\r\n f.write('1.889726125 \\n')\r\n f.write('\\n')\r\n\r\n f.write('LATTICE_VECTORS\\n')\r\n for i in range(3):\r\n for j in range(3):\r\n temp3 = str(\"{:0<12f}\".format(\r\n stru.get_cell()[i][j])) + ' ' * 3\r\n f.write(temp3)\r\n f.write(' ')\r\n f.write('\\n')\r\n f.write('\\n')\r\n\r\n f.write('ATOMIC_POSITIONS\\n')\r\n f.write(coordinates_type)\r\n f.write('\\n')\r\n f.write('\\n')\r\n for i in range(len(atoms_list)):\r\n f.write(atoms_list[i])\r\n f.write('\\n')\r\n f.write(str(\"{:0<12f}\".format(atoms_magnetism[i])))\r\n # update 20201230\r\n f.write('\\n')\r\n f.write(str(len(atoms_position[i])))\r\n f.write('\\n')\r\n\r\n for j in range(len(atoms_position[i])):\r\n temp4 = str(\"{:0<12f}\".format(\r\n atoms_position[i][j][0])) + ' ' * 3\r\n temp5 = str(\"{:0<12f}\".format(\r\n atoms_position[i][j][1])) + ' ' * 3\r\n temp6 = str(\"{:0<12f}\".format(\r\n 
atoms_position[i][j][2])) + ' ' * 3\r\n sym_pos = (temp4 + temp5 + temp6 +\r\n (str(fix) + ' ') * 3)\r\n f.write(sym_pos)\r\n f.write('\\n')\r\n # f.write('\\n\\n')\r\n\r\n pb_information = {}\r\n pb_information['pseudo_dir'] = pseudo_dir\r\n pb_information['basis_dir'] = basis_dir\r\n pb_information['potential_name'] = potential\r\n pb_information['basis_name'] = basis\r\n return pb_information\r\n\r\n\r\ndef write_input_stru(stru=None,\r\n pseudo_dir='./',\r\n potential_name=None,\r\n basis_dir='./',\r\n basis_name=None,\r\n fix=1,\r\n filename='STRU',\r\n directory='./',\r\n coordinates_type='Cartesian',\r\n spin=1,\r\n **kwargs):\r\n\r\n if not judge_exist_stru(stru):\r\n return \"No input structure!\"\r\n\r\n else:\r\n (atoms_list,\r\n atoms_masses,\r\n atoms_position,\r\n atoms_magnetism) = read_ase_stru(stru, coordinates_type)\r\n\r\n potential = set_potential(atoms_list,\r\n pseudo_dir,\r\n potential_name)\r\n basis = set_basis(atoms_list,\r\n basis_dir,\r\n basis_name)\r\n if(spin==2):\r\n for i in range(len(atoms_list)):\r\n atoms_magnetism[i] = 1.0\r\n \r\n\r\n pb_information = write_input_stru_core(stru,\r\n directory,\r\n filename,\r\n potential,\r\n pseudo_dir,\r\n basis,\r\n basis_dir,\r\n coordinates_type,\r\n atoms_list,\r\n atoms_position,\r\n atoms_masses,\r\n atoms_magnetism,\r\n fix)\r\n\r\n return pb_information\r\n\r\n\r\ndef read_stru(filename='STRU',\r\n directory='./',\r\n ase=True,\r\n **kwargs):\r\n # Read structure information from abacus structure file\r\n try:\r\n f = open(join(directory, filename), 'r')\r\n except Exception:\r\n return \"Failed to open 'STRU', Please Check!\"\r\n else:\r\n lines = f.readlines()\r\n f.close()\r\n\r\n # initialize reading information\r\n temp = []\r\n for line in lines:\r\n line = line.strip()\r\n line = line.replace('\\n', ' ')\r\n line = line.replace('\\t', ' ')\r\n line = line.replace('//', ' ')\r\n line = line.replace('#', ' ')\r\n\r\n if len(line) != 0:\r\n temp.append(line)\r\n\r\n atom_species = 0\r\n for i in range(len(temp)):\r\n if temp[i] == 'NUMERICAL_ORBITAL':\r\n atom_species = i - 1\r\n break\r\n\r\n atom_symbol = []\r\n atom_mass = []\r\n atom_potential = []\r\n atom_number = []\r\n atom_magnetism = []\r\n atom_positions = []\r\n atom_fix = []\r\n\r\n # get symbol, mass, potential\r\n for i in range(1, atom_species+1):\r\n atom_symbol.append(temp[i].split()[0])\r\n atom_mass.append(float(temp[i].split()[1]))\r\n atom_potential.append(temp[i].split()[2])\r\n atom_number.append(0)\r\n atom_magnetism.append(0)\r\n atom_positions.append([])\r\n atom_fix.append([])\r\n\r\n # get basis\r\n atom_basis = []\r\n for i in range(atom_species+2, (atom_species+1) * 2):\r\n atom_basis.append(temp[i].split()[0])\r\n\r\n # get lattice\r\n atom_lattice_scale = float(temp[(atom_species+1) * 2 + 1].split()[0])\r\n atom_lattice = np.array(\r\n [[float(temp[(atom_species+1) * 2 + 3 + i].split()[:3][j])\r\n for j in range(3)] for i in range(3)])\r\n\r\n # get coordinates type\r\n atom_coor = temp[(atom_species + 1) * 2 + 7].split()[0]\r\n\r\n # get position, atoms number, magnetism, fix\r\n for i in range(atom_species):\r\n pos_start = (atom_species + 1) * 2 + 8 + 3 * i\r\n for j in range(i):\r\n pos_start += atom_number[j]\r\n atom_it = atom_symbol.index(temp[pos_start].split()[0])\r\n atom_magnetism[atom_it] = float(temp[pos_start + 1].split()[0])\r\n atom_number[atom_it] = int(temp[pos_start + 2].split()[0])\r\n\r\n atom_positions[atom_it] = np.array(\r\n [[float(temp[pos_start + 3 + i].split()[:3][j])\r\n for j in range(3)] for 
i in range(atom_number[atom_it])])\r\n\r\n atom_fix[atom_it] = np.array(\r\n [[int(temp[pos_start + 3 + i].split()[3:6][j])\r\n for j in range(3)]for i in range(atom_number[atom_it])])\r\n\r\n # Reset structure information and return results\r\n formula_symbol = ''\r\n formula_positions = []\r\n for i in range(atom_species):\r\n if atom_number[i] == 1:\r\n formula_symbol += atom_symbol[i]\r\n\r\n else:\r\n formula_symbol += atom_symbol[i] + str(atom_number[i])\r\n\r\n for j in range(atom_number[i]):\r\n formula_positions.append(atom_positions[i][j])\r\n\r\n formula_cell = atom_lattice * atom_lattice_scale * 0.529177210903\r\n\r\n if ase is True:\r\n if atom_coor == 'Direct':\r\n return Atoms(symbols=formula_symbol,\r\n cell=formula_cell,\r\n scaled_positions=formula_positions)\r\n\r\n elif atom_coor == 'Cartesian':\r\n return Atoms(symbols=formula_symbol,\r\n cell=formula_cell,\r\n positions=formula_positions)\r\n\r\n else:\r\n raise ValueError(\"atomic coordinate type is ERROR\")\r\n\r\n else:\r\n return (formula_symbol,\r\n formula_cell,\r\n formula_positions,\r\n atom_potential,\r\n atom_basis)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # print(potential_list())\r\n # print(basis_list())\r\n StruName = read_stru(filename='ABACUS_StruLi4Sn10 ',\r\n directory='E:\\Git\\project\\Structure',\r\n ase=True)\r\n # print(StruName)\r\n print(write_input_stru(stru=StruName,\r\n pseudo_dir='E:\\Git\\project\\Potential\\PotPotSG15',\r\n potential_name=['Li_ONCV_PBE-1.0.upf',\r\n 'Sn_ONCV_PBE-1.0.upf'],\r\n basis_dir='E:\\Git\\project\\Basis\\BasSG15act', \r\n basis_name='SG15act',\r\n fix=1,\r\n filename='STRU',\r\n directory='./',\r\n coordinates_type='Cartesian'))\r\n", "id": "557812", "language": "Python", "matching_score": 4.033132076263428, "max_stars_count": 0, "path": "abacus.py" }, { "content": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n\r\nimport numpy as np\r\n\r\nfrom ._base import Latt, order_table\r\n\r\n\r\nclass Stru:\r\n def __init__(self, formula_symbol, formula_cell, elements, formula_positions, atom_number, species_number=None,\r\n atom_potential=None, atom_basis=None, is_cart=False):\r\n self._fs = formula_symbol\r\n self._e = elements\r\n if species_number is None:\r\n species_number = []\r\n for item in self._e:\r\n species_number.append(\r\n int(order_table.get(item))\r\n )\r\n self._sn = species_number\r\n self._a = atom_number\r\n self._lattice = Latt(formula_cell)\r\n self._fcoords = self._lattice.frac_coords(formula_positions) if is_cart else formula_positions\r\n self._ap = atom_potential\r\n self._ab = atom_basis\r\n self._ccoords = self._lattice.cart_coords(self._fcoords)\r\n\r\n @property\r\n def formula(self):\r\n return self._fs\r\n\r\n @property\r\n def lattice(self):\r\n return self._lattice\r\n\r\n @property\r\n def frac_coords(self):\r\n return self._fcoords\r\n\r\n @property\r\n def cart_coords(self):\r\n return self._ccoords\r\n\r\n @property\r\n def atoms_base(self):\r\n return self._ab\r\n\r\n @property\r\n def atom_potential(self):\r\n return self._ap\r\n\r\n @property\r\n def atom_species(self):\r\n return self._e\r\n\r\n @property\r\n def uniq_atom_species(self):\r\n uniq = []\r\n for i in self._e:\r\n if i not in uniq:\r\n uniq.append(i)\r\n\r\n return uniq\r\n\r\n @property\r\n def atom_number(self):\r\n return self._a\r\n\r\n @property\r\n def atomic_number(self):\r\n return self._sn\r\n\r\n @classmethod\r\n def from_stru(cls, filename='STRU'):\r\n # Read structure information from abacus structure file\r\n with open(filename, \"r\") as 
stru:\r\n lines = stru.readlines()\r\n # initialize reading information\r\n temp = []\r\n for line in lines:\r\n line = line.strip()\r\n line = line.replace('\\n', ' ')\r\n line = line.replace('\\t', ' ')\r\n line = line.replace('//', ' ')\r\n line = line.replace('#', ' ')\r\n\r\n if len(line) != 0:\r\n temp.append(line)\r\n\r\n atom_species = 0\r\n for i in range(len(temp)):\r\n if temp[i] == 'NUMERICAL_ORBITAL':\r\n atom_species = i - 1\r\n break\r\n\r\n atom_symbol = []\r\n atom_mass = []\r\n atom_potential = []\r\n atom_number = []\r\n atom_magnetism = []\r\n atom_positions = []\r\n atom_fix = []\r\n\r\n # get symbol, mass, potential\r\n for i in range(1, atom_species + 1):\r\n atom_symbol.append(temp[i].split()[0])\r\n atom_mass.append(float(temp[i].split()[1]))\r\n atom_potential.append(temp[i].split()[2])\r\n atom_number.append(0)\r\n atom_magnetism.append(0)\r\n atom_positions.append([])\r\n atom_fix.append([])\r\n\r\n # get basis\r\n atom_basis = []\r\n for i in range(atom_species + 2, (atom_species + 1) * 2):\r\n atom_basis.append(temp[i].split()[0])\r\n\r\n # get lattice\r\n atom_lattice_scale = float(temp[(atom_species + 1) * 2 + 1].split()[0])\r\n atom_lattice = np.array(\r\n [[float(temp[(atom_species + 1) * 2 + 3 + i].split()[:3][j])\r\n for j in range(3)] for i in range(3)])\r\n\r\n # get coordinates type\r\n atom_coor = temp[(atom_species + 1) * 2 + 7].split()[0]\r\n\r\n # get position, atoms number, magnetism, fix\r\n for i in range(atom_species):\r\n pos_start = (atom_species + 1) * 2 + 8 + 3 * i\r\n for j in range(i):\r\n pos_start += atom_number[j]\r\n atom_it = atom_symbol.index(temp[pos_start].split()[0])\r\n atom_magnetism[atom_it] = float(temp[pos_start + 1].split()[0])\r\n atom_number[atom_it] = int(temp[pos_start + 2].split()[0])\r\n\r\n atom_positions[atom_it] = np.array(\r\n [[float(temp[pos_start + 3 + i].split()[:3][j])\r\n for j in range(3)] for i in range(atom_number[atom_it])])\r\n\r\n atom_fix[atom_it] = np.array(\r\n [[int(temp[pos_start + 3 + i].split()[3:6][j])\r\n for j in range(3)] for i in range(atom_number[atom_it])])\r\n\r\n # Reset structure information and return results\r\n formula_symbol = ''\r\n formula_positions = []\r\n for i in range(atom_species):\r\n if atom_number[i] == 1:\r\n formula_symbol += atom_symbol[i]\r\n\r\n else:\r\n formula_symbol += atom_symbol[i] + str(atom_number[i])\r\n\r\n for j in range(atom_number[i]):\r\n formula_positions.append(atom_positions[i][j])\r\n\r\n formula_cell = atom_lattice * atom_lattice_scale * 0.529177210903\r\n\r\n if atom_coor == 'Direct':\r\n is_cart = False\r\n\r\n elif atom_coor == 'Cartesian':\r\n is_cart = True\r\n else:\r\n raise ValueError(\"atomic coordinate type is ERROR\")\r\n species = []\r\n for i, e in enumerate(atom_symbol):\r\n species.extend(\r\n [e, ] * atom_number[i]\r\n )\r\n\r\n return cls(formula_symbol, formula_cell, species, np.asarray(formula_positions),\r\n atom_number, atom_potential=atom_potential,\r\n atom_basis=atom_basis, is_cart=is_cart)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n pass\r\n\r\n\r\n", "id": "10155069", "language": "Python", "matching_score": 2.1800942420959473, "max_stars_count": 0, "path": "postprocess/utils/stru.py" }, { "content": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom math import floor\r\nimport numpy as np\r\n\r\nfrom .stru import Stru\r\nimport seekpath\r\n\r\n\r\nclass Kpt:\r\n def __init__(self, coords, lattice, weight=None, label=None, to_unit_cell=False,\r\n coords_are_cartesian=False):\r\n self._lattice = lattice\r\n 
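# --- Editor's note: illustrative usage sketch, not part of the original postprocess sources. ---
# Stru.from_stru() parses an ABACUS STRU file into the container defined above; the
# import path, file name and the properties printed here are assumptions based on the
# class definition, chosen only to show how the object is meant to be used.
from postprocess.utils.stru import Stru

s = Stru.from_stru('STRU')                       # parse an ABACUS structure file
print(s.formula)                                 # e.g. 'Li4Sn10'
print(s.lattice.a, s.lattice.b, s.lattice.c)     # lattice vector lengths in Angstrom
print(s.frac_coords[:3])                         # first few fractional coordinates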
self._fcoords = self._lattice.frac_coords(coords) if coords_are_cartesian else coords\r\n self._label = label\r\n self._w = weight\r\n if to_unit_cell:\r\n for i in range(len(self._fcoords)):\r\n self._fcoords[i] -= floor(self._fcoords[i])\r\n self._ccoords = self._lattice.cart_coords(self._fcoords)\r\n\r\n @property\r\n def label(self):\r\n return self._label\r\n\r\n @property\r\n def frac_coords(self):\r\n return np.copy(self._fcoords)\r\n\r\n @property\r\n def cart_coords(self):\r\n return np.copy(self._ccoords)\r\n\r\n @property\r\n def a(self):\r\n return self._fcoords[0]\r\n\r\n @property\r\n def b(self):\r\n\r\n return self._fcoords[1]\r\n\r\n @property\r\n def c(self):\r\n return self._fcoords[2]\r\n\r\n @property\r\n def weight(self):\r\n return self._w\r\n\r\n def __str__(self):\r\n return \"{} {}\".format(self.frac_coords, self.label)\r\n\r\n @staticmethod\r\n def from_kpt(filename):\r\n with open(filename, \"r\") as f:\r\n lines = [i.strip() for i in f.readlines()]\r\n title, n, line_mode, *high_kpt = lines\r\n if '#' in line_mode:\r\n line_mode = line_mode.split('#')[0].strip(' ')\r\n if line_mode.lower() in [\"direct\", \"cartesian\"]:\r\n tmp = np.asarray(high_kpt.split(), dtype=int)\r\n density, shift = tmp[:3], tmp[3:]\r\n return density, shift, line_mode\r\n elif line_mode.lower in [\"mp\", \"gamma\"]:\r\n infos = []\r\n for i in high_kpt:\r\n if not i:\r\n continue\r\n coord, lb = i.split('#')\r\n infos.append(\r\n np.asarray(coord.split(), dtype=float)\r\n )\r\n tmp = np.asarray(infos)\r\n coords, weight = tmp[:, :3], tmp[:, -1]\r\n return coords, weight, line_mode, n\r\n elif line_mode.lower() == \"line\":\r\n infos, lbs = [], []\r\n for i in high_kpt:\r\n if not i:\r\n continue\r\n coord, lb = i.split('#')\r\n infos.append(\r\n np.asarray(coord.split(), dtype=float)\r\n )\r\n lbs.append(lb.strip(' '))\r\n tmp = np.asarray(infos)\r\n coords, number = tmp[:, :3], tmp[:, -1]\r\n\r\n return coords, number, lbs, line_mode, n\r\n else:\r\n raise RuntimeError(\"something wrong! 
check the KPT file!\")\r\n\r\n @staticmethod\r\n def generate_kpath_from(filename, time_reversal=True):\r\n structure = Stru.from_stru(filename)\r\n cell = (\r\n structure.lattice.lattice,\r\n structure.frac_coords,\r\n structure.atomic_number\r\n )\r\n\r\n kpath = seekpath.get_explicit_k_path(cell, with_time_reversal=time_reversal)\r\n return kpath\r\n\r\n\r\nif __name__ == \"__main__\":\r\n pass\r\n\r\n", "id": "6546382", "language": "Python", "matching_score": 2.3038864135742188, "max_stars_count": 0, "path": "postprocess/utils/kpt.py" }, { "content": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n\r\nfrom pymatgen.core import Structure\r\nfrom pymatgen.symmetry.analyzer import SpacegroupAnalyzer\r\nfrom pymatgen.io import cif\r\n\r\nfrom utils.stru import Stru\r\nfrom utils.kpt import Kpt\r\n\r\n\r\nclass Crystal:\r\n def __init__(self, filename_stru):\r\n self.fn = filename_stru\r\n self.stru = Stru.from_stru(filename_stru)\r\n self.crystal = self.convert()\r\n\r\n def _to_poscar(self):\r\n e_str = \" \".join(self.stru.uniq_atom_species)\r\n an_str = \" \".join([str(i) for i in self.stru.atom_number])\r\n pos_str = \"\"\r\n for i in self.stru.frac_coords:\r\n pos_str += \" \".join(str(j) for j in i)\r\n pos_str += \"\\n\"\r\n formula = self.stru.formula\r\n la, lb, lc = self.stru.lattice.lattice\r\n las = \" \".join(str(i) for i in la.tolist())\r\n lbs = \" \".join(str(j) for j in lb.tolist())\r\n lcs = \" \".join(str(k) for k in lc.tolist())\r\n poscar_fmt = f\"{formula}\\n\" \\\r\n f\"1.0\\n\" \\\r\n f\" {las}\\n\" \\\r\n f\" {lbs}\\n\" \\\r\n f\" {lcs}\\n\" \\\r\n f\" {e_str}\\n\" \\\r\n f\" {an_str}\\n\" \\\r\n f\"Direct\\n\" \\\r\n f\"{pos_str}\"\r\n\r\n return poscar_fmt\r\n\r\n def convert(self):\r\n return Structure.from_str(self._to_poscar(), fmt=\"poscar\")\r\n\r\n def get_kpath(self, n=20):\r\n kpath = Kpt.generate_kpath_from(self.fn)\r\n point_coords = kpath['point_coords']\r\n _path = kpath['path']\r\n plst = []\r\n count = 0\r\n for i, ipath in enumerate(_path):\r\n if i > 0:\r\n last_k = _path[i - 1][1]\r\n if last_k == ipath[0]:\r\n ipath = [ipath[1]]\r\n for _p in ipath:\r\n coord = point_coords.get(_p)\r\n line = \"%.4f %.4f %.4f %d # %s\\n\" % (coord[0], coord[1], coord[2], n, _p)\r\n plst.append(line)\r\n count += 1\r\n _f = plst[-1].strip().split()\r\n _f[3] = '1'\r\n _fs = \" \".join(_f)\r\n pstring = ''.join(plst[:-1]) + _fs\r\n head = f\"K_POINTS \\n\" \\\r\n f\"{count} # number of high symmetry lines\\n\" \\\r\n \"Line # line-mode\\n\"\r\n return {\"kpath\": head + pstring}\r\n\r\n def matgen_structure_old_style(self):\r\n csga = SpacegroupAnalyzer(self.crystal)\r\n conv_cell = csga.get_conventional_standard_structure()\r\n formula = conv_cell.composition.formula\r\n ccl = conv_cell.as_dict().get('lattice')\r\n ccspg = conv_cell.get_space_group_info()\r\n prmi_cell = csga.get_primitive_standard_structure()\r\n pcl = prmi_cell.as_dict().get('lattice')\r\n cs = csga.get_crystal_system()\r\n pgs = csga.get_point_group_symbol()\r\n c_cell_elements = conv_cell.sites\r\n p_cell = conv_cell.get_primitive_structure()\r\n p_cell_elements = p_cell.sites\r\n density = self.crystal.density\r\n\r\n def get_sp_and_coor(elements):\r\n sp, coor = [], []\r\n for i in elements:\r\n coor.append(i.frac_coords.tolist())\r\n sp.append(i.species_string)\r\n return {'atoms_order': sp, 'atoms_coordinates': coor}\r\n\r\n c_cell_res = get_sp_and_coor(c_cell_elements)\r\n p_cell_res = get_sp_and_coor(p_cell_elements)\r\n\r\n return {\"formula\": formula, \"conventional_cell\": 
ccl, \"conventional_cell_site\": c_cell_res,\r\n \"primitive_cell\": pcl, \"primitive_cell_site\": p_cell_res, \"crystal_system\": cs,\r\n \"density\": density, \"point_group\": pgs, \"spacegroup\": ccspg,\r\n \"elements\": self.stru.atom_species}\r\n\r\n def matgen_structure_cif_opt(self):\r\n\r\n cf_data = cif.CifWriter(self.crystal)\r\n cf = str(cf_data).replace('# generated using pymatgen',\r\n '# geometry optimization by matgen')\r\n\r\n return {'cif_data': cf}\r\n\r\n @classmethod\r\n def matgen_structure_poscar_unopt(cls, filename):\r\n unopt_structure = cls(filename)._to_poscar()\r\n return {\"POSCAR\": unopt_structure}\r\n\r\n\r\nif __name__ == \"__main__\":\r\n pass\r\n", "id": "948553", "language": "Python", "matching_score": 1.6296255588531494, "max_stars_count": 0, "path": "postprocess/core/structure.py" }, { "content": "import os\nimport re\nfrom ase import Atoms\nfrom ase.io import write\n \n# Set your working path which the position of you running code\nWorkPath = os.getcwd() + '/'\n\n\ndef OpenFile(FileName = 'STRU_NOW.cif'):\n # Open file\n try:\n FileOne = open(FileName, 'r') \n Lines = FileOne.readlines()\n FileOne.close()\n return Lines\n except:\n # print('Open file error')\n return False\n\n\ndef GetStruOne():\n # Get atoms object from abacus 'STRU_NOW.cif' file \n Lines = OpenFile('STRU_NOW.cif')\n StruCell = list()\n StruSymbol = list()\n StruScalePos = list()\n FindPos = False\n for Line in Lines:\n # Locate position\n if(len(Line.split())== 1): \n if(Line.split()[0] == '_atom_site_fract_z'):\n FindPos = True\n continue\n # Read cell\n elif(len(Line.split())== 2):\n if(Line.split()[0] == '_cell_length_a'):\n StruCell.append(float(Line.split()[1]))\n elif(Line.split()[0] == '_cell_length_b'):\n StruCell.append(float(Line.split()[1]))\n elif(Line.split()[0] == '_cell_length_c'):\n StruCell.append(float(Line.split()[1]))\n elif(Line.split()[0] == '_cell_angle_alpha'):\n StruCell.append(float(Line.split()[1]))\n elif(Line.split()[0] == '_cell_angle_beta'):\n StruCell.append(float(Line.split()[1]))\n elif(Line.split()[0] == '_cell_angle_gamma'):\n StruCell.append(float(Line.split()[1]))\n # Read position\n if(FindPos):\n StruSymbol.append(Line.split()[0])\n StruScalePos.append([float(Line.split()[i]) for i in range(1, 4)])\n # Get structure\n StruOne = Atoms(symbols = StruSymbol,\n cell = StruCell,\n scaled_positions = StruScalePos)\n return StruOne\n\n\ndef GetEnergyAtoms():\n # Get energy and number of atoms from abacus 'running_cell-relax.log' file\n Lines = OpenFile('running_cell-relax.log')\n for line in Lines:\n # Find number of atoms\n if line.find('TOTAL ATOM NUMBER') != -1: \n NumberAtoms = int(line.split(' = ')[1])\n # Find total energy\n elif line.find('final etot is') != -1: \n TotalEnergy = re.findall(r'[-+]?\\d+\\.?\\d*[eE]?[-+]?\\d+', line)\n TotalEnergy = float(TotalEnergy[0])\n return NumberAtoms, TotalEnergy\n\n\nStruAll = list()\ndef ReadAbacus():\n for root, dirs, files in os.walk('.', topdown = True):\n for DirName in dirs:\n if(DirName[:4]=='file'):\n os.chdir(WorkPath + DirName + '/ase_rundir/OUT.ABACUS')\n try:\n StruAll.append(GetStruOne()) # Get structure\n At, En = GetEnergyAtoms() # Get number of atoms and energy\n StruAll[-1].NumberAtoms = len(StruAll[-1].numbers)\n StruAll[-1].Energy = float(En) / StruAll[-1].NumberAtoms\n # StruAll[-1].Item = root + DirName\n StruAll[-1].Item = DirName\n StruAll[-1].fnm = DirName\n except:\n if(len(StruAll) == 0):\n continue\n elif(StruAll[-1] == []):\n del StruAll[-1]\n elif(StruAll[-1].Energy == 
None):\n del StruAll[-1]\n os.chdir(WorkPath)\n return StruAll\n \n\nif __name__ == '__main__':\n StruAll = ReadAbacus()\n # Set results file\n ResultsDir = os.path.join(WorkPath, 'RunResults')\n if (not os.path.exists(ResultsDir)):\n os.mkdir(ResultsDir)\n os.chdir(ResultsDir)\n # Write result(structure, number of atoms, energy and density of energy)\n ResFile = open('Results.dat', 'w')\n ResFile.write('{0:<20s}{1:<24s}{2:<12s}{3:<24s}\\n'.format('StructureName', \n 'Energy(eV)',\n 'Number',\n 'DensityOfAtom(eV/atom)'))\n for stru in StruAll:\n # Write structure\n FileName = stru.Item + '_' + stru.get_chemical_formula()\n write(filename = FileName + '.cif' ,\n images = stru,\n format = 'cif') \n # Write results\n ResFile.write('{0:<20s}{1:<24s}{2:<12s}{3:<24s}\\n'.format(\n FileName,\n str(stru.Energy),\n str(stru.NumberAtoms),\n str(stru.Energy / stru.NumberAtoms)))\n ResFile.close()\n\n", "id": "12276820", "language": "Python", "matching_score": 2.1561882495880127, "max_stars_count": 0, "path": "abacus/GetEnergyStru.py" }, { "content": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @File : readcif.py\r\nimport math\r\n\r\ndef read_cif_file(cifname):\r\n ''' copy the cif information to a list'''\r\n cif_data = []\r\n with open(cifname) as f:\r\n try:\r\n for info_line in f:\r\n if len(info_line.strip()) != 0:\r\n cif_data.append(info_line.strip())\r\n except:\r\n print('Error to open file:' + cifname)\r\n exit(0)\r\n return cif_data\r\n\r\ndef correct_value(value):\r\n global cvalue\r\n value = \"\".join(value)\r\n #print(value)\r\n if \"(\" in value:\r\n cvalue = float(value.split(\"(\")[0])\r\n else:\r\n cvalue = float(value)\r\n #print(cvalue)\r\n return cvalue\r\n\r\ndef get_lattice(cif_data):\r\n ''' calculate the lattice constant'''\r\n for item in cif_data:\r\n if \"_cell_length_a\" in item:\r\n a = correct_value(item.split()[1])\r\n #a = item.split()[1]\r\n if \"_cell_length_b\" in item:\r\n b = correct_value(item.split()[1])\r\n if \"_cell_length_c\" in item:\r\n c = correct_value(item.split()[1])\r\n if \"_cell_angle_alpha\" in item:\r\n alpha = correct_value(item.split()[1]) / 180 * math.pi\r\n if \"_cell_angle_beta\" in item:\r\n beta = correct_value(item.split()[1]) / 180 * math.pi\r\n if \"_cell_angle_gamma\" in item:\r\n gamma = correct_value(item.split()[1]) / 180 * math.pi\r\n\r\n bc2 = b ** 2 + c ** 2 - 2 * b * c * math.cos(alpha)\r\n\r\n h1 = a\r\n h2 = b * math.cos(gamma)\r\n h3 = b * math.sin(gamma)\r\n h4 = c * math.cos(beta)\r\n h5 = ((h2 - h4) ** 2 + h3 ** 2 + c ** 2 - h4 ** 2 - bc2) / (2 * h3)\r\n h6 = math.sqrt(c ** 2 - h4 ** 2 - h5 ** 2)\r\n lattice = [[h1, 0., 0.], [h2, h3, 0.], [h4, h5, h6]]\r\n\r\n return lattice\r\n\r\nif __name__ == '__main__':\r\n import sys\r\n args = sys.argv\r\n filename = args[1]\r\n data = read_cif_file(filename)\r\n print(get_lattice(data))", "id": "5718640", "language": "Python", "matching_score": 1.4930874109268188, "max_stars_count": 0, "path": "result_exract/readcif.py" }, { "content": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\nModules for handing crystallographic lattice-parameters.\r\n\"\"\"\r\n\r\nimport numpy as np\r\n\r\n\r\nclass Latt:\r\n \"\"\"\r\n Construct Lattice parameter object.\r\n \"\"\"\r\n\r\n def __init__(self, matrix=None):\r\n if not isinstance(matrix, np.ndarray):\r\n _m = np.asarray(matrix,\r\n dtype=np.float64).reshape((3, 3))\r\n else:\r\n _m = matrix\r\n self._lat = _m.round(decimals=5)\r\n self._a, self._b, self._c = self._lat\r\n\r\n def __repr__(self):\r\n return 
\"{}\\n{}\\n{}\".format(*self.lattice)\r\n\r\n @property\r\n def a(self):\r\n return np.linalg.norm(self._a)\r\n\r\n @property\r\n def b(self):\r\n return np.linalg.norm(self._b)\r\n\r\n @property\r\n def c(self):\r\n return np.linalg.norm(self._c)\r\n\r\n @property\r\n def va(self):\r\n return self._a\r\n\r\n @property\r\n def vb(self):\r\n return self._b\r\n\r\n @property\r\n def vc(self):\r\n return self._c\r\n\r\n @classmethod\r\n def from_parameters(cls, a, b, c, alpha, beta, gamma):\r\n \"\"\"\r\n Construct a new Lattice object from parameters.\r\n Args:\r\n a:\r\n b:\r\n c:\r\n alpha:\r\n beta:\r\n gamma:\r\n\r\n Returns:\r\n\r\n \"\"\"\r\n angles_r = np.radians([alpha, beta, gamma])\r\n cos_alpha, cos_beta, cos_gamma = np.cos(angles_r)\r\n sin_alpha, sin_beta, sin_gamma = np.sin(angles_r)\r\n val = cls._abs_cap((cos_alpha * cos_beta - cos_gamma) / (sin_alpha * sin_beta))\r\n va = [a * sin_beta, 0.0, a * cos_beta]\r\n vb = [-b * sin_alpha * np.cos(np.arccos(val)),\r\n b * sin_alpha * np.sin(np.arccos(val)), b * cos_alpha]\r\n vc = [0.0, 0.0, float(c)]\r\n return cls(np.asarray([va, vb, vc]))\r\n\r\n @staticmethod\r\n def _abs_cap(val, max_abs_val=1):\r\n \"\"\"\r\n Return the value with its absolute value capped at max_abs_val.\r\n\r\n Particularly useful in passing values to trignometric functions where\r\n numerical errors may result in an argument > 1 being passed in.\r\n\r\n Args:\r\n\r\n val (float): Input value.\r\n\r\n max_abs_val (float): The maximum absolute value for val. Defaults to 1.\r\n\r\n Returns:\r\n val if abs(val) < 1 else sign of val * max_abs_val.\r\n \"\"\"\r\n return max(min(val, max_abs_val), -max_abs_val)\r\n\r\n @classmethod\r\n def cubic(cls, a):\r\n \"\"\"Construct cubic Lattice from lattice parameter information.\"\"\"\r\n return cls.from_parameters(a, a, a, 90, 90, 90)\r\n\r\n @classmethod\r\n def tetragonal(cls, a, c):\r\n \"\"\"Construct tetragonal Lattice from lattice parameter information.\"\"\"\r\n return cls.from_parameters(a, a, c, 90, 90, 90)\r\n\r\n @classmethod\r\n def orthorhombic(cls, a, b, c):\r\n \"\"\"Construct orthorhombic Lattice.\"\"\"\r\n return cls.from_parameters(a, b, c, 90, 90, 90)\r\n\r\n @classmethod\r\n def monoclinic(cls, a, b, c, beta):\r\n \"\"\"Construct monoclinic Lattice from lattice parameter information.\"\"\"\r\n return cls.from_parameters(a, b, c, 90, beta, 90)\r\n\r\n @classmethod\r\n def hexagonal(cls, a, c):\r\n \"\"\"Construct hexagonal Lattice from lattice parameter information.\"\"\"\r\n return cls.from_parameters(a, a, c, 90, 90, 120)\r\n\r\n @classmethod\r\n def rhombohedral(cls, a, alpha):\r\n \"\"\"Construct rhombohedral Lattice.\"\"\"\r\n return cls.from_parameters(a, a, a, alpha, alpha, alpha)\r\n\r\n @property\r\n def lattice(self):\r\n \"\"\"\r\n\r\n Returns: lattice matrix.\r\n\r\n \"\"\"\r\n return self._lat\r\n\r\n def inv_lattice(self):\r\n \"\"\"\r\n\r\n Returns: inverse lattice matrix.\r\n\r\n \"\"\"\r\n return np.linalg.inv(self._lat)\r\n\r\n def reciprocal_lattice(self):\r\n \"\"\"Return reciprocal Lattice.\"\"\"\r\n return Latt(2 * np.pi * np.linalg.inv(self._lat).T)\r\n\r\n def reciprocal_lattice_crystallographic(self):\r\n \"\"\"Return reciprocal Lattice without 2 * pi.\"\"\"\r\n return Latt(self.reciprocal_lattice().lattice / (2 * np.pi))\r\n\r\n def divide_kpts(self, threshold=0.02):\r\n if threshold == 0:\r\n return [1, ] * 3\r\n rlc = self.reciprocal_lattice_crystallographic()\r\n ratio = []\r\n for i in [rlc.a, rlc.b, rlc.c]:\r\n ratio.append(np.floor(i / threshold))\r\n return 
np.asarray(ratio).astype(int).tolist()\r\n\r\n def draw_kpath(self):\r\n pass\r\n\r\n def cart_coords(self, frac_coords):\r\n \"\"\"\r\n\r\n Args:\r\n frac_coords: fraction coords\r\n\r\n Returns: cartesian coords from fractional coords using Lattice.\r\n\r\n \"\"\"\r\n return np.dot(np.array(frac_coords), self._lat)\r\n\r\n def frac_coords(self, cart_coords):\r\n \"\"\"\r\n\r\n Args:\r\n cart_coords: cartesian coords\r\n\r\n Returns: fractional coords from cartesian coords using Lattice.\r\n\r\n \"\"\"\r\n return np.dot(np.array(cart_coords), self.inv_lattice())\r\n\r\n\r\nif __name__ == \"__main__\":\r\n pass\r\n", "id": "6383422", "language": "Python", "matching_score": 0.16268210113048553, "max_stars_count": 0, "path": "postprocess/utils/_base/latt.py" }, { "content": "import csv\nimport json\nimport os\nimport pickle\nfrom typing import List\n\nimport numpy as np\n\n\ndef save_features(path: str, features: List[np.ndarray]):\n \"\"\"\n Saves features to a compressed .npz file with array name \"features\".\n\n :param path: Path to a .npz file where the features will be saved.\n :param features: A list of 1D numpy arrays containing the features for molecules.\n \"\"\"\n np.savez_compressed(path, features=features)\n\n\ndef load_features(path: str) -> np.ndarray:\n \"\"\"\n Loads features saved in a variety of formats.\n\n Supported formats:\n - .npz compressed (assumes features are saved with name \"features\")\n - .npz (assumes features are saved with name \"features\")\n - .npy\n - .csv/.txt (assumes comma-separated features with a header and with one line per molecule)\n - .pkl/.pckl/.pickle containing a sparse numpy array (TODO: remove this option once we are no longer dependent on it)\n\n All formats assume that the SMILES strings loaded elsewhere in the code are in the same\n order as the features loaded here.\n\n :param path: Path to a file containing features.\n :return: A 2D numpy array of size (num_molecules, features_size) containing the features.\n \"\"\"\n extension = os.path.splitext(path)[1]\n\n if extension == '.npz':\n features = np.load(path)['features']\n elif extension == '.npy':\n features = np.load(path)\n elif extension in ['.csv', '.txt']:\n with open(path) as f:\n reader = csv.reader(f)\n next(reader) # skip header\n features = np.array([[float(value) for value in row] for row in reader])\n elif extension in ['.pkl', '.pckl', '.pickle']:\n with open(path, 'rb') as f:\n features = np.array([np.squeeze(np.array(feat.todense())) for feat in pickle.load(f)])\n else:\n raise ValueError(f'Features path extension {extension} not supported.')\n\n return features\n\n\nclass AtomInitializer(object):\n \"\"\"\n Base class for initializing the vector representation for atoms.\n\n !!! 
Use one AtomInitializer per dataset !!!\n \"\"\"\n\n def __init__(self, atom_types):\n self.atom_types = set(atom_types)\n self._embedding = {}\n\n def get_atom_features(self, atom_type):\n assert atom_type in self.atom_types\n return self._embedding[atom_type]\n\n def load_state_dict(self, state_dict):\n self._embedding = state_dict\n self.atom_types = set(self._embedding.keys())\n self._decodedict = {idx: atom_type for atom_type, idx in self._embedding.items()}\n\n def state_dict(self):\n # 92 dimensions\n return self._embedding\n\n def decode(self, idx):\n if not hasattr(self, '_decodedict'):\n self._decodedict = {idx: atom_type for atom_type, idx in self._embedding.items()}\n return self._decodedict[idx]\n\n\nclass AtomCustomJSONInitializer(AtomInitializer):\n \"\"\"\n Initialize atom feature vectors using a JSON file, which is a python\n dictionary mapping from element number to a list representing the\n feature vector of the element.\n\n Parameters\n ----------\n\n elem_embedding_file: str\n The path to the .json file\n \"\"\"\n\n def __init__(self, elem_embedding_file):\n with open(elem_embedding_file) as f:\n elem_embedding = json.load(f)\n elem_embedding = {int(key): value for key, value in elem_embedding.items()}\n atom_types = set(elem_embedding.keys())\n super(AtomCustomJSONInitializer, self).__init__(atom_types)\n for key, value in elem_embedding.items():\n self._embedding[key] = np.array(value, dtype=float)\n\n\nclass GaussianDistance(object):\n \"\"\"\n Expands the distance by Gaussian basis.\n\n Unit: angstrom\n \"\"\"\n\n def __init__(self, dmin, dmax, step, var=None):\n \"\"\"\n Parameters\n ----------\n\n dmin: float\n Minimum interatomic distance\n dmax: float\n Maximum interatomic distance\n step: float\n Step size for the Gaussian filter\n \"\"\"\n assert dmin < dmax\n assert dmax - dmin > step\n self.filter = np.arange(dmin, dmax + step, step)\n self.var = var if var is not None else step\n\n def expand(self, distances):\n \"\"\"\n Apply Gaussian distance filter to a numpy distance array\n\n Parameters\n ----------\n\n distances: np.array shape n-d array\n A distance matrix of any shape\n\n Returns\n -------\n expanded_distance: shape (n+1)-d array\n Expanded distance matrix with the last dimension of length\n len(self.filter)\n \"\"\"\n return np.exp(-(distances[..., np.newaxis] - self.filter) ** 2 / self.var ** 2)\n\n\ndef load_radius_dict(fp):\n with open(fp, 'r') as f:\n lines = f.readlines()\n lines = [line.replace(' ', '').strip('\\n') for line in lines][1:-1]\n return {item.split(':')[0]: np.float(item.split(':')[1]) for item in lines}\n\n", "id": "4337778", "language": "Python", "matching_score": 3.5671393871307373, "max_stars_count": 8, "path": "crystalnet/features/utils.py" }, { "content": "from .featurization import BatchMolGraph, get_atom_fdim, get_bond_fdim, mol2graph, clear_cache\r\nfrom .utils import load_features, save_features, AtomInitializer, AtomCustomJSONInitializer, GaussianDistance, \\\r\n load_radius_dict", "id": "8320428", "language": "Python", "matching_score": 1.7167105674743652, "max_stars_count": 8, "path": "crystalnet/features/__init__.py" }, { "content": "from argparse import Namespace\nfrom typing import List, Union\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\n\nfrom chemprop.features import BatchMolGraph, get_atom_fdim, get_bond_fdim, mol2graph\nfrom chemprop.nn_utils import index_select_ND, get_activation_function\nimport math\nimport torch.nn.functional as F\nfrom chemprop.data import CrystalDataset\n\n\nclass 
MPNEncoder(nn.Module):\n def __init__(self, args: Namespace, atom_fdim: int, bond_fdim: int):\n super(MPNEncoder, self).__init__()\n self.atom_fdim = atom_fdim\n self.bond_fdim = bond_fdim\n self.hidden_size = args.hidden_size\n self.bias = args.bias\n self.depth = args.depth\n self.dropout = args.dropout\n self.atom_messages = args.atom_messages\n self.args = args\n\n # Dropout\n self.dropout_layer = nn.Dropout(p=self.dropout)\n\n # Activation\n self.act_func = get_activation_function(args.activation)\n\n # Input\n input_dim = self.atom_fdim\n self.W_i_atom = nn.Linear(input_dim, self.hidden_size, bias=self.bias)\n input_dim = self.bond_fdim\n self.W_i_bond = nn.Linear(input_dim, self.hidden_size, bias=self.bias)\n\n for depth in range(self.depth - 1):\n self._modules[f'W_h_{depth}'] = nn.Linear(self.hidden_size, self.hidden_size, bias=self.bias)\n \n self.W_o = nn.Linear(self.hidden_size * 2, self.hidden_size)\n \n self.gru = BatchGRU(self.hidden_size)\n \n self.lr = nn.Linear(self.hidden_size * 3, self.hidden_size, bias=self.bias)\n\n def forward(self, mol_graph: BatchMolGraph) -> torch.Tensor:\n\n f_atoms, f_bonds, a2b, b2a, b2revb, a_scope, b_scope = mol_graph.get_components()\n if self.args.cuda or next(self.parameters()).is_cuda:\n f_atoms, f_bonds, a2b, b2a, b2revb = (f_atoms.cuda(), f_bonds.cuda(), a2b.cuda(), b2a.cuda(), b2revb.cuda())\n\n # Input\n input_atom = self.W_i_atom(f_atoms) # num_atoms x hidden_size\n input_atom = self.act_func(input_atom)\n message_atom = input_atom.clone()\n\n input_bond = self.W_i_bond(f_bonds) # num_bonds x hidden_size\n input_bond = self.act_func(input_bond)\n message_bond = input_bond.clone()\n\n # Message passing\n for depth in range(self.depth - 1):\n agg_message = index_select_ND(message_bond, a2b)\n agg_message = agg_message.sum(dim=1) * agg_message.max(dim=1)[0]\n message_atom = message_atom + agg_message\n \n # directed graph\n rev_message = message_bond[b2revb] # num_bonds x hidden\n message_bond = message_atom[b2a] - rev_message # num_bonds x hidden\n \n message_bond = self._modules[f'W_h_{depth}'](message_bond)\n message_bond = self.dropout_layer(self.act_func(input_bond + message_bond))\n \n agg_message = index_select_ND(message_bond, a2b)\n agg_message = agg_message.sum(dim=1) * agg_message.max(dim=1)[0]\n agg_message = self.lr(torch.cat([agg_message, message_atom, input_atom], 1))\n agg_message = self.gru(agg_message, a_scope)\n \n atom_hiddens = self.dropout_layer(self.act_func(self.W_o(agg_message))) # num_atoms x hidden\n \n # Readout\n mol_vecs = []\n for i, (a_start, a_size) in enumerate(a_scope):\n if a_size == 0:\n assert 0\n cur_hiddens = atom_hiddens.narrow(0, a_start, a_size)\n mol_vecs.append(cur_hiddens.mean(0))\n mol_vecs = torch.stack(mol_vecs, dim=0)\n \n return mol_vecs # B x H\n\n\nclass BatchGRU(nn.Module):\n def __init__(self, hidden_size=300):\n super(BatchGRU, self).__init__()\n self.hidden_size = hidden_size\n self.gru = nn.GRU(self.hidden_size, self.hidden_size, batch_first=True, bidirectional=True)\n self.bias = nn.Parameter(torch.Tensor(self.hidden_size))\n self.bias.data.uniform_(-1.0 / math.sqrt(self.hidden_size), 1.0 / math.sqrt(self.hidden_size))\n\n def forward(self, node, a_scope):\n hidden = node\n message = F.relu(node + self.bias)\n MAX_atom_len = max([a_size for a_start, a_size in a_scope])\n # padding\n message_lst = []\n hidden_lst = []\n for i, (a_start, a_size) in enumerate(a_scope):\n if a_size == 0:\n assert 0\n cur_message = message.narrow(0, a_start, a_size)\n cur_hidden = hidden.narrow(0, 
a_start, a_size)\n hidden_lst.append(cur_hidden.max(0)[0].unsqueeze(0).unsqueeze(0))\n \n cur_message = torch.nn.ZeroPad2d((0, 0, 0, MAX_atom_len-cur_message.shape[0]))(cur_message)\n message_lst.append(cur_message.unsqueeze(0))\n \n message_lst = torch.cat(message_lst, 0) # (batch, MAX_atom_len, hidden)\n hidden_lst = torch.cat(hidden_lst, 1) # (1, batch, hidden)\n hidden_lst = hidden_lst.repeat(2, 1, 1) # (2, batch, hidden)\n cur_message, cur_hidden = self.gru(message_lst, hidden_lst) # message = (batch, MAX_atom_len, 2 * hidden)\n \n # unpadding\n cur_message_unpadding = []\n for i, (a_start, a_size) in enumerate(a_scope):\n cur_message_unpadding.append(cur_message[i, :a_size].view(-1, 2*self.hidden_size))\n cur_message_unpadding = torch.cat(cur_message_unpadding, 0)\n \n message = torch.cat([torch.cat([message.narrow(0, 0, 1), message.narrow(0, 0, 1)], 1), cur_message_unpadding], 0)\n return message\n\n\nclass MPN(nn.Module):\n def __init__(self,\n args: Namespace,\n atom_fdim: int = None,\n bond_fdim: int = None,\n graph_input: bool = False):\n super(MPN, self).__init__()\n self.args = args\n self.atom_fdim = atom_fdim or get_atom_fdim(args)\n self.bond_fdim = bond_fdim or get_bond_fdim(args) + args.atom_messages * self.atom_fdim\n self.graph_input = graph_input\n self.encoder = MPNEncoder(self.args, self.atom_fdim, self.bond_fdim)\n\n def forward(self, crystal_batch: CrystalDataset) -> torch.Tensor:\n # if features only, batch won't even be used\n if not self.graph_input:\n batch = mol2graph(crystal_batch, self.args)\n else:\n batch = crystal_batch\n\n return self.encoder.forward(batch)\n\n", "id": "10741456", "language": "Python", "matching_score": 3.9402387142181396, "max_stars_count": 8, "path": "crystalnet/models/mpn.py" }, { "content": "from argparse import Namespace\r\nfrom typing import List, Tuple\r\n\r\nimport torch\r\nfrom chemprop.data import CrystalDatapoint, CrystalDataset\r\n\r\n\r\n# Memoization\r\nCRYSTAL_TO_GRAPH = {}\r\nATOM_FDIM = 92\r\nBOND_FDIM = 54\r\n\r\n\r\ndef clear_cache():\r\n \"\"\"Clears featurization cache.\"\"\"\r\n global SMILES_TO_GRAPH\r\n SMILES_TO_GRAPH = {}\r\n\r\n\r\ndef get_atom_fdim(args: Namespace) -> int:\r\n \"\"\"\r\n Gets the dimensionality of atom features.\r\n\r\n :param: Arguments.\r\n \"\"\"\r\n return ATOM_FDIM\r\n\r\n\r\ndef get_bond_fdim(args: Namespace) -> int:\r\n \"\"\"\r\n Gets the dimensionality of bond features.\r\n\r\n :param: Arguments.\r\n \"\"\"\r\n return BOND_FDIM\r\n\r\n\r\nclass MolGraph:\r\n \"\"\"\r\n A MolGraph represents the graph structure and featurization of a single molecule.\r\n\r\n A MolGraph computes the following attributes:\r\n - smiles: Smiles string.\r\n - n_atoms: The number of atoms in the molecule.\r\n - n_bonds: The number of bonds in the molecule.\r\n - f_atoms: A mapping from an atom index to a list atom features.\r\n - f_bonds: A mapping from a bond index to a list of bond features.\r\n - a2b: A mapping from an atom index to a list of incoming bond indices.\r\n - b2a: A mapping from a bond index to the index of the atom the bond originates from.\r\n - b2revb: A mapping from a bond index to the index of the reverse bond.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n crystal_point: CrystalDatapoint,\r\n args: Namespace):\r\n \"\"\"\r\n Computes the graph structure and featurization of a molecule.\r\n\r\n :param crystal_point: a CrystalDatapoint object\r\n :param args: Arguments.\r\n \"\"\"\r\n self.name = crystal_point.name\r\n self.crystal = crystal_point.crystal\r\n self.n_atoms = len(self.crystal) # 
number of atoms\r\n self.n_bonds = 0 # number of bonds\r\n self.f_atoms = [] # mapping from atom index to atom features\r\n self.f_bonds = [] # mapping from bond index to concat(in_atom, bond) features\r\n self.a2b = [] # mapping from atom index to incoming bond indices\r\n self.b2a = [] # mapping from bond index to the index of the atom the bond is coming from\r\n self.b2revb = [] # mapping from bond index to the index of the reverse bond\r\n\r\n # Get atom features\r\n for _ in range(self.n_atoms):\r\n self.a2b.append([])\r\n self.f_atoms = crystal_point.atom_features\r\n\r\n # Get bond features\r\n for a1 in range(self.n_atoms):\r\n point_idxs = crystal_point.point_indices[a1, :]\r\n bond_features = crystal_point.bond_features[a1, :, :]\r\n\r\n for a2, bond_feature in zip(point_idxs, bond_features):\r\n if args.atom_messages:\r\n self.f_bonds.append(self.f_atoms[a1].tolist() + bond_feature.tolist())\r\n self.f_bonds.append(self.f_atoms[a2].tolist() + bond_feature.tolist())\r\n else:\r\n self.f_bonds.append(bond_feature.tolist())\r\n self.f_bonds.append(bond_feature.tolist())\r\n\r\n # Update index mappings\r\n b1 = self.n_bonds\r\n b2 = b1 + 1\r\n self.a2b[a2].append(b1) # b1 = a1 --> a2\r\n self.b2a.append(a1)\r\n self.a2b[a1].append(b2) # b2 = a2 --> a1\r\n self.b2a.append(a2)\r\n self.b2revb.append(b2)\r\n self.b2revb.append(b1)\r\n self.n_bonds += 2\r\n\r\n\r\nclass BatchMolGraph:\r\n \"\"\"\r\n A BatchMolGraph represents the graph structure and featurization of a batch of molecules.\r\n\r\n A BatchMolGraph contains the attributes of a MolGraph plus:\r\n - smiles_batch: A list of smiles strings.\r\n - n_mols: The number of molecules in the batch.\r\n - atom_fdim: The dimensionality of the atom features.\r\n - bond_fdim: The dimensionality of the bond features (technically the combined atom/bond features).\r\n - a_scope: A list of tuples indicating the start and end atom indices for each molecule.\r\n - b_scope: A list of tuples indicating the start and end bond indices for each molecule.\r\n - max_num_bonds: The maximum number of bonds neighboring an atom in this batch.\r\n - b2b: (Optional) A mapping from a bond index to incoming bond indices.\r\n - a2a: (Optional): A mapping from an atom index to neighboring atom indices.\r\n \"\"\"\r\n\r\n def __init__(self, mol_graphs: List[MolGraph], args: Namespace):\r\n\r\n # get feature dim\r\n self.atom_fdim = get_atom_fdim(args)\r\n self.bond_fdim = get_bond_fdim(args) + args.atom_messages * self.atom_fdim # * 2\r\n\r\n # Start n_atoms and n_bonds at 1 b/c zero padding\r\n self.n_atoms = 1 # number of atoms (start at 1 b/c need index 0 as padding)\r\n self.n_bonds = 1 # number of bonds (start at 1 b/c need index 0 as padding)\r\n self.a_scope = [] # list of tuples indicating (start_atom_index, num_atoms) for each molecule\r\n self.b_scope = [] # list of tuples indicating (start_bond_index, num_bonds) for each molecule\r\n\r\n # All start with zero padding so that indexing with zero padding returns zeros\r\n f_atoms = [[0] * self.atom_fdim] # atom features\r\n f_bonds = [[0] * self.bond_fdim] # combined atom/bond features\r\n a2b = [[]] # mapping from atom index to incoming bond indices\r\n b2a = [0] # mapping from bond index to the index of the atom the bond is coming from\r\n b2revb = [0] # mapping from bond index to the index of the reverse bond\r\n\r\n for mol_graph in mol_graphs:\r\n f_atoms.extend(mol_graph.f_atoms)\r\n f_bonds.extend(mol_graph.f_bonds)\r\n\r\n for a in range(mol_graph.n_atoms):\r\n a2b.append([b + self.n_bonds for b 
in mol_graph.a2b[a]]) # if b != -1 else 0\r\n\r\n for b in range(mol_graph.n_bonds):\r\n b2a.append(self.n_atoms + mol_graph.b2a[b])\r\n b2revb.append(self.n_bonds + mol_graph.b2revb[b])\r\n\r\n self.a_scope.append((self.n_atoms, mol_graph.n_atoms))\r\n self.b_scope.append((self.n_bonds, mol_graph.n_bonds))\r\n self.n_atoms += mol_graph.n_atoms\r\n self.n_bonds += mol_graph.n_bonds\r\n\r\n # max with 1 to fix a crash in rare case of all single-heavy-atom mols\r\n self.max_num_bonds = max(1, max(len(in_bonds) for in_bonds in a2b))\r\n\r\n self.f_atoms = torch.FloatTensor(f_atoms)\r\n self.f_bonds = torch.FloatTensor(f_bonds)\r\n self.a2b = torch.LongTensor([a2b[a][:self.max_num_bonds] + [0] * (self.max_num_bonds - len(a2b[a])) for a in range(self.n_atoms)])\r\n self.b2a = torch.LongTensor(b2a)\r\n self.b2revb = torch.LongTensor(b2revb)\r\n self.b2b = None # try to avoid computing b2b b/c O(n_atoms^3)\r\n self.a2a = None # only needed if using atom messages\r\n\r\n def get_components(self) -> Tuple[torch.FloatTensor, torch.FloatTensor,\r\n torch.LongTensor, torch.LongTensor, torch.LongTensor,\r\n List[Tuple[int, int]], List[Tuple[int, int]]]:\r\n \"\"\"\r\n Returns the components of the BatchMolGraph.\r\n\r\n :return: A tuple containing PyTorch tensors with the atom features, bond features, and graph structure\r\n and two lists indicating the scope of the atoms and bonds (i.e. which molecules they belong to).\r\n \"\"\"\r\n return self.f_atoms, self.f_bonds, self.a2b, self.b2a, self.b2revb, self.a_scope, self.b_scope\r\n\r\n def get_b2b(self) -> torch.LongTensor:\r\n \"\"\"\r\n Computes (if necessary) and returns a mapping from each bond index to all the incoming bond indices.\r\n\r\n :return: A PyTorch tensor containing the mapping from each bond index to all the incoming bond indices.\r\n \"\"\"\r\n\r\n if self.b2b is None:\r\n b2b = self.a2b[self.b2a] # num_bonds x max_num_bonds\r\n # b2b includes reverse edge for each bond so need to mask out\r\n revmask = (b2b != self.b2revb.unsqueeze(1).repeat(1, b2b.size(1))).long() # num_bonds x max_num_bonds\r\n self.b2b = b2b * revmask\r\n\r\n return self.b2b\r\n\r\n def get_a2a(self) -> torch.LongTensor:\r\n \"\"\"\r\n Computes (if necessary) and returns a mapping from each atom index to all neighboring atom indices.\r\n\r\n :return: A PyTorch tensor containing the mapping from each bond index to all the incodming bond indices.\r\n \"\"\"\r\n if self.a2a is None:\r\n # b = a1 --> a2\r\n # a2b maps a2 to all incoming bonds b\r\n # b2a maps each bond b to the atom it comes from a1\r\n # thus b2a[a2b] maps atom a2 to neighboring atoms a1\r\n self.a2a = self.b2a[self.a2b] # num_atoms x max_num_bonds\r\n\r\n return self.a2a\r\n\r\n\r\ndef mol2graph(crystal_batch: CrystalDataset, args: Namespace) -> BatchMolGraph:\r\n \"\"\"\r\n Converts a list of SMILES strings to a BatchMolGraph containing the batch of molecular graphs.\r\n\r\n :param crystal_batch: a list of CrystalDataset\r\n :param args: Arguments.\r\n :return: A BatchMolGraph containing the combined molecular graph for the molecules\r\n \"\"\"\r\n crystal_graphs = list()\r\n for crystal_point in crystal_batch:\r\n if crystal_point in CRYSTAL_TO_GRAPH.keys():\r\n crystal_graph = CRYSTAL_TO_GRAPH[crystal_point]\r\n else:\r\n crystal_graph = MolGraph(crystal_point, args)\r\n if not args.no_cache and len(CRYSTAL_TO_GRAPH) <= 10000:\r\n CRYSTAL_TO_GRAPH[crystal_point] = crystal_graph\r\n crystal_graphs.append(crystal_graph)\r\n\r\n return BatchMolGraph(crystal_graphs, args)\r\n", "id": "3207356", 
"language": "Python", "matching_score": 1.6727298498153687, "max_stars_count": 8, "path": "crystalnet/features/featurization.py" }, { "content": "from ccdc.descriptors import MolecularDescriptors as MD, GeometricDescriptors as GD\nfrom ccdc.io import EntryReader\ncsd = EntryReader('CSD')\nimport ccdc.molecule\nimport sys\nimport os\nimport numpy as np\nimport math\n\nimport pickle \nfrom script.get_atom_features import get_atom_features\nfrom script.get_bond_features import get_bond_features\nfrom script.remove_waters import remove_waters, remove_single_oxygen, get_largest_components\n\nmol_name = sys.argv[1]\nmol = csd.molecule(mol_name)\n\nmol = remove_waters(mol)\nmol = remove_single_oxygen(mol)\nif len(mol.components) > 1:\n lg_id = get_largest_components(mol)\n mol = mol.components[lg_id]\n\nmol.remove_hydrogens()\n\natom_features = get_atom_features(mol)\nbond_features = get_bond_features(mol)\n\nmol_features = [atom_features, bond_features]\n\nsave_path = './processed/' + mol_name + '.p'\n\nif not os.path.exists(save_path):\n pickle.dump(mol_features,open(save_path, \"wb\"))\n \n \n", "id": "2159976", "language": "Python", "matching_score": 4.810291290283203, "max_stars_count": 0, "path": "process/prepare_mof_features.py" }, { "content": "from ccdc.descriptors import MolecularDescriptors as MD, GeometricDescriptors as GD\nfrom ccdc.io import EntryReader\ncsd = EntryReader('CSD')\nimport ccdc.molecule\nimport sys\nimport os\nimport numpy as np\nimport math\n\nimport pickle \nfrom tools.get_atom_features import get_atom_features\nfrom tools.get_bond_features import get_bond_features\nfrom tools.remove_waters import remove_waters, remove_single_oxygen, get_largest_components\nimport numpy as np\nfrom sklearn.metrics import pairwise_distances\n\nmol_name = sys.argv[1]\nmol = csd.molecule(mol_name)\n\n\n# remove waters\nmol = remove_waters(mol)\nmol = remove_single_oxygen(mol)\n\n# remove other solvates, here we remove all small components.\n\nif len(mol.components) > 1:\n lg_id = get_largest_components(mol)\n mol = mol.components[lg_id]\n\nmol.remove_hydrogens()\n\natom_features = np.array([get_atom_features(atom) for atom in mol.atoms])\nbond_matrix = get_bond_features(mol)\n\npos_matrix = np.array([[atom.coordinates.x, atom.coordinates.y, atom.coordinates.z] for atom in mol.atoms])\ndist_matrix = pairwise_distances(pos_matrix)\n\nmol_features = [atom_features, bond_matrix, dist_matrix]\n\nsave_path = '../data/processed/' + mol_name + '.p'\n\nif not os.path.exists(save_path):\n pickle.dump(mol_features,open(save_path, \"wb\"))\n \n \n", "id": "1241739", "language": "Python", "matching_score": 5.729019641876221, "max_stars_count": 0, "path": "process/process_csd_data.py" }, { "content": "from ccdc.descriptors import MolecularDescriptors as MD, GeometricDescriptors as GD\nfrom ccdc.io import EntryReader\ncsd = EntryReader('CSD')\nimport ccdc.molecule\nimport sys\nimport os\nimport numpy as np\nimport math\n\nimport pickle \nfrom tools.get_atom_features import get_atom_features\nfrom tools.get_bond_features import get_bond_features_en\nfrom tools.remove_waters import remove_waters, remove_single_oxygen, get_largest_components\nimport numpy as np\n\nmol_name = sys.argv[1]\nmol = csd.molecule(mol_name)\n\n\n# remove waters\nmol = remove_waters(mol)\nmol = remove_single_oxygen(mol)\n\n# remove other solvates, here we remove all small components.\n\nif len(mol.components) > 1:\n lg_id = get_largest_components(mol)\n mol = mol.components[lg_id]\n\nmol.remove_hydrogens()\n\natom_features = 
np.array([get_atom_features(atom) for atom in mol.atoms])\nrow, col = get_bond_features_en(mol)\n\npos_matrix = np.array([[atom.coordinates.x, atom.coordinates.y, atom.coordinates.z] for atom in mol.atoms])\n\nmol_features = [atom_features, row, col, pos_matrix]\n\nsave_path = '../data/processed_en/' + mol_name + '.p'\n\nos.makedirs('../data/processed_en/', exist_ok=True)\n\nif not os.path.exists(save_path):\n pickle.dump(mol_features,open(save_path, \"wb\"))\n \n \n", "id": "2231660", "language": "Python", "matching_score": 0.7285513281822205, "max_stars_count": 0, "path": "process/process_csd_data_baselines.py" }, { "content": "import numpy as np\r\n\r\ndef get_atom_features(atom):\r\n attributes = []\r\n attributes += one_hot_vector(\r\n atom.atomic_number,\r\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, \\\r\n 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 19, 30, \\\r\n 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, \\\r\n 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, \\\r\n 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, \\\r\n 73, 74, 75, 76, 77, 78, 79, 80, 81, 999]\r\n )\r\n # Connected numbers\r\n attributes += one_hot_vector(\r\n len(atom.neighbours),\r\n [0, 1, 2, 3, 4, 5, 6, 999]\r\n )\r\n\r\n # Test whether or not the atom is a hydrogen bond acceptor\r\n attributes.append(atom.is_acceptor)\r\n attributes.append(atom.is_chiral)\r\n\r\n # Test whether the atom is part of a ring system.\r\n attributes.append(atom.is_cyclic)\r\n attributes.append(atom.is_metal)\r\n\r\n # Test Whether this is a spiro atom.\r\n attributes.append(atom.is_spiro)\r\n\r\n return np.array(list(attributes), dtype=np.float32)\r\n\r\ndef one_hot_vector(val, lst):\r\n \"\"\"Converts a value to a one-hot vector based on options in lst\"\"\"\r\n if val not in lst:\r\n val = lst[-1]\r\n return map(lambda x: x == val, lst)\r\n", "id": "12236277", "language": "Python", "matching_score": 0.9583180546760559, "max_stars_count": 0, "path": "process/tools/get_atom_features.py" }, { "content": "import numpy as np\r\nimport math\r\n\r\ndef get_bond_features(mol):\r\n \"\"\"Calculate bond features.\r\n\r\n Args:\r\n mol (ccdc.molecule.bond): An CSD mol object.\r\n\r\n Returns:\r\n bond matriax.\r\n bond distance.\r\n \"\"\"\r\n adj_matrix = np.eye(len(mol.atoms))\r\n dis_matrix = []\r\n\r\n for bond in mol.bonds:\r\n atom1,atom2 = bond.atoms\r\n # construct atom matrix.\r\n adj_matrix[atom1.index, atom2.index] = adj_matrix[atom2.index, atom1.index] = 1\r\n\r\n # calculate bond distance.\r\n #print(atom1,atom2)\r\n #a_array = [atom1.coordinates.x, atom1.coordinates.y, atom1.coordinates.z]\r\n #b_array = [atom2.coordinates.x, atom2.coordinates.y, atom2.coordinates.z]\r\n #bond_length = calc_distance(a_array, b_array)\r\n #dis_matrix.append(bond_length)\r\n \r\n return adj_matrix\r\n\r\ndef get_bond_features_en(mol):\r\n \"\"\"Calculate bond features.\r\n\r\n Args:\r\n mol (ccdc.molecule.bond): An CSD mol object.\r\n\r\n Returns:\r\n bond matriax (coo).\r\n \"\"\"\r\n row, col = [], []\r\n\r\n for bond in mol.bonds:\r\n atom1,atom2 = bond.atoms\r\n # construct atom matrix.\r\n row.append(atom1.index)\r\n col.append(atom2.index)\r\n row.append(atom2.index)\r\n col.append(atom1.index)\r\n \r\n return row, col\r\n \r\n# function to obtain bond distance\r\ndef calc_distance(a_array, b_array):\r\n delt_d = np.array(a_array) - np.array(b_array)\r\n distance = math.sqrt(delt_d[0]**2 + delt_d[1]**2 + delt_d[2]**2)\r\n return round(distance,3)\r\n \r\n\r\n", "id": "2880147", "language": 
"Python", "matching_score": 0.9271413087844849, "max_stars_count": 0, "path": "process/tools/get_bond_features.py" }, { "content": "import ccdc.molecule\n\ndef get_largest_components(m):\n s = []\n for c in m.components:\n n = len(c.atoms)\n id_n = int(str(c.identifier))\n l = [(n, id_n)]\n s.append(l)\n t = sorted(s, key=lambda k: k[0])\n largest_id = t[-1][0][1] - 1\n\n return largest_id\n\ndef remove_waters(m):\n keep = []\n waters = 0\n for s in m.components:\n ats = [at.atomic_symbol for at in s.atoms]\n if len(ats) == 3:\n ats.sort()\n if ats[0] == 'H' and ats[1] == 'H' and ats[2] == 'O':\n waters += 1\n else:\n keep.append(s)\n else:\n keep.append(s)\n new = ccdc.molecule.Molecule(m.identifier)\n for k in keep:\n new.add_molecule(k)\n return new\n\ndef remove_single_oxygen(m):\n keep = []\n waters = 0\n for s in m.components:\n ats = [at.atomic_symbol for at in s.atoms]\n if len(ats) == 1:\n ats.sort()\n if ats[0] == 'O':\n waters += 1\n else:\n keep.append(s)\n else:\n keep.append(s)\n new = ccdc.molecule.Molecule(m.identifier)\n for k in keep:\n new.add_molecule(k)\n return new\n", "id": "2805091", "language": "Python", "matching_score": 0.21450676023960114, "max_stars_count": 0, "path": "process/tools/remove_waters.py" }, { "content": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom collections import defaultdict\r\nfrom functools import lru_cache\r\nfrom decimal import Decimal\r\nimport networkx as nx\r\n\r\nimport logging\r\n\r\nlogging.basicConfig(level=logging.INFO,\r\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\nclass FloatRange:\r\n def __init__(self, start, end, step):\r\n self.s = Decimal(str(start))\r\n self.e = Decimal(str(end))\r\n self.step = Decimal(str(step))\r\n\r\n def __iter__(self):\r\n if self.s < self.e:\r\n pop_item = self.s\r\n while pop_item < self.e:\r\n yield float(pop_item)\r\n pop_item += self.step\r\n elif self.s == self.e:\r\n yield self.s\r\n else:\r\n pop_item = self.s\r\n while pop_item > self.e:\r\n yield float(pop_item)\r\n pop_item -= self.step\r\n\r\n\r\nclass MergeNestedLst:\r\n def __init__(self, nested_list):\r\n self._nl = nested_list\r\n self._sdt = self._start_a_dict()\r\n self._stl = self._start_a_lst()\r\n\r\n def _start_a_dict(self):\r\n dt = defaultdict(set)\r\n for i in self._nl:\r\n dt[i[0]] = dt[i[0]].union(set(i[1:]))\r\n\r\n return dt\r\n\r\n def _start_a_lst(self):\r\n stl = []\r\n for k, v in self._sdt.items():\r\n if k not in v:\r\n v.add(k)\r\n stl.append(list(v))\r\n\r\n return stl\r\n\r\n @lru_cache()\r\n def merge(self):\r\n G = nx.Graph()\r\n G.add_nodes_from(sum(self._stl, []))\r\n q = [[(s[i], s[i+1]) for i in range(len(s) - 1)] for s in self._stl]\r\n for i in q:\r\n G.add_edges_from(i)\r\n return sorted([list(i) for i in nx.connected_components(G)])\r\n\r\n\r\nif __name__ == '__main__':\r\n pass\r\n\r\n", "id": "10056648", "language": "Python", "matching_score": 0.9359731674194336, "max_stars_count": 0, "path": "mat2d_pkg/utils.py" }, { "content": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom collections.abc import Iterable\r\nfrom functools import lru_cache\r\n# from concurrent.futures.thread import ThreadPoolExecutor\r\n# from concurrent.futures import as_completed\r\n\r\nfrom mat2d_pkg.para import settings\r\nfrom mat2d_pkg.utils import MergeNestedLst\r\n\r\nimport logging\r\nlogging.basicConfig(level=logging.INFO,\r\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\nlogger = 
logging.getLogger(__name__)\r\n\r\n\r\nclass Bonded:\r\n def __init__(self, elements_symbol, distance_array, atomic_position, bonding_scheme=None, tolerance=None):\r\n self._es = elements_symbol\r\n self._da = distance_array\r\n self._ap = atomic_position\r\n self._bs = bonding_scheme\r\n self.tolerance = tolerance\r\n\r\n @property\r\n def uniq_symbol(self):\r\n us = []\r\n for i in self._es:\r\n if i not in us:\r\n us.append(i)\r\n return us\r\n\r\n @property\r\n def elements_radii(self):\r\n erd = {}\r\n for ele in self.uniq_symbol:\r\n erd.update(\r\n {\r\n ele: settings.get(self._bs).get(ele)\r\n }\r\n )\r\n return erd\r\n\r\n def _scheme(self, tolerance):\r\n frame_dim, _ = self._da.shape\r\n bonding_pair = []\r\n\r\n for i in range(frame_dim):\r\n atom_a_radii = self.elements_radii.get(self._es[i])\r\n for j in range(frame_dim):\r\n if j <= i:\r\n continue\r\n atom_b_radii = self.elements_radii.get(self._es[j])\r\n bonding = self._bonding_standard(ra=atom_a_radii,\r\n rb=atom_b_radii,\r\n distance=self._da[i][j],\r\n delta=tolerance)\r\n\r\n if bonding:\r\n bonding_pair.append([i, j])\r\n return {\r\n tolerance: bonding_pair\r\n }\r\n\r\n def _bonding_standard(self, ra, rb, distance, delta):\r\n try:\r\n if 'vdw' in self._bs.lower():\r\n return bool(distance < ra + rb - delta)\r\n return bool(distance < ra + rb + delta)\r\n except TypeError:\r\n raise Exception(\"This radii table may not contain this element, please check\")\r\n\r\n def get_bonding_pair(self):\r\n if not isinstance(self.tolerance, Iterable):\r\n return self._scheme(self.tolerance)\r\n\r\n total_bonding_type = {}\r\n for i in self.tolerance:\r\n total_bonding_type.update(self._scheme(i))\r\n # ##########################################\r\n # the competition between threads causes slower calculations\r\n # with ThreadPoolExecutor(max_workers=5) as t:\r\n # bd_obj_lst = []\r\n # for i in self.tolerance:\r\n # bd_obj_lst.append(t.submit(self._scheme, i))\r\n # for f in as_completed(bd_obj_lst):\r\n # data = f.result()\r\n # total_bonding_type.update(data)\r\n\r\n return total_bonding_type\r\n\r\n @lru_cache()\r\n def get_cluster(self):\r\n bonding_pair = self.get_bonding_pair()\r\n cluster = {}\r\n # cluster_thread_lst = []\r\n # delta = list(bonding_pair.keys())\r\n # executor = ThreadPoolExecutor(max_workers=5)\r\n # for _, v in bonding_pair.items():\r\n # cluster_thread_lst.append(executor.submit(Cluster(v).get_cluster_list, ))\r\n # for index, q in enumerate(as_completed(cluster_thread_lst)):\r\n # cluster.update({\r\n # delta[index]: q.result()\r\n # })\r\n for i, v in bonding_pair.items():\r\n cluster.update({\r\n i: Cluster(v).get_cluster_list()\r\n })\r\n\r\n return cluster\r\n\r\n def get_cluster_number(self):\r\n return dict(zip(self.get_cluster().keys(),\r\n [len(i) for i in self.get_cluster().values()]))\r\n\r\n @lru_cache()\r\n def get_atoms_number_in_cluster(self):\r\n delta = list(self.get_cluster().keys())\r\n num = {}\r\n for k, i in enumerate(self.get_cluster().values()):\r\n n = []\r\n for j in i:\r\n n.append(len(j))\r\n if not n:\r\n continue\r\n\r\n num.update(\r\n {\r\n delta[k]: {\"max\": max(n),\r\n \"min\": min(n),\r\n \"number\": n}\r\n }\r\n )\r\n return num\r\n\r\n\r\nclass Cluster:\r\n def __init__(self, bonding_pair_lst):\r\n \"\"\"\r\n :param bonding_pair_lst: A list of bonded atoms-> [[a, b],[a, c]]\r\n \"\"\"\r\n self.bpl = bonding_pair_lst\r\n\r\n def _sort(self):\r\n \"\"\"\r\n :return: make sure all atoms pair in order\r\n \"\"\"\r\n return sorted(self.bpl, key=lambda x: 
x[0])\r\n\r\n @lru_cache()\r\n def get_cluster_list(self):\r\n return MergeNestedLst(self._sort()).merge()\r\n\r\n @property\r\n def cluster_number(self):\r\n return len(self.get_cluster_list())\r\n\r\n\r\nif __name__ == \"__main__\":\r\n pass\r\n", "id": "10459252", "language": "Python", "matching_score": 2.942423105239868, "max_stars_count": 0, "path": "mat2d_pkg/atoms.py" }, { "content": "#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\nimport os\r\nimport math\r\nfrom os.path import *\r\nfrom collections.abc import Iterable\r\nfrom functools import lru_cache\r\nfrom pymatgen.io.cif import CifWriter\r\n\r\nfrom mat2d_pkg.atoms import Bonded\r\nfrom mat2d_pkg.mat import *\r\nfrom mat2d_pkg.utils import FloatRange\r\nimport logging\r\n\r\nlogging.basicConfig(level=logging.INFO,\r\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\nlogger = logging.getLogger(__name__)\r\n\r\nnp.set_printoptions(precision=4, suppress=True)\r\n\r\n\r\nclass Layer:\r\n def __init__(self, file_path, box_scale=None, bond_type=None, tolerance=None):\r\n self.ofn = basename(file_path)\r\n if box_scale is None:\r\n box_scale = [2, 2, 2]\r\n self._bx = box_scale\r\n self._cell = Cell(struct_file=file_path, supercell_scale=box_scale)\r\n if bond_type is None:\r\n bond_type = 'vdw_radii'\r\n if isinstance(tolerance, Iterable):\r\n try:\r\n tolerance = list(FloatRange(*tolerance))\r\n except TypeError:\r\n raise Exception(\"If the threshold is a range,\"\r\n \" the format must be (start, end, step)\")\r\n elif tolerance is None:\r\n tolerance = list(FloatRange(1.1, 1.6, 0.1))\r\n else:\r\n tolerance = [tolerance, ]\r\n\r\n self.bt = bond_type\r\n self.tc = tolerance\r\n if isinstance(self.tc, Iterable):\r\n self.long = len(tolerance)\r\n else:\r\n self.long = 1\r\n\r\n @property\r\n def atoms_number(self):\r\n return {\r\n \"primitive\": self._cell.primitive_atoms_number,\r\n \"supercell\": self._cell.supercell_atoms_number\r\n }\r\n\r\n @lru_cache()\r\n def _cluster_obj(self):\r\n return {\r\n \"primitive\": Bonded(elements_symbol=self._cell.primitive_atomic_symbol,\r\n distance_array=self._cell.get_distance_between_atoms_in_primitive(),\r\n atomic_position=self._cell.primitive_atoms_coords,\r\n bonding_scheme=self.bt,\r\n tolerance=self.tc),\r\n \"supercell\": Bonded(elements_symbol=self._cell.supercell_atomic_symbol,\r\n distance_array=self._cell.get_distance_between_atoms_in_supercell(),\r\n atomic_position=self._cell.supercell_atoms_coords,\r\n bonding_scheme=self.bt,\r\n tolerance=self.tc)\r\n }\r\n\r\n @property\r\n @lru_cache()\r\n def primitive_cluster(self):\r\n return self._cluster_obj().get(\"primitive\").get_cluster()\r\n\r\n @property\r\n @lru_cache()\r\n def supercell_cluster(self):\r\n return self._cluster_obj().get(\"supercell\").get_cluster()\r\n\r\n @property\r\n @lru_cache()\r\n def primitive_cluster_number(self):\r\n return self._cluster_obj().get(\"primitive\").get_cluster_number()\r\n\r\n @property\r\n @lru_cache()\r\n def supercell_cluster_number(self):\r\n return self._cluster_obj().get(\"supercell\").get_cluster_number()\r\n\r\n @property\r\n @lru_cache()\r\n def primitive_cluster_atoms_number(self):\r\n return self._cluster_obj().get(\"primitive\").get_atoms_number_in_cluster()\r\n\r\n @property\r\n @lru_cache()\r\n def supercell_cluster_atoms_number(self):\r\n return self._cluster_obj().get(\"supercell\").get_atoms_number_in_cluster()\r\n\r\n @staticmethod\r\n def _mat2d_parser(primitive_cell_max, primitive_cell_min, supercell_max, supercell_min, 
supercell_atoms_number):\r\n\r\n def mat_type_judge(ratio_number):\r\n return \"2D vdW solid\" \\\r\n if ratio_number == 4 else \"1D vdW solid\" \\\r\n if ratio_number == 2 else \"Intercalated 1D/2D\"\r\n\r\n if primitive_cell_min == 1:\r\n return \"Exclude! has unbonded atoms!\"\r\n ratio = supercell_max / primitive_cell_max\r\n if primitive_cell_min == supercell_min:\r\n return \"Exclude! has unbonded molecules!,\" \\\r\n \"Molecule solid: {}\".format(mat_type_judge(ratio))\r\n else:\r\n if supercell_max == supercell_atoms_number:\r\n return \"Exclude! 3D solid\"\r\n else:\r\n return mat_type_judge(ratio)\r\n\r\n def get_classification_results(self):\r\n supercell_atoms_number = self.atoms_number.get('supercell')\r\n cr = {}\r\n for i in range(self.long):\r\n pc_max, pc_min = list(map(self.primitive_cluster_atoms_number[self.tc[i]].get,\r\n ['max', 'min']))\r\n sc_max, sc_min = list(map(self.supercell_cluster_atoms_number[self.tc[i]].get,\r\n ['max', 'min']))\r\n mat_parser = self._mat2d_parser(pc_max, pc_min, sc_max, sc_min, supercell_atoms_number)\r\n\r\n cr.update({\r\n self.tc[i]: mat_parser\r\n })\r\n\r\n return cr\r\n\r\n @staticmethod\r\n def all(result_dict, mat_type):\r\n # '2D vdW solid'\r\n for _, values in result_dict.items():\r\n if values != mat_type:\r\n return False\r\n return True\r\n\r\n @staticmethod\r\n def any(result_dict, mat_type):\r\n # '2D vdW solid'\r\n for _, values in result_dict.items():\r\n if values == mat_type:\r\n return True\r\n return False\r\n\r\n @classmethod\r\n def judge_result(cls, result_dict):\r\n if cls.all(result_dict, '2D vdW solid'):\r\n return '2D_vdW_solid'\r\n if cls.all(result_dict, '1D vdW solid'):\r\n return '1D_vdW_solid'\r\n if cls.any(result_dict, '2D vdW solid') & cls.any(result_dict, '1D vdW solid'):\r\n return 'Weird_vdW_solid'\r\n if cls.any(result_dict, '2D vdW solid'):\r\n return '2D_vdW_candidate_solid'\r\n if cls.any(result_dict, '1D vdW solid'):\r\n return '1D_vdW_candidate_solid'\r\n\r\n return 'Exclude_mats'\r\n\r\n\r\nclass ConstructLayer(Layer):\r\n def __init__(self, file_path, box_scale, bond_type=None, tolerance=None):\r\n self.ofn = basename(file_path)\r\n if box_scale is None:\r\n box_scale = [3, 3, 3]\r\n self._bx = box_scale\r\n self._cell = Cell(struct_file=file_path, supercell_scale=box_scale)\r\n super(ConstructLayer, self).__init__(file_path, box_scale, bond_type, tolerance)\r\n self.d = self.get_direction()\r\n print(self.d)\r\n self.li = math.floor(self.get_specific_layer_number(0.45) / 2)\r\n\r\n @lru_cache()\r\n def get_cluster(self):\r\n return self.supercell_cluster\r\n\r\n def _run_cluster_loop(self, sort=True):\r\n all_cluster_obj = {}\r\n total_cluster = self.get_cluster()\r\n for index, cl in total_cluster.items():\r\n t = self._get_cluster_obj(index, cl, sort)\r\n all_cluster_obj.update(t)\r\n return all_cluster_obj\r\n\r\n def _get_cluster_obj(self, delta, cluster, sort=True):\r\n cluster_obj_lst = []\r\n for ic in cluster:\r\n cluster_coords_lst, cluster_symbol_lst = [], []\r\n for index in ic:\r\n cluster_coords_lst.append(self._cell.supercell_atoms_coords[index])\r\n cluster_symbol_lst.append(self._cell.supercell_atomic_symbol[index])\r\n ics = Structure(lattice=self._cell.supercell_lattice,\r\n coords=np.asarray(cluster_coords_lst),\r\n species=cluster_symbol_lst,\r\n coords_are_cartesian=True)\r\n cluster_obj_lst.append(ics)\r\n\r\n # if sort:\r\n # return {\r\n # delta: self._sort(cluster_obj_lst)\r\n # }\r\n\r\n return {\r\n delta: cluster_obj_lst\r\n }\r\n\r\n @property\r\n def 
cluster_dict(self, sort=True):\r\n return self._run_cluster_loop(sort)\r\n\r\n @lru_cache()\r\n def get_specific_cluster(self, tolerance):\r\n # from pprint import pprint\r\n # pprint(self.cluster_dict)\r\n return self.cluster_dict.get(tolerance)\r\n\r\n def _sort(self, cluster_obj_list):\r\n length = list(map(np.linalg.norm, self._cell.supercell_lattice))\r\n max_length_axis = length.index(max(length))\r\n\r\n return sorted(cluster_obj_list,\r\n key=lambda x: max(x.cart_coords[:, max_length_axis]))\r\n\r\n def __getitem__(self, item, tolerance):\r\n return self.get_specific_cluster(tolerance)[item]\r\n\r\n def get_specific_layer_number(self, tolerance):\r\n return len(self.get_specific_cluster(tolerance))\r\n\r\n def get_layer_cif(self, save_path, tolerance, layer_index=None, all_layer=False, centralize=False, vacuum=30):\r\n if not exists(save_path):\r\n os.makedirs(save_path)\r\n fn, _ = splitext(self.ofn)\r\n job_path = join(save_path, fn)\r\n layer_index = self.li\r\n if not exists(job_path):\r\n os.makedirs(job_path)\r\n try:\r\n if all_layer:\r\n if not centralize:\r\n for i, struct in enumerate(self.get_specific_cluster(tolerance)):\r\n des_fn = join(job_path, str(tolerance) + \"-{}-layer-{}.cif\".format(fn, i))\r\n self.get_cif_file(structure=struct, cif_name=des_fn)\r\n else:\r\n logger.warning(\"If choice all layer centralized, \"\r\n \"All cifs may be the same\")\r\n for i, struct in enumerate(self.get_specific_cluster(tolerance)):\r\n des_fn = join(job_path,\r\n str(tolerance) + \"-{}-centralized-{}.cif\".format(fn, i))\r\n self.get_cif_file(structure=self.centralize(self.d, struct, vacuum),\r\n cif_name=des_fn)\r\n\r\n else:\r\n if not centralize:\r\n des_fn = join(job_path,\r\n str(tolerance) + \"-{}-layer-{}.cif\".format(fn, layer_index))\r\n struct = self.get_specific_cluster(tolerance)[layer_index]\r\n\r\n else:\r\n des_fn = join(job_path,\r\n str(tolerance) + \"-{}-centralized-{}.cif\".format(fn, layer_index))\r\n struct = self.centralize(self.d, self.get_specific_cluster(tolerance)[layer_index], vacuum)\r\n self.get_cif_file(structure=struct, cif_name=des_fn)\r\n except TypeError:\r\n raise Exception(\"tolerance value not found !\")\r\n return\r\n\r\n @staticmethod\r\n def get_cif_file(structure, cif_name):\r\n return CifWriter(struct=structure).write_file(cif_name)\r\n\r\n @lru_cache()\r\n def get_layer(self, layer_index, tolerance, all_layer=False):\r\n layer_index = self.li\r\n if all_layer:\r\n return self.get_specific_cluster(tolerance)\r\n\r\n return self.get_specific_cluster(tolerance)[layer_index]\r\n\r\n def get_primitive_layered_mat(self, save_path,\r\n tolerance,\r\n layer_index,\r\n centralize=False, vacuum=30):\r\n layer_index = self.li\r\n one_layer = self.get_specific_cluster(tolerance)[layer_index]\r\n fn, suffix = splitext(basename(self.ofn))\r\n job_path = join(save_path, fn)\r\n if not exists(job_path):\r\n os.makedirs(job_path)\r\n pol = one_layer.get_primitive_structure()\r\n cn = '{}-layer-primitive-{}{}'.format(fn, layer_index, suffix)\r\n cif_name = join(job_path, cn)\r\n if centralize:\r\n pol = self.centralize(self.d, pol, vacuum).get_primitive_structure()\r\n cif_name = join(job_path,\r\n str(tolerance) + \"-centralized-\" + cn)\r\n return self.get_cif_file(structure=pol, cif_name=cif_name)\r\n\r\n @lru_cache()\r\n def get_direction(self):\r\n dft_cl = self.get_specific_cluster(tolerance=0.45)[0]\r\n tm = [[2, 1, 1], [1, 2, 1], [1, 1, 2]]\r\n count = 0\r\n cnl = []\r\n for i in tm:\r\n sc = 
SupercellTransformation(scaling_matrix=i).apply_transformation(structure=dft_cl)\r\n da = Bonded(Cell.get_atomic_number(sc.sites), sc.distance_matrix,\r\n sc.cart_coords, 'covalent_radii', 0.45)\r\n cn = da.get_cluster_number().get(0.45)\r\n cnl.append(cn)\r\n print(cnl)\r\n\r\n if cnl.count(2) > 1 or cnl.count(2) == 0:\r\n with open(r\"./Non_parallel.struct\", 'a+') as f:\r\n f.write(self.ofn + '\\n')\r\n return\r\n\r\n return cnl.index(2)\r\n @staticmethod\r\n def centralize(direction, struct, vacuum=30):\r\n a, b, c = [struct.lattice.a,\r\n struct.lattice.b,\r\n struct.lattice.c]\r\n lattice_array = np.asarray(struct.as_dict().get('lattice').get('matrix'))\r\n # print('vacuum will be build on {} axis'.format(['a', 'b', 'c'][direction]))\r\n # change lattice\r\n ml = [a, b, c][direction]\r\n ml_array = lattice_array[direction]\r\n positions = struct.cart_coords\r\n thickness = max(positions[:, direction]) - min(positions[:, direction])\r\n vector_scale = ((2 * vacuum) + thickness) / ml\r\n vacuum_vector = ml_array * vector_scale\r\n lattice_array[direction] = vacuum_vector\r\n # get atoms position\r\n # print(positions)\r\n # group_mid_point = positions.mean(axis=0)\r\n # print(mp)\r\n # vertical/ horizontal shift\r\n # looking for the center of the group\r\n # print(positions)\r\n gmp = []\r\n for val in positions.transpose():\r\n mid = ((max(val) - min(val)) / 2) + min(val)\r\n gmp.append(mid)\r\n group_mid_point = np.asarray(gmp)\r\n box_mid_point = lattice_array.sum(axis=0) / 2\r\n diff = group_mid_point - box_mid_point\r\n new_coords = []\r\n for index, coords in enumerate(positions):\r\n new_coord = coords - diff\r\n new_coords.append(new_coord)\r\n return Structure(coords=np.asarray(new_coords), species=struct.species, lattice=lattice_array,coords_are_cartesian=True)\r\n\r\nif __name__ == '__main__':\r\n pass\r\n", "id": "12569005", "language": "Python", "matching_score": 5.223748683929443, "max_stars_count": 0, "path": "mat2d_pkg/layer.py" }, { "content": "#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\nimport numpy as np\r\n\r\nfrom pymatgen.core.structure import Structure\r\nfrom pymatgen.transformations.standard_transformations import SupercellTransformation\r\nfrom pymatgen.symmetry.analyzer import SpacegroupAnalyzer\r\n\r\n\r\nclass Cell:\r\n def __init__(self, struct_file, supercell_scale=None):\r\n print(\"reading from cif: {}\".format(struct_file))\r\n self.struct_file = struct_file\r\n _cell = Structure.from_file(self.struct_file)\r\n self._cell = SpacegroupAnalyzer(_cell).get_conventional_standard_structure()\r\n if supercell_scale is None:\r\n self._scs = np.eye(3) * [2, 2, 2]\r\n elif np.asarray(supercell_scale).shape != (3, 3):\r\n self._scs = np.eye(3) * np.asarray(supercell_scale)\r\n\r\n @property\r\n def _primitive_cell(self):\r\n return self._cell.get_primitive_structure()\r\n\r\n @property\r\n def primitive_structure(self):\r\n return self._primitive_cell\r\n\r\n @property\r\n def primitive_atoms_coords(self):\r\n return self._primitive_cell.cart_coords\r\n\r\n @property\r\n def primitive_lattice(self):\r\n return np.asarray(self._primitive_cell.lattice.as_dict().get(\"matrix\"))\r\n\r\n @staticmethod\r\n def get_atomic_number(periodic_site_lst):\r\n psl = []\r\n for i in periodic_site_lst:\r\n psl.append(i.as_dict().get('species')[0].get('element'))\r\n\r\n return psl\r\n\r\n @property\r\n def primitive_atomic_number(self):\r\n return self._primitive_cell.atomic_numbers\r\n\r\n @property\r\n def primitive_atoms_number(self):\r\n return 
len(self.primitive_atomic_symbol)\r\n\r\n @property\r\n def primitive_atomic_symbol(self):\r\n return self.get_atomic_number(self._primitive_cell.sites)\r\n\r\n def get_distance_between_atoms_in_primitive(self):\r\n return self._primitive_cell.distance_matrix\r\n\r\n def _get_supercell(self):\r\n return SupercellTransformation(scaling_matrix=self._scs).apply_transformation(structure=self._primitive_cell)\r\n\r\n def __repr__(self):\r\n return \"{\\'Atoms\\': {supercell: %r}\\n{scale: %r}}\" \\\r\n % (self._get_supercell(), self._scs)\r\n\r\n @property\r\n def supercell_atoms_coords(self):\r\n return self._get_supercell().cart_coords\r\n\r\n @property\r\n def supercell_lattice(self):\r\n return self._get_supercell().lattice.as_dict().get(\"matrix\")\r\n\r\n @property\r\n def supercell_atomic_number(self):\r\n return self._get_supercell().atomic_numbers\r\n\r\n @property\r\n def supercell_atoms_number(self):\r\n return len(self.supercell_atomic_number)\r\n\r\n @property\r\n def supercell_atomic_symbol(self):\r\n return self.get_atomic_number(self._get_supercell().sites)\r\n\r\n def get_distance_between_atoms_in_supercell(self):\r\n return self._get_supercell().distance_matrix\r\n\r\n\r\nif __name__ == '__main__':\r\n pass\r\n", "id": "10516825", "language": "Python", "matching_score": 1.7608860731124878, "max_stars_count": 0, "path": "mat2d_pkg/mat.py" }, { "content": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport os\r\nfrom os.path import *\r\nimport re\r\nimport json\r\nimport numpy as np\r\n\r\nfrom monty.re import reverse_readfile\r\nfrom pymatgen.analysis import dimensionality\r\nfrom pymatgen.core.structure import Structure\r\nfrom pymatgen.transformations.standard_transformations import SupercellTransformation\r\nfrom pymatgen.symmetry.analyzer import SpacegroupAnalyzer\r\nfrom apkg.para import settings\r\n\r\n\r\ndef pull_output(pth):\r\n for root, _, files in os.walk(pth):\r\n if 'Scf' in root:\r\n for f in files:\r\n if f == 'OUTCAR':\r\n yield join(root, f)\r\n\r\n\r\ndef get_energy(ofp):\r\n for info in reverse_readfile(ofp):\r\n if 'energy without entropy' in info:\r\n energy = info.split('=')[-1]\r\n return energy\r\n\r\n return None\r\n\r\n\r\ndef get_id(ofp):\r\n rdn = ofp.split(os.sep)[-3]\r\n code = re.findall(r\"(\\d+)\", rdn)\r\n return code[0]\r\n\r\n\r\ndef get_layer(struct):\r\n cc = SpacegroupAnalyzer(struct).get_conventional_standard_structure()\r\n ca = dimensionality.find_connected_atoms(cc, tolerance=0.45,\r\n ldict=settings['covalent_radii'])\r\n cluster = dimensionality.find_clusters(cc, ca)\r\n return cluster\r\n\r\n\r\ndef get_direction_from(fp, code, tp='icsd'):\r\n with open(r\"/HOME/nscc-gz_material_1/matgen_dft/mat2d_work/scripts/analysis/2danalysis/apkg/bulk_info.json\", \"r\") as f:\r\n bulk_data = json.load(f)\r\n\r\n key = tp+ '_' + str(code)\r\n sin = bulk_data.get(key) \r\n print(key)\r\n print(sin)\r\n return sin\r\n\r\ndef get_direction(bulk_struct):\r\n cc = SpacegroupAnalyzer(bulk_struct).get_conventional_standard_structure()\r\n _, _, init_cc = get_layer(cc)\r\n inc = len(init_cc)\r\n tm = [[2, 1, 1], [1, 2, 1], [1, 1, 2]]\r\n cnl = []\r\n for i in tm:\r\n sc = SupercellTransformation(scaling_matrix=i).apply_transformation(cc)\r\n ca = dimensionality.find_connected_atoms(sc, tolerance=0.45,\r\n ldict=settings['covalent_radii'])\r\n _, _, cluster = dimensionality.find_clusters(sc, ca)\r\n cnl.append(len(cluster))\r\n for i, v in enumerate(cnl):\r\n if v / inc == 2:\r\n return i\r\n\r\ndef get_polygon_area_in_plane(struct, 
direction):\r\n ps = [(1, 2), (0, 2), (0, 1)]\r\n index = ps[direction]\r\n la = list(map(np.asarray, struct.lattice.as_dict().get('matrix')))\r\n area = np.linalg.norm(np.cross(la[index[0]], la[index[1]]))\r\n return area\r\n\r\n\r\ndef run_layer(pth):\r\n result = {}\r\n count = 0\r\n for job in pull_output(pth):\r\n #print(job)\r\n egy = get_energy(job)\r\n code = get_id(job)\r\n dn = dirname(job)\r\n bdn = dirname(dn)\r\n poscar = join(bdn, 'Test_spin')\r\n sf = join(poscar, 'POSCAR')\r\n struct = Structure.from_file(sf)\r\n ln = get_layer(struct)\r\n if ln == [0, 1, 0]:\r\n with open(r\"./ln_false.log\", 'a+') as f:\r\n f.write(sf + '\\n')\r\n continue\r\n _, _, nn = ln\r\n #ln = 1\r\n direction = get_direction(struct)\r\n #direction = get_direction_from(sf, code)\r\n #{code: {'energy': float(egy.strip(' ')), 'direction': direction, 'ln': len(nn)}}\r\n #{code: {'energy': float(egy.strip(' ')), 'direction': direction['dn'], 'ln': direction['ln']}}\r\n result.update(\r\n {code: {'energy': float(egy.strip(' ')), 'direction': direction, 'ln': len(nn)}}\r\n )\r\n count += 1\r\n print('total: ', count)\r\n\r\n return result\r\n\r\n\r\ndef get_bulk_direction(code, dd):\r\n apply = dd.get(code)\r\n if apply is not None:\r\n return dd.get(code).get('direction')\r\n return\r\n\r\n\r\ndef run_bulk(pth, lsh):\r\n result = {}\r\n with open(lsh, 'r') as f:\r\n dd = json.load(f)\r\n for job in pull_output(pth):\r\n #print(job)\r\n dn = dirname(job)\r\n sf = join(dn, 'CONTCAR')\r\n struct = Structure.from_file(sf)\r\n egy = get_energy(job)\r\n code = get_id(job)\r\n ln = get_layer(struct)\r\n if ln == [0, 1, 0]:\r\n with open(r\"./ln_false.log\", 'a+') as f:\r\n f.write(sf + '\\n')\r\n continue\r\n else:\r\n _, _, nn = ln\r\n n = len(nn)\r\n direction = get_bulk_direction(code, dd)\r\n if direction is None:\r\n continue\r\n plane_area = get_polygon_area_in_plane(struct, direction)\r\n result.update(\r\n {code: {'energy': float(egy.strip(' ')), 'ln': n, 'area': plane_area}}\r\n )\r\n\r\n return result\r\n\r\n\r\nif __name__ == \"__main__\":\r\n pass\r\n\r\n", "id": "4776087", "language": "Python", "matching_score": 1.2967684268951416, "max_stars_count": 0, "path": "apkg/energy.py" }, { "content": "\"\"\"\r\nThis is a simple model database.\r\n\r\nCreated on Tue May 1 22:36:18 2018\r\n\r\n@author: <NAME>\r\n\"\"\"\r\n\r\ndef graphene():\r\n import numpy as np\r\n from ase import Atoms\r\n p = np.array([[0, 0, 0],\r\n [0.0, 0.0, 3.35],\r\n [0.0, 2.84, 0.0],\r\n [0.0, 1.42, 3.35]])\r\n c = np.array([[1.23, 2.13, 0.0],\r\n [-1.23, 2.13, 0.0],\r\n [0, 0, 6.7]])\r\n print(c)\r\n return Atoms('4C',positions=p, cell=c, pbc=(1,1,1))\r\n\r\ndef magic_angle_graphene(index = 0):\r\n index = index\r\n lattice_a0 = 2.46\r\n lattice_c0 = 3.35\r\n####################################################################################################\r\n########## Calculate length, angle,number of atoms \r\n#################################################################################################### \r\n import math\r\n cos_cta = (int(index)**2 * 3 + 3 * int(index) + 0.5) / (int(index)**2 * 3 + int(index)*3+1)\r\n angle_deg = math.degrees(math.acos(cos_cta))\r\n superlattice_length = math.sqrt(int(index)**2 * 3 + int(index)*3+1) * lattice_a0\r\n natom = 4 *(3 * index ** 2 + 3 * index + 1)\r\n print('#'*70)\r\n print('-'*5,'Index', '-'*5,'Length','-'*13,'Angle(degree)','-'*6,'Atoms')\r\n print('-'*5,index,'-'*6,superlattice_length,'-'*3,angle_deg,'-'*3,natom)\r\n print('#'*70)\r\n print('\\n\\n')\r\n 
\r\n####################################################################################################\r\n########## Create unit cells of two layers\r\n####################################################################################################\r\n import numpy as np\r\n from ase import Atoms\r\n \r\n pos_layer1_unit = np.array([[0, 0, 0],\r\n [0.0, 2.84, 0.0]])\r\n pos_layer2_unit = np.array([[0.0, 0.0, 3.35],\r\n [0.0, 1.42, 3.35]])\r\n cell_unit = np.array([[-1/2 * lattice_a0, math.sqrt(3)/2 * lattice_a0, 0],\r\n [1/2 * lattice_a0, math.sqrt(3)/2 * lattice_a0, 0], \r\n [0.0,0.0,lattice_c0 * 2]])\r\n\r\n layer1_unit = Atoms('2C',positions=pos_layer1_unit, cell=cell_unit, pbc=[1,1,0])\r\n layer2_unit = Atoms('2C',positions=pos_layer2_unit, cell=cell_unit, pbc=[1,1,0])\r\n\r\n####################################################################################################\r\n########## Create super cells of two layers\r\n####################################################################################################\r\n from ase.build import make_supercell\r\n cell_super_layer1 = np.array([[index, index + 1, 0],\r\n [-(index + 1),(2 * index + 1),0],\r\n [0,0,1]]) \r\n cell_super_layer2 = np.array([[index + 1, index, 0],\r\n [-index ,(2 * index + 1),0],\r\n [0,0,1]])\r\n\r\n super_layer1 = make_supercell(layer1_unit,cell_super_layer1)\r\n super_layer2 = make_supercell(layer2_unit,cell_super_layer2)\r\n \r\n####################################################################################################\r\n########## Create magic angle twisted bilayer graphene\r\n#################################################################################################### \r\n \r\n mat_cell_unit = np.mat(cell_unit)\r\n\r\n mat_cell_super_layer1 = np.mat(cell_super_layer1) * mat_cell_unit\r\n mat_cell_super_layer2 = np.mat(cell_super_layer2) * mat_cell_unit\r\n\r\n cell_super = np.array([[-0.5 * superlattice_length,math.sqrt(3)/2 * superlattice_length,0],\r\n [0.5 * superlattice_length, math.sqrt(3)/2 * superlattice_length,0],\r\n [0,0,6.7]])\r\n\r\n mat_cell_super = np.mat(cell_super)\r\n mat_pos_layer1_super = np.mat(super_layer1.get_positions())\r\n mat_pos_layer2_super = np.mat(super_layer2.get_positions())\r\n\r\n mat_pos_layer1_super_newcell = mat_pos_layer1_super * mat_cell_super_layer1.I *mat_cell_super\r\n mat_pos_layer2_super_newcell = mat_pos_layer2_super * mat_cell_super_layer2.I *mat_cell_super\r\n\r\n super_layer1.set_cell(np.array(mat_cell_super))\r\n super_layer1.set_positions(np.array(mat_pos_layer1_super_newcell))\r\n super_layer2.set_cell(np.array(mat_cell_super))\r\n super_layer2.set_positions(np.array(mat_pos_layer2_super_newcell))\r\n\r\n interface = super_layer1.copy()\r\n interface.extend(super_layer2)\r\n interface.center(vacuum = 20,axis = 2)\r\n\r\n return interface\r\n \r\n\r\ndef magic_angle_graphene_cluster(index = 1,corner=0):\r\n n = index\r\n corner = corner\r\n c = 1.59\r\n \r\n from ase import Atoms,Atom\r\n####################################################################################################\r\n########## Create unit cell\r\n#################################################################################################### \r\n graphene_unit = Atoms('4C',positions=[[0.0,0.0,0.0],\r\n [0.0,0.0,3.35],\r\n [0.0,1.42,0.0],\r\n [0.0,1.42,3.35]],\r\n cell = [[1.23,2.13,0.0],\r\n [-1.23,2.13,0.0],\r\n [0.0,0.0,6.7]],\r\n pbc=(1,1,0))\r\n 
\r\n\r\n####################################################################################################\r\n########## Create super cell\r\n#################################################################################################### \r\n graphen_natom_C = 12 * n * n\r\n graphen_natom_H = 12 * n\r\n graphen_cell = [2.46*(2*n-1)+15,0.71*(6*n-4)+15+2*c,21]\r\n graphen_cell_center=((2.46*(2*n-1)+15)*0.5,(0.71*(6*n-4)+15+2*c)*0.5,12.175)\r\n \r\n graphene_super = graphene_unit.repeat((2*n+1,2*n+1,1))\r\n #view(graphene_super)\r\n \r\n print(' Number of carbon: ',graphen_natom_C,'\\n',\r\n 'Number of hydrogen: ', graphen_natom_H,'\\n',\r\n 'Number of atoms: ',graphen_natom_C+graphen_natom_H)\r\n \r\n####################################################################################################\r\n########## Create cluster without passivation\r\n#################################################################################################### \r\n graphen_cluster=Atoms()\r\n for atom in graphene_super:\r\n if atom.x >(-1.23*(2*n-1)-0.01) and atom.x <(1.23*(2*n-1)+0.01):\r\n if atom.y >(0.71*(3*n+4)-0.01) and atom.y <(0.71*(9*n+4)+0.01):\r\n line1 = 4.23+2.13/1.23*atom.x-atom.y\r\n line2 = 4.23-2.13/1.23*atom.x-atom.y\r\n if line1 < 0 and line2 < 0:\r\n graphen_cluster.append(atom)\r\n\r\n####################################################################################################\r\n########## Create passivation\r\n#################################################################################################### \r\n graphen_passivation=Atoms()\r\n for atom in graphen_cluster:\r\n if atom.y< 2.13/1.23*atom.x+4.26+0.02:\r\n graphen_passivation.append(Atom('H',\r\n position=(atom.x+c*0.866,atom.y-c*0.5,atom.z)))\r\n elif atom.y<-2.13/1.23*atom.x+4.26+0.02:\r\n graphen_passivation.append(Atom('H',\r\n position=(atom.x-c*0.866,atom.y-c*0.5,atom.z)))\r\n elif atom.y>2.13/1.23*atom.x+4.26+0.71*(12*n-4)-0.02:\r\n graphen_passivation.append(Atom('H',\r\n position=(atom.x-c*0.866,atom.y+c*0.5,atom.z)))\r\n elif atom.y>-2.13/1.23*atom.x+4.26+0.71*(12*n-4)-0.02:\r\n graphen_passivation.append(Atom('H',\r\n position=(atom.x+c*0.866,atom.y+c*0.5,atom.z)))\r\n elif atom.y<0.71*(3*n+5)+0.02:\r\n graphen_passivation.append(Atom('H',\r\n position=(atom.x,atom.y-c,atom.z)))\r\n elif atom.y>0.71*(9*n+3)-0.02:\r\n graphen_passivation.append(Atom('H',\r\n position=(atom.x,atom.y+c,atom.z)))\r\n \r\n graphen_cluster.extend(graphen_passivation)\r\n \r\n####################################################################################################\r\n########## Create magic angle graphene cluster\r\n#################################################################################################### \r\n graphen_cluster.set_cell(graphen_cell)\r\n graphen_cluster.center()\r\n graphen_cluster_corner = graphen_cluster.copy()\r\n graphen_cluster_layer2 = graphen_cluster.copy()\r\n del graphen_cluster_corner[[atom.index for atom in graphen_cluster_corner if atom.z > 10.5]]\r\n del graphen_cluster_layer2[[atom.index for atom in graphen_cluster_layer2 if atom.z < 10.5]]\r\n graphen_cluster_layer2.rotate(a=corner,v='z',center=graphen_cell_center)\r\n graphen_cluster_corner.extend(graphen_cluster_layer2)\r\n \r\n return graphen_cluster_corner\r\n \r\n\r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n \r\n \r\n ", "id": "3503221", "language": "Python", "matching_score": 0.8992953896522522, "max_stars_count": 0, "path": "abacus/model_base.py" }, { "content": "import torch\nimport numpy as 
np\nimport logging\nimport os\n\ndef splitdata(length,fold,index):\n fold_length = length // fold\n index_list = np.arange(length)\n if index == 1:\n val = index_list[:fold_length]\n test = index_list[fold_length * (fold - 1):]\n train = index_list[fold_length : fold_length * (fold - 1)]\n elif index == fold:\n val = index_list[fold_length * (fold - 1):]\n test = index_list[fold_length * (fold - 2) : fold_length * (fold - 1)]\n train = index_list[:fold_length * (fold - 2)]\n else:\n val = index_list[fold_length * (index - 1) : fold_length * index]\n test = index_list[fold_length * (index - 2) : fold_length * (index - 1)]\n train = np.concatenate([index_list[:fold_length * (index - 2)],index_list[fold_length * index:]])\n return train,val,test\n\n\ndef printParams(model_params, logger=None):\n print(\"=========== Parameters ==========\")\n for k,v in model_params.items():\n print(f'{k} : {v}')\n print(\"=================================\")\n print()\n if logger:\n for k,v in model_params.items():\n logger.info(f'{k} : {v}')\n\ndef applyIndexOnList(lis,idx):\n ans = []\n for _ in idx:\n ans.append(lis[_])\n return ans\n\ndef set_seed(seed):\n torch.manual_seed(seed) # set seed for cpu \n torch.cuda.manual_seed(seed) # set seed for gpu\n torch.backends.cudnn.deterministic = True # cudnn\n torch.backends.cudnn.benchmark = False\n np.random.seed(seed) # numpy\n\ndef get_logger(save_dir):\n logger = logging.getLogger(__name__)\n logger.setLevel(level = logging.INFO)\n handler = logging.FileHandler(save_dir + \"/log.txt\")\n handler.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger\n\nclass CheckpointHandler(object):\n def __init__(self, save_dir, max_save=5):\n self.save_dir = save_dir\n self.max_save = max_save\n self.init_info()\n\n def init_info(self):\n os.makedirs(self.save_dir, exist_ok=True)\n self.metric_dic = {}\n if os.path.exists(self.save_dir+'/eval_log.txt'):\n with open(self.save_dir+'/eval_log.txt','r') as f:\n ls = f.readlines()\n for l in ls:\n l = l.strip().split(':')\n assert len(l) == 2\n self.metric_dic[l[0]] = float(l[1])\n\n \n def save_model(self, model, model_params, epoch, eval_metric):\n max_in_dic = max(self.metric_dic.values()) if len(self.metric_dic) else 1e9\n if eval_metric > max_in_dic:\n return\n if len(self.metric_dic) == self.max_save:\n self.remove_last()\n self.metric_dic['model-'+str(epoch)+'.pt'] = eval_metric\n state = {\"params\":model_params, \"epoch\":epoch, \"model\":model.state_dict()}\n torch.save(state, self.save_dir + '/' + 'model-'+str(epoch)+'.pt')\n log_str = '\\n'.join(['{}:{:.7f}'.format(k,v) for k,v in self.metric_dic.items()])\n with open(self.save_dir+'/eval_log.txt','w') as f:\n f.write(log_str)\n\n\n def remove_last(self):\n last_model = sorted(list(self.metric_dic.keys()),key = lambda x:self.metric_dic[x])[-1]\n if os.path.exists(self.save_dir+'/'+last_model):\n os.remove(self.save_dir+'/'+last_model)\n self.metric_dic.pop(last_model)\n\n def checkpoint_best(self, use_cuda=True):\n best_model = sorted(list(self.metric_dic.keys()),key = lambda x:self.metric_dic[x])[0]\n if use_cuda:\n state = torch.load(self.save_dir + '/' + best_model)\n else:\n state = torch.load(self.save_dir + '/' + best_model,map_location='cpu')\n return state\n\n def checkpoint_avg(self, use_cuda=True):\n return_dic = None\n model_num = 0\n tmp_model_params = None\n for ckpt in os.listdir(self.save_dir):\n if not 
ckpt.endswith('.pt'):\n continue\n model_num += 1\n if use_cuda:\n state = torch.load(self.save_dir + '/' + ckpt)\n else:\n state = torch.load(self.save_dir + '/' + ckpt,map_location='cpu')\n model,tmp_model_params = state['model'], state['params']\n if not return_dic:\n return_dic = model\n else:\n for k in return_dic:\n return_dic[k] += model[k]\n for k in return_dic:\n return_dic[k] = return_dic[k]/model_num\n return {'params':tmp_model_params, 'model':return_dic}", "id": "7920136", "language": "Python", "matching_score": 1.2515251636505127, "max_stars_count": 0, "path": "utils.py" }, { "content": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Dec 16 17:02:37 2018\r\n\r\n@author: shenzx\r\n\"\"\"\r\nfrom __future__ import print_function\r\nimport warnings\r\nimport shutil\r\nfrom os.path import join\r\nimport numpy as np\r\nfrom ase.io.abacus import write_input_stru\r\nfrom ase.calculators.calculator import FileIOCalculator\r\n# copyright © Key Lab of Quantum Information, CAS, China\r\n\"\"\"This module defines an ASE interface to ABACUS\r\n\r\nDeveloped on the basis of modules by <NAME>.\r\n The path of the directory containing the\r\n pseudopotential and basis directories (LDA, PBE, SG15, ORBITAL, ...)\r\n should be set by the enviromental flag $ABACUS_PP_PATH, $ABACUS_ORBITAL_PATH.\r\n\r\nThe user should also set the enviroment flag\r\n $ABACUS_SCRIPT pointing to a python script looking\r\n\r\nlike::\r\n import os\r\n exitcode = os.system('abacus')\r\nhttp://abacus.ustc.edu.cn/\r\n\"\"\"\r\n\r\n# Parameters list that can be set in INPUT. -START-\r\n# 1\r\ngeneral_keys = [\r\n 'suffix', # the name of main output directory\r\n 'latname', # the name of lattice name\r\n 'atom_file', # the filename of file containing atom positions\r\n 'kpoint_file', # the name of file containing k points\r\n 'pseudo_dir', # the directory containing pseudo files\r\n 'pseudo_type', # the type pseudo files\r\n 'dft_functional', # exchange correlation functional\r\n 'calculation', # test; scf; relax; nscf; ienvelope; istate;\r\n 'ntype', # atom species number\r\n 'nspin', # 1: single spin; 2: up and down spin;\r\n 'nbands', # number of bands\r\n 'nbands_istate', # number of bands around Fermi level for istate calulation\r\n 'symmetry', # turn symmetry on or off\r\n 'nelec' # input number of electrons\r\n ]\r\n# 2\r\npw_keys = [\r\n 'ecutwfc', # energy cutoff for wave functions\r\n 'ethr', # threshold for eigenvalues is cg electron iterations\r\n 'dr2', # charge density error\r\n 'start_wfc', # start wave functions are from 'atomic' or 'file'\r\n 'start_charge', # start charge is from 'atomic' or file\r\n 'charge_extrap', # atomic; first-order; second-order; dm:coefficients of SIA\r\n 'out_charge', # >0 output charge density for selected electron steps\r\n 'out_potential', # output realspace potential\r\n 'out_wf', # output wave functions\r\n 'out_dos', # output energy and dos\r\n 'out_band', # output energy and band structure\r\n 'nx', # number of points along x axis for FFT grid\r\n 'ny', # number of points along y axis for FFT grid\r\n 'nz' # number of points along z axis for FFT grid\r\n ]\r\n# 3\r\nrelaxation_keys = [\r\n 'ks_solver', # cg; david; lapack; genelpa; hpseps;\r\n 'niter', # number of electron iterations\r\n 'vna', # use the vna or not\r\n 'grid_speed', # 1:normal 2:fast\r\n 'force_set', # output the force_set or not\r\n 'force', # calculate the force\r\n 'nstep', # number of ion iteration steps\r\n 'out_stru', # output the structure files after each ion step\r\n 'force_thr', # force 
threshold, unit: Ry/Bohr\r\n 'force_thr_ev', # force threshold, unit: eV/Angstrom\r\n 'force_thr_ev2', # force invalid threshold, unit: eV/Angstrom\r\n 'stress_thr', # stress threshold\r\n 'press1', # target pressure, unit: KBar\r\n 'press2', # target pressure, unit: KBar\r\n 'press3', # target pressure, unit: KBar\r\n 'bfgs_w1', # wolfe condition 1 for bfgs\r\n 'bfgs_w2', # wolfe condition 2 for bfgs\r\n 'trust_radius_max', # maximal trust radius, unit: Bohr\r\n 'trust_radius_min', # minimal trust radius, unit: Bohr\r\n 'trust_radius_ini', # initial trust radius, unit: Bohr\r\n 'stress', # calculate the stress or not\r\n 'fixed_axes', # which axes are fixed\r\n 'move_method', # bfgs; sd; cg; cg_bfgs;\r\n 'out_level', # ie(for electrons); i(for ions);\r\n 'out_dm' # >0 output density matrix\r\n ]\r\n# 4\r\nlcao_keys = [\r\n 'basis_type', # PW; LCAO in pw; LCAO\r\n 'search_radius', # input search radius (Bohr)\r\n 'search_pbc', # input periodic boundary condition\r\n 'lcao_ecut', # energy cutoff for LCAO\r\n 'lcao_dk', # delta k for 1D integration in LCAO\r\n 'lcao_dr', # delta r for 1D integration in LCAO\r\n 'lcao_rmax', # max R for 1D two-center integration table\r\n 'out_hs', # output H and S matrix\r\n 'out_lowf', # ouput LCAO wave functions\r\n 'bx', # division of an element grid in FFT grid along x\r\n 'by', # division of an element grid in FFT grid along y\r\n 'bz' # division of an element grid in FFT grid along z\r\n ]\r\n# 5\r\nsmearing_keys = [\r\n 'smearing', # type of smearing: gauss; fd; fixed; mp; mp2\r\n 'sigma' # energy range for smearing\r\n ]\r\n# 6\r\ncharge_mixing_keys = [\r\n 'mixing_type', # plain; kerker; pulay; pulay-kerker\r\n 'mixing_beta', # mixing parameter: 0 means no new charge\r\n 'mixing_ndim', # mixing dimension in pulay\r\n 'mixing_gg0' # mixing parameter in kerker\r\n ]\r\n# 7\r\ndos_keys = [\r\n 'dos_emin_ev', # minimal range for dos\r\n 'dos_emax_ev', # maximal range for dos\r\n 'dos_edelta_ev', # delta energy for dos\r\n 'dos_sigma' # gauss b coefficeinet(default=0.07) \r\n ]\r\n# 8\r\ntechnique_keys = [\r\n 'gamma_only', # gamma only\r\n 'diago_proc', # number of proc used to diago\r\n 'npool', # number of pools for k points, pw only\r\n 'sparse_matrix', # use sparse matrix, in DMM\r\n 'atom_distribution', # distribute atoms, in DMM\r\n 'mem_saver', # memory saver for many k points used\r\n 'printe' # print band energy for selectively ionic steps\r\n ]\r\n# 9\r\nsiao_keys = [\r\n 'selinv_npole', # number of selected poles\r\n 'selinv_temp', # temperature for Fermi-Dirac distribution\r\n 'selinv_gap', # supposed gap in the calculation\r\n 'selinv_deltae', # expected energy range\r\n 'selinv_mu', # chosen mu as Fermi energy\r\n 'selinv_threshold', # threshold for calculated electron number\r\n 'selinv_niter', # max number of steps to update mu\r\n ]\r\n# 10\r\nmolecular_dynamics_keys = [\r\n 'md_mdtype', # choose ensemble\r\n 'md_dt', # time step\r\n 'md_nresn', # parameter during integrater\r\n 'md_nyosh', # parameter during integrater\r\n 'md_qmass', # mass of thermostat\r\n 'md_tfirst', # temperature first\r\n 'md_tlast', # temperature last\r\n 'md_dumpmdfred', # The period to dump MD information for monitoring and restarting MD\r\n 'md_mdoutpath', # output path of md\r\n 'md_domsd', # whether compute <r(t)-r(0)>\r\n 'md_domsdatom', # whether compute msd for each atom\r\n 'md_rstmd', # whether restart\r\n 'md_fixtemperature', # period to change temperature\r\n 'md_ediff', # parameter for constraining total energy change\r\n 'md_ediffg', # parameter 
for constraining max force change\r\n 'md_msdstarttime' # choose which step that msd be calculated\r\n ]\r\n# 11\r\nefield_keys = [\r\n 'efield', # add electric field\r\n 'edir', # add electric field\r\n 'emaxpos', # maximal position of efield [0, 1\r\n 'eopreg', # where sawlike potential decrease\r\n 'eamp', # amplitute of the efield, unit is a.u.\r\n 'eamp_v' # amplitute of the efield, unit is V/A\r\n ]\r\n# 12\r\nbfield_keys = [\r\n 'bfield', # add magnetic field\r\n 'bfield_teslax', # magnetic field strength\r\n 'bfield_teslay', # magnetic field strength\r\n 'bfield_teslaz', # magnetic field strength\r\n 'bfield_gauge_x', # magnetic field gauge origin\r\n 'bfield_gauge_y', # magnetic field gauge origin\r\n 'bfield_gauge_z' # magnetic field gauge origin\r\n ]\r\n# 13\r\ntest_keys = [\r\n 'out_alllog', # output information for each processor, when parallel\r\n 'nurse', # for coders \r\n 'colour', # for coders, make their live colourful\r\n 't_in_h', # calculate the kinetic energy or not\r\n 'vl_in_h', # calculate the local potential or not\r\n 'vnl_in_h', # calculate the nonlocal potential or not\r\n 'zeeman_in_h', # calculate the zeeman term or not\r\n 'test_force', # test the force\r\n 'test_stress' # test the force\r\n ]\r\n# 14\r\nother_methods_keys = [\r\n 'mlwf_flag', # turn MLWF on or off\r\n 'opt_epsilon2', # calculate the dielectic function\r\n 'opt_nbands' # number of bands for optical calculation\r\n ]\r\n# 15\r\nvdw_d2_keys = [\r\n 'vdwD2', # calculate vdw-D2 or not\r\n 'vdwD2_scaling', # scaling of vdw-D2\r\n 'vdwD2_d', # damping parameter\r\n 'vdwD2_C6_file', # filename of C6\r\n 'vdwD2_C6_unit', # unit of C6, Jnm6/mol or eVA6\r\n 'vdwD2_R0_file', # filename of R0\r\n 'vdwD2_R0_unit', # unit of R0, A or Bohr\r\n 'vdwD2_model', # expression model of periodic structure, radius or period\r\n 'vdwD2_radius', # radius cutoff for periodic structure\r\n 'vdwD2_radius_unit', # unit of radius cutoff for periodic structure\r\n 'vdwD2_period' # periods of periodic structure\r\n ]\r\n# 16\r\nspectrum_keys = [\r\n 'spectral_type', # the type of the calculated spectrum\r\n 'spectral_method', # 0: tddft(linear response)\r\n 'kernel_type', # the kernel type: rpa, tdlda ...\r\n 'eels_method', # 0: hilbert_transform method; 1: standard method\r\n 'absorption_method', # 0: vasp's method 1: pwscf's method\r\n 'system', # the calculate system\r\n 'eta', # eta(Ry)\r\n 'domega', # domega(Ry)\r\n 'nomega', # nomega\r\n 'ecut_chi', # the dimension of chi matrix\r\n 'q_start', # the position of the first q point in direct coordinate\r\n 'q_direction', # the q direction\r\n 'nq', # the total number of qpoints for calculation\r\n 'out_epsilon', # output epsilon or not\r\n 'out_chi', # output chi or not\r\n 'out_chi0', # output chi0 or not\r\n 'fermi_level', # the change of the fermi_level(Ry)\r\n 'coulomb_cutoff', # turn on the coulomb_cutoff or not\r\n 'kmesh_interpolation', # calculting <i, 0|j, R>\r\n 'qcar', # (unit: 2PI/lat0)\r\n 'lcao_box', # the scale for searching the existence of the overlap <i, 0|j, R>\r\n 'intrasmear', # Eta\r\n 'shift', # shift\r\n 'metalcalc', # metal or not\r\n 'eps_degauss', # degauss in calculating epsilon0\r\n 'noncolin', # using non-collinear-spin\r\n 'lspinorb', # consider the spin-orbit interaction\r\n 'starting_spin_angle' # starting_spin_angle\r\n\r\n ]\r\n# 17\r\ntddft_keys = [\r\n 'tddft', # calculate tddft or not\r\n 'td_dr2', # threshold for electronic iteration of tddft\r\n 'td_dt', # time of ion step\r\n 'td_force_dt', # time of force change\r\n 
'val_elec_01', # val_elec_01\r\n 'val_elec_02', # val_elec_02\r\n 'val_elec_03', # val_elec_03\r\n 'vext', # add extern potential or not\r\n 'vext_dire' # extern potential direction\r\n] \r\n\r\n# Parameters list that can be set in INPUT. -END-\r\n\r\nclass AbacusInput(object):\r\n \r\n # Initialize internal dictionary of input parameters to None -START-\r\n def __init__(self, restart=None):\r\n \"\"\"\r\n self.directory = './' # shenzx v20200724\r\n self.stru_filename = 'STRU' # shenzx v20200724\r\n self.pseudo_dir = './' # shenzx v20200724\r\n self.potential_name = None # shenzx v20200724\r\n self.basis_dir = './' # shenzx v20200724\r\n self.basis_name = None # shenzx v20200724\r\n self.fix = 1 # shenzx v20200724\r\n self.coordinates_type = 'Cartesian' # shenzx v20200724\r\n \"\"\"\r\n self.general_params = {}\r\n self.pw_params = {}\r\n self.relaxation_params = {}\r\n self.lcao_params = {}\r\n self.smearing_params = {}\r\n self.charge_mixing_params = {}\r\n self.dos_params = {}\r\n self.technique_params = {}\r\n self.siao_params = {}\r\n self.molecular_dynamics_params = {}\r\n self.efield_params = {}\r\n self.bfield_params = {}\r\n self.test_params = {}\r\n self.other_method_params = {}\r\n self.vdw_d2_params = {}\r\n self.spectrum_params = {}\r\n self.tddft_params = {}\r\n \r\n for key in general_keys:\r\n self.general_params[key] = None\r\n for key in pw_keys:\r\n self.pw_params[key] = None\r\n for key in relaxation_keys:\r\n self.relaxation_params[key] = None\r\n for key in lcao_keys:\r\n self.lcao_params[key] = None\r\n for key in smearing_keys:\r\n self.smearing_params[key] = None\r\n for key in charge_mixing_keys:\r\n self.charge_mixing_params[key] = None\r\n for key in dos_keys:\r\n self.dos_params[key] = None\r\n for key in technique_keys:\r\n self.technique_params[key] = None\r\n for key in siao_keys:\r\n self.siao_params[key] = None\r\n for key in molecular_dynamics_keys:\r\n self.molecular_dynamics_params[key] = None\r\n for key in efield_keys:\r\n self.efield_params[key] = None\r\n for key in bfield_keys:\r\n self.bfield_params[key] = None\r\n for key in test_keys:\r\n self.test_params[key] = None\r\n for key in other_methods_keys:\r\n self.other_method_params[key] = None\r\n for key in vdw_d2_keys:\r\n self.vdw_d2_params[key] = None\r\n for key in spectrum_keys:\r\n self.spectrum_params[key] = None\r\n for key in tddft_keys:\r\n self.tddft_params[key] = None\r\n # Initialize internal dictionary of input parameters to None -END-\r\n\r\n # Appoint the KPT parameters which are not INPUT parameters -START-\r\n self.kpt_params = {\r\n 'knumber': 0, # The number of K points\r\n 'kmode': 'Gamma', # Mode of K points, can be Gamma, MP, Line, Direct, Cartesian\r\n 'kpts': [1, 1, 1, 0, 0, 0] # Give the K points\r\n }\r\n # Appoint the KPT parameters which are not INPUT parameters -END-\r\n\r\n # Set the INPUT and KPT parameters -START-\r\n def set(self, **kwargs):\r\n for key in kwargs:\r\n if key in self.general_params:\r\n self.general_params[key] = kwargs[key]\r\n elif key in self.pw_params:\r\n self.pw_params[key] = kwargs[key]\r\n elif key in self.relaxation_params:\r\n self.relaxation_params[key] = kwargs[key]\r\n elif key in self.lcao_params:\r\n self.lcao_params[key] = kwargs[key]\r\n elif key in self.smearing_params:\r\n self.smearing_params[key] = kwargs[key]\r\n elif key in self.charge_mixing_params:\r\n self.charge_mixing_params[key] = kwargs[key]\r\n elif key in self.dos_params:\r\n self.dos_params[key] = kwargs[key]\r\n elif key in self.technique_params:\r\n 
self.technique_params[key] = kwargs[key]\r\n elif key in self.siao_params:\r\n self.siao_params[key] = kwargs[key]\r\n elif key in self.molecular_dynamics_params:\r\n self.molecular_dynamics_params[key] = kwargs[key]\r\n elif key in self.efield_params:\r\n self.efield_params[key] = kwargs[key]\r\n elif key in self.bfield_params:\r\n self.bfield_params[key] = kwargs[key]\r\n elif key in self.test_params:\r\n self.test_params[key] = kwargs[key]\r\n elif key in self.other_method_params:\r\n self.other_method_params[key] = kwargs[key]\r\n elif key in self.vdw_d2_params:\r\n self.vdw_d2_params[key] = kwargs[key]\r\n elif key in self.spectrum_params:\r\n self.spectrum_params[key] = kwargs[key]\r\n elif key in self.tddft_params:\r\n self.spectrum_params[key] = kwargs[key]\r\n elif key in self.kpt_params:\r\n self.kpt_params[key] = kwargs[key]\r\n else:\r\n raise TypeError('Parameter not defined: ' + key)\r\n # Set the INPUT and KPT parameters -END-\r\n\r\n # Write INPUT file -START-\r\n def write_input_input(self, directory='./', **kwargs):\r\n with open(join(directory, 'INPUT'), 'w') as input_file:\r\n input_file.write('INPUT_PARAMETERS\\n')\r\n input_file.write('# Created by Atomic Simulation Enviroment\\n')\r\n for key, val in self.general_params.items():\r\n if val is not None:\r\n params = str(key) + ' ' * (20 - len(key)) + str(val)\r\n input_file.write('%s\\n' % params)\r\n \r\n for key, val in self.pw_params.items():\r\n if val is not None:\r\n params = str(key) + ' ' * (20 - len(key)) + str(val)\r\n input_file.write('%s\\n' % params)\r\n \r\n for key, val in self.relaxation_params.items():\r\n if val is not None:\r\n params = str(key) + ' ' * (20 - len(key)) + str(val)\r\n input_file.write('%s\\n' % params)\r\n \r\n for key, val in self.lcao_params.items():\r\n if val is not None:\r\n params = str(key) + ' ' * (20 - len(key)) + str(val)\r\n input_file.write('%s\\n' % params)\r\n \r\n for key, val in self.smearing_params.items():\r\n if val is not None:\r\n params = str(key) + ' ' * (20 - len(key)) + str(val)\r\n input_file.write('%s\\n' % params)\r\n \r\n for key, val in self.charge_mixing_params.items():\r\n if val is not None:\r\n params = str(key) + ' ' * (20 - len(key)) + str(val)\r\n input_file.write('%s\\n' % params)\r\n \r\n for key, val in self.dos_params.items():\r\n if val is not None:\r\n params = str(key) + ' ' * (20 - len(key)) + str(val)\r\n input_file.write('%s\\n' % params)\r\n \r\n for key, val in self.technique_params.items():\r\n if val is not None:\r\n params = str(key) + ' ' * (20 - len(key)) + str(val)\r\n input_file.write('%s\\n' % params)\r\n \r\n for key, val in self.siao_params.items():\r\n if val is not None:\r\n params = str(key) + ' ' * (20 - len(key)) + str(val)\r\n input_file.write('%s\\n' % params)\r\n \r\n for key, val in self.molecular_dynamics_params.items():\r\n if val is not None:\r\n params = str(key) + ' ' * (20 - len(key)) + str(val)\r\n input_file.write('%s\\n' % params)\r\n \r\n for key, val in self.efield_params.items():\r\n if val is not None:\r\n params = str(key) + ' ' * (20 - len(key)) + str(val)\r\n input_file.write('%s\\n' % params)\r\n \r\n for key, val in self.bfield_params.items():\r\n if val is not None:\r\n params = str(key) + ' ' * (20 - len(key)) + str(val)\r\n input_file.write('%s\\n' % params)\r\n \r\n for key, val in self.test_params.items():\r\n if val is not None:\r\n params = str(key) + ' ' * (20 - len(key)) + str(val)\r\n input_file.write('%s\\n' % params)\r\n \r\n for key, val in self.other_method_params.items():\r\n if val 
is not None:\r\n params = str(key) + ' ' * (20 - len(key)) + str(val)\r\n input_file.write('%s\\n' % params)\r\n \r\n for key, val in self.vdw_d2_params.items():\r\n if val is not None:\r\n params = str(key) + ' ' * (20 - len(key)) + str(val)\r\n input_file.write('%s\\n' % params)\r\n \r\n for key, val in self.spectrum_params.items():\r\n if val is not None:\r\n params = str(key) + ' ' * (20 - len(key)) + str(val)\r\n input_file.write('%s\\n' % params)\r\n input_file.write('\\n')\r\n\r\n for key, val in self.tddft_params.items():\r\n if val is not None:\r\n params = str(key) + ' ' * (20 - len(key)) + str(val)\r\n input_file.write('%s\\n' % params)\r\n input_file.write('\\n')\r\n # Write INPUT file -END-\r\n # Read INPUT file --START-\r\n\r\n def read_input_input(self,\r\n filename='INPUT',\r\n directory='./',\r\n **kwargs):\r\n with open(join(directory, filename), 'r') as file:\r\n file.readline()\r\n lines = file.readlines()\r\n\r\n for line in lines: \r\n try:\r\n line = line.replace(\"# \", \"# \")\r\n data = line.split()\r\n if len(data) == 0:\r\n continue\r\n elif data[0][0] == \"# \":\r\n continue\r\n \r\n key = data[0]\r\n if key in general_keys:\r\n self.general_params[key] = data[1]\r\n elif key in pw_keys:\r\n self.pw_params[key] = data[1]\r\n elif key in relaxation_keys:\r\n self.relaxation_params[key] = data[1]\r\n elif key in lcao_keys:\r\n self.lcao_params[key] = data[1]\r\n elif key in smearing_keys:\r\n self.smearing_params[key] = data[1]\r\n elif key in charge_mixing_keys:\r\n self.charge_mixing_params[key] = data[1]\r\n elif key in dos_keys:\r\n self.dos_params[key] = data[1]\r\n elif key in technique_keys:\r\n self.technique_params[key] = data[1]\r\n elif key in siao_keys:\r\n self.siao_params[key] = data[1]\r\n elif key in molecular_dynamics_keys:\r\n self.molecular_dynamics_params[key] = data[1]\r\n elif key in efield_keys:\r\n self.efield_params[key] = data[1]\r\n elif key in bfield_keys:\r\n self.bfield_params[key] = data[1]\r\n elif key in test_keys:\r\n self.test_params[key] = data[1]\r\n elif key in other_methods_keys:\r\n self.other_method_params[key] = data[1]\r\n elif key in vdw_d2_keys:\r\n if key == 'vdwD2_period':\r\n self.vdw_d2_params[key] = (data[1] + ' '\r\n + data[2] + ' ' + data[3])\r\n else:\r\n self.vdw_d2_params[key] = data[1]\r\n elif key in spectrum_keys:\r\n if key in ['q_start', 'q_direction', 'qcar', 'lcao_box']:\r\n self.spectrum_params[key] = (data[1] + ' '\r\n + data[2] + ' ' + data[3])\r\n else:\r\n self.spectrum_params[key] = data[1]\r\n elif key in tddft_keys:\r\n self.tddft_params[key] = data[1]\r\n\r\n return 'ok' \r\n \r\n except KeyError:\r\n raise IOError('keyword \"%s\" in INPUT is'\r\n 'not know by calculator.' % key)\r\n \r\n except IndexError:\r\n raise IOError('Value missing for keyword \"%s\" .' 
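# Illustrative sketch, not from the original module: the write_input_input() loops above
# all emit "key<padding to column 20>value" lines, so an INPUT fragment looks like the
# output of this standalone snippet (the parameter values here are arbitrary examples).
for key, val in {"calculation": "scf", "ecutwfc": 50, "basis_type": "lcao"}.items():
    print(str(key) + " " * (20 - len(key)) + str(val))
# calculation         scf
# ecutwfc             50
# basis_type          lcao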
% key)\r\n # Read INPUT file --END- \r\n\r\n # Write KPT -START-\r\n def write_input_kpt(self,\r\n directory='./',\r\n filename='KPT',\r\n **kwargs):\r\n k = self.kpt_params\r\n if self.technique_params['gamma_only'] is None:\r\n return warnings.warn(\" 'gamma_only' parameter has not been set, \"\r\n \"please set it to 0 or 1\")\r\n\r\n elif self.technique_params['gamma_only'] == 1:\r\n with open(join(directory, filename), 'w') as kpoint:\r\n kpoint.write('K_POINTS\\n')\r\n kpoint.write('0\\n')\r\n kpoint.write('Gamma\\n')\r\n kpoint.write('1 1 1 0 0 0')\r\n\r\n elif self.technique_params['gamma_only'] == 0:\r\n with open(join(directory, filename), 'w') as kpoint:\r\n kpoint.write('K_POINTS\\n')\r\n kpoint.write('%s\\n' % str(k['knumber']))\r\n kpoint.write('%s\\n' % str(k['kmode']))\r\n if k['kmode'] in ['Gamma', 'MP']:\r\n for n in range(len(k['kpts'])):\r\n kpoint.write('%s ' % str(k['kpts'][n]))\r\n \r\n elif k['kmode'] in ['Direct', 'Cartesian', 'Line']:\r\n for n in range(len(k['kpts'])):\r\n for i in range(len(k['kpts'][n])):\r\n kpoint.write('%s ' % str(k['kpts'][n][i]))\r\n kpoint.write('\\n')\r\n\r\n else:\r\n raise ValueError(\"The value of kmode is not right, set to \"\r\n \"Gamma, MP, Direct, Cartesian, or Line.\")\r\n else:\r\n return warnings.warn(\"The value of gamma_only is not right, \"\r\n \"please set it to 0 or 1\")\r\n # Write KPT -END-\r\n\r\n # Read KPT file -START-\r\n\r\n def read_kpt(self,\r\n filename='KPT',\r\n directory='./',\r\n **kwargs):\r\n with open(filename, 'r') as file:\r\n lines = file.readlines()\r\n\r\n if lines[2][-1] == '\\n':\r\n kmode = lines[2][:-1]\r\n else:\r\n kmode = lines[2]\r\n\r\n if kmode in ['Gamma', 'MP']:\r\n self.kpt_params['kmode'] = lines[2][:-1]\r\n self.kpt_params['knumber'] = lines[1].split()[0]\r\n self.kpt_params['kpts'] = np.array(lines[3].split())\r\n\r\n elif kmode in ['Cartesian', 'Direct', 'Line']:\r\n self.kpt_params['kmode'] = lines[2][:-1]\r\n self.kpt_params['knumber'] = lines[1].split()[0]\r\n self.kpt_params['kpts'] = np.array([list(map(float, line.split())) \r\n for line in lines[3:]])\r\n\r\n else:\r\n raise ValueError(\"The value of kmode is not right, set to \"\r\n \"Gamma, MP, Direct, Cartesian, or Line.\")\r\n # Read KPT file -END-\r\n\r\n # Write and read potential -START-\r\n def write_potential(self,\r\n pseudo_dir='./',\r\n potential_name=None,\r\n directory='./',\r\n **kwargs):\r\n if pseudo_dir == directory:\r\n return 'It is ok, pseudo_dir is in work directory'\r\n else:\r\n if self.potential_name == None:\r\n raise ValueError('The value of \"potential_name\" is not right, '\r\n 'please set it to a list')\r\n else:\r\n self.pseudo_dir = pseudo_dir\r\n self.potential_name = potential_name\r\n for name in self.potential_name:\r\n shutil.copyfile(join(self.pseudo_dir, name),\r\n join(directory, name))\r\n\r\n def read_potential(self,\r\n pseudo_dir='./',\r\n potential_name=None,\r\n **kwargs):\r\n if self.potential_name is None:\r\n raise ValueError('The value of \"potential_name\" is not right, '\r\n 'please set it to a list')\r\n else:\r\n self.pseudo_dir = pseudo_dir\r\n self.potential_name = potential_name\r\n # Write and read potential -END-\r\n\r\n # Write and read orbital basis -START-\r\n def write_basis(self,\r\n basis_dir='./',\r\n basis_name=None,\r\n directory='./',\r\n **kwargs):\r\n if basis_dir == directory:\r\n print('It is ok, basis_dir is in work directory')\r\n else:\r\n if self.basis_name is None:\r\n raise ValueError('The value of \"basis_name\" is not right, '\r\n 'please set it to a 
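# Illustrative sketch, not from the original module: driving write_input_kpt() above for a
# Monkhorst-Pack grid. The import path is taken from the companion abacus_out.py module;
# gamma_only, knumber, kmode and kpts are routed through set() as defined above.
from ase.calculators.abacus.create_input import AbacusInput

inp = AbacusInput()
inp.set(gamma_only=0, knumber=0, kmode="MP", kpts=[4, 4, 4, 0, 0, 0])
inp.write_input_kpt(directory=".")   # writes ./KPT: K_POINTS / 0 / MP / "4 4 4 0 0 0"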
list')\r\n else:\r\n self.basis_dir = basis_dir\r\n self.basis_name = basis_name\r\n for name in self.basis_name:\r\n shutil.copyfile(join(self.basis_dir, name),\r\n join(directory, name))\r\n\r\n print(\"basis_dir = %s, basis_name = %s\"%(basis_dir, basis_name))\r\n\r\n def read_basis(self,\r\n basis_dir='./',\r\n basis_name=None,\r\n directory='./',\r\n **kwargs):\r\n if self.basis_name is None:\r\n raise ValueError('The value of \"basis_name\" is not right, '\r\n 'please set it to a list')\r\n else:\r\n self.basis_dir = basis_dir\r\n self.basis_name = basis_name\r\n\r\n # Write and read orbital basis -START-\r\n def write_input(self,\r\n atoms,\r\n properties=None,\r\n system_changes=None):\r\n \"\"\"Write input parameters to files-file.\"\"\"\r\n\r\n FileIOCalculator.write_input(self,\r\n atoms,\r\n properties,\r\n system_changes)\r\n\r\n if(system_changes is None): # shenzx v20200724\r\n system_changes = ' ' # shenzx v20200724\r\n if ('numbers' in system_changes or \r\n 'initial_magmoms' in system_changes):\r\n self.initialize(atoms)\r\n\r\n write_input_stru(stru=atoms,\r\n filename=self.stru_filename,\r\n pseudo_dir=self.pseudo_dir,\r\n potential_name=self.potential_name,\r\n basis_dir=self.basis_dir,\r\n basis_name=self.basis_name,\r\n fix=self.fix,\r\n directory=self.directory,\r\n coordinates_type=self.coordinates_type)\r\n self.write_input_input(directory=self.directory)\r\n self.write_input_kpt(directory=self.directory)\r\n # Write all input file -END-\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # Test a writing...\r\n import os\r\n print(os.getcwd())\r\n from ase import Atoms # just use when test this module\r\n test = AbacusInput()\r\n test.set(gamma_only = 1)\r\n test.write_input(atoms = Atoms(\"CO\"))\r\n", "id": "1967491", "language": "Python", "matching_score": 5.976461887359619, "max_stars_count": 0, "path": "abacus/create_input.py" }, { "content": "from __future__ import print_function\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jun 8 16:33:38 2018\r\n\r\nModified on Wed Jun 20 15:00:00 2018\r\n@author: <NAME>\r\n\"\"\"\r\n\r\nimport subprocess\r\nfrom os.path import join\r\nimport numpy as np\r\nfrom ase.calculators.abacus.create_input import AbacusInput\r\nfrom ase.calculators.calculator import Calculator, FileIOCalculator, all_changes #Calculator\r\n\r\n\r\nclass Abacus(AbacusInput, FileIOCalculator):\r\n # Initialize parameters and get some information -START-\r\n name = 'Abacus'\r\n implemented_properties = ['energy', 'forces', 'fermi']\r\n\r\n default_parameters = dict(calculation='scf',\r\n ecutwfc=50,\r\n smearing='gaussian',\r\n mixing_type='pulay-kerker',\r\n basis_type='lcao',\r\n gamma_only=1,\r\n ks_solver=\"genelpa\",\r\n atom_file='STRU',\r\n )\r\n\r\n def __init__(self,\r\n restart=None,\r\n ignore_bad_restart_file=False,\r\n directory=None,\r\n label='ase_rundir/ase_test',\r\n atoms=None,\r\n command=None,\r\n log_file=None,\r\n pseudo_dir='./',\r\n potential_name=None,\r\n basis_dir='./',\r\n basis_name=None,\r\n fix=1,\r\n stru_filename='STRU',\r\n coordinates_type=\"Cartesian\",\r\n **kwargs):\r\n\r\n self.species = None\r\n\r\n AbacusInput.__init__(self, restart)\r\n\r\n FileIOCalculator.__init__(self,\r\n restart,\r\n ignore_bad_restart_file,\r\n label,\r\n atoms,\r\n **kwargs)\r\n\r\n self.restart = restart\r\n self.pseudo_dir = pseudo_dir\r\n self.potential_name = potential_name\r\n self.basis_dir = basis_dir\r\n self.basis_name = basis_name\r\n self.fix = fix\r\n self.stru_filename = stru_filename\r\n self.coordinates_type = 
coordinates_type\r\n\r\n self.out_path = ''\r\n\r\n if directory is not None:\r\n self.directory = directory\r\n if log_file is not None:\r\n self.log_file = log_file\r\n\r\n AbacusInput.set(self, **self.parameters)\r\n AbacusInput.set(self, **kwargs)\r\n\r\n # Initialize parameters and get some information -END-\r\n\r\n def check_state(self, atoms):\r\n system_changes = FileIOCalculator.check_state(self, atoms)\r\n # Ignore boundary conditions:\r\n if 'pbc' in system_changes:\r\n system_changes.remove('pbc')\r\n return system_changes\r\n\r\n def initialize(self, atoms):\r\n numbers = atoms.get_atomic_numbers().copy()\r\n self.species = []\r\n for a, Z in enumerate(numbers):\r\n if Z not in self.species:\r\n self.species.append(Z)\r\n self.general_params[\"ntype\"] = len(self.species)\r\n\r\n # Run abacus\r\n def calculate(self,\r\n atoms=None,\r\n properties=None,\r\n system_changes=all_changes):\r\n FileIOCalculator.calculate(self,\r\n atoms,\r\n properties,\r\n system_changes)\r\n\r\n # Read results\r\n def read_results(self):\r\n a = AbacusInput()\r\n a.read_input_input(directory=self.directory)\r\n\r\n if self.pw_params['dr2'] is None:\r\n self.charge_density_error = float(1e-09)\r\n else:\r\n self.charge_density_error = float(self.pw_params['dr2'])\r\n\r\n if self.general_params['calculation'] in ['scf',\r\n 'relax',\r\n 'cell-relax',\r\n 'nscf',\r\n 'ienvelope',\r\n 'istate',\r\n 'test',\r\n 'md']:\r\n out_file = 'running_' + str(self.general_params['calculation']) + '.log'\r\n else:\r\n raise ValueError('Calculation parameters error')\r\n\r\n if self.general_params['suffix'] is None:\r\n self.out_path = join(self.directory, 'OUT.ABACUS/')\r\n else:\r\n\r\n self.out_path = join(self.directory, 'OUT.%s/' % str(self.general_params['suffix']))\r\n\r\n self.out_log_file = join(self.out_path, out_file)\r\n\r\n f = open(self.out_log_file, 'r')\r\n lines = f.readlines()\r\n f.close()\r\n\r\n n = 0\r\n number_atoms = 0\r\n force_number = 0\r\n force_last = []\r\n force_all = []\r\n fermi_energy = None\r\n final_total_energy = None\r\n\r\n for line in lines:\r\n if line.find('TOTAL ATOM NUMBER') != -1:\r\n number_atoms = int(line.split(' = ')[1])\r\n if line.find('final etot is') != -1:\r\n import re\r\n final_total_energy = re.findall(r'[-+]?\\d+\\.?\\d*[eE]?[-+]?\\d+', line)\r\n final_total_energy = float(final_total_energy[0])\r\n if line.find('EFERMI = ') != -1:\r\n fermi_energy = float(line.split()[2])\r\n if line.find('TOTAL-FORCE') != -1:\r\n for a in range(number_atoms):\r\n force_all.append(\r\n [float(data) for data in lines[n + 4 + a].split()[1:4]])\r\n force_number = force_number + 1\r\n n = n + 1\r\n\r\n force_all = np.array(force_all)\r\n force_last = force_all[-1 - number_atoms:]\r\n force_last = np.array(force_last)\r\n\r\n self.results['energy'] = final_total_energy\r\n self.results['fermi'] = fermi_energy\r\n self.results['forces'] = force_last\r\n\r\n return self.results\r\n\r\n def run(self):\r\n with open(self.log_file, 'a') as f:\r\n run = subprocess.Popen(self.command,\r\n stderr=f,\r\n stdin=f,\r\n stdout=f,\r\n cwd=self.directory,\r\n shell=True)\r\n return run.communicate()\r\n\r\n def get_fermi_level(self):\r\n return self.results['fermi']\r\n\r\n \"\"\"\r\n def get_potential_energy(self, atoms):\r\n return self.get_property('energy', atoms)\r\n\r\n def get_forces(self, atoms):\r\n return self.get_property('forces', atoms)\r\n\r\n def get_property(self, name, atoms = None, allow_calculation = True):\r\n if atoms is None:\r\n atoms = self.atoms\r\n system_changes = 
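# Illustrative sketch, not from the original module: attaching the Abacus calculator above
# to an ASE Atoms object. The pseudopotential/basis file names and directories are
# placeholders; the keyword names match the __init__ signature above, and the class's
# default_parameters already request an LCAO scf run.
from ase.build import bulk
from ase.calculators.abacus.abacus_out import Abacus   # import path assumed

atoms = bulk("Si", "diamond", a=5.43)
calc = Abacus(
    label="ase_rundir/si_scf",
    pseudo_dir="./pp",
    potential_name=["Si.UPF"],        # placeholder pseudopotential file
    basis_dir="./orb",
    basis_name=["Si_lda.orb"],        # placeholder orbital file
)
atoms.calc = calc
# energy = atoms.get_potential_energy()   # would write STRU/INPUT/KPT and launch abacus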
[]\r\n else:\r\n system_changes = self.check_state(atoms)\r\n if system_changes:\r\n self.reset()\r\n if name not in self.results:\r\n if not allow_calculation:\r\n return None\r\n self.calculate(atoms)\r\n result = self.results[name]\r\n return result\r\n \"\"\"\r\n\r\n\r\nif __name__ == \"__main__\":\r\n pass\r\n", "id": "2049592", "language": "Python", "matching_score": 2.8707377910614014, "max_stars_count": 0, "path": "abacus/abacus_out.py" }, { "content": "\"\"\"\r\nCreated on Mon Apr 23 14:30:24 2018\r\n\r\n@author: <NAME>\r\n\"\"\"\r\n\r\n\"\"\"\r\nfrom abacus_out import Abacus\r\nfrom create_input import AbacusInput\r\n__all__ = ['Abacus', 'AbacusInput']\r\n\"\"\"\r\n", "id": "10482294", "language": "Python", "matching_score": 0.32260099053382874, "max_stars_count": 0, "path": "abacus/__init__.py" }, { "content": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom monty.os import cd\r\n\r\nfrom core.structure import Crystal\r\n\r\n\r\nclass Inputs:\r\n def __init__(self, cur_dir):\r\n self.cur_dir = cur_dir\r\n\r\n def _read(self, filename, ftype):\r\n with cd(self.cur_dir):\r\n with open(filename, \"r\") as file:\r\n dat = file.read()\r\n\r\n return {ftype: dat}\r\n\r\n def get_KPT(self, ftype):\r\n _type = {\"relax\": \"KPOINTS_relax\", \"scf\": \"KPOINTS_scf\", \"band\": \"KPATH\"}\r\n return self._read(\"KPT\", _type.get(ftype))\r\n\r\n def get_structure(self, filename):\r\n with cd(self.cur_dir):\r\n return Crystal.matgen_structure_poscar_unopt(filename)\r\n\r\n def get_Input(self):\r\n return self._read(\"INPUT\", \"input\")\r\n\r\n def get_potential(self, filename):\r\n with cd(self.cur_dir):\r\n cy = Crystal(filename).stru\r\n potential = dict(zip(cy.atom_species, cy.atom_potential))\r\n return {\"potential_name\": potential}\r\n\r\n\r\nif __name__ == \"__main__\":\r\n _dir = r\"../test/icsd_ZnTe/Dos/Dos\"\r\n k = Inputs(_dir)\r\n print(k.get_KPT(ftype=\"scf\"))\r\n print(k.get_Input())\r\n print(k.get_structure(\"icsd_104196-Zn1Te1\"))\r\n print(k.get_potential(\"icsd_104196-Zn1Te1\"))\r\n", "id": "9118101", "language": "Python", "matching_score": 1.4966166019439697, "max_stars_count": 0, "path": "abacus_helper/core/inputs.py" }, { "content": "#!/usr/env/python3\n\nfrom main import *\nfrom pathlib import Path\nimport re\nimport os\nfrom subprocess import getoutput\n\nfrom monty.os import cd\nimport pymongo\n\nCOMPATH=Path(\"/WORK/nscc-gz_material_1/ICSD_vasp/abacus_calc/matgen_scf/completed\")\nERRORS = Path(\"/WORK/nscc-gz_material_1/ICSD_vasp/abacus_calc/matgen_scf/some_errors\")\n\n\ndef yield_stru(root):\n for i in root.rglob('*'):\n if len(i.parts) == len(root.parts) + 1:\n yield comp(i), i\n\ndef comp(path):\n scf_out = path / \"SCF_OUT.ABACUS\"\n stru = path / path.name\n if scf_out.exists():\n files = {\"stru\": stru, \"scf_log\": scf_out / \"running_scf.log\",\n \"path\": path, \"scf\": scf_out }\n return files\n\ndef get_energy(log, stru, mid):\n print(log)\n is_sim = False\n cmd1 = f\"grep \\'!FINAL_ETOT_IS\\' {log}\"\n total_e = float(getoutput(cmd1).split()[1])\n cmd2 = f\"grep \\'TOTAL ATOM NUMBER\\' {log}\"\n n = int(getoutput(cmd2).split()[-1])\n cmd3 = f\"grep \\'ntype\\' {log}\"\n ntype = int(getoutput(cmd3).split()[-1])\n cmd4 = f\"grep EFERMI {log}\"\n ef = getoutput(cmd4).split()[2]\n if ntype == 1:\n is_sim = True\n total_e_pa = total_e / n\n cmd5 = f\"grep \\'atom label for species\\' {log}\"\n syb = []\n lbs = getoutput(cmd5)\n for line in lbs.split(\"\\n\"):\n syb.append(line.split(\"=\")[-1].strip(' '))\n cmd6 = f\"grep \\'number of atom 
for this type\\' {log}\"\n ns = getoutput(cmd6)\n num = []\n for yl in ns.split('\\n'):\n num.append(int(yl.split(\"=\")[-1]))\n species = dict(zip(syb, num))\n\n return {\"id\":mid, \"energy\": total_e, \"epa\": total_e_pa, \"efermi\": ef,\"is_sim\": is_sim, \"symbol\": species} \n \n\ndef get_stru(stru_filepath):\n return get_structure_dat(stru_filepath)\n\ndef get_mag(scf_dir):\n return get_magnetism(scf_dir)\n\ndef get_band(band_dir, stru_filename, scf_log_filepath):\n return get_bandstructure(band_dir, stru_filename, scf_log_filepath)\n\n\ndef get_dos(dos_dir):\n return get_density_of_states(dos_dir)\n\ndef get_paras(calc_dir):\n kpt = get_kpt(calc_dir, \"scf\")\n kpath = get_kpt(calc_dir, \"band\")\n return kpt, kpath\n\n\ndef get_dat(raw):\n db, stru_id, _ = re.split(r\"[_|-]\", raw[\"stru\"].name)\n db += \"_id\"\n key = {db: int(stru_id)}\n #stru = get_stru(raw[\"stru\"])\n #stru.update(key)\n #band = get_band(raw[\"path\"], raw[\"stru\"].name, raw[\"scf_log\"])\n #band.update(key)\n #dos = get_dos(raw[\"path\"])\n #dos.update(key)\n #mag = get_mag(raw[\"path\"])\n #mag.update(key)\n #cif = get_optimized_cif(raw[\"stru\"])\n #cif.update(key)\n #return stru, mag, band, dos, cif, key\n e = get_energy(raw[\"scf_log\"], raw[\"stru\"], int(stru_id))\n e.update(key)\n return e, key\n\ndef goe(e, fe, k):\n r = {}\n r.update(k)\n r['efermi'] = e['efermi']\n r['energy'] = e['energy']\n r.update(fe)\n return r\n\n\ndef get_db():\n addr = \"12.11.70.140:10102\"\n client = pymongo.MongoClient(addr)\n db = client[\"abacus_data\"]\n return db\n\ndef upload_dat(db, *dat):\n stru, mag, band, dos, cif, key = dat\n stru_col = db[\"stru\"] \n band_col = db[\"bs_plotter\"]\n dos_col = db[\"dos_plotter\"]\n mag_col = db[\"mag\"]\n cif_col = db[\"cif\"]\n def _upload(col, data):\n exist = col.find_one(key)\n if exist is not None:\n col.update_one(exist, {'$set': data})\n else:\n col.insert_one(data)\n _upload(stru_col, stru)\n _upload(band_col, band)\n _upload(dos_col, dos)\n _upload(mag_col, mag)\n _upload(cif_col, cif)\n print(f\"upload {key} sucessed.\")\n\ndef upload_eng(db, dat):\n en_col = db[\"energy\"] \n def _upload(col, data):\n exist = col.find_one(key)\n if exist is not None:\n col.update_one(exist, {'$set': data})\n else:\n col.insert_one(data)\n _upload(en_col, dat)\n print(f\"upload sucessed.\")\n\n\ndef calcfe(item, s):\n if item['is_sim'] == 'True':\n syb = list(item['symbol'].keys())[0]\n te = float(item['energy'])\n s2_id = s[syb][1]\n\n if int(item[\"id\"]) == int(s2_id):\n foe = 0\n else:\n s2_e = s[syb][0]\n v = int(list(item['symbol'].values())[0])\n foe = (te - v * s2_e) / v\n else:\n syb = item['symbol']\n te = float(item['energy'])\n an = 0\n for a, v in syb.items():\n ie = int(v) * s[a][0]\n te -= ie\n an += int(v)\n foe = round(te / an, 4)\n\n #return f\"{item['id']}\\t{foe}\"\n return {\"formation_energy\": foe}\n\n\nif __name__ == \"__main__\":\n import json\n adb = get_db()\n print(adb, \" connected!\")\n c = 0\n with open(\"abacus_simple.json\", \"r\") as f:\n s = json.load(f)\n for calc, i in yield_stru(COMPATH):\n if calc is not None:\n try:\n res, key = get_dat(calc)\n foe = calcfe(res, s)\n except Exception as e:\n c += 1\n #os.system(f\"mv {i} {ERRORS}\")\n continue\n else:\n ans = goe(res, foe, key)\n print(ans)\n upload_eng(adb, ans)\n print(\"errors: \", c)\n \n \n \n", "id": "2580926", "language": "Python", "matching_score": 4.534357070922852, "max_stars_count": 0, "path": "postprocess/get_e.py" }, { "content": "#!/usr/bin/env python\r\n# -*- coding: utf-8 
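# Illustrative sketch, not from the original script: the formation-energy rule applied by
# calcfe() above, E_form = (E_total - sum_i n_i * E_ref_i) / sum_i n_i, shown on made-up
# numbers (the real per-element references come from abacus_simple.json).
refs = {"Zn": -1300.0, "Te": -2200.0}     # placeholder per-atom reference energies (eV)
composition = {"Zn": 1, "Te": 1}          # atom counts in the cell
total_energy = -3510.0                    # placeholder total energy (eV)

n_atoms = sum(composition.values())
e_form = (total_energy - sum(n * refs[el] for el, n in composition.items())) / n_atoms
print(round(e_form, 4))                   # -5.0 eV/atom for these made-up numbers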
-*-\r\n\r\nimport json\r\n\r\nfrom core.bandstructure import BandStructureSymmLine\r\nfrom core.dos import DensityOfStates\r\nfrom core.structure import Crystal\r\nfrom core.optimize import CellRelax, Scf\r\nfrom core.inputs import Inputs\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\n\r\ndef get_bandstructure(band_dir, stru_filename, scf_log_filepath):\r\n band = BandStructureSymmLine(band_dir, stru_filename, scf_log_filepath)\r\n band_data = band.get_bandstructure_using_matgen_old_fmt()\r\n # print(band_data)\r\n # band.plot()\r\n return band_data\r\n\r\n\r\ndef get_density_of_states(dos_dir):\r\n dos = DensityOfStates(dos_dir)\r\n dos_data = dos.get_dos_using_matgen_old_fmt()\r\n # print(dos_data)\r\n # dos.plot()\r\n return dos_data\r\n\r\n\r\ndef get_magnetism(scf_dir):\r\n mag_data = Scf(scf_dir).get_mag()\r\n return mag_data\r\n\r\n\r\ndef get_optimized_stru_filepath(relax_dir):\r\n return CellRelax(relax_dir).get_result()\r\n\r\n\r\ndef get_efermi(scf_dir):\r\n return Scf(scf_dir).get_efermi()\r\n\r\n\r\ndef get_energy(scf_dir):\r\n return Scf(scf_dir).get_energy()\r\n\r\n\r\ndef get_optimized_cif(stru_filepath):\r\n return Crystal(stru_filepath).matgen_structure_cif_opt()\r\n\r\n\r\ndef get_unoptimized_poscar(stru_filepath):\r\n return Crystal.matgen_structure_poscar_unopt(stru_filepath)\r\n\r\n\r\ndef get_structure_dat(stru_filepath):\r\n return Crystal(stru_filepath).matgen_structure_old_style()\r\n\r\n\r\ndef get_kpath(stru_filepath, n=20):\r\n return Crystal(stru_filepath).get_kpath(n)\r\n\r\n\r\ndef get_kpt(calc_dir, ktype):\r\n return Inputs(calc_dir).get_KPT(ktype)\r\n\r\n\r\ndef main():\r\n import argparse\r\n parser = argparse.ArgumentParser(\r\n description='Abacus Post-process tool',\r\n )\r\n parser.add_argument('-t', '--type')\r\n parser.add_argument('-d', '--dir')\r\n parser.add_argument('-s', '--stru')\r\n parser.add_argument('-l', '--log')\r\n parser.add_argument('-n', '--num', type=int, default=20)\r\n parser.add_argument('-k', '--ktype', choices=['relax', 'scf', 'band'])\r\n args = parser.parse_args()\r\n if args.type == 'sp':\r\n res = get_structure_dat(args.stru)\r\n elif args.type == 'band':\r\n res = get_bandstructure(args.dir, args.stru, args.log)\r\n elif args.type == 'dos':\r\n res = get_density_of_states(args.dir)\r\n elif args.type == 'mag':\r\n res = get_magnetism(args.dir)\r\n elif args.type == 'energy':\r\n res = get_energy(args.dir)\r\n elif args.type == 'efermi':\r\n res = get_efermi(args.dir)\r\n elif args.type == 'cif':\r\n res = get_optimized_cif(args.stru)\r\n elif args.type == 'poscar':\r\n res = get_unoptimized_poscar(args.stru)\r\n elif args.type == 'opt':\r\n res = get_optimized_stru_filepath(args.dir)\r\n elif args.type == 'kpath':\r\n res = get_kpath(args.stru, args.num)\r\n elif args.type == 'kpts':\r\n res = get_kpt(args.dir, args.ktype)\r\n else:\r\n raise TypeError\r\n\r\n \r\n print(json.dumps(res))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n \"\"\"\r\n # BandStructure\r\n stru = \"icsd_9852-Ti2O4\"\r\n band_spin1 = r\"./test/icsd_9852-Ti2O4/SPIN1\"\r\n spin1_scf_log = r\"./test/icsd_9852-Ti2O4/SPIN1/OUT.ABACUS/running_scf.log\"\r\n band_spin2 = r\"./test/icsd_9852-Ti2O4/SPIN2\"\r\n spin2_scf_log = r\"./test/icsd_9852-Ti2O4/SPIN2/OUT.ABACUS/running_scf.log\"\r\n # x = get_bandstructure(band_spin1, stru, spin1_scf_log)\r\n # y = get_bandstructure(band_spin2, stru, spin2_scf_log)\r\n # DOS\r\n # spin1\r\n spin1 = r\"./test/icsd_23076-Sr1Ti1O3/DOS_SPIN1/\"\r\n # s1 = get_density_of_states(spin1)\r\n spin2 = 
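# Illustrative usage notes, not from the original script: example invocations of the
# argparse front-end above (directories and file names are placeholders; -t selects
# which get_* helper runs and each call prints one JSON document on stdout).
#   python main.py -t sp   -s ./SPIN1/icsd_9852-Ti2O4
#   python main.py -t band -d ./SPIN1 -s icsd_9852-Ti2O4 -l ./SPIN1/OUT.ABACUS/running_scf.log
#   python main.py -t dos  -d ./DOS_SPIN1
#   python main.py -t kpts -d ./calc_dir -k scf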
r\"./test/icsd_23076-Sr1Ti1O3/DOS_SPIN2/\"\r\n # s2 = get_density_of_states(spin2)\r\n # Magnetism\r\n sd = r\"./test/icsd_23076-Sr1Ti1O3/SCF\"\r\n m = get_magnetism(sd)\r\n print(m)\r\n e = get_energy(sd)\r\n print(e)\r\n stru_fp = band_spin1 + '/' + stru\r\n cif = get_optimized_cif(stru_fp)\r\n print(cif)\r\n un_opt_poscar = get_unoptimized_poscar(stru_fp)\r\n print(un_opt_poscar)\r\n sp = get_structure_dat(stru_fp)\r\n print(sp)\r\n \"\"\"\r\n main()\r\n", "id": "6330854", "language": "Python", "matching_score": 3.5395262241363525, "max_stars_count": 0, "path": "abacus_helper/main.py" }, { "content": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom os.path import *\r\nfrom utils.output import (\r\n get_energy, get_efermi, get_total_magnetism,\r\n get_bandgap, get_final_structure\r\n)\r\n\r\n\r\nclass CellRelax:\r\n def __init__(self, cell_relax_dir):\r\n self._relax_dir = join(cell_relax_dir, \"OUT.ABACUS\")\r\n\r\n def get_result(self) -> str:\r\n return get_final_structure(self._relax_dir)\r\n\r\n\r\nclass Relax(CellRelax):\r\n def __init__(self, relax_dir):\r\n super(Relax, self).__init__(relax_dir)\r\n\r\n\r\nclass Scf:\r\n def __init__(self, scf_dir):\r\n #self._scf_log = join(scf_dir, \"OUT.ABACUS/running_scf.log\")\r\n self._scf_log = join(scf_dir, \"SCF_OUT.ABACUS/running_scf.log\")\r\n\r\n def get_efermi(self):\r\n return {\"efermi\": get_efermi(self._scf_log)}\r\n\r\n def get_energy(self):\r\n return {\"final\": get_energy(self._scf_log)}\r\n\r\n def get_mag(self):\r\n return {\"total\": get_total_magnetism(self._scf_log)}\r\n\r\n def get_bandgap(self):\r\n return get_bandgap(self._scf_log)\r\n\r\n @property\r\n def log(self):\r\n return self._scf_log\r\n\r\n\r\nif __name__ == \"__main__\":\r\n pass\r\n\r\n", "id": "5326780", "language": "Python", "matching_score": 1.8026962280273438, "max_stars_count": 0, "path": "postprocess/core/optimize.py" }, { "content": "#!~/software/anaconda3/bin/python\n\nimport os\nimport sys\n\nargs =sys.argv\n\nSTRUPATH=args[1]\nCALC_PATH = args[2]\n\nwith open('results.dat', 'w') as f:\n f.write('{0:<25s} {1:<25s} {2:<25s} {3:<25s}\\n'.format(\n '# StructureName',\n 'Energy(eV)',\n 'Magnetism(Bohr mag/cell)',\n 'Band gap(eV)'))\n\n\nfor root, dirs, files in os.walk(top=STRUPATH, topdown=True):\n for file in files:\n nm = file.split('.')[0]\n name = os.path.join(CALC_PATH, nm)\n print(name)\n #cmd1 = (\"grep '!FINAL_ETOT_IS' \" + name + \"/OUT.ABACUS/running_scf.log | awk '{print $2} ' > tmpE\") \n cmd1 = (\"grep '!FINAL_ETOT_IS' \" + name + \"/SCF_OUT.ABACUS/running_scf.log | awk '{print $2} ' > tmpE\") \n cmd2 = (\"grep 'total magnetism' \" + name + \n \"/OUT.ABACUS/running_scf.log | tail -n 1 | awk '{print $6}' > tmpMag\")\n cmd3 = (\"grep 'Band Gap' \" + name + \"/OUT.ABACUS/running_scf.log | awk '{print $6} ' > tmpBandGap\") \n os.system(cmd1)\n os.system(cmd2)\n os.system(cmd3)\n with open('tmpE', 'r') as f:\n ele = f.readline()\n if(ele == ''):\n energy = 'NULL'\n else:\n energy = ele.split()[0]\n with open('tmpMag', 'r') as f:\n ele = f.readline()\n if (ele == ''):\n mag = 'NULL'\n else:\n mag = ele.split()[0]\n with open('tmpBandGap', 'r') as f:\n ele = f.readline()\n if (ele == ''):\n gap = 'NULL'\n else:\n gap = ele.split()[0]\n\n with open('results.dat', 'a') as f:\n f.write('{0:<25s} {1:<25s} {2:<25s} {3:<25s}\\n'.format(nm, energy, mag, gap))\n", "id": "9828660", "language": "Python", "matching_score": 1.010555624961853, "max_stars_count": 0, "path": "getRes.py" }, { "content": "#!/bin/env/python\nwl = []\nwith 
open(r\"./results.dat\", \"r\") as f:\n for dat in f.readlines():\n dat_line = dat.split()\n want = dat_line[0] + ' ' + dat_line[-1]\n wl.append(want)\n\nprint(wl)\n\nwith open(r\"./abacus.bandgap\", \"w\") as abg:\n for item in wl[1:]:\n abg.write(item + '\\n')\n", "id": "3795358", "language": "Python", "matching_score": 0.6688156127929688, "max_stars_count": 0, "path": "_band.py" }, { "content": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport re\r\nimport os\r\nimport numpy as np\r\n\r\nfrom monty.io import reverse_readfile\r\n\r\nfrom ._base import Spin\r\n\r\n\r\ndef read_statinfo(filename):\r\n with open(filename, \"r\") as info:\r\n lines = [i.strip() for i in info.readlines()]\r\n rgx = re.compile(r\"\\s\\s+\")\r\n titles = rgx.split(lines[0])\r\n if len(titles) == 7:\r\n ispin = True\r\n else:\r\n ispin = False\r\n _energy, kpoints, occupation = [], [], []\r\n nkpt, _n = 0, 0\r\n for line in lines:\r\n if line.startswith(\"BAND\"):\r\n head_lst = rgx.split(line)\r\n kpt = np.asarray(\r\n head_lst[-1].strip('(').strip(')').split(), dtype=float\r\n )\r\n kpoints.append(kpt)\r\n nkpt += 1\r\n else:\r\n if line:\r\n _n += 1\r\n lst = rgx.split(line)\r\n _energy.append(\r\n np.asarray(lst, dtype=float)\r\n )\r\n\r\n nbands = int(_n / nkpt)\r\n energy = [_energy[i: i + nbands] for i in range(0, _n, nbands)]\r\n\r\n return _merge(energy, ispin, nkpt, nbands)\r\n\r\n\r\ndef _merge(energy, ispin, nkpt, nbands, up_index=(1, 2), down_index=(3, 4)):\r\n if ispin:\r\n eigenvalues = {\r\n Spin.up: np.zeros((nkpt, nbands, 2)),\r\n Spin.down: np.zeros((nkpt, nbands, 2)),\r\n }\r\n else:\r\n eigenvalues = {Spin.up: np.zeros((nkpt, nbands, 2))}\r\n\r\n ikpt = -1\r\n a, b = up_index\r\n c, d = down_index\r\n for sl in energy:\r\n ikpt += 1\r\n for i in range(nbands):\r\n tmp = sl[i]\r\n if len(tmp) == 3:\r\n eigenvalues[Spin.up][ikpt, i, 0] = tmp[a]\r\n eigenvalues[Spin.up][ikpt, i, 1] = tmp[b]\r\n else:\r\n eigenvalues[Spin.up][ikpt, i, 0] = tmp[a]\r\n eigenvalues[Spin.up][ikpt, i, 1] = tmp[b]\r\n eigenvalues[Spin.down][ikpt, i, 0] = tmp[c]\r\n eigenvalues[Spin.down][ikpt, i, 1] = tmp[d]\r\n\r\n return eigenvalues\r\n\r\n\r\ndef read_pdos(filename):\r\n energy_values, energy_flag = [], False\r\n orbital_values, orbital_flag = [], False\r\n atom_pdos, atom_pdos_flag = [], False\r\n index, atom_index, species, l, m, z = [None, ] * 6\r\n\r\n def clean(eqs):\r\n _, val = eqs.split('=')\r\n res = \"\".join(val.split()).strip(\"\\\"\")\r\n return int(res) if res.isdigit() else res\r\n\r\n pdos_dat = []\r\n with open(filename, \"r\") as f:\r\n for i in f:\r\n if \"<energy_values units=\\\"eV\\\">\" in i:\r\n energy_flag = True\r\n continue\r\n if \"</energy_values>\" in i:\r\n energy_flag = False\r\n if energy_flag:\r\n energy_values.append(i.strip())\r\n if \"<orbital\" in i:\r\n orbital_flag = True\r\n continue\r\n if \"</orbital>\" in i:\r\n orbital_flag = False\r\n if orbital_flag:\r\n if \"index\" in i and \"atom\" not in i:\r\n index = clean(i)\r\n if \"atom_index\" in i:\r\n atom_index = clean(i)\r\n if \"species\" in i:\r\n species = clean(i)\r\n if 'l' in i:\r\n l = clean(i)\r\n if 'm' in i:\r\n m = clean(i)\r\n if 'z' in i:\r\n z = clean(i)\r\n if \"<data>\" in i:\r\n atom_pdos_flag = True\r\n continue\r\n if \"</data>\" in i:\r\n atom_pdos_flag = False\r\n ipdos = {\r\n \"index\": index, \"atom_index\": atom_index, \"species\": species,\r\n \"l\": l, \"m\": m, \"z\": z, \"pdos\": np.asarray(atom_pdos, dtype=float)\r\n }\r\n pdos_dat.append(ipdos)\r\n atom_pdos = []\r\n if 
atom_pdos_flag:\r\n atom_pdos.append(i.strip().split())\r\n return pdos_dat, np.asarray(energy_values, dtype=float)\r\n\r\n\r\ndef read_orbital(filename):\r\n dats = []\r\n with open(filename, \"r\") as orbital:\r\n for i in orbital:\r\n line = [int(i) if i.isdigit() else i for i in i.strip().split()]\r\n if not line:\r\n break\r\n dats.append(line)\r\n return dats[1:]\r\n\r\n\r\ndef read_tdos(filename):\r\n dat = []\r\n with open(filename, \"r\") as tdos:\r\n for i in tdos:\r\n dat.append(\r\n np.asarray(i.strip().split(), dtype=float)\r\n )\r\n return np.asarray(dat)\r\n\r\n\r\ndef get_efermi(running_scf_log):\r\n for i in reverse_readfile(running_scf_log):\r\n if \"EFERMI\" in i:\r\n return round(\r\n float(i.strip().split('=')[-1].split()[0]), 4\r\n )\r\n\r\n\r\ndef get_bandgap(running_scf_log):\r\n for i in reverse_readfile(running_scf_log):\r\n if \"Band Gap\" in i:\r\n return round(\r\n float(i.strip().split('=')[-1].split()[0]), 4\r\n )\r\n\r\n\r\ndef get_energy(running_scf_log):\r\n for i in reverse_readfile(running_scf_log):\r\n if \"!FINAL_ETOT_IS\" in i:\r\n return round(\r\n float(\r\n i.strip().split()[1]\r\n ), 4\r\n )\r\n\r\n\r\ndef get_total_magnetism(running_scf_log):\r\n for i in reverse_readfile(running_scf_log):\r\n if \"total magnetism\" in i:\r\n return round(\r\n float(\r\n i.strip().split('=')[-1]\r\n ), 4\r\n )\r\n\r\n\r\ndef get_final_structure(out_dir):\r\n struct = sorted(\r\n [i for i in os.listdir(out_dir) if \"STRU_ION\" in i]\r\n )[-1]\r\n return os.path.join(out_dir, struct)\r\n\r\n\r\ndef read_band_dat(bands_x):\r\n bands = np.loadtxt(bands_x)\r\n _, k = bands.shape\r\n lb = np.asarray(list(range(1, k)))\r\n all_dat = []\r\n for item in bands:\r\n eig_val = np.vstack([lb, item[1:], [0, ] * (k - 1)])\r\n for j in eig_val.T:\r\n all_dat.append(j)\r\n return all_dat\r\n\r\n\r\nclass BandNscfLog:\r\n def __init__(self, filename):\r\n evf = False\r\n self._frac_kpts = []\r\n # self._eigenvalue = []\r\n self._kpoints = []\r\n self._nbands, self._nspin, self._nkstot, self._vbm, self._cbm, self._bandgap = [None, ] * 6\r\n with open(filename, \"r\") as nscf:\r\n for i in nscf:\r\n if \"NBANDS\" in i:\r\n self._nbands = int(i.strip().split('=')[-1])\r\n if \"SETUP K-POINTS\" in i:\r\n self._nspin = int(nscf.readline().strip('\\n').split('=')[-1].strip())\r\n self._nkstot = int(nscf.readline().strip('\\n').split('=')[-1].strip())\r\n for _ in range(2): nscf.readline()\r\n for _ in range(self._nkstot):\r\n self._frac_kpts.append(\r\n np.asarray(nscf.readline().strip().split()[1:4], dtype=float)\r\n )\r\n\r\n if \"band eigenvalue in this processor\" in i:\r\n evf = True\r\n\r\n if evf:\r\n if \"k-points\" in i:\r\n kpt = np.asarray(i.strip().split(':')[-1].split(), dtype=float)\r\n self._kpoints.append(kpt)\r\n elif \"final_state\" in i:\r\n # ev = np.asarray(i.strip().split()[1:], dtype=float)\r\n # self._eigenvalue.append(ev)\r\n continue\r\n else:\r\n if \"Valence Band maximum is (eV):\" in i:\r\n self._vbm = float(i.strip().split('=')[-1])\r\n if \"Conduction Band minimum\" in i:\r\n self._cbm = float(i.strip().split('=')[-1])\r\n if \"Band Gap is\" in i:\r\n self._bandgap = float(i.strip().split('=')[-1])\r\n\r\n @property\r\n def nkpt(self):\r\n return self._nkstot\r\n\r\n @property\r\n def cbm(self):\r\n return self._cbm\r\n\r\n @property\r\n def vbm(self):\r\n return self._vbm\r\n\r\n @property\r\n def bandgap(self):\r\n return self._bandgap\r\n\r\n @property\r\n def nbands(self):\r\n return self._nbands\r\n\r\n @property\r\n def ispin(self):\r\n return 
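# Illustrative sketch, not from the original module: the helpers above reverse-read an
# ABACUS running_scf.log. The import path and log layout are taken from the companion
# postprocess/core/optimize.py; the path below is a placeholder.
from utils.output import get_energy, get_efermi, get_total_magnetism, get_bandgap

log = "SCF_OUT.ABACUS/running_scf.log"    # placeholder log path
print(get_energy(log), get_efermi(log), get_total_magnetism(log), get_bandgap(log))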
self._nspin == 2\r\n\r\n def _get_cart_kpoints(self):\r\n if not self.ispin:\r\n return self._kpoints\r\n k = int(len(self._kpoints) / 2)\r\n\r\n return self._kpoints[:k]\r\n\r\n @property\r\n def frac_kpoints(self):\r\n return self._frac_kpts\r\n\r\n @property\r\n def cart_kpoints(self):\r\n return self._get_cart_kpoints()\r\n\r\n def get_eigenval(self, eigenvalue):\r\n n = self.nbands * self._nkstot\r\n if self.ispin:\r\n up, down = eigenvalue[:n], eigenvalue[n:]\r\n _eig = np.hstack([up, down])\r\n else:\r\n _eig = eigenvalue\r\n eig = [_eig[i: i + self.nbands] for i in range(0, n, self.nbands)]\r\n\r\n eigenval = _merge(eig, self.ispin, self._nkstot, self.nbands,\r\n up_index=(1, 2), down_index=(4, 5))\r\n return eigenval\r\n\r\n\r\nif __name__ == \"__main__\":\r\n pass\r\n", "id": "355242", "language": "Python", "matching_score": 4.237648010253906, "max_stars_count": 0, "path": "postprocess/utils/output.py" }, { "content": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport numpy as np\r\nimport os\r\n\r\nfrom utils.stru import Stru\r\nfrom utils.kpt import Kpt\r\nfrom utils.output import BandNscfLog, get_efermi, read_band_dat\r\nfrom utils._base.spin import Spin\r\n\r\n# np.set_printoptions(precision=5)\r\n\r\n\r\nclass BandStructureSymmLine:\r\n def __init__(self, band_dir, stru_filename, scf_log):\r\n stru_filepath = os.path.join(band_dir, stru_filename)\r\n #self.out_dir = os.path.join(band_dir, \"OUT.ABACUS\")\r\n self.out_dir = os.path.join(band_dir, \"BAND_OUT.ABACUS\")\r\n self.lattice = Stru.from_stru(stru_filepath).lattice.reciprocal_lattice()\r\n self._b1 = os.path.join(self.out_dir, \"BANDS_1.dat\")\r\n #kpt_filepath = os.path.join(band_dir, \"KPT\")\r\n kpt_filepath = os.path.join(band_dir, \"BAND_KPT\")\r\n coords, self.ikpt, lbs, *_ = Kpt.from_kpt(kpt_filepath)\r\n self.kpoints = []\r\n labels_dict = dict(zip(lbs, coords))\r\n nscf_log = os.path.join(self.out_dir, \"running_nscf.log\")\r\n self.log = BandNscfLog(nscf_log)\r\n self.scf_log = scf_log\r\n _kpt = self.log.frac_kpoints\r\n for k in _kpt:\r\n # let see if this kpoint has been assigned a label\r\n label = None\r\n for c in labels_dict:\r\n c_val = labels_dict[c]\r\n if isinstance(c_val, Kpt):\r\n c_val = c_val.frac_coords\r\n if np.linalg.norm(k - c_val) < 0.0001:\r\n label = c\r\n labels_dict[label] = Kpt(\r\n k,\r\n self.lattice,\r\n label=label,\r\n coords_are_cartesian=False,\r\n )\r\n self.kpoints.append(\r\n Kpt(\r\n k, self.lattice, label=label, coords_are_cartesian=False\r\n )\r\n )\r\n\r\n self.high_kpts, self.distance = self._get_hk()\r\n\r\n def _get_hk(self):\r\n distance = []\r\n tick_distance = []\r\n tick_labels = []\r\n previous_kpoint = self.kpoints[0]\r\n previous_distance = 0.0\r\n previous_label = self.kpoints[0].label\r\n for i in range(len(self.kpoints)):\r\n label = self.kpoints[i].label\r\n if label is not None and previous_label is not None:\r\n distance.append(previous_distance)\r\n else:\r\n distance.append(\r\n np.linalg.norm(\r\n self.kpoints[i].cart_coords - previous_kpoint.cart_coords\r\n )\r\n + previous_distance\r\n )\r\n previous_kpoint = self.kpoints[i]\r\n previous_distance = distance[i]\r\n previous_label = label\r\n if label:\r\n tick_distance.append(distance[i])\r\n tick_labels.append(label)\r\n\r\n high_kpts = {'distance': tick_distance, 'label': tick_labels}\r\n\r\n return high_kpts, distance\r\n\r\n @property\r\n def bandgap(self):\r\n return self.log.bandgap\r\n\r\n @property\r\n def cbm(self):\r\n return self.log.cbm\r\n\r\n @property\r\n def 
vbm(self):\r\n return self.log.vbm\r\n\r\n @property\r\n def ispin(self):\r\n return self.log.ispin\r\n\r\n @property\r\n def nbands(self):\r\n return self.log.nbands\r\n\r\n def _read_eigenval(self):\r\n b1d = read_band_dat(self._b1)\r\n if self.ispin:\r\n self._b2 = os.path.join(self.out_dir, \"BANDS_2.dat\")\r\n b2d = read_band_dat(self._b2)\r\n b1d.extend(b2d)\r\n\r\n return np.asarray(b1d)\r\n\r\n @property\r\n def bands(self):\r\n return self.log.get_eigenval(self._read_eigenval())\r\n\r\n @property\r\n def nkpt(self):\r\n return self.log.nkpt\r\n\r\n @property\r\n def efermi(self):\r\n return get_efermi(self.scf_log)\r\n\r\n def is_metal(self, efermi_tol=1e-4):\r\n \"\"\"\r\n Check if the band structure indicates a metal by looking if the fermi\r\n level crosses a band.\r\n\r\n Returns:\r\n True if a metal, False if not\r\n \"\"\"\r\n band = []\r\n for i in range(self.nbands):\r\n for item in self.bands[Spin.up]:\r\n band.append(item[i][0])\r\n tmp = np.asarray(band)\r\n band = []\r\n if np.any(tmp < -efermi_tol) and np.any(\r\n tmp > efermi_tol\r\n ):\r\n return True\r\n return False\r\n\r\n def get_bandstructure_using_matgen_old_fmt(self):\r\n energy_data = {}\r\n if self.ispin:\r\n labels = [\"Wave_vector\", \"spin_up\", \"spin_down\"]\r\n up, down = self.bands[Spin.up], self.bands[Spin.down]\r\n for band_index in range(self.nbands):\r\n key = f\"band_index_{band_index + 1}\"\r\n vals = []\r\n for i in range(self.nkpt):\r\n up_band_dat = up[i][band_index]\r\n down_band_dat = down[i][band_index]\r\n vals.append([self.distance[i], up_band_dat[0], down_band_dat[0]])\r\n energy_data.update({key: vals})\r\n else:\r\n labels = [\"Wave_vector\", \"Energy_level\"]\r\n up = self.bands[Spin.up]\r\n for band_index in range(self.nbands):\r\n key = f\"band_index_{band_index + 1}\"\r\n vals = []\r\n for i in range(self.nkpt):\r\n up_band_dat = up[i][band_index]\r\n vals.append([self.distance[i], up_band_dat[0]])\r\n energy_data.update({key: vals})\r\n kpath = {'High_Kpoints_labels': self.high_kpts['label'],\r\n 'High_Kpoints_coordinates': self.high_kpts['distance']}\r\n\r\n return {'Band_Structure': {\r\n 'Band_Gap': self.bandgap if not self.is_metal() else 0.0,\r\n 'Energy_data_labels': labels,\r\n 'Energy_data': energy_data,\r\n 'Spin_state': self.ispin,\r\n 'Hk_points': kpath}}\r\n\r\n def plot(self):\r\n import matplotlib.pyplot as plt\r\n band_data = self.get_bandstructure_using_matgen_old_fmt()\r\n bd_lbs = band_data.get('Band_Structure').get('Energy_data_labels')\r\n bd_arrary = band_data.get('Band_Structure').get('Energy_data')\r\n hklbs = band_data.get('Band_Structure').get('Hk_points')\r\n lbcoord = hklbs.get('High_Kpoints_coordinates')\r\n plt.title(\"Band_Structure\")\r\n plt.xlabel(\"Wave_vector\")\r\n plt.ylabel(\"Energy(eV)\")\r\n plt.ylim(-15, 15)\r\n for bandi, bandd in bd_arrary.items():\r\n if len(bd_lbs) == 3:\r\n x, y1, y2 = np.asarray(bandd).T[0], np.asarray(bandd).T[1], np.asarray(bandd).T[2]\r\n plt.plot(x, y1, color='black')\r\n plt.plot(x, y2, color='blue')\r\n else:\r\n x, y = np.asarray(bandd).T[0], np.asarray(bandd).T[1]\r\n plt.plot(x, y, color='blue')\r\n for k, i in enumerate(lbcoord):\r\n plt.axvline(i, color='red')\r\n plt.show()\r\n # print(self.is_metal())\r\n\r\n\r\nif __name__ == \"__main__\":\r\n pass\r\n\r\n", "id": "1445281", "language": "Python", "matching_score": 2.4361047744750977, "max_stars_count": 0, "path": "postprocess/core/bandstructure.py" }, { "content": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport numpy as np\r\nimport 
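# Illustrative sketch, not from the original module: building the BandStructureSymmLine
# object above and pulling the matgen-style dictionary. The import path follows
# abacus_helper/main.py and all directory/file names are placeholders.
from core.bandstructure import BandStructureSymmLine

band = BandStructureSymmLine(
    band_dir="./icsd_9852-Ti2O4/SPIN1",                            # placeholder band run dir
    stru_filename="icsd_9852-Ti2O4",                               # placeholder STRU file name
    scf_log="./icsd_9852-Ti2O4/SPIN1/OUT.ABACUS/running_scf.log",  # placeholder scf log
)
data = band.get_bandstructure_using_matgen_old_fmt()
print(data["Band_Structure"]["Band_Gap"])
# band.plot()   # optional matplotlib view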
os\r\nfrom functools import reduce\r\n\r\nfrom utils.output import read_tdos, read_pdos, read_orbital\r\n\r\n# np.set_printoptions(precision=4)\r\n\r\n\r\nclass DensityOfStates:\r\n def __init__(self, dos_dir):\r\n out_dir = os.path.join(dos_dir, \"OUT.ABACUS\")\r\n self.out = out_dir\r\n self._fp = os.path.join(self.out, \"PDOS\")\r\n self._orb = os.path.join(self.out, \"Orbital\")\r\n self.ispin = self._get_spin()\r\n\r\n def _get_spin(self):\r\n with open(self._fp, \"r\") as f:\r\n for i in f:\r\n if \"nspin\" in i:\r\n spin = int(i.strip().replace(\"<nspin>\", '').replace(\"</nspin>\", ''))\r\n if spin == 1:\r\n return False\r\n return True\r\n\r\n def _get_tdos(self):\r\n dos1_smearing = os.path.join(self.out, \"DOS1_smearing.dat\")\r\n if self.ispin:\r\n self.tdos_label = [\"energy\", \"dos_up\", \"dos_down\", \"int_dos_up\", \"int_dos_down\"]\r\n dos2_smearing = os.path.join(self.out, \"DOS2_smearing.dat\")\r\n up, down = read_tdos(dos1_smearing), read_tdos(dos2_smearing)\r\n energy = up[:, 0]\r\n up_dos_dat = up[:, 1]\r\n up_int_dos_dat = up[:, 2]\r\n down_dos_dat = down[:, 1] * -1\r\n down_int_dos_dat = down[:, 2]\r\n tdos = np.vstack([energy, up_dos_dat, down_dos_dat, up_int_dos_dat, down_int_dos_dat])\r\n return tdos.T\r\n else:\r\n self.tdos_label = [\"energy\", \"dos\", \"int_dos\"]\r\n tdos_data = read_tdos(dos1_smearing)\r\n return tdos_data\r\n\r\n def _get_matgen_tdos_old_style(self):\r\n tdos_dat = self._get_tdos()\r\n return {\r\n \"TDOS\": {\r\n \"tdos_labels\": self.tdos_label,\r\n \"tdos_data\": tdos_dat}\r\n\r\n }\r\n\r\n def _get_orb(self):\r\n _orb = read_orbital(self._orb)\r\n info = {}\r\n for i in _orb:\r\n io, spec, l, m, z, sym = i\r\n if info.get(spec) is None:\r\n info[spec] = [(l, m, z, sym)]\r\n else:\r\n _info = info.get(spec)\r\n _info.append((l, m, z, sym))\r\n info[spec] = _info\r\n\r\n return info\r\n\r\n def _get_pdos(self):\r\n dat, energy = read_pdos(self._fp)\r\n orbs = self._get_orb()\r\n pdos_dict = {}\r\n for item in dat:\r\n spec = item[\"species\"]\r\n ai = item[\"atom_index\"]\r\n name = spec + \"_\" + str(ai)\r\n l, m, z = list(map(item.get, ['l', 'm', 'z']))\r\n spec_orb = orbs[spec]\r\n pdos = item[\"pdos\"]\r\n for i in spec_orb:\r\n if (l, m, z) == i[:3]:\r\n sym = i[3]\r\n if pdos_dict.get(name) is None:\r\n pdos_dict[name] = [[(l, m, z, sym), pdos]]\r\n else:\r\n pdos_dict[name].append([(l, m, z, sym), pdos])\r\n\r\n return pdos_dict, energy\r\n\r\n def _get_matgen_pdos_old_style(self):\r\n tmp, energy = self._get_pdos()\r\n pd = {}\r\n for k, v in tmp.items():\r\n m = {}\r\n for item in v:\r\n sym, pdos = item\r\n lb = sym[-1][0]\r\n if m.get(lb) is None:\r\n m[lb] = pdos\r\n else:\r\n m[lb] += pdos\r\n pd[k] = m\r\n _plus_pd = {}\r\n for m, n in pd.items():\r\n spec = m.split('_')[0]\r\n if _plus_pd.get(spec) is None:\r\n _plus_pd[spec] = n\r\n else:\r\n for orb, orb_pd in n.items():\r\n _plus_pd[spec][orb] += orb_pd\r\n\r\n def add(x, y):\r\n return x + y\r\n\r\n def _vstack(x, y):\r\n return np.vstack([x, y])\r\n\r\n pdos_dict = {}\r\n for element, pd in _plus_pd.items():\r\n up_lst, down_list = [], []\r\n pdos_label = list(pd.keys())\r\n pdos_label.append(\"total\")\r\n pdos_label.insert(0, \"Energy(eV)\")\r\n if self.ispin:\r\n up_name = element + \"_up\"\r\n down_name = element + \"_down\"\r\n for orb, ipd in pd.items():\r\n up, down = ipd[:, 0], ipd[:, 1]\r\n up_lst.append(up)\r\n down_list.append(down * -1)\r\n total_up = reduce(add, up_lst)\r\n total_down = reduce(add, down_list)\r\n up_lst.append(total_up)\r\n 
up_lst.insert(0, energy)\r\n down_list.append(total_down)\r\n down_list.insert(0, energy)\r\n up_data = reduce(_vstack, up_lst).T\r\n down_data = reduce(_vstack, down_list).T\r\n up_dict = {up_name: {\"pdos_label\": pdos_label, \"pdos_data\": up_data}}\r\n down_dict = {down_name: {\"pdos_label\": pdos_label, \"pdos_data\": down_data}}\r\n pdos_dict.update(up_dict)\r\n pdos_dict.update(down_dict)\r\n else:\r\n for orb, ipd in pd.items():\r\n up_lst.append(ipd[:, 0])\r\n total_up = reduce(add, up_lst)\r\n up_lst.append(total_up)\r\n up_lst.insert(0, energy)\r\n up_data = reduce(_vstack, up_lst).T\r\n up_dict = {element: {\"pdos_label\": pdos_label, \"pdos_data\": up_data}}\r\n pdos_dict.update(up_dict)\r\n\r\n return {\"PDOS\": pdos_dict}\r\n\r\n def get_dos_using_matgen_old_fmt(self):\r\n tdos = self._get_matgen_tdos_old_style()\r\n pdos = self._get_matgen_pdos_old_style()\r\n\r\n tdos.update(pdos)\r\n return {\"Density_of_states\": tdos}\r\n\r\n def plot(self):\r\n import matplotlib.pyplot as plt\r\n dos_data = self.get_dos_using_matgen_old_fmt()\r\n pdos_data = dos_data.get('Density_of_states').get('PDOS')\r\n all_atoms = list(pdos_data.keys())\r\n for at in all_atoms:\r\n plt.xlabel(\"Energy(eV)\")\r\n plt.ylabel(\"electrons/eV\")\r\n plt.title(at.split('_')[0] + \"_Density_of_states\")\r\n dlb = pdos_data.get(at).get('pdos_label')\r\n dd = pdos_data.get(at).get('pdos_data')\r\n Energy = dd.T[0]\r\n for i in range(1, len(dlb)):\r\n plt.plot(Energy, dd.T[i])\r\n if at.split('_')[-1] == 'up':\r\n continue\r\n plt.show()\r\n plt.title(\"Total_Density_of_states\")\r\n plt.xlabel(\"Energy(eV)\")\r\n plt.ylabel(\"electrons/eV\")\r\n tdos_data = dos_data.get('Density_of_states').get('TDOS')\r\n tlb = tdos_data.get('tdos_labels')\r\n td = tdos_data.get('tdos_data')\r\n if len(tlb) > 3:\r\n Energy, dos_up, dos_down = td.T[:3]\r\n plt.plot(Energy, dos_up, color='red')\r\n plt.plot(Energy, dos_down, color='blue')\r\n else:\r\n Energy, dos_up = td.T[:2]\r\n plt.plot(Energy, dos_up, color='red')\r\n plt.show()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n pass\r\n", "id": "6112024", "language": "Python", "matching_score": 0.9293013215065002, "max_stars_count": 0, "path": "abacus_helper/core/dos.py" }, { "content": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom enum import unique, Enum\r\n\r\n\r\n@unique\r\nclass Spin(Enum):\r\n \"\"\"\r\n Enum type for Spin. 
Only up and down.\r\n Usage: Spin.up, Spin.down.\r\n \"\"\"\r\n\r\n up, down = (1, -1)\r\n\r\n def __int__(self):\r\n return self.value\r\n\r\n def __float__(self):\r\n return float(self.value)\r\n\r\n def __str__(self):\r\n return str(self.value)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n pass\r\n", "id": "1072901", "language": "Python", "matching_score": 0.3865208923816681, "max_stars_count": 0, "path": "postprocess/utils/_base/spin.py" }, { "content": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\nimport json\r\nimport os\r\n\r\nfrom .spin import Spin\r\nfrom .latt import Latt\r\n\r\n_fn = os.path.join(\r\n os.path.dirname(__file__), \"elements.json\"\r\n)\r\nwith open(_fn, \"r\") as ele:\r\n periodic_table = json.load(ele)\r\n\r\norder_table = {v: k for k, v in periodic_table.items()}\r\n\r\nif __name__ == \"__main__\":\r\n pass\r\n", "id": "6851932", "language": "Python", "matching_score": 1.1703002452850342, "max_stars_count": 0, "path": "postprocess/utils/_base/__init__.py" }, { "content": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport os\r\nimport json\r\n\r\nsettings = {}\r\njs = [os.path.join(os.path.dirname(__file__), i)\r\n for i in os.listdir(os.path.dirname(__file__))\r\n if os.path.splitext(i)[-1] == '.json']\r\n\r\nfor file in js:\r\n with open(file, \"r\") as f:\r\n js_obj = json.load(f)\r\n settings[os.path.basename(file).replace('.json', '')] = js_obj\r\n\r\n\r\nif __name__ == \"__main__\":\r\n pass\r\n", "id": "8799359", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "mat2d_pkg/para/__init__.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport json\n\n\ndef load(fp):\n with open(fp, 'r') as f:\n data = json.load(f)\n return data\n\n\ndef read_bulk(bfp):\n return load(bfp)\n\n\ndef read_layer(lfp):\n return load(lfp)\n\n\ndef get_eex(Eiso, Ebulk, A, m, n=1):\n # unit meV/A2\n return 1000 * (Eiso - n * Ebulk / m) / A\n\n\ndef run_eex(bfp, lfp):\n bd = read_bulk(bfp)\n ld = read_layer(lfp)\n exres = {}\n for k, v in ld.items():\n mbd = bd.get(k)\n if mbd is None:\n continue\n ebulk = mbd.get('energy')\n elayer = v.get('energy')\n m = mbd.get('ln')\n area = mbd.get('area')\n Eex = get_eex(elayer, ebulk, area, m)\n exres.update(\n {k: Eex}\n )\n return exres\n \n\nif __name__ == \"__main__\":\n pass\n\n", "id": "9506717", "language": "Python", "matching_score": 2.1136064529418945, "max_stars_count": 0, "path": "apkg/eex.py" }, { "content": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom apkg.energy import *\r\nfrom apkg.eex import *\r\n\r\nif __name__ == '__main__':\r\n import sys\r\n import json\r\n\r\n args = sys.argv\r\n fp = args[1]\r\n tp = args[2]\r\n if tp.lower() == 'bulk':\r\n data = args[3]\r\n res = run_bulk(fp, data)\r\n elif tp.lower() == 'eex':\r\n lfp = args[3]\r\n res = run_eex(fp, lfp)\r\n else:\r\n res = run_layer(fp)\r\n with open(r\"./{}.log\".format(tp), 'w') as f:\r\n json.dump(res, f, indent=4)\r\n\r\n\r\n", "id": "7214500", "language": "Python", "matching_score": 0.23557507991790771, "max_stars_count": 0, "path": "main.py" }, { "content": "import json\nimport os\nimport pandas as pd\nfrom tqdm import tqdm\nfrom argparse import ArgumentParser\n\nt_dict = {77:\"Nitrogen\", 273:\"Carbon Dioxide\", 298:\"Methane\"}\nunit_dic = {\"mmol/g\":1, \"mol/g\":0.001, 'mmol/kg':1000}\nm_dic = {\"Nitrogen\":28.0134, \"Methane\":16.0424, \"Carbon Dioxide\":44.0094}\ndef get_unit_factor(unit,ads):\n if unit in unit_dic:\n return 1 / unit_dic[unit]\n elif unit == \"cm3(STP)/g\":\n 
return 1 / 22.4139\n elif unit == 'mg/g':\n return 1 / m_dic[ads]\n else:\n return None\n\ndef norm_str(ori):\n ori = ori.split('.')[0].split('-')\n if ori[-1] == 'clean':\n ori = ori[:-1]\n elif ori[-2] == 'clean':\n ori = ori[:-2]\n return '-'.join(ori[1:])\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument('--data_dir', type=str,\n help='NIST data directory.')\n args = parser.parse_args()\n prefix = os.path.join(args.data_dir,'isotherm_data')\n pres_all = {\"CH4\":{\"num\":0, \"data\":[]}, \"CO2\":{\"num\":0, \"data\":[]}, \"N2\":{\"num\":0, \"data\":[]}}\n for gas_type in ['CH4','CO2','N2']:\n gas_pref = os.path.join(prefix, gas_type)\n files = os.listdir(gas_pref)\n for js in tqdm(files):\n with open(os.path.join(gas_pref, js), \"r\") as f:\n dic = json.load(f)\n name = dic['adsorbent']['name']\n t = dic['temperature']\n if t not in t_dict:\n continue\n tar_obj = t_dict[t]\n unit_factor = get_unit_factor(dic['adsorptionUnits'], tar_obj)\n if not unit_factor:\n continue\n tar_key = None\n for ads in dic['adsorbates']:\n if ads['name'] == tar_obj:\n tar_key = ads['InChIKey']\n break\n if not tar_key:\n continue\n pres_ret = []\n for d in dic['isotherm_data']:\n pres = d['pressure'] * 1e5\n for sd in d['species_data']:\n if sd['InChIKey'] == tar_key:\n tar_abs = sd['adsorption'] * unit_factor\n pres_ret.append({'pressure':pres, 'adsorption':tar_abs})\n pres_all[gas_type]['num'] += 1\n pres_all[gas_type]['data'].append({\"name\":name, \"filename\":js, \"isotherm_data\":pres_ret})\n with open(os.path.join(prefix,'all.json'),'w') as f:\n json.dump(pres_all, f)\n \n ", "id": "10126775", "language": "Python", "matching_score": 2.270524024963379, "max_stars_count": 0, "path": "process/process_nist_data.py" }, { "content": "from argparse import ArgumentParser\nimport os\n\ndef parse_train_args():\n parser = ArgumentParser()\n add_data_args(parser)\n add_train_args(parser)\n args = parser.parse_args()\n args = vars(args)\n lambda_mat = [float(_) for _ in args['weight_split'].split(',')]\n assert len(lambda_mat) == 3\n lambda_sum = sum(lambda_mat)\n args['lambda_attention'] = lambda_mat[0] / lambda_sum\n args['lambda_distance'] = lambda_mat[-1] / lambda_sum\n if args['d_mid_list'] == 'None':\n args['d_mid_list'] = []\n else:\n args['d_mid_list'] = [int(_) for _ in args['d_mid_list'].split(',')]\n makedirs(args['save_dir'] + f\"/{args['gas_type']}_{args['pressure']}/\")\n return args\n\ndef parse_predict_args():\n parser = ArgumentParser()\n add_data_args(parser)\n add_train_args(parser)\n args = parser.parse_args()\n args = vars(args)\n lambda_mat = [float(_) for _ in args['weight_split'].split(',')]\n assert len(lambda_mat) == 3\n lambda_sum = sum(lambda_mat)\n args['lambda_attention'] = lambda_mat[0] / lambda_sum\n args['lambda_distance'] = lambda_mat[-1] / lambda_sum\n if args['d_mid_list'] == 'None':\n args['d_mid_list'] = []\n else:\n args['d_mid_list'] = [int(_) for _ in args['d_mid_list'].split(',')]\n p_cond = args['pressure'].split(',')\n assert len(p_cond) == 3\n args['pressure'] = (float(p_cond[0]), float(p_cond[1]), int(p_cond[2]))\n return args\n\ndef parse_baseline_args():\n parser = ArgumentParser()\n add_data_args(parser)\n add_baseline_args(parser)\n args = parser.parse_args()\n args = vars(args)\n makedirs(args['save_dir'] + f\"/{args['gas_type']}_{args['pressure']}/\")\n return args \n\ndef parse_finetune_args():\n parser = ArgumentParser()\n add_data_args(parser)\n add_finetune_args(parser)\n args = parser.parse_args()\n args = vars(args)\n 
makedirs(args['save_dir'] + f\"/{args['gas_type']}_{args['pressure']}/\")\n return args\n\ndef parse_ml_args():\n parser = ArgumentParser()\n add_data_args(parser)\n add_ml_args(parser)\n args = parser.parse_args()\n args = vars(args)\n makedirs(args['save_dir'] + f\"/{args['ml_type']}/{args['gas_type']}_{args['pressure']}/\")\n return args \n\ndef makedirs(path: str, isfile: bool = False):\n if isfile:\n path = os.path.dirname(path)\n if path != '':\n os.makedirs(path, exist_ok=True)\n\ndef add_ml_args(parser: ArgumentParser):\n parser.add_argument('--ml_type', type=str, default='RF',\n help='ML algorithm, SVR/DT/RF.')\n\n parser.add_argument('--seed', type=int, default=9999,\n help='Random seed to use when splitting data into train/val/test sets.'\n 'When `num_folds` > 1, the first fold uses this seed and all'\n 'subsequent folds add 1 to the seed.') \n parser.add_argument('--fold', type=int, default=10,\n help='Fold num.') \n\ndef add_data_args(parser: ArgumentParser):\n parser.add_argument('--data_dir', type=str,\n help='Dataset directory, containing label/ and processed/ subdirectories.')\n\n parser.add_argument('--save_dir', type=str, \n help='Model directory.') \n \n\n parser.add_argument('--gas_type', type=str,\n help='Gas type for prediction.')\n\n parser.add_argument('--pressure', type=str, \n help='Pressure condition for prediction.')\n\n parser.add_argument('--img_dir', type=str, default='',\n help='Directory for visualized isotherms')\n\n \n parser.add_argument('--name', type=str, default='',\n help='Target MOF name for attention visualization.')\n\ndef add_finetune_args(parser: ArgumentParser):\n parser.add_argument('--ori_dir', type=str,\n help='Pretrained model directory, containing model of different Folds.')\n \n parser.add_argument('--epoch', type=int, default=100,\n help='Epoch num.') \n\n parser.add_argument('--batch_size', type=int, default=32,\n help='Batch size.') \n\n parser.add_argument('--fold', type=int, default=10,\n help='Fold num.') \n\n parser.add_argument('--lr', type=float, default=0.0007,\n help='Learning rate.')\n\n parser.add_argument('--adapter_dim', type=int, default=8,\n help='Adapted vector dimension')\n\n parser.add_argument('--seed', type=int, default=9999,\n help='Random seed to use when splitting data into train/val/test sets.')\n\ndef add_baseline_args(parser: ArgumentParser):\n \n parser.add_argument('--model_name',type=str,default='gin',\n help='Baseline Model, gin/egnn/schnet/painn.')\n\n parser.add_argument('--gpu', type=int,\n help='GPU id to allocate.')\n\n parser.add_argument('--seed', type=int, default=9999,\n help='Random seed to use when splitting data into train/val/test sets.')\n\n parser.add_argument('--d_model', type=int, default=1024,\n help='Hidden size of baseline model.')\n\n parser.add_argument('--N', type=int, default=2,\n help='Layer num of baseline model.')\n\n parser.add_argument('--use_global_feature', action='store_true',\n help='Whether to use global features(graph-level features).')\n\n parser.add_argument('--warmup_step', type=int, default=2000,\n help='Warmup steps.')\n\n parser.add_argument('--epoch', type=int, default=100,\n help='Epoch num.') \n\n parser.add_argument('--batch_size', type=int, default=32,\n help='Batch size.') \n\n parser.add_argument('--fold', type=int, default=10,\n help='Fold num.') \n\n parser.add_argument('--lr', type=float, default=0.0007,\n help='Maximum learning rate, (warmup_step * d_model) ** -0.5 .')\n\ndef add_train_args(parser: ArgumentParser):\n \n parser.add_argument('--seed', 
type=int, default=9999,\n help='Random seed to use when splitting data into train/val/test sets.')\n\n parser.add_argument('--d_model', type=int, default=1024,\n help='Hidden size of transformer model.')\n\n parser.add_argument('--N', type=int, default=2,\n help='Layer num of transformer model.')\n\n parser.add_argument('--h', type=int, default=16,\n help='Attention head num of transformer model.')\n\n parser.add_argument('--n_generator_layers', type=int, default=2,\n help='Layer num of generator(MLP) model')\n\n parser.add_argument('--weight_split', type=str, default='1,1,1',\n help='Unnormalized weights of Self-Attention/Adjacency/Distance Matrix respectively in Graph Transformer.')\n\n parser.add_argument('--leaky_relu_slope', type=float, default=0.0,\n help='Leaky ReLU slope for activation functions.')\n\n parser.add_argument('--dense_output_nonlinearity',type=str,default='silu',\n help='Activation Function for predict module, silu/relu/tanh/none.')\n\n parser.add_argument('--distance_matrix_kernel',type=str,default='bessel',\n help='Kernel applied on Distance Matrix, bessel/softmax/exp. For example, exp means setting D(i,j) of node i,j with distance d by exp(-d)')\n\n parser.add_argument('--dropout', type=float, default=0.1,\n help='Dropout ratio.')\n\n parser.add_argument('--aggregation_type', type=str, default='mean',\n help='Type for aggregeting node feature into graph feature, mean/sum/dummy_node.')\n\n parser.add_argument('--use_global_feature', action='store_true',\n help='Whether to use global features(graph-level features).')\n\n parser.add_argument('--use_ffn_only', action='store_true',\n help='Use DNN Generator which only considers global features. ')\n \n parser.add_argument('--d_mid_list', type=str, default='128,512',\n help='Projection Layers to augment global feature dim to local feature dim.')\n\n parser.add_argument('--warmup_step', type=int, default=2000,\n help='Warmup steps.')\n\n parser.add_argument('--epoch', type=int, default=300,\n help='Epoch num.') \n\n parser.add_argument('--batch_size', type=int, default=64,\n help='Batch size.') \n\n parser.add_argument('--fold', type=int, default=10,\n help='Fold num.') \n\n parser.add_argument('--lr', type=float, default=0.0007,\n help='Maximum learning rate, (warmup_step * d_model) ** -0.5 .')\n\n \n\n \n", "id": "1936705", "language": "Python", "matching_score": 3.587839126586914, "max_stars_count": 0, "path": "argparser.py" }, { "content": "import math, copy\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef make_model(d_atom, N=2, d_model=128, h=8, dropout=0.1, \n lambda_attention=0.3, lambda_distance=0.3, trainable_lambda=False,\n N_dense=2, leaky_relu_slope=0.0, aggregation_type='mean', \n dense_output_nonlinearity='relu', distance_matrix_kernel='softmax',\n use_edge_features=False, n_output=1,\n control_edges=False, integrated_distances=False, \n scale_norm=False, n_generator_layers=1, d_feature=8, use_global_feature=False, d_mid_list=None, d_ff_list=None, use_ffn_only=False, adj_mask=None, adapter_finetune=False, **kwargs):\n c = copy.deepcopy\n attn = MultiHeadedAttention(h, d_model, dropout, lambda_attention, lambda_distance, trainable_lambda, distance_matrix_kernel, use_edge_features, control_edges, integrated_distances, adj_mask)\n ff = PositionwiseFeedForward(d_model, N_dense, dropout, leaky_relu_slope, dense_output_nonlinearity)\n pooling = PoolingLayer(aggregation_type)\n if use_ffn_only:\n if d_ff_list is None and n_generator_layers > 1:\n d_ff_list = 
[d_model] * (n_generator_layers - 1)\n model = DNNGenerator(d_model, d_feature, n_output, leaky_relu_slope, dropout, scale_norm, d_ff_list)\n elif not use_global_feature:\n model = GraphTransformer(\n Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout, scale_norm), N, scale_norm),\n Embeddings(d_model, d_atom, dropout),\n c(pooling),\n Generator(d_model, aggregation_type, n_output, n_generator_layers, leaky_relu_slope, dropout, scale_norm, d_ff_list))\n else:\n if d_ff_list is None and n_generator_layers > 1:\n d_ff_list = [d_model] * (n_generator_layers - 1)\n model = GraphTransformerWithGlobalFeature(\n Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout, scale_norm), N, scale_norm),\n Embeddings(d_model, d_atom, dropout),\n c(pooling),\n GeneratorWithGlobalFeaturev3(d_model, d_feature, aggregation_type, n_output, dense_output_nonlinearity, leaky_relu_slope, dropout, scale_norm, d_mid_list, d_ff_list, adapter_finetune))\n for name,para in model.named_parameters():\n if name.endswith('weight'):\n if 'self_attn' in name:\n nn.init.xavier_uniform_(para,gain=1/math.sqrt(2))\n else:\n nn.init.xavier_uniform_(para)\n elif name.endswith('bias') and 'src_embed' not in name:\n nn.init.constant_(para,0.0)\n elif 'adapter_vec' in name:\n nn.init.constant_(para,0.0)\n\n return model\n\n\nclass GraphTransformer(nn.Module):\n def __init__(self, encoder, src_embed, pooling, generator):\n super(GraphTransformer, self).__init__()\n self.encoder = encoder\n self.src_embed = src_embed\n self.pooling = pooling\n self.generator = generator\n \n def forward(self, src, src_mask, adj_matrix, distances_matrix, global_feature):\n return self.predict(self.encode(src, src_mask, adj_matrix, distances_matrix, None))\n \n def encode(self, src, src_mask, adj_matrix, distances_matrix, edges_att):\n return self.pooling(self.encoder(self.src_embed(src), src_mask, adj_matrix, distances_matrix, edges_att),src_mask)\n \n def predict(self, out):\n return self.generator(out)\n\nclass GraphTransformerWithGlobalFeature(nn.Module):\n def __init__(self, encoder, src_embed, pooling, generator):\n super(GraphTransformerWithGlobalFeature, self).__init__()\n self.encoder = encoder\n self.src_embed = src_embed\n self.pooling = pooling\n self.generator = generator\n self.adapter_dim = None\n\n def set_adapter_dim(self,adapter_dim):\n self.adapter_dim = adapter_dim\n if hasattr(self.generator, 'adapter_dim'):\n self.generator.adapter_dim = adapter_dim\n \n def forward(self, src, src_mask, adj_matrix, distances_matrix, global_feature):\n return self.predict(self.encode(src, src_mask, adj_matrix, distances_matrix, None), global_feature)\n \n def encode(self, src, src_mask, adj_matrix, distances_matrix, edges_att):\n return self.pooling(self.encoder(self.src_embed(src), src_mask, adj_matrix, distances_matrix, edges_att),src_mask)\n \n def predict(self, out, global_feature):\n return self.generator(out, global_feature)\n \nclass Generator(nn.Module):\n \n def __init__(self, d_model, aggregation_type='mean', n_output=1, n_layers=1, \n leaky_relu_slope=0.01, dropout=0.0, scale_norm=False, d_ff_list=None):\n super(Generator, self).__init__()\n self.d_hidden = d_model\n if n_layers == 1:\n self.proj = nn.Linear(d_model, n_output)\n else:\n if d_ff_list is None:\n self.proj = nn.Linear(self.d_hidden, n_output)\n else:\n self.proj = []\n for d1,d2 in zip([self.d_hidden] + d_ff_list[:-1], d_ff_list):\n self.proj.append(nn.Linear(d1, d2))\n self.proj.append(nn.LeakyReLU(leaky_relu_slope))\n self.proj.append(ScaleNorm(d2) if scale_norm else 
LayerNorm(d2))\n self.proj.append(nn.Dropout(dropout))\n self.proj.append(nn.Linear(d_ff_list[-1], n_output))\n self.proj = torch.nn.Sequential(*self.proj)\n self.aggregation_type = aggregation_type\n\n def forward(self, x):\n projected = self.proj(x)\n return projected\n\nclass DNNGenerator(nn.Module):\n \n def __init__(self, d_model, d_feature, n_output=1,leaky_relu_slope=0.01,dropout=0.0,scale_norm=False, d_ff_list=None):\n super(DNNGenerator, self).__init__()\n self.d_feature = d_feature\n if d_ff_list is None:\n self.proj = nn.Linear(self.d_feature, n_output)\n else:\n self.proj = []\n for d1,d2 in zip([self.d_feature] + d_ff_list[:-1], d_ff_list):\n self.proj.append(nn.Linear(d1, d2))\n self.proj.append(nn.LeakyReLU(leaky_relu_slope))\n self.proj.append(ScaleNorm(d2) if scale_norm else LayerNorm(d2))\n self.proj.append(nn.Dropout(dropout))\n self.proj.append(nn.Linear(d_ff_list[-1], n_output))\n self.proj = torch.nn.Sequential(*self.proj)\n\n def forward(self, src, src_mask, adj_matrix, distances_matrix, global_feature):\n return self.proj(global_feature)\n\nclass BesselBasis(nn.Module):\n \n\n def __init__(self, cutoff=5.0, n_rbf=None):\n \n super(BesselBasis, self).__init__()\n \n freqs = torch.arange(1, n_rbf + 1) * math.pi / cutoff\n self.register_buffer(\"freqs\", freqs)\n\n def forward(self, inputs):\n a = self.freqs[None, None, None, :]\n ax = inputs * a\n sinax = torch.sin(ax)\n\n norm = torch.where(inputs == 0, torch.tensor(1.0, device=inputs.device), inputs)\n y = sinax / norm\n\n return y\n\nclass CosineCutoff(nn.Module):\n \n def __init__(self, cutoff=5.0):\n super(CosineCutoff, self).__init__()\n self.register_buffer(\"cutoff\", torch.FloatTensor([cutoff]))\n\n def forward(self, distances):\n \n cutoffs = 0.5 * (torch.cos(distances * np.pi / self.cutoff) + 1.0)\n \n cutoffs *= (distances < self.cutoff).float()\n return cutoffs\n\nclass PoolingLayer(nn.Module):\n def __init__(self, aggregation_type='mean'):\n super(PoolingLayer,self).__init__()\n self.aggregation_type = aggregation_type\n def forward(self, x, mask):\n mask = mask.unsqueeze(-1).float()\n out_masked = x * mask\n if self.aggregation_type == 'mean':\n out_sum = out_masked.sum(dim=1)\n mask_sum = mask.sum(dim=(1))\n out_avg_pooling = out_sum / mask_sum\n elif self.aggregation_type == 'sum':\n out_sum = out_masked.sum(dim=1)\n out_avg_pooling = out_sum\n elif self.aggregation_type == 'dummy_node':\n out_avg_pooling = out_masked[:,0]\n return out_avg_pooling\n\nclass GeneratorWithGlobalFeaturev3(nn.Module):\n \n def __init__(self, d_model, d_feature, aggregation_type='mean', n_output=1, dense_output_nonlinearity='relu',\n leaky_relu_slope=0.01, dropout=0.0, scale_norm=False, d_mid_list=None, d_ff_list=None, adapter_finetune=False):\n super(GeneratorWithGlobalFeaturev3, self).__init__()\n c = copy.deepcopy\n if dense_output_nonlinearity == 'relu':\n self.act = nn.LeakyReLU(leaky_relu_slope)\n elif dense_output_nonlinearity == 'tanh':\n self.act = nn.Tanh()\n elif dense_output_nonlinearity == 'silu':\n self.act = nn.SiLU()\n\n if d_mid_list is not None:\n self.equal_proj = []\n for d1,d2 in zip([d_feature] + d_mid_list, d_mid_list + [d_model]):\n self.equal_proj.append(nn.Linear(d1,d2))\n self.equal_proj.append(c(self.act))\n self.equal_proj.append(ScaleNorm(d2) if scale_norm else LayerNorm(d2))\n self.equal_proj.append(nn.Dropout(dropout))\n self.equal_proj = torch.nn.Sequential(*self.equal_proj)\n else:\n self.equal_proj = 
nn.Sequential(nn.Linear(d_feature,d_model),nn.LeakyReLU(leaky_relu_slope),ScaleNorm(d_feature) if scale_norm else LayerNorm(d_feature),nn.Dropout(dropout))\n self.d_hidden = d_model * 2\n self.adapter_dim = None\n if d_ff_list is None:\n self.proj = nn.Linear(self.d_hidden, n_output)\n else:\n self.proj = []\n for d1,d2 in zip([self.d_hidden] + d_ff_list[:-1], d_ff_list):\n self.proj.append(nn.Linear(d1, d2))\n self.proj.append(c(self.act))\n self.proj.append(ScaleNorm(d2) if scale_norm else LayerNorm(d2))\n self.proj.append(nn.Dropout(dropout))\n self.proj.append(nn.Linear(d_ff_list[-1], n_output))\n self.proj = torch.nn.Sequential(*self.proj)\n self.aggregation_type = aggregation_type\n self.adapter_finetune = adapter_finetune\n if adapter_finetune:\n self.adapter_vec = torch.nn.Parameter(torch.Tensor(d_mid_list[0] if d_mid_list else d_model))\n\n def adapted_equal_proj(self,global_feature, adapter_dim):\n gf_ori = self.equal_proj[0](global_feature[...,:-adapter_dim]).repeat_interleave(adapter_dim,dim=0)\n gf_apt = global_feature[...,-adapter_dim:].reshape(-1,1) * self.adapter_vec\n return self.equal_proj[1:](gf_ori + gf_apt)\n\n def forward(self, x, global_feature):\n adapter_dim = self.adapter_dim\n out_avg_pooling = x\n if self.adapter_finetune:\n out_avg_pooling = out_avg_pooling.repeat_interleave(adapter_dim,dim=0)\n global_feature = self.adapted_equal_proj(global_feature, adapter_dim)\n else:\n global_feature = self.equal_proj(global_feature)\n out_avg_pooling = torch.cat([out_avg_pooling, global_feature],dim=1)\n # out_avg_pooling = global_feature\n projected = self.proj(out_avg_pooling)\n return projected \n \nclass PositionGenerator(nn.Module):\n \n def __init__(self, d_model):\n super(PositionGenerator, self).__init__()\n self.norm = LayerNorm(d_model)\n self.proj = nn.Linear(d_model, 3)\n\n def forward(self, x, mask):\n mask = mask.unsqueeze(-1).float()\n out_masked = self.norm(x) * mask\n projected = self.proj(out_masked)\n return projected\n \n\ndef clones(module, N):\n \n return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])\n\n\nclass Encoder(nn.Module):\n \n def __init__(self, layer, N, scale_norm):\n super(Encoder, self).__init__()\n self.layers = clones(layer, N)\n self.norm = ScaleNorm(layer.size) if scale_norm else LayerNorm(layer.size)\n \n def forward(self, x, mask, adj_matrix, distances_matrix, edges_att):\n \n for layer in self.layers:\n x = layer(x, mask, adj_matrix, distances_matrix, edges_att)\n return self.norm(x)\n\n \nclass LayerNorm(nn.Module):\n \n def __init__(self, features, eps=1e-6):\n super(LayerNorm, self).__init__()\n self.a_2 = nn.Parameter(torch.ones(features))\n self.b_2 = nn.Parameter(torch.zeros(features))\n self.eps = eps\n\n def forward(self, x):\n mean = x.mean(-1, keepdim=True)\n std = x.std(-1, keepdim=True)\n return self.a_2 * (x - mean) / (std + self.eps) + self.b_2\n \n \nclass ScaleNorm(nn.Module):\n\n def __init__(self, scale, eps=1e-5):\n super(ScaleNorm, self).__init__()\n self.scale = nn.Parameter(torch.tensor(math.sqrt(scale)))\n self.eps = eps\n \n def forward(self, x):\n norm = self.scale / torch.norm(x, dim=-1, keepdim=True).clamp(min=self.eps)\n return x * norm\n\n \nclass SublayerConnection(nn.Module):\n \n def __init__(self, size, dropout, scale_norm):\n super(SublayerConnection, self).__init__()\n self.norm = ScaleNorm(size) if scale_norm else LayerNorm(size)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x, sublayer):\n \n return x + self.dropout(sublayer(self.norm(x)))\n\n \nclass 
EncoderLayer(nn.Module):\n \n def __init__(self, size, self_attn, feed_forward, dropout, scale_norm):\n super(EncoderLayer, self).__init__()\n self.self_attn = self_attn\n self.feed_forward = feed_forward\n self.sublayer = clones(SublayerConnection(size, dropout, scale_norm), 2)\n self.size = size\n\n def forward(self, x, mask, adj_matrix, distances_matrix, edges_att):\n \n x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, adj_matrix, distances_matrix, edges_att, mask))\n return self.sublayer[1](x, self.feed_forward)\n\nclass FFNEncoderLayer(nn.Module):\n def __init__(self, size, self_attn, feed_forward, dropout, scale_norm):\n super(FFNEncoderLayer, self).__init__()\n self.feed_forward = feed_forward\n self.sublayer = SublayerConnection(size, dropout, scale_norm)\n self.size = size\n\n def forward(self, x, mask, adj_matrix, distances_matrix, edges_att):\n return self.sublayer(x, self.feed_forward) \n\n\nclass EdgeFeaturesLayer(nn.Module):\n def __init__(self, d_model, d_edge, h, dropout):\n super(EdgeFeaturesLayer, self).__init__()\n assert d_model % h == 0\n d_k = d_model // h\n self.linear = nn.Linear(d_edge, 1, bias=False)\n with torch.no_grad():\n self.linear.weight.fill_(0.25)\n\n def forward(self, x):\n p_edge = x.permute(0, 2, 3, 1)\n p_edge = self.linear(p_edge).permute(0, 3, 1, 2)\n return torch.relu(p_edge)\n \n\ndef attention(query, key, value, adj_matrix, distances_matrix, edges_att,\n mask=None, dropout=None, \n lambdas=(0.3, 0.3, 0.4), trainable_lambda=False,\n distance_matrix_kernel=None, use_edge_features=False, control_edges=False,\n eps=1e-6, inf=1e12):\n d_k = query.size(-1)\n scores = torch.matmul(query, key.transpose(-2, -1)) \\\n / math.sqrt(d_k)\n if mask is not None:\n scores = scores.masked_fill(mask.unsqueeze(1).repeat(1, query.shape[1], query.shape[2], 1) == 0, -inf)\n p_attn = F.softmax(scores, dim = -1)\n\n if use_edge_features:\n adj_matrix = edges_att.view(adj_matrix.shape)\n\n adj_matrix = adj_matrix / (adj_matrix.sum(dim=-1).unsqueeze(2) + eps)\n adj_matrix = adj_matrix.unsqueeze(1).repeat(1, query.shape[1], 1, 1)\n p_adj = adj_matrix\n \n p_dist = distances_matrix\n \n if trainable_lambda:\n softmax_attention, softmax_distance, softmax_adjacency = lambdas.cuda()\n p_weighted = softmax_attention * p_attn + softmax_distance * p_dist + softmax_adjacency * p_adj\n else:\n lambda_attention, lambda_distance, lambda_adjacency = lambdas\n p_weighted = lambda_attention * p_attn + lambda_distance * p_dist + lambda_adjacency * p_adj\n\n\n if dropout is not None:\n p_weighted = dropout(p_weighted)\n\n atoms_featrues = torch.matmul(p_weighted, value) \n return atoms_featrues, p_weighted, p_attn\n\ndef cosineAttention(query, key, value, adj_matrix, distances_matrix, edges_att,\n mask=None, dropout=None, \n lambdas=(0.3, 0.3, 0.4), trainable_lambda=False,\n distance_matrix_kernel=None, use_edge_features=False, control_edges=False,\n eps=1e-6, inf=1e12):\n \n q = query / (torch.norm(query, p = 2, dim = -1, keepdim = True).detach() + eps)\n k = key / (torch.norm(key, p = 2, dim = -1, keepdim = True).detach() + eps)\n\n scores = torch.matmul(q, k.transpose(-2, -1))\n if mask is not None:\n scores = scores.masked_fill(mask.unsqueeze(1).repeat(1, query.shape[1], query.shape[2], 1) == 0, 0)\n p_attn = F.softmax(scores, dim = -1)\n\n if use_edge_features:\n adj_matrix = edges_att.view(adj_matrix.shape)\n\n adj_matrix = adj_matrix / (adj_matrix.sum(dim=-1).unsqueeze(2) + eps)\n adj_matrix = adj_matrix.unsqueeze(1).repeat(1, query.shape[1], 1, 1)\n p_adj = adj_matrix\n 
\n p_dist = distances_matrix\n \n if trainable_lambda:\n softmax_attention, softmax_distance, softmax_adjacency = lambdas.cuda()\n p_weighted = softmax_attention * p_attn + softmax_distance * p_dist + softmax_adjacency * p_adj\n else:\n lambda_attention, lambda_distance, lambda_adjacency = lambdas\n p_weighted = lambda_attention * p_attn + lambda_distance * p_dist + lambda_adjacency * p_adj\n\n\n if dropout is not None:\n p_weighted = dropout(p_weighted)\n\n atoms_featrues = torch.matmul(p_weighted, value) \n return atoms_featrues, p_weighted, p_attn\n\ndef attentionOnAdj(query, key, value, adj_matrix, distances_matrix, edges_att,\n mask=None, dropout=None, \n lambdas=(0.3, 0.3, 0.4), trainable_lambda=False,\n distance_matrix_kernel=None, use_edge_features=False, control_edges=False,\n eps=1e-6, inf=1e12):\n d_k = query.size(-1)\n scores = torch.matmul(query, key.transpose(-2, -1)) \\\n / math.sqrt(d_k)\n scores = scores.masked_fill(adj_matrix.unsqueeze(1).repeat(1, query.shape[1], 1, 1) == 0, -inf)\n p_attn = F.softmax(scores, dim = -1)\n\n if use_edge_features:\n adj_matrix = edges_att.view(adj_matrix.shape)\n\n \n p_dist = distances_matrix\n \n if trainable_lambda:\n softmax_attention, softmax_distance, softmax_adjacency = lambdas.cuda()\n softmax_useful = softmax_attention + softmax_distance\n softmax_attention /= softmax_useful\n softmax_distance /= softmax_useful\n p_weighted = softmax_attention * p_attn + softmax_distance * p_dist\n else:\n lambda_attention, lambda_distance, lambda_adjacency = lambdas\n lambda_useful = lambda_attention + lambda_distance\n lambda_attention /= lambda_useful\n lambda_distance /= lambda_useful\n p_weighted = lambda_attention * p_attn + lambda_distance * p_dist\n\n\n if dropout is not None:\n p_weighted = dropout(p_weighted)\n\n atoms_featrues = torch.matmul(p_weighted, value) \n return atoms_featrues, p_weighted, p_attn\n\n\nclass MultiHeadedAttention(nn.Module):\n def __init__(self, h, d_model, dropout=0.1, lambda_attention=0.3, lambda_distance=0.3, trainable_lambda=False, \n distance_matrix_kernel='softmax', use_edge_features=False, control_edges=False, integrated_distances=False, adj_mask=False, n_rbf=20):\n super(MultiHeadedAttention, self).__init__()\n assert d_model % h == 0\n \n self.d_k = d_model // h\n self.h = h\n self.trainable_lambda = trainable_lambda\n if trainable_lambda:\n lambda_adjacency = 1. - lambda_attention - lambda_distance\n lambdas_tensor = torch.tensor([lambda_attention, lambda_distance, lambda_adjacency], requires_grad=True)\n self.lambdas = torch.nn.Parameter(lambdas_tensor)\n else:\n lambda_adjacency = 1. 
- lambda_attention - lambda_distance\n self.lambdas = (lambda_attention, lambda_distance, lambda_adjacency)\n \n self.linears = clones(nn.Linear(d_model, d_model), 4)\n self.attn = None\n self.dropout = nn.Dropout(p=dropout)\n self.use_filter = False\n if distance_matrix_kernel == 'softmax':\n self.distance_matrix_kernel = lambda x: F.softmax(-x, dim = -1)\n elif distance_matrix_kernel == 'exp':\n self.distance_matrix_kernel = lambda x: torch.exp(-x)\n elif distance_matrix_kernel == 'bessel':\n self.bessel = BesselBasis(n_rbf=n_rbf)\n self.cutoff = CosineCutoff()\n self.filter_act = nn.SiLU()\n self.distance_matrix_kernel = None\n self.filter_layer = nn.Linear(n_rbf, self.h)\n self.use_filter = True\n self.integrated_distances = integrated_distances\n self.use_edge_features = use_edge_features\n self.control_edges = control_edges\n self.adj_mask = adj_mask\n if use_edge_features:\n d_edge = 11 if not integrated_distances else 12\n self.edges_feature_layer = EdgeFeaturesLayer(d_model, d_edge, h, dropout)\n \n def forward(self, query, key, value, adj_matrix, distances_matrix, edges_att, mask=None):\n if mask is not None:\n mask = mask.unsqueeze(1)\n nbatches = query.size(0)\n \n query, key, value = \\\n [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)\n for l, x in zip(self.linears, (query, key, value))]\n \n \n if self.use_filter:\n distances_matrix = distances_matrix.unsqueeze(-1)\n distances_matrix_rbf = self.bessel(distances_matrix)\n p_dist = self.filter_layer(distances_matrix_rbf).masked_fill(mask.unsqueeze(-1).repeat(1, mask.shape[-1], 1, self.h)==0, 0)\n p_dist = self.filter_act(p_dist) * self.cutoff(distances_matrix)\n p_dist = p_dist.permute(0,3,1,2)\n else:\n distances_matrix = distances_matrix.masked_fill(mask.repeat(1, mask.shape[-1], 1) == 0, np.inf)\n distances_matrix = self.distance_matrix_kernel(distances_matrix)\n p_dist = distances_matrix.unsqueeze(1).repeat(1, query.shape[1], 1, 1)\n\n if self.use_edge_features:\n if self.integrated_distances:\n edges_att = torch.cat((edges_att, distances_matrix.unsqueeze(1)), dim=1)\n edges_att = self.edges_feature_layer(edges_att)\n \n if self.adj_mask is None:\n x, self.attn, self.self_attn = attention(query, key, value, adj_matrix, \n p_dist, edges_att,\n mask=mask, dropout=self.dropout,\n lambdas=self.lambdas,\n trainable_lambda=self.trainable_lambda,\n distance_matrix_kernel=self.distance_matrix_kernel,\n use_edge_features=self.use_edge_features,\n control_edges=self.control_edges)\n elif self.adj_mask == 'adj':\n x, self.attn, self.self_attn = attentionOnAdj(query, key, value, adj_matrix, \n p_dist, edges_att,\n mask=mask, dropout=self.dropout,\n lambdas=self.lambdas,\n trainable_lambda=self.trainable_lambda,\n distance_matrix_kernel=self.distance_matrix_kernel,\n use_edge_features=self.use_edge_features,\n control_edges=self.control_edges)\n elif self.adj_mask == 'qk':\n x, self.attn, self.self_attn = attention(query, query, value, adj_matrix, \n p_dist, edges_att,\n mask=mask, dropout=self.dropout,\n lambdas=self.lambdas,\n trainable_lambda=self.trainable_lambda,\n distance_matrix_kernel=self.distance_matrix_kernel,\n use_edge_features=self.use_edge_features,\n control_edges=self.control_edges) \n elif self.adj_mask == 'cosine':\n x, self.attn, self.self_attn = cosineAttention(query, key, value, adj_matrix, \n p_dist, edges_att,\n mask=mask, dropout=self.dropout,\n lambdas=self.lambdas,\n trainable_lambda=self.trainable_lambda,\n distance_matrix_kernel=self.distance_matrix_kernel,\n 
use_edge_features=self.use_edge_features,\n control_edges=self.control_edges) \n elif self.adj_mask == 'cosineqk':\n x, self.attn, self.self_attn = cosineAttention(query, query, value, adj_matrix, \n p_dist, edges_att,\n mask=mask, dropout=self.dropout,\n lambdas=self.lambdas,\n trainable_lambda=self.trainable_lambda,\n distance_matrix_kernel=self.distance_matrix_kernel,\n use_edge_features=self.use_edge_features,\n control_edges=self.control_edges) \n\n x = x.transpose(1, 2).contiguous() \\\n .view(nbatches, -1, self.h * self.d_k)\n return self.linears[-1](x)\n\nclass PositionwiseFeedForward(nn.Module):\n \"Implements FFN equation.\"\n def __init__(self, d_model, N_dense, dropout=0.1, leaky_relu_slope=0.0, dense_output_nonlinearity='relu'):\n super(PositionwiseFeedForward, self).__init__()\n self.N_dense = N_dense\n self.linears = clones(nn.Linear(d_model, d_model), N_dense)\n self.dropout = clones(nn.Dropout(dropout), N_dense)\n self.leaky_relu_slope = leaky_relu_slope\n if dense_output_nonlinearity == 'relu':\n self.dense_output_nonlinearity = lambda x: F.leaky_relu(x, negative_slope=self.leaky_relu_slope)\n elif dense_output_nonlinearity == 'tanh':\n self.tanh = torch.nn.Tanh()\n self.dense_output_nonlinearity = lambda x: self.tanh(x)\n elif dense_output_nonlinearity == 'silu':\n self.silu = nn.SiLU()\n self.dense_output_nonlinearity = lambda x: self.silu(x)\n elif dense_output_nonlinearity == 'none':\n self.dense_output_nonlinearity = lambda x: x\n \n\n def forward(self, x):\n if self.N_dense == 0:\n return x\n \n for i in range(len(self.linears)-1):\n x = self.dropout[i](F.leaky_relu(self.linears[i](x), negative_slope=self.leaky_relu_slope))\n \n return self.dropout[-1](self.dense_output_nonlinearity(self.linears[-1](x)))\n\nclass Embeddings(nn.Module):\n def __init__(self, d_model, d_atom, dropout):\n super(Embeddings, self).__init__()\n self.lut = nn.Linear(d_atom, d_model)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x):\n return self.dropout(self.lut(x))\n", "id": "2095859", "language": "Python", "matching_score": 2.883849620819092, "max_stars_count": 0, "path": "models/transformer.py" }, { "content": "from ast import mod\nfrom turtle import forward\nfrom .egnn import *\nfrom .painn import *\nfrom .schnet import *\nfrom .dimenet_pp import *\nfrom torch import nn\nfrom torch.nn import functional as F\n\ndef make_baseline_model(d_atom, model_name, N=2, d_model=128, use_global_feature=False, d_feature=9, **kwargs):\n model = None\n if model_name == 'egnn':\n representation = EGNN(in_node_nf=d_atom, hidden_nf=d_model, n_layers=N, attention=True)\n use_adj = True\n elif model_name == 'dimenetpp':\n representation = DimeNetPlusPlus(hidden_channels=d_model, out_channels=d_model, num_input=d_atom, num_blocks=N, int_emb_size=d_model // 2, basis_emb_size=8, out_emb_channels=d_model * 2, num_spherical=7, num_radial=6)\n use_adj = True\n elif model_name == 'schnet':\n representation = SchNet(n_atom_basis=d_model, n_filters=d_model, n_interactions=N, max_z=d_atom)\n use_adj = False\n elif model_name == 'painn':\n representation = PaiNN(n_atom_basis=d_model, n_interactions=N, max_z=d_atom)\n use_adj = False\n if use_global_feature:\n out = Generator_with_gf(d_model=d_model, d_gf=d_feature)\n else:\n out = Generator(d_model=d_model)\n model = BaselineModel(representation=representation, output=out, use_adj=use_adj)\n return model\n\nclass Generator(nn.Module):\n def __init__(self, d_model):\n super(Generator, self).__init__()\n self.hidden_nf = d_model\n self.node_dec = 
nn.Sequential(nn.Linear(self.hidden_nf, self.hidden_nf),\n nn.SiLU(),\n nn.Linear(self.hidden_nf, self.hidden_nf))\n\n self.graph_dec = nn.Sequential(nn.Linear(self.hidden_nf, self.hidden_nf),\n nn.SiLU(),\n nn.Linear(self.hidden_nf, 1))\n\n def forward(self, h, atom_mask, global_feature=None):\n h = self.node_dec(h)\n h = h * atom_mask.unsqueeze(-1)\n h = torch.sum(h, dim=1)\n pred = self.graph_dec(h)\n return pred.squeeze(1) \n\nclass Generator_with_gf(nn.Module):\n def __init__(self, d_model, d_gf):\n super(Generator_with_gf, self).__init__()\n self.hidden_nf = d_model\n self.input_nf = d_gf\n self.node_dec = nn.Sequential(nn.Linear(self.hidden_nf, self.hidden_nf),\n nn.SiLU(),\n nn.Linear(self.hidden_nf, self.hidden_nf))\n\n self.gf_enc = nn.Sequential(nn.Linear(self.input_nf, self.hidden_nf // 2),\n nn.SiLU(),\n nn.Linear(self.hidden_nf // 2, self.hidden_nf))\n\n self.graph_dec = nn.Sequential(nn.Linear(self.hidden_nf * 2, self.hidden_nf),\n nn.SiLU(),\n nn.Linear(self.hidden_nf, 1))\n\n def forward(self, h, atom_mask, global_feature):\n h = self.node_dec(h)\n h = h * atom_mask.unsqueeze(-1)\n h = torch.sum(h, dim=1)\n g = self.gf_enc(global_feature)\n h = torch.cat([h,g], dim=1)\n pred = self.graph_dec(h)\n return pred.squeeze(1)\n\nclass BaselineModel(nn.Module):\n def __init__(self, representation, output, use_adj=True):\n super(BaselineModel, self).__init__()\n self.representation = representation\n self.output = output\n self.use_adj = use_adj\n def forward(self, node_features, batch_mask, pos, adj, global_feature=None):\n if not self.use_adj:\n neighbors, neighbor_mask = adj\n rep = self.representation(node_features, pos, neighbors, neighbor_mask, batch_mask)\n else:\n rep = self.representation(node_features, batch_mask, pos, adj)\n out = self.output(rep, batch_mask, global_feature)\n return out", "id": "9440364", "language": "Python", "matching_score": 2.9735476970672607, "max_stars_count": 0, "path": "baselines/__init__.py" }, { "content": "import math\nfrom . import spk_utils as snn\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom .spk_utils.neighbors import atom_distances\nfrom typing import Union, Callable\n\nclass BesselBasis(nn.Module):\n \"\"\"\n Sine for radial basis expansion with coulomb decay. (0th order Bessel from DimeNet)\n \"\"\"\n\n def __init__(self, cutoff=5.0, n_rbf=None):\n \"\"\"\n Args:\n cutoff: radial cutoff\n n_rbf: number of basis functions.\n \"\"\"\n super(BesselBasis, self).__init__()\n # compute offset and width of Gaussian functions\n freqs = torch.arange(1, n_rbf + 1) * math.pi / cutoff\n self.register_buffer(\"freqs\", freqs)\n\n def forward(self, inputs):\n a = self.freqs[None, None, None, :]\n ax = inputs * a\n sinax = torch.sin(ax)\n\n norm = torch.where(inputs == 0, torch.tensor(1.0, device=inputs.device), inputs)\n y = sinax / norm\n\n return y\n\nact_class_mapping = {\n \"ssp\": snn.ShiftedSoftplus,\n \"silu\": nn.SiLU,\n \"tanh\": nn.Tanh,\n \"sigmoid\": nn.Sigmoid,\n}\n\n\nclass GatedEquivariantBlock(nn.Module):\n \"\"\"Gated Equivariant Block as defined in Schütt et al. 
(2021):\n Equivariant message passing for the prediction of tensorial properties and molecular spectra\n \"\"\"\n\n def __init__(\n self,\n hidden_channels,\n out_channels,\n intermediate_channels=None,\n activation=\"silu\",\n scalar_activation=False,\n ):\n super(GatedEquivariantBlock, self).__init__()\n self.out_channels = out_channels\n\n if intermediate_channels is None:\n intermediate_channels = hidden_channels\n\n self.vec1_proj = nn.Linear(hidden_channels, hidden_channels)\n self.vec2_proj = nn.Linear(hidden_channels, out_channels)\n\n act_class = act_class_mapping[activation]\n self.update_net = nn.Sequential(\n nn.Linear(hidden_channels * 2, intermediate_channels),\n act_class(),\n nn.Linear(intermediate_channels, out_channels * 2),\n )\n\n self.act = act_class() if scalar_activation else None\n\n def reset_parameters(self):\n nn.init.xavier_uniform_(self.vec1_proj.weight)\n nn.init.xavier_uniform_(self.vec2_proj.weight)\n nn.init.xavier_uniform_(self.update_net[0].weight)\n self.update_net[0].bias.data.fill_(0)\n nn.init.xavier_uniform_(self.update_net[2].weight)\n self.update_net[2].bias.data.fill_(0)\n\n def forward(self, x, v):\n vec1 = torch.norm(self.vec1_proj(v), dim=-2)\n vec2 = self.vec2_proj(v)\n\n x = torch.cat([x, vec1], dim=-1)\n x, v = torch.split(self.update_net(x), self.out_channels, dim=-1)\n v = v.unsqueeze(2) * vec2\n\n if self.act is not None:\n x = self.act(x)\n return x, v\n\nclass PaiNN(nn.Module):\n \"\"\" Polarizable atom interaction neural network \"\"\"\n def __init__(\n self,\n n_atom_basis: int = 128,\n n_interactions: int = 3,\n n_rbf: int = 20,\n cutoff: float = 5.,\n cutoff_network: Union[nn.Module, str] = 'cosine',\n radial_basis: Callable = BesselBasis,\n activation=F.silu,\n max_z: int = 100,\n store_neighbors: bool = False,\n store_embeddings: bool = False,\n n_edge_features: int = 0,\n ):\n super(PaiNN, self).__init__()\n\n self.n_atom_basis = n_atom_basis\n self.n_interactions = n_interactions\n self.cutoff = cutoff\n self.cutoff_network = snn.get_cutoff_by_string(cutoff_network)(cutoff)\n self.radial_basis = radial_basis(cutoff=cutoff, n_rbf=n_rbf)\n self.embedding = nn.Linear(max_z, n_atom_basis)\n\n self.store_neighbors = store_neighbors\n self.store_embeddings = store_embeddings\n self.n_edge_features = n_edge_features\n\n # if self.n_edge_features:\n # self.edge_embedding = nn.Embedding(n_edge_features, self.n_interactions * 3 * n_atom_basis, padding_idx=0, max_norm=1.0)\n\n if type(activation) is str:\n if activation == 'swish':\n activation = F.silu\n elif activation == 'softplus':\n activation = snn.shifted_softplus\n\n self.filter_net = snn.Dense(\n n_rbf + n_edge_features, self.n_interactions * 3 * n_atom_basis, activation=None\n )\n\n self.interatomic_context_net = nn.ModuleList(\n [\n nn.Sequential(\n snn.Dense(n_atom_basis, n_atom_basis, activation=activation),\n snn.Dense(n_atom_basis, 3 * n_atom_basis, activation=None),\n )\n for _ in range(self.n_interactions)\n ]\n )\n\n self.intraatomic_context_net = nn.ModuleList(\n [\n nn.Sequential(\n snn.Dense(\n 2 * n_atom_basis, n_atom_basis, activation=activation\n ),\n snn.Dense(n_atom_basis, 3 * n_atom_basis, activation=None),\n )\n for _ in range(self.n_interactions)\n ]\n )\n\n self.mu_channel_mix = nn.ModuleList(\n [\n nn.Sequential(\n snn.Dense(n_atom_basis, 2 * n_atom_basis, activation=None, bias=False)\n )\n for _ in range(self.n_interactions)\n ]\n )\n\n # self.node_dec = nn.Sequential(snn.Dense(self.n_atom_basis, self.n_atom_basis, activation=F.silu),\n # 
snn.Dense(self.n_atom_basis, self.n_atom_basis))\n\n # self.graph_dec = nn.Sequential(snn.Dense(self.n_atom_basis, self.n_atom_basis, activation=F.silu),\n # snn.Dense(self.n_atom_basis, 1)) \n\n def forward(self, node_features, positions, neighbors, neighbor_mask, atom_mask):\n cell = None\n cell_offset = None\n # get interatomic vectors and distances\n rij, dir_ij = atom_distances(\n positions=positions,\n neighbors=neighbors,\n neighbor_mask=neighbor_mask,\n cell=cell,\n cell_offsets=cell_offset,\n return_vecs=True,\n normalize_vecs=True,\n )\n\n phi_ij = self.radial_basis(rij[..., None])\n\n fcut = self.cutoff_network(rij) * neighbor_mask\n # fcut = neighbor_mask\n fcut = fcut.unsqueeze(-1)\n\n filters = self.filter_net(phi_ij)\n\n # if self.n_edge_features:\n # edge_types = inputs['edge_types']\n # filters = filters + self.edge_embedding(edge_types)\n\n filters = filters * fcut\n filters = torch.split(filters, 3 * self.n_atom_basis, dim=-1)\n\n # initialize scalar and vector embeddings\n scalars = self.embedding(node_features)\n\n sshape = scalars.shape\n vectors = torch.zeros((sshape[0], sshape[1], 3, sshape[2]), device=scalars.device)\n\n for i in range(self.n_interactions):\n # message function\n h_i = self.interatomic_context_net[i](scalars)\n h_j, vectors_j = self.collect_neighbors(h_i, vectors, neighbors)\n\n # neighborhood context\n h_i = filters[i] * h_j\n\n dscalars, dvR, dvv = torch.split(h_i, self.n_atom_basis, dim=-1)\n dvectors = torch.einsum(\"bijf,bijd->bidf\", dvR, dir_ij) + torch.einsum(\n \"bijf,bijdf->bidf\", dvv, vectors_j\n )\n dscalars = torch.sum(dscalars, dim=2)\n scalars = scalars + dscalars\n vectors = vectors + dvectors\n\n # update function\n mu_mix = self.mu_channel_mix[i](vectors)\n vectors_V, vectors_U = torch.split(mu_mix, self.n_atom_basis, dim=-1)\n mu_Vn = torch.norm(vectors_V, dim=2)\n\n ctx = torch.cat([scalars, mu_Vn], dim=-1)\n h_i = self.intraatomic_context_net[i](ctx)\n ds, dv, dsv = torch.split(h_i, self.n_atom_basis, dim=-1)\n dv = dv.unsqueeze(2) * vectors_U\n dsv = dsv * torch.einsum(\"bidf,bidf->bif\", vectors_V, vectors_U)\n\n # calculate atomwise updates\n scalars = scalars + ds + dsv\n vectors = vectors + dv\n\n # h = self.node_dec(scalars)\n # h = h * atom_mask.unsqueeze(-1)\n # h = torch.sum(h, dim=1)\n # pred = self.graph_dec(h)\n # return pred.squeeze(1)\n return scalars\n\n # for layer in self.output_network:\n # scalars, vectors = layer(scalars, vectors)\n # # include v in output to make sure all parameters have a gradient\n # pred = scalars + vectors.sum() * 0\n # pred = pred.squeeze(-1) * atom_mask\n # return torch.sum(pred, dim = -1)\n # # scalars = self.scalar_LN(scalars)\n # # vectors = self.vector_LN(vectors)\n\n \n\n def collect_neighbors(self, scalars, vectors, neighbors):\n nbh_size = neighbors.size()\n nbh = neighbors.view(-1, nbh_size[1] * nbh_size[2], 1)\n\n scalar_nbh = nbh.expand(-1, -1, scalars.size(2))\n scalars_j = torch.gather(scalars, 1, scalar_nbh)\n scalars_j = scalars_j.view(nbh_size[0], nbh_size[1], nbh_size[2], -1)\n\n vectors_nbh = nbh[..., None].expand(-1, -1, vectors.size(2), vectors.size(3))\n vectors_j = torch.gather(vectors, 1, vectors_nbh)\n vectors_j = vectors_j.view(nbh_size[0], nbh_size[1], nbh_size[2], 3, -1)\n return scalars_j, vectors_j\n", "id": "4309571", "language": "Python", "matching_score": 2.9061381816864014, "max_stars_count": 0, "path": "baselines/painn.py" }, { "content": "import numpy as np\nimport torch.nn as nn\nfrom torch.nn import functional\n\n\ndef shifted_softplus(x):\n 
r\"\"\"Compute shifted soft-plus activation function.\n\n    .. math::\n       y = \\ln\\left(1 + e^{-x}\\right) - \\ln(2)\n\n    Args:\n        x (torch.Tensor): input tensor.\n\n    Returns:\n        torch.Tensor: shifted soft-plus of input.\n\n    \"\"\"\n    return functional.softplus(x) - np.log(2.0)\n\nclass ShiftedSoftplus(nn.Module):\n    def __init__(self):\n        super(ShiftedSoftplus, self).__init__()\n        # `torch` itself is not imported in this module (only numpy and torch.nn),\n        # so compute the constant shift ln(2) with numpy instead of torch.log\n        self.shift = float(np.log(2.0))\n\n    def forward(self, x):\n        return functional.softplus(x) - self.shift\n", "id": "6423648", "language": "Python", "matching_score": 0.7802753448486328, "max_stars_count": 0, "path": "baselines/spk_utils/activations.py" }, { "content": "import torch\nfrom torch import nn\n\nfrom . import shifted_softplus, Dense\n\n__all__ = [\"MLP\", \"TiledMultiLayerNN\", \"ElementalGate\", \"GatedNetwork\"]\n\n\nclass MLP(nn.Module):\n    \"\"\"Multiple layer fully connected perceptron neural network.\n\n    Args:\n        n_in (int): number of input nodes.\n        n_out (int): number of output nodes.\n        n_hidden (list of int or int, optional): number of hidden layer nodes.\n            If an integer, the same number of nodes is used for all hidden layers, resulting\n            in a rectangular network.\n            If None, the number of neurons is divided by two after each layer starting\n            from n_in, resulting in a pyramidal network.\n        n_layers (int, optional): number of layers.\n        activation (callable, optional): activation function. All hidden layers use\n            the same activation function, except the output layer, which does not apply\n            any activation function.\n\n    \"\"\"\n\n    def __init__(\n        self, n_in, n_out, n_hidden=None, n_layers=2, activation=shifted_softplus\n    ):\n        super(MLP, self).__init__()\n        # get list of number of nodes in input, hidden & output layers\n        if n_hidden is None:\n            c_neurons = n_in\n            self.n_neurons = []\n            for i in range(n_layers):\n                self.n_neurons.append(c_neurons)\n                c_neurons = max(n_out, c_neurons // 2)\n            self.n_neurons.append(n_out)\n        else:\n            # get list of number of nodes in hidden layers\n            if type(n_hidden) is int:\n                n_hidden = [n_hidden] * (n_layers - 1)\n            self.n_neurons = [n_in] + n_hidden + [n_out]\n\n        # assign a Dense layer (with activation function) to each hidden layer\n        layers = [\n            Dense(self.n_neurons[i], self.n_neurons[i + 1], activation=activation)\n            for i in range(n_layers - 1)\n        ]\n        # assign a Dense layer (without activation function) to the output layer\n        layers.append(Dense(self.n_neurons[-2], self.n_neurons[-1], activation=None))\n        # put all layers together to make the network\n        self.out_net = nn.Sequential(*layers)\n\n    def forward(self, inputs):\n        \"\"\"Compute neural network output.\n\n        Args:\n            inputs (torch.Tensor): network input.\n\n        Returns:\n            torch.Tensor: network output.\n\n        \"\"\"\n        return self.out_net(inputs)\n\n\nclass TiledMultiLayerNN(nn.Module):\n    \"\"\"\n    Tiled multilayer networks which are applied to the input and produce n_tiles different outputs.\n    These outputs are then stacked and returned. Used e.g. 
to construct element-dependent prediction\n networks of the Behler-Parrinello type.\n\n Args:\n n_in (int): number of input nodes\n n_out (int): number of output nodes\n n_tiles (int): number of networks to be tiled\n n_hidden (int): number of nodes in hidden nn (default 50)\n n_layers (int): number of layers (default: 3)\n \"\"\"\n\n def __init__(\n self, n_in, n_out, n_tiles, n_hidden=50, n_layers=3, activation=shifted_softplus\n ):\n super(TiledMultiLayerNN, self).__init__()\n self.mlps = nn.ModuleList(\n [\n MLP(\n n_in,\n n_out,\n n_hidden=n_hidden,\n n_layers=n_layers,\n activation=activation,\n )\n for _ in range(n_tiles)\n ]\n )\n\n def forward(self, inputs):\n \"\"\"\n Args:\n inputs (torch.Tensor): Network inputs.\n\n Returns:\n torch.Tensor: Tiled network outputs.\n\n \"\"\"\n return torch.cat([net(inputs) for net in self.mlps], 2)\n\n\nclass ElementalGate(nn.Module):\n \"\"\"\n Produces a Nbatch x Natoms x Nelem mask depending on the nuclear charges passed as an argument.\n If onehot is set, mask is one-hot mask, else a random embedding is used.\n If the trainable flag is set to true, the gate values can be adapted during training.\n\n Args:\n elements (set of int): Set of atomic number present in the data\n onehot (bool): Use one hit encoding for elemental gate. If set to False, random embedding is used instead.\n trainable (bool): If set to true, gate can be learned during training (default False)\n \"\"\"\n\n def __init__(self, elements, onehot=True, trainable=False):\n super(ElementalGate, self).__init__()\n self.trainable = trainable\n\n # Get the number of elements, as well as the highest nuclear charge to use in the embedding vector\n self.nelems = len(elements)\n maxelem = int(max(elements) + 1)\n\n self.gate = nn.Embedding(maxelem, self.nelems)\n\n # if requested, initialize as one hot gate for all elements\n if onehot:\n weights = torch.zeros(maxelem, self.nelems)\n for idx, Z in enumerate(elements):\n weights[Z, idx] = 1.0\n self.gate.weight.data = weights\n\n # Set trainable flag\n if not trainable:\n self.gate.weight.requires_grad = False\n\n def forward(self, atomic_numbers):\n \"\"\"\n Args:\n atomic_numbers (torch.Tensor): Tensor containing atomic numbers of each atom.\n\n Returns:\n torch.Tensor: One-hot vector which is one at the position of the element and zero otherwise.\n\n \"\"\"\n return self.gate(atomic_numbers)\n\n\nclass GatedNetwork(nn.Module):\n \"\"\"\n Combines the TiledMultiLayerNN with the elemental gate to obtain element specific atomistic networks as in typical\n Behler--Parrinello networks [#behler1]_.\n\n Args:\n nin (int): number of input nodes\n nout (int): number of output nodes\n nnodes (int): number of nodes in hidden nn (default 50)\n nlayers (int): number of layers (default 3)\n elements (set of ints): Set of atomic number present in the data\n onehot (bool): Use one hit encoding for elemental gate. If set to False, random embedding is used instead.\n trainable (bool): If set to true, gate can be learned during training (default False)\n activation (callable): activation function\n\n References\n ----------\n .. [#behler1] Behler, Parrinello:\n Generalized Neural-Network Representation of High-Dimensional Potential-Energy Surfaces.\n Phys. Rev. Lett. 98, 146401. 
2007.\n\n \"\"\"\n\n def __init__(\n self,\n nin,\n nout,\n elements,\n n_hidden=50,\n n_layers=3,\n trainable=False,\n onehot=True,\n activation=shifted_softplus,\n ):\n super(GatedNetwork, self).__init__()\n self.nelem = len(elements)\n self.gate = ElementalGate(elements, trainable=trainable, onehot=onehot)\n self.network = TiledMultiLayerNN(\n nin,\n nout,\n self.nelem,\n n_hidden=n_hidden,\n n_layers=n_layers,\n activation=activation,\n )\n\n def forward(self, atomic_numbers, representation):\n \"\"\"\n Args:\n inputs (dict of torch.Tensor): SchNetPack format dictionary of input tensors.\n\n Returns:\n torch.Tensor: Output of the gated network.\n \"\"\"\n # At this point, inputs should be the general schnetpack container\n gated_network = self.gate(atomic_numbers) * self.network(representation)\n return torch.sum(gated_network, -1, keepdim=True)\n", "id": "4181405", "language": "Python", "matching_score": 2.109919548034668, "max_stars_count": 0, "path": "baselines/spk_utils/blocks.py" }, { "content": "\"\"\"\nBasic building blocks of SchNetPack models. Contains various basic and specialized network layers, layers for\ncutoff functions, as well as several auxiliary layers and functions.\n\"\"\"\n\nfrom .acsf import *\nfrom .activations import *\nfrom .base import *\nfrom .blocks import *\nfrom .cfconv import *\nfrom .cutoff import *\nfrom .initializers import *\nfrom .neighbors import *\n", "id": "9245780", "language": "Python", "matching_score": 0.15145155787467957, "max_stars_count": 0, "path": "baselines/spk_utils/__init__.py" }, { "content": "from typing import List\nimport torch\nimport torch.nn as nn\n\nfrom chemprop.data import CrystalDataset, StandardScaler\n\n\ndef predict(model: nn.Module,\n data: CrystalDataset,\n batch_size: int,\n scaler: StandardScaler = None) -> List[List[float]]:\n \"\"\"\n Makes predictions on a dataset using an ensemble of models.\n\n :param model: A model.\n :param data: A CrystalDataset.\n :param batch_size: Batch size.\n :param scaler: A StandardScaler object fit on the training targets.\n :return: A list of lists of predictions. The outer list is examples\n while the inner list is tasks.\n \"\"\"\n model.eval()\n\n preds = []\n\n num_iters, iter_step = len(data), batch_size\n\n for i in range(0, num_iters, iter_step):\n # Prepare batch\n crystal_batch = CrystalDataset(data[i:i + batch_size])\n\n with torch.no_grad():\n preds_batch = model(crystal_batch)\n\n preds_batch = preds_batch.data.cpu().numpy()\n\n # Inverse scale if regression\n if scaler is not None:\n preds_batch = scaler.inverse_transform(preds_batch)\n\n # Collect vectors\n preds_batch = preds_batch.tolist()\n preds.extend(preds_batch)\n\n return preds\n\n\ndef transfer_predict(model: nn.Module,\n data: CrystalDataset,\n batch_size: int,\n scaler: StandardScaler = None) -> List[List[float]]:\n \"\"\"\n Makes predictions on a dataset using an ensemble of models.\n\n :param model: A model.\n :param data: A CrystalDataset.\n :param batch_size: Batch size.\n :param scaler: A StandardScaler object fit on the training targets.\n :return: A list of lists of predictions. 
The outer list is examples\n while the inner list is tasks.\n \"\"\"\n model.eval()\n\n preds = []\n\n num_iters, iter_step = len(data), batch_size\n\n for i in range(0, num_iters, iter_step):\n # Prepare batch\n crystal_batch = CrystalDataset(data[i:i + batch_size])\n\n with torch.no_grad():\n preds_batch = model(crystal_batch)\n\n preds_batch = preds_batch.data.cpu().numpy()\n\n # Inverse scale if regression\n if scaler is not None:\n preds_batch = scaler.inverse_transform(preds_batch)\n\n # Collect vectors, None for keeping dimension\n preds.extend(preds_batch.tolist())\n\n return preds\n\n", "id": "10277122", "language": "Python", "matching_score": 2.362532377243042, "max_stars_count": 8, "path": "crystalnet/train/predict.py" }, { "content": "from .data import CrystalDatapoint, CrystalDataset\r\nfrom .scaler import StandardScaler\r\n", "id": "6254325", "language": "Python", "matching_score": 1.6641180515289307, "max_stars_count": 8, "path": "crystalnet/data/__init__.py" }, { "content": "from argparse import Namespace\nimport random\nfrom typing import Callable, List, Union\nimport numpy as np\nfrom torch.utils.data.dataset import Dataset\nfrom pymatgen.io.vasp import Poscar\n\n\nclass CrystalDatapoint:\n \"\"\"A CrystalDatapoint contains a single crystal and its associated features and targets.\"\"\"\n\n def __init__(self,\n crystal_name: str = None,\n crystal_dict: dict = None,\n targets: List[float] = None,\n ari: object = None,\n gdf: object = None,\n radius_dic: dict = None,\n args: Namespace = None\n ):\n \"\"\"\n Initializes a CrystalDatapoint, which contains a single crystal.\n\n # :param line: A list of strings generated by separating a line in a data CSV file by comma.\n # :param args: Arguments.\n # :param features: A numpy array containing additional features (ex. Morgan fingerprint).\n \"\"\"\n\n self.name = crystal_name\n self.crystal = Poscar.from_dict(crystal_dict).structure\n self.targets = targets\n\n # construct atom features\n self.atom_features = np.vstack([ari.get_atom_features(self.crystal[i].specie.number) for i in range(len(self.crystal))])\n\n # construct bond features\n all_neighbors = self.crystal.get_all_neighbors(r=args.radius, include_index=True)\n all_neighbors = [sorted(neighbors, key=lambda x: x[1]) for neighbors in all_neighbors]\n\n point_indices, offset_vectors, distances = [], [], []\n\n for neighbors in all_neighbors:\n if len(neighbors) < args.max_num_neighbors:\n point_indices.append(list(map(lambda x: x[2], neighbors)) + [0] * (args.max_num_neighbors - len(neighbors)))\n offset_vectors.append(list(map(lambda x: list(x[3]), neighbors)) + [[0.0, 0.0, 0.0]] * (args.max_num_neighbors - len(neighbors)))\n distances.append(list(map(lambda x: x[1], neighbors)) + [args.radius + 1.] 
* (args.max_num_neighbors - len(neighbors)))\n else:\n point_indices.append(list(map(lambda x: x[2], neighbors[:args.max_num_neighbors])))\n offset_vectors.append(list(map(lambda x: list(x[3]), neighbors[:args.max_num_neighbors])))\n distances.append(list(map(lambda x: x[1], neighbors[:args.max_num_neighbors])))\n\n # (len(crystal), args.max_number_neighbors)\n self.point_indices = np.array(point_indices)\n # (len(crystal), args.max_number_neighbors, gdf_array_dimensions)\n self.bond_features = np.concatenate([np.array(offset_vectors), gdf.expand(np.array(distances))], axis=-1)\n\n def num_tasks(self) -> int:\n \"\"\"\n Returns the number of prediction tasks.\n\n :return: The number of tasks.\n \"\"\"\n return len(self.targets)\n\n def set_targets(self, targets: List[float]):\n \"\"\"\n Sets the targets of a crystal.\n\n :param targets: A list of floats containing the targets.\n \"\"\"\n self.targets = targets\n\n\nclass CrystalDataset(Dataset):\n \"\"\"A CrystalDataset contains a list of crystals and their associated features and targets.\"\"\"\n\n def __init__(self, data: List[CrystalDatapoint], args: Namespace = None):\n \"\"\"\n Initializes a CrystalDataset, which contains a list of CrystalDatapoints (i.e. a list of crystals).\n\n :param data: A list of CrystalDatapoints.\n \"\"\"\n self.data = data\n self.args = args\n self.scaler = None\n\n def names(self) -> List[str]:\n \"\"\"\n Returns the crystal names associated with the crystal (if they exist).\n\n :return: A list of crystal names or None if the dataset does not contain crystal names.\n \"\"\"\n if len(self.data) == 0 or self.data[0].name is None:\n return []\n else:\n return [d.name for d in self.data]\n\n def crystals(self) -> List[Poscar]:\n \"\"\"\n Returns the structure associated with the crystal.\n\n :return: A list of Poscar crystal structures.\n \"\"\"\n return [d.crystal for d in self.data]\n\n def targets(self) -> List[List[float]]:\n \"\"\"\n Returns the targets associated with each crystal.\n\n :return: A list of lists of floats containing the targets.\n \"\"\"\n return [d.targets for d in self.data]\n\n def num_tasks(self) -> int:\n \"\"\"\n Returns the number of prediction tasks.\n\n :return: The number of tasks.\n \"\"\"\n return self.data[0].num_tasks() if len(self.data) > 0 else None\n\n def shuffle(self, seed: int = None):\n \"\"\"\n Shuffles the dataset.\n\n :param seed: Optional random seed.\n \"\"\"\n if seed is not None:\n random.seed(seed)\n random.shuffle(self.data)\n\n def set_targets(self, targets: List[List[float]]):\n \"\"\"\n Sets the targets for each crystal in the dataset. Assumes the targets are aligned with the datapoints.\n\n :param targets: A list of lists of floats containing targets for each crystal. This must be the\n same length as the underlying dataset.\n \"\"\"\n assert len(self.data) == len(targets)\n for i in range(len(self.data)):\n self.data[i].set_targets(targets[i])\n\n def sort(self, key: Callable):\n \"\"\"\n Sorts the dataset using the provided key.\n\n :param key: A function on a CrystalDatapoint to determine the sorting order.\n \"\"\"\n self.data.sort(key=key)\n\n def __len__(self) -> int:\n \"\"\"\n Returns the length of the dataset (i.e. 
the number of crystals).\n\n :return: The length of the dataset.\n \"\"\"\n return len(self.data)\n\n def __getitem__(self, item) -> Union[CrystalDatapoint, List[CrystalDatapoint]]:\n \"\"\"\n Gets one or more CrystalDatapoints via an index or slice.\n\n :param item: An index (int) or a slice object.\n :return: A CrystalDatapoint if an int is provided or a list of CrystalDatapoints if a slice is provided.\n \"\"\"\n return self.data[item]\n", "id": "1062998", "language": "Python", "matching_score": 1.7541223764419556, "max_stars_count": 8, "path": "crystalnet/data/data.py" }, { "content": "import logging\nimport os\nimport pickle\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom torch.utils.data import Dataset, dataset\nimport json\nimport copy\n\n\nFloatTensor = torch.FloatTensor\nLongTensor = torch.LongTensor\nIntTensor = torch.IntTensor\nDoubleTensor = torch.DoubleTensor\n\ndef cutname(ori_name):\n ori_name = ori_name[:-3]\n if ori_name.endswith('out.'):\n ori_name = ori_name[:-4]\n elif ori_name.endswith('faps.'):\n ori_name = ori_name[:-5]\n return ori_name + 'p'\n\n\ndef load_data_from_df(dataset_path, gas_type, pressure, add_dummy_node=True, use_global_features=False, return_names=False):\n\n data_df = pd.read_csv(dataset_path + f'/label_by_GCMC/{gas_type}_ads_all.csv',header=0)\n data_x = data_df['name'].values\n if pressure == 'all':\n data_y = data_df.iloc[:,1:].values\n else:\n data_y = data_df[pressure].values\n\n if data_y.dtype == np.float64:\n data_y = data_y.astype(np.float32)\n\n x_all, y_all, name_all = load_data_from_processed(dataset_path, data_x, data_y, add_dummy_node=add_dummy_node)\n\n if return_names:\n x_all = (x_all, name_all)\n\n if use_global_features:\n f_all = load_data_with_global_features(dataset_path, name_all, gas_type)\n if pressure == 'all':\n return x_all, f_all, y_all, data_df.columns.values[1:]\n return x_all, f_all, y_all\n\n if pressure == 'all':\n return x_all, y_all, data_df.columns.values[1:]\n return x_all, y_all\n\ndef norm_str(ori):\n ori = ori.split('.')[0].split('-')\n if ori[-1] == 'clean':\n ori = ori[:-1]\n elif ori[-2] == 'clean':\n ori = ori[:-2]\n return '-'.join(ori[1:])\n\n\ndef load_real_data(dataset_path, gas_type):\n \n data_df = pd.read_csv(dataset_path + f'/global_features/exp_geo_all.csv', header=0)\n data_x = data_df['name'].values\n data_y = data_df.iloc[:,1:].values\n global_dic = {}\n for x,y in zip(data_x, data_y):\n global_dic[x] = y\n with open(dataset_path + '/isotherm_data/all.json') as f:\n labels = json.load(f)[gas_type]['data']\n label_dict = {_['name']:_[\"isotherm_data\"] for _ in labels}\n \n with open(dataset_path + f'/isotherm_data/{gas_type}.txt','r') as f:\n ls = f.readlines()\n ls = [_.strip().split() for _ in ls]\n X_all, y_all, f_all, p_all, n_all = [],[],[],[],[]\n for l in ls:\n if l[0] not in global_dic:\n continue\n gf = global_dic[l[0]]\n afm, adj, dist = pickle.load(open(dataset_path + f'/local_features/{l[0]}.cif.p', \"rb\"))\n afm, adj, dist = add_dummy_node_func(afm, adj, dist)\n iso = label_dict[norm_str(l[0])]\n p,y = [],[]\n for _ in iso:\n if _['pressure'] > 0:\n p.append(_['pressure'])\n y.append(_['adsorption'])\n if len(p) == 0:\n continue\n X_all.append([afm,adj,dist])\n f_all.append(gf)\n p_all.append(p)\n y_all.append(y)\n n_all.append(norm_str(l[0]))\n return X_all, f_all, y_all, p_all, n_all\n\n\n\n\n\ndef load_data_with_global_features(dataset_path, processed_files, gas_type):\n global_feature_path = dataset_path + 
f'/global_features/{gas_type}_global_features_update.csv'\n data_df = pd.read_csv(global_feature_path,header=0)\n data_x = data_df.iloc[:, 0].values\n data_f = data_df.iloc[:,1:].values.astype(np.float32)\n data_dict = {}\n for i in range(data_x.shape[0]):\n data_dict[data_x[i]] = data_f[i]\n f_all = [data_dict[_] for _ in processed_files]\n return f_all\n\n\n\ndef load_data_from_processed(dataset_path, processed_files, labels, add_dummy_node=True):\n x_all, y_all, name_all = [], [], []\n\n for files, label in zip(processed_files, labels):\n \n data_file = dataset_path + '/local_features/' + files + '.p'\n try:\n afm, adj, dist = pickle.load(open(data_file, \"rb\"))\n if add_dummy_node:\n afm, adj, dist = add_dummy_node_func(afm, adj, dist)\n x_all.append([afm, adj, dist])\n y_all.append([label])\n name_all.append(files)\n except:\n pass\n\n return x_all, y_all, name_all\n\ndef add_dummy_node_func(node_features, adj_matrix, dist_matrix):\n m = np.zeros((node_features.shape[0] + 1, node_features.shape[1] + 1))\n m[1:, 1:] = node_features\n m[0, 0] = 1.\n node_features = m\n\n m = np.ones((adj_matrix.shape[0] + 1, adj_matrix.shape[1] + 1))\n m[1:, 1:] = adj_matrix\n adj_matrix = m\n\n m = np.full((dist_matrix.shape[0] + 1, dist_matrix.shape[1] + 1), 1e6)\n m[1:, 1:] = dist_matrix\n dist_matrix = m\n\n return node_features, adj_matrix, dist_matrix\n\n\nclass MOF:\n def __init__(self, x, y, index, feature = None):\n self.node_features = x[0]\n self.adjacency_matrix = x[1]\n self.distance_matrix = x[2]\n self.y = y\n self.index = index\n self.global_feature = feature\n\n\nclass MOFDataset(Dataset):\n\n def __init__(self, data_list):\n self.data_list = data_list\n\n def __len__(self):\n return len(self.data_list)\n\n def __getitem__(self, key):\n if type(key) == slice:\n return MOFDataset(self.data_list[key])\n return self.data_list[key]\n\n\nclass RealMOFDataset(Dataset):\n def __init__(self, data_list, pressure_list, ori_point):\n self.data_list = data_list\n self.pressure_list = pressure_list\n self.ori_point = np.log(np.float32(ori_point))\n def __len__(self):\n return len(self.data_list)\n def __getitem__(self,key):\n if type(key) == slice:\n return RealMOFDataset(self.data_list[key], self.pressure_list[key], self.ori_point)\n tar_mol = copy.deepcopy(self.data_list[key])\n tar_p = np.log(self.pressure_list[key]) - self.ori_point\n tar_mol.global_feature = np.append(tar_mol.global_feature, tar_p)\n tar_mol.y = tar_mol.y\n return tar_mol\n\nclass MOFDatasetPressureVer(Dataset):\n\n def __init__(self, data_list, pressure_list, mask_point=None, is_train=True, tar_point=None):\n self.data_list = data_list\n self.pressure_list = pressure_list\n self.mask_point = mask_point\n self.is_train = is_train\n self.tar_point = tar_point\n if is_train:\n self.use_idx = np.where(pressure_list != mask_point)[0]\n else:\n self.use_idx = np.where(pressure_list == tar_point)[0]\n self.calcMid()\n\n def __len__(self):\n return len(self.data_list)\n\n def toStr(self):\n return {\"data_list\":self.data_list,\"pressure_list\":self.pressure_list,\"mask_point\":self.mask_point,\"is_train\":self.is_train, \"tar_point\":self.tar_point}\n def __getitem__(self, key):\n if type(key) == slice:\n return MOFDataset(self.data_list[key], self.pressure_list, self.mask_point, self.is_train)\n tar_mol = copy.deepcopy(self.data_list[key])\n if self.is_train:\n tar_p = self.float_pressure - self.mid\n tar_mol.global_feature = np.append(tar_mol.global_feature, tar_p)\n tar_mol.y = tar_mol.y[0]\n else:\n tar_idx = self.use_idx\n 
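            # Evaluation branch: only the pressure entries selected by use_idx are
            # used; their log-pressure offset from the masked training pressure is
            # appended to the global feature vector and the label is restricted to
            # the same entries, so train and eval share one input layout.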
tar_p = self.float_pressure[tar_idx] - self.mid\n tar_mol.global_feature = np.append(tar_mol.global_feature, tar_p)\n tar_mol.y = [tar_mol.y[0][tar_idx]]\n return tar_mol\n\n def changeTarPoint(self,tar_point):\n self.tar_point = tar_point\n if not tar_point:\n self.is_train = True\n else:\n self.is_train = False\n if not self.is_train:\n self.use_idx = np.where(self.pressure_list == tar_point)[0]\n\n def calcMid(self):\n self.float_pressure = np.log(self.pressure_list.astype(np.float))\n self.mid = np.log(np.float(self.mask_point))\n\n\ndef pad_array(array, shape, dtype=np.float32):\n padded_array = np.zeros(shape, dtype=dtype)\n padded_array[:array.shape[0], :array.shape[1]] = array\n return padded_array\n\n\ndef mof_collate_func_gf(batch):\n adjacency_list, distance_list, features_list, global_features_list = [], [], [], []\n labels = []\n\n max_size = 0\n for molecule in batch:\n if type(molecule.y[0]) == np.ndarray:\n labels.append(molecule.y[0])\n else:\n labels.append(molecule.y)\n if molecule.adjacency_matrix.shape[0] > max_size:\n max_size = molecule.adjacency_matrix.shape[0]\n\n for molecule in batch:\n adjacency_list.append(pad_array(molecule.adjacency_matrix, (max_size, max_size)))\n distance_list.append(pad_array(molecule.distance_matrix, (max_size, max_size)))\n features_list.append(pad_array(molecule.node_features, (max_size, molecule.node_features.shape[1])))\n global_features_list.append(molecule.global_feature)\n\n return [FloatTensor(features) for features in (adjacency_list, features_list, distance_list, global_features_list, labels)] \n\n\ndef construct_dataset(x_all, y_all):\n output = [MOF(data[0], data[1], i)\n for i, data in enumerate(zip(x_all, y_all))]\n return MOFDataset(output)\n\ndef construct_dataset_gf(x_all, f_all, y_all):\n output = [MOF(data[0], data[2], i, data[1])\n for i, data in enumerate(zip(x_all, f_all, y_all))]\n return MOFDataset(output)\n\ndef construct_dataset_gf_pressurever(x_all, f_all, y_all, pressure_list, is_train=True, mask_point=None, tar_point=None):\n output = [MOF(data[0], data[2], i, data[1])\n for i, data in enumerate(zip(x_all, f_all, y_all))]\n return MOFDatasetPressureVer(output, pressure_list, is_train=is_train, mask_point=mask_point,tar_point=tar_point)\n\ndef construct_dataset_real(x_all, f_all, y_all, pressure_list, tar_point=None):\n output = [MOF(data[0], data[2], i, data[1])\n for i, data in enumerate(zip(x_all, f_all, y_all))]\n return RealMOFDataset(output, pressure_list, ori_point=tar_point)\n\ndef construct_loader_gf(x,f,y, batch_size, shuffle=True):\n data_set = construct_dataset_gf(x, f, y)\n loader = torch.utils.data.DataLoader(dataset=data_set,\n batch_size=batch_size,\n\t\t\t\t\t num_workers=0,\n collate_fn=mof_collate_func_gf,\n\t\t\t\t\t pin_memory=True,\n shuffle=shuffle)\n return loader\n\ndef construct_loader_gf_pressurever(data_set, batch_size, shuffle=True):\n loader = torch.utils.data.DataLoader(dataset=data_set,\n batch_size=batch_size,\n\t\t\t\t\t num_workers=0,\n collate_fn=mof_collate_func_gf,\n\t\t\t\t\t pin_memory=True,\n shuffle=shuffle)\n return loader\n\nclass data_prefetcher():\n def __init__(self, loader):\n self.loader = iter(loader)\n self.stream = torch.cuda.Stream()\n self.preload()\n\n def preload(self):\n try:\n self.next_data = next(self.loader)\n except StopIteration:\n self.next_data = None\n return\n with torch.cuda.stream(self.stream):\n self.next_data = tuple(_.cuda(non_blocking=True) for _ in self.next_data)\n \n def next(self):\n 
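        # Make the default stream wait for the asynchronous host-to-device copies
        # issued on the side stream in preload(), hand back the batch that is now
        # resident on the GPU, then immediately start prefetching the following
        # batch so the copy overlaps with compute.
        #
        # Typical usage, as in the training loops in this repo (sketch):
        #   prefetcher = data_prefetcher(train_loader)
        #   data = prefetcher.next()
        #   while data is not None:
        #       ...consume data...
        #       data = prefetcher.next()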
torch.cuda.current_stream().wait_stream(self.stream)\n batch = self.next_data\n self.preload()\n return batch\n", "id": "5902777", "language": "Python", "matching_score": 6.537533760070801, "max_stars_count": 0, "path": "featurization/data_utils.py" }, { "content": "import logging\nimport os\nimport pickle\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom sklearn.metrics import pairwise_distances\nfrom torch.utils.data import Dataset, dataset\nfrom scipy.sparse import coo_matrix\nimport json\nimport copy\n\n\nFloatTensor = torch.FloatTensor\nLongTensor = torch.LongTensor\nIntTensor = torch.IntTensor\nDoubleTensor = torch.DoubleTensor\n\ndef load_data_from_df(dataset_path, gas_type, pressure, use_global_features=False):\n print(dataset_path + f'/label/{gas_type}/{gas_type}_ads_all.csv')\n data_df = pd.read_csv(dataset_path + f'/label/{gas_type}/{gas_type}_ads_all.csv',header=0)\n \n data_x = data_df['name'].values\n if pressure == 'all':\n data_y = data_df.iloc[:,1:].values\n else:\n data_y = data_df[pressure].values\n\n if data_y.dtype == np.float64:\n data_y = data_y.astype(np.float32)\n\n x_all, y_all, name_all = load_data_from_processed(dataset_path, data_x, data_y)\n\n if use_global_features:\n f_all = load_data_with_global_features(dataset_path, name_all, gas_type)\n if pressure == 'all':\n return x_all, f_all, y_all, data_df.columns.values[1:]\n return x_all, f_all, y_all\n\n if pressure == 'all':\n return x_all, y_all, data_df.columns.values[1:]\n return x_all, y_all\n\ndef load_data_with_global_features(dataset_path, processed_files, gas_type):\n global_feature_path = dataset_path + f'/label/{gas_type}/{gas_type}_global_features_update.csv'\n data_df = pd.read_csv(global_feature_path,header=0)\n data_x = data_df.iloc[:, 0].values\n data_f = data_df.iloc[:,1:].values.astype(np.float32)\n data_dict = {}\n for i in range(data_x.shape[0]):\n data_dict[data_x[i]] = data_f[i]\n f_all = [data_dict[_] for _ in processed_files]\n return f_all\n\n\n\ndef load_data_from_processed(dataset_path, processed_files, labels):\n x_all, y_all, name_all = [], [], []\n\n for files, label in zip(processed_files, labels):\n \n data_file = dataset_path + '/processed_en/' + files + '.p'\n try:\n afm, row, col, pos = pickle.load(open(data_file, \"rb\"))\n x_all.append([afm, row, col, pos])\n y_all.append([label])\n name_all.append(files)\n except:\n pass\n\n return x_all, y_all, name_all\n\nclass MOF:\n\n def __init__(self, x, y, index, feature = None):\n self.node_features = x[0]\n self.edges = np.array([x[1],x[2]])\n self.pos = x[3]\n self.y = y\n self.index = index\n self.global_feature = feature\n self.size = x[0].shape[0]\n self.adj, self.nbh, self.nbh_mask = self.neighbor_matrix()\n\n def neighbor_matrix(self):\n csr = coo_matrix((np.ones_like(self.edges[0]), self.edges), shape=(self.size, self.size)).tocsr()\n rowptr, col = csr.indptr, csr.indices\n degree = rowptr[1:] - rowptr[:-1]\n max_d = degree.max()\n _range = np.tile(np.arange(max_d),(self.size,1)).reshape(-1)\n _degree = degree.repeat(max_d).reshape(-1)\n mask = _range < _degree\n ret_nbh = np.zeros(self.size * max_d)\n ret_nbh[mask] = col\n return csr.toarray(), ret_nbh.reshape(self.size, max_d), mask.reshape(self.size, max_d)\n\n\nclass MOFDataset(Dataset):\n\n def __init__(self, data_list):\n \n self.data_list = data_list\n\n def __len__(self):\n return len(self.data_list)\n\n def __getitem__(self, key):\n if type(key) == slice:\n return MOFDataset(self.data_list[key])\n return self.data_list[key]\n\ndef 
construct_dataset_gf(x_all, f_all, y_all):\n output = [MOF(data[0], data[2], i, data[1])\n for i, data in enumerate(zip(x_all, f_all, y_all))]\n return MOFDataset(output)\n\ndef pad_array(array, shape, dtype=np.float32):\n padded_array = np.zeros(shape, dtype=dtype)\n padded_array[:array.shape[0], :array.shape[1]] = array\n return padded_array\n\ndef mof_collate_func_adj(batch):\n pos_list, features_list,global_features_list = [], [], []\n adjs = []\n labels = []\n\n max_size = 0\n for molecule in batch:\n if type(molecule.y[0]) == np.ndarray:\n labels.append(molecule.y[0])\n else:\n labels.append(molecule.y)\n if molecule.node_features.shape[0] > max_size:\n max_size = molecule.node_features.shape[0]\n\n for molecule in batch:\n pos_list.append(pad_array(molecule.pos, (max_size, 3)))\n features_list.append(pad_array(molecule.node_features, (max_size, molecule.node_features.shape[1])))\n adjs.append(pad_array(molecule.adj, (max_size, max_size)))\n global_features_list.append(molecule.global_feature)\n\n return [FloatTensor(features_list), FloatTensor(pos_list), LongTensor(adjs), FloatTensor(global_features_list), FloatTensor(labels)] \n\ndef mof_collate_func_nbh(batch):\n pos_list, features_list, global_features_list = [], [], []\n nbhs, nbh_masks = [],[]\n labels = []\n\n max_size = 0\n max_degree = 0\n for molecule in batch:\n if type(molecule.y[0]) == np.ndarray:\n labels.append(molecule.y[0])\n else:\n labels.append(molecule.y)\n if molecule.node_features.shape[0] > max_size:\n max_size = molecule.node_features.shape[0]\n if molecule.nbh.shape[1] > max_degree:\n max_degree = molecule.nbh.shape[1]\n\n for molecule in batch:\n pos_list.append(pad_array(molecule.pos, (max_size, 3)))\n features_list.append(pad_array(molecule.node_features, (max_size, molecule.node_features.shape[1])))\n nbhs.append(pad_array(molecule.nbh, (max_size, max_degree)))\n nbh_masks.append(pad_array(molecule.nbh_mask, (max_size, max_degree)))\n global_features_list.append(molecule.global_feature)\n\n return [FloatTensor(features_list), FloatTensor(pos_list), LongTensor(nbhs), FloatTensor(nbh_masks), FloatTensor(global_features_list), FloatTensor(labels)] \n\ndef construct_loader(x, f, y, batch_size, shuffle=True, use_adj=True):\n data_set = construct_dataset_gf(x, f, y)\n loader = torch.utils.data.DataLoader(dataset=data_set,\n batch_size=batch_size,\n\t\t\t\t\t num_workers=8,\n collate_fn=mof_collate_func_adj if use_adj else mof_collate_func_nbh,\n\t\t\t\t\t pin_memory=True,\n shuffle=shuffle)\n return loader\n\nclass data_prefetcher():\n def __init__(self, loader, device):\n self.loader = iter(loader)\n self.stream = torch.cuda.Stream(device)\n self.preload()\n\n def preload(self):\n try:\n self.next_data = next(self.loader)\n except StopIteration:\n self.next_data = None\n return\n with torch.cuda.stream(self.stream):\n self.next_data = tuple(_.cuda(non_blocking=True) for _ in self.next_data)\n \n def next(self):\n torch.cuda.current_stream().wait_stream(self.stream)\n batch = self.next_data\n self.preload()\n return batch", "id": "9704393", "language": "Python", "matching_score": 2.5910916328430176, "max_stars_count": 0, "path": "baselines/data_utils.py" }, { "content": "import shap \nimport torch\nfrom collections import defaultdict\nfrom featurization.data_utils import load_data_from_df, construct_loader_gf_pressurever, construct_dataset_gf_pressurever, data_prefetcher\nfrom models.transformer import make_model\nimport numpy as np\nimport os\nfrom argparser import parse_train_args\nimport pickle\nfrom tqdm 
import tqdm\nfrom utils import *\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\ndef gradient_shap(model, sample_loader, test_loader, batch_size):\n model.eval()\n model.set_adapter_dim(1)\n graph_reps, global_feas = [],[]\n for data in tqdm(sample_loader):\n adjacency_matrix, node_features, distance_matrix, global_features, y = (_.cpu() for _ in data)\n batch_mask = torch.sum(torch.abs(node_features), dim=-1) != 0\n batch_mask = batch_mask.float()\n graph_rep = model.encode(node_features, batch_mask, adjacency_matrix, distance_matrix, None)\n graph_reps.append(graph_rep)\n global_feas.append(global_features)\n graph_reps = torch.cat(graph_reps)\n global_feas = torch.cat(global_feas)\n e = shap.GradientExplainer(model.generator, [graph_reps, global_feas])\n shap_all = []\n for data in tqdm(test_loader):\n adjacency_matrix, node_features, distance_matrix, global_features, y = (_.cpu() for _ in data)\n batch_mask = torch.sum(torch.abs(node_features), dim=-1) != 0\n batch_mask = batch_mask.float()\n graph_rep = model.encode(node_features, batch_mask, adjacency_matrix, distance_matrix, None)\n ans = e.shap_values([graph_rep, global_features],nsamples=10)\n local_shap = np.abs(ans[0].sum(axis=1)).reshape(-1,1)\n global_shap = np.abs(ans[-1])[:,:9]\n shap_values = np.concatenate([local_shap, global_shap],axis=1)\n shap_all.append(shap_values)\n shap_all = np.concatenate(shap_all, axis=0)\n return shap_all\n\nif __name__ == '__main__':\n model_params = parse_train_args()\n device_ids = [0,1,2,3]\n X, f, y, p = load_data_from_df(model_params['data_dir'],gas_type=model_params['gas_type'], pressure='all',add_dummy_node = True,use_global_features = True)\n tar_idx = np.where(p==model_params['pressure'])[0][0]\n print(f'Loaded {len(X)} data.')\n y = np.array(y)\n mean = y[...,tar_idx].mean()\n std = y[...,tar_idx].std()\n y = (y - mean) / std\n f = np.array(f)\n fmean = f.mean(axis=0)\n fstd = f.std(axis=0)\n f = (f - fmean) / fstd\n batch_size = model_params['batch_size']\n fold_num = model_params['fold']\n idx_list = np.arange(len(X))\n set_seed(model_params['seed'])\n np.random.shuffle(idx_list)\n X = applyIndexOnList(X,idx_list)\n f = f[idx_list]\n y = y[idx_list]\n\n\n\n for fold_idx in range(1,2):\n set_seed(model_params['seed'])\n save_dir = model_params['save_dir'] + f\"/{model_params['gas_type']}_{model_params['pressure']}/Fold-{fold_idx}\"\n ckpt_handler = CheckpointHandler(save_dir)\n state = ckpt_handler.checkpoint_best()\n model = make_model(**state['params'])\n model = torch.nn.DataParallel(model)\n model.load_state_dict(state['model'])\n model = model.module\n train_idx, val_idx, test_idx = splitdata(len(X),fold_num,fold_idx)\n train_sample = construct_dataset_gf_pressurever(applyIndexOnList(X,train_idx), f[train_idx], y[train_idx],p, is_train=False, tar_point=model_params['pressure'],mask_point=model_params['pressure'])\n test_set = construct_dataset_gf_pressurever(applyIndexOnList(X,test_idx), f[test_idx], y[test_idx],p, is_train=False, tar_point=model_params['pressure'],mask_point=model_params['pressure'])\n shaps = {pres:[] for pres in [p[3]]}\n for pres in [p[3]]:\n train_sample.changeTarPoint(pres)\n test_set.changeTarPoint(pres)\n sample_loader = construct_loader_gf_pressurever(train_sample, batch_size, shuffle=False)\n test_loader = construct_loader_gf_pressurever(test_set, batch_size, shuffle=False)\n shap_values = gradient_shap(model, sample_loader, test_loader, batch_size)\n shaps[pres].append(shap_values) \n \n for pres in [p[3]]:\n shaps[pres] = 
np.concatenate(shaps[pres],axis=0)\n\n with open(model_params['save_dir'] + f\"/{model_params['gas_type']}_{model_params['pressure']}/shap_result_{p[3]}.p\",'wb') as f:\n pickle.dump(shaps, f)\n\n \n", "id": "754056", "language": "Python", "matching_score": 5.581523895263672, "max_stars_count": 0, "path": "model_shap.py" }, { "content": "import shap \nimport torch\nfrom collections import defaultdict\nfrom featurization.data_utils import load_data_from_df, construct_loader_gf_pressurever, construct_dataset_gf_pressurever, data_prefetcher\nfrom models.transformer import make_model\nimport numpy as np\nimport os\nfrom argparser import parse_train_args\nimport pickle\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom utils import *\n\nperiodic_table = ('Dummy','H', 'He', 'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Ne', 'Na', 'Mg', 'Al', 'Si', 'P', 'S', 'Cl', 'Ar',\n 'K', 'Ca', 'Sc', 'Ti', 'V', 'Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn', 'Ga', 'Ge', 'As', 'Se', 'Br', \n 'Kr', 'Rb', 'Sr', 'Y', 'Zr', 'Nb', 'Mo', 'Te', 'Ru', 'Rh', 'Pd', 'Ag', 'Cd', 'In', 'Sn', 'Sb', 'Te', \n 'I', 'Xe', 'Cs', 'Ba', 'La', 'Ce', 'Pr', 'Nd', 'Pm', 'Sm', 'Eu', 'Gd', 'Tb', 'Dy', 'Ho', 'Er', 'Tm', \n 'Yb', 'Lu', 'Hf', 'Ta', 'W', 'Re', 'Os', 'Ir', 'Pt', 'Au', 'Hg', 'Tl', 'Unk')\n\n\nmodel_params = parse_train_args()\nimg_dir = os.path.join(model_params['img_dir'],'attn')\nos.makedirs(img_dir,exist_ok=True)\n\n\ndef heapmap(atoms, attn, name):\n plt.cla()\n f, ax = plt.subplots(figsize=(20, 15))\n colormap = 'Reds'\n h = sns.heatmap(attn, vmax=attn.max(), yticklabels = atoms, xticklabels = atoms, square=True, cmap=colormap, cbar=False)\n fontsize = 15\n cb=h.figure.colorbar(h.collections[0]) \n cb.ax.tick_params(labelsize=fontsize) \n ax.tick_params(labelsize=fontsize,rotation=0)\n ax.set_xticklabels(ax.get_xticklabels(), rotation=90)\n plt.savefig(os.path.join(img_dir, name + '.pdf'))\n\ndef test(model, data_loader, name_list):\n model.eval()\n batch_idx = -1\n ans = {}\n for data in tqdm(data_loader):\n batch_idx += 1\n adjacency_matrix, node_features, distance_matrix, global_features, y = (_.cpu() for _ in data)\n batch_mask = torch.sum(torch.abs(node_features), dim=-1) != 0\n graph_rep = model.encode(node_features, batch_mask, adjacency_matrix, distance_matrix, None)\n attn = model.encoder.layers[0].self_attn.self_attn.detach().cpu().numpy()\n atoms = node_features.numpy()[:,:,:83].argmax(axis=-1).reshape(-1)\n attn = attn[0].mean(axis=0)\n atoms = applyIndexOnList(periodic_table, atoms)\n ans[name_list[batch_idx]] = {\n 'atoms':atoms,\n 'attn':attn\n }\n heapmap(atoms, attn, name_list[batch_idx])\n return ans\n\nif __name__ == '__main__':\n batch_size = 1\n device_ids = [0,1,2,3]\n X, f, y,p = load_data_from_df(model_params['data_dir'],gas_type=model_params['gas_type'], pressure=\"all\",add_dummy_node = True,use_global_features = True, return_names=True)\n print(\"X,f,y,p\")\n tar_idx = np.where(p==model_params['pressure'])[0][0]\n y = np.array(y)\n mean = y[...,tar_idx].mean()\n std = y[...,tar_idx].std()\n f = np.array(f)\n fmean = f.mean(axis=0)\n fstd = f.std(axis=0)\n test_errors_all = []\n f = (f - fmean) / fstd\n X, names = X\n\n print(f'Loaded {len(X)} data.')\n\n fold_idx = 1\n save_dir = model_params['save_dir'] + f\"/{model_params['gas_type']}_{model_params['pressure']}/Fold-{fold_idx}\"\n ckpt_handler = CheckpointHandler(save_dir)\n state = ckpt_handler.checkpoint_best(use_cuda=False)\n model = make_model(**state['params'])\n model = torch.nn.DataParallel(model)\n 
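    # The training scripts save state_dicts from DataParallel-wrapped models, so the
    # saved keys carry a 'module.' prefix; wrapping the freshly built model before
    # load_state_dict keeps the keys aligned, and model.module afterwards unwraps
    # the plain module for CPU-side attention extraction.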
model.load_state_dict(state['model'])\n model = model.module\n if model_params['name'] == '':\n sample_idx = np.arange(1000)\n tar_name = 'all'\n else:\n if model_params['name'] in names:\n sample_idx = [names.index(model_params['name'])]\n tar_name = model_params['name']\n else:\n sample_idx = [0]\n tar_name = 'random'\n train_sample = construct_dataset_gf_pressurever(applyIndexOnList(X,sample_idx), f[sample_idx], y[sample_idx],p, is_train=False, tar_point=model_params['pressure'],mask_point=model_params['pressure'])\n sample_loader = construct_loader_gf_pressurever(train_sample, 1, shuffle=False)\n ans = test(model, sample_loader, applyIndexOnList(names, sample_idx))\n\n with open(os.path.join(img_dir,f\"attn_{tar_name}.p\"),'wb') as f:\n pickle.dump(ans, f)\n\n", "id": "4656925", "language": "Python", "matching_score": 6.313868522644043, "max_stars_count": 0, "path": "attn_vis.py" }, { "content": "from cProfile import label\nimport os\nimport pandas as pd\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\nimport time\nfrom featurization.data_utils import load_data_from_df, construct_loader_gf_pressurever, construct_dataset_gf_pressurever, data_prefetcher, load_real_data, construct_dataset_real\nfrom models.transformer import make_model\nfrom argparser import parse_train_args\nfrom utils import *\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nimport pickle\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\ndef ensemble_test(models,data_loader, mean, std, img_dir, names, p_ori):\n os.makedirs(img_dir,exist_ok=True)\n for model in models:\n model.eval()\n batch_idx = 0\n p_ori = np.log(float(p_ori))\n ans = {}\n for data in tqdm(data_loader):\n adjacency_matrix, node_features, distance_matrix, global_features, y = data\n batch_mask = torch.sum(torch.abs(node_features), dim=-1) != 0\n adapter_dim = global_features.shape[-1] - 9\n pressure = global_features[...,-adapter_dim:]\n outputs = []\n for model in models:\n model.module.set_adapter_dim(adapter_dim)\n output = model(node_features, batch_mask, adjacency_matrix, distance_matrix, global_features)\n outputs.append(output.cpu().detach().numpy().reshape(-1) * std + mean)\n y_tmp = y.cpu().detach().numpy().reshape(-1)\n futures_tmp = np.mean(np.array(outputs),axis=0)\n pres = pressure.cpu().detach().numpy().reshape(-1) + p_ori\n \n plt.xlabel('log pressure(Pa)')\n plt.ylabel('adsorption(mol/kg)')\n l1 = plt.scatter(pres, y_tmp, c ='r', marker = 'o')\n l2 = plt.scatter(pres, futures_tmp, c = 'g', marker = 'x')\n plt.legend(handles=[l1,l2],labels=['label','prediction'],loc='best')\n plt.savefig(f'{img_dir}/{names[batch_idx]}.png')\n plt.cla()\n ans[names[batch_idx]] = {\n 'pressure':np.exp(pres),\n 'label':y_tmp,\n 'pred':futures_tmp\n }\n batch_idx += 1\n return ans\n\nif __name__ == '__main__':\n\n model_params = parse_train_args()\n batch_size = 1\n device_ids = [0,1,2,3]\n\n save_dir = f\"{model_params['save_dir']}/{model_params['gas_type']}_{model_params['pressure']}\"\n\n with open(os.path.join(save_dir,f'offset.p'),'rb') as f:\n p_ori, mean, std, fmean, fstd = pickle.load(f)\n\n test_errors_all = []\n\n X, f, y, p, names = load_real_data(model_params['data_dir'], model_params['gas_type'])\n f = np.array(f)\n f = (f - fmean) / fstd\n test_errors = []\n models = []\n img_dir = os.path.join(model_params['img_dir'],model_params['gas_type'])\n predict_res = []\n for fold_idx in range(1,11):\n save_dir_fold = f\"{save_dir}/Fold-{fold_idx}\"\n state = 
CheckpointHandler(save_dir_fold).checkpoint_best()\n model = make_model(**state['params'])\n model = torch.nn.DataParallel(model)\n model.load_state_dict(state['model'])\n model = model.to(device)\n models.append(model)\n test_set = construct_dataset_real(X, f, y, p, p_ori)\n test_loader = construct_loader_gf_pressurever(test_set,1,shuffle=False)\n test_res = ensemble_test(models, test_loader, mean, std, img_dir, names, p_ori)\n with open(os.path.join(img_dir,f\"results.p\"),'wb') as f:\n pickle.dump(test_res,f)\n", "id": "1827890", "language": "Python", "matching_score": 4.384921550750732, "max_stars_count": 0, "path": "nist_test.py" }, { "content": "import os\nimport pandas as pd\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\nimport time\nfrom featurization.data_utils import load_data_from_df, construct_loader_gf_pressurever, construct_dataset_gf_pressurever, data_prefetcher\nfrom models.transformer import make_model\nfrom argparser import parse_finetune_args\nimport pickle\nfrom utils import *\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\ndef train(model, epoch, train_loader, optimizer, scheduler, adapter_dim):\n model.train()\n loss = 0\n loss_all = 0\n prefetcher = data_prefetcher(train_loader)\n batch_idx = 0\n data = prefetcher.next()\n while data is not None:\n lr = scheduler.optimizer.param_groups[0]['lr']\n adjacency_matrix, node_features, distance_matrix, global_features, y = data\n batch_mask = torch.sum(torch.abs(node_features), dim=-1) != 0\n\n optimizer.zero_grad()\n output = model(node_features, batch_mask, adjacency_matrix, distance_matrix, global_features)\n loss = F.mse_loss(output.reshape(-1), y.reshape(-1))\n loss.backward()\n step_loss = loss.cpu().detach().numpy()\n loss_all += step_loss\n optimizer.step()\n scheduler.step()\n print(f'After Step {batch_idx} of Epoch {epoch}, Loss = {step_loss}, Lr = {lr}')\n batch_idx += 1\n data = prefetcher.next()\n return loss_all / len(train_loader.dataset)\n\n\n\ndef test(model, data_loader, mean, std, adapter_dim):\n model.eval()\n error = 0\n prefetcher = data_prefetcher(data_loader)\n batch_idx = 0\n data = prefetcher.next()\n futures, ys = None, None \n while data is not None:\n adjacency_matrix, node_features, distance_matrix, global_features, y = data\n batch_mask = torch.sum(torch.abs(node_features), dim=-1) != 0\n output = model(node_features, batch_mask, adjacency_matrix, distance_matrix, global_features)\n output = output.reshape(y.shape).cpu().detach().numpy()\n y = y.cpu().detach().numpy()\n ys = y if ys is None else np.concatenate([ys,y], axis=0)\n futures = output if futures is None else np.concatenate([futures,output], axis=0)\n batch_idx += 1\n data = prefetcher.next()\n\n futures = np.array(futures) * std + mean\n ys = np.array(ys) * std + mean\n mae = np.mean(np.abs(futures - ys), axis=0)\n rmse = np.sqrt(np.mean((futures - ys)**2, axis=0))\n # pcc = np.corrcoef(futures,ys)[0][1]\n pcc = np.array([np.corrcoef(futures[:,i],ys[:,i])[0][1] for i in range(adapter_dim)])\n smape = 2 * np.mean(np.abs(futures-ys)/(np.abs(futures)+np.abs(ys)), axis=0)\n\n return {'MAE':mae, 'RMSE':rmse, 'PCC':pcc, 'sMAPE':smape}\n\n\n\ndef get_RdecayFactor(warmup_step):\n\n def warmupRdecayFactor(step):\n if step < warmup_step:\n return step / warmup_step\n else:\n return (warmup_step / step) ** 0.5\n\n return warmupRdecayFactor\n\nif __name__ == '__main__':\n\n model_params = parse_finetune_args()\n batch_size = model_params['batch_size']\n device_ids = [0,1,2,3]\n logger = 
get_logger(model_params['save_dir'] + f\"/{model_params['gas_type']}_{model_params['pressure']}\")\n X, f, y, p = load_data_from_df(model_params['data_dir'],gas_type=model_params['gas_type'], pressure='all',add_dummy_node = True,use_global_features = True)\n tar_idx = np.where(p==model_params['pressure'])[0][0]\n print(f'Loaded {len(X)} data.')\n logger.info(f'Loaded {len(X)} data.')\n y = np.array(y)\n mean = y[...,tar_idx].mean()\n std = y[...,tar_idx].std()\n y = (y - mean) / std\n f = np.array(f)\n fmean = f.mean(axis=0)\n fstd = f.std(axis=0)\n f = (f - fmean) / fstd\n\n with open(os.path.join(model_params['save_dir'] + f\"/{model_params['gas_type']}_{model_params['pressure']}\",f'offset.p'),'wb') as file:\n pickle.dump((model_params['pressure'], mean, std, fmean, fstd), file)\n \n printParams(model_params,logger)\n fold_num = model_params['fold']\n epoch_num = model_params['epoch']\n test_errors = []\n idx_list = np.arange(len(X))\n set_seed(model_params['seed'])\n np.random.shuffle(idx_list)\n X = applyIndexOnList(X,idx_list)\n f = f[idx_list]\n y = y[idx_list]\n test_errors = []\n\n for fold_idx in range(1, fold_num + 1):\n\n set_seed(model_params['seed'])\n ori_state = CheckpointHandler(model_params['ori_dir']+f'/Fold-{fold_idx}').checkpoint_avg()\n ori_params = ori_state['params']\n ori_params['adapter_finetune'] = True\n model = make_model(**ori_params)\n model.set_adapter_dim(model_params['adapter_dim'])\n model = torch.nn.DataParallel(model, device_ids=device_ids)\n model.load_state_dict(ori_state['model'],strict=False)\n model = model.to(device)\n lr = model_params['lr']\n optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = get_RdecayFactor(ori_params['warmup_step']))\n best_val_error = 0\n best_val_error_s = 0\n test_error = 0\n best_epoch = -1\n\n train_idx, val_idx, test_idx = splitdata(len(X),fold_num, fold_idx)\n\n train_set = construct_dataset_gf_pressurever(applyIndexOnList(X,train_idx), f[train_idx], y[train_idx],p, is_train=True, mask_point=model_params['pressure'])\n\n\n val_set = construct_dataset_gf_pressurever(applyIndexOnList(X,val_idx), f[val_idx], y[val_idx],p, is_train=True, mask_point=model_params['pressure'])\n\n\n test_set = construct_dataset_gf_pressurever(applyIndexOnList(X,test_idx), f[test_idx], y[test_idx],p, is_train=True, mask_point=model_params['pressure'])\n\n ckpt_handler = CheckpointHandler(model_params['save_dir'] + f\"/{model_params['gas_type']}_{model_params['pressure']}/Fold-{fold_idx}\")\n\n for epoch in range(1,epoch_num + 1):\n train_adapter_dim = model_params['adapter_dim']\n train_loader = construct_loader_gf_pressurever(train_set,batch_size)\n loss = train(model, epoch, train_loader,optimizer,scheduler, train_adapter_dim)\n val_loader = construct_loader_gf_pressurever(val_set, batch_size, shuffle=False)\n val_error = test(model, val_loader, mean, std, train_adapter_dim)['MAE']\n val_error_ = np.mean(val_error)\n ckpt_handler.save_model(model,ori_params,epoch,val_error_)\n\n if best_val_error == 0 or val_error_ <= best_val_error:\n print(\"Enter test step.\\n\")\n best_epoch = epoch\n best_val_error = val_error_\n test_loader = construct_loader_gf_pressurever(test_set, batch_size, shuffle=False)\n test_error = test(model, test_loader, mean, std, train_adapter_dim)\n for idx, pres in enumerate(p):\n for _ in test_error.keys():\n print('Fold: {:02d}, Epoch: {:03d}, Pressure: {}, Test {}: {:.7f}'.format(fold_idx, epoch, pres, _, test_error[_][idx]))\n 
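                        # test() reports each metric (MAE/RMSE/PCC/sMAPE) per pressure,
                        # so the same line is printed to stdout above and mirrored into
                        # the run log below for every pressure in p.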
logger.info('Fold: {:02d}, Epoch: {:03d}, Pressure: {}, Test {}: {:.7f}'.format(fold_idx, epoch, pres, _, test_error[_][idx]))\n lr = scheduler.optimizer.param_groups[0]['lr']\n p_str = 'Fold: {:02d}, Epoch: {:03d}, Val MAE: {:.7f}, Best Val MAE: {:.7f}'.format(fold_idx, epoch, val_error_, best_val_error)\n print(p_str)\n logger.info(p_str)\n\n for idx, pres in enumerate(p):\n for _ in test_error.keys():\n print('Fold: {:02d}, Epoch: {:03d}, Pressure: {}, Test {}: {:.7f}'.format(fold_idx, epoch, pres, _, test_error[_][idx]))\n logger.info('Fold: {:02d}, Epoch: {:03d}, Pressure: {}, Test {}: {:.7f}'.format(fold_idx, epoch, pres, _, test_error[_][idx])) \n\n test_errors.append(test_error) \n\n for idx, pres in enumerate(p):\n for _ in test_errors[0].keys():\n mt_list = [__[_][idx] for __ in test_errors]\n p_str = 'Pressure {}, Test {} of {:02d}-Folds: {:.7f}({:.7f})'.format(pres, _, fold_num, np.mean(mt_list), np.std(mt_list))\n print(p_str)\n logger.info(p_str)", "id": "3397897", "language": "Python", "matching_score": 6.013445854187012, "max_stars_count": 0, "path": "pressure_adapt.py" }, { "content": "import os\nimport pandas as pd\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\nimport time\nfrom baselines.data_utils import load_data_from_df, construct_loader, data_prefetcher\nfrom baselines import make_baseline_model\nfrom argparser import parse_baseline_args\nfrom utils import *\n\nmodel_params = parse_baseline_args()\n\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\ndef warmupRdecayFactor(step):\n warmup_step = model_params['warmup_step']\n if step < warmup_step:\n return step / warmup_step\n else:\n return (warmup_step / step) ** 0.5\n\n\ndef train(epoch, train_loader, optimizer, scheduler, use_adj=True):\n model.train()\n loss = 0\n loss_all = 0\n prefetcher = data_prefetcher(train_loader, device)\n batch_idx = 0\n data = prefetcher.next()\n while data is not None:\n lr = scheduler.optimizer.param_groups[0]['lr']\n if use_adj:\n node_features, pos, adj, global_feature, y = data\n else:\n node_features, pos, nbh, nbh_mask, global_feature, y = data\n adj = (nbh, nbh_mask)\n batch_mask = torch.sum(torch.abs(node_features), dim=-1) != 0\n\n optimizer.zero_grad()\n output = model(node_features, batch_mask, pos, adj, global_feature)\n y = y.squeeze(-1)\n loss = F.mse_loss(output, y)\n loss.backward()\n step_loss = loss.cpu().detach().numpy()\n loss_all += step_loss\n optimizer.step()\n scheduler.step()\n print(f'After Step {batch_idx} of Epoch {epoch}, Loss = {step_loss}, Lr = {lr}')\n batch_idx += 1\n data = prefetcher.next()\n return loss_all / len(train_loader.dataset)\n\n\ndef test(data_loader, mean, std, use_adj=True):\n model.eval()\n error = 0\n prefetcher = data_prefetcher(data_loader, device)\n batch_idx = 0\n data = prefetcher.next()\n futures, ys = [], []\n while data is not None:\n \n if use_adj:\n node_features, pos, adj, global_feature, y = data\n else:\n node_features, pos, nbh, nbh_mask, global_feature, y = data\n adj = (nbh, nbh_mask)\n batch_mask = torch.sum(torch.abs(node_features), dim=-1) != 0\n\n optimizer.zero_grad()\n output = model(node_features, batch_mask, pos, adj, global_feature)\n ys += list(y.cpu().detach().numpy().reshape(-1))\n futures += list(output.cpu().detach().numpy().reshape(-1))\n batch_idx += 1\n data = prefetcher.next()\n\n futures = np.array(futures) * std + mean\n ys = np.array(ys) * std + mean\n mae = np.mean(np.abs(futures - ys))\n rmse = np.sqrt(np.mean((futures - ys)**2))\n pcc = 
np.corrcoef(futures,ys)[0][1]\n smape = 2 * np.mean(np.abs(futures-ys)/(np.abs(futures)+np.abs(ys)))\n\n return {'MAE':mae, 'RMSE':rmse, 'PCC':pcc, 'sMAPE':smape}\n\nif __name__ == '__main__':\n\n model_name = model_params['model_name']\n if model_name == 'egnn' or 'dimenetpp':\n use_adj = True\n else:\n use_adj = False\n batch_size = model_params['batch_size']\n device_ids = [0,1,2,3]\n logger = get_logger(model_params['save_dir'] + f\"/{model_params['gas_type']}_{model_params['pressure']}\")\n X, f, y = load_data_from_df(model_params['data_dir'],gas_type=model_params['gas_type'], pressure=model_params['pressure'],use_global_features = True)\n print(f'Loaded {len(X)} data.')\n logger.info(f'Loaded {len(X)} data.')\n y = np.array(y)\n mean = y.mean()\n std = y.std()\n y = (y - mean) / std\n f = np.array(f)\n fmean = f.mean(axis=0)\n fstd = f.std(axis=0)\n f = (f - fmean) / fstd\n\n model_params['d_atom'] = X[0][0].shape[1]\n model_params['d_feature'] = f.shape[-1]\n \n printParams(model_params,logger)\n fold_num = model_params['fold']\n epoch_num = model_params['epoch']\n test_errors = []\n idx_list = np.arange(len(X))\n set_seed(model_params['seed'])\n np.random.shuffle(idx_list)\n X = applyIndexOnList(X,idx_list)\n f = f[idx_list]\n y = y[idx_list]\n\n for fold_idx in range(1,fold_num + 1):\n set_seed(model_params['seed'])\n model = make_baseline_model(**model_params)\n model = torch.nn.DataParallel(model, device_ids=device_ids)\n model = model.to(device)\n lr = model_params['lr']\n optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = warmupRdecayFactor)\n best_val_error = 0\n test_error = 0\n best_epoch = -1\n train_idx, val_idx, test_idx = splitdata(len(X),fold_num,fold_idx)\n\n train_loader = construct_loader(applyIndexOnList(X,train_idx), f[train_idx], y[train_idx],batch_size, shuffle=True, use_adj=use_adj)\n val_loader = construct_loader(applyIndexOnList(X,val_idx), f[val_idx], y[val_idx],batch_size, shuffle=False, use_adj=use_adj)\n test_loader = construct_loader(applyIndexOnList(X,test_idx),f[test_idx], y[test_idx],batch_size, shuffle=False, use_adj=use_adj)\n\n ckpt_handler = CheckpointHandler(model_params['save_dir'] + f\"/{model_params['gas_type']}_{model_params['pressure']}/Fold-{fold_idx}\")\n\n for epoch in range(1,epoch_num + 1):\n loss = train(epoch,train_loader,optimizer,scheduler, use_adj=use_adj)\n val_error = test(val_loader, mean, std, use_adj=use_adj)['MAE']\n ckpt_handler.save_model(model,model_params,epoch,val_error)\n if best_val_error == 0 or val_error <= best_val_error:\n print(\"Enter test step.\\n\")\n best_epoch = epoch\n test_error = test(test_loader, mean, std, use_adj=use_adj)\n best_val_error = val_error\n state = {\"params\":model_params, \"epoch\":epoch, \"model\":model.state_dict()}\n lr = scheduler.optimizer.param_groups[0]['lr']\n\n epoch_op_str = 'Fold: {:02d}, Epoch: {:03d}, LR: {:.7f}, Loss: {:.7f}, Validation MAE: {:.7f}, \\\n Test MAE: {:.7f}, Test RMSE: {:.7f}, Test PCC: {:.7f}, Test sMAPE: {:.7f}, Best Val MAE {:.7f}(epoch {:03d})'.format(fold_idx, epoch, lr, loss, val_error, test_error['MAE'], test_error['RMSE'], test_error['PCC'], test_error['sMAPE'], best_val_error, best_epoch)\n\n print(epoch_op_str)\n\n logger.info(epoch_op_str)\n \n test_errors.append(test_error)\n print('Fold: {:02d}, Test MAE: {:.7f}, Test RMSE: {:.7f}, Test PCC: {:.7f}, Test sMAPE: {:.7f}'.format(fold_idx, test_error['MAE'], test_error['RMSE'], test_error['PCC'], test_error['sMAPE']))\n 
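        # test_error holds the metrics computed at the epoch with the best validation
        # MAE for this fold; it is echoed to the console above and to the log below,
        # then pooled into the cross-fold mean/std summary after the loop.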
logger.info('Fold: {:02d}, Test MAE: {:.7f}, Test RMSE: {:.7f}, Test PCC: {:.7f}, Test sMAPE: {:.7f}'.format(fold_idx, test_error['MAE'], test_error['RMSE'], test_error['PCC'], test_error['sMAPE']))\n for _ in test_errors[0].keys():\n err_mean = np.mean([__[_] for __ in test_errors])\n err_std = np.std([__[_] for __ in test_errors])\n print('Test {} of {:02d}-Folds : {:.7f}({:.7f})'.format(_,fold_num,err_mean,err_std))\n logger.info('Test {} of {:02d}-Folds : {:.7f}({:.7f})'.format(_,fold_num,err_mean,err_std))\n", "id": "2537783", "language": "Python", "matching_score": 4.537750720977783, "max_stars_count": 0, "path": "train_baselines.py" }, { "content": "import numpy as np\nfrom sklearn import tree, svm, ensemble\nfrom featurization.data_utils import load_data_from_df, construct_loader_gf, data_prefetcher\nfrom argparser import parse_train_args,parse_ml_args\nfrom utils import *\n\ndef get_metric_dict(predicted, ground_truth):\n mae = np.mean(np.abs(predicted - ground_truth))\n smape = np.mean(np.abs(predicted - ground_truth) / ((np.abs(ground_truth) + np.abs(predicted)) / 2))\n pcc = np.corrcoef(predicted, ground_truth)[0][1]\n rmse = np.sqrt(np.mean((predicted - ground_truth) ** 2))\n return {'MAE':mae, 'sMAPE':smape, 'PCC': pcc, 'RMSE':rmse}\n\nif __name__ == '__main__':\n\n model_params = parse_ml_args()\n device_ids = [0,1,2,3]\n logger = get_logger(model_params['save_dir'] + f\"/{model_params['ml_type']}/{model_params['gas_type']}_{model_params['pressure']}/\")\n X, f, y = load_data_from_df(model_params['data_dir'],gas_type=model_params['gas_type'], pressure=model_params['pressure'],add_dummy_node = True,use_global_features = True)\n print(f'Loaded {len(X)} data.')\n logger.info(f'Loaded {len(X)} data.')\n y = np.array(y).reshape(-1)\n mean = y.mean()\n std = y.std()\n y = (y - mean) / std\n f = np.array(f)\n fmean = f.mean(axis=0)\n fstd = f.std(axis=0)\n f = (f - fmean) / fstd\n\n Xs = [np.mean(_[0][:,1:],axis=0) for _ in X]\n f = np.concatenate((Xs,f),axis=1)\n \n printParams(model_params,logger)\n fold_num = model_params['fold']\n test_errors = []\n idx_list = np.arange(len(X))\n set_seed(model_params['seed'])\n np.random.shuffle(idx_list)\n X = applyIndexOnList(X,idx_list)\n f = f[idx_list]\n y = y[idx_list]\n\n for fold_idx in range(1,fold_num + 1):\n set_seed(model_params['seed'])\n\n train_idx, val_idx, test_idx = splitdata(len(X),fold_num,fold_idx)\n\n train_f,train_y = f[train_idx], y[train_idx]\n test_f,test_y = f[test_idx], y[test_idx]\n\n if model_params['ml_type'] == 'RF':\n\n model = ensemble.RandomForestRegressor(n_estimators=100,criterion='mse',min_samples_split=2,min_samples_leaf=1,max_features='auto')\n\n elif model_params['ml_type'] == 'SVR':\n\n model = svm.SVR()\n\n elif model_params['ml_type'] == 'DT':\n\n model = tree.DecisionTreeRegressor()\n\n elif model_params['ml_type'] == 'GBRT':\n\n model = ensemble.GradientBoostingRegressor()\n\n model.fit(train_f,train_y)\n\n future = model.predict(test_f) * std + mean\n\n test_y = test_y * std + mean\n test_error = get_metric_dict(future, test_y)\n for _ in test_error.keys():\n print('Fold: {:02d}, Test {}: {:.7f}'.format(fold_idx, _, test_error[_]))\n logger.info('Fold: {:02d}, Test {}: {:.7f}'.format(fold_idx, _, test_error[_]))\n test_errors.append(test_error)\n for _ in test_errors[0].keys():\n err_mean = np.mean([__[_] for __ in test_errors])\n err_std = np.std([__[_] for __ in test_errors])\n print('Test {} of {:02d}-Folds : {:.7f}({:.7f})'.format(_,fold_num,err_mean,err_std))\n logger.info('Test {} of 
{:02d}-Folds : {:.7f}({:.7f})'.format(_,fold_num,err_mean,err_std))", "id": "7506682", "language": "Python", "matching_score": 1.9261642694473267, "max_stars_count": 0, "path": "train_ml.py" }, { "content": "from argparse import Namespace\nfrom logging import Logger\nimport os\nfrom typing import List, Tuple, Union\n\nimport numpy as np\nfrom tensorboardX import SummaryWriter\nimport torch\nfrom torch.optim.lr_scheduler import ExponentialLR\n\nfrom .evaluate import evaluate, evaluate_predictions\nfrom .predict import predict\nfrom .train import train\nfrom chemprop.models import build_model\nfrom chemprop.data.utils import get_class_sizes\nfrom chemprop.data.scaler import StandardScaler\nfrom chemprop.nn_utils import param_count\nfrom chemprop.utils import build_optimizer, build_lr_scheduler, get_loss_func, get_metric_func, load_checkpoint, makedirs, save_checkpoint\nfrom chemprop.data import CrystalDataset\n\n\ndef run_training(train_data: CrystalDataset, valid_data: CrystalDataset, test_data: CrystalDataset,\n args: Namespace, logger: Logger = None) -> Tuple[List[Union[float, np.ndarray]], List[float]]:\n \"\"\"\n Trains a model and returns test scores on the model checkpoint with the highest validation score.\n\n # :param args: Arguments.\n # :param logger: Logger.\n # :return: A list of ensemble scores for each task.\n \"\"\"\n if logger is not None:\n debug, info = logger.debug, logger.info\n else:\n debug = info = print\n\n # Set GPU\n if args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n\n # Feature Scaler\n if args.features_scaling:\n features_scaler = train_data.normalize_features(replace_nan_token=0)\n valid_data.normalize_features(features_scaler)\n test_data.normalize_features(features_scaler)\n else:\n features_scaler = None\n\n # Target adjust\n if args.dataset_type == 'classification':\n class_sizes = get_class_sizes(train_data)\n info('Class sizes')\n for i, task_class_sizes in enumerate(class_sizes):\n info(f'{args.task_names[i]} '\n f'{\", \".join(f\"{cls}: {size * 100:.2f}%\" for cls, size in enumerate(task_class_sizes))}')\n\n if args.dataset_type == 'regression':\n info('Fitting scaler')\n train_targets = train_data.targets()\n scaler = StandardScaler().fit(train_targets)\n scaled_targets = scaler.transform(train_targets).tolist()\n train_data.set_targets(scaled_targets)\n else:\n scaler = None\n\n # Get loss and metric functions\n loss_func = get_loss_func(args)\n metric_func = get_metric_func(metric=args.metric)\n\n # Get best validation loss\n best_validation_scores = None\n\n # Set up test set evaluation\n\n if args.dataset_type == 'multiclass':\n sum_test_preds = np.zeros((len(test_data), args.num_tasks, args.multiclass_num_classes))\n else:\n sum_test_preds = np.zeros((len(test_data), args.num_tasks))\n\n # Train ensemble of models\n for model_idx in range(args.ensemble_size):\n # Tensorboard writer\n save_dir = os.path.join(args.save_dir, f'fold_{args.run_fold}', f'model_{model_idx}')\n makedirs(save_dir)\n try:\n writer = SummaryWriter(log_dir=save_dir)\n except:\n writer = SummaryWriter(logdir=save_dir)\n\n # Load/build model\n if args.checkpoint_paths is not None:\n debug(f'Loading model {model_idx} from {args.checkpoint_paths[model_idx]}')\n model = load_checkpoint(args.checkpoint_paths[model_idx], current_args=args, logger=logger)\n else:\n debug(f'Building model {model_idx}')\n model = build_model(args)\n debug(model)\n debug(f'Number of parameters = {param_count(model):,}')\n if args.cuda:\n debug('Moving model to cuda')\n model = model.cuda()\n\n 
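        # For regression runs, the target scaler (and the feature scaler, when feature
        # scaling is enabled) fitted on the training split is stored alongside every
        # checkpoint via save_checkpoint, so predictions from a reloaded model can be
        # inverse-transformed back to the original target scale (see predict()).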
# Ensure that model is saved in correct location for evaluation if 0 epochs\n save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, features_scaler, args)\n\n # Optimizers\n optimizer = build_optimizer(model, args)\n\n # Learning rate schedulers\n scheduler = build_lr_scheduler(optimizer, args)\n\n # Run training\n best_score = float('inf') if args.minimize_score else -float('inf')\n best_epoch, n_iter = 0, 0\n for epoch in range(args.epochs):\n debug(f'Epoch {epoch}')\n\n n_iter = train(\n model=model,\n data=train_data,\n loss_func=loss_func,\n optimizer=optimizer,\n scheduler=scheduler,\n args=args,\n n_iter=n_iter,\n logger=logger,\n writer=writer\n )\n if isinstance(scheduler, ExponentialLR):\n scheduler.step()\n\n # (num_tasks,)\n valid_scores = evaluate(\n model=model,\n data=valid_data,\n num_tasks=args.num_tasks,\n metric_func=metric_func,\n batch_size=args.batch_size,\n dataset_type=args.dataset_type,\n scaler=scaler,\n logger=logger\n )\n\n # Average validation score\n avg_val_score = np.nanmean(valid_scores)\n debug(f'Valid {args.metric} = {avg_val_score:.6f}')\n writer.add_scalar(f'valid_{args.metric}', avg_val_score, n_iter)\n\n if args.show_individual_scores:\n # Individual validation scores\n for task_name, val_score in zip(args.task_names, valid_scores):\n debug(f'Valid {task_name} {args.metric} = {val_score:.6f}')\n writer.add_scalar(f'valid_{task_name}_{args.metric}', val_score, n_iter)\n\n # Save model checkpoint if improved validation score\n if args.minimize_score and avg_val_score < best_score or not args.minimize_score and avg_val_score > best_score:\n best_validation_scores = valid_scores\n best_score, best_epoch = avg_val_score, epoch\n save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, features_scaler, args)\n\n # Evaluate on test set using model with best validation score\n info(f'Model {model_idx} best valid {args.metric} = {best_score:.6f} on epoch {best_epoch}')\n model = load_checkpoint(os.path.join(save_dir, 'model.pt'), cuda=args.cuda, logger=logger)\n\n # (len(data), num_tasks)\n test_preds = predict(\n model=model,\n data=test_data,\n batch_size=args.batch_size,\n scaler=scaler\n )\n # (num_tasks,)\n test_scores = evaluate_predictions(\n preds=test_preds,\n targets=test_data.targets(),\n num_tasks=args.num_tasks,\n metric_func=metric_func,\n dataset_type=args.dataset_type,\n logger=logger\n )\n\n if len(test_preds) != 0:\n sum_test_preds += np.array(test_preds)\n\n # Average test score\n avg_test_score = np.nanmean(test_scores)\n info(f'Model {model_idx} test {args.metric} = {avg_test_score:.6f}')\n writer.add_scalar(f'test_{args.metric}', avg_test_score, 0)\n\n if args.show_individual_scores:\n # Individual test scores\n for task_name, test_score in zip(args.task_names, test_scores):\n info(f'Model {model_idx} test {task_name} {args.metric} = {test_score:.6f}')\n writer.add_scalar(f'test_{task_name}_{args.metric}', test_score, n_iter)\n\n # Evaluate ensemble on test set\n avg_test_preds = (sum_test_preds / args.ensemble_size).tolist()\n\n # (num_tasks,)\n ensemble_test_scores = evaluate_predictions(\n preds=avg_test_preds,\n targets=test_data.targets(),\n num_tasks=args.num_tasks,\n metric_func=metric_func,\n dataset_type=args.dataset_type,\n logger=logger\n )\n\n # Average ensemble score\n avg_ensemble_test_score = np.nanmean(ensemble_test_scores)\n info(f'Ensemble test {args.metric} = {avg_ensemble_test_score:.6f}')\n writer.add_scalar(f'ensemble_test_{args.metric}', avg_ensemble_test_score, 0)\n\n # Individual ensemble 
scores\n if args.show_individual_scores:\n for task_name, ensemble_score in zip(args.task_names, ensemble_test_scores):\n info(f'Ensemble test {task_name} {args.metric} = {ensemble_score:.6f}')\n\n return best_validation_scores, ensemble_test_scores\n", "id": "6520617", "language": "Python", "matching_score": 7.481989860534668, "max_stars_count": 8, "path": "crystalnet/train/run_training.py" }, { "content": "from argparse import Namespace\r\nfrom logging import Logger\r\nimport os\r\nimport pickle\r\nimport numpy as np\r\nimport torch\r\nfrom tensorboardX import SummaryWriter\r\nfrom torch.optim.lr_scheduler import ExponentialLR\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nfrom crystalnet.train import transfer_train, transfer_evaluate\r\nfrom crystalnet.models import build_model\r\nfrom crystalnet.data.scaler import StandardScaler\r\nfrom crystalnet.nn_utils import param_count\r\nfrom crystalnet.utils import build_optimizer, build_lr_scheduler, get_loss_func, get_metric_func, load_checkpoint, makedirs, save_checkpoint\r\nfrom crystalnet.data import CrystalDataset\r\nfrom crystalnet.data.utils import get_task_names, get_data, get_class_sizes\r\nfrom crystalnet.parsing import parse_train_args\r\nfrom crystalnet.utils import create_logger\r\nfrom crystalnet.features import AtomCustomJSONInitializer, GaussianDistance, load_radius_dict\r\n\r\n\r\ndef run_training(train_data: CrystalDataset, valid_data: CrystalDataset, args: Namespace, logger: Logger = None):\r\n\r\n if logger is not None:\r\n debug, info = logger.debug, logger.info\r\n else:\r\n debug = info = print\r\n\r\n # Set GPU\r\n if args.gpu is not None:\r\n torch.cuda.set_device(args.gpu)\r\n\r\n # Target adjust\r\n if args.dataset_type == 'classification':\r\n class_sizes = get_class_sizes(train_data)\r\n info('Class sizes')\r\n for i, task_class_sizes in enumerate(class_sizes):\r\n info(f'{args.task_names[i]} '\r\n f'{\", \".join(f\"{cls}: {size * 100:.2f}%\" for cls, size in enumerate(task_class_sizes))}')\r\n\r\n if args.dataset_type == 'regression':\r\n info('Fitting scaler')\r\n train_targets = train_data.targets()\r\n scaler = StandardScaler().fit(train_targets)\r\n scaled_targets = scaler.transform(train_targets).tolist()\r\n train_data.set_targets(scaled_targets)\r\n else:\r\n scaler = None\r\n\r\n # Get loss and metric functions\r\n loss_func = get_loss_func(args)\r\n metric_func = get_metric_func(metric=args.metric)\r\n\r\n # Get best validation loss\r\n best_validation_scores = None\r\n\r\n # Train ensemble of models\r\n for model_idx in range(args.ensemble_size):\r\n # Tensorboard writer\r\n save_dir = os.path.join(args.save_dir, f'model_{model_idx}')\r\n makedirs(save_dir)\r\n try:\r\n writer = SummaryWriter(log_dir=save_dir)\r\n except:\r\n writer = SummaryWriter(logdir=save_dir)\r\n\r\n # Load/build model\r\n if args.checkpoint_paths is not None:\r\n debug(f'Loading model {model_idx} from {args.checkpoint_paths[model_idx]}')\r\n model = load_checkpoint(args.checkpoint_paths[model_idx], current_args=args, logger=logger)\r\n else:\r\n debug(f'Building model {model_idx}')\r\n model = build_model(args)\r\n debug(model)\r\n debug(f'Number of parameters = {param_count(model):,}')\r\n if args.cuda:\r\n debug('Moving model to cuda')\r\n model = model.cuda()\r\n\r\n # transfer learning, only transfer ffn\r\n for name, param in model.named_parameters():\r\n if name.find('ffn') != -1:\r\n param.requires_grad = True\r\n else:\r\n param.requires_grad = False\r\n\r\n # Ensure that model is saved in correct location 
for evaluation if 0 epochs\r\n save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, features_scaler=None, args=args)\r\n\r\n # Optimizers\r\n optimizer = build_optimizer(model.ffn, args)\r\n\r\n # Learning rate schedulers\r\n scheduler = build_lr_scheduler(optimizer, args)\r\n\r\n # Run training\r\n best_score = float('inf') if args.minimize_score else -float('inf')\r\n best_epoch, n_iter = 0, 0\r\n\r\n for epoch in range(args.epochs):\r\n debug(f'Epoch {epoch}')\r\n\r\n n_iter = transfer_train(\r\n model=model,\r\n data=train_data,\r\n loss_func=loss_func,\r\n optimizer=optimizer,\r\n scheduler=scheduler,\r\n args=args,\r\n n_iter=n_iter,\r\n logger=logger,\r\n writer=writer\r\n )\r\n if isinstance(scheduler, ExponentialLR):\r\n scheduler.step()\r\n\r\n # (num_tasks,)\r\n valid_scores = transfer_evaluate(\r\n model=model,\r\n data=valid_data,\r\n num_tasks=args.num_tasks,\r\n metric_func=metric_func,\r\n batch_size=args.batch_size,\r\n dataset_type=args.dataset_type,\r\n scaler=scaler,\r\n logger=logger\r\n )\r\n\r\n # Average validation score\r\n avg_val_score = np.nanmean(valid_scores)\r\n debug(f'Valid {args.metric} = {avg_val_score:.6f}')\r\n writer.add_scalar(f'valid_{args.metric}', avg_val_score, n_iter)\r\n\r\n if args.show_individual_scores:\r\n # Individual validation scores\r\n for task_name, val_score in zip(args.task_names, valid_scores):\r\n debug(f'Valid {task_name} {args.metric} = {val_score:.6f}')\r\n writer.add_scalar(f'valid_{task_name}_{args.metric}', val_score, n_iter)\r\n\r\n # Save model checkpoint if improved validation score\r\n if args.minimize_score and avg_val_score < best_score or not args.minimize_score and avg_val_score > best_score:\r\n best_validation_scores = valid_scores\r\n best_score, best_epoch = avg_val_score, epoch\r\n save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, features_scaler=None, args=args)\r\n\r\n # Evaluate on test set using model with best validation score\r\n info(f'Model {model_idx} best valid {args.metric} = {best_score:.6f} on epoch {best_epoch}')\r\n if args.show_individual_scores:\r\n # Individual validation scores\r\n for task_name, val_score in zip(args.task_names, best_validation_scores):\r\n debug(f'Valid {task_name} {args.metric} = {val_score:.6f}')\r\n writer.add_scalar(f'valid_{task_name}_{args.metric}', val_score, n_iter)\r\n\r\n\r\ndef transfer(args: Namespace, logger: Logger = None):\r\n info = logger.info if logger is not None else print\r\n\r\n # Load feature object\r\n ari = AtomCustomJSONInitializer(f'{args.data_path}/atom_init.json')\r\n dmin, dmax, step, var = args.rbf_parameters\r\n gdf = GaussianDistance(dmin=dmin, dmax=dmax, step=step, var=var)\r\n radius_dic = load_radius_dict(f'{args.data_path}/hubbard_u.yaml')\r\n\r\n # Load and cache data\r\n info('Loading data')\r\n if os.path.exists(f'{args.train_path}/graph_cache.pickle'):\r\n with open(f'{args.train_path}/graph_cache.pickle', 'rb') as f:\r\n all_graph = pickle.load(f)\r\n else:\r\n assert \"There is no poscar graph cache, please use preprocess.py to generate poscar graph cache!\"\r\n\r\n if os.path.exists(f'{args.train_path}/transfer.pickle'):\r\n with open(f'{args.train_path}/transfer.pickle', 'rb') as f:\r\n all_data = pickle.load(f)\r\n else:\r\n all_data = get_data(path=f'{args.train_path}/experiment_band_gap.csv',\r\n graph=all_graph, ari=ari, gdf=gdf, radius_dic=radius_dic, args=args, logger=logger)\r\n with open(f'{args.train_path}/transfer.pickle', 'wb') as fw:\r\n pickle.dump(all_data, fw)\r\n\r\n # Split data\r\n 
train_data, valid_data = train_test_split(all_data, test_size=0.1, random_state=args.run_fold)\r\n    train_data, valid_data = CrystalDataset(train_data), CrystalDataset(valid_data)\r\n    args.task_names = get_task_names(path=f'{args.train_path}/experiment_band_gap.csv', use_compound_names=True)\r\n    # fake the num_tasks for loading the pretrain model\r\n    args.num_tasks = train_data.num_tasks()\r\n\r\n    info(f'Number of tasks = {args.num_tasks}')\r\n    info(f'Total size = {len(train_data)+len(valid_data):,} | '\r\n         f'train size = {len(train_data):,}({len(train_data)/(len(train_data)+len(valid_data)):.1f}) | '\r\n         f'valid size = {len(valid_data):,}({len(valid_data)/(len(train_data)+len(valid_data)):.1f})')\r\n\r\n    # Required for NormLR\r\n    args.train_data_size = len(train_data)\r\n\r\n    # Training\r\n    run_training(train_data, valid_data, args, logger)\r\n\r\n\r\nif __name__ == '__main__':\r\n    args = parse_train_args()\r\n    logger = create_logger(name='train', save_dir=args.save_dir, quiet=args.quiet)\r\n    transfer(args, logger)\r\n", "id": "4582968", "language": "Python", "matching_score": 5.855317115783691, "max_stars_count": 0, "path": "transfer_train.py" }, { "content": "from argparse import Namespace\nfrom logging import Logger\nimport os\nimport pickle\nfrom typing import Tuple\nimport numpy as np\nimport torch\n\nfrom crystalnet.train.run_training import run_training\nfrom crystalnet.data.utils import get_task_names, get_data\nfrom crystalnet.utils import makedirs\nfrom crystalnet.parsing import parse_train_args\nfrom crystalnet.utils import create_logger\nfrom crystalnet.features import AtomCustomJSONInitializer, GaussianDistance, load_radius_dict\nfrom crystalnet.data import CrystalDataset\n\n\ndef run(args: Namespace, logger: Logger = None) -> Tuple[np.ndarray, np.ndarray]:\n    info = logger.info if logger is not None else print\n    info('=' * 20 + f' training on fold {args.run_fold} ' + '=' * 20)\n\n    # Load feature object\n    ari = AtomCustomJSONInitializer(f'{args.data_path}/atom_init.json')\n    dmin, dmax, step, var = args.rbf_parameters\n    gdf = GaussianDistance(dmin=dmin, dmax=dmax, step=step, var=var)\n    radius_dic = load_radius_dict(f'{args.data_path}/hubbard_u.yaml')\n\n    # Load and cache data\n    info('Loading data')\n    if os.path.exists(f'{args.train_path}/graph_cache.pickle'):\n        with open(f'{args.train_path}/graph_cache.pickle', 'rb') as f:\n            all_graph = pickle.load(f)\n    elif os.path.exists(f'{args.data_path}/graph_cache.pickle'):\n        with open(f'{args.data_path}/graph_cache.pickle', 'rb') as f:\n            all_graph = pickle.load(f)\n    else:\n        raise FileNotFoundError(\"There is no poscar graph cache, please use preprocess.py to generate poscar graph cache!\")\n\n    if os.path.exists(f'{args.train_path}/seed_{args.seed}/test_cgcmpnn.pickle'):\n        with open(f'{args.train_path}/seed_{args.seed}/test_cgcmpnn.pickle', 'rb') as f:\n            test_data = pickle.load(f)\n    else:\n        test_data = get_data(path=f'{args.train_path}/seed_{args.seed}/test.csv',\n                             graph=all_graph, ari=ari, gdf=gdf, radius_dic=radius_dic, args=args, logger=logger)\n        with open(f'{args.train_path}/seed_{args.seed}/test_cgcmpnn.pickle', 'wb') as fw:\n            pickle.dump(test_data, fw)\n\n    # assert False\n    if os.path.exists(f'{args.train_path}/seed_{args.seed}/train_fold_{args.run_fold}_cgcmpnn.pickle'):\n        with open(f'{args.train_path}/seed_{args.seed}/train_fold_{args.run_fold}_cgcmpnn.pickle', 'rb') as f:\n            train_data = pickle.load(f)\n    else:\n        train_data = get_data(path=f'{args.train_path}/seed_{args.seed}/train_fold_{args.run_fold}.csv',\n                              graph=all_graph, ari=ari, gdf=gdf, 
radius_dic=radius_dic, args=args, logger=logger)\n with open(f'{args.train_path}/seed_{args.seed}/train_fold_{args.run_fold}_cgcmpnn.pickle', 'wb') as fw:\n pickle.dump(train_data, fw)\n\n if os.path.exists(f'{args.train_path}/seed_{args.seed}/valid_fold_{args.run_fold}_cgcmpnn.pickle'):\n with open(f'{args.train_path}/seed_{args.seed}/valid_fold_{args.run_fold}_cgcmpnn.pickle', 'rb') as f:\n valid_data = pickle.load(f)\n else:\n valid_data = get_data(path=f'{args.train_path}/seed_{args.seed}/valid_fold_{args.run_fold}.csv',\n graph=all_graph, ari=ari, gdf=gdf, radius_dic=radius_dic, args=args, logger=logger)\n with open(f'{args.train_path}/seed_{args.seed}/valid_fold_{args.run_fold}_cgcmpnn.pickle', 'wb') as fw:\n pickle.dump(valid_data, fw)\n\n # subsample for incremental experiment\n if args.max_data_size != float('inf'):\n train_data.shuffle(seed=args.seed)\n train_data = CrystalDataset(train_data[:args.max_data_size], args=args)\n\n task_indices = get_task_names(path=f'{args.train_path}/property.csv', use_compound_names=True)\n args.task_index = task_indices[args.dataset_name]\n args.task_names = [args.dataset_name]\n args.num_tasks = 1\n info(task_indices)\n info(args.task_names)\n info(args.task_index)\n\n # convert multi targets to single target, just temp using before revising to multitask\n train_targets = [[targets[args.task_index]] for targets in train_data.targets()]\n train_data.set_targets(train_targets)\n\n valid_targets = [[targets[args.task_index]] for targets in valid_data.targets()]\n valid_data.set_targets(valid_targets)\n\n test_targets = [[targets[args.task_index]] for targets in test_data.targets()]\n test_data.set_targets(test_targets)\n\n info(f'Total size = {len(train_data)+len(valid_data)+len(test_data):,} | '\n f'train size = {len(train_data):,}({len(train_data)/(len(train_data)+len(valid_data)+len(test_data)):.1f}) | '\n f'valid size = {len(valid_data):,}({len(valid_data)/(len(train_data)+len(valid_data)+len(test_data)):.1f}) | '\n f'test size = {len(test_data):,}({len(test_data)/(len(train_data)+len(valid_data)+len(test_data)):.1f})')\n\n # Required for NormLR\n args.train_data_size = len(train_data)\n\n # Training\n save_dir = os.path.join(args.save_dir, f'fold_{args.run_fold}')\n makedirs(save_dir)\n valid_scores, test_scores = run_training(train_data, valid_data, test_data, args, logger)\n\n # Report scores\n for task_name, valid_score, test_score in zip(args.task_names, valid_scores, test_scores):\n info(f'Task name \"{task_name}\": Valid {args.metric} = {valid_score:.6f}, Test {args.metric} = {test_score:.6f}')\n\n return np.nanmean(valid_scores), np.nanmean(test_scores)\n\n\nif __name__ == '__main__':\n args = parse_train_args()\n logger = create_logger(name='train', save_dir=args.save_dir, quiet=args.quiet)\n mean_valid_score, mean_test_score = run(args, logger)\n print(f'Results on the fold {args.run_fold}')\n print(f'Overall Valid {args.metric}: {mean_valid_score:.5f}, Test scores: {mean_test_score:.5f}')\n", "id": "3484246", "language": "Python", "matching_score": 3.2040655612945557, "max_stars_count": 0, "path": "train.py" }, { "content": "import os\r\nimport pickle\r\nimport pandas as pd\r\nfrom sklearn.model_selection import KFold, train_test_split\r\nfrom pymatgen.io.vasp import Poscar\r\nfrom tqdm import tqdm\r\n\r\n\r\ndef generate_graph_cache(poscar_path, save_path, save_name):\r\n all_data = {crystal_name: Poscar.from_file(os.path.join(poscar_path, crystal_name)).as_dict()\r\n for crystal_name in tqdm(os.listdir(poscar_path))}\r\n with 
open(os.path.join(save_path, f'{save_name}.pickle'), 'wb') as f:\r\n pickle.dump(all_data, f)\r\n\r\n\r\ndef split_and_save_data(file_path, seed):\r\n kfold = KFold(n_splits=9, shuffle=True, random_state=seed)\r\n\r\n if not os.path.exists(f'./calculate/seed_{seed}/'):\r\n os.makedirs(f'./calculate/seed_{seed}')\r\n\r\n data = pd.read_csv(file_path)\r\n train_val_data, test_data = train_test_split(data, test_size=0.1, random_state=seed)\r\n test_data.to_csv(f'./calculate/seed_{seed}/test.csv', index=None)\r\n for fold_num, (train_index, valid_index) in enumerate(kfold.split(train_val_data)):\r\n train_data, valid_data = train_val_data.iloc[train_index], train_val_data.iloc[valid_index]\r\n train_data.to_csv(f'./calculate/seed_{seed}/train_fold_{fold_num + 1}.csv', index=None)\r\n valid_data.to_csv(f'./calculate/seed_{seed}/valid_fold_{fold_num + 1}.csv', index=None)\r\n\r\n\r\ndef fine_tune_split_data(file_path, seed):\r\n kfold = KFold(n_splits=9, shuffle=True, random_state=seed)\r\n\r\n if not os.path.exists(f'./seed_{seed}/'):\r\n os.makedirs(f'./seed_{seed}')\r\n\r\n data = pd.read_csv(file_path)\r\n for fold_num, (train_index, valid_index) in enumerate(kfold.split(data)):\r\n train_data, valid_data = data.iloc[train_index], data.iloc[valid_index]\r\n train_data.to_csv(f'./seed_{seed}/finetune_train_fold_{fold_num + 1}.csv', index=None)\r\n valid_data.to_csv(f'./seed_{seed}/finetune_valid_fold_{fold_num + 1}.csv', index=None)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n #split_and_save_data('./calculate/property_rm_outliers.csv', seed=333)\r\n #split_and_save_data('./calculate/property.csv', seed=333)\r\n generate_graph_cache(poscar_path='./poscar_size_influence/seed_333/400_big', save_path='./poscar_size_influence/seed_333/',\r\n save_name='graph_cache_big_size')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "id": "9814155", "language": "Python", "matching_score": 0.8909457921981812, "max_stars_count": 0, "path": "data/proprecess.py" }, { "content": "from crystalnet.parsing import parse_predict_args, modify_predict_args\r\nfrom crystalnet.train import make_predictions\r\n\r\nif __name__ == '__main__':\r\n args = parse_predict_args()\r\n test_name, test_prediction = make_predictions(args)\r\n\r\n with open(f'{args.test_path}/seed_{args.seed}/predict_fold8_cgcmpnn.csv', 'w') as fw:\r\n fw.write(f'name,{args.dataset_name}\\n')\r\n\r\n for name, prediction in zip(test_name, test_prediction):\r\n fw.write(f'{name},{\",\".join([str(predict) for predict in prediction])}\\n')\r\n", "id": "6135037", "language": "Python", "matching_score": 1.1415815353393555, "max_stars_count": 0, "path": "predict.py" }, { "content": "from .evaluate import evaluate, evaluate_predictions, transfer_evaluate\nfrom .make_predictions import make_predictions\nfrom .predict import predict, transfer_predict\nfrom .run_training import run_training\nfrom .train import train, transfer_train\n", "id": "9892227", "language": "Python", "matching_score": 0, "max_stars_count": 8, "path": "crystalnet/train/__init__.py" }, { "content": "#/usr/bin/env python\nimport os\nimport re\n\ndef find_job(job_id):\n id = False\n jobcmd = 'squeue | awk \\'{print $1}\\'' \n job = os.popen(jobcmd).read()\n id_list = re.split(\"\\n\",job)\n if job_id in id_list:\n print('find job ',job_id)\n id = True\n else: \n print('can not find ',job_id)\n\n return id \n\ndef get_workdir(job_id):\n dir_cmd = 'yhcontrol show job ' + job_id + ' | grep WorkDir | cut -d \"=\" -f 2'\n job_dir = os.popen(dir_cmd).read().rstrip()\n script_dir = 
os.popen('pwd').read().rstrip()\n log = job_dir + os.sep + 'slurm-' + job_id + '.out'\n print(\"The src dir is:\\n\",script_dir)\n print(\"This job dir is:\\n\",job_dir)\n print(\"The log file is here:\\n\",log)\n \n \n \n\nif __name__ == '__main__':\n job_id = input('input job id:')\n if find_job(job_id):\n get_workdir(job_id)\n else:\n exit(1)\n \n", "id": "11044158", "language": "Python", "matching_score": 0.707866907119751, "max_stars_count": 0, "path": "check_job/ent_job.py" }, { "content": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport os\r\nfrom os.path import *\r\nimport shutil\r\n# import fcntl\r\nimport json\r\n\r\nfrom .layer import Layer\r\nfrom .layer import ConstructLayer\r\n\r\n\r\ndef get_struct_files(struct_dir):\r\n for root, _, files in os.walk(struct_dir):\r\n for f in files:\r\n if splitext(f)[-1] == '.cif' or 'POSCAR' in f.upper():\r\n yield join(root, f)\r\n\r\n\r\ndef split_task(task_dir, top=20):\r\n count, name = 0, 1\r\n for item in get_struct_files(task_dir):\r\n des_dir = join(task_dir, str(name))\r\n if not exists(join(des_dir)):\r\n os.makedirs(des_dir)\r\n shutil.move(item, join(des_dir, basename(item)))\r\n count += 1\r\n if count == top:\r\n name += 1\r\n top += top\r\n\r\n\r\ndef safe_write(result, fn):\r\n with open(fn, \"a+\") as f:\r\n #fcntl.flock(f, fcntl.LOCK_EX)\r\n f.write(result)\r\n #fcntl.flock(f, fcntl.LOCK_UN)\r\n\r\n\r\ndef safe_to_json(file_name, result):\r\n with open(file_name, \"w\") as f:\r\n #fcntl.flock(f, fcntl.LOCK_EX)\r\n json.dump(result, f, indent=4)\r\n #fcntl.flock(f, fcntl.LOCK_UN)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n pass\r\n", "id": "12800339", "language": "Python", "matching_score": 0.5022597908973694, "max_stars_count": 0, "path": "mat2d_pkg/__init__.py" }, { "content": "#!/usr/bin/env python\nimport os\nimport re\nimport shutil\n \nsou_folder = '/WORK/nscc-gz_material_1/MOFs/data/result_hvf_77k'\ndes_folder = '/WORK/nscc-gz_material_1/MOFs/data/supplement/N2_77K/calc'\ncif_folder = '/WORK/nscc-gz_material_1/MOFs/data/supplement/N2_77K/cif'\nfileinfo = '/WORK/nscc-gz_material_1/MOFs/data/supplement/N2_77K/ciflist'\n\nresub_list = os.listdir(cif_folder)\nfor i in resub_list:\n j = os.path.splitext(i)[0]\n with open(fileinfo,\"a+\") as f1:\n f1.write(j + '\\n')\nwith open(fileinfo,\"r\") as f2:\n cifinfo = f2.read()\n#print(cifinfo) \nfor root, dirs, files in os.walk(sou_folder): \n for file in files:\n if \"\".join(file).split(\".\")[-1] == 'data':\n sfile_path=os.path.join(root,file) \n sfpart = \"/\".join(sfile_path.split(\"/\")[-6:])\n mofname = sfpart.split(\"/\")[1]\n if mofname in cifinfo:\n sfpath = des_folder + os.sep + \"/\".join(sfpart.split(\"/\")[:-1])\n if not os.path.exists(sfpath):\n os.makedirs(sfpath)\n dfile_path= des_folder + os.sep + sfpart\n shutil.copy(sfile_path,dfile_path)\nprint('moving hvf file was done!\\n now start to moving cif file')\npath = cif_folder\nfiles_list = os.listdir(path)\nfor cif_file in files_list:\n filename, suffix = os.path.splitext(cif_file)\n for patnum in (range(65,91) or range(91,123)):\n pat = r\"^\"+chr(patnum)\n pattren = re.compile(pat)\n matchcif = pattren.match(filename)\n if matchcif is not None:\n filepath = des_folder + \"/part_\"+\"\".join(chr(patnum))\n print(\"Moving \"+ cif_file + \" to \" + filepath + \"...\")\n if not os.path.exists(filepath):\n os.makedirs(filepath)\n srcfile = path + os.sep + cif_file\n desfile = filepath + os.sep + cif_file\n shutil.move(srcfile,desfile)\nprint('done\\n now it\\'s ok to submit ads job!')\nexit(0)\n", 
"id": "10699119", "language": "Python", "matching_score": 1.814544916152954, "max_stars_count": 0, "path": "resubmit_unfinishedjob.py" }, { "content": "#!/usr/bin/env python\nimport re\nimport os\nimport linecache\nimport shutil\nimport sys\n\nargs = sys.argv\nsrcpath = args[1]\n#despath = args[2]\n#srcpath = \"/WORK/nscc-gz_material_1/MOFs/data/cif/all_cif\"\n#despath = \"/WORK/nscc-gz_material_1/MOFs/data/cif/changecif\" \n#if not os.path.exists(despath):\n# os.makedirs(despath)\nfiles = [ i for i in os.listdir(srcpath) if os.path.splitext(i)[1] == \".cif\"]\npbfile = []\nprint(len(files))\npat = re.compile(r\"#END\")\nfor file in files:\n with open(srcpath + os.sep + file,\"r\") as f:\n data = f.read()\n findstr = pat.findall(data)\n if len(findstr) != 1:\n print(findstr)\n print(\"find it \",file,\" now starting modeify...\")\n modata = data.split(\"#END\")[0]\n #print(modata)\n os.system('mv ' + file + ' wrong_cif' )\n with open(srcpath + os.sep + file,\"w\") as f2:\n f2.write(modata + \"\\n#END\")\n pbfile.append(file)\n srcfile = srcpath + os.sep + file\n #desfile = despath + os.sep + file\n #shutil.copy(srcfile,desfile)\nprint(len(pbfile),\" edited\")\n\n \n\n", "id": "2951925", "language": "Python", "matching_score": 1.9289777278900146, "max_stars_count": 0, "path": "chk_cif/checkcif.py" }, { "content": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\nimport threading\r\nimport argparse\r\nimport re\r\nimport os\r\n\r\nparser = argparse.ArgumentParser(\"modefiy cif files\")\r\nparser.add_argument(\"jobpath\", help=\"job check path\")\r\nargs = parser.parse_args()\r\njbp = args.jobpath\r\n\r\nnum, qnum = 0, 0\r\n\r\nmutex = threading.Lock()\r\nclass mythread(threading.Thread):\r\n def __init__(self, cf):\r\n super(mythread, self).__init__()\r\n self.cf = cf\r\n\r\n def run_job(self):\r\n global num,qnum\r\n if mutex.acquire(1):\r\n chk = chkcf(self.cf)\r\n wnum = get_lines_cf(self.cf)\r\n\r\n if chk:\r\n num += 1\r\n print(self.cf, \"Already modify \", num, \" file\")\r\n if wnum == 1:\r\n qnum += 1\r\n print(\"got it %d!, wrong cif line! 
%s\" % (qnum, self.cf))\r\n mutex.release()\r\n\r\ndef findcf(jbp):\r\n ciflist = []\r\n for root, list_dirnames, list_filenames in os.walk(jbp):\r\n for file in list_filenames:\r\n if os.path.splitext(file)[-1] == \".cif\":\r\n fn = os.path.join(root, file)\r\n ciflist.append(fn)\r\n\r\n return ciflist\r\n\r\ndef chkcf(cf):\r\n chk = False\r\n cfname = os.path.split(cf)[-1]\r\n pat = re.compile(r\"#END\")\r\n with open(cf, \"r\") as f:\r\n data = f.read()\r\n findstr = pat.findall(data)\r\n\r\n if len(findstr) > 1:\r\n print(findstr)\r\n print(\"find it \", cfname, \" now starting modify...\")\r\n modata = data.split(\"#END\")[0]\r\n #os.rename(cf, cfname + \"_wrong_cif\")\r\n with open(cf, \"w\") as f2:\r\n f2.write(modata + \"\\n#END\")\r\n chk = True\r\n\r\n return chk\r\n\r\ndef get_lines_cf(cf):\r\n with open(cf, \"r\") as f:\r\n data = f.readlines()\r\n wnum = len(data)\r\n\r\n return wnum\r\n\r\ncflist = findcf(jbp)\r\n\r\nthd_list = []\r\n\r\nfor cf in cflist:\r\n t = mythread(cf)\r\n t.start()\r\n thd_list.append(t)\r\n\r\nfor mythd in thd_list:\r\n mythd.run_job()\r\n\r\nprint(\"Total chk cif \",len(thd_list))\r\nprint(\"Total wrong cif \",qnum)\r\nprint(\"Total muti-end cif \",num)\r\n\r\nprint(\"GG, well done!\")\r\n", "id": "7110328", "language": "Python", "matching_score": 2.1540634632110596, "max_stars_count": 0, "path": "check_job/chk_cif.py" }, { "content": "#!/usr/bin/env/python\nimport threading\nimport re\nimport os\nimport linecache\nimport shutil\nimport sys\n\nthdmutex = threading.Lock()\n\nclass chkthd(threading.Thread):\n def __init__(self, path):\n self.path = path\n def run_job(self,path):\n \n \n", "id": "5040931", "language": "Python", "matching_score": 0.7774841785430908, "max_stars_count": 0, "path": "chk_cif/chk.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nwith open(\"./joblist\",\"r\") as f:\n job = f.readlines()\n for line in job:\n jobcode = line\n os.system(\"yhcancel \" + jobcode)\n print(jobcode)\n", "id": "2627725", "language": "Python", "matching_score": 0.1429242342710495, "max_stars_count": 0, "path": "cleanjob/clean.py" } ]
2.168141
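The crystalnet training scripts in the record above share one piece of bookkeeping: keep the checkpoint whose validation score is best (smaller or larger depending on args.minimize_score) and average the per-model test predictions into an ensemble estimate. The sketch below isolates that pattern; the helper name select_best_epoch, the example scores, and the (4, 1) prediction shape are illustrative assumptions, not code from the repository.

# Minimal, self-contained sketch of the best-checkpoint / ensemble-averaging
# bookkeeping used in run_training above. All numbers are made-up examples.
import numpy as np

def select_best_epoch(val_scores, minimize_score=True):
    # Same comparison rule as run_training: update only on strict improvement.
    best_score = float('inf') if minimize_score else -float('inf')
    best_epoch = 0
    for epoch, score in enumerate(val_scores):
        if (minimize_score and score < best_score) or (not minimize_score and score > best_score):
            best_score, best_epoch = score, epoch
    return best_epoch, best_score

print(select_best_epoch([0.92, 0.81, 0.85, 0.78, 0.80]))  # -> (3, 0.78) for an RMSE-style metric

# Ensemble averaging of per-model test predictions, shape (n_samples, n_tasks).
ensemble_size, sum_test_preds = 3, np.zeros((4, 1))
for _ in range(ensemble_size):
    sum_test_preds += np.random.rand(4, 1)  # stand-in for predict(model, test_data, ...)
avg_test_preds = (sum_test_preds / ensemble_size).tolist()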
Cactusmachete
[ { "content": "\"\"\"\nTests for compatibility workarounds.\n\n\"\"\"\nimport os\n\nimport pytest\n\nfrom tests.lib import assert_all_changes, pyversion\n\n\[email protected]\ndef test_debian_egg_name_workaround(script):\n \"\"\"\n We can uninstall packages installed with the pyversion removed from the\n egg-info metadata directory name.\n\n Refs:\n http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=618367\n https://bugs.launchpad.net/ubuntu/+source/distribute/+bug/725178\n https://bitbucket.org/ianb/pip/issue/104/pip-uninstall-on-ubuntu-linux\n\n \"\"\"\n result = script.pip('install', 'INITools==0.2')\n\n egg_info = os.path.join(\n script.site_packages, \"INITools-0.2-py%s.egg-info\" % pyversion)\n\n # Debian only removes pyversion for global installs, not inside a venv\n # so even if this test runs on a Debian/Ubuntu system with broken\n # setuptools, since our test runs inside a venv we'll still have the normal\n # .egg-info\n assert egg_info in result.files_created, \"Couldn't find %s\" % egg_info\n\n # The Debian no-pyversion version of the .egg-info\n mangled = os.path.join(script.site_packages, \"INITools-0.2.egg-info\")\n assert mangled not in result.files_created, \"Found unexpected %s\" % mangled\n\n # Simulate a Debian install by copying the .egg-info to their name for it\n full_egg_info = os.path.join(script.base_path, egg_info)\n assert os.path.isdir(full_egg_info)\n full_mangled = os.path.join(script.base_path, mangled)\n os.renames(full_egg_info, full_mangled)\n assert os.path.isdir(full_mangled)\n\n # Try the uninstall and verify that everything is removed.\n result2 = script.pip(\"uninstall\", \"INITools\", \"-y\")\n assert_all_changes(result, result2, [script.venv / 'build', 'cache'])\n\n\ndef test_setup_py_with_dos_line_endings(script, data):\n \"\"\"\n It doesn't choke on a setup.py file that uses DOS line endings (\\\\r\\\\n).\n\n Refs https://github.com/pypa/pip/issues/237\n \"\"\"\n to_install = data.packages.joinpath(\"LineEndings\")\n script.pip('install', to_install)\n", "id": "904172", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "tests/functional/test_install_compat.py" }, { "content": "import sysconfig\n\nimport pytest\nfrom mock import patch\n\nfrom pip._internal import pep425tags\n\n\[email protected]('version_info, expected', [\n ((2,), '2'),\n ((2, 8), '28'),\n ((3,), '3'),\n ((3, 6), '36'),\n # Test a tuple of length 3.\n ((3, 6, 5), '36'),\n # Test a 2-digit minor version.\n ((3, 10), '310'),\n])\ndef test_version_info_to_nodot(version_info, expected):\n actual = pep425tags.version_info_to_nodot(version_info)\n assert actual == expected\n\n\nclass TestPEP425Tags(object):\n\n def mock_get_config_var(self, **kwd):\n \"\"\"\n Patch sysconfig.get_config_var for arbitrary keys.\n \"\"\"\n get_config_var = sysconfig.get_config_var\n\n def _mock_get_config_var(var):\n if var in kwd:\n return kwd[var]\n return get_config_var(var)\n return _mock_get_config_var\n\n def test_no_hyphen_tag(self):\n \"\"\"\n Test that no tag contains a hyphen.\n \"\"\"\n import pip._internal.pep425tags\n\n mock_gcf = self.mock_get_config_var(SOABI='cpython-35m-darwin')\n\n with patch('sysconfig.get_config_var', mock_gcf):\n supported = pip._internal.pep425tags.get_supported()\n\n for tag in supported:\n assert '-' not in tag.interpreter\n assert '-' not in tag.abi\n assert '-' not in tag.platform\n\n\nclass TestManylinux2010Tags(object):\n\n @pytest.mark.parametrize(\"manylinux2010,manylinux1\", [\n (\"manylinux2010_x86_64\", \"manylinux1_x86_64\"),\n 
(\"manylinux2010_i686\", \"manylinux1_i686\"),\n ])\n def test_manylinux2010_implies_manylinux1(self, manylinux2010, manylinux1):\n \"\"\"\n Specifying manylinux2010 implies manylinux1.\n \"\"\"\n groups = {}\n supported = pep425tags.get_supported(platform=manylinux2010)\n for tag in supported:\n groups.setdefault(\n (tag.interpreter, tag.abi), []\n ).append(tag.platform)\n\n for arches in groups.values():\n if arches == ['any']:\n continue\n assert arches[:2] == [manylinux2010, manylinux1]\n\n\nclass TestManylinux2014Tags(object):\n\n @pytest.mark.parametrize(\"manylinuxA,manylinuxB\", [\n (\"manylinux2014_x86_64\", [\"manylinux2010_x86_64\",\n \"manylinux1_x86_64\"]),\n (\"manylinux2014_i686\", [\"manylinux2010_i686\", \"manylinux1_i686\"]),\n ])\n def test_manylinuxA_implies_manylinuxB(self, manylinuxA, manylinuxB):\n \"\"\"\n Specifying manylinux2014 implies manylinux2010/manylinux1.\n \"\"\"\n groups = {}\n supported = pep425tags.get_supported(platform=manylinuxA)\n for tag in supported:\n groups.setdefault(\n (tag.interpreter, tag.abi), []\n ).append(tag.platform)\n\n expected_arches = [manylinuxA]\n expected_arches.extend(manylinuxB)\n for arches in groups.values():\n if arches == ['any']:\n continue\n assert arches[:3] == expected_arches\n", "id": "1231409", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "tests/unit/test_pep425tags.py" } ]
0
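The parametrized cases in test_version_info_to_nodot in the record above pin down the expected behaviour of the helper: keep only the major and minor components of the version tuple and join them without a dot. The sketch below satisfies exactly those cases; it illustrates the contract the tests encode and is not necessarily pip's own implementation in pip._internal.pep425tags.

# Sketch of a version_info_to_nodot-style helper matching the test expectations.
def version_info_to_nodot(version_info):
    # (2,) -> '2', (3, 6) -> '36', (3, 6, 5) -> '36', (3, 10) -> '310'
    return ''.join(str(part) for part in version_info[:2])

assert version_info_to_nodot((2, 8)) == '28'
assert version_info_to_nodot((3, 6, 5)) == '36'
assert version_info_to_nodot((3, 10)) == '310'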
LukasErlenbach
[ { "content": "\"\"\"\ncompare_gpa_rand.py\n\nThis script compares the performance of the Fast GPA Sampler to a baseline of\nrandomly selection points in the active learning iterations.\n\nTo change the experiment setup, change the values of the variables size_pool,\nsize_test, random_seed below, or edit the config.yaml files in configs/.\n\nLoads configs from the \"configs\" directory.\nSaves results in \"results_gpa_vs_rand_rs_\" plus the used random seed.\n\"\"\"\n\n\nfrom time import time\nimport matplotlib.pyplot as plt\nimport logging\n\nfrom run_experiment import prepare_experiments, run_experiment\nfrom utils.plotting import plot_rmse_metrics\n\nlog_level = logging.INFO\nsize_pool = 0\nsize_test = 0\nrandom_seed = 0\nconfig_gpa = \"configs/config_gpa.yaml\"\nconfig_rand = \"configs/config_rand.yaml\"\n\nresults_dir = \"results_gpa_vs_rand_rs_\" + str(random_seed) + \"/\"\nds = prepare_experiments(results_dir, size_pool, size_test, random_seed, log_level)\n\nmetrics_gpa = run_experiment(ds, results_dir, config_gpa, random_seed)\nmetrics_rand = run_experiment(ds, results_dir, config_rand, random_seed)\n\ntitle = \"FastGPA Sampler vs random point selection\"\nlabels = [\"FastGPA Sampler\", \"Random Selection\"]\nfig = plot_rmse_metrics([metrics_gpa, metrics_rand], labels, title=title)\n\nfigpath = results_dir + \"comparison_gpa_rand\"\nprint(\"Saving figure as \", figpath)\nfig.savefig(figpath)\n", "id": "7746871", "language": "Python", "matching_score": 4.948103904724121, "max_stars_count": 2, "path": "source/compare_gpa_rand.py" }, { "content": "\"\"\"\ncompare_fastgpa_batchgpa.py\n\nThis script compares the Fast GPA Sampler to the Batch GPA Sampler.\n\nTo change the experiment setup, change the values of the variables size_pool,\nsize_test, random_seed below, or edit the config.yaml files in configs/.\n\nLoads configs from the \"configs\" directory.\nSaves results in \"results_fgpa_vs_bgpa_rs_\" plus the used random seed.\n\"\"\"\n\n\nfrom time import time\nimport matplotlib.pyplot as plt\nimport logging\n\nfrom run_experiment import prepare_experiments, run_experiment\nfrom utils.plotting import plot_rmse_metrics\nfrom utils.timing import secs_to_str\n\nlog_level = logging.INFO\nsize_pool = 0\nsize_test = 0\nrandom_seed = 0\nconfig_fgpa = \"configs/config_small_fgpa.yaml\"\nconfig_bgpa = \"configs/config_small_bgpa.yaml\"\n\nresults_dir = \"results_fgpa_vs_bgpa_rs_\" + str(random_seed) + \"/\"\nds = prepare_experiments(results_dir, size_pool, size_test, random_seed, log_level)\n\nmetrics_fgpa = run_experiment(ds, results_dir, config_fgpa, random_seed)\nmetrics_bgpa = run_experiment(ds, results_dir, config_bgpa, random_seed)\n\ntitle = \"FastGPA Sampler took \" + secs_to_str(metrics_fgpa.time_sampling) + \" secs\\n\"\ntitle += \"Batch GPA Sampler took \" + secs_to_str(metrics_bgpa.time_sampling) + \" secs\"\nlabels = [\"FastGPA Sampler\", \"Batch GPA Sampler\"]\nfig = plot_rmse_metrics([metrics_fgpa, metrics_bgpa], labels, title=title)\n\nfigpath = results_dir + \"comparison_fgpa_bgpa\"\nprint(\"Saving figure as \", figpath)\nfig.savefig(figpath)\n", "id": "8448928", "language": "Python", "matching_score": 2.0909910202026367, "max_stars_count": 2, "path": "source/compare_fastgpa_batchgpa.py" }, { "content": "from .run_experiment import prepare_experiments, run_experiment\n", "id": "8138142", "language": "Python", "matching_score": 0.11389598995447159, "max_stars_count": 2, "path": "source/run_experiment/__init__.py" }, { "content": "\"\"\"\nprepare_variables.py\n\nThe module gets 
called from run_experiments.py and initializes several global\nvaliables such as logger, random seed and the result directory.\n\nIf the result directory is not empty, the user is asked in the command line, if\nresults should be overwritten.\n\"\"\"\n\nimport os\nfrom classes import ExpConfig, init_logger\nimport tensorflow.compat.v1 as tf\nimport logging\n\n# tf.disable_v2_behavior()\nfrom utils import set_global_var, get_global_var\n\n# if results_dir does not exists -> create it\n# if results_dir does exists -> ask user if it can be overwritten\ndef prepare_results_dir(results_dir, logger):\n if not os.path.isdir(results_dir):\n os.mkdir(results_dir)\n else:\n response = \"\"\n while not response in [\"yes\", \"no\"]:\n response = input(\n 'WARNING Do you want to overwrite the directory \"'\n + results_dir\n + '\" ? (yes/no) '\n )\n if response == \"no\":\n raise Exception(\"Result directory will not be overwritten.\")\n else:\n logger.warning(\"Overwriting the directory \", results_dir)\n\n\ndef prepare_global_vars(results_dir, random_seed, log_level=logging.INFO):\n logger = set_global_var(\"logger\", init_logger(log_level))\n # create dir for saving results\n prepare_results_dir(results_dir, logger)\n\n # add filehandler after preparing the results directory\n logger.clear_filehandler()\n logger.add_filehandler(results_dir + \"logfile\")\n\n infos = \"\"\n infos += \"The current working directory is \" + os.getcwd() + \"\\n\"\n infos += \" Saving results in directory \" + results_dir + \"\\n\"\n infos += \" Tensorflow version is \" + tf.__version__\n logger.info(infos)\n\n set_global_var(\"results_dir\", results_dir)\n set_global_var(\"global_seed\", random_seed)\n return results_dir\n", "id": "2211388", "language": "Python", "matching_score": 1.978762149810791, "max_stars_count": 2, "path": "source/run_experiment/prepare_variables.py" }, { "content": "from .custom_logger import init_logger\nfrom .exp_config import ExpConfig\nfrom .dataset import Dataset\n", "id": "12090830", "language": "Python", "matching_score": 0.88193279504776, "max_stars_count": 2, "path": "source/classes/__init__.py" }, { "content": "\"\"\"\ncustom_logger.py\n\nThis module implements the CustomLogger class.\n\nThis is a customized version of the standard logging.Logger with additional\noutput features. 
At the beginning of the program flow, a CustomLogger gets\ninitialized as global variable which then can be called from everywhere in\nthe code.\n\"\"\"\n\nimport logging\nfrom time import strftime, mktime\nfrom utils import secs_to_str\n\n\ndef log(*args):\n return \" \".join(map(str, args))\n\n\nclass CustomLogger(logging.Logger):\n def set_prefix(self, prefix):\n self.prefix = prefix\n\n def info(self, *args):\n super().info(log(self.prefix, \" \\n \", *args))\n\n def debug(self, *args):\n super().debug(log(self.prefix, \" \\n \", *args))\n\n def warning(self, *args):\n super().warning(log(self.prefix, \" \\n \", *args))\n\n def critical(self, *args):\n super().critical(log(self.prefix, \" \\n \", *args))\n\n def format_elapsed_time(self, start_time, end_time):\n return (\n \"Start time:\"\n + strftime(\"%Y-%m-%d %H:%M:%S\", start_time)\n + \"\\n End time:\"\n + strftime(\"%Y-%m-%d %H:%M:%S\", end_time)\n + \"\\n Elapsed time(s):\"\n + secs_to_str(mktime(end_time) - mktime(start_time))\n )\n\n def add_filehandler(self, logpath):\n # clear and create logfile\n open(logpath, \"w\").close()\n fh = logging.FileHandler(logpath)\n fh.setLevel(self.level)\n formatter = logging.Formatter(\"%(levelname)s - %(asctime)s - %(message)s\")\n fh.setFormatter(formatter)\n self.addHandler(fh)\n\n def clear_filehandler(self):\n # only keep StreamHandler (which is always initialized first)\n self.handlers = [self.handlers[0]]\n\n\ndef init_logger(log_level):\n logger = CustomLogger(\"dummy_name\")\n logger.set_prefix(\"\")\n logger.setLevel(log_level)\n # create formatter\n formatter = logging.Formatter(\"%(levelname)s - %(asctime)s - %(message)s\")\n # create console handler and set log level\n ch = logging.StreamHandler()\n ch.setLevel(log_level)\n # add formatter to console handler\n ch.setFormatter(formatter)\n # add console handler to logger\n logger.addHandler(ch)\n return logger\n", "id": "3330981", "language": "Python", "matching_score": 1.2200862169265747, "max_stars_count": 2, "path": "source/classes/custom_logger.py" }, { "content": "\"\"\"\ntiming.py\n\nThis module implements time related functions.\n\"\"\"\n\nfrom datetime import timedelta\n\n\ndef secs_to_str(secs):\n return str(timedelta(seconds=round(secs)))\n", "id": "2933076", "language": "Python", "matching_score": 0.0503559336066246, "max_stars_count": 2, "path": "source/utils/timing.py" }, { "content": "\"\"\"\nsampling.property\n\nThis module implements various sampling policies.\nEach policy hold a reference to X_pool and X_train and implements a\npolicy.request_samples(, n_samples, learner) function.\n\nAvailable policies:\n RandomPolicy (samples random points)\n GPAPolicy (Fast GPA Sampler as described in the thesis, based on the\n GPApproximator implementation from gpa.py)\n BatchGPAPolicy (as Fast GPA Sampler, but without fast updating)\n\"\"\"\n\n\nimport numpy as np\nfrom sklearn.utils.random import sample_without_replacement\nfrom run_experiment.gpa import GPApproximator\nfrom utils import get_global_var\n\n\n# base class\nclass Policy:\n def __init__(self):\n pass\n\n def update(self, X_pool, X_train):\n self.X_pool = X_pool\n self.X_train = X_train\n\n\n# sample points at random\nclass RandomPolicy(Policy):\n def __str__(self):\n return \"RandomPolicy\"\n\n # returns predicted variance of the network\n def var(self, learner, X_train, X_pool):\n mean, std = learner.predict_w_std(X_pool)\n var = np.power(std, 2)\n return var\n\n def request_samples(self, n_samples, learner=None):\n idxs_samples = sample_without_replacement(\n 
self.X_pool.shape[0], n_samples=n_samples\n )\n return idxs_samples\n\n\n# implementation of the Fast GPA Sampler\nclass GPAPolicy(Policy):\n def __init__(self, batch_size, n_mc_samples):\n self.batch_size = batch_size\n self.n_mc_samples = n_mc_samples\n\n def __str__(self):\n return \"Fast GPA Policy\"\n\n def var(self, learner, X_train, X_pool):\n # return post variance of pool data\n gpa = GPApproximator(self.n_mc_samples)\n post_var = gpa.eval_post_var(learner, X_train, X_pool)\n return post_var\n\n def request_samples(self, n_samples, learner):\n logger = get_global_var(\"logger\")\n logger.info(\n \"Requesting\",\n n_samples,\n \"samples from Fast GPA Policy with batch_size \",\n self.batch_size,\n )\n gpa = GPApproximator(self.n_mc_samples)\n # this is the list we want to return\n idxs_sample = []\n # sample at most self.batch_size many points in each iteration\n while len(idxs_sample) < n_samples:\n # standard evaluation\n if len(idxs_sample) == 0:\n post_var = gpa.eval_post_var(learner, self.X_train, self.X_pool)\n # fast updating procedure\n else:\n post_var = gpa.eval_post_var_new_points(max_var_idxs_in_pool)\n # ensure we do not overshoot the requested number of samples\n n_request_max = np.min((self.batch_size, n_samples - len(idxs_sample)))\n # select points with highest post_var values\n max_var_idxs_in_pool = list(post_var.argsort()[-n_request_max:])\n idxs_sample += max_var_idxs_in_pool\n\n # ensure the sampled idxs are unique\n # -> sampled idxs should have post_var=0\n if len(set(idxs_sample)) < len(idxs_sample):\n raise Exception(\"GPA Sampler selected some idxs multiple times.\")\n return idxs_sample\n\n\n# implementation of the Batch GPA Sampler\n# similar to the Fast GPA Sampler above but without the fast update\nclass BatchGPAPolicy(Policy):\n def __init__(self, batch_size, n_mc_samples):\n self.batch_size = batch_size\n self.n_mc_samples = n_mc_samples\n\n def __str__(self):\n return \"Batch GPA Policy\"\n\n def request_samples(self, n_samples, learner):\n logger = get_global_var(\"logger\")\n logger.info(\"Requesting\", n_samples, \"samples from Batch GPA Policy.\")\n gpa = GPApproximator(self.n_mc_samples)\n # this is the list we want to return, real idxs\n idxs_sample = []\n # this is the list we fill during one iteration, reset local idxs\n idxs_sample_iter = []\n # to keep track of the original idxs, use to transform _iter to idxs_sample\n idxs_pool = np.arange(len(self.X_pool))\n\n counter_all_idxs = lambda: len(idxs_sample) + len(idxs_sample_iter)\n\n while counter_all_idxs() < n_samples:\n n_request_samples = min(self.batch_size, n_samples - counter_all_idxs())\n # standard evaluation\n post_var = gpa.eval_post_var(learner, self.X_train, self.X_pool)\n max_var_idxs_in_pool = post_var.argsort()[-n_request_samples:]\n if n_request_samples == 1:\n max_var_idxs_in_pool = [max_var_idxs_in_pool]\n idxs_sample_iter = max_var_idxs_in_pool\n\n if counter_all_idxs() < n_samples:\n # idxs in the original pool\n real_idxs_sample_iter = [int(idxs_pool[i]) for i in idxs_sample_iter]\n idxs_sample = idxs_sample + real_idxs_sample_iter\n # update pool and train data\n idxs_pool = np.setdiff1d(idxs_pool, real_idxs_sample_iter)\n remove_from_pool = self.X_pool[idxs_sample_iter, :]\n if len(idxs_sample_iter) == 1:\n remove_from_pool = remove_from_pool.reshape(1, -1)\n self.X_train = np.append(self.X_train, remove_from_pool, axis=0)\n self.X_train = np.append(self.X_train, remove_from_pool, axis=0)\n self.X_pool = np.delete(self.X_pool, idxs_sample_iter, axis=0)\n 
idxs_sample_iter = []\n\n # add remaining idxs_samples_iter in last iteration\n idxs_sample_iter = [int(idxs_pool[i]) for i in idxs_sample_iter]\n idxs_sample += idxs_sample_iter\n # ensure the sampled idxs are unique\n # -> sampled idxs should have post_var=0\n if len(set(idxs_sample)) < len(idxs_sample):\n raise Exception(\"GPA Sampler selected some idxs multiple times.\")\n return np.array(idxs_sample)\n", "id": "5455414", "language": "Python", "matching_score": 3.164449453353882, "max_stars_count": 2, "path": "source/run_experiment/sampling.py" }, { "content": "\"\"\"\ngpa.py\n\nThis module implements a GPApproximator which can be used to compute the\nposterior variance of an approximate Gaussian Process.\n\"\"\"\n\nimport numpy as np\n\n# init defines number of Monte Carlo samples used for the estimation and the\n# regularization la(mbda)\nclass GPApproximator:\n def __init__(self, n_samples_mc=25, la=0.01):\n self.n_samples_mc = n_samples_mc\n self.la = la\n # member variables set during computation\n self.samples = None\n self.cov_est = None\n self.post_cov_est = None\n\n # expects the learner to implement a .sample_y function\n # samples self.n_samples_mc many instances and centralizes them\n def sample_y_values(self, learner, X_data):\n self.samples = np.array(learner.sample_y(X_data, self.n_samples_mc))\n self.samples -= np.mean(self.samples, axis=0)\n assert self.samples.shape[0] == self.n_samples_mc\n\n # estimate covariance matrix and saves it to self.cov_est\n # if resample==True, call self.sample_y_values()\n def estimate_cov(self, learner, X_data, resample):\n if self.samples is None or resample:\n self.sample_y_values(learner, X_data)\n self.cov_est = self.samples.T.dot(self.samples) / self.n_samples_mc\n return self.cov_est\n\n # calls self.estimate_cov() and return diagonal of self.cov_est\n def eval_sample_var(self, learner, X_anchor, X_pool, estimate_cov=True):\n if estimate_cov:\n self.estimate_cov(learner, np.append(X_anchor, X_pool, axis=0), True)\n return np.diag(self.cov_est)[len(X_anchor) :]\n\n # use self.cov_est to estimate posterior variance\n # sets self.post_cov_est\n def eval_post_var(self, learner, X_anchor, X_pool, estimate_cov=True):\n len_X_anchor = len(X_anchor)\n len_X_pool = len(X_pool)\n # calculate self.cov_est\n if estimate_cov:\n self.estimate_cov(learner, np.append(X_anchor, X_pool, axis=0), True)\n # self.cov_est is (len(X_anchor) + len(X_pool))**2\n idxs_anchor = np.arange(len_X_anchor)\n idxs_pool = np.arange(len_X_anchor, len_X_anchor + len_X_pool)\n assert (self.cov_est == self.cov_est.T).all()\n\n # get submatrices\n cov_old_old = self.cov_est[np.ix_(idxs_anchor, idxs_anchor)]\n cov_old_new = self.cov_est[np.ix_(idxs_anchor, idxs_pool)]\n cov_new_new = self.cov_est[np.ix_(idxs_pool, idxs_pool)]\n\n # add self.la*identidy and invert\n coo_inv = np.linalg.inv(cov_old_old + np.eye(len(idxs_anchor)) * self.la)\n # calculate posterior covariance matrix\n self.post_cov_est = cov_new_new - cov_old_new.T.dot(coo_inv).dot(cov_old_new)\n return np.diag(self.post_cov_est)\n\n # fast updating procedure, calculates new posterior covariance matrix,\n # after idxs_in_X_pool have been selected\n def eval_post_var_new_points(self, idxs_in_X_pool):\n if len(idxs_in_X_pool) == 1:\n return self.eval_post_var_new_point(idxs_in_X_pool[0])\n selected = (\n self.post_cov_est[np.ix_(idxs_in_X_pool, idxs_in_X_pool)]\n + np.eye(len(idxs_in_X_pool)) * self.la\n )\n covariances = self.post_cov_est[idxs_in_X_pool, :]\n update_mat = 
covariances.T.dot(np.linalg.inv(selected)).dot(covariances)\n self.post_cov_est -= update_mat\n # ensure that selected idxs have zero covariance\n self.post_cov_est[idxs_in_X_pool, :] = 0\n self.post_cov_est[:, idxs_in_X_pool] = 0\n return np.diag(self.post_cov_est)\n\n # same as above, optimized for len(idx_in_X_pool)==1\n def eval_post_var_new_point(self, idx_in_X_pool):\n submat = self.post_cov_est[idx_in_X_pool, :].reshape(-1, 1)\n update_mat = submat.dot(submat.T)\n\n update_mat = update_mat / (\n self.post_cov_est[idx_in_X_pool, idx_in_X_pool] + self.la\n )\n self.post_cov_est = self.post_cov_est - update_mat\n self.post_cov_est[idx_in_X_pool, :] = 0\n self.post_cov_est[:, idx_in_X_pool] = 0\n return np.diag(self.post_cov_est)\n", "id": "432694", "language": "Python", "matching_score": 1.4516067504882812, "max_stars_count": 2, "path": "source/run_experiment/gpa.py" }, { "content": "\"\"\"\nbnn_model.py\n\nThis module implements a Bayesian neural network in tensorflow probability.\nIt reuses code from\n\n @misc{Hafner2018,\n \ttitle={Noise Contrastive Priors for Functional Uncertainty},\n \tauthor={<NAME> and <NAME> and <NAME> and <NAME> and <NAME>},\n \tyear={2018, accessed 28.09.2020},\n \teprint={1807.09289},\n \tarchivePrefix={arXiv},\n \turl={https://github.com/brain-research/ncp}\n }\n\nThe _network_ function defines the network layers and is called by define_graph.\nThe _define_graph_ function initializes the corresponding tensors in a graph.\n\nThe defined graph can be accessed and run in a tf.Session via BNN.graph\n\"\"\"\n\nfrom tensorflow_probability import distributions as tfd\nimport tensorflow_probability as tfp\nimport tensorflow.compat.v1 as tf\n\ntf.disable_v2_behavior()\n\nfrom classes.attrdict import AttrDict\nimport numpy as np\n\nfrom .base_model import BaseModel\n\n# defines the network architecture and makes it Bayes by defining prior, posterior\n# and a DenseReparameterization which implements the inference via stochastic\n# forward passes such that an tf.optimizer can be used for training\ndef network(inputs, config):\n # classical dense connected layers\n hidden = inputs\n for i, size in enumerate(config.layer_sizes):\n hidden = tf.layers.dense(hidden, size, tf.nn.leaky_relu)\n\n # define the posterior as an independent normal with given std\n init_std = np.log(np.exp(config.weight_std) - 1).astype(np.float32)\n kernel_posterior = tfd.Independent(\n tfd.Normal(\n tf.get_variable(\n \"kernel_mean\",\n (hidden.shape[-1].value, 1),\n tf.float32,\n tf.random_normal_initializer(0, config.weight_std),\n ),\n tf.nn.softplus(\n tf.get_variable(\n \"kernel_std\",\n (hidden.shape[-1].value, 1),\n tf.float32,\n tf.constant_initializer(init_std),\n )\n ),\n ),\n 2,\n )\n # prior is a normal as well and has the same shape as the posterior\n kernel_prior = tfd.Independent(\n tfd.Normal(\n tf.zeros_like(kernel_posterior.mean()),\n tf.zeros_like(kernel_posterior.mean()) + tf.nn.softplus(init_std),\n ),\n 2,\n )\n # for the bias, the posterior is simply a constant\n bias_prior = None\n bias_posterior = tfd.Deterministic(\n tf.get_variable(\"bias_mean\", (1,), tf.float32, tf.constant_initializer(0.0))\n )\n # add the KL divergence of prior and posterior to tf.GraphKeys.REGULARIZATION_LOSSES\n # to add them later in the loss computation\n tf.add_to_collection(\n tf.GraphKeys.REGULARIZATION_LOSSES,\n tfd.kl_divergence(kernel_posterior, kernel_prior),\n )\n # make the network probabilistic\n mean = tfp.layers.DenseReparameterization(\n 1,\n kernel_prior_fn=lambda *args, **kwargs: 
kernel_prior,\n kernel_posterior_fn=lambda *args, **kwargs: kernel_posterior,\n bias_prior_fn=lambda *args, **kwargs: bias_prior,\n bias_posterior_fn=lambda *args, **kwargs: bias_posterior,\n )(hidden)\n # define output\n mean_dist = tfd.Normal(\n tf.matmul(hidden, kernel_posterior.mean()) + bias_posterior.mean(),\n tf.sqrt(tf.matmul(hidden ** 2, kernel_posterior.variance())),\n )\n std = tf.layers.dense(hidden, 1, tf.nn.softplus) + 1e-6\n data_dist = tfd.Normal(mean, std)\n return data_dist, mean_dist\n\n\n# defines the computational graph in tensorflow\n# contains the tensors for the network model, as well as the optimizer\ndef define_graph(config):\n network_tpl = tf.make_template(\"network\", network, config=config)\n inputs = tf.placeholder(tf.float32, [None, config.num_inputs], name=\"inputs\")\n targets = tf.placeholder(tf.float32, [None, 1], name=\"targets\")\n num_samples = tf.placeholder(tf.int32, [], name=\"num_samples\")\n num_rand_samples = tf.placeholder(tf.int32, [])\n batch_size = tf.shape(inputs)[0]\n\n data_dist, mean_dist = network_tpl(inputs)\n assert len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n divergence = sum(\n [\n tf.reduce_sum(tensor)\n for tensor in tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n ]\n )\n num_batches = tf.cast(num_samples, float) / tf.cast(batch_size, float)\n losses = [\n config.divergence_scale * divergence / num_batches,\n -data_dist.log_prob(targets),\n ]\n loss = sum(tf.reduce_sum(loss) for loss in losses) / tf.cast(batch_size, float)\n optimizer = tf.train.AdamOptimizer(config.learning_rate)\n gradients, variables = zip(\n *optimizer.compute_gradients(loss, colocate_gradients_with_ops=True)\n )\n if config.clip_gradient:\n gradients, _ = tf.clip_by_global_norm(gradients, config.clip_gradient)\n optimize = optimizer.apply_gradients(zip(gradients, variables))\n reset_optimizer = tf.variables_initializer(optimizer.variables())\n\n data_mean = mean_dist.mean()\n data_mean_sample = mean_dist.sample(num_rand_samples)\n data_noise = data_dist.stddev()\n data_uncertainty = mean_dist.stddev()\n\n return AttrDict(locals())\n\n\nclass BNN(BaseModel):\n def __init__(self, net_config, train_schedule):\n super().__init__(net_config)\n self.graph = define_graph(net_config)\n self.ts = train_schedule\n self.has_uncertainty = True\n\n def sample_y(self, X_data, num_rand_samples=1):\n feed_dict = {\n self.graph.inputs: X_data,\n self.graph.num_rand_samples: num_rand_samples,\n }\n y_data = self.session.run(self.graph.data_mean_sample, feed_dict=feed_dict)\n y_data = y_data.reshape(num_rand_samples, len(X_data))\n return y_data\n", "id": "3787718", "language": "Python", "matching_score": 4.426312446594238, "max_stars_count": 2, "path": "source/models/bnn_model.py" }, { "content": "\"\"\"\nbase_model.py\n\nThis module implements the BaseModel class from which the other network models\nare derived. 
The class holds a tensorflow session and a net_config.\n\nThe class implements training, evaluation and prediction functions.\n\"\"\"\n\n\nfrom tensorflow_probability import distributions as tfd\nimport tensorflow_probability as tfp\nimport tensorflow.compat.v1 as tf\n\nimport numpy as np\nimport scipy.stats\n\nfrom classes.attrdict import AttrDict\nfrom utils import get_global_var\n\n\nclass BaseModel:\n def __init__(self, net_config):\n self.net_config = net_config\n self.session = None\n\n def set_session(self, sess):\n self.session = sess\n\n \"\"\"\n This is the core training function for all neural networks.\n Training is performed in the classical SGD fashion on random subsets of the\n training data. A AttrDict with metrics is returned after training.\n \"\"\"\n\n def train(self, X_train, y_train, X_test, y_test, start_epoch=0, n_epochs=None):\n if n_epochs is None:\n n_epochs = self.ts.num_epochs\n logger = get_global_var(\"logger\")\n if self.session is None:\n raise Exception(\"TF session in the network model is not set.\")\n if len(X_train) == 0:\n raise Exception(\"Training data is empty.\")\n\n metrics = AttrDict(\n train_likelihoods=[],\n train_rmses=[],\n test_likelihoods=[],\n test_rmses=[],\n epochs=[],\n )\n # reset optimizer\n self.session.run(self.graph.reset_optimizer)\n # classical training iterations on random subsets of the training data\n # training is performed by calling self.graph.optimize\n for epoch in range(n_epochs):\n indices = np.arange(len(X_train))\n np.random.shuffle(indices)\n for from_idx in range(0, len(X_train), self.ts.batch_size):\n to_idx = min(from_idx + self.ts.batch_size, len(X_train))\n current = indices[from_idx:to_idx]\n self.session.run(\n self.graph.optimize,\n {\n self.graph.inputs: X_train[current],\n self.graph.targets: y_train[current],\n self.graph.num_samples: len(X_train),\n },\n )\n # eval metrics after defined epochs\n if epoch in self.ts.eval_after_epochs:\n train_llh, train_rmse = self.evaluate(X_train, y_train)\n test_llh, test_rmse = self.evaluate(X_test, y_test)\n metrics.train_likelihoods.append(train_llh)\n metrics.train_rmses.append(train_rmse)\n metrics.test_likelihoods.append(test_llh)\n metrics.test_rmses.append(test_rmse)\n metrics.epochs.append(start_epoch + epoch)\n\n # print metrics after defined epochs\n if epoch in self.ts.log_after_epochs:\n logger.info(\n \" Epoch:\",\n metrics.epochs[-1],\n \"\\n train log likelihood:\",\n metrics.train_likelihoods[-1],\n \"\\n train rmses:\",\n metrics.train_rmses[-1],\n \"\\n test log likelihood:\",\n metrics.test_likelihoods[-1],\n \"\\n test rmses:\",\n metrics.test_rmses[-1],\n )\n return metrics\n\n # returns likelihood and rmse\n def evaluate(self, X_data, y_data, batch_size=64):\n likelihoods, squared_distances = [], []\n for index in range(0, len(X_data), batch_size):\n target = y_data[index : index + batch_size]\n feed_dict = {self.graph.inputs: X_data[index : index + batch_size]}\n\n mean, noise, uncertainty = self.session.run(\n [\n self.graph.data_mean,\n self.graph.data_noise,\n self.graph.data_uncertainty,\n ],\n feed_dict,\n )\n squared_distances.append((target - mean) ** 2)\n if self.has_uncertainty:\n std = np.sqrt(noise ** 2 + uncertainty ** 2 + 1e-8)\n else:\n std = noise\n likelihood = scipy.stats.norm(mean, std).logpdf(target)\n likelihoods.append(likelihood)\n likelihood = np.concatenate(likelihoods, 0).sum(1).mean(0)\n rmse = np.sqrt(np.concatenate(squared_distances, 0).sum(1).mean(0))\n return likelihood, rmse\n\n # predict y-values for X_data\n def 
predict(self, X_data, batch_size=64):\n assert len(X_data) > 0\n means = []\n for index in range(0, len(X_data), batch_size):\n feed_dict = {self.graph.inputs: X_data[index : index + batch_size]}\n\n mean = self.session.run(\n [\n self.graph.data_mean,\n ],\n feed_dict=feed_dict,\n )\n means.append(mean)\n mean = np.concatenate(means, axis=0)\n return mean\n\n # predict y-values for X_data and return std as well\n def predict_w_std(self, X_data, batch_size=64):\n assert len(X_data) > 0\n means, noises, uncertainties = [], [], []\n for index in range(0, len(X_data), batch_size):\n feed_dict = {self.graph.inputs: X_data[index : index + batch_size]}\n mean, noise, uncertainty = self.session.run(\n [\n self.graph.data_mean,\n self.graph.data_noise,\n self.graph.data_uncertainty,\n ],\n feed_dict=feed_dict,\n )\n means.append(mean)\n noises.append(noise)\n uncertainties.append(uncertainty)\n\n mean = np.concatenate(means, axis=0)\n noise = np.concatenate(noises, axis=0)\n if self.has_uncertainty:\n uncertainty = np.concatenate(uncertainties, axis=0)\n std = np.sqrt(noise ** 2 + uncertainty ** 2) if self.has_uncertainty else noise\n return mean, std\n", "id": "9765970", "language": "Python", "matching_score": 1.8679143190383911, "max_stars_count": 2, "path": "source/models/base_model.py" }, { "content": "\"\"\"\nyaml_load_dump.py\n\nThis module implements all interactions with .yaml files and is based on ruamel.\n\nWe store the experiment configurations in .yaml files and the functions in this\nmodule load and dump the configs in a persistent way for reproducablity.\n\nIf variables of configs are not set by the .yaml file, defaults are loaded from\nsource/utils/default_configs.py.\n\"\"\"\n\n\nimport ruamel.yaml as yaml\nfrom classes.attrdict import AttrDict\nfrom utils.default_configs import default_net_config, default_train_schedule\n\n\ndef load_yaml(file_path):\n with open(file_path, \"r\") as file:\n data = yaml.load(file, Loader=yaml.Loader)\n for key in data.keys():\n if isinstance(data[key], dict):\n data[key] = AttrDict(data[key])\n return AttrDict(data)\n\n\ndef dump_yaml(file_path, data):\n with open(file_path, \"w\") as file:\n yaml.dump(data.copy(), file, default_flow_style=False)\n\n\ndef update_dict(d_to_update, d_with_update):\n for key in d_with_update:\n d_to_update[key] = d_with_update[key]\n return d_to_update\n\n\ndef load_complete_config_yaml(file_path, num_inputs):\n config = load_yaml(file_path)\n\n parms = config.parms\n name_model = parms.name_model\n al_schedule = config.al_schedule\n\n nc_from_file = config.net_config\n nc = default_net_config(name_model, num_inputs)\n nc = update_dict(nc, nc_from_file)\n\n ts_from_file = config.train_schedule\n ts_from_file.eval_after_epochs = range(\n 0, ts_from_file.num_epochs, ts_from_file.eval_after_epochs\n )\n ts_from_file.log_after_epochs = range(\n 0, ts_from_file.num_epochs, ts_from_file.log_after_epochs\n )\n ts = default_train_schedule(name_model)\n ts = update_dict(ts, ts_from_file)\n\n seed = config.seed\n return parms, al_schedule, nc, ts, seed\n\n\ndef dump_complete_config_yaml(\n file_path, parms, al_schedule, net_config, train_schedule, seed\n):\n dump_dict = dict()\n dump_dict[\"parms\"] = dict(parms)\n dump_dict[\"al_schedule\"] = dict(al_schedule)\n dump_dict[\"net_config\"] = dict(net_config)\n dump_dict[\"net_config\"].pop(\"clip_gradient\")\n dump_dict[\"net_config\"].pop(\"num_inputs\")\n dump_dict[\"net_config\"].pop(\"weight_std\")\n dump_dict[\"train_schedule\"] = dict(train_schedule)\n 
dump_dict[\"train_schedule\"][\"eval_after_epochs\"] = dump_dict[\"train_schedule\"][\n \"eval_after_epochs\"\n ].step\n dump_dict[\"train_schedule\"][\"log_after_epochs\"] = dump_dict[\"train_schedule\"][\n \"log_after_epochs\"\n ].step\n dump_dict[\"seed\"] = seed\n dump_yaml(file_path, dump_dict)\n", "id": "10368026", "language": "Python", "matching_score": 4.436865329742432, "max_stars_count": 2, "path": "source/utils/yaml_load_dump.py" }, { "content": "\"\"\"\ndefault_configs.py\n\nThis module contains defaults for al_schedule, train_schedule and net_config.\nIt is called by source/utils/yaml_load_dump.py.\n\"\"\"\n\n\nfrom classes.attrdict import AttrDict\n\n\ndef default_al_schedule():\n config = AttrDict()\n config.num_train_init = 100\n config.al_iter = 4\n config.num_al_incr = 50\n return config\n\n\ndef default_train_schedule(name_model, num_epochs=1000):\n config = AttrDict()\n config.num_epochs = num_epochs\n config.eval_after_epochs = range(0, config.num_epochs, 250)\n config.log_after_epochs = range(0, config.num_epochs, 500)\n config.batch_size = 64\n config.temperature = 0.5\n config.has_uncertainty = True\n return config\n\n\ndef default_net_config(name_model, num_inputs=1):\n config = AttrDict()\n config.num_inputs = num_inputs\n config.layer_sizes = [50, 50]\n config.divergence_scale = 1.0\n config.learning_rate = 3e-3\n config.weight_std = 0.1\n config.clip_gradient = 100.0\n config.dropout_rate = 0.0\n return config\n", "id": "10552227", "language": "Python", "matching_score": 2.648951292037964, "max_stars_count": 2, "path": "source/utils/default_configs.py" }, { "content": "from .global_variables import set_global_var, get_global_var\nfrom .timing import secs_to_str\nfrom .yaml_load_dump import load_complete_config_yaml, dump_complete_config_yaml\nfrom .default_configs import (\n default_al_schedule,\n default_train_schedule,\n default_net_config,\n)\nfrom .plotting import plot_rmse_metrics\n", "id": "12779556", "language": "Python", "matching_score": 1.3711360692977905, "max_stars_count": 2, "path": "source/utils/__init__.py" }, { "content": "\"\"\"\nplotter.py\n\nThis moduls implements the Plotter class.\nUsually an instance of Plotter is initialized as global varianble at the start\nof the program flow. 
A Plotter can visualize metrics as well as models and\npolicies.\n\"\"\"\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom utils import get_global_var\n\n\ndef moving_avg(data, width):\n avg = pd.Series(data).rolling(width).mean().iloc[width - 1 :].values\n return avg\n\n\ndef plot_metrics(ax, epochs, data, label, avg_w=1, color=None):\n epochs, data = moving_avg(epochs, avg_w), moving_avg(data, avg_w)\n ax.plot(epochs, data, label=label, color=color)\n\n\ndef plot_rmse_metrics(runs_metrics, labels, title, avg_w=10):\n plt.rcParams.update({\"font.size\": 15})\n colors = [\"C\" + str(i) for i in range(10)]\n plt.close()\n fig, ax = plt.subplots(2, 1, figsize=(15, 8))\n fig.suptitle(title)\n for i, metrics in enumerate(runs_metrics):\n label = labels[i]\n color = colors[i]\n plot_metrics(\n ax[0],\n metrics.epochs,\n metrics.train_rmses,\n label,\n avg_w=avg_w,\n color=color,\n )\n plot_metrics(\n ax[1],\n metrics.epochs,\n metrics.test_rmses,\n label,\n avg_w=avg_w,\n color=color,\n )\n ax[0].set_ylabel(\"train RMSE\")\n ax[1].set_ylabel(\"test RMSE\")\n for a in ax.flatten():\n a.legend()\n a.set_xlabel(\"training epochs\")\n return fig\n", "id": "6480461", "language": "Python", "matching_score": 2.0315780639648438, "max_stars_count": 2, "path": "source/utils/plotting.py" }, { "content": "\"\"\"\nglobal_variables.py\n\nWhen this module gets loaded, a dict() of global variables is initialized.\nThis is really handy for using a global logger, plotter and random seed yet usage\nof global variables should be avoided whenever possible.\n\nImplements set_global_var() and get_global_var() for definition and access\nof global variables.\n\"\"\"\n\nGLOBAL_VARIABLES = dict()\n\n\ndef set_global_var(var_name, var_value):\n global GLOBAL_VARIABLES\n GLOBAL_VARIABLES[var_name] = var_value\n return var_value\n\n\ndef get_global_var(var_name):\n global GLOBAL_VARIABLES\n if not var_name in GLOBAL_VARIABLES.keys():\n msg = \"Global variable \" + str(var_name) + \" was not set.\"\n raise Exception(msg)\n return GLOBAL_VARIABLES[var_name]\n", "id": "872689", "language": "Python", "matching_score": 0.3486131429672241, "max_stars_count": 2, "path": "source/utils/global_variables.py" }, { "content": "\"\"\"\nattrdict.py\n\nfrom https://danijar.com/patterns-for-fast-prototyping-with-tensorflow/\n\nSimple dict with faster access to items.\n\"\"\"\n\n\nclass AttrDict(dict):\n __getattr__ = dict.__getitem__\n __setattr__ = dict.__setitem__\n", "id": "3143320", "language": "Python", "matching_score": 0, "max_stars_count": 2, "path": "source/classes/attrdict.py" }, { "content": "from .bnn_model import BNN\n", "id": "11275748", "language": "Python", "matching_score": 0.025975940749049187, "max_stars_count": 2, "path": "source/models/__init__.py" }, { "content": "\"\"\"\nexp_config.py\n\nThis module implements the class ExpConfig which is derived from class AttrDict.\n\nAn instance of ExpConfig holds all the information needed for a single experiment.\nIn particular: the dataset, the training and active learning schedule, the\nsampling policy and the current random seed.\n\"\"\"\n\nfrom .attrdict import AttrDict\n\n\nclass ExpConfig(AttrDict):\n def __init__(\n self, name, ds, model, train_schedule, al_schedule, sample_policy, seed\n ):\n self.name = name\n self.ds = ds\n self.model = model\n self.train_schedule = train_schedule\n self.al_schedule = al_schedule\n self.sample_policy = sample_policy\n self.seed = seed\n\n # returns a sting with important information about the ExpConfig 
instance\n def important_stats(self):\n s = \"Model: \" + str(type(self.model).__name__) + \"\\n\"\n s += \" Layers: \" + str(self.model.net_config.layer_sizes) + \"\\n\"\n s += \" Learn Rate: \" + str(self.model.net_config.learning_rate) + \"\\n\"\n s += \" Batch size: \" + str(self.train_schedule.batch_size) + \"\\n\"\n s += \" Num epochs: \" + str(self.train_schedule.num_epochs) + \"\\n\"\n s += \" Active Learning: \" + str(self.al_schedule) + \"\\n\"\n s += \" Sample Policy: \" + str(type(self.sample_policy).__name__) + \"\\n\"\n try:\n s += (\n \" Sample batch_size: \"\n + str(self.sample_policy.batch_size)\n + \"\\n Number MC samples: \"\n + str(self.sample_policy.n_mc_samples)\n + \"\\n\"\n )\n except:\n s += \" No batch size and MC samples for this policy.\\n\"\n s += \" Seed: \" + str(self.seed) + \"\\n\"\n return s\n", "id": "5640077", "language": "Python", "matching_score": 2.9209935665130615, "max_stars_count": 2, "path": "source/classes/exp_config.py" }, { "content": "\"\"\"\nmake_exp_config.py\n\nThis module is called by run_experiments.py and contains a function, which builds\na ExpConfig (source/classes/exp_config.py) from parameters.\n\"\"\"\n\n\nimport utils.yaml_load_dump as ydl\nfrom classes import ExpConfig\nfrom run_experiment.initialize import init_model, init_sample_policy\n\n\ndef make_exp_config(ds, exp_conf_path, global_seed, fp_dump_to):\n num_inputs = ds.X_pool().shape[1]\n\n (\n parms,\n al_schedule,\n net_config,\n train_schedule,\n exp_seed,\n ) = ydl.load_complete_config_yaml(exp_conf_path, num_inputs)\n\n train_schedule.has_uncertainty = not (\n parms[\"name_model\"] == \"det\" or parms[\"name_model\"] == \"dets\"\n )\n # assert that we have sufficient datapoints for the active learning schedule\n if (\n len(ds.X_pool())\n < al_schedule.num_train_init + al_schedule.num_al_incr * al_schedule.al_iter\n ):\n raise Exception(\"Pool is to small for active learning schedule.\")\n\n model = init_model(\n parms[\"name_model\"], train_schedule, net_config, global_seed + exp_seed\n )\n sample_policy = init_sample_policy(\n parms[\"sample_policy\"], parms[\"sampling_batch_size\"], parms[\"n_mc_samples\"]\n )\n\n # filename as exp name\n exp_name = exp_conf_path.split(\"/\")[-1]\n # additionally dump config to results folder\n ydl.dump_complete_config_yaml(\n fp_dump_to + exp_name,\n parms,\n al_schedule,\n net_config,\n train_schedule,\n exp_seed,\n )\n return ExpConfig(\n exp_name,\n ds.make_copy(),\n model,\n train_schedule,\n al_schedule,\n sample_policy,\n global_seed + exp_seed,\n )\n", "id": "7340457", "language": "Python", "matching_score": 3.187830686569214, "max_stars_count": 2, "path": "source/run_experiment/make_exp_config.py" }, { "content": "\"\"\"\nrun_experiment.py\n\nThis modul represents the current API of the project.\n\nIt prepares, runs and performs the actual active learning experiment.\n\nIn particular it implements the function prepare_experiments() which calls functions\nfrom the run_experiment/initialize.py and run_experiment/prepare_variables.py modules.\nAs well as the function run_experiment() which can be utilized to run a single\nexperiment, i.e. 
which loads a config.yaml and initializes an independent networks.\n\"\"\"\n\n\nfrom time import time\nfrom utils import set_global_var, get_global_var\nfrom .prepare_variables import prepare_global_vars\nfrom .initialize import (\n init_tf_session,\n init_rs,\n init_dataset,\n get_initial_idxs,\n)\nfrom .make_exp_config import make_exp_config\n\n\ndef add_metrics(metric1, metric2):\n for key in metric1:\n metric1[key] += metric2[key]\n return metric1\n\n\ndef perform_active_learning(ex_name, ds, model, sample_policy, al_schedule, seed):\n # set seed again to get the same initial training points for fair comparison\n init_rs(seed)\n logger = get_global_var(\"logger\")\n set_global_var(\"spolicy\", sample_policy)\n\n train_indices = get_initial_idxs(ds, al_schedule.num_train_init)\n ds.add_to_training(train_indices)\n sample_policy.update(ds.X_pool(), ds.X_train())\n logger.info(\"initial training on \", len(ds.X_train()), \" data points\")\n metrics = model.train(ds.X_train(), ds.y_train(), ds.X_test(), ds.y_test())\n metrics.time_sampling = 0.0\n for i in range(al_schedule.al_iter):\n st = time()\n idxs_sample = sample_policy.request_samples(al_schedule.num_al_incr, model)\n time_sampling = time() - st\n ds.add_to_training(idxs_sample)\n logger.info(\n \"Adding\",\n len(idxs_sample),\n \"idxs to training data. Total:\",\n len(ds.X_train()),\n )\n # accout for final iteration in training\n start_epoch = metrics.epochs[-1] + model.ts.eval_after_epochs.step\n new_metrics = model.train(\n ds.X_train(),\n ds.y_train(),\n ds.X_test(),\n ds.y_test(),\n start_epoch=start_epoch,\n )\n new_metrics.time_sampling = time_sampling\n metrics = add_metrics(metrics, new_metrics)\n sample_policy.update(ds.X_pool(), ds.X_train())\n return metrics\n\n\ndef prepare_experiments(results_dir, size_pool, size_test, random_seed, log_level):\n init_rs(random_seed)\n prepare_global_vars(results_dir, random_seed, log_level)\n ds = init_dataset(\"housing\", size_pool, size_test)\n return ds\n\n\ndef run_experiment(ds, results_dir, config_yaml, random_seed):\n config = make_exp_config(ds, config_yaml, random_seed, results_dir)\n\n logger = get_global_var(\"logger\")\n logger.set_prefix(config.name)\n logger.info(\"\\n\\nStarting new experiment:\", config.name)\n logger.info(config.important_stats())\n metrics = perform_active_learning(\n config.name,\n config.ds,\n config.model,\n config.sample_policy,\n config.al_schedule,\n config.seed,\n )\n return metrics\n", "id": "12026642", "language": "Python", "matching_score": 4.2776384353637695, "max_stars_count": 2, "path": "source/run_experiment/run_experiment.py" }, { "content": "\"\"\"\ninitialize.py\n\nThis module gets called from run_experiment.py and initializes various variables.\nIn particular: global random seed, tensorflow session, the network model, the\nsampling policy, the dataset and the initial training points.\n\"\"\"\n\nimport numpy as np\n\nimport tensorflow.compat.v1 as tf\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.utils.random import sample_without_replacement\n\nfrom models import BNN\nfrom utils.global_variables import set_global_var, get_global_var\nfrom datasets import (\n get_housing_dataset_pickle,\n)\n\nfrom run_experiment.sampling import RandomPolicy, GPAPolicy, BatchGPAPolicy\n\n# set all needed random seeds\ndef init_rs(seed):\n tf.set_random_seed(seed)\n np.random.seed(seed)\n\n\n# each model needs and holds its own tf.Session() instance\ndef init_tf_session():\n init = tf.global_variables_initializer()\n sess = 
tf.Session()\n sess.run(init)\n return sess\n\n\n# initialize a network model with tensorflow session\n# models are can be found in source/models/\n# raises: Exception if name_model is unknown\ndef init_model(name_model, train_schedule, net_config, seed):\n tf.reset_default_graph()\n init_rs(seed)\n # only bnn available here\n if name_model == \"bnn\":\n model = BNN(net_config=net_config, train_schedule=train_schedule)\n else:\n raise Exception(\n \"Unknown model requested: \" + str(policy_name) + \" (valid input: {bnn})\"\n )\n sess = init_tf_session()\n model.set_session(sess)\n return model\n\n\n# initialize a sample policy\n# sample policies are stored in source/sampling.py\n# raises: Exception if policy_name is unknown\ndef init_sample_policy(policy_name, sampling_batch_size, n_mc_samples):\n if policy_name == \"random\":\n return RandomPolicy()\n elif policy_name == \"gpa\":\n return GPAPolicy(sampling_batch_size, n_mc_samples)\n elif policy_name == \"bgpa\":\n return BatchGPAPolicy(sampling_batch_size, n_mc_samples)\n else:\n raise Exception(\n \"Unknown sampling policy requested: \"\n + str(policy_name)\n + \" (valud: {random, gpa, bgpa})\"\n )\n\n\n# initialize a dataset, resizes it and applies a StandardScaler\ndef init_dataset(name_dataset, size_pool, size_test):\n logger = get_global_var(\"logger\")\n\n # only housing data is available here\n assert name_dataset == \"housing\"\n\n # load data\n ds = get_housing_dataset_pickle()\n\n # size_pool and size_test are 0? -> do not resize dataset\n if size_pool != 0 or size_test != 0:\n ds.reduce_size(size_pool, size_test)\n\n scaler = StandardScaler(copy=False)\n ds.apply_scaler(scaler)\n scaler_y = StandardScaler(copy=False)\n ds.apply_scaler_y(scaler_y)\n\n logger.info(\n \"Working on dataset:\",\n ds.name,\n \"with pool data shape:\",\n ds.X_pool().shape,\n \"and test data shape:\",\n ds.X_test().shape,\n )\n logger.info(\"Applying scaler:\", str(scaler))\n return ds\n\n\n# returns the initial training idxs\ndef get_initial_idxs(ds, n_points):\n return sample_without_replacement(ds.X_pool().shape[0], n_points)\n", "id": "3179641", "language": "Python", "matching_score": 2.5525248050689697, "max_stars_count": 2, "path": "source/run_experiment/initialize.py" }, { "content": "\"\"\"\ndataset.py\n\nThis module implements the class Dataset.\n\nThe Dataset class holds the data on which the experiments are performed.\nIt distinguishes between pool, test and training data.\nThe actual data is only stored *once* and access is granted via reference.\nAdditional copies with unique access indices can be obtained via .make_copy().\n\"\"\"\n\nimport numpy as np\nfrom sklearn.utils.random import sample_without_replacement\n\n\nclass Dataset:\n # X_pool, y_pool, X_test, y_test are refences to the real data storage\n # due to python's mutable variable paradigm\n # different instances of Dataset hold differnt sets of train/pool_idxs\n def __init__(self, X_pool, y_pool, X_test, y_test, name=None):\n self.train_idxs = np.empty(0)\n self.pool_idxs = np.arange(len(X_pool))\n self._X_pool = X_pool\n self._y_pool = y_pool\n self._X_test = X_test\n self._y_test = y_test\n self.name = name\n\n def apply_scaler(self, scaler):\n self.scaler = scaler\n self.scaler.fit(self._X_pool)\n self._X_pool = self.scaler.transform(self._X_pool)\n self._X_test = self.scaler.transform(self._X_test)\n\n def apply_scaler_y(self, scaler):\n self.scaler_y = scaler\n self.scaler_y.fit(self._y_pool)\n self._y_pool = self.scaler_y.transform(self._y_pool)\n self._y_test = 
self.scaler_y.transform(self._y_test)\n\n def reset_pool(self):\n self.train_idxs = np.empty(0)\n self.pool_idxs = np.arange(len(self._X_pool))\n\n # different copies hold reference ot the same data, but different indices\n def make_copy(self, approximate_pool=False):\n ds = Dataset(self._X_pool, self._y_pool, self._X_test, self._y_test, self.name)\n # if training data is not empty -> copy the idxs\n if len(self.train_idxs) > 2:\n ds.train_idxs = np.array(self.train_idxs)\n ds.pool_idxs = np.array(self.pool_idxs)\n return ds\n\n # subsample data to requested size\n def reduce_size(self, size_pool, size_test):\n assert size_pool <= self._X_pool.shape[0]\n assert size_test <= self._X_test.shape[0]\n pool_sample = sample_without_replacement(\n self._X_pool.shape[0], n_samples=size_pool\n )\n test_sample = sample_without_replacement(\n self._X_test.shape[0], n_samples=size_test\n )\n self._X_pool = self._X_pool[pool_sample]\n self._y_pool = self._y_pool[pool_sample]\n self._X_test = self._X_test[test_sample]\n self._y_test = self._y_test[test_sample]\n self.train_idxs = np.empty(0)\n self.pool_idxs = np.arange(len(self._X_pool))\n\n def add_to_training(self, idxs, return_data=False):\n if not self.train_idxs.size > 0:\n self.train_idxs = np.array(idxs)\n else:\n assert np.max(idxs) < len(self.pool_idxs)\n self.train_idxs = np.append(self.train_idxs, self.pool_idxs[idxs])\n\n if return_data:\n added_data = self._X_pool[self.pool_idxs[idxs]]\n self.pool_idxs = np.delete(self.pool_idxs, idxs)\n return added_data\n else:\n self.pool_idxs = np.delete(self.pool_idxs, idxs)\n\n def X_train(self):\n if not self.train_idxs.size > 0:\n return np.empty(0)\n else:\n return self._X_pool[self.train_idxs]\n\n def y_train(self):\n if not self.train_idxs.size > 0:\n return np.empty(0)\n else:\n return self._y_pool[self.train_idxs]\n\n def X_pool(self):\n if not self.pool_idxs.size > 0:\n return np.empty(0)\n else:\n return self._X_pool[self.pool_idxs]\n\n def y_pool(self):\n if not self.pool_idxs.size > 0:\n return np.empty(0)\n else:\n return self._y_pool[self.pool_idxs]\n\n def X_test(self):\n return self._X_test\n\n def y_test(self):\n return self._y_test\n\n def get_data(self):\n return (\n self.X_train(),\n self.y_train(),\n self.X_pool(),\n self.y_pool(),\n self.X_test(),\n self.y_test(),\n )\n", "id": "3667229", "language": "Python", "matching_score": 2.2600409984588623, "max_stars_count": 2, "path": "source/classes/dataset.py" }, { "content": "\"\"\"\nhousing_dataset.py\n\nThis module tries to load the housing dataset from 'data/housing.pickle' as a\ninstance of class Dataset (source/classes/dataset.py).\n\nIf called as __main__, the script downloades the dataset from sklearn and creates\na 'housing.pickle' file.\n\nAdditional information can be found at:\nhttps://scikit-learn.org/stable/datasets/index.html#california-housing-dataset\n\"\"\"\n\n\nimport sklearn.datasets\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nimport pandas as pd\nimport pickle\n\nfrom classes import Dataset\n\n\n# combine Bedrooms and general rooms, drop geographic infos\ndef get_housing_dataset():\n data_bunch = sklearn.datasets.fetch_california_housing()\n df = pd.DataFrame(data_bunch[\"data\"], columns=data_bunch[\"feature_names\"])\n df[\"AveRooms\"] += df[\"AveBedrms\"]\n df = df.drop(labels=[\"Latitude\", \"Longitude\", \"AveBedrms\"], axis=1)\n X_data = df.values\n y_data = data_bunch[\"target\"][:, np.newaxis]\n X_train, X_test, y_train, y_test = train_test_split(\n X_data, y_data, 
test_size=0.3, random_state=0\n )\n return X_train, y_train, X_test, y_test\n\n\ndef get_housing_dataset_pickle():\n try:\n with open(\"data/housing.pickle\", \"rb\") as file:\n d = pickle.load(file)\n X_train = d[\"X_train\"]\n X_train = X_train\n y_train = d[\"y_train\"]\n X_test = d[\"X_test\"]\n y_test = d[\"y_test\"]\n return Dataset(X_train, y_train, X_test, y_test, \"Housing Dataset\")\n\n except Exception as e:\n print(e)\n raise Exception(\n \"data/housing.pickle does not exist, please call source/datasets/housing_dataset.py\"\n )\n\n\ndef main():\n X_train, y_train, X_test, y_test = get_housing_dataset()\n\n d = dict()\n d[\"X_train\"] = X_train\n d[\"y_train\"] = y_train\n d[\"X_test\"] = X_test\n d[\"y_test\"] = y_test\n\n with open(\"data/housing.pickle\", \"wb\") as file:\n pickle.dump(d, file)\n print(\"Dumped housing data as data/housing.pickle\")\n\n ds = get_housing_dataset_pickle()\n print(\"Loaded succesfully from housing.pickle\")\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "11677188", "language": "Python", "matching_score": 2.4056460857391357, "max_stars_count": 2, "path": "source/datasets/housing_dataset.py" }, { "content": "from .housing_dataset import get_housing_dataset_pickle\n", "id": "3949134", "language": "Python", "matching_score": 1.1893692016601562, "max_stars_count": 2, "path": "source/datasets/__init__.py" } ]
2.031578
MrVic2011
[ { "content": "import pygame\n\nfrom constants import *\n\n\nclass Menu:\n \"\"\"\n A Class to manage the main menu of the game where\n you can choose the volume of the music, and the level\n you want to play.\n \"\"\"\n\n def __init__(self):\n self.btn_list = []\n self.font = pygame.font.Font(\"./assets/font/doublefeature.ttf\", 30)\n\n self.spawn_btn()\n\n def spawn_btn(self):\n \"\"\"\n Method of listing all menu's buttons\n \"\"\"\n\n # Center the button in the middle of the screen\n tmp_p1 = (SCREEN_WIDTH - 200) / 2\n tmp_p2 = (SCREEN_HEIGHT / 2) + 50\n\n # Initialize the button\n tmp_msg = \"JOUER\"\n tmp = Button(1, tmp_p1, tmp_p2, 50, 200, tmp_msg)\n self.btn_list.append(tmp)\n\n # Buttons for choosing level\n # Plus Button\n tmp_p1 = (SCREEN_WIDTH - 50) / 2\n tmp_p1 += 75\n tmp_p2 = (SCREEN_HEIGHT - 100) / 2\n\n tmp_msg = \"+\"\n tmp = Button(2, tmp_p1, tmp_p2, 50, 50, tmp_msg)\n self.btn_list.append(tmp)\n\n # Minus Button\n tmp_p1 = (SCREEN_WIDTH - 50) / 2\n tmp_p1 -= 75\n tmp_p2 = (SCREEN_HEIGHT - 100) / 2\n\n tmp_msg = \"-\"\n tmp = Button(3, tmp_p1, tmp_p2, 50, 50, tmp_msg)\n self.btn_list.append(tmp)\n\n def display_btn(self, window, back_color):\n \"\"\"\n Method for displaying all buttons of btn_list attribute\n :param back_color:\n :param window: Pygame Surface object\n \"\"\"\n for btn in self.btn_list:\n # Drawing the display_background of the button\n square = pygame.Rect(btn.pos[0], btn.pos[1], btn.width, btn.height)\n btn_color = back_color\n pygame.draw.rect(window, btn_color, square)\n\n # Center the text of the button\n txt_size = self.font.size(btn.txt)\n pos_1 = (btn.width - txt_size[0]) / 2\n pos_2 = (btn.height - txt_size[1]) / 2\n\n pos_1 = btn.pos[0] + pos_1\n pos_2 = btn.pos[1] + pos_2\n txt_pos = (pos_1, pos_2)\n\n # Print the button's text on screen\n text = self.font.render(btn.txt, 1, (0, 0, 0))\n window.blit(text, (txt_pos[0], txt_pos[1]))\n\n def btn_clicked(self, event):\n \"\"\"\n Check all button of the menu to see if they get clicked\n :param event: pygame Mouse's events\n :return:\n \"\"\"\n for btn in self.btn_list:\n if btn.clicked(event):\n return btn.id\n\n return None\n\n def display_lvl(self, window, lvl_id):\n \"\"\"\n\n :param lvl_id: level_id of Level object\n :param window: pygame Surface object\n :return:\n \"\"\"\n pos_1 = (SCREEN_WIDTH - 100) / 2\n pos_2 = (SCREEN_HEIGHT - 100) / 2\n\n lvl_id = str(lvl_id)\n\n level = Button(4, pos_1, pos_2, 50, 100, lvl_id)\n\n square = pygame.Rect(level.pos[0], level.pos[1], level.width, level.height)\n color = (0, 0, 0)\n pygame.draw.rect(window, color, square)\n\n # Center the text of the button\n txt_size = self.font.size(level.txt)\n pos_1 = (level.width - txt_size[0]) / 2\n pos_2 = (level.height - txt_size[1]) / 2\n\n pos_1 = level.pos[0] + pos_1\n pos_2 = level.pos[1] + pos_2\n txt_pos = (pos_1, pos_2)\n\n # Print the button's text on screen\n text = self.font.render(level.txt, 1, (178, 34, 34))\n window.blit(text, (txt_pos[0], txt_pos[1]))\n\n @staticmethod\n def display_background(window):\n \"\"\"\n Display the background of the Menu\n :return:\n \"\"\"\n window.fill((0, 0, 0))\n\n def update(self, window, lvl_number):\n \"\"\"\n Method to move all elements on the menu\n :param lvl_number:\n :type window:\n :return:\n \"\"\"\n self.display_background(window)\n self.display_btn(window, (34, 139, 34))\n self.display_lvl(window, lvl_number)\n\n\nclass Button:\n def __init__(self, btn_id, x, y, height, width, txt=\"\"):\n self.id = btn_id\n self.pos = (x, y)\n self.height = height\n 
self.width = width\n self.txt = txt\n\n def clicked(self, mouse):\n if mouse.pos[0] >= self.pos[0] + self.width:\n return False\n if mouse.pos[0] <= self.pos[0]:\n return False\n if mouse.pos[1] >= self.pos[1] + self.height:\n return False\n if mouse.pos[1] <= self.pos[1]:\n return False\n return True\n", "id": "8771186", "language": "Python", "matching_score": 2.600775957107544, "max_stars_count": 4, "path": "core/Menu.py" }, { "content": "# Imports\nimport pygame\n\nfrom constants import *\nfrom core.Bullet import Bullet\nfrom core.Level import Level\nfrom core.Menu import Menu\nfrom core.Player import Player\n\n\n# Main\nfrom core.Sound import Sound\n\n\ndef main():\n \"\"\"\n Main process of the game.\n Execute all classes and manage event returned by pygame.\n \"\"\"\n pygame.init()\n screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n\n sound = Sound()\n sound.generate_sound()\n\n # Menu\n menu = Menu()\n\n game = False\n config = True\n lvl_nbr = 1\n keys = KEYS_1\n skeys = KEYS_2\n\n while config:\n menu.update(screen, lvl_nbr)\n pygame.display.flip()\n\n for event in pygame.event.get():\n if event.type == QUIT:\n config = False\n if event.type == KEYDOWN and event.key == K_F1:\n config = False\n\n if event.type == MOUSEBUTTONDOWN and event.button == 1:\n # Play Button\n btn_return = menu.btn_clicked(event)\n if btn_return == 1:\n game = True\n config = False\n if btn_return == 2 and 1 <= lvl_nbr < LEVEL_NUMBER:\n lvl_nbr += 1\n if btn_return == 3 and 1 < lvl_nbr <= LEVEL_NUMBER:\n lvl_nbr -= 1\n\n # Level Initialization\n level = Level(lvl_nbr)\n player = Player(50, 100, 4)\n\n # Game\n while game:\n level.update_sprites(screen, player)\n\n for e in level.enemies:\n e.move(level)\n e.display(screen)\n\n pygame.display.flip()\n\n for event in pygame.event.get():\n if event.type == QUIT:\n game = False\n if event.type == KEYDOWN:\n if event.key == K_F1:\n message_on_screen(screen)\n game = False\n\n # PLayer movement event check\n if event.type == KEYDOWN:\n if event.key == keys[0]:\n player.statement_keys[0] = True\n if event.key == keys[1]:\n player.statement_keys[1] = True\n if event.key == keys[2]:\n player.statement_keys[2] = True\n if event.key == keys[3]:\n player.statement_keys[3] = True\n\n if event.type == KEYUP:\n if event.key == keys[0]:\n player.statement_keys[0] = False\n if event.key == keys[1]:\n player.statement_keys[1] = False\n if event.key == keys[2]:\n player.statement_keys[2] = False\n if event.key == keys[3]:\n player.statement_keys[3] = False\n\n if event.type == KEYDOWN:\n if event.key == skeys[0]:\n tmp = Bullet(player, 2, UP)\n level.bullets.append(tmp)\n if event.key == skeys[1]:\n tmp = Bullet(player, 2, LEFT)\n level.bullets.append(tmp)\n if event.key == skeys[2]:\n tmp = Bullet(player, 2, DOWN)\n level.bullets.append(tmp)\n if event.key == skeys[3]:\n tmp = Bullet(player, 2, RIGHT)\n level.bullets.append(tmp)\n\n if player.statement_keys != [False] * 4:\n player.move(level)\n\n pygame.quit()\n\n\ndef message_on_screen(window):\n \"\"\"\n Function to display a message on full screen.\n Used wehn the player loose.\n :param window: pygame Surface object\n :return:\n \"\"\"\n gameover = 1\n while gameover:\n window.fill((0, 0, 0))\n font = pygame.font.Font(\"./assets/font/pixel.ttf\", 100)\n\n text = \"Game Over !\"\n text_size = font.size(text)\n x = (SCREEN_WIDTH - text_size[0]) / 2\n y = (SCREEN_HEIGHT - text_size[1]) / 2\n\n msg = font.render(text, 1, (255, 255, 255))\n window.blit(msg, (x, y))\n\n pygame.display.flip()\n\n for event in 
pygame.event.get():\n if event.type == QUIT:\n gameover = False\n if event.type == KEYDOWN:\n if event.key == K_F1:\n gameover = False\n\n\nmain()\n", "id": "8096887", "language": "Python", "matching_score": 4.855607032775879, "max_stars_count": 4, "path": "main.py" }, { "content": "\"\"\"\nFile to stock all constants needed by the game.\n\"\"\"\nfrom pygame.locals import *\n\n# DIRECTION\nUP = 0\nLEFT = 1\nDOWN = 2\nRIGHT = 3\n\n# COLOR CODES\nBTN = (34, 139, 34)\nBTN_CLICKED = (0, 100, 0)\n\n# SCREEN\nSCREEN_WIDTH = 1080\nSCREEN_HEIGHT = 720\n\n# LEVELS\nLEVEL_NUMBER = 3\n\n# KEYS CONFIGURATION\nKEYS_1 = [K_w, K_a, K_s, K_d]\nKEYS_2 = [K_UP, K_LEFT, K_DOWN, K_RIGHT]\n", "id": "9954067", "language": "Python", "matching_score": 0.506891131401062, "max_stars_count": 4, "path": "constants.py" }, { "content": "import pygame\n\nfrom core.Enemy import Enemy\nfrom core.Wall import Wall\n\n\nclass Level:\n \"\"\"\n Class to generate the selected level from a file and manage all actions that happend in the level\n \"\"\"\n\n def __init__(self, level_id):\n self.wall_list = []\n self.bullets = []\n self.enemies = []\n self.id = str(level_id)\n self.file = open(\"./levels/lvl_\" + self.id + \".txt\")\n\n print(\"Level {0} created\".format(self.id))\n\n self.generate_level()\n\n @staticmethod\n def display_background(window):\n \"\"\"\n Method to display a display_background on the game screen\n :param window: pygame Surface object\n \"\"\"\n background = (200, 200, 200)\n window.fill(background)\n\n def generate_level(self):\n \"\"\"\n Method to read level's file and create walls using position which are in a text file\n :return:\n \"\"\"\n file_line = self.file.readline()\n while len(file_line) > 0:\n self.generate_wall(file_line)\n file_line = self.file.readline()\n\n self.file.close()\n\n for i in range(0, 5):\n enemy = Enemy(self)\n self.enemies.append(enemy)\n\n def generate_wall(self, string):\n \"\"\"\n Method to initialize each wall of the level using the class Wall\n \"\"\"\n index1 = string.find(' ')\n index2 = string.find(' ', index1 + 1)\n index3 = string.find(' ', index2 + 1)\n\n wx = string[0:index1]\n wx = int(wx)\n\n wy = string[index1 + 1:index2]\n wy = int(wy)\n\n ww = string[index2 + 1:index3]\n ww = int(ww)\n\n wh = string[index3 + 1:len(string)]\n wh = int(wh)\n\n wall = Wall(wx, wy, ww, wh)\n self.wall_list.append(wall)\n\n def display_wall(self, window):\n \"\"\"\n Method to draw a black square at the postion of each wall in the wall list of the Level instance\n :param window: pygame Surface object\n \"\"\"\n for wall in self.wall_list:\n square = pygame.Rect(wall.pos[0], wall.pos[1], wall.width, wall.height)\n color = (0, 0, 0)\n pygame.draw.rect(window, color, square)\n\n def get_constraints(self, entity, direction):\n \"\"\"\n Method to check for each wall if the player is touching it\n :param entity:\n :param direction:\n :return:\n \"\"\"\n for wall in self.wall_list:\n c = wall.get_constraint(entity, direction)\n if c is not None:\n return c\n return None\n\n def display_bullets(self, window):\n \"\"\"\n Method to draw bullets shooted by player on the screen\n :param window: pygame Surface object\n :return:\n \"\"\"\n for b in self.bullets:\n b_rect = pygame.Surface((b.size[0], b.size[1]))\n b_rect.set_alpha(b.alpha)\n b_rect.fill(b.color)\n window.blit(b_rect, (b.pos[0], b.pos[1]))\n\n def update_sprites(self, window, player):\n \"\"\"\n Method to upadate sprties on the screen with their new positions\n :return:\n \"\"\"\n for bullet in self.bullets:\n 
bullet.move(self)\n\n # Graphics Update for all elements on screen\n self.display_background(window)\n self.display_wall(window)\n self.display_bullets(window)\n player.display(window)\n", "id": "11045150", "language": "Python", "matching_score": 2.684129476547241, "max_stars_count": 4, "path": "core/Level.py" }, { "content": "from constants import *\n\n\nclass Wall:\n \"\"\"\n A class to define a wall or an obstacle and manage collisions\n \"\"\"\n\n def __init__(self, x, y, width, height):\n self.pos = (x, y)\n self.height = height\n self.width = width\n\n def get_constraint(self, entity, direction):\n \"\"\"\n Method to check if the player is touching a wall or not\n \"\"\"\n px, py = entity.pos\n pw, ph = entity.size\n wx, wy = self.pos\n ww = self.width\n wh = self.height\n\n if direction == DOWN:\n if px + pw <= wx or px >= wx + ww: # no collision\n return None\n if py + ph >= wy > py:\n return wy - ph\n\n if direction == RIGHT:\n if py + ph <= wy or py >= wy + wh: # no collision\n return None\n if px + pw >= wx > px:\n return wx - pw\n\n if direction == UP:\n if px + pw <= wx or px >= wx + ww: # no collision\n return None\n if py <= wy + wh < py + ph:\n return wy + wh\n\n if direction == LEFT:\n if py + ph <= wy or py >= wy + wh: # no collision\n return None\n if px <= wx + ww < px + pw:\n return wx + ww\n\n return None\n", "id": "1636125", "language": "Python", "matching_score": 0.04275178909301758, "max_stars_count": 4, "path": "core/Wall.py" }, { "content": "import pygame\n\n\nclass Sound:\n \"\"\"Class to manage sound and special effects\"\"\"\n\n def __init__(self):\n self.master_volume = 100\n self.music_volume = 100\n self.effects_volume = 100\n self.music = \"musique\"\n\n def generate_sound(self):\n sound = pygame.mixer.Sound(\"./assets/music_1.wav\")\n sound.play()\n self.master_volume = pygame.mixer.music.get_volume()\n pygame.mixer.music.set_volume(10)\n", "id": "5005232", "language": "Python", "matching_score": 0.12946732342243195, "max_stars_count": 4, "path": "core/Sound.py" }, { "content": "import pygame\n\nfrom constants import *\nfrom core.Entity import Entity\n\n\nclass Player(Entity):\n \"\"\"\n Class to create a player and manage it.\n \"\"\"\n\n def __init__(self, x, y, speed):\n # PLayer statistics and state attributes\n size = (64, 64)\n super().__init__(x, y, speed, 3, size)\n self.statement_keys = [False, False, False, False] # Forward, Left, Backward, Right\n self.dir = \"./assets/img/player_\"\n self.img = pygame.image.load(self.dir + \"right.png\")\n\n def display(self, window):\n \"\"\"\n Display the player on the screen\n :param window: pygame Surface object\n \"\"\"\n if self.statement_keys[0]:\n self.img = pygame.image.load(self.dir + \"up.png\")\n window.blit(self.img, self.pos)\n\n if self.statement_keys[1]:\n self.img = pygame.image.load(self.dir + \"left.png\")\n window.blit(self.img, self.pos)\n\n if self.statement_keys[2]:\n self.img = pygame.image.load(self.dir + \"down.png\")\n window.blit(self.img, self.pos)\n\n if self.statement_keys[3]:\n self.img = pygame.image.load(self.dir + \"right.png\")\n window.blit(self.img, self.pos)\n\n if self.statement_keys == [False]*4:\n self.img = pygame.image.load(self.dir + \"right.png\")\n window.blit(self.img, self.pos)\n\n def move(self, level):\n \"\"\"\n move the player in the direction of True's keys in statement_keys list\n :param level: instance of Level Class\n \"\"\"\n\n if self.statement_keys[0]:\n c = level.get_constraints(self, UP)\n if c is not None:\n self.pos[1] = c\n else:\n self.pos[1] -= 
self.speed\n\n if self.statement_keys[1]:\n c = level.get_constraints(self, LEFT)\n if c is not None:\n self.pos[0] = c\n else:\n self.pos[0] -= self.speed\n\n if self.statement_keys[2]:\n c = level.get_constraints(self, DOWN)\n if c is not None:\n self.pos[1] = c\n else:\n self.pos[1] += self.speed\n\n if self.statement_keys[3]:\n c = level.get_constraints(self, RIGHT)\n if c is not None:\n self.pos[0] = c\n else:\n self.pos[0] += self.speed\n", "id": "874403", "language": "Python", "matching_score": 3.0826096534729004, "max_stars_count": 4, "path": "core/Player.py" }, { "content": "import random\n\nimport pygame\n\nfrom constants import *\nfrom core.Entity import Entity\n\n\nclass Enemy(Entity):\n def __init__(self, level):\n super().__init__(-1, -1, 0.75, 3, (64, 64))\n\n self.direction = random.randint(0, 3)\n self.set_spawn(level)\n\n self.dir = \"./assets/img/player_\"\n self.img = pygame.image.load(self.dir + \"right.png\")\n\n def set_spawn(self, level):\n \"\"\"\n Setting random spawn positions for enemies\n :return:\n \"\"\"\n c = level.get_constraints(self, self.direction)\n while c is not None:\n self.pos[0] = random.randint(0, 1016)\n self.pos[1] = random.randint(0, 656)\n c = level.get_constraints(self, self.direction)\n\n def move(self, level):\n c = level.get_constraints(self, self.direction)\n if self.direction == UP:\n if c is not None:\n self.pos[1] = c\n self.direction = DOWN\n c = None\n else:\n self.pos[1] -= self.speed\n if self.direction == LEFT:\n if c is not None:\n self.pos[0] = c\n self.direction = RIGHT\n c = None\n else:\n self.pos[0] -= self.speed\n if self.direction == DOWN:\n if c is not None:\n self.pos[1] = c\n self.direction = UP\n c = None\n else:\n self.pos[1] += self.speed\n if self.direction == RIGHT:\n if c is not None:\n self.pos[0] = c\n self.direction = LEFT\n else:\n self.pos[0] += self.speed\n\n def display(self, window):\n\n if self.direction == UP:\n self.img = pygame.image.load(self.dir + \"up.png\")\n window.blit(self.img, self.pos)\n\n if self.direction == LEFT:\n self.img = pygame.image.load(self.dir + \"left.png\")\n window.blit(self.img, self.pos)\n\n if self.direction == DOWN:\n self.img = pygame.image.load(self.dir + \"down.png\")\n window.blit(self.img, self.pos)\n\n if self.direction == RIGHT:\n self.img = pygame.image.load(self.dir + \"right.png\")\n window.blit(self.img, self.pos)\n", "id": "6771164", "language": "Python", "matching_score": 2.680035352706909, "max_stars_count": 4, "path": "core/Enemy.py" }, { "content": "from constants import *\nfrom core.Entity import Entity\n\n\nclass Bullet(Entity):\n \"\"\"\n Class to manage bullet physics\n \"\"\"\n\n def __init__(self, player, speed, direction):\n self.direction = direction\n self.color = (0, 255, 0)\n self.alpha = 255\n size = (20, 20)\n\n x, y = self.set_spawn_pos(player, size)\n\n super().__init__(x, y, speed, 1, size)\n\n def set_spawn_pos(self, player, size):\n \"\"\"\n\n :return:\n \"\"\"\n if self.direction == UP:\n x = player.pos[0] + (player.size[0] / 2)\n x -= (size[0] / 2)\n\n y = player.pos[1]\n\n return x, y\n\n if self.direction == LEFT:\n x = player.pos[0]\n\n y = player.pos[1] + (player.size[1] / 2)\n y -= (size[1] / 2)\n\n return x, y\n\n if self.direction == DOWN:\n x = player.pos[0] + (player.size[0] / 2)\n x -= (size[0] / 2)\n\n y = player.pos[1] + player.size[1]\n\n return x, y\n\n if self.direction == RIGHT:\n x = player.pos[0]\n\n y = player.pos[1] + (player.size[1] / 2)\n y -= (size[1] / 2)\n\n return x, y\n\n else:\n return None\n\n def move(self, 
level):\n \"\"\"\n Method to manage all bullets path\n :param level:\n :return:\n \"\"\"\n\n c = level.get_constraints(self, self.direction)\n if c is None and self.direction == UP:\n self.pos[1] -= self.speed\n\n if c is None and self.direction == LEFT:\n self.pos[0] -= self.speed\n\n if c is None and self.direction == DOWN:\n self.pos[1] += self.speed\n\n if c is None and self.direction == RIGHT:\n self.pos[0] += self.speed\n\n if c is not None:\n self.alpha = 0\n", "id": "10858191", "language": "Python", "matching_score": 1.5664867162704468, "max_stars_count": 4, "path": "core/Bullet.py" }, { "content": "class Entity:\n \"\"\"\n Class to manage all types of entities in the game\n \"\"\"\n\n def __init__(self, x, y, speed, health, size=(64, 64)):\n self.pos = [x, y]\n self.size = size\n self.speed = speed\n self.health = health\n", "id": "1108071", "language": "Python", "matching_score": 0.6255841851234436, "max_stars_count": 4, "path": "core/Entity.py" } ]
2.083631
janix
[ { "content": "#!/usr/bin/python\n\n#run as root\n#author: <NAME>\n\n\nimport smbus\nimport time\nimport sys\nimport os\n\nPATH = '/tmp/timestamp'\n\ndef main():\n\n\tread_value = read_sensor(0x39) + read_sensor(0x49)\n\tsensors_limit_value = 1000\n\t\n#\tprint read_value\n\n\tif (read_value > 0 and read_value < sensors_limit_value):\n\t\tif call_permit():\n\t\t\tmake_call(3, 'wszyscy')\n#\t\t\tmake_call(1, 'janix')\n\n\telif (read_value < 0):\n\t\ttime.sleep(30)\n\t\tread_value2 = read_sensor(0x39) + read_sensor(0x49)\n\t\tif (read_value2 < sensors_limit_value):\n\t\t\tif call_permit():\n\t\t\t\tmake_call(3, 'wszyscy')\n\n\n\ndef read_sensor(address):\n\n\t# Get I2C bus\n\tbus = smbus.SMBus(1)\n\n\t# Get address from script parameter (for example ./TSL2561.py 39)\n#\taddr = int(sys.argv[1], 16)\n\n\t# TSL2561 address, 0x39(57)\n\t# Select control register, 0x00(00) with command register, 0x80(128)\n\t# 0x03(03) Power ON mode\n\n\ttry:\n\t\tbus.write_byte_data(address, 0x00 | 0x80, 0x03)\n\t\t# TSL2561 address, 0x39(57)\n\t\t# Select timing register, 0x01(01) with command register, 0x80(128)\n\t\t# 0x02(02) Nominal integration time = 402ms\n\t\tbus.write_byte_data(address, 0x01 | 0x80, 0x02)\n\n\t\ttime.sleep(0.5)\n\n\t\t# Read data back from 0x0C(12) with command register, 0x80(128), 2 bytes\n\t\t# ch0 LSB, ch0 MSB\n\t\tdata = bus.read_i2c_block_data(address, 0x0C | 0x80, 2)\n\n\t\t# Read data back from 0x0E(14) with command register, 0x80(128), 2 bytes\n\t\t# ch1 LSB, ch1 MSB\n\t\tdata1 = bus.read_i2c_block_data(address, 0x0E | 0x80, 2)\n\n\t\t# Convert the data\n\t\tch0 = data[1] * 256 + data[0]\n\t\tch1 = data1[1] * 256 + data1[0]\n\n\n\t\t# Output data to screen\n\t\tprint \"sensor {0:02x}: Full Spectrum(IR + Visible) :{1} lux\".format(address, ch0)\n\t\tprint \"sensor {0:02x}: Infrared Value :{1} lux\".format(address, ch1)\n\t\tprint \"sensor {0:02x}: Visible Value :{1} lux\".format(address, ch0-ch1)\n\t\t\n\t\treturn ch0\n\n\texcept IOError:\n\t\tprint \"IOError Sensor {0:02x}\".format(address)\n\t\treturn -1\n\ndef call_permit():\n\tset_timestamp = 900\n\ttry:\n\t\tif check_timestamp() < 0:\n\t\t\tprint \"timestamp < 0\"\n\t\t\treturn True\n\t\telif check_timestamp() > 0 and check_timestamp() <= set_timestamp:\n\t\t\tprint \"waiting for delay time\"\n\t\t\treturn False\n\t\telif check_timestamp() > set_timestamp:\n\t\t\tprint \"delay time has passed, making call\"\n\t\t\treturn True\n\n\texcept IOError:\n\t\tprint \"/tmp/timestamp is missing\"\n\t\treturn True\n\texcept ValueError:\n\t\tprint \"/tmp/timestamp was modified by hand\"\n\t\treturn True\n\ndef create_file():\n\tf = open(PATH, 'w+')\n\ttimestamp = time.time()\n\tf.write(str(timestamp))\n\tprint timestamp\n\tf.close()\n\ndef check_timestamp():\n\tf = open(PATH, 'r')\n\told_timestamp = float(f.read())\n\tnew_timestamp = time.time()\n\ttime_interval = new_timestamp - old_timestamp\n\treturn time_interval\n\ndef make_call(repetitions_number, group):\n\tcreate_file()\n\tfor n in range(repetitions_number):\n\t\tos.system(\"/var/scripts/alarmy/dzwonienie.py {0}\".format(group))\n\n\nmain()\n", "id": "7946708", "language": "Python", "matching_score": 3.219137191772461, "max_stars_count": 0, "path": "TSL2561.py" }, { "content": "#!/usr/bin/python\n\nimport os\nimport time\n\nPATH = '/tmp/timestamp'\n\ndef main():\n\n\ttempfile = open(\"/sys/bus/w1/devices/28-04146f491dff/w1_slave\")\n\tthetext = tempfile.read()\n\ttempfile.close()\n\ttempdata = thetext.split(\"\\n\")[1].split(\" \")[9]\n\ttemperature = int(tempdata[2:])\n\n\tif 
temperature > 30000:\n\t\tif call_permit():\n\t\t\tmake_call(3, 'wszyscy')\n#\t\t\tmake_call(1, 'janix')\n\n\tprint temperature\n\ndef call_permit():\n\tset_timestamp = 900\n\ttry:\n\t\tif check_timestamp() < 0:\n\t\t\tprint \"timestamp < 0\"\n\t\t\treturn True\n\t\telif check_timestamp() > 0 and check_timestamp() <= set_timestamp:\n\t\t\tprint \"waiting for delay time\"\n\t\t\treturn False\n\t\telif check_timestamp() > set_timestamp:\n\t\t\tprint \"delay time has passed, making call\"\n\t\t\treturn True\n\n\texcept IOError:\n\t\tprint \"/tmp/timestamp is missing\"\n\t\treturn True\n\texcept ValueError:\n\t\tprint \"/tmp/timestamp was modified by hand\"\n\t\treturn True\n\ndef create_file():\n\tf = open(PATH, 'w+')\n\ttimestamp = time.time()\n\tf.write(str(timestamp))\n\tprint timestamp\n\tf.close()\n\ndef check_timestamp():\n\tf = open(PATH, 'r')\n\told_timestamp = float(f.read())\n\tnew_timestamp = time.time()\n\ttime_interval = new_timestamp - old_timestamp\n\treturn time_interval\n\ndef make_call(repetitions_number, group):\n\tcreate_file()\n\tfor n in range(repetitions_number):\n\t\tos.system(\"/var/scripts/alarmy/dzwonienie.py {0}\".format(group))\n\nmain()\n", "id": "8201544", "language": "Python", "matching_score": 0.4309020936489105, "max_stars_count": 0, "path": "temperatura_alarm.py" }, { "content": "#!/usr/bin/python\n\n#author: <NAME>\n\nimport serial\nimport time\nimport sys\n\nser = serial.Serial(\n\tport='/dev/ttyUSB0'\n)\n\ngroup = sys.argv[1]\n\ndef dialup(phone_number):\n\tser.write(\"atdt+48{0};\\r\".format(phone_number))\n\twhile True:\n\t\tdata = ser.readline().rstrip()\n\t\tprint data\n \n\t\tif ('ERROR' in data):\n\t\t\ttime.sleep(5)\n\t\t\tser.write(\"atdt+48{0};\\r\".format(phone_number))\n\t\t\tcontinue\n\n\t\telif ('^CONF:1' in data):\n\t\t\ttime.sleep(30)\n\t\t\tser.write(\"at+chup\\r\")\n\t\telif ('^CEND' in data):\n\t\t\ttime.sleep(2)\n\t\t\tprint \"Ending call\"\n\t\t\tbreak\t\n\t\t\t\n\nnumbersList = [xxxxxxxxx,yyyyyyyyy,zzzzzzzzz]\n\nfor number in numbersList:\n\tdialup(number)\n", "id": "7925517", "language": "Python", "matching_score": 0.11291786283254623, "max_stars_count": 0, "path": "dzwonienie.py" }, { "content": "#!/usr/bin/python -u\n\n# add to /etc/snmp/snmpd.conf\n# pass_persist .1.3.6.1.2.1.25.1.8 /path/to/sensors.py\n\n\nimport sys\nlista = [\n\"/sys/bus/w1/devices/28-03146a7692ff/w1_slave\",\n\"/sys/bus/w1/devices/28-03146a78d9ff/w1_slave\",\n\"/sys/bus/w1/devices/28-03146a777fff/w1_slave\",\n\"/sys/bus/w1/devices/28-03146aaf1fff/w1_slave\",\n\"/sys/bus/w1/devices/28-051685e09cff/w1_slave\",\n\"/sys/bus/w1/devices/28-0416747c6cff/w1_slave\",\n\"/sys/bus/w1/devices/28-0516810060ff/w1_slave\"\n\n]\nsensors = {}\n\ndef iter():\n i = 1\n\n for item in lista:\n tempfile = open(item)\n thetext = tempfile.read()\n tempfile.close()\n tempdata = thetext.split(\"\\n\")[1].split(\" \")[9]\n temperature = int(tempdata[2:])\n #ignore sensor errors\n if temperature < 10000000:\n sensors[i] = temperature\n# print sensors[i]\n i += 1\n\nimport snmp_passpersist as snmp\nOID_BASE=\".1.3.6.1.2.1.25.1.8\"\n\n\ndef update():\n global pp\n iter()\n #OID == .1.3.6.1.2.1.25.1.8.3\n pp.add_int('3',sensors[1])\n #OID == .1.3.6.1.2.1.25.1.8.4\n pp.add_int('4',sensors[2])\n #...\n pp.add_int('5',sensors[3])\n pp.add_int('6',sensors[4])\n pp.add_int('9',sensors[5])\n pp.add_int('10',sensors[6])\n pp.add_int('11',sensors[7])\n\n\ntry:\n pp=snmp.PassPersist(OID_BASE)\n pp.start(update,30)\n\nexcept KeyboardInterrupt:\n print \"Exiting on user request.\"\n sys.exit(0)\n", 
"id": "4825574", "language": "Python", "matching_score": 0.019999755546450615, "max_stars_count": 0, "path": "sensors.py" } ]
0.27191
ppond454
[ { "content": "import numpy as np\nimport cv2\nimport tensorflow.keras\nfrom keras.preprocessing import image\nimport tensorflow as tf\n\n\ncap= cv2.VideoCapture(\"test2.mp4\")\nmodel = tensorflow.keras.models.load_model(\"keras_model.h5\")\nface_cascade = \"haarcascade_frontalface.xml\"\nface_classifier = cv2.CascadeClassifier(face_cascade)\nsize = (224, 224)\n\nif not cap.isOpened():\n raise IOError(\"Cannot open webcam\")\n\nwhile(True): \n ret, capture = cap.read()\n capcopy = capture.copy()\n capture = cv2.resize(capture,size)\n img = np.array(capture,dtype=np.float32)\n img = np.expand_dims(img,axis=0)\n img = img/255\n gray = cv2.cvtColor(capture, cv2.COLOR_BGR2GRAY)\n faces = face_classifier.detectMultiScale(gray, 1.1,4)\n prediction = model.predict(img)\n\n for (x, y, w, h) in faces:\n \n if(prediction[0][0]>prediction[0][1]) :\n cv2.rectangle(capture, (x, y), (x+w, y+h), (0, 0, 255), 2)\n cv2.putText(capture,\"Kuma\",(x,y-7),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,255),2)\n else :\n cv2.rectangle(capture, (x, y), (x+w, y+h), (255, 0, 0), 2)\n cv2.putText(capture,\"Human\",(x,y-7),cv2.FONT_HERSHEY_SIMPLEX,0.5,(255,0,0),2)\n \n cv2.imshow(\"image\",capture)\n if cv2.waitKey(1) & 0xFF == ord(\"q\") :\n break \n\ncap.release()\ncv2.destroyAllWindows()", "id": "8002048", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "app.py" } ]
0
afshinabdi
[ { "content": "\"\"\"\n Implementation of the paper 'Sparsified SGD with Memory'\n This is mainly based on the code available at 'https://github.com/epfml/sparsifiedSGD'\n especially the file memory.py\n\"\"\"\n\nimport numpy as np\n\nclass topk_quantizer:\n def __init__(self, k):\n self._k = k\n self._residue = 0\n\n def quantize(self, X, reconstructed=True):\n \"\"\"\n Top-K SGD sparsification with memory\n Parameters:\n g (np:ndarray) : input gradient\n residue (np:ndarray) : residue of the same shape as g\n k (int) : number of elements to keep\n \"\"\"\n\n self._residue += X\n self._k = min(X.size, self._k)\n indices = np.argpartition(np.abs(self._residue.ravel()), -self._k)[-self._k:]\n indices = np.unravel_index(indices, X.shape)\n\n Xh = np.zeros_like(self._residue)\n Xh[indices] = self._residue[indices]\n self._residue[indices] = 0\n\n if reconstructed:\n return Xh\n else:\n return indices, Xh[indices]\n\n def reset(self):\n self._residue = 0\n", "id": "260959", "language": "Python", "matching_score": 2.334794282913208, "max_stars_count": 0, "path": "QuantizerTest/models/quantizers/topK_sgd.py" }, { "content": "\"\"\"\n Implementation of the paper 'Sparsified SGD with Memory'\n This is mainly based on the code available at 'https://github.com/epfml/sparsifiedSGD'\n especially the file memory.py\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\n\ndef topk_sgd(W, k):\n \"\"\"\n Top-K SGD sparsification with memory\n \"\"\"\n\n W_shape = W.get_shape().as_list()\n W_size = np.prod(W_shape)\n k = min(np.prod(W_shape), k)\n\n w = tf.reshape(W, shape=(-1,))\n residue = tf.Variable(tf.zeros(shape=(W_size,)), dtype=tf.float32, trainable=False)\n\n x = w + residue\n _, indices = tf.math.top_k(tf.abs(x), k, sorted=False)\n\n new_residue = tf.tensor_scatter_update(x, tf.expand_dims(indices, 1), tf.zeros(k, tf.float32))\n xh = x - new_residue\n Wh = tf.reshape(xh, W_shape)\n with tf.control_dependencies([Wh, new_residue]):\n update_residue = residue.assign(new_residue)\n\n return Wh, update_residue, residue\n\n", "id": "8160841", "language": "Python", "matching_score": 0.07555879652500153, "max_stars_count": 0, "path": "SimpleMSE/tf/quantizers/topK_sgd.py" }, { "content": "\"\"\"\n Fully connected neural network for classification of data, hidden ReLU and final Softmax layers.\n The default network creates a 4 layers fully connected network, (784-300-100-10), for classification of MNSIT data.\n\"\"\"\n\nfrom .DistributedBaseModel import DistributedBaseModel\nimport tensorflow as tf\nimport numpy as np\nimport scipy.stats as st\n\n\nclass FCModel(DistributedBaseModel):\n\n # _________________________________________________________________________\n # build the neural network\n # create neural network with random initial parameters\n def _generate_random_parameters(self, parameters):\n layer_shapes = parameters.get('layer_shapes', [784, 1000, 300, 100, 10])\n self._num_layers = len(layer_shapes) - 1\n\n initial_weights = [0] * self._num_layers\n initial_biases = [0] * self._num_layers\n # create initial parameters for the network\n for n in range(self._num_layers):\n initial_weights[n] = st.truncnorm(-2, 2, loc=0, scale=0.1).rvs((layer_shapes[n], layer_shapes[n + 1]))\n initial_biases[n] = np.ones(layer_shapes[n + 1]) * 0.1\n\n return initial_weights, initial_biases\n\n # create a fully connected neural network with given initial parameters\n def _create_initialized_network(self, initial_weights, initial_biases):\n self._num_layers = len(initial_weights)\n input_len = 
initial_weights[0].shape[0]\n\n # create weights and biases of the neural network\n self._nn_weights = []\n self._nn_biases = []\n for init_w, init_b in zip(initial_weights, initial_biases):\n w = tf.Variable(init_w.astype(np.float32), dtype=tf.float32)\n b = tf.Variable(init_b.astype(np.float32), dtype=tf.float32)\n self._nn_weights += [w]\n self._nn_biases += [b]\n\n self._input = tf.placeholder(tf.float32, shape=[None, input_len])\n self._target = tf.placeholder(tf.int32, shape=None)\n self._drop_rate = tf.placeholder(tf.float32)\n\n z = self._input\n for n in range(self._num_layers - 1):\n # create a fully connected layer with relu activation function\n x = tf.nn.dropout(z, rate=self._drop_rate)\n y = tf.matmul(x, self._nn_weights[n]) + self._nn_biases[n]\n z = tf.nn.relu(y)\n\n # output layer of the neural network\n n = self._num_layers - 1\n x = tf.nn.dropout(z, rate=self._drop_rate)\n y = tf.matmul(x, self._nn_weights[n]) + self._nn_biases[n]\n z = tf.nn.softmax(y)\n\n # outputs of the neural network\n self._logit = y\n self._output = z\n\n # loss function\n self._loss = tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self._target, logits=self._logit)\n )\n\n # accuracy of the model\n matches = tf.equal(self._target, tf.argmax(self._logit, 1, output_type=tf.dtypes.int32))\n self._accuracy = tf.reduce_mean(tf.cast(matches, tf.float32))\n", "id": "10791428", "language": "Python", "matching_score": 5.330805778503418, "max_stars_count": 0, "path": "CompressionError/models/FullyConnected.py" }, { "content": "\"\"\"\n Basic convolutional neural network for classification of MNIST data.\n The default is Lenet-5 like structure, two convolutional layers, followed by two fully connected ones.\n The filters' shapes are:\n [5, 5, 1, 32], [5, 5, 32, 64], [7 * 7 * 64, 512], [512, 10]\n\"\"\"\n\nfrom .DistributedBaseModel import DistributedBaseModel\nimport tensorflow as tf\nimport numpy as np\nimport scipy.stats as st\n\n\nclass LenetModel(DistributedBaseModel):\n\n # _________________________________________________________________________\n # build the neural network\n # create neural network with random initial parameters\n def _generate_random_parameters(self, parameters):\n layer_shapes = [[5, 5, 1, 32], [5, 5, 32, 64], [7 * 7 * 64, 512], [512, 10]]\n num_layers = 4\n\n initial_weights = [0] * num_layers\n initial_biases = [0] * num_layers\n # create initial parameters for the network\n for n in range(num_layers):\n initial_weights[n] = st.truncnorm(-2, 2, loc=0, scale=0.1).rvs(layer_shapes[n])\n initial_biases[n] = np.ones(layer_shapes[n][-1]) * 0.1\n\n return initial_weights, initial_biases\n\n # create a convolutional neural network with given initial parameters\n def _create_initialized_network(self, initial_weights, initial_biases):\n self._nn_weights = []\n self._nn_biases = []\n\n # create weights and biases of the neural network\n for init_w, init_b in zip(initial_weights, initial_biases):\n w = tf.Variable(init_w.astype(np.float32), dtype=tf.float32)\n b = tf.Variable(init_b.astype(np.float32), dtype=tf.float32)\n self._nn_weights += [w]\n self._nn_biases += [b]\n\n self._input = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])\n self._target = tf.placeholder(tf.int32, shape=None)\n self._drop_rate = tf.placeholder(tf.float32)\n\n x = self._input\n # first convolutional layer\n y = tf.nn.conv2d(x, self._nn_weights[0], strides=[\n 1, 1, 1, 1], padding='SAME') + self._nn_biases[0]\n x = tf.nn.relu(y)\n\n # max pooling\n x = tf.nn.max_pool(x, ksize=[1, 2, 2, 
1], strides=[\n 1, 2, 2, 1], padding='SAME')\n\n # second convolutional layer\n y = tf.nn.conv2d(x, self._nn_weights[1], strides=[\n 1, 1, 1, 1], padding='SAME') + self._nn_biases[1]\n x = tf.nn.relu(y)\n\n # max pooling\n x = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[\n 1, 2, 2, 1], padding='SAME')\n\n # flatten the signal\n x = tf.reshape(x, [-1, initial_weights[2].shape[0]])\n\n # first fully connected layer, relu (for hidden layers)\n x = tf.nn.dropout(x, rate=self._drop_rate)\n y = tf.matmul(x, self._nn_weights[2]) + self._nn_biases[2]\n z = tf.nn.relu(y)\n\n # output fully connected layer with softmax activation function\n x = tf.nn.dropout(z, rate=self._drop_rate)\n y = tf.matmul(x, self._nn_weights[3]) + self._nn_biases[3]\n z = tf.nn.softmax(y)\n\n # output of the neural network\n self._logit = y\n self._output = z\n\n # loss function\n self._loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self._target, logits=self._logit))\n\n # accuracy of the model\n matches = tf.equal(self._target, tf.argmax(self._logit, axis=1, output_type=tf.int32))\n self._accuracy = tf.reduce_mean(tf.cast(matches, tf.float32))\n", "id": "3445588", "language": "Python", "matching_score": 5.338937759399414, "max_stars_count": 0, "path": "CompressionError/models/Lenet.py" }, { "content": "\"\"\"\n Convolutional neural network for classification of CIFAR10 data.\n The default is Lenet-5 like structure, two convolutional layers, followed by two fully connected ones.\n The filters' shapes are:\n [5, 5, 1, 32], [5, 5, 32, 64], [7 * 7 * 64, 384], [384, 192], [192, 10]\n\"\"\"\n\nfrom .DistributedBaseModel import DistributedBaseModel\nimport itertools\nimport numpy as np\nimport scipy.stats as st\nimport tensorflow as tf\n\n\nclass CifarNetModel(DistributedBaseModel):\n def __init__(self):\n super().__init__()\n\n self._image_size = 24\n\n # _________________________________________________________________________\n # build the neural network\n # create neural network with random initial parameters\n def _generate_random_parameters(self, parameters):\n flat_dim = self._image_size * self._image_size * 64 // 4 // 4\n layer_shapes = [[5, 5, 3, 64], [5, 5, 64, 64], [flat_dim, 384], [384, 192], [192, 10]]\n num_layers = len(layer_shapes)\n\n init_std = [0.05, 0.05, 0.04, 0.04, 1 / 192.0]\n init_bias = [0.0, 0.1, 0.1, 0.1, 0.0]\n initial_weights = [0] * num_layers\n initial_biases = [0] * num_layers\n\n # create initial parameters for the network\n for n in range(num_layers):\n initial_weights[n] = st.truncnorm(-2, 2, loc=0, scale=init_std[n]).rvs(layer_shapes[n])\n initial_biases[n] = np.ones(layer_shapes[n][-1]) * init_bias[n]\n\n return initial_weights, initial_biases\n\n # create a convolutional neural network with given initial parameters\n def _create_initialized_network(self, initial_weights, initial_biases):\n self._nn_weights = []\n self._nn_biases = []\n\n # create weights and biases of the neural network\n name_scopes = ['conv1', 'conv2', 'fc1', 'fc2', 'fc3']\n for layer, init_w, init_b in zip(itertools.count(), initial_weights, initial_biases):\n with tf.variable_scope(name_scopes[layer]):\n w = tf.Variable(init_w.astype(np.float32), dtype=tf.float32, name='weights')\n b = tf.Variable(init_b.astype(np.float32), dtype=tf.float32, name='biases')\n\n self._nn_weights += [w]\n self._nn_biases += [b]\n\n self._input = tf.placeholder(tf.float32, shape=[None, self._image_size, self._image_size, 3])\n self._target = tf.placeholder(tf.int32, shape=None)\n self._drop_rate = 
tf.placeholder(tf.float32)\n\n x = self._input\n # convolutional layer 1\n y = tf.nn.conv2d(x, self._nn_weights[0], strides=[1, 1, 1, 1], padding='SAME') + self._nn_biases[0]\n x = tf.nn.relu(y, name=name_scopes[0])\n x = tf.nn.max_pool(x, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')\n x = tf.nn.lrn(x, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)\n\n # convolutional layer 2\n y = tf.nn.conv2d(x, self._nn_weights[1], strides=[1, 1, 1, 1], padding='SAME') + self._nn_biases[1]\n x = tf.nn.relu(y, name=name_scopes[1])\n x = tf.nn.lrn(x, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)\n x = tf.nn.max_pool(x, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n # flatten the signal\n x = tf.reshape(x, [-1, initial_weights[2].shape[0]])\n\n # fully connected 1 (layer 3)\n x = tf.nn.dropout(x, rate=self._drop_rate)\n y = tf.matmul(x, self._nn_weights[2]) + self._nn_biases[2]\n z = tf.nn.relu(y, name=name_scopes[2])\n\n # fully connected 2 (layer 4)\n x = tf.nn.dropout(z, rate=self._drop_rate)\n y = tf.matmul(x, self._nn_weights[3]) + self._nn_biases[3]\n z = tf.nn.relu(y, name=name_scopes[3])\n\n # fully connected 3 (layer 5)\n x = tf.nn.dropout(z, rate=self._drop_rate)\n y = tf.matmul(x, self._nn_weights[4]) + self._nn_biases[4]\n z = tf.nn.softmax(y, name=name_scopes[4])\n\n # output of the neural network\n self._logit = y\n self._output = z\n\n # loss function\n self._loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self._target, logits=self._logit))\n\n # accuracy of the model\n matches = tf.equal(self._target, tf.argmax(self._logit, axis=1, output_type=tf.int32))\n self._accuracy = tf.reduce_mean(tf.cast(matches, tf.float32))\n", "id": "3525655", "language": "Python", "matching_score": 5.545173168182373, "max_stars_count": 0, "path": "CompressionError/models/CifarNet.py" }, { "content": "\"\"\"\n Basic Alexnet neural network for image classification.\n This is a modified implementation of\n www.cs.toronto.edu/~guerzhoy/\n\n feed('data') -> conv(11, 11, 96, 4, 4, padding='VALID', name='conv1') -> lrn(2, 2e-05, 0.75, name='norm1') ->\n max_pool(3, 3, 2, 2, padding='VALID', name='pool1') -> conv(5, 5, 256, 1, 1, group=2, name='conv2') ->\n lrn(2, 2e-05, 0.75, name='norm2') -> max_pool(3, 3, 2, 2, padding='VALID', name='pool2') ->\n conv(3, 3, 384, 1, 1, name='conv3') -> conv(3, 3, 384, 1, 1, group=2, name='conv4') ->\n conv(3, 3, 256, 1, 1, group=2, name='conv5') -> max_pool(3, 3, 2, 2, padding='VALID', name='pool5') ->\n fc(4096, name='fc6') -> fc(4096, name='fc7') -> fc(1000, relu=False, name='fc8') -> softmax(name='prob')\n\n\"\"\"\n\nfrom .DistributedBaseModel import DistributedBaseModel\nimport tensorflow as tf\nimport numpy as np\nimport scipy.stats as st\n\n\nclass AlexnetModel(DistributedBaseModel):\n def __init__(self):\n super().__init__()\n\n # _________________________________________________________________________\n # build the neural network\n def _add_convolution_layer(self, x, kernel, bias, strides, padding):\n h = tf.Variable(kernel.astype(np.float32), dtype=tf.float32)\n b = tf.Variable(bias.astype(np.float32), dtype=tf.float32)\n\n self._nn_weights += [h]\n self._nn_biases += [b]\n\n output = tf.nn.conv2d(x, h, strides=strides, padding=padding)\n output = tf.nn.relu(tf.nn.bias_add(output, b))\n\n return output\n\n def _add_splitted_convolutional_layer(self, x, kernel, bias, strides, padding):\n h = tf.Variable(kernel.astype(np.float32), dtype=tf.float32)\n b = tf.Variable(bias.astype(np.float32), dtype=tf.float32)\n h0, h1 = 
tf.split(h, 2, axis=3)\n x0, x1 = tf.split(x, 2, axis=3)\n\n self._nn_weights += [h]\n self._nn_biases += [b]\n\n x0 = tf.nn.conv2d(x0, h0, strides=strides, padding=padding)\n x1 = tf.nn.conv2d(x1, h1, strides=strides, padding=padding)\n\n output = tf.concat([x0, x1], axis=3)\n output = tf.nn.relu(tf.nn.bias_add(output, b))\n\n return output\n\n def _add_fully_connected_layer(self, x, weight, bias, func=''):\n w = tf.Variable(weight.astype(np.float32), dtype=tf.float32)\n b = tf.Variable(bias.astype(np.float32), dtype=tf.float32)\n\n self._nn_weights += [w]\n self._nn_biases += [b]\n\n output = tf.matmul(x, w) + b\n if func == 'relu':\n output = tf.nn.relu(output)\n elif func == 'softmax':\n self._logit = output\n output = tf.nn.softmax(output)\n\n return output\n\n def _add_max_pooling(self, x):\n output = tf.nn.max_pool(x, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')\n\n return output\n\n def _generate_random_parameters(self, parameters):\n layer_shapes = [[11, 11, 3, 96], [5, 5, 48, 256], [3, 3, 256, 384], [3, 3, 192, 384], [3, 3, 192, 256],\n [9216, 4096], [4096, 4096], [4096, 1000]]\n\n num_layers = len(layer_shapes)\n\n initial_weights = [0] * num_layers\n initial_biases = [0] * num_layers\n # create initial parameters for the network\n for n in range(num_layers):\n initial_weights[n] = st.truncnorm(-2, 2, loc=0, scale=0.1).rvs(layer_shapes[n])\n initial_biases[n] = np.ones(layer_shapes[n][-1]) * 0.1\n\n return initial_weights, initial_biases\n\n def _create_initialized_network(self, initial_weights, initial_biases):\n input_dim = [None, 227, 227, 3]\n output_dim = [None, 1000]\n\n self._nn_weights = []\n self._nn_biases = []\n\n self._input = tf.placeholder(tf.float32, shape=input_dim)\n self._target = tf.placeholder(tf.float32, shape=output_dim)\n self._drop_rate = tf.placeholder(tf.float32)\n\n\n x = self._input\n\n # 1- convolution, local response normalization, and max-pooling\n x = self._add_convolution_layer(x, initial_weights[0], initial_biases[0], [1, 4, 4, 1], 'SAME')\n x = tf.nn.local_response_normalization(x, depth_radius=2, alpha=2e-5, beta=0.75, bias=1.0)\n x = self._add_max_pooling(x)\n\n # 2- splitted convolution, local response normalization, and max-pooling\n x = self._add_splitted_convolutional_layer(x, initial_weights[1], initial_biases[1], [1, 1, 1, 1], 'SAME')\n x = tf.nn.local_response_normalization(x, depth_radius=2, alpha=2e-5, beta=0.75, bias=1.0)\n x = self._add_max_pooling(x)\n\n # 3- only a convolution\n x = self._add_convolution_layer(x, initial_weights[2], initial_biases[2], [1, 1, 1, 1], 'SAME')\n\n # 4- splitted convolution\n x = self._add_splitted_convolutional_layer(x, initial_weights[3], initial_biases[3], [1, 1, 1, 1], 'SAME')\n\n # 5- splitted convolutional layer, max-pooling\n x = self._add_splitted_convolutional_layer(x, initial_weights[4], initial_biases[4], [1, 1, 1, 1], 'SAME')\n x = self._add_max_pooling(x)\n\n # 6- fully connected layer, relu\n x = tf.reshape(x, [-1, initial_weights[5].shape[0]])\n x = tf.nn.dropout(x, rate=self._drop_rate)\n x = self._add_fully_connected_layer(x, initial_weights[5], initial_biases[5], func='relu')\n\n # 7- another fully connected layer, relu\n x = tf.nn.dropout(x, rate=self._drop_rate)\n x = self._add_fully_connected_layer(x, initial_weights[6], initial_biases[6], func='relu')\n\n # 8- output fully connected layer, softmax\n x = tf.nn.dropout(x, rate=self._drop_rate)\n self._output = self._add_fully_connected_layer(x, initial_weights[7], initial_biases[7], func='softmax')\n\n # loss 
function\n        self._loss = tf.reduce_mean(\n            tf.nn.softmax_cross_entropy_with_logits_v2(labels=self._target, logits=self._logit))\n\n        # =================================================================\n        # accuracy of the model\n        matches = tf.equal(tf.argmax(self._target, 1), tf.argmax(self._logit, 1))\n        self._accuracy = tf.reduce_mean(tf.cast(matches, tf.float32))\n\n        # top-5 accuracy of the model\n        matches = tf.nn.in_top_k(predictions=self._output, targets=tf.argmax(self._target, 1), k=5)\n        self._accuracy_top5 = tf.reduce_mean(tf.cast(matches, tf.float32))\n\n\n    # _________________________________________________________________________\n    # compute the accuracy of the NN using the given inputs\n    def compute_accuracy(self, x, target, top_5=False):\n        if top_5:\n            return self._sess.run([self._accuracy, self._accuracy_top5],\n                                  feed_dict={self._input: x, self._target: target, self._drop_rate: 0})\n        else:\n            return self._sess.run(self._accuracy,\n                                  feed_dict={self._input: x, self._target: target, self._drop_rate: 0})\n\n", "id": "657895", "language": "Python", "matching_score": 3.6950104236602783, "max_stars_count": 0, "path": "QuantizerTest/models/Alexnet.py" }, { "content": "\"\"\"\n    Fully connected neural network for classification of data, hidden ReLU and final Softmax layers.\n    The default network creates a 4-layer fully connected network (784-1000-300-100-10) for classification of MNIST data.\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\nimport scipy.stats as st\n\n\nclass FCModel:\n    def __init__(self):\n        self._graph = None\n        self._sess = None\n        self._initializer = None\n        self._accuracy = None\n\n        self._optimizer = None\n        self._trainOp = None\n        self._learning_rate = 0.01\n        self._loss = None\n\n        # parameters of the neural network\n        self._drop_rate = None\n        self._input = None\n        self._output = None\n        self._logit = None\n        self._target = None\n\n        self._nn_weights = []\n        self._nn_biases = []\n\n        self._num_layers = 0\n\n        # variables to set parameters during training\n        self._assign_op = None\n        self._input_weights = None\n        self._input_biases = None\n\n        # gradients of the neural network\n        self._grad_W = None\n        self._grad_b = None\n\n        # to apply externally computed gradients\n        self._input_gW = None\n        self._input_gb = None\n        self._apply_gradients = None\n\n        # forward and backward signals in the neural network\n        self._fw_signals = None\n        self._bp_signals = None\n\n    # =========================================================================\n    # build the neural network\n    def create_network(self, initial_weights=None, initial_biases=None, layer_shapes=[784, 1000, 300, 100, 10]):\n        if initial_weights is None:\n            self._create_random_network(layer_shapes)\n        else:\n            self._create_initialized_network(initial_weights, initial_biases)\n\n    # create neural network with random initial parameters\n    def _create_random_network(self, layer_shapes):\n        self._num_layers = len(layer_shapes) - 1\n\n        initial_weights = [0] * self._num_layers\n        initial_biases = [0] * self._num_layers\n        # create initial parameters for the network\n        for n in range(self._num_layers):\n            initial_weights[n] = st.truncnorm(-2, 2, loc=0,\n                                              scale=0.1).rvs((layer_shapes[n], layer_shapes[n + 1]))\n            initial_biases[n] = np.ones(layer_shapes[n + 1]) * 0.1\n\n        self._create_initialized_network(initial_weights, initial_biases)\n\n    # create a fully connected neural network with given initial parameters\n    def 
_create_initialized_network(self, initial_weights, initial_biases):\n self._num_layers = len(initial_weights)\n\n self._fw_signals = []\n self._bp_signals = []\n self._nn_weights = []\n self._nn_biases = []\n\n input_len = initial_weights[0].shape[0]\n output_len = initial_weights[-1].shape[1]\n self._graph = tf.Graph()\n with self._graph.as_default():\n self._input = tf.placeholder(tf.float32, shape=[None, input_len])\n self._target = tf.placeholder(tf.float32, shape=[None, output_len])\n self._drop_rate = tf.placeholder(tf.float32)\n\n # create weights and biases of the neural network\n for init_w, init_b in zip(initial_weights, initial_biases):\n w = tf.Variable(init_w.astype(np.float32), dtype=tf.float32)\n b = tf.Variable(init_b.astype(np.float32), dtype=tf.float32)\n self._nn_weights += [w]\n self._nn_biases += [b]\n\n z = self._input\n for n in range(self._num_layers):\n # add a fully connected layer, relu (for hidden layers) or softmax (for output layer)\n x = tf.nn.dropout(z, rate=self._drop_rate)\n y = tf.matmul(x, self._nn_weights[n]) + self._nn_biases[n]\n\n self._fw_signals += [x]\n self._bp_signals += [y]\n\n if n == self._num_layers - 1:\n z = tf.nn.softmax(y)\n else:\n z = tf.nn.relu(y)\n\n # output of the neural network\n self._logit = y\n self._output = z\n\n # loss function\n self._loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits_v2(labels=self._target, logits=self._logit))\n\n # accuracy of the model\n matches = tf.equal(tf.argmax(self._target, 1), tf.argmax(self._logit, 1))\n self._accuracy = tf.reduce_mean(tf.cast(matches, tf.float32))\n\n # =========================================================================\n # add regulizer to the loss function\n def add_regulizer(self, l1_weight=None, l2_weight=None):\n if self._loss is None:\n raise ValueError(\n 'The network has not been defined yet. 
First build the network, then add the regularizer.')\n\n if l1_weight is None:\n l1_weight = 0.0\n\n if l2_weight is None:\n l2_weight = 0.0\n\n num_parameters = len(self._nn_weights)\n if type(l1_weight) is float:\n l1_weight = [l1_weight] * num_parameters\n\n if type(l2_weight) is float:\n l2_weight = [l2_weight] * num_parameters\n\n if len(l1_weight) != num_parameters or len(l2_weight) != num_parameters:\n raise ValueError(\n 'Number of weights for the l1/l2 regularization is not the same as the number of weights.')\n\n with self._graph.as_default():\n l1_loss = tf.add_n([(s * tf.norm(w, ord=1))\n for (w, s) in zip(self._nn_weights, l1_weight)])\n l2_loss = tf.add_n([(s * tf.nn.l2_loss(w))\n for (w, s) in zip(self._nn_weights, l2_weight)])\n\n self._loss += (l1_loss + l2_loss)\n\n # =================================================================\n # update (assign) operator for the parameters of the NN model\n def add_assign_operators(self):\n self._assign_op = []\n self._input_weights = ()\n self._input_biases = ()\n\n with self._graph.as_default():\n for w in self._nn_weights:\n w_placeholder = tf.placeholder(dtype=tf.float32, shape=w.get_shape())\n w_assign_op = w.assign(w_placeholder)\n self._assign_op.append(w_assign_op)\n self._input_weights += (w_placeholder,)\n\n for b in self._nn_biases:\n b_placeholder = tf.placeholder(dtype=tf.float32, shape=b.get_shape())\n b_assign_op = b.assign(b_placeholder)\n self._assign_op.append(b_assign_op)\n self._input_biases += (b_placeholder,)\n\n # =========================================================================\n # define optimizer of the neural network\n def create_optimizer(self, training_algorithm='Adam', learning_rate=0.01, decay_rate=0.95, decay_step=100):\n with self._graph.as_default():\n # define the learning rate\n train_counter = tf.Variable(0, dtype=tf.float32)\n # decayed_learning_rate = learning_rate * decay_rate ^ (train_counter // decay_step)\n self._learning_rate = tf.train.exponential_decay(learning_rate, train_counter, decay_step,\n decay_rate=decay_rate, staircase=True)\n\n # define the appropriate optimizer to use\n if (training_algorithm == 0) or (training_algorithm == 'GD'):\n self._optimizer = tf.train.GradientDescentOptimizer(\n learning_rate=self._learning_rate)\n elif (training_algorithm == 1) or (training_algorithm == 'RMSProp'):\n self._optimizer = tf.train.RMSPropOptimizer(learning_rate=self._learning_rate)\n elif (training_algorithm == 2) or (training_algorithm == 'Adam'):\n self._optimizer = tf.train.AdamOptimizer(learning_rate=self._learning_rate)\n elif (training_algorithm == 3) or (training_algorithm == 'AdaGrad'):\n self._optimizer = tf.train.AdagradOptimizer(learning_rate=self._learning_rate)\n elif (training_algorithm == 4) or (training_algorithm == 'AdaDelta'):\n self._optimizer = tf.train.AdadeltaOptimizer(learning_rate=self._learning_rate)\n else:\n raise ValueError(\"Unknown training algorithm.\")\n\n # =================================================================\n # training operator\n var_list = self._nn_weights + self._nn_biases\n self._trainOp = self._optimizer.minimize(\n self._loss, var_list=var_list, global_step=train_counter)\n\n # =================================================================\n # computing gradients\n self._grad_W = tf.gradients(self._loss, self._nn_weights)\n self._grad_b = tf.gradients(self._loss, self._nn_biases)\n\n # applying gradients to the optimizer\n self._input_gW = tuple(\n [tf.placeholder(dtype=tf.float32, shape=w.get_shape()) for w in 
self._nn_weights])\n self._input_gb = tuple(\n [tf.placeholder(dtype=tf.float32, shape=b.get_shape()) for b in self._nn_biases])\n gv = [(g, v) for g, v in zip(self._input_gW, self._nn_weights)]\n gv += [(g, v) for g, v in zip(self._input_gb, self._nn_biases)]\n\n self._apply_gradients = self._optimizer.apply_gradients(gv, global_step=train_counter)\n\n # add backpropagation to the graph\n self._bp_signals = tf.gradients(self._loss, self._bp_signals)\n\n # =========================================================================\n # initialize the computation graph, if necessary create the initializer and the session\n\n def initialize(self, device=None):\n if self._initializer is None:\n with self._graph.as_default():\n self._initializer = tf.global_variables_initializer()\n\n if device is None:\n self._sess = tf.Session(graph=self._graph)\n else:\n config = tf.ConfigProto(log_device_placement=True, device_count=device)\n self._sess = tf.Session(config=config)\n\n self._sess.run(self._initializer)\n\n # =========================================================================\n # compute the accuracy of the NN using the given inputs\n def compute_accuracy(self, x, target):\n return self._sess.run(self._accuracy, feed_dict={self._input: x, self._target: target, self._drop_rate: 0})\n\n # =========================================================================\n # One iteration of the training algorithm with input data\n def train(self, x, y, drop_rate=0):\n if self._trainOp is None:\n raise ValueError('Training algorithm has not been set.')\n\n self._sess.run(self._trainOp, feed_dict={\n self._input: x, self._target: y, self._drop_rate: drop_rate})\n\n # =========================================================================\n # get or set neural network's weights\n def set_weights(self, new_weights, new_biases):\n if self._sess is None or self._assign_op is None:\n raise ValueError('The assign operators has been added to the graph.')\n\n self._sess.run(self._assign_op, feed_dict={\n self._input_weights: new_weights, self._input_biases: new_biases})\n\n def get_weights(self):\n return self._sess.run([self._nn_weights, self._nn_biases])\n\n def learning_rate(self):\n return self._sess.run(self._learning_rate)\n\n # =========================================================================\n # Compute the gradients of the parameters of the NN for the given input\n def compute_gradients(self, x, target, drop_rate=0):\n if self._grad_W is None:\n raise ValueError('The operators to compute the gradients have not been defined.')\n\n return self._sess.run([self._grad_W, self._grad_b],\n feed_dict={self._input: x, self._target: target, self._drop_rate: drop_rate})\n\n # =========================================================================\n # Apply the gradients externally computed to the optimizer\n def apply_gradients(self, gw, gb):\n if self._apply_gradients is None:\n raise ValueError('The operators to apply the gradients have not been defined.')\n\n feed_dict = {self._input_gW: gw, self._input_gb: gb}\n self._sess.run(self._apply_gradients, feed_dict=feed_dict)\n\n # =========================================================================\n # get forward and backward signals\n def get_fw_bp_signals(self, x, target, drop_rate=0):\n if (self._sess is None) or (self._fw_signals is None) or (self._bp_signals is None):\n raise ValueError('The model has not been fully created and initialized.')\n\n return self._sess.run([self._fw_signals, self._bp_signals],\n feed_dict={self._input: x, 
self._target: target, self._drop_rate: drop_rate})\n\n    # =========================================================================\n    # get gradients and signals\n    def get_gradients_signals(self, x, target, drop_rate=0):\n        if (self._sess is None) or (self._grad_W is None) or (self._fw_signals is None) or (self._bp_signals is None):\n            raise ValueError('The model has not been fully created and initialized.')\n\n        return self._sess.run([self._grad_W, self._grad_b, self._fw_signals, self._bp_signals],\n                              feed_dict={self._input: x, self._target: target, self._drop_rate: drop_rate})\n\n    # =========================================================================\n    # get number of layers\n    def get_number_layers(self):\n        if (self._sess is None) or (self._grad_W is None):\n            raise ValueError('The model has not been fully created and initialized.')\n\n        return len(self._nn_weights)\n", "id": "2636427", "language": "Python", "matching_score": 7.430337905883789, "max_stars_count": 3, "path": "models/FullyConnected.py" }, { "content": "\"\"\"\n    Alexnet-like convolutional neural network for image classification of 227x227x3 inputs.\n    The default structure has five convolutional layers followed by three fully connected ones.\n    The filters' shapes are:\n        [11, 11, 3, 96], [5, 5, 48, 256], [3, 3, 256, 384], [3, 3, 192, 384], [3, 3, 192, 256],\n        [9216, 4096], [4096, 4096], [4096, 1000]\n\"\"\"\n\nimport itertools\nimport numpy as np\nimport scipy.stats as st\nimport tensorflow as tf\n\n\nclass AlexnetModel:\n    def __init__(self):\n        self._image_size = 227\n        self._num_layers = 8\n        self._num_convlayers = 5\n        self._num_fclayers = 3\n        self._sess = None\n        self._global_counter = None\n\n        # parameters of the neural network\n        self._drop_rate = None\n        self._input = None\n        self._output = None\n        self._logit = None\n        self._target = None\n\n        self._nn_weights = []\n        self._nn_biases = []\n\n        # evaluation and training of the neural network\n        self._accuracy = None\n        self._optimizer = None\n        self._trainOp = None\n        self._learning_rate = 0.01\n        self._loss = None\n\n        # gradients of the parameters\n        self._grad_W = None\n        self._grad_b = None\n\n        # to apply externally computed gradients\n        self._input_gW = None\n        self._input_gb = None\n        self._apply_gradients = None\n\n        # forward and backward signals in the neural network\n        self._fw_signals = None\n        self._bp_signals = None\n\n    # =========================================================================\n    # build the neural network\n    def create_network(self, parameters: dict):\n        if parameters.get('initial_w') is None:\n            initial_weights, initial_biases = self._generate_random_parameters()\n        else:\n            initial_weights = parameters.get('initial_w')\n            initial_biases = parameters.get('initial_b')\n\n        graph = tf.Graph()\n        with graph.as_default():\n            # 1- create the neural network with the given/random initial weights/biases\n            self._create_initialized_network(initial_weights, initial_biases)\n\n            # 2- if required, add regularizer to the loss function\n            l1 = parameters.get('l1_regularizer')\n            if l1 is not None:\n                self._add_l1regulizer(w=l1)\n\n            l2 = parameters.get('l2_regularizer')\n            if l2 is not None:\n                self._add_l2regulizer(w=l2)\n\n            # 3- if required, add the training algorithm\n            alg = parameters.get('training_alg')\n            if alg is not None:\n                self._add_optimizer(parameters)\n\n            # 4- compute gradients?\n            if parameters.get('compute_gradients', False):\n                self._add_gradient_computations()\n\n            # 5- add gradient quantization\n            if parameters.get('quantizer', False):\n                self._add_quantizer(parameters)\n\n            initializer = tf.global_variables_initializer()\n\n        self._sess = 
tf.Session(graph=graph)\n self._sess.run(initializer)\n\n def _add_convolution_layer(self, x, kernel, bias, strides, padding):\n output = tf.nn.conv2d(x, kernel, strides=strides, padding=padding)\n output = tf.nn.relu(tf.nn.bias_add(output, bias))\n\n return output\n\n def _add_splitted_convolutional_layer(self, x, kernel, bias, strides, padding):\n h0, h1 = tf.split(kernel, 2, axis=3)\n x0, x1 = tf.split(x, 2, axis=3)\n\n x0 = tf.nn.conv2d(x0, h0, strides=strides, padding=padding)\n x1 = tf.nn.conv2d(x1, h1, strides=strides, padding=padding)\n\n output = tf.concat([x0, x1], axis=3)\n output = tf.nn.relu(tf.nn.bias_add(output, bias))\n\n return output\n\n def _add_fully_connected_layer(self, x, weight, bias, func=''): \n x = tf.nn.dropout(x, rate=self._drop_rate)\n y = tf.matmul(x, weight) + bias\n self._fw_signals += [x]\n self._bp_signals += [y]\n\n if func == 'relu':\n output = tf.nn.relu(y)\n elif func == 'softmax':\n self._logit = y\n output = tf.nn.softmax(y)\n else:\n output = y\n\n return output\n\n def _add_max_pooling(self, x):\n output = tf.nn.max_pool(x, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')\n\n return output\n\n # create neural network with random initial parameters\n def _generate_random_parameters(self):\n layer_shapes = [\n [11, 11, 3, 96], [5, 5, 48, 256], [3, 3, 256, 384], [3, 3, 192, 384], [3, 3, 192, 256], [9216, 4096],\n [4096, 4096], [4096, 1000]\n ]\n\n num_layers = len(layer_shapes)\n\n initial_weights = [0] * num_layers\n initial_biases = [0] * num_layers\n # create initial parameters for the network\n for n in range(num_layers):\n initial_weights[n] = st.truncnorm(-2, 2, loc=0, scale=0.1).rvs(layer_shapes[n])\n initial_biases[n] = np.ones(layer_shapes[n][-1]) * 0.1\n\n return initial_weights, initial_biases\n\n # create a convolutional neural network with given initial parameters\n def _create_initialized_network(self, initial_weights, initial_biases):\n self._fw_signals = []\n self._bp_signals = []\n self._nn_weights = []\n self._nn_biases = []\n\n # create weights and biases of the neural network\n name_scopes = ['conv1', 'conv2', 'conv3', 'conv4', 'conv5', 'fc1', 'fc2', 'fc3']\n for layer, init_w, init_b in zip(itertools.count(), initial_weights, initial_biases):\n with tf.variable_scope(name_scopes[layer]):\n w = tf.Variable(init_w.astype(np.float32), dtype=tf.float32, name='weights')\n b = tf.Variable(init_b.astype(np.float32), dtype=tf.float32, name='biases')\n\n self._nn_weights += [w]\n self._nn_biases += [b]\n\n self._input = tf.placeholder(tf.float32, shape=[None, self._image_size, self._image_size, 3])\n self._target = tf.placeholder(tf.int32, shape=None)\n self._drop_rate = tf.placeholder(tf.float32, name='drop-out')\n\n x = self._input\n\n # 1- convolution, local response normalization, and max-pooling\n x = self._add_convolution_layer(x, self._nn_weights[0], self._nn_biases[0], [1, 4, 4, 1], 'SAME')\n x = tf.nn.local_response_normalization(x, depth_radius=2, alpha=2e-5, beta=0.75, bias=1.0)\n x = self._add_max_pooling(x)\n\n # 2- splitted convolution, local response normalization, and max-pooling\n x = self._add_splitted_convolutional_layer(x, self._nn_weights[1], self._nn_biases[1], [1, 1, 1, 1], 'SAME')\n x = tf.nn.local_response_normalization(x, depth_radius=2, alpha=2e-5, beta=0.75, bias=1.0)\n x = self._add_max_pooling(x)\n\n # 3- only a convolution\n x = self._add_convolution_layer(x, self._nn_weights[2], self._nn_biases[2], [1, 1, 1, 1], 'SAME')\n\n # 4- splitted convolution\n x = self._add_splitted_convolutional_layer(x, 
self._nn_weights[3], self._nn_biases[3], [1, 1, 1, 1], 'SAME')\n\n # 5- splitted convolutional layer, max-pooling\n x = self._add_splitted_convolutional_layer(x, self._nn_weights[4], self._nn_biases[4], [1, 1, 1, 1], 'SAME')\n x = self._add_max_pooling(x)\n\n # 6- fully connected layer, relu\n x = tf.reshape(x, [-1, initial_weights[5].shape[0]])\n x = tf.nn.dropout(x, rate=self._drop_rate)\n x = self._add_fully_connected_layer(x, self._nn_weights[5], self._nn_biases[5], func='relu')\n\n # 7- another fully connected layer, relu\n x = tf.nn.dropout(x, rate=self._drop_rate)\n x = self._add_fully_connected_layer(x, self._nn_weights[6], self._nn_biases[6], func='relu')\n\n # 8- output fully connected layer, softmax\n x = tf.nn.dropout(x, rate=self._drop_rate)\n self._output = self._add_fully_connected_layer(x, self._nn_weights[7], self._nn_biases[7], func='softmax')\n\n\n # loss function\n self._loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits_v2(labels=self._target, logits=self._logit))\n\n # accuracy of the model\n matches = tf.nn.in_top_k(predictions=self._output, targets=tf.argmax(self._target, 1), k=5)\n self._accuracy_top5 = tf.reduce_mean(tf.cast(matches, tf.float32))\n\n matches = tf.equal(tf.argmax(self._target, 1), tf.argmax(self._logit, 1))\n self._accuracy = tf.reduce_mean(tf.cast(matches, tf.float32))\n\n # =========================================================================\n # add regulizers to the loss function\n def _add_l1regulizer(self, w):\n num_layers = len(self._nn_weights)\n\n if type(w) is float:\n w = [w] * num_layers\n\n assert len(w) == num_layers, 'Not enough weights for the regularizer.'\n\n l1_loss = tf.add_n([(s * tf.norm(v, ord=1)) for (v, s) in zip(self._nn_weights, w)])\n self._loss += l1_loss\n\n def _add_l2regulizer(self, w):\n num_layers = len(self._nn_weights)\n\n if type(w) is float:\n w = [w] * num_layers\n\n assert len(w) == num_layers, 'Not enough weights for the regularizer.'\n\n l2_loss = tf.add_n([(s * tf.nn.l2_loss(v)) for (v, s) in zip(self._nn_weights, w)])\n self._loss += l2_loss\n\n # =========================================================================\n # define optimizer of the neural network\n def _add_optimizer(self, parameters):\n alg = parameters.get('training_alg', 'GD')\n lr = parameters.get('initial_learning_rate', 0.01)\n dr = parameters.get('decay_rate', 0.95)\n ds = parameters.get('decay_step', 200)\n\n # define the learning rate\n self._global_counter = tf.Variable(0, dtype=tf.float32, name='global-counter')\n # decayed_learning_rate = learning_rate * dr ^ (train_counter // ds)\n self._learning_rate = tf.train.exponential_decay(lr, self._global_counter, ds, decay_rate=dr, staircase=True)\n\n # define the appropriate optimizer to use\n if (alg == 0) or (alg == 'GD'):\n self._optimizer = tf.train.GradientDescentOptimizer(learning_rate=self._learning_rate)\n elif (alg == 1) or (alg == 'RMSProp'):\n self._optimizer = tf.train.RMSPropOptimizer(learning_rate=self._learning_rate)\n elif (alg == 2) or (alg == 'Adam'):\n self._optimizer = tf.train.AdamOptimizer(learning_rate=self._learning_rate)\n elif (alg == 3) or (alg == 'AdaGrad'):\n self._optimizer = tf.train.AdagradOptimizer(learning_rate=self._learning_rate)\n elif (alg == 4) or (alg == 'AdaDelta'):\n self._optimizer = tf.train.AdadeltaOptimizer(learning_rate=self._learning_rate)\n else:\n raise ValueError(\"Unknown training algorithm.\")\n\n # training and initialization operators\n var_list = self._nn_weights + self._nn_biases\n self._trainOp = 
self._optimizer.minimize(self._loss, var_list=var_list, global_step=self._global_counter)\n\n # add backpropagation to the graph\n self._bp_signals = tf.gradients(self._loss, self._bp_signals)\n\n # =================================================================\n # computing gradients\n def _add_gradient_computations(self):\n # computing gradients\n self._grad_W = tf.gradients(self._loss, self._nn_weights)\n self._grad_b = tf.gradients(self._loss, self._nn_biases)\n\n # applying gradients to the optimizer\n self._input_gW = tuple([tf.placeholder(dtype=tf.float32, shape=w.get_shape()) for w in self._nn_weights])\n self._input_gb = tuple([tf.placeholder(dtype=tf.float32, shape=b.get_shape()) for b in self._nn_biases])\n gv = [(g, v) for g, v in zip(self._input_gW, self._nn_weights)]\n gv += [(g, v) for g, v in zip(self._input_gb, self._nn_biases)]\n\n self._apply_gradients = self._optimizer.apply_gradients(gv)\n\n # =========================================================================\n # add the operations to quantize gradients\n def _add_quantizer(self, parameters):\n pass\n\n # =========================================================================\n # compute the accuracy of the NN using the given inputs\n def accuracy(self, x, y):\n return self._sess.run(self._accuracy, feed_dict={self._input: x, self._target: y, self._drop_rate: 0.0})\n\n # =========================================================================\n # One iteration of the training algorithm\n def train(self, x, y, drop_rate=0):\n assert self._trainOp is not None, 'Training algorithm has not been set.'\n\n self._sess.run(self._trainOp, feed_dict={self._input: x, self._target: y, self._drop_rate: drop_rate})\n\n # =========================================================================\n # get or set neural network's weights\n def get_weights(self):\n return self._sess.run([self._nn_weights, self._nn_biases])\n\n def learning_rate(self):\n return self._sess.run(self._learning_rate)\n\n # =========================================================================\n # Compute the gradients of the parameters of the NN for the given input\n def compute_gradients(self, x, y, drop_rate=0.0):\n assert self._grad_W is not None, 'The operators to compute the gradients have not been defined.'\n\n return self._sess.run(\n [self._grad_W, self._grad_b], feed_dict={\n self._input: x,\n self._target: y,\n self._drop_rate: drop_rate\n }\n )\n\n # =========================================================================\n # Apply the gradients externally computed to the optimizer\n def apply_gradients(self, gw, gb):\n assert self._apply_gradients is not None, 'The operators to apply the gradients have not been defined.'\n\n feed_dict = {self._input_gW: gw, self._input_gb: gb}\n self._sess.run(self._apply_gradients, feed_dict=feed_dict)\n\n # =========================================================================\n # get forward and backward signals\n # =========================================================================\n # get forward and backward signals\n def get_fw_bp_signals(self, x, target, drop_rate=0):\n if (self._sess is None) or (self._fw_signals is None) or (self._bp_signals is None):\n raise ValueError('The model has not been fully created and initialized.')\n\n return self._sess.run(\n [self._fw_signals, self._bp_signals],\n feed_dict={\n self._input: x,\n self._target: target,\n self._drop_rate: drop_rate\n }\n )\n\n # =========================================================================\n # get gradients 
and signals\n def get_gradients_signals(self, x, target, drop_rate=0):\n if (self._sess is None) or (self._grad_W is None) or (self._fw_signals is None) or (self._bp_signals is None):\n raise ValueError('The model has not been fully created and initialized.')\n\n return self._sess.run(\n [self._grad_W, self._grad_b, self._fw_signals, self._bp_signals],\n feed_dict={\n self._input: x,\n self._target: target,\n self._drop_rate: drop_rate\n }\n )\n", "id": "10458416", "language": "Python", "matching_score": 6.426660537719727, "max_stars_count": 3, "path": "models/Alexnet.py" }, { "content": "\"\"\"\n Base class to simulate a network of multiple workers.\n input parameter to create the neural network may have the following fields:\n initial_w, initial_b: initial weights and biases of the neural network,\n if not provided the child class will generate them randomly based on its sructure\n l1_regularizer:\n l2_regularizer:\n training_alg:\n learning_rate:\n decay_rate:\n decay_step:\n compute_gradients:\n assign_operator:\n\"\"\"\n\nimport tensorflow as tf\nimport quantizers.tf_implementation.cs_quantizer as tf_csq\nimport quantizers.tf_implementation.qsg_quantizer as tf_qsg\nimport quantizers.tf_implementation.dithered_quantizer as tf_dq\nimport quantizers.tf_implementation.dithered_transform_quantizer as tf_dtq\nimport quantizers.tf_implementation.onebit_quantizer as tf_obq\nimport quantizers.tf_implementation.topK_sgd as tf_topK\n\n_DEFAULT_SEED = 94635\n\n\nclass DistributedBaseModel:\n def __init__(self):\n # parameters of the workers\n self._number_workers = 0\n\n # parameters of the neural network\n self._sess = None\n self._initializer = None\n self._accuracy = None\n\n self._optimizer = None\n self._trainOp = None\n self._global_step = None\n self._learning_rate = 0.01\n self._loss = None\n\n # input, output of the model\n self._drop_rate = None\n self._input = None\n self._output = None\n self._logit = None\n self._target = None\n\n # parameters of the neural network\n self._num_layers = 0\n self._nn_weights = []\n self._nn_biases = []\n\n # gradients\n self._gW = None\n self._gb = None\n\n # reconstructed quantized gradients\n self._gWh = None\n self._gbh = None\n\n # to apply externally computed gradients\n self._input_gW = None\n self._input_gb = None\n self._apply_gradients = None\n\n # _________________________________________________________________________\n # build the neural network\n def create_network(self, parameters: dict):\n self._number_workers = parameters.get('num workers', 1) # number of workers\n seed = parameters.get('seed', _DEFAULT_SEED) # graph level seed\n\n if parameters.get('initial_w') is None:\n initial_weights, initial_biases = self._generate_random_parameters(parameters)\n else:\n initial_weights = parameters.get('initial_w')\n initial_biases = parameters.get('initial_b')\n\n self._num_layers = len(initial_weights)\n\n graph = tf.Graph()\n with graph.as_default():\n # set graph level random number seed\n tf.set_random_seed(seed)\n\n # 1- create the neural network with the given/random initial weights/biases\n self._create_initialized_network(initial_weights, initial_biases)\n\n # 2- if required, add regularizer to the loss function\n l1 = parameters.get('l1_regularizer')\n if l1 is not None:\n self._add_l1regulizer(w=l1)\n\n l2 = parameters.get('l2_regularizer')\n if l2 is not None:\n self._add_l2regulizer(w=l2)\n\n # 3- if requried, add the training algorithm\n alg = parameters.get('training_alg')\n if alg is not None:\n 
self._add_optimizer(parameters)\n\n # 4- compute gradients? only if optimizer is defined\n if parameters.get('compute_gradients', False):\n self._add_gradient_computations(parameters)\n\n initializer = tf.global_variables_initializer()\n\n self._sess = tf.Session(graph=graph)\n self._sess.run(initializer)\n\n # _________________________________________________________________________\n # create neural network with random initial parameters\n def _generate_random_parameters(self, parameters):\n pass\n\n # create a fully connected neural network with given initial parameters\n def _create_initialized_network(self, initial_weights, initial_biases):\n pass\n\n # _________________________________________________________________________\n # add regulizer to the loss function\n def _add_l1regulizer(self, w):\n if type(w) is float:\n w = [w] * self._num_layers\n\n assert len(w) == self._num_layers, 'Not enough weights for the regularizer.'\n\n l1_loss = tf.add_n([(s * tf.norm(v, ord=1)) for (v, s) in zip(self._nn_weights, w)])\n self._loss += l1_loss\n\n def _add_l2regulizer(self, w):\n if type(w) is float:\n w = [w] * self._num_layers\n\n assert len(w) == self._num_layers, 'Not enough weights for the regularizer.'\n\n l2_loss = tf.add_n([(s * tf.nn.l2_loss(v)) for (v, s) in zip(self._nn_weights, w)])\n self._loss += l2_loss\n\n # _________________________________________________________________________\n # define optimizer of the neural network\n def _add_optimizer(self, parameters):\n alg = parameters.get('training_alg', 'GD')\n lr = parameters.get('learning_rate', 0.01)\n dr = parameters.get('decay_rate', 0.95)\n ds = parameters.get('decay_step', 200)\n\n # define the learning rate\n self._global_step = tf.Variable(0, dtype=tf.float32)\n # decayed_learning_rate = learning_rate * dr ^ (global_step // ds)\n self._learning_rate = tf.train.exponential_decay(lr, self._global_step, ds, decay_rate=dr, staircase=True)\n\n # define the appropriate optimizer to use\n if (alg == 0) or (alg == 'GD'):\n self._optimizer = tf.train.GradientDescentOptimizer(learning_rate=self._learning_rate)\n elif (alg == 1) or (alg == 'RMSProp'):\n self._optimizer = tf.train.RMSPropOptimizer(learning_rate=self._learning_rate)\n elif (alg == 2) or (alg == 'Adam'):\n self._optimizer = tf.train.AdamOptimizer(learning_rate=self._learning_rate)\n elif (alg == 3) or (alg == 'AdaGrad'):\n self._optimizer = tf.train.AdagradOptimizer(learning_rate=self._learning_rate)\n elif (alg == 4) or (alg == 'AdaDelta'):\n self._optimizer = tf.train.AdadeltaOptimizer(learning_rate=self._learning_rate)\n else:\n raise ValueError(\"Unknown training algorithm.\")\n\n # =================================================================\n # training and initialization operators\n var_list = self._nn_weights + self._nn_biases\n self._trainOp = self._optimizer.minimize(self._loss, var_list=var_list, global_step=self._global_step)\n\n def _add_gradient_computations(self, parameters):\n # computing gradients\n self._gW = tf.gradients(self._loss, self._nn_weights)\n self._gb = tf.gradients(self._loss, self._nn_biases)\n\n # applying gradients to the optimizer\n self._input_gW = tuple([tf.placeholder(dtype=tf.float32, shape=w.get_shape()) for w in self._nn_weights])\n self._input_gb = tuple([tf.placeholder(dtype=tf.float32, shape=b.get_shape()) for b in self._nn_biases])\n gv = [(g, v) for g, v in zip(self._input_gW, self._nn_weights)]\n gv += [(g, v) for g, v in zip(self._input_gb, self._nn_biases)]\n\n self._apply_gradients = 
self._optimizer.apply_gradients(gv, global_step=self._global_step)\n\n if parameters.get('quantizer') is not None:\n self._add_gradient_quantizers(parameters)\n\n # _________________________________________________________________________\n # add the computations for the gradient quantizer\n def _add_gradient_quantizers(self, parameters):\n quantization_method = parameters.get('quantizer', '')\n\n # random number generation seed\n seeds = parameters.get('quantizer_seeds')\n\n self._gWh = [[0] * self._num_layers for _ in range(self._number_workers)]\n self._gbh = [[0] * self._num_layers for _ in range(self._number_workers)]\n\n # operators to update the quantization reside (if necessary)\n self._updateRw = [[tf.no_op()] * self._num_layers for _ in range(self._number_workers)]\n self._updateRb = [[tf.no_op()] * self._num_layers for _ in range(self._number_workers)]\n\n if quantization_method == '':\n for nw in range(self._number_workers):\n self._gWh[nw], self._gbh[nw] = self._gW, self._gb\n\n # add operations for quantization and reconstruction of gradients\n elif quantization_method == 'one-bit':\n for nw in range(self._number_workers):\n for layer in range(self._num_layers):\n _, _, _, gwh, urw = tf_obq.quantize(self._gW[layer])\n _, _, _, gbh, urb = tf_obq.quantize(self._gb[layer])\n\n self._gWh[nw][layer], self._gbh[nw][layer] = gwh, gbh\n self._updateRw[nw][layer], self._updateRb[nw][layer] = urw, urb\n\n elif quantization_method == 'dithered':\n bucket_sizes = parameters.get('bucket_sizes')\n num_levels = parameters.get('num_levels')\n\n for nw in range(self._number_workers):\n for layer in range(self._num_layers):\n _, _, gwh = tf_dq.quantize(\n self._gW[layer], num_levels, bucket_sizes[layer][0], seeds[nw][2 * layer]\n )\n _, _, gbh = tf_dq.quantize(\n self._gb[layer], num_levels, bucket_sizes[layer][1], seeds[nw][2 * layer + 1]\n )\n\n self._gWh[nw][layer], self._gbh[nw][layer] = gwh, gbh\n\n elif quantization_method == 'dithered-transform':\n num_levels = parameters.get('num_levels')\n H_matrices = parameters.get('H')\n\n for nw in range(self._number_workers):\n for layer in range(self._num_layers):\n _, _, gwh = tf_dtq.quantize(self._gW[layer], H_matrices[layer][0], num_levels, seeds[nw][2 * layer])\n _, _, gbh = tf_dtq.quantize(\n self._gb[layer], H_matrices[layer][1], num_levels, seeds[nw][2 * layer + 1]\n )\n\n self._gWh[nw][layer], self._gbh[nw][layer] = gwh, gbh\n\n elif quantization_method == 'qsg':\n bucket_sizes = parameters.get('bucket_sizes')\n num_levels = parameters.get('num_levels')\n\n for nw in range(self._number_workers):\n for layer in range(self._num_layers):\n _, _, gwh = tf_qsg.quantize(self._gW[layer], num_levels, bucket_sizes[layer][0])\n _, _, gbh = tf_qsg.quantize(self._gb[layer], num_levels, bucket_sizes[layer][1])\n\n self._gWh[nw][layer], self._gbh[nw][layer] = gwh, gbh\n\n elif quantization_method == 'quantized-cs':\n num_levels = parameters.get('num_levels')\n T_matrices = parameters.get('H')\n err_feedback = parameters.get('error_feedback', False)\n beta = parameters.get('feedback_weight', 0)\n\n for nw in range(self._number_workers):\n for layer in range(self._num_layers):\n _, _, gwh, urw, _ = tf_csq.quantize(\n self._gW[layer], T_matrices[layer][0], num_levels, seeds[nw][2 * layer], err_feedback, beta\n )\n _, _, gbh, urb, _ = tf_csq.quantize(\n self._gb[layer], T_matrices[layer][1], num_levels, seeds[nw][2 * layer + 1], err_feedback, beta\n )\n\n self._gWh[nw][layer], self._gbh[nw][layer] = gwh, gbh\n\n elif quantization_method == 'topk':\n K = 
parameters.get('K', 1)\n for nw in range(self._number_workers):\n for layer in range(self._num_layers):\n gwh, urw, _ = tf_topK.topk_sgd(self._gW[layer], K[layer][0])\n gbh, urb, _ = tf_topK.topk_sgd(self._gb[layer], K[layer][1])\n\n self._gWh[nw][layer], self._gbh[nw][layer] = gwh, gbh\n\n else:\n raise ValueError('Unknown quantization method.')\n\n # _________________________________________________________________________\n # compute the accuracy of the NN using the given inputs\n def accuracy(self, x, y):\n return self._sess.run(self._accuracy, feed_dict={self._input: x, self._target: y, self._drop_rate: 0.})\n\n # _________________________________________________________________________\n # compute the output of the NN to the given inputs\n def output(self, x):\n return self._sess.run(self._output, feed_dict={self._input: x, self._drop_rate: 0.})\n\n # _________________________________________________________________________\n # One iteration of the training algorithm with input data\n def train(self, x, y, drop_rate=0):\n assert self._trainOp is not None, 'Training algorithm has not been set.'\n\n self._sess.run(self._trainOp, feed_dict={self._input: x, self._target: y, self._drop_rate: drop_rate})\n\n def get_weights(self):\n return self._sess.run([self._nn_weights, self._nn_biases])\n\n def learning_rate(self):\n return self._sess.run(self._learning_rate)\n\n # _________________________________________________________________________\n # Compute the gradients of the parameters of the NN for the given input\n def get_gradients(self, x, y, drop_rate=0.):\n assert self._gW is not None, 'The operators to compute the gradients have not been defined.'\n\n return self._sess.run(\n [self._gW, self._gb], feed_dict={\n self._input: x,\n self._target: y,\n self._drop_rate: drop_rate\n }\n )\n\n # _________________________________________________________________________\n # Quantize the gradients of the parameters of the NN for the given input\n def quantized_gradients(self, x, y, drop_rate=0., worker_idx=0):\n assert self._gWh is not None, 'The operators to quantize the gradients have not been defined.'\n\n return self._sess.run(\n [self._gWh[worker_idx], self._gbh[worker_idx], self._updateRw[worker_idx], self._updateRb[worker_idx]],\n feed_dict={\n self._input: x,\n self._target: y,\n self._drop_rate: drop_rate\n }\n )\n\n # _________________________________________________________________________\n # Apply the gradients externally computed to the optimizer\n def apply_gradients(self, gw, gb):\n assert self._apply_gradients is not None, 'The operators to apply the gradients have not been defined.'\n\n feed_dict = {self._input_gW: gw, self._input_gb: gb}\n self._sess.run(self._apply_gradients, feed_dict=feed_dict)\n\n # _________________________________________________________________________\n @property\n def number_layers(self):\n return self._num_layers\n", "id": "9780408", "language": "Python", "matching_score": 4.376457214355469, "max_stars_count": 0, "path": "QuantizerTest/models/DistributedBaseModel.py" }, { "content": "'''\n Defines a simple linear regression model to analyze and compare convergence rates\n'''\n\nimport numpy as np\nimport scipy.linalg as sla\nimport scipy.stats as st\nimport tensorflow.compat.v1 as tf # pylint: disable=import-error\nimport quantizers.onebit_quantizer as obq\nimport quantizers.qsg_quantizer as qsg\nimport quantizers.cs_quantizer as csq\nimport quantizers.topK_sgd as topkq\nimport quantizers.dithered_transform_quantizer as dtq\n\n\ndef 
create_transformation(M, min_eig=1, max_eig=4):\n \"\"\"\n create transformation to generate correlated Gaussain random vector\n \"\"\"\n A = st.ortho_group.rvs(dim=M)\n S = np.random.random(size=M)\n S = min_eig + (max_eig - min_eig) * (S - np.min(S)) / (np.max(S) - np.min(S))\n S = np.diag(np.sqrt(S))\n T = np.matmul(S, A) # transformation to generate correlated Gaussian random vector\n R = np.matmul(T.T, T) # correltation matrix of input data\n\n return T, R\n\n\nclass RegressionModel:\n def __init__(self):\n self._Wshape = None\n self._batch_size = None\n self._learning_rate = None\n\n self._W, self._loss = None, None # parameter and loss function\n self._gW, self._apply_gradients = None, None # gradient of the parameter and update rule\n self._gWh, self._updateR, self._resetR = None, None, None # quantization and residue update\n\n self._reset_op = None # reset the parameters of the model to re-run\n\n self._sess = None\n\n def create(self, T, Wopt, quantizer='', **kwargs):\n self._Wshape = Wopt.shape\n\n # create the model\n self._create_regressor(T, Wopt)\n\n # define the training operations\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=self._learning_rate)\n self._trainOp = optimizer.minimize(self._loss)\n\n # add gradient quantizer\n self._add_gradient(quantizer, **kwargs)\n\n # applying input gradients to the optimizer\n self._input_gW = (tf.placeholder(dtype=tf.float32, shape=self._Wshape), )\n gv = [(self._input_gW[0], self._W)]\n self._apply_gradients = optimizer.apply_gradients(gv)\n\n self._reset_op = [tf.assign(self._W, np.zeros(self._Wshape)), self._resetR]\n\n self._sess = tf.Session()\n self._sess.run(tf.global_variables_initializer())\n\n def _create_regressor(self, T, Wopt):\n M, N = Wopt.shape\n\n self._batch_size = tf.placeholder(dtype=tf.int32)\n self._learning_rate = tf.placeholder(dtype=tf.float32)\n\n # define the linear model to fit data\n _Wopt = tf.constant(Wopt, dtype=tf.float32)\n _T = tf.constant(T, dtype=tf.float32)\n\n x = tf.random.normal(shape=(self._batch_size, M))\n x = tf.matmul(x, _T)\n yopt = tf.matmul(x, _Wopt)\n\n self._W = tf.Variable(np.zeros((M, N)), dtype=tf.float32)\n y = tf.matmul(x, self._W)\n self._loss = tf.nn.l2_loss(y - yopt) / tf.cast(self._batch_size, dtype=tf.float32)\n\n def _add_gradient(self, quantizer='', **kwargs):\n self._gW = tf.gradients(self._loss, self._W)\n self._updateR = tf.no_op()\n self._resetR = tf.no_op()\n\n feedback = kwargs.get('feedback', False)\n\n if quantizer == '':\n self._gWh = self._gW\n\n elif quantizer == 'one-bit':\n _, _, _, self._gWh, self._updateR, residue = obq.quantize(self._gW[0])\n self._resetR = tf.assign(residue, np.zeros(self._Wshape))\n\n elif quantizer == 'qsg':\n num_levels = kwargs.get('num_levels', 1)\n bucket_size = kwargs.get('bucket_size')\n _, _, self._gWh = qsg.quantize(self._gW[0], num_levels, bucket_size)\n\n elif quantizer == 'qcs':\n num_levels = kwargs.get('num_levels', 1)\n H = kwargs.get('H')\n seed = kwargs.get('seed', 73516)\n beta = kwargs.get('beta', 0)\n _, _, self._gWh, self._updateR, residue = csq.quantize(self._gW[0], H, num_levels, seed, feedback, beta)\n\n elif quantizer == 'topk':\n feedback = True\n K = kwargs.get('K')\n self._gWh, self._updateR, residue = topkq.topk_sgd(self._gW[0], K)\n\n elif quantizer == 'dtq':\n num_levels = kwargs.get('num_levels', 1)\n H = kwargs.get('H')\n seed = kwargs.get('seed', 73516)\n _, _, self._gWh = dtq.quantize(self._gW[0], H, num_levels, seed)\n\n if feedback:\n self._resetR = tf.assign(residue, 
tf.zeros_like(residue))\n\n # __________________________________________________________________________\n # reset the model\n def reset(self):\n self._sess.run(self._reset_op)\n\n def train(self, batch_size, learning_rate):\n self._sess.run(self._trainOp, feed_dict={self._batch_size: batch_size, self._learning_rate: learning_rate})\n\n def compute_gradients(self, batch_size):\n return self._sess.run(self._gW, feed_dict={self._batch_size: batch_size})\n\n def compute_quantized_gradients(self, batch_size):\n Wh, _ = self._sess.run([self._gWh, self._updateR], feed_dict={self._batch_size: batch_size})\n return Wh\n\n def apply_gradients(self, gW, learning_rate):\n self._sess.run(self._apply_gradients, feed_dict={self._input_gW: gW, self._learning_rate: learning_rate})\n\n def loss(self, batch_size):\n return self._sess.run(self._loss, feed_dict={self._batch_size: batch_size})\n\n @property\n def W(self):\n return self._sess.run(self._W)\n", "id": "3443190", "language": "Python", "matching_score": 4.264200210571289, "max_stars_count": 0, "path": "SimpleMSE/tf/regression_model.py" }, { "content": "'''\n Defines a simple linear regression model to analyze and compare convergence rates\n'''\n\nimport numpy as np\nimport scipy.linalg as sla\nimport scipy.stats as st\n\n\ndef create_transformation(M, min_eig=1, max_eig=4):\n \"\"\"\n create transformation to generate correlated Gaussain random vector\n \"\"\"\n A = st.ortho_group.rvs(dim=M)\n S = np.random.random(size=M)\n S = min_eig + (max_eig - min_eig) * (S - np.min(S)) / (np.max(S) - np.min(S))\n S = np.diag(np.sqrt(S))\n T = np.matmul(S, A) # transformation to generate correlated Gaussian random vector\n R = np.matmul(T.T, T) # correltation matrix of input data\n\n return T, R\n\n\nclass RegressionModel:\n def __init__(self, T, Wopt):\n self._Wshape = Wopt.shape\n self._T = T\n self._Wopt = Wopt\n self._W = np.zeros_like(self._Wopt)\n\n def reset(self):\n self._W = np.zeros_like(self._Wopt)\n\n def _create_data_samples(self, batch_size):\n x = np.random.normal(0, 1, size=(batch_size, self._Wopt.shape[0]))\n x = np.matmul(x, self._T)\n y = np.matmul(x, self._Wopt)\n\n return x, y\n\n def loss(self, batch_size):\n x, yd = self._create_data_samples(batch_size)\n y = np.matmul(x, self._W)\n e = yd - y\n\n return np.sum(e**2) / batch_size / 2\n\n def gradient(self, batch_size):\n x, yd = self._create_data_samples(batch_size)\n y = np.matmul(x, self._W)\n g = np.matmul(x.T, y - yd) / batch_size\n\n return g\n\n def update(self, g, learning_rate):\n self._W -= g * learning_rate\n\n @property\n def W(self):\n return self._W\n", "id": "846691", "language": "Python", "matching_score": 1.267799735069275, "max_stars_count": 0, "path": "SimpleMSE/np/regression_model.py" }, { "content": "import os\nimport time\nimport numpy as np\nimport scipy.io as sio\nimport regression_model as rm\nfrom hadamard import load_hadamard_matrix\nimport quantizers.onebit_quantizer as obq\nimport quantizers.qsg_quantizer as qsg\nimport quantizers.cs_quantizer as csq\nimport quantizers.topK_sgd as topkq\n\noutput_folder = 'QuantizedCS/SimpleMSE/new'\nnp.set_printoptions(precision=3, linewidth=80)\nbatch_size = 32\nnum_lr = 100\nrepeat_num = 10\nnum_iterations = 1000\nlearning_rates = np.linspace(0, 0.25, num_lr + 1)[1:]\n\n\ndef evaluate_baseline(T, Wopt, file_name):\n model = rm.RegressionModel(T, Wopt)\n\n loss = np.zeros((num_lr, repeat_num, num_iterations))\n loss2 = np.zeros((num_lr, repeat_num, num_iterations))\n for n, lr in enumerate(learning_rates):\n start = 
time.time()\n print('\\nLearning rate = ', lr, flush=True)\n for rp in range(repeat_num):\n # create model\n model.reset()\n\n info_str = ' '\n for cnt in range(num_iterations):\n # no quantization of the gradients\n g = model.gradient(batch_size)\n model.update(g, learning_rate=lr)\n\n cur_loss = model.loss(batch_size=1024)\n loss[n, rp, cnt] += cur_loss\n loss2[n, rp, cnt] += (cur_loss**2)\n if cnt % 10 == 0:\n print(' ' * len(info_str), end='\\r', flush=True)\n\n if (not np.isfinite(cur_loss)) or (cur_loss > 1e10):\n print(' Diverged.', end='\\r', flush=True)\n break\n\n info_str = ' exp: {0: 2d}, iteration: {1: 4d}, loss={2:.5f}'.format(rp, cnt, cur_loss)\n print(info_str, end='\\r', flush=True)\n\n print('')\n\n elapsed = time.time() - start\n print(' elapsed time = %.3f' % elapsed, flush=True)\n\n sio.savemat(file_name, mdict={\n 'loss': loss,\n 'loss2': loss2,\n 'lr': learning_rates,\n })\n\n\ndef evaluate_onebit(T, Wopt, file_name):\n model = rm.RegressionModel(T, Wopt)\n quantizer = obq.onebit_quantizer()\n\n loss = np.zeros((num_lr, repeat_num, num_iterations))\n loss2 = np.zeros((num_lr, repeat_num, num_iterations))\n for n, lr in enumerate(learning_rates):\n start = time.time()\n print('\\nLearning rate = ', lr, flush=True)\n for rp in range(repeat_num):\n # create model\n model.reset()\n quantizer.reset()\n\n info_str = ' '\n for cnt in range(num_iterations):\n # apply one-bit quantization method to the gradients\n g = model.gradient(batch_size)\n gh = quantizer.quantize(g)\n model.update(gh, learning_rate=lr)\n\n cur_loss = model.loss(batch_size=1024)\n loss[n, rp, cnt] += cur_loss\n loss2[n, rp, cnt] += (cur_loss**2)\n if cnt % 10 == 0:\n print(' ' * len(info_str), end='\\r', flush=True)\n\n if (not np.isfinite(cur_loss)) or (cur_loss > 1e10):\n print(' Diverged.', end='\\r', flush=True)\n break\n\n info_str = ' exp: {0: 2d}, iteration: {1: 4d}, loss={2:.5f}'.format(rp, cnt, cur_loss)\n print(info_str, end='\\r', flush=True)\n\n print('')\n\n elapsed = time.time() - start\n print(' elapsed time = %.3f' % elapsed, flush=True)\n\n sio.savemat(file_name, mdict={\n 'loss': loss,\n 'loss2': loss2,\n 'lr': learning_rates,\n })\n\n\ndef evaluate_qsgd(T, Wopt, file_name, bucket_size, num_levels):\n # create model\n model = rm.RegressionModel(T, Wopt)\n quantizer = qsg.qsg_quantizer(bucket_size, num_levels)\n\n loss = np.zeros((num_lr, repeat_num, num_iterations))\n loss2 = np.zeros((num_lr, repeat_num, num_iterations))\n for n, lr in enumerate(learning_rates):\n start = time.time()\n print('\\nLearning rate = ', lr, flush=True)\n for rp in range(repeat_num):\n model.reset()\n quantizer.reset()\n\n info_str = ' '\n for cnt in range(num_iterations):\n # apply qsgd quantization method to the gradients\n g = model.gradient(batch_size)\n gh = quantizer.quantize(g)\n model.update(gh, learning_rate=lr)\n\n cur_loss = model.loss(batch_size=1024)\n loss[n, rp, cnt] += cur_loss\n loss2[n, rp, cnt] += (cur_loss**2)\n if cnt % 10 == 0:\n print(' ' * len(info_str), end='\\r', flush=True)\n\n if (not np.isfinite(cur_loss)) or (cur_loss > 1e10):\n print('Diverged.', end='\\r', flush=True)\n break\n\n info_str = ' exp: {0: 2d}, iteration: {1: 4d}, loss={2:.5f}'.format(rp, cnt, cur_loss)\n print(info_str, end='\\r', flush=True)\n\n print('')\n\n elapsed = time.time() - start\n print(' elapsed time = %.3f' % elapsed, flush=True)\n\n sio.savemat(file_name, mdict={\n 'loss': loss,\n 'loss2': loss2,\n 'lr': learning_rates,\n })\n\n\ndef evaluate_topksgd(T, Wopt, file_name, K):\n model = 
rm.RegressionModel(T, Wopt)\n quantizer = topkq.topk_quantizer(K)\n\n loss = np.zeros((num_lr, repeat_num, num_iterations))\n loss2 = np.zeros((num_lr, repeat_num, num_iterations))\n for n, lr in enumerate(learning_rates):\n start = time.time()\n print('\\nLearning rate = ', lr, flush=True)\n for rp in range(repeat_num):\n # create model\n model.reset()\n quantizer.reset()\n\n info_str = ' '\n for cnt in range(num_iterations):\n # apply qsgd quantization method to the gradients\n g = model.gradient(batch_size)\n gh = quantizer.quantize(g)\n model.update(gh, learning_rate=lr)\n\n cur_loss = model.loss(batch_size=1024)\n loss[n, rp, cnt] += cur_loss\n loss2[n, rp, cnt] += (cur_loss**2)\n if cnt % 10 == 0:\n print(' ' * len(info_str), end='\\r', flush=True)\n\n if (not np.isfinite(cur_loss)) or (cur_loss > 1e10):\n print('Diverged.', end='\\r', flush=True)\n break\n\n info_str = ' exp: {0: 2d}, iteration: {1: 4d}, loss={2:.5f}'.format(rp, cnt, cur_loss)\n print(info_str, end='\\r', flush=True)\n\n print('')\n\n elapsed = time.time() - start\n print(' elapsed time = %.3f' % elapsed, flush=True)\n\n sio.savemat(file_name, mdict={\n 'loss': loss,\n 'loss2': loss2,\n 'lr': learning_rates,\n })\n\n\ndef evaluate_qcssgd(T, Wopt, file_name, H, num_levels, feedback, beta):\n model = rm.RegressionModel(T, Wopt)\n quantizer = csq.cs_quantizer(H, num_levels, feedback, beta)\n\n loss = np.zeros((num_lr, repeat_num, num_iterations))\n loss2 = np.zeros((num_lr, repeat_num, num_iterations))\n for n, lr in enumerate(learning_rates):\n start = time.time()\n print('\\nLearning rate = ', lr, flush=True)\n for rp in range(repeat_num):\n model.reset()\n quantizer.reset()\n\n info_str = ' '\n for cnt in range(num_iterations):\n # apply qsgd quantization method to the gradients\n g = model.gradient(batch_size)\n gh = quantizer.quantize(g)\n model.update(gh, learning_rate=lr)\n\n cur_loss = model.loss(batch_size=1024)\n loss[n, rp, cnt] += cur_loss\n loss2[n, rp, cnt] += (cur_loss**2)\n if cnt % 10 == 0:\n print(' ' * len(info_str), end='\\r', flush=True)\n\n if (not np.isfinite(cur_loss)) or (cur_loss > 1e10):\n print('Diverged.', end='\\r', flush=True)\n break\n\n info_str = ' exp: {0: 2d}, iteration: {1: 4d}, loss={2:.5f}'.format(rp, cnt, cur_loss)\n print(info_str, end='\\r', flush=True)\n\n print('')\n\n elapsed = time.time() - start\n print(' elapsed time = %.3f' % elapsed, flush=True)\n\n sio.savemat(file_name, mdict={\n 'loss': loss,\n 'loss2': loss2,\n 'lr': learning_rates,\n })\n\n\ndef compare_algorithms():\n M, N = 64, 32\n min_eig = 1\n max_eig = 4\n\n fname = os.path.join(output_folder, 'model.mat')\n if os.path.exists(fname):\n data = sio.loadmat(fname)\n Wopt = data['Wo']\n T = data['T']\n R = data['R']\n else:\n T, R = rm.create_transformation(M, min_eig, max_eig)\n Wopt = np.random.normal(0, 1, size=(M, N))\n\n sio.savemat(fname, mdict={'Wo': Wopt, 'T': T, 'R': R})\n\n print('_' * 40)\n print('Evaluating baseline...')\n fname = os.path.join(output_folder, 'baseline.mat')\n evaluate_baseline(T, Wopt, file_name=fname)\n\n print('_' * 40)\n print('Evaluating 1-bit...')\n fname = os.path.join(output_folder, 'one_bit.mat')\n evaluate_onebit(T, Wopt, file_name=fname)\n\n print('_' * 40)\n print('Evaluating QSGD...')\n fname = os.path.join(output_folder, 'qsgd.mat')\n evaluate_qsgd(T, Wopt, file_name=fname, bucket_size=512, num_levels=1)\n\n print('_' * 40)\n print('Evaluating Top-k SGD...')\n fname = os.path.join(output_folder, 'topk20-sgd.mat')\n evaluate_topksgd(T, Wopt, file_name=fname, K=20)\n\n H = 
load_hadamard_matrix(n=512)\n k = 400\n Hk = H[:, -k:] * np.sqrt(512) / np.sqrt(k)\n\n print('_' * 40)\n print('Evaluating Quantized CS SGD without feedback, all H...')\n fname = os.path.join(output_folder, 'qcssgd_nfa.mat')\n evaluate_qcssgd(T, Wopt, file_name=fname, H=H, num_levels=1, feedback=False, beta=0)\n\n print('_' * 40)\n print('Evaluating Quantized CS SGD with feedback, all H...')\n fname = os.path.join(output_folder, 'qcssgd_wfa.mat')\n evaluate_qcssgd(T, Wopt, file_name=fname, H=H, num_levels=1, feedback=True, beta=0.1)\n\n print('_' * 40)\n print('Evaluating Quantized CS SGD without feedback, partial H...')\n fname = os.path.join(output_folder, 'qcssgd_nf{}_2.mat'.format(k))\n evaluate_qcssgd(T, Wopt, file_name=fname, num_levels=2, H=Hk, feedback=False, beta=0)\n\n print('_' * 40)\n print('Evaluating Quantized CS SGD with feedback, partial H...')\n fname = os.path.join(output_folder, 'qcssgd_wf{}(0.1)_2.mat'.format(k))\n evaluate_qcssgd(T, Wopt, file_name=fname, H=Hk, num_levels=2, feedback=True, beta=.1)\n\n\n# import seaborns as sns\n# ax = sns.tsplot(time=\"timepoint\", value=\"BOLD signal\",\n# unit=\"subject\", condition=\"ROI\",\n# data=...)\n\nif __name__ == '__main__':\n print('Running numpy version...')\n compare_algorithms()\n", "id": "1412376", "language": "Python", "matching_score": 6.317688941955566, "max_stars_count": 0, "path": "SimpleMSE/np/convergence_region.py" }, { "content": "import os\nimport time\nimport numpy as np\nimport scipy.io as sio\nimport regression_model as rm\nfrom hadamard import load_hadamard_matrix\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n# os.environ['CUDA_VISIBLE_DEVICES'] = '2'\n\noutput_folder = 'QuantizedCS/SimpleMSE/tf(64,50)'\nnp.set_printoptions(precision=3, linewidth=80)\nbatch_size = 32\nrepeat_num = 10\nnum_iterations = 500\nnum_lr = 25\nlearning_rates = np.linspace(0, 0.25, num_lr + 1)[1:]\n# learning_rates = [0.02, 0.05, 0.06, 0.08, 0.1, 0.15]\n# num_lr = len(learning_rates)\n\n\ndef evaluate_baseline(T, Wopt, file_name):\n model = rm.RegressionModel()\n model.create(T, Wopt, quantizer='')\n\n loss = np.zeros((num_lr, repeat_num, num_iterations))\n loss2 = np.zeros((num_lr, repeat_num, num_iterations))\n for n, lr in enumerate(learning_rates):\n start = time.time()\n print('\\nLearning rate = ', lr, flush=True)\n for rp in range(repeat_num):\n # create model\n model.reset()\n\n info_str = ' '\n for cnt in range(num_iterations):\n # no quantization of the gradients\n g = model.compute_gradients(batch_size)\n model.apply_gradients(g, learning_rate=lr)\n\n cur_loss = model.loss(batch_size=1024)\n loss[n, rp, cnt] += cur_loss\n loss2[n, rp, cnt] += (cur_loss**2)\n if cnt % 10 == 0:\n print(' ' * len(info_str), end='\\r', flush=True)\n\n if (not np.isfinite(cur_loss)) or (cur_loss > 1e10):\n print(' Diverged.', end='\\r', flush=True)\n break\n\n info_str = ' exp: {0: 2d}, iteration: {1: 4d}, loss={2:.5f}'.format(rp, cnt, cur_loss)\n print(info_str, end='\\r', flush=True)\n\n print('')\n\n elapsed = time.time() - start\n print(' elapsed time = %.3f' % elapsed, flush=True)\n\n sio.savemat(file_name, mdict={\n 'loss': loss,\n 'loss2': loss2,\n 'lr': learning_rates,\n })\n\n\ndef evaluate_onebit(T, Wopt, file_name):\n model = rm.RegressionModel()\n model.create(T, Wopt, quantizer='one-bit')\n\n loss = np.zeros((num_lr, repeat_num, num_iterations))\n loss2 = np.zeros((num_lr, repeat_num, num_iterations))\n for n, lr in enumerate(learning_rates):\n start = time.time()\n print('\\nLearning rate = ', lr, flush=True)\n for rp in 
range(repeat_num):\n # create model\n model.reset()\n\n info_str = ' '\n for cnt in range(num_iterations):\n # apply one-bit quantization method to the gradients\n gh = model.compute_quantized_gradients(batch_size)\n model.apply_gradients([gh], learning_rate=lr)\n\n cur_loss = model.loss(batch_size=1024)\n loss[n, rp, cnt] += cur_loss\n loss2[n, rp, cnt] += (cur_loss**2)\n if cnt % 10 == 0:\n print(' ' * len(info_str), end='\\r', flush=True)\n\n if (not np.isfinite(cur_loss)) or (cur_loss > 1e10):\n print(' Diverged.', end='\\r', flush=True)\n break\n\n info_str = ' exp: {0: 2d}, iteration: {1: 4d}, loss={2:.5f}'.format(rp, cnt, cur_loss)\n print(info_str, end='\\r', flush=True)\n\n print('')\n\n elapsed = time.time() - start\n print(' elapsed time = %.3f' % elapsed, flush=True)\n\n sio.savemat(file_name, mdict={\n 'loss': loss,\n 'loss2': loss2,\n 'lr': learning_rates,\n })\n\n\ndef evaluate_qsgd(T, Wopt, file_name, bucket_size):\n # create model\n model = rm.RegressionModel()\n model.create(T, Wopt, quantizer='qsg', num_levels=1, bucket_size=bucket_size)\n\n loss = np.zeros((num_lr, repeat_num, num_iterations))\n loss2 = np.zeros((num_lr, repeat_num, num_iterations))\n for n, lr in enumerate(learning_rates):\n start = time.time()\n print('\\nLearning rate = ', lr, flush=True)\n for rp in range(repeat_num):\n model.reset()\n\n info_str = ' '\n for cnt in range(num_iterations):\n # apply qsgd quantization method to the gradients\n gh = model.compute_quantized_gradients(batch_size)\n model.apply_gradients([gh], learning_rate=lr)\n\n cur_loss = model.loss(batch_size=1024)\n loss[n, rp, cnt] += cur_loss\n loss2[n, rp, cnt] += (cur_loss**2)\n if cnt % 10 == 0:\n print(' ' * len(info_str), end='\\r', flush=True)\n\n if (not np.isfinite(cur_loss)) or (cur_loss > 1e10):\n print('Diverged.', end='\\r', flush=True)\n break\n\n info_str = ' exp: {0: 2d}, iteration: {1: 4d}, loss={2:.5f}'.format(rp, cnt, cur_loss)\n print(info_str, end='\\r', flush=True)\n\n print('')\n\n elapsed = time.time() - start\n print(' elapsed time = %.3f' % elapsed, flush=True)\n\n sio.savemat(file_name, mdict={\n 'loss': loss,\n 'loss2': loss2,\n 'lr': learning_rates,\n })\n\n\ndef evaluate_topksgd(T, Wopt, file_name, K):\n model = rm.RegressionModel()\n model.create(T, Wopt, quantizer='topk', K=K)\n\n loss = np.zeros((num_lr, repeat_num, num_iterations))\n loss2 = np.zeros((num_lr, repeat_num, num_iterations))\n for n, lr in enumerate(learning_rates):\n start = time.time()\n print('\\nLearning rate = ', lr, flush=True)\n for rp in range(repeat_num):\n # create model\n model.reset()\n\n info_str = ' '\n for cnt in range(num_iterations):\n # apply qsgd quantization method to the gradients\n gh = model.compute_quantized_gradients(batch_size)\n model.apply_gradients([gh], learning_rate=lr)\n\n cur_loss = model.loss(batch_size=1024)\n loss[n, rp, cnt] += cur_loss\n loss2[n, rp, cnt] += (cur_loss**2)\n if cnt % 10 == 0:\n print(' ' * len(info_str), end='\\r', flush=True)\n\n if (not np.isfinite(cur_loss)) or (cur_loss > 1e10):\n print('Diverged.', end='\\r', flush=True)\n break\n\n info_str = ' exp: {0: 2d}, iteration: {1: 4d}, loss={2:.5f}'.format(rp, cnt, cur_loss)\n print(info_str, end='\\r', flush=True)\n\n print('')\n\n elapsed = time.time() - start\n print(' elapsed time = %.3f' % elapsed, flush=True)\n\n sio.savemat(file_name, mdict={\n 'loss': loss,\n 'loss2': loss2,\n 'lr': learning_rates,\n })\n\n\ndef evaluate_dtqsgd(T, Wopt, file_name, H):\n model = rm.RegressionModel()\n model.create(T, Wopt, quantizer='dtq', 
num_levels=1, H=H)\n\n loss = np.zeros((num_lr, repeat_num, num_iterations))\n loss2 = np.zeros((num_lr, repeat_num, num_iterations))\n for n, lr in enumerate(learning_rates):\n start = time.time()\n print('\\nLearning rate = ', lr, flush=True)\n for rp in range(repeat_num):\n model.reset()\n\n info_str = ' '\n for cnt in range(num_iterations):\n # apply qsgd quantization method to the gradients\n gh = model.compute_quantized_gradients(batch_size)\n model.apply_gradients([gh], learning_rate=lr)\n\n cur_loss = model.loss(batch_size=1024)\n loss[n, rp, cnt] += cur_loss\n loss2[n, rp, cnt] += (cur_loss**2)\n if cnt % 10 == 0:\n print(' ' * len(info_str), end='\\r', flush=True)\n\n if (not np.isfinite(cur_loss)) or (cur_loss > 1e10):\n print('Diverged.', end='\\r', flush=True)\n break\n\n info_str = ' exp: {0: 2d}, iteration: {1: 4d}, loss={2:.5f}'.format(rp, cnt, cur_loss)\n print(info_str, end='\\r', flush=True)\n\n print('')\n\n elapsed = time.time() - start\n print(' elapsed time = %.3f' % elapsed, flush=True)\n\n sio.savemat(file_name, mdict={\n 'loss': loss,\n 'loss2': loss2,\n 'lr': learning_rates,\n })\n\n\ndef evaluate_qcssgd(T, Wopt, file_name, H, feedback, beta):\n model = rm.RegressionModel()\n model.create(T, Wopt, quantizer='qcs', num_levels=1, H=H, feedback=feedback, beta=beta)\n\n loss = np.zeros((num_lr, repeat_num, num_iterations))\n loss2 = np.zeros((num_lr, repeat_num, num_iterations))\n for n, lr in enumerate(learning_rates):\n start = time.time()\n print('\\nLearning rate = ', lr, flush=True)\n for rp in range(repeat_num):\n model.reset()\n\n info_str = ' '\n for cnt in range(num_iterations):\n # apply qsgd quantization method to the gradients\n gh = model.compute_quantized_gradients(batch_size)\n model.apply_gradients([gh], learning_rate=lr)\n\n cur_loss = model.loss(batch_size=1024)\n loss[n, rp, cnt] += cur_loss\n loss2[n, rp, cnt] += (cur_loss**2)\n if cnt % 10 == 0:\n print(' ' * len(info_str), end='\\r', flush=True)\n\n if (not np.isfinite(cur_loss)) or (cur_loss > 1e10):\n print('Diverged.', end='\\r', flush=True)\n break\n\n info_str = ' exp: {0: 2d}, iteration: {1: 4d}, loss={2:.5f}'.format(rp, cnt, cur_loss)\n print(info_str, end='\\r', flush=True)\n\n print('')\n\n elapsed = time.time() - start\n print(' elapsed time = %.3f' % elapsed, flush=True)\n\n sio.savemat(file_name, mdict={\n 'loss': loss,\n 'loss2': loss2,\n 'lr': learning_rates,\n })\n\n\ndef compare_algorithms():\n M, N = 50, 64\n bucket_size = 320\n min_eig = 1\n max_eig = 4\n H = load_hadamard_matrix(n=bucket_size)\n k = bucket_size * 3 // 4\n Hk = H[:, -k:] * np.sqrt(bucket_size) / np.sqrt(k)\n K = int(M * N * np.log2(3) / 32)\n\n fname = os.path.join(output_folder, 'model.mat')\n if os.path.exists(fname):\n data = sio.loadmat(fname)\n Wopt = data['Wo']\n T = data['T']\n R = data['R']\n else:\n T, R = rm.create_transformation(M, min_eig, max_eig)\n Wopt = np.random.normal(0, 1, size=(M, N))\n\n sio.savemat(fname, mdict={'Wo': Wopt, 'T': T, 'R': R})\n\n # print('_' * 40)\n # print('Evaluating baseline...')\n # fname = os.path.join(output_folder, 'baseline.mat')\n # evaluate_baseline(T, Wopt, file_name=fname)\n\n # print('_' * 40)\n # print('Evaluating QSGD...')\n # fname = os.path.join(output_folder, 'qsgd.mat')\n # evaluate_qsgd(T, Wopt, file_name=fname, bucket_size=bucket_size)\n\n # print('_' * 40)\n # print('Evaluating Top-k SGD...')\n # fname = os.path.join(output_folder, 'topk{}-sgd.mat'.format(K))\n # evaluate_topksgd(T, Wopt, file_name=fname, K=K)\n\n print('_' * 40)\n print('Evaluating 
Quantized CS SGD without feedback, all H...')\n fname = os.path.join(output_folder, 'qcssgd_nfa.mat')\n evaluate_qcssgd(T, Wopt, file_name=fname, H=H, feedback=False, beta=0)\n\n # print('_' * 40)\n # print('Evaluating Quantized CS SGD with feedback, all H...')\n # fname = os.path.join(output_folder, 'qcssgd_wfa.mat')\n # evaluate_qcssgd(T, Wopt, file_name=fname, H=H, feedback=True, beta=0.5)\n\n # print('_' * 40)\n # print('Evaluating Quantized CS SGD without feedback, partial H...')\n # fname = os.path.join(output_folder, 'qcssgd_nf{}.mat'.format(k))\n # evaluate_qcssgd(T, Wopt, file_name=fname, H=Hk, feedback=False, beta=0)\n\n # print('_' * 40)\n # print('Evaluating Quantized CS SGD with feedback, partial H...')\n # fname = os.path.join(output_folder, 'qcssgd_wf{}.mat'.format(k))\n # evaluate_qcssgd(T, Wopt, file_name=fname, H=Hk, feedback=True, beta=0.2)\n\n # print('_' * 40)\n # print('Evaluating Dithered Quantized Transformed SGD...')\n # fname = os.path.join(output_folder, 'dtqsgd.mat')\n # evaluate_dtqsgd(T, Wopt, file_name=fname, H=H)\n\n\n# import seaborns as sns\n# ax = sns.tsplot(time=\"timepoint\", value=\"BOLD signal\",\n# unit=\"subject\", condition=\"ROI\",\n# data=...)\n\nif __name__ == '__main__':\n compare_algorithms()\n", "id": "11777119", "language": "Python", "matching_score": 1.7709451913833618, "max_stars_count": 0, "path": "SimpleMSE/tf/convergence_region.py" }, { "content": "import os\nimport time\nimport numpy as np\nimport scipy.io as sio\nimport scipy.stats as st\nimport tensorflow as tf\nfrom models.CifarNet import CifarNetModel\nfrom datasets.cifar10_dataset import Cifar10Dataset\nfrom datasets.hadamard import load_hadamard_matrix\n\n# quantizers\nimport quantizers.cs_quantizer as np_csq\nimport quantizers.qsg_quantizer as np_qsg\nimport quantizers.dithered_transform_quantizer as np_dtq\nimport quantizers.onebit_quantizer as np_obq\nimport quantizers.topK_sgd as np_topK\nimport quantizers.atomo as np_atomo\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n# os.environ['CUDA_VISIBLE_DEVICES'] = '2'\n\ntraining_algorithm = 'GD'\nnn_settings = {\n 'initial_w': None, # initial weights\n 'initial_b': None, # initial bias\n 'training_alg': training_algorithm, # training algorithm\n 'learning_rate': 0.2, # learning rate\n 'decay_rate': 0.98, # decay rate\n 'decay_step': 500, # decay step\n 'compute_gradients': True, # compute gradients for use in distribtued training\n}\n\ndb_params = {\n 'database-dir': 'Database/CIFAR10/raw',\n 'one-hot': False,\n 'output-dimension': (24, 24),\n 'augment-training': True,\n}\n\noutput_folder = 'QuantizedCS/Quantizer/Cifarnet/mse'\nnum_evals = 20\nbatch_size = 256\nlayer_index = 2\n\ndb = Cifar10Dataset(db_settings=db_params)\ngraph = tf.Graph()\nwith graph.as_default():\n db_images, db_labels, initializer_op = db.create_dataset(['train', 'test'], batch_size, 16)\n\ndb_sess = tf.Session(graph=graph)\ndb_sess.run(initializer_op['train'])\n\n\ndef train_base_model(w0=None, b0=None):\n # training is done using batch-size=256\n nn_settings['initial_w'] = w0\n nn_settings['initial_b'] = b0\n\n nn = CifarNetModel()\n nn.create_network(nn_settings)\n\n for _ in range(150):\n x, y = db_sess.run([db_images, db_labels])\n nn.train(x, y)\n\n w0, b0 = nn.get_weights()\n\n return w0, b0\n\n\ndef evaluate_qsg(nn, bucket_size, fname):\n w0, b0 = nn.get_weights()\n input_bits = w0[layer_index].size * 32\n Q = np.arange(1, 50)\n err = np.zeros((len(Q), num_evals))\n compression_gain = np.zeros(len(Q))\n\n for nq, q in enumerate(Q):\n quantizer = 
np_qsg.qsg_quantizer(bucket_size, q)\n # compute compression gain\n x, y = db_sess.run([db_images, db_labels])\n gw, _ = nn.get_gradients(x, y)\n gw = gw[layer_index]\n v, s = quantizer.quantize(gw, reconstructed=False)\n compression_gain[nq] = input_bits / (32 * s.size + np.log2(2 * q + 1) * v.size)\n\n # compute error\n for n in range(num_evals):\n x, y = db_sess.run([db_images, db_labels])\n gw, _ = nn.get_gradients(x, y)\n gw = gw[layer_index]\n gwh = quantizer.quantize(gw, reconstructed=True)\n err[nq, n] = np.linalg.norm(gwh - gw) / np.linalg.norm(gw)\n\n sio.savemat(fname, mdict={'cg': compression_gain, 'err': err, 'Q': Q})\n\n\ndef evaluate_topksg(nn, fname):\n w0, b0 = nn.get_weights()\n input_bits = w0[layer_index].size * 32\n maxK = w0[layer_index].size // 5\n K = np.arange(1, maxK, 10)\n compression_gain = np.zeros(len(K))\n err = np.zeros((len(K), num_evals))\n\n for nk, k in enumerate(K):\n quantizer = np_topK.topk_quantizer(k)\n # compute compression gain\n x, y = db_sess.run([db_images, db_labels])\n gw, _ = nn.get_gradients(x, y)\n gw = gw[layer_index]\n ind, v = quantizer.quantize(gw, reconstructed=False)\n compression_gain[nk] = input_bits / (8 * (ind[0].size + ind[1].size) + 32 * v.size)\n\n # compute error\n for n in range(num_evals):\n x, y = db_sess.run([db_images, db_labels])\n gw, _ = nn.get_gradients(x, y)\n gw = gw[layer_index]\n gwh = quantizer.quantize(gw, reconstructed=True)\n err[nk, n] = np.linalg.norm(gwh - gw) / np.linalg.norm(gw)\n\n sio.savemat(fname, mdict={'cg': compression_gain, 'err': err, 'K': K})\n\n\ndef evaluate_atomo(nn, fname):\n w0, b0 = nn.get_weights()\n input_bits = w0[layer_index].size * 32\n maxR = np.prod(w0[layer_index].shape[:-1])\n R = np.arange(1, maxR, 10)\n compression_gain = np.zeros(len(K))\n err = np.zeros((len(K), num_evals))\n\n for nk, k in enumerate(R):\n quantizer = np_atomo.atomo_quantizer(k, True)\n\n # compute compression gain\n x, y = db_sess.run([db_images, db_labels])\n gw, _ = nn.get_gradients(x, y)\n gw = gw[layer_index]\n u, v, s = quantizer.quantize(gw, reconstructed=False)\n compression_gain[nk] = input_bits / (32 * (u.size + v.size + s.size))\n\n # compute error\n for n in range(num_evals):\n x, y = db_sess.run([db_images, db_labels])\n gw, _ = nn.get_gradients(x, y)\n gw = gw[layer_index]\n gwh = quantizer.quantize(gw, reconstructed=True)\n err[nk, n] = np.linalg.norm(gwh - gw) / np.linalg.norm(gw)\n\n sio.savemat(fname, mdict={'cg': compression_gain, 'err': err, 'R': R})\n\n\ndef evaluate_dqtsg(nn, H, fname):\n w0, b0 = nn.get_weights()\n input_bits = w0[layer_index].size * 32\n Q = np.arange(1, 50)\n err = np.zeros((len(Q), num_evals))\n compression_gain = np.zeros(len(Q))\n\n for nq, q in enumerate(Q):\n quantizer = np_dtq.dt_quantizer(H, q)\n # compute compression gain\n x, y = db_sess.run([db_images, db_labels])\n gw, _ = nn.get_gradients(x, y)\n gw = gw[layer_index]\n v, s = quantizer.quantize(gw, reconstructed=False)\n compression_gain[nq] = input_bits / (32 * s.size + np.log2(2 * q + 1) * v.size)\n\n # compute error\n for n in range(num_evals):\n x, y = db_sess.run([db_images, db_labels])\n gw, _ = nn.get_gradients(x, y)\n gw = gw[layer_index]\n gwh = quantizer.quantize(gw, reconstructed=True)\n err[nq, n] = np.linalg.norm(gwh - gw) / np.linalg.norm(gw)\n\n sio.savemat(fname, mdict={'cg': compression_gain, 'err': err, 'Q': Q})\n\n\ndef evaluate_qcssg(nn, H, fname):\n w0, b0 = nn.get_weights()\n input_bits = w0[layer_index].size * 32\n maxK = H.shape[0]\n K = np.arange(1, maxK, 10)\n Q = np.arange(1, 
5)\n\n compression_gain = np.zeros((len(K), len(Q)))\n err = np.zeros((len(K), len(Q), num_evals))\n for nk, k in enumerate(K):\n print(k/maxK, flush=True)\n Hk = H[:, -k:] * np.sqrt(maxK) / np.sqrt(k)\n for nq, q in enumerate(Q):\n quantizer = np_csq.cs_quantizer(H, q, False, 0)\n # compute compression gain\n x, y = db_sess.run([db_images, db_labels])\n gw, _ = nn.get_gradients(x, y)\n gw = gw[layer_index]\n v, s = quantizer.quantize(gw, reconstructed=False)\n compression_gain[nk, nq] = input_bits / (32 * s.size + np.log2(2 * q + 1) * s.size)\n\n # compute error\n for n in range(num_evals):\n x, y = db_sess.run([db_images, db_labels])\n gw, _ = nn.get_gradients(x, y)\n gw = gw[layer_index]\n gwh = quantizer.quantize(gw, reconstructed=True)\n err[nk, nq, n] = np.linalg.norm(gwh - gw) / np.linalg.norm(gw)\n\n sio.savemat(fname, mdict={'cg': compression_gain, 'err': err, 'K': K, 'Q': Q})\n\n\ndef test():\n bucket_sizes = [[320, 64], [320, 64], [384, 384], [384, 192], [192, 10]]\n layer_shapes = [[5, 5, 3, 64], [5, 5, 64, 64], [2304, 384], [384, 192], [192, 10]]\n bucket_size = bucket_sizes[layer_index][0]\n layer_shape = layer_shapes[layer_index]\n\n # load hadamard matrices\n H = load_hadamard_matrix(n=bucket_size)\n\n # load/train initial model\n model_fname = os.path.join(output_folder, 'model.npz')\n if not os.path.exists(model_fname):\n w0, b0 = train_base_model()\n np.savez(model_fname, *w0, *b0)\n else:\n data = np.load(model_fname, encoding='latin1')\n keys = np.sort(list(data.keys()))\n num_layers = len(keys) // 2\n w0 = [data[keys[k]] for k in range(num_layers)]\n b0 = [data[keys[k]] for k in range(num_layers, 2 * num_layers)]\n data.close()\n\n # create the neural network\n nn_settings['initial_w'] = w0\n nn_settings['initial_b'] = b0\n\n nn = CifarNetModel()\n nn.create_network(nn_settings)\n\n db_sess.run(initializer_op['test'])\n x, y = db_sess.run([db_images, db_labels])\n print('Model accuracy: ', nn.accuracy(x, y))\n\n db_sess.run(initializer_op['train'])\n\n # evaluate QSG\n # fname = os.path.join(output_folder, 'qsg.mat')\n # evaluate_qsg(nn, bucket_size, fname)\n\n # evaluate dithered transformed sg\n # fname = os.path.join(output_folder, 'dqtsg.mat')\n # evaluate_dqtsg(nn, H, fname)\n\n # evaluate quantized compressive sampling\n fname = os.path.join(output_folder, 'qcssg.mat')\n evaluate_qcssg(nn, H, fname)\n\n # evaluate top-k sg\n fname = os.path.join(output_folder, 'topk.mat')\n evaluate_topksg(nn, fname)\n\n # evaluate spectral atomo\n fname = os.path.join(output_folder, 'sp_atomo.mat')\n evaluate_atomo(nn, fname)\n\n\nif __name__ == '__main__':\n test()\n", "id": "2164328", "language": "Python", "matching_score": 7.082010746002197, "max_stars_count": 0, "path": "CompressionError/compare_quantizers_cifarnet.py" }, { "content": "import os\nimport numpy as np\nimport scipy.io as sio\nimport scipy.stats as st\nfrom models.Lenet import LenetModel\nfrom datasets.mnist_dataset import MNISTDataset\nfrom datasets.hadamard import load_hadamard_matrix\n\n# quantizers\nimport quantizers.cs_quantizer as np_csq\nimport quantizers.qsg_quantizer as np_qsg\nimport quantizers.dithered_transform_quantizer as np_dtq\nimport quantizers.onebit_quantizer as np_obq\nimport quantizers.topK_sgd as np_topK\nimport quantizers.atomo as np_atomo\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n# os.environ['CUDA_VISIBLE_DEVICES'] = '2'\n\ntraining_algorithm = 'GD'\nnn_settings = {\n 'initial_w': None, # initial weights\n 'initial_b': None, # initial bias\n 'training_alg': training_algorithm, # 
training algorithm\n 'learning_rate': 0.1, # learning rate\n 'decay_rate': 0.98, # decay rate\n 'decay_step': 500, # decay step\n 'compute_gradients': True, # compute gradients for use in distribtued training\n}\n\nmnist_folder = 'DataBase/MNIST/raw/'\noutput_folder = 'QuantizedCS/Quantizer/Lenet/mse'\n\nmnist_db = MNISTDataset()\nmnist_db.create_dataset(mnist_folder, vector_fmt=False, one_hot=False)\n\nnum_evals = 20\nbatch_size = 256\nlayer_index = 2\n\n\ndef train_base_model(w0=None, b0=None):\n # training is done using batch-size=256\n nn_settings['initial_w'] = w0\n nn_settings['initial_b'] = b0\n\n nn = LenetModel()\n nn.create_network(nn_settings)\n\n for _ in range(20):\n x, y = mnist_db.next_batch(batch_size)\n nn.train(x, y)\n\n w0, b0 = nn.get_weights()\n\n return w0, b0\n\n\ndef evaluate_qsg(nn, bucket_size, fname):\n w0, b0 = nn.get_weights()\n input_bits = w0[layer_index].size * 32\n Q = np.arange(1, 50)\n err = np.zeros((len(Q), num_evals))\n compression_gain = np.zeros(len(Q))\n\n for nq, q in enumerate(Q):\n quantizer = np_qsg.qsg_quantizer(bucket_size, q)\n # compute compression gain\n x, y = mnist_db.next_batch(batch_size)\n gw, _ = nn.get_gradients(x, y)\n gw = gw[layer_index]\n v, s = quantizer.quantize(gw, reconstructed=False)\n compression_gain[nq] = input_bits / (32 * s.size + np.log2(2 * q + 1) * v.size)\n\n # compute error\n for n in range(num_evals):\n x, y = mnist_db.next_batch(batch_size)\n gw, _ = nn.get_gradients(x, y)\n gw = gw[layer_index]\n gwh = quantizer.quantize(gw, reconstructed=True)\n err[nq, n] = np.linalg.norm(gwh - gw) / np.linalg.norm(gw)\n\n sio.savemat(fname, mdict={'cg': compression_gain, 'err': err, 'Q': Q})\n\n\ndef evaluate_topksg(nn, fname):\n w0, b0 = nn.get_weights()\n input_bits = w0[layer_index].size * 32\n maxK = w0[layer_index].size\n K = np.arange(maxK//120, maxK//5, maxK//200)\n compression_gain = np.zeros(len(K))\n err = np.zeros((len(K), num_evals))\n\n for nk, k in enumerate(K):\n print(nk/len(K), flush=True)\n quantizer = np_topK.topk_quantizer(k)\n # compute compression gain\n x, y = mnist_db.next_batch(batch_size)\n gw, _ = nn.get_gradients(x, y)\n gw = gw[layer_index]\n ind, v = quantizer.quantize(gw, reconstructed=False)\n compression_gain[nk] = input_bits / (8 * (ind[0].size + ind[1].size) + 32 * v.size)\n\n # compute error\n for n in range(num_evals):\n x, y = mnist_db.next_batch(batch_size)\n gw, _ = nn.get_gradients(x, y)\n gw = gw[layer_index]\n gwh = quantizer.quantize(gw, reconstructed=True)\n err[nk, n] = np.linalg.norm(gwh - gw) / np.linalg.norm(gw)\n\n sio.savemat(fname, mdict={'cg': compression_gain, 'err': err, 'K': K})\n\n\ndef evaluate_atomo(nn, fname):\n w0, b0 = nn.get_weights()\n input_bits = w0[layer_index].size * 32\n maxR = w0[layer_index].shape[-1]\n R = np.arange(1, maxR, maxR//40)\n compression_gain = np.zeros(len(R))\n err = np.zeros((len(R), num_evals))\n\n for nk, k in enumerate(R):\n quantizer = np_atomo.atomo_quantizer(k, True)\n\n # compute compression gain\n x, y = mnist_db.next_batch(batch_size)\n gw, _ = nn.get_gradients(x, y)\n gw = gw[layer_index]\n u, v, s = quantizer.quantize(gw, reconstructed=False)\n compression_gain[nk] = input_bits / (32 * (u.size + v.size + s.size))\n\n # compute error\n for n in range(num_evals):\n x, y = mnist_db.next_batch(batch_size)\n gw, _ = nn.get_gradients(x, y)\n gw = gw[layer_index]\n gwh = quantizer.quantize(gw, reconstructed=True)\n err[nk, n] = np.linalg.norm(gwh - gw) / np.linalg.norm(gw)\n\n sio.savemat(fname, mdict={'cg': compression_gain, 'err': err, 
'R': R})\n\n\ndef evaluate_dqtsg(nn, H, fname):\n w0, b0 = nn.get_weights()\n input_bits = w0[layer_index].size * 32\n Q = np.arange(1, 50)\n err = np.zeros((len(Q), num_evals))\n compression_gain = np.zeros(len(Q))\n\n for nq, q in enumerate(Q):\n quantizer = np_dtq.dt_quantizer(H, q)\n # compute compression gain\n x, y = mnist_db.next_batch(batch_size)\n gw, _ = nn.get_gradients(x, y)\n gw = gw[layer_index]\n v, s = quantizer.quantize(gw, reconstructed=False)\n compression_gain[nq] = input_bits / (32 * s.size + np.log2(2 * q + 1) * v.size)\n\n # compute error\n for n in range(num_evals):\n x, y = mnist_db.next_batch(batch_size)\n gw, _ = nn.get_gradients(x, y)\n gw = gw[layer_index]\n gwh = quantizer.quantize(gw, reconstructed=True)\n err[nq, n] = np.linalg.norm(gwh - gw) / np.linalg.norm(gw)\n\n sio.savemat(fname, mdict={'cg': compression_gain, 'err': err, 'Q': Q})\n\n\ndef evaluate_qcssg(nn, H, fname):\n w0, b0 = nn.get_weights()\n input_bits = w0[layer_index].size * 32\n maxK = H.shape[0]\n K = np.arange(1, maxK, 10)\n Q = np.arange(1, 5)\n\n compression_gain = np.zeros((len(K), len(Q)))\n err = np.zeros((len(K), len(Q), num_evals))\n for nk, k in enumerate(K):\n print(k / maxK, flush=True)\n Hk = H[:, -k:] * np.sqrt(maxK) / np.sqrt(k)\n for nq, q in enumerate(Q):\n quantizer = np_csq.cs_quantizer(Hk, q, False, 0)\n # compute compression gain\n x, y = mnist_db.next_batch(batch_size)\n gw, _ = nn.get_gradients(x, y)\n gw = gw[layer_index]\n v, s = quantizer.quantize(gw, reconstructed=False)\n compression_gain[nk, nq] = input_bits / (32 * s.size + np.log2(2 * q + 1) * v.size)\n\n # compute error\n for n in range(num_evals):\n x, y = mnist_db.next_batch(batch_size)\n gw, _ = nn.get_gradients(x, y)\n gw = gw[layer_index]\n gwh = quantizer.quantize(gw, reconstructed=True)\n err[nk, nq, n] = np.linalg.norm(gwh - gw) / np.linalg.norm(gw)\n\n sio.savemat(fname, mdict={'cg': compression_gain, 'err': err, 'K': K, 'Q': Q})\n\n\ndef test():\n bucket_sizes = [[200, 32], [256, 64], [256, 256], [256, 10]]\n layer_shapes = [[5, 5, 1, 32], [5, 5, 32, 64], [7 * 7 * 64, 512], [512, 10]]\n bucket_size = bucket_sizes[layer_index][0]\n layer_shape = layer_shapes[layer_index]\n\n # load hadamard matrices\n H = load_hadamard_matrix(n=bucket_size)\n\n # load/train initial model\n model_fname = os.path.join(output_folder, 'model.npz')\n if not os.path.exists(model_fname):\n w0, b0 = train_base_model()\n np.savez(model_fname, *w0, *b0)\n else:\n data = np.load(model_fname, encoding='latin1')\n keys = np.sort(list(data.keys()))\n num_layers = len(keys) // 2\n w0 = [data[keys[k]] for k in range(num_layers)]\n b0 = [data[keys[k]] for k in range(num_layers, 2 * num_layers)]\n data.close()\n\n # create the neural network\n nn_settings['initial_w'] = w0\n nn_settings['initial_b'] = b0\n\n nn = LenetModel()\n nn.create_network(nn_settings)\n\n x, y = mnist_db.test_data\n print('Model accuracy: ', nn.accuracy(x, y))\n\n # # evaluate QSG\n # fname = os.path.join(output_folder, 'qsg.mat')\n # evaluate_qsg(nn, bucket_size, fname)\n\n # # evaluate dithered transformed sg\n # fname = os.path.join(output_folder, 'dqtsg.mat')\n # evaluate_dqtsg(nn, H, fname)\n\n # evaluate quantized compressive sampling\n # fname = os.path.join(output_folder, 'qcssg.mat')\n # evaluate_qcssg(nn, H, fname)\n\n # evaluate top-k sg\n # fname = os.path.join(output_folder, 'topk.mat')\n # evaluate_topksg(nn, fname)\n\n # evaluate spectral atomo\n fname = os.path.join(output_folder, 'sp_atomo.mat')\n evaluate_atomo(nn, fname)\n\n\nif __name__ == 
'__main__':\n test()\n", "id": "9128369", "language": "Python", "matching_score": 4.009809970855713, "max_stars_count": 0, "path": "CompressionError/compare_quantizers_lenet.py" }, { "content": "import os\nimport time\nimport numpy as np\nimport scipy.io as sio\nimport scipy.stats as st\nimport tensorflow as tf\nfrom models.CifarNet import CifarNetModel\nfrom datasets.cifar10_dataset import Cifar10Dataset\nfrom datasets.hadamard import load_hadamard_matrix\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n# os.environ['CUDA_VISIBLE_DEVICES'] = '2'\n\ntraining_algorithm = 'GD'\nnn_settings = {\n 'initial_w': None, # initial weights\n 'initial_b': None, # initial bias\n 'training_alg': training_algorithm, # training algorithm\n 'learning_rate': 0.2, # learning rate\n 'decay_rate': 0.98, # decay rate\n 'decay_step': 500, # decay step\n 'compute_gradients': True, # compute gradients for use in distribtued training\n}\n\ndb_params = {\n 'database-dir': 'Database/CIFAR10/raw',\n 'one-hot': False,\n 'output-dimension': (24, 24),\n 'augment-training': True,\n}\n\noutput_folder = 'QuantizedCS/Quantizer/Cifarnet'\nnum_evals = 10\nbatch_size = 128\niter_per_eval = 100\n\ndb = Cifar10Dataset(db_settings=db_params)\ngraph = tf.Graph()\nwith graph.as_default():\n db_images, db_labels, initializer_op = db.create_dataset(['train', 'test'], batch_size, 16)\n\ndb_sess = tf.Session(graph=graph)\ndb_sess.run(initializer_op['train'])\n\n\ndef evaluate_base_model():\n # training is done using batch-size=256\n nn_settings['initial_w'] = None\n nn_settings['initial_b'] = None\n\n nn = CifarNetModel()\n nn.create_network(nn_settings)\n\n for _ in range(15):\n x, y = db_sess.run([db_images, db_labels])\n nn.train(x, y)\n\n w0, b0 = nn.get_weights()\n\n et = np.zeros(num_evals)\n\n for n in range(num_evals):\n x, y = db_sess.run([db_images, db_labels])\n start = time.time()\n for _ in range(iter_per_eval):\n gw, gb = nn.get_gradients(x, y)\n\n et[n] = (time.time() - start)\n\n return w0, b0, et\n\n\ndef evaluate_qsg(w0, b0, num_levels, bucket_size):\n # initial parameters\n nn_settings['initial_w'] = w0\n nn_settings['initial_b'] = b0\n\n # quantizer\n nn_settings['quantizer'] = 'qsg'\n nn_settings['bucket_sizes'] = bucket_size\n nn_settings['num_levels'] = num_levels\n nn_settings['H'] = None\n\n nn = CifarNetModel()\n nn.create_network(nn_settings)\n\n et = np.zeros(num_evals)\n\n for n in range(num_evals):\n x, y = db_sess.run([db_images, db_labels])\n start = time.time()\n for _ in range(iter_per_eval):\n qw, sw, qb, sb = nn.quantized_gradients(x, y)\n\n et[n] = (time.time() - start)\n\n return et\n\n\ndef evaluate_topksg(w0, b0, K):\n # initial parameters\n nn_settings['initial_w'] = w0\n nn_settings['initial_b'] = b0\n\n # quantizer\n nn_settings['quantizer'] = 'topk'\n nn_settings['K'] = K\n\n nn = CifarNetModel()\n nn.create_network(nn_settings)\n\n et = np.zeros(num_evals)\n\n for n in range(num_evals):\n x, y = db_sess.run([db_images, db_labels])\n start = time.time()\n for _ in range(iter_per_eval):\n qw, sw, qb, sb = nn.quantized_gradients(x, y)\n\n et[n] = (time.time() - start)\n\n return et\n\n\ndef evaluate_dqtsg(w0, b0, num_levels, H):\n # initial parameters\n nn_settings['initial_w'] = w0\n nn_settings['initial_b'] = b0\n\n # quantizer\n nn_settings['quantizer'] = 'dithered-transform'\n nn_settings['num_levels'] = num_levels\n nn_settings['H'] = H\n\n nn = CifarNetModel()\n nn.create_network(nn_settings)\n\n et = np.zeros(num_evals)\n\n for n in range(num_evals):\n x, y = db_sess.run([db_images, 
db_labels])\n start = time.time()\n for _ in range(iter_per_eval):\n qw, sw, qb, sb = nn.quantized_gradients(x, y)\n\n et[n] = (time.time() - start)\n\n return et\n\n\ndef evaluate_qcssg(w0, b0, num_levels, H, err_feedback, feedback_beta):\n # initial parameters\n nn_settings['initial_w'] = w0\n nn_settings['initial_b'] = b0\n\n # quantizer\n nn_settings['quantizer'] = 'quantized-cs'\n nn_settings['num_levels'] = num_levels\n nn_settings['H'] = H\n nn_settings['error_feedback'] = err_feedback\n nn_settings['feedback_weight'] = feedback_beta\n\n nn = CifarNetModel()\n nn.create_network(nn_settings)\n\n et = np.zeros(num_evals)\n\n for n in range(num_evals):\n x, y = db_sess.run([db_images, db_labels])\n start = time.time()\n for _ in range(iter_per_eval):\n qw, sw, qb, sb = nn.quantized_gradients(x, y)\n\n et[n] = (time.time() - start)\n\n return et\n\n\ndef test():\n num_levels = 1\n bucket_size = [[320, 64], [320, 64], [384, 384], [384, 192], [192, 10]]\n layer_shapes = [[5, 5, 3, 64], [5, 5, 64, 64], [2304, 384], [384, 192], [192, 10]]\n\n K = [[0, 0] for _ in range(len(layer_shapes))]\n for n in range(len(layer_shapes)):\n K[n][0] = int(0.5 + np.prod(layer_shapes[n]) * np.log2(3) / 32)\n K[n][1] = int(0.5 + layer_shapes[n][-1] * np.log2(3) / 32)\n\n nn_settings['layer_shapes'] = layer_shapes\n nn_settings['quantizer_seeds'] = [np.random.randint(1000, 10000000, size=2 * len(layer_shapes)).tolist()]\n\n # load hadamard matrices\n H = [[0, 0] for _ in range(len(bucket_size))]\n Hk = [[0, 0] for _ in range(len(bucket_size))]\n for layer, d in enumerate(bucket_size):\n H[layer][0] = load_hadamard_matrix(d[0])\n H[layer][1] = load_hadamard_matrix(d[1])\n\n w0, b0, et_base = evaluate_base_model()\n et_qsg = evaluate_qsg(w0, b0, num_levels=num_levels, bucket_size=bucket_size)\n et_tksg = evaluate_topksg(w0, b0, K)\n et_dqtsg = evaluate_dqtsg(w0, b0, num_levels=num_levels, H=H)\n et_qcssg0 = evaluate_qcssg(w0, b0, num_levels=num_levels, H=H, err_feedback=False, feedback_beta=0)\n et_qcssg1 = evaluate_qcssg(w0, b0, num_levels=num_levels, H=H, err_feedback=True, feedback_beta=0.1)\n\n print(\n 'baseline: {:.3f}, QSG: {:.3f}, TopK: {:.3f}, DQTSG: {:.3f}'.format(\n np.mean(et_base), np.mean(et_qsg), np.mean(et_tksg), np.mean(et_dqtsg)\n )\n )\n print('QCSSG,w/o Feedback {:.3f}, QCSSG,w/ Feedback {:.3f}'.format(np.mean(et_qcssg0), np.mean(et_qcssg1)))\n\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n\n fname = os.path.join(output_folder, 'gpu_run_time_%d.mat' % batch_size)\n sio.savemat(\n fname,\n mdict={\n 'base': et_base,\n 'qsg': et_qsg,\n 'tksg': et_tksg,\n 'dqtsg': et_dqtsg,\n 'qcssg_nf': et_qcssg0,\n 'qcssg_wf': et_qcssg1\n }\n )\n\n\nif __name__ == '__main__':\n test()\n", "id": "4441272", "language": "Python", "matching_score": 4.926519393920898, "max_stars_count": 0, "path": "QuantizerTest/compare_quantizers_cifarnet.py" }, { "content": "import os\nimport time\nimport numpy as np\nimport scipy.io as sio\nimport scipy.stats as st\nimport tensorflow as tf\nfrom models.Alexnet import AlexnetModel\nfrom datasets.tfr.imagenet_tfr import ImagenetDataSet\nfrom datasets.hadamard import load_hadamard_matrix\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n# os.environ['CUDA_VISIBLE_DEVICES'] = '2'\n\ntraining_algorithm = 'GD'\nnn_settings = {\n 'initial_w': None, # initial weights\n 'initial_b': None, # initial bias\n 'training_alg': training_algorithm, # training algorithm\n 'learning_rate': 0.001, # learning rate\n 'decay_rate': 0.98, # decay rate\n 'decay_step': 500, # decay 
step\n 'compute_gradients': True, # compute gradients for use in distribtued training\n}\n\noutput_folder = 'QuantizedCS/Quantizer/Alexnet'\nnum_evals = 10\nbatch_size = 256\niter_per_eval = 100\n\ndb_params = {\n 'data_dir': 'DataBase/Imagenet/ILSVRC2012/tfr',\n 'image_size': (227, 227),\n 'BGR': True,\n 'one_hot': True,\n 'resize_range': (256, 384),\n 'num_train_samples': 1281167,\n 'num_train_files': 1024,\n 'train_filenames': 'train/train-{0:05d}-of-{1:05d}',\n 'num_validation_samples': 50000,\n 'num_validation_files': 128,\n 'validation_filenames': 'validation/validation-{0:05d}-of-{1:05d}',\n 'augment_training': True,\n 'shuffle_buffer': 0.0001,\n 'num_classes': 1000,\n}\n\ndb = ImagenetDataSet(db_settings=db_params)\ngraph = tf.Graph()\nwith graph.as_default():\n imgnet_data, initializer_op = db.get_data(['train', 'validation'], batch_size=batch_size)\n\ndb_images = imgnet_data[0]\ndb_labels = imgnet_data[1]\ndb_sess = tf.Session(graph=graph)\ndb_sess.run(initializer_op['train'])\n\n\ndef evaluate_base_model():\n # training is done using batch-size=256\n nn_settings['initial_w'] = None\n nn_settings['initial_b'] = None\n\n nn = AlexnetModel()\n nn.create_network(nn_settings)\n\n for _ in range(15):\n x, y = db_sess.run([db_images, db_labels])\n nn.train(x, y)\n\n w0, b0 = nn.get_weights()\n\n et = np.zeros(num_evals)\n\n for n in range(num_evals):\n x, y = db_sess.run([db_images, db_labels])\n start = time.time()\n for _ in range(iter_per_eval):\n gw, gb = nn.get_gradients(x, y)\n\n et[n] = (time.time() - start)\n\n return w0, b0, et\n\n\ndef evaluate_qsg(w0, b0, num_levels, bucket_size):\n # initial parameters\n nn_settings['initial_w'] = w0\n nn_settings['initial_b'] = b0\n\n # quantizer\n nn_settings['quantizer'] = 'qsg'\n nn_settings['bucket_sizes'] = bucket_size\n nn_settings['num_levels'] = num_levels\n nn_settings['H'] = None\n\n nn = AlexnetModel()\n nn.create_network(nn_settings)\n\n et = np.zeros(num_evals)\n\n for n in range(num_evals):\n x, y = db_sess.run([db_images, db_labels])\n start = time.time()\n for _ in range(iter_per_eval):\n qw, sw, qb, sb = nn.quantized_gradients(x, y)\n\n et[n] = (time.time() - start)\n\n return et\n\n\ndef evaluate_topksg(w0, b0, K):\n # initial parameters\n nn_settings['initial_w'] = w0\n nn_settings['initial_b'] = b0\n\n # quantizer\n nn_settings['quantizer'] = 'topk'\n nn_settings['K'] = K\n\n nn = AlexnetModel()\n nn.create_network(nn_settings)\n\n et = np.zeros(num_evals)\n\n for n in range(num_evals):\n x, y = db_sess.run([db_images, db_labels])\n start = time.time()\n for _ in range(iter_per_eval):\n qw, sw, qb, sb = nn.quantized_gradients(x, y)\n\n et[n] = (time.time() - start)\n\n return et\n\n\ndef evaluate_dqtsg(w0, b0, num_levels, H):\n # initial parameters\n nn_settings['initial_w'] = w0\n nn_settings['initial_b'] = b0\n\n # quantizer\n nn_settings['quantizer'] = 'dithered-transform'\n nn_settings['num_levels'] = num_levels\n nn_settings['H'] = H\n\n nn = AlexnetModel()\n nn.create_network(nn_settings)\n\n et = np.zeros(num_evals)\n\n for n in range(num_evals):\n x, y = db_sess.run([db_images, db_labels])\n start = time.time()\n for _ in range(iter_per_eval):\n qw, sw, qb, sb = nn.quantized_gradients(x, y)\n\n et[n] = (time.time() - start)\n\n return et\n\n\ndef evaluate_qcssg(w0, b0, num_levels, H, err_feedback, feedback_beta):\n # initial parameters\n nn_settings['initial_w'] = w0\n nn_settings['initial_b'] = b0\n\n # quantizer\n nn_settings['quantizer'] = 'quantized-cs'\n nn_settings['num_levels'] = num_levels\n 
nn_settings['H'] = H\n nn_settings['error_feedback'] = err_feedback\n nn_settings['feedback_weight'] = feedback_beta\n\n nn = AlexnetModel()\n nn.create_network(nn_settings)\n\n et = np.zeros(num_evals)\n\n for n in range(num_evals):\n x, y = db_sess.run([db_images, db_labels])\n start = time.time()\n for _ in range(iter_per_eval):\n qw, sw, qb, sb = nn.quantized_gradients(x, y)\n\n et[n] = (time.time() - start)\n\n return et\n\n\ndef evaluate_qsg(w0, b0, num_levels, bucket_size):\n # initial parameters\n nn_settings['initial_w'] = w0\n nn_settings['initial_b'] = b0\n\n # quantizer\n nn_settings['quantizer'] = 'qsg'\n nn_settings['bucket_sizes'] = bucket_size\n nn_settings['num_levels'] = num_levels\n nn_settings['H'] = None\n\n nn = AlexnetModel()\n nn.create_network(nn_settings)\n\n err = np.zeros(num_evals)\n\n for n in range(num_evals):\n x, y = db_sess.run([db_images, db_labels])\n gw, _ = nn.get_gradients(x, y)\n gwh, *_ = nn.dequantized_gradients(x, y)\n err[n] = np.linalg.norm(gwh[layer_index] - gw[layer_index]) / (np.linalg.norm(gw[layer_index]) + 1e-12)\n\n return err\n\ndef evaluate_dqsg(w0, b0, num_levels, bucket_size):\n # initial parameters\n nn_settings['initial_w'] = w0\n nn_settings['initial_b'] = b0\n\n # quantizer\n nn_settings['quantizer'] = 'dqsg'\n nn_settings['bucket_sizes'] = bucket_size\n nn_settings['num_levels'] = num_levels\n\n nn = AlexnetModel()\n nn.create_network(nn_settings)\n\n err = np.zeros(num_evals)\n\n for n in range(num_evals):\n x, y = db_sess.run([db_images, db_labels])\n seeds = np.random.randint(1000, 10000000, size=2 * nn.number_layers).tolist()\n gw, _ = nn.get_gradients(x, y)\n gwh, *_ = nn.dequantized_gradients(x, y, seeds)\n err[n] = np.linalg.norm(gwh[layer_index] - gw[layer_index]) / (np.linalg.norm(gw[layer_index]) + 1e-12)\n\n return err\n\ndef evaluate_dqtsg(w0, b0, num_levels, bucket_size, H):\n # initial parameters\n nn_settings['initial_w'] = w0\n nn_settings['initial_b'] = b0\n\n # quantizer\n nn_settings['quantizer'] = 'dqtsg'\n nn_settings['bucket_sizes'] = bucket_size\n nn_settings['num_levels'] = num_levels\n nn_settings['H'] = H\n\n nn = AlexnetModel()\n nn.create_network(nn_settings)\n\n err = np.zeros(num_evals)\n\n for n in range(num_evals):\n x, y = db_sess.run([db_images, db_labels])\n seeds = np.random.randint(1000, 10000000, size=2 * nn.number_layers).tolist()\n gw, _ = nn.get_gradients(x, y)\n gwh, *_ = nn.dequantized_gradients(x, y, seeds)\n err[n] = np.linalg.norm(gwh[layer_index] - gw[layer_index]) / (np.linalg.norm(gw[layer_index]) + 1e-12)\n\n return err\n\n\ndef test():\n num_levels = 1\n bucket_size = [[288, 96], [256, 256], [384, 384], [384, 384], [256, 256], [256, 256], [256, 256], [256, 200]]\n layer_shapes = [\n [11, 11, 3, 96], [5, 5, 48, 256], [3, 3, 256, 384], [3, 3, 192, 384], [3, 3, 192, 256], [9216, 4096],\n [4096, 4096], [4096, 1000]\n ]\n\n K = [[0, 0] for _ in range(len(layer_shapes))]\n for n in range(len(layer_shapes)):\n K[n][0] = int(0.5 + np.prod(layer_shapes[n]) * np.log2(3) / 32)\n K[n][1] = int(0.5 + layer_shapes[n][-1] * np.log2(3) / 32)\n\n nn_settings['layer_shapes'] = layer_shapes\n nn_settings['quantizer_seeds'] = [np.random.randint(1000, 10000000, size=2 * len(layer_shapes)).tolist()]\n\n # load hadamard matrices\n H = [[0, 0] for _ in range(len(bucket_size))]\n Hk = [[0, 0] for _ in range(len(bucket_size))]\n for layer, d in enumerate(bucket_size):\n H[layer][0] = load_hadamard_matrix(d[0])\n H[layer][1] = load_hadamard_matrix(d[1])\n\n w0, b0, et_base = evaluate_base_model()\n et_qsg 
= evaluate_qsg(w0, b0, num_levels=num_levels, bucket_size=bucket_size)\n et_tksg = evaluate_topksg(w0, b0, K)\n et_dqtsg = evaluate_dqtsg(w0, b0, num_levels=num_levels, H=H)\n et_qcssg0 = evaluate_qcssg(w0, b0, num_levels=num_levels, H=H, err_feedback=False, feedback_beta=0)\n et_qcssg1 = evaluate_qcssg(w0, b0, num_levels=num_levels, H=H, err_feedback=True, feedback_beta=0.1)\n\n print(\n 'baseline: {:.3f}, QSG: {:.3f}, TopK: {:.3f}, DQTSG: {:.3f}'.format(\n np.mean(et_base), np.mean(et_qsg), np.mean(et_tksg), np.mean(et_dqtsg)\n )\n )\n print('QCSSG,w/o Feedback {:.3f}, QCSSG,w/ Feedback {:.3f}'.format(np.mean(et_qcssg0), np.mean(et_qcssg1)))\n\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n\n fname = os.path.join(output_folder, 'test_gpu_run_time_%d.mat' % batch_size)\n sio.savemat(\n fname,\n mdict={\n 'base': et_base,\n 'qsg': et_qsg,\n 'tksg': et_tksg,\n 'dqtsg': et_dqtsg,\n 'qcssg_nf': et_qcssg0,\n 'qcssg_wf': et_qcssg1\n }\n )\n\n\nif __name__ == '__main__':\n test()\n", "id": "9484656", "language": "Python", "matching_score": 6.928875923156738, "max_stars_count": 0, "path": "QuantizerTest/compare_quantizers_alexnet.py" }, { "content": "import os\nimport time\nimport numpy as np\nimport scipy.io as sio\nimport scipy.stats as st\nimport tensorflow as tf\nfrom models.Alexnet import AlexnetModel\nfrom datasets.tfr.imagenet_tfr import ImagenetDataSet\nfrom datasets.hadamard import load_hadamard_matrix\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n# os.environ['CUDA_VISIBLE_DEVICES'] = '2'\n\ntraining_algorithm = 'GD'\nnn_settings = {\n 'initial_w': None, # initial weights\n 'initial_b': None, # initial bias\n 'training_alg': training_algorithm, # training algorithm\n 'learning_rate': 0.001, # learning rate\n 'decay_rate': 0.98, # decay rate\n 'decay_step': 500, # decay step\n 'compute_gradients': True, # compute gradients for use in distribtued training\n}\n\noutput_folder = 'QuantizedCS/Quantizer/'\nnum_evals = 10\nbatch_size = 200\nlayer_index = 6\n\ndb_params = {\n 'data_dir': 'DataBase/Imagenet/ILSVRC2012/tfr',\n 'image_size': (227, 227),\n 'BGR': True,\n 'one_hot': True,\n 'resize_range': (256, 384),\n 'num_train_samples': 1281167,\n 'num_train_files': 1024,\n 'train_filenames': 'train/train-{0:05d}-of-{1:05d}',\n 'num_validation_samples': 50000,\n 'num_validation_files': 128,\n 'validation_filenames': 'validation/validation-{0:05d}-of-{1:05d}',\n 'augment_training': True,\n 'shuffle_buffer': 0.0001,\n 'num_classes': 1000,\n}\n\ndb = ImagenetDataSet(db_settings=db_params)\ngraph = tf.Graph()\nwith graph.as_default():\n imgnet_data, initializer_op = db.get_data(['train', 'validation'], batch_size=batch_size)\n\ndb_images = imgnet_data[0]\ndb_labels = imgnet_data[1]\ndb_sess = tf.Session(graph=graph)\ndb_sess.run(initializer_op['train'])\n\n\ndef train_base_model(w0=None, b0=None):\n # training is done using batch-size=256\n nn_settings['initial_w'] = w0\n nn_settings['initial_b'] = b0\n\n nn = AlexnetModel()\n nn.create_network(nn_settings)\n\n for n in range(10):\n x, y = db_sess.run([db_images, db_labels])\n nn.train(x, y)\n\n w0, b0 = nn.get_weights()\n\n return w0, b0\n\n\ndef evaluate_qsg(w0, b0, num_levels, bucket_size):\n # initial parameters\n nn_settings['initial_w'] = w0\n nn_settings['initial_b'] = b0\n\n # quantizer\n nn_settings['quantizer'] = 'qsg'\n nn_settings['bucket_sizes'] = bucket_size\n nn_settings['num_levels'] = num_levels\n nn_settings['H'] = None\n\n nn = AlexnetModel()\n nn.create_network(nn_settings)\n\n err = np.zeros(num_evals)\n\n 
for n in range(num_evals):\n x, y = db_sess.run([db_images, db_labels])\n gw, _ = nn.get_gradients(x, y)\n gwh, *_ = nn.dequantized_gradients(x, y)\n err[n] = np.linalg.norm(gwh[layer_index] - gw[layer_index]) / (np.linalg.norm(gw[layer_index]) + 1e-12)\n\n return err\n\n\ndef evaluate_dqsg(w0, b0, num_levels, bucket_size):\n # initial parameters\n nn_settings['initial_w'] = w0\n nn_settings['initial_b'] = b0\n\n # quantizer\n nn_settings['quantizer'] = 'dqsg'\n nn_settings['bucket_sizes'] = bucket_size\n nn_settings['num_levels'] = num_levels\n\n nn = AlexnetModel()\n nn.create_network(nn_settings)\n\n err = np.zeros(num_evals)\n\n for n in range(num_evals):\n x, y = db_sess.run([db_images, db_labels])\n seeds = np.random.randint(1000, 10000000, size=2 * nn.number_layers).tolist()\n gw, _ = nn.get_gradients(x, y)\n gwh, *_ = nn.dequantized_gradients(x, y, seeds)\n err[n] = np.linalg.norm(gwh[layer_index] - gw[layer_index]) / (np.linalg.norm(gw[layer_index]) + 1e-12)\n\n return err\n\n\ndef evaluate_dqtsg(w0, b0, num_levels, bucket_size, H):\n # initial parameters\n nn_settings['initial_w'] = w0\n nn_settings['initial_b'] = b0\n\n # quantizer\n nn_settings['quantizer'] = 'dqtsg'\n nn_settings['bucket_sizes'] = bucket_size\n nn_settings['num_levels'] = num_levels\n nn_settings['H'] = H\n\n nn = AlexnetModel()\n nn.create_network(nn_settings)\n\n err = np.zeros(num_evals)\n\n for n in range(num_evals):\n x, y = db_sess.run([db_images, db_labels])\n seeds = np.random.randint(1000, 10000000, size=2 * nn.number_layers).tolist()\n gw, _ = nn.get_gradients(x, y)\n gwh, *_ = nn.dequantized_gradients(x, y, seeds)\n err[n] = np.linalg.norm(gwh[layer_index] - gw[layer_index]) / (np.linalg.norm(gw[layer_index]) + 1e-12)\n\n return err\n\n\ndef test():\n bucket_size = [[288, 96], [256, 256], [384, 384], [384, 384], [256, 256], [256, 256], [256, 256], [256, 200]]\n\n # load hadamard matrices\n H = [[0, 0] for _ in range(len(bucket_size))]\n for layer, d in enumerate(bucket_size):\n H[layer][0] = load_hadamard_matrix(d[0])\n H[layer][1] = load_hadamard_matrix(d[1])\n\n for exp in range(5):\n w0, b0 = None, None\n for rep in range(5):\n # random orthonormal matrices\n T = [[0, 0] for _ in range(len(bucket_size))]\n for layer, d in enumerate(bucket_size):\n T[layer][0] = st.ortho_group.rvs(dim=d[0])\n T[layer][1] = st.ortho_group.rvs(dim=d[1])\n\n w0, b0 = train_base_model(w0, b0)\n err_qsg = evaluate_qsg(w0, b0, num_levels=1, bucket_size=bucket_size)\n err_dqsg = evaluate_dqsg(w0, b0, num_levels=1, bucket_size=bucket_size)\n err_dqtsgH = evaluate_dqtsg(w0, b0, num_levels=1, bucket_size=bucket_size, H=H)\n err_dqtsgT = evaluate_dqtsg(w0, b0, num_levels=1, bucket_size=bucket_size, H=T)\n\n print(\n 'QSG: {:.3f}, DQSG: {:.3f}, DQTSG(H): {:.3f}, DQTSG(T): {:.3f}'.format(\n np.mean(err_qsg), np.mean(err_dqsg), np.mean(err_dqtsgH), np.mean(err_dqtsgT)\n )\n )\n\n fname = os.path.join(output_folder, 'Alexnet/qe_%d_%d.mat' % (exp, rep))\n sio.savemat(fname, mdict={'qsg': err_qsg, 'dqsg': err_dqsg, 'dqtsgH': err_dqtsgH, 'dqtsgT': err_dqtsgT})\n\n\nif __name__ == '__main__':\n test()\n", "id": "11342987", "language": "Python", "matching_score": 4.056458473205566, "max_stars_count": 0, "path": "CompressionError/compare_quantizers_alexnet.py" }, { "content": "\"\"\"\n Imagenet, ILSVRC2012 data set.\n\"\"\"\n\nimport os\nimport tensorflow as tf\n\n# db_setting = {\n# 'data_dir': '',\n# 'image_size': (227, 227),\n# 'BGR': True,\n# 'one_hot': False,\n# 'resize_range': (256, 384),\n# 'num_train_samples': 1281167,\n# 
'num_train_files': 1024,\n# 'train_filenames': '{0:05d}',\n# 'num_validation_samples': 50000,\n# 'num_validation_files': 128,\n# 'validation_filenames': '{0:05d}',\n# 'augment_training': False,\n# 'shuffle_buffer': 0.0001,\n# 'num_classes': 1000,\n# 'label_offset': 1,\n# }\n\n\nclass ImagenetDataSet(object):\n def __init__(self, db_settings: dict):\n # read the settings of the database\n self.data_dir = db_settings.get('data_dir', '')\n self._one_hot = db_settings.get('one_hot', False)\n self._augment_training = db_settings.get('augment_training', False)\n self._image_size = db_settings.get('image_size', (227, 227))\n self._size_range = db_settings.get('resize_range', (256, 384))\n self._BGR = db_settings.get('BGR_format', True)\n \n # the labels in the tfr files is different than the ones some other models trained on. \n # making the labels consistent with class_names in ilsvrc2012_classes\n self._lbl_offset = db_settings.get('label_offset', 1)\n\n # fill the list of training files names\n N = db_settings.get('num_train_files', 1024)\n filename = db_settings.get('train_filenames', '{0:05d}')\n self._train_filenames = [os.path.join(\n self.data_dir, filename.format(n, N)) for n in range(N)]\n\n # fill the list of training files names\n N = db_settings.get('num_validation_files', 128)\n filename = db_settings.get('validation_filenames', '{0:05d}')\n self._validation_filenames = [os.path.join(\n self.data_dir, filename.format(n, N)) for n in range(N)]\n\n self._samples_per_epoch = {\n 'train': db_settings.get('num_train_samples', 1281167),\n 'validation': db_settings.get('num_validation_samples', 50000)}\n\n self._num_classes = db_settings.get('num_classes', 1000)\n self._shuffle_buffer = db_settings.get('shuffle_buffer', 0.002)\n\n def parser(self, serialized_example):\n f = tf.parse_single_example(serialized_example, features={\n 'image/filename': tf.FixedLenFeature([], tf.string),\n 'image/encoded': tf.FixedLenFeature([], tf.string),\n 'image/format': tf.FixedLenFeature([], tf.string),\n 'image/class/label': tf.FixedLenFeature([], tf.int64),\n 'image/class/text': tf.FixedLenFeature([], tf.string),\n 'image/object/bbox/xmin': tf.FixedLenSequenceFeature([], tf.float32, True),\n 'image/object/bbox/ymin': tf.FixedLenSequenceFeature([], tf.float32, True),\n 'image/object/bbox/xmax': tf.FixedLenSequenceFeature([], tf.float32, True),\n 'image/object/bbox/ymax': tf.FixedLenSequenceFeature([], tf.float32, True),\n })\n\n filename = f['image/filename']\n image = tf.image.decode_jpeg(f['image/encoded'], channels=3)\n label = f['image/class/label'] - self._lbl_offset\n label = tf.floormod(label, self._num_classes)\n if self._one_hot:\n label = tf.one_hot(label, self._num_classes)\n\n label_text = f['image/class/text']\n\n ymin = tf.reduce_min(f['image/object/bbox/ymin'])\n xmin = tf.reduce_min(f['image/object/bbox/xmin'])\n ymax = tf.reduce_max(f['image/object/bbox/ymax'])\n xmax = tf.reduce_max(f['image/object/bbox/xmax'])\n bounding_box = [ymin, xmin, ymax, xmax]\n\n return image, label, label_text, filename, bounding_box\n\n def _resize_image_keep_aspect(self, image, min_size):\n # Take width/height\n initial_width = tf.shape(image)[0]\n initial_height = tf.shape(image)[1]\n\n # Take the greater value, and use it for the ratio\n min_ = tf.minimum(initial_width, initial_height)\n ratio = tf.to_float(tf.truediv(min_, min_size))\n\n new_width = tf.to_int32(tf.to_float(initial_width) / ratio)\n new_height = tf.to_int32(tf.to_float(initial_height) / ratio)\n\n return tf.image.resize_images(image, 
size=(new_width, new_height),\n method=tf.image.ResizeMethod.BILINEAR)\n\n def _get_training_dataset(self, batch_size, num_parallel_calls=8):\n # Repeat infinitely.\n dataset = tf.data.TFRecordDataset(\n self._train_filenames).repeat()\n\n # Parse records.\n dataset = dataset.map(\n self.parser, num_parallel_calls=num_parallel_calls)\n\n # augment dataset\n if self._augment_training:\n dataset = dataset.map(\n self._augment_training_image, num_parallel_calls=num_parallel_calls)\n else:\n dataset = dataset.map(self._prepare_validation_image,\n num_parallel_calls=num_parallel_calls)\n\n min_queue_examples = int(self._samples_per_epoch['train'] *\n self._shuffle_buffer)\n # Ensure that the capacity is sufficiently large to provide good random\n # shuffling.\n dataset = dataset.shuffle(\n buffer_size=min_queue_examples + 3 * batch_size)\n\n # Batch it up.\n dataset = dataset.batch(batch_size)\n dataset = dataset.prefetch(1)\n\n return dataset\n\n def _get_validation_dataset(self, batch_size=None, num_parallel_calls=8):\n if batch_size is None:\n batch_size = self._samples_per_epoch['validation']\n\n # Repeat infinitely.\n dataset = tf.data.TFRecordDataset(\n self._validation_filenames).repeat()\n\n # Parse records.\n dataset = dataset.map(\n self.parser, num_parallel_calls=num_parallel_calls)\n dataset = dataset.map(self._prepare_validation_image,\n num_parallel_calls=num_parallel_calls)\n dataset = dataset.batch(batch_size)\n\n return dataset\n\n def _augment_training_image(self, image, *args):\n # 1- resize image to a random dimension\n min_size = tf.random_uniform([], minval=self._size_range[0],\n maxval=self._size_range[1]+1, dtype=tf.int32)\n image = self._resize_image_keep_aspect(image, min_size)\n\n # 2- crop the image to the desired output size\n image = tf.random_crop(\n image, [self._image_size[0], self._image_size[1], 3])\n image.set_shape([self._image_size[0], self._image_size[1], 3])\n\n # 3- randomly flip the image\n image = tf.image.random_flip_left_right(image)\n\n # 4- if necessary, change RGB to BGR format\n if self._BGR:\n image = tf.reverse(image, axis=[-1])\n\n # 5- subtract the mean\n imagenet_mean = tf.constant([123.68, 116.779, 103.939],\n dtype=tf.float32)\n image = tf.subtract(tf.to_float(image), imagenet_mean)\n\n return (image, ) + args\n\n def _prepare_validation_image(self, image, *args):\n # 1- resize image\n image = self._resize_image_keep_aspect(image, self._size_range[0])\n\n # 2- crop the center of image with the desired output size\n image_height = tf.shape(image)[0]\n image_width = tf.shape(image)[1]\n\n offset_height = (image_height - self._image_size[0]) // 2\n offset_width = (image_width - self._image_size[1]) // 2\n image = tf.image.crop_to_bounding_box(image, offset_height, offset_width,\n self._image_size[0], self._image_size[1])\n image.set_shape([self._image_size[0], self._image_size[1], 3])\n\n # 3- if necessary, change RGB to BGR format\n if self._BGR:\n image = tf.reverse(image, axis=[-1])\n\n # 4- subtract the mean\n imagenet_mean = tf.constant([123.68, 116.779, 103.939],\n dtype=tf.float32)\n image = tf.subtract(tf.to_float(image), imagenet_mean)\n\n return (image, ) + args\n\n def get_data(self, subset, batch_size, num_parallel_calls=8, device='/cpu:0'):\n with tf.device(device):\n databases = {}\n if 'train' in subset:\n databases['train'] = self._get_training_dataset(batch_size, num_parallel_calls)\n\n if 'validation' in subset:\n databases['validation'] = self._get_validation_dataset(batch_size, num_parallel_calls)\n\n assert databases != 
{}, 'no valid subset of database is provided.'\n\n # get the types and shapes of the database outputs\n tmp_db = next(iter(databases.items()))[1]\n db_types = tmp_db.output_types\n db_shapes = tmp_db.output_shapes\n db_classes = tmp_db.output_classes\n\n # define the iterator and generate different initializers\n iterator = tf.data.Iterator.from_structure(\n db_types, output_shapes=db_shapes, output_classes=db_classes)\n features = iterator.get_next()\n\n # define different initializers\n initializer_op = {}\n for k in databases.keys():\n initializer_op[k] = iterator.make_initializer(databases[k])\n\n return features, initializer_op\n\n def get_number_samples(self, subset='train'):\n assert subset in self._samples_per_epoch.keys(), 'no valid subset of dataset is provided!'\n return self._samples_per_epoch[subset]\n", "id": "2952483", "language": "Python", "matching_score": 6.174973487854004, "max_stars_count": 0, "path": "QuantizerTest/datasets/tfr/imagenet_tfr.py" }, { "content": "\"\"\"\n MNIST data set.\n See http://yann.lecun.com/exdb/mnist/.\n\"\"\"\n\nimport os\nimport tensorflow as tf\n\n# dimensions of original MNIST images\nHEIGHT = 28\nWIDTH = 28\n\n\nclass MNISTDataSet(object):\n def __init__(self, data_dir):\n self.data_dir = data_dir\n\n self._training_filename = os.path.join(\n self.data_dir, 'mnist_train.tfrecord')\n self._test_filename = os.path.join(\n self.data_dir, 'mnist_test.tfrecord')\n\n self._samples_per_epoch = {'train': 60000, 'test': 10000}\n\n def parser(self, serialized_example):\n \"\"\"Parses a single tf.Example into image and label tensors.\"\"\"\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'image/encoded': tf.FixedLenFeature([], tf.string),\n 'image/format': tf.FixedLenFeature([], tf.string),\n 'image/class/label': tf.FixedLenFeature([], tf.int64),\n 'image/height': tf.FixedLenFeature([1], tf.int64),\n 'image/width': tf.FixedLenFeature([1], tf.int64),\n })\n image = tf.image.decode_png(\n features['image/encoded'], channels=1, dtype=tf.uint8)\n image = tf.cast(image, tf.float32) / 255\n label = tf.cast(features['image/class/label'], tf.int32)\n\n return image, label\n\n def _get_training_dataset(self, batch_size, num_parallel_calls=8):\n # Repeat infinitely.\n dataset = tf.data.TFRecordDataset(self._training_filename).repeat()\n\n # Parse records.\n dataset = dataset.map(\n self.parser, num_parallel_calls=num_parallel_calls)\n\n # Ensure that the capacity is sufficiently large to provide good random\n # shuffling.\n dataset = dataset.shuffle(buffer_size=self._samples_per_epoch['train'])\n\n # Batch it up.\n dataset = dataset.batch(batch_size)\n dataset = dataset.prefetch(1)\n\n return dataset\n\n def _get_test_dataset(self, num_parallel_calls=8):\n # Repeat infinitely.\n dataset = tf.data.TFRecordDataset(self._test_filename).repeat()\n\n # Parse records.\n dataset = dataset.map(\n self.parser, num_parallel_calls=num_parallel_calls)\n dataset = dataset.batch(batch_size=self._samples_per_epoch['test'])\n\n return dataset\n\n def get_data(self, subset=['train', 'test'], batch_size=1, num_parallel_calls=8, device='/cpu:0'):\n with tf.device(device):\n databases = {}\n if 'train' in subset:\n databases['train'] = self._get_training_dataset(\n batch_size, num_parallel_calls)\n\n if 'test' in subset:\n databases['test'] = self._get_test_dataset(num_parallel_calls)\n\n assert databases != {}, 'no valid subset of MNIST database is provided.'\n\n # get the types and shapes of the database outputs\n db_types = 
next(iter(databases.items()))[1].output_types\n db_shapes = next(iter(databases.items()))[1].output_shapes\n\n # define the iterator and generate different initializers\n iterator = tf.data.Iterator.from_structure(db_types, db_shapes)\n image_batch, label_batch = iterator.get_next()\n\n # define different initializers\n initializer_op = {}\n for k in databases.keys():\n initializer_op[k] = iterator.make_initializer(databases[k])\n\n return image_batch, label_batch, initializer_op\n\n def get_number_samples(self, subset='train'):\n assert subset in self._samples_per_epoch.keys(\n ), 'no valid subset of dataset is provided!'\n return self._samples_per_epoch[subset]\n", "id": "9668780", "language": "Python", "matching_score": 5.586902141571045, "max_stars_count": 0, "path": "QuantizerTest/datasets/tfr/mnist_tfr.py" }, { "content": "\"\"\"\n Flowers data set.\n\"\"\"\n\nimport os\nimport tensorflow as tf\n\n# dimensions of original CIFAR10 images\nHEIGHT = 32\nWIDTH = 32\nDEPTH = 3\n\n\nclass FlowersDataSet(object):\n def __init__(self, data_dir, image_size=(128, 128), augment_training=True):\n self.data_dir = data_dir\n self._augment_training = augment_training\n self._output_dim = image_size\n\n self._training_filename = [os.path.join(\n self.data_dir, 'flowers_train_{0:05d}-of-00005.tfrecord'.format(n)) for n in range(5)]\n self._validation_filename = [os.path.join(\n self.data_dir, 'flowers_validation_{0:05d}-of-00005.tfrecord'.format(n)) for n in range(5)]\n\n self._samples_per_epoch = {'train': 3320, 'validation': 350}\n\n def parser(self, serialized_example):\n \"\"\"Parses a single tf.Example into image and label tensors.\"\"\"\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'image/encoded': tf.FixedLenFeature([], tf.string),\n 'image/format': tf.FixedLenFeature([], tf.string),\n 'image/class/label': tf.FixedLenFeature([], tf.int64),\n 'image/height': tf.FixedLenFeature([1], tf.int64),\n 'image/width': tf.FixedLenFeature([1], tf.int64),\n })\n\n image = tf.image.decode_png(features['image/encoded'], dtype=tf.uint8)\n image = tf.cast(image, tf.float32) / 255\n label = tf.cast(features['image/class/label'], tf.int32)\n\n return image, label\n\n def _get_training_dataset(self, batch_size, num_parallel_calls=8):\n # Repeat infinitely.\n dataset = tf.data.TFRecordDataset(self._training_filename).repeat()\n\n # Parse records.\n dataset = dataset.map(\n self.parser, num_parallel_calls=num_parallel_calls)\n\n # augment dataset\n if self._augment_training:\n dataset = dataset.map(\n self._augment_image, num_parallel_calls=num_parallel_calls)\n else:\n dataset = dataset.map(self._prepare_image,\n num_parallel_calls=num_parallel_calls)\n\n min_queue_examples = int(self._samples_per_epoch['train'] * 0.4)\n # Ensure that the capacity is sufficiently large to provide good random\n # shuffling.\n dataset = dataset.shuffle(\n buffer_size=min_queue_examples + 3 * batch_size)\n\n # Batch it up.\n dataset = dataset.batch(batch_size)\n dataset = dataset.prefetch(1)\n\n return dataset\n\n def _get_validation_dataset(self, num_parallel_calls=8):\n # Repeat infinitely.\n dataset = tf.data.TFRecordDataset(self._validation_filename).repeat()\n\n # Parse records.\n dataset = dataset.map(\n self.parser, num_parallel_calls=num_parallel_calls)\n dataset = dataset.map(self._prepare_image,\n num_parallel_calls=num_parallel_calls)\n dataset = dataset.batch(batch_size=self._samples_per_epoch['validation'])\n\n return dataset\n\n def 
_augment_image(self, image, label):\n # Randomly crop a [height, width] section of the image.\n image = tf.random_crop(\n image, [self._output_dim[0], self._output_dim[1], 3])\n\n # Randomly flip the image horizontally.\n image = tf.image.random_flip_left_right(image)\n\n image = tf.image.random_brightness(image, max_delta=0.2)\n image = tf.image.random_contrast(image, lower=0.2, upper=1.8)\n\n # Subtract off the mean and divide by the variance of the pixels.\n image = tf.image.per_image_standardization(image)\n\n # Set the shapes of tensors.\n image.set_shape([self._output_dim[0], self._output_dim[1], 3])\n # label.set_shape([1])\n\n return image, label\n\n def _prepare_image(self, image, label):\n # Crop the central [height, width] section of the image.\n image = tf.image.resize_image_with_crop_or_pad(\n image, self._output_dim[0], self._output_dim[1])\n\n # Subtract off the mean and divide by the variance of the pixels.\n image = tf.image.per_image_standardization(image)\n\n # Set the shapes of tensors.\n image.set_shape([self._output_dim[0], self._output_dim[1], 3])\n # label.set_shape([1])\n\n return image, label\n\n def get_data(self, subset=['train', 'validation'], batch_size=1, num_parallel_calls=8, device='/cpu:0'):\n with tf.device(device):\n databases = {}\n if 'train' in subset:\n databases['train'] = self._get_training_dataset(\n batch_size, num_parallel_calls)\n\n if 'validation' in subset:\n databases['validation'] = self._get_validation_dataset(\n num_parallel_calls)\n\n assert databases != {}, 'no valid subset of Flowers database is provided.'\n\n # get the types and shapes of the database outputs\n db_types = next(iter(databases.items()))[1].output_types\n db_shapes = next(iter(databases.items()))[1].output_shapes\n\n # define the iterator and generate different initializers\n iterator = tf.data.Iterator.from_structure(db_types, db_shapes)\n image_batch, label_batch = iterator.get_next()\n\n # define different initializers\n initializer_op = {}\n for k in databases.keys():\n initializer_op[k] = iterator.make_initializer(databases[k])\n\n return image_batch, label_batch, initializer_op\n\n def get_number_samples(self, subset='train'):\n assert subset in self._samples_per_epoch.keys(\n ), 'no valid subset of dataset is provided!'\n return self._samples_per_epoch[subset]\n", "id": "10606067", "language": "Python", "matching_score": 7.562850475311279, "max_stars_count": 0, "path": "QuantizerTest/datasets/tfr/flowers_tfr.py" }, { "content": "\"\"\"\n CIFAR-10 data set.\n See http://www.cs.toronto.edu/~kriz/cifar.html.\n\"\"\"\n\nimport os\nimport tensorflow as tf\n\n# dimensions of original CIFAR10 images\nHEIGHT = 32\nWIDTH = 32\nDEPTH = 3\n\n\nclass Cifar10Dataset(object):\n def __init__(self, data_dir, image_size=(24, 24), augment_training=True):\n self.data_dir = data_dir\n self._augment_training = augment_training\n self._output_dim = image_size\n\n self._training_filename = os.path.join(self.data_dir, 'train.tfrecord')\n self._test_filename = os.path.join(self.data_dir, 'test.tfrecord')\n\n self._samples_per_epoch = {'train': 50000, 'test': 10000}\n\n def parser(self, serialized_example):\n \"\"\"Parses a single tf.Example into image and label tensors.\"\"\"\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'image/encoded': tf.FixedLenFeature([], tf.string),\n 'image/format': tf.FixedLenFeature([], tf.string),\n 'image/class/label': tf.FixedLenFeature([], tf.int64),\n 'image/height': tf.FixedLenFeature([1], tf.int64),\n 'image/width': 
tf.FixedLenFeature([1], tf.int64),\n }\n )\n\n image = tf.image.decode_png(features['image/encoded'], dtype=tf.uint8)\n image = tf.cast(image, tf.float32) / 255\n label = tf.cast(features['image/class/label'], tf.int32)\n\n return image, label\n\n def _get_training_dataset(self, batch_size, num_parallel_calls=8):\n # Repeat infinitely.\n dataset = tf.data.TFRecordDataset(self._training_filename).repeat()\n\n # Parse records.\n dataset = dataset.map(self.parser, num_parallel_calls=num_parallel_calls)\n\n # augment dataset\n if self._augment_training:\n dataset = dataset.map(self._augment_training_image, num_parallel_calls=num_parallel_calls)\n else:\n dataset = dataset.map(self._prepare_test_image, num_parallel_calls=num_parallel_calls)\n\n min_queue_examples = int(self._samples_per_epoch['train'] * 0.4)\n # Ensure that the capacity is sufficiently large to provide good random\n # shuffling.\n dataset = dataset.shuffle(buffer_size=min_queue_examples + 3 * batch_size)\n\n # Batch it up.\n dataset = dataset.batch(batch_size)\n dataset = dataset.prefetch(1)\n\n return dataset\n\n def _get_test_dataset(self, num_parallel_calls=8):\n # Repeat infinitely.\n dataset = tf.data.TFRecordDataset(self._test_filename).repeat()\n\n # Parse records.\n dataset = dataset.map(self.parser, num_parallel_calls=num_parallel_calls)\n dataset = dataset.map(self._prepare_test_image, num_parallel_calls=num_parallel_calls)\n dataset = dataset.batch(batch_size=self._samples_per_epoch['test'])\n\n return dataset\n\n def _augment_training_image(self, image, label):\n # Randomly crop a [height, width] section of the image.\n if (self._output_dim[0] >= HEIGHT) or (self._output_dim[1] >= WIDTH):\n image = tf.image.resize_with_crop_or_pad(image, self._output_dim[0] + 8, self._output_dim[1] + 8)\n\n image = tf.random_crop(image, [self._output_dim[0], self._output_dim[1], 3])\n\n # Randomly flip the image horizontally.\n image = tf.image.random_flip_left_right(image)\n\n image = tf.image.random_brightness(image, max_delta=0.2)\n image = tf.image.random_contrast(image, lower=0.2, upper=1.8)\n\n # Subtract off the mean and divide by the variance of the pixels.\n image = tf.image.per_image_standardization(image)\n\n # Set the shapes of tensors.\n image.set_shape([self._output_dim[0], self._output_dim[1], 3])\n # label.set_shape([1])\n\n return image, label\n\n def _prepare_test_image(self, image, label):\n # Crop the central [height, width] section of the image.\n image = tf.image.resize_image_with_crop_or_pad(image, self._output_dim[0], self._output_dim[1])\n\n # Subtract off the mean and divide by the variance of the pixels.\n image = tf.image.per_image_standardization(image)\n\n # Set the shapes of tensors.\n image.set_shape([self._output_dim[0], self._output_dim[1], 3])\n # label.set_shape([1])\n\n return image, label\n\n def create_dataset(self, subset=['train', 'test'], batch_size=1, num_parallel_calls=8):\n\t\tdatabases = {}\n\t\tif 'train' in subset:\n\t\t\tdatabases['train'] = self._get_training_dataset(batch_size, num_parallel_calls)\n\n\t\tif 'test' in subset:\n\t\t\tdatabases['test'] = self._get_test_dataset(num_parallel_calls)\n\n\t\tassert databases != {}, 'no valid subset of CIFAR10 database is provided.'\n\n\t\t# get the types and shapes of the database outputs\n\t\tdb_types = next(iter(databases.items()))[1].output_types\n\t\tdb_shapes = next(iter(databases.items()))[1].output_shapes\n\n\t\t# define the iterator and generate different initializers\n\t\titerator = tf.data.Iterator.from_structure(db_types, 
db_shapes)\n\t\timage_batch, label_batch = iterator.get_next()\n\n\t\t# define different initializers\n\t\tinitializer_op = {}\n\t\tfor k in databases.keys():\n\t\t\tinitializer_op[k] = iterator.make_initializer(databases[k])\n\n return image_batch, label_batch, initializer_op\n\n def get_number_samples(self, subset='train'):\n assert subset in self._samples_per_epoch.keys(), 'no valid subset of dataset is provided!'\n return self._samples_per_epoch[subset]", "id": "3568431", "language": "Python", "matching_score": 1.9736047983169556, "max_stars_count": 0, "path": "QuantizerTest/datasets/tfr/cifar10_tfr.py" }, { "content": "\"\"\"\n implementation of the MNIST/Fashion MNIST database\n\"\"\"\nimport os\nimport numpy as np\nimport gzip\nimport urllib.request\n\nimport tensorflow as tf\nfrom tensorflow.python.platform import gfile # pylint: disable=E0611\n\nDEFAULT_SOURCE_URL = 'https://storage.googleapis.com/cvdf-datasets/mnist/'\n# For Fashion MNIST, use the following link: 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'\n\n\ndef _check_download_file(filename, dir_name, source_url):\n \"\"\"Download the data from source url, unless it's already here.\n\n Args:\n filename: string, name of the file in the directory.\n dir_name: string, path to working directory.\n source_url: url to download from if file doesn't exist.\n\n Returns:\n Path to resulting file.\n \"\"\"\n if not gfile.Exists(dir_name):\n gfile.MakeDirs(dir_name)\n filepath = os.path.join(dir_name, filename)\n\n if not gfile.Exists(filepath):\n urllib.request.urlretrieve(source_url, filepath)\n\n with gfile.GFile(filepath) as f:\n size = f.size()\n print('Successfully downloaded', filename, size, 'bytes.')\n\n return filepath\n\n\ndef _read32(bytestream):\n dt = np.dtype(np.uint32).newbyteorder('>')\n return np.frombuffer(bytestream.read(4), dtype=dt)[0]\n\n\ndef _dense_to_one_hot(labels_dense, num_classes):\n \"\"\"Convert class labels from scalars to one-hot vectors.\"\"\"\n num_labels = labels_dense.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n\n return labels_one_hot\n\n\ndef _extract_images(f):\n \"\"\"Extract the images into a 4D uint8 np array [index, y, x, depth].\n\n Args:\n f: A file object that can be passed into a gzip reader.\n\n Returns:\n data: A 4D uint8 np array [index, y, x, depth].\n\n Raises:\n ValueError: If the bytestream does not start with 2051.\n\n \"\"\"\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError('Invalid magic number %d in MNIST image file: %s' % (magic, f.name))\n\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n images = np.frombuffer(buf, dtype=np.uint8)\n images = images.reshape(num_images, rows, cols, 1)\n\n images = images.astype(np.float32) / 255\n return images\n\n\ndef _extract_labels(f):\n \"\"\"Extract the labels into a 1D uint8 numpy array [index].\n\n Args:\n f: A file object that can be passed into a gzip reader.\n one_hot: Does one hot encoding for the result.\n num_classes: Number of classes for the one hot encoding.\n\n Returns:\n labels: a 1D uint8 numpy array.\n\n Raises:\n ValueError: If the bystream doesn't start with 2049.\n \"\"\"\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n assert magic == 2049, 'Invalid magic number %d in MNIST label 
file: %s' % (magic, f.name)\n\n num_items = _read32(bytestream)\n buf = bytestream.read(num_items)\n labels = np.frombuffer(buf, dtype=np.uint8)\n\n return labels.astype(np.int32)\n\n\ndef _read_data_sets(db_dir, source_url):\n\n TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'\n TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'\n TEST_IMAGES = 't10k-images-idx3-ubyte.gz'\n TEST_LABELS = 't10k-labels-idx1-ubyte.gz'\n\n local_file = _check_download_file(TRAIN_IMAGES, db_dir, source_url + TRAIN_IMAGES)\n with gfile.Open(local_file, 'rb') as f:\n train_images = _extract_images(f)\n\n local_file = _check_download_file(TRAIN_LABELS, db_dir, source_url + TRAIN_LABELS)\n with gfile.Open(local_file, 'rb') as f:\n train_labels = _extract_labels(f)\n\n local_file = _check_download_file(TEST_IMAGES, db_dir, source_url + TEST_IMAGES)\n with gfile.Open(local_file, 'rb') as f:\n test_images = _extract_images(f)\n\n local_file = _check_download_file(TEST_LABELS, db_dir, source_url + TEST_LABELS)\n with gfile.Open(local_file, 'rb') as f:\n test_labels = _extract_labels(f)\n\n return train_images, train_labels, test_images, test_labels\n\n\ndef create_dataset(\n db_dir,\n batch_size,\n vector_fmt=False,\n one_hot=True,\n validation_size=5000,\n source_url=DEFAULT_SOURCE_URL,\n dataset_fmt=False\n):\n \"\"\"\n main function to create the mnist data set\n Args:\n db_dir: string, disrectory of the files of the mnist database.\n batch_size: integer or placeholder, training batch size.\n vector_fmt: the datapoints are in the vectorized (-1, 784) or image (-1, 28, 28, 1) format.\n one_hot: boolean, labels are one-hot represented or not.\n validation_size: integer, number of samples in the validation set.\n source_url: url to download from if file doesn't exist.\n\n Returns:\n images, labels\n the initializer operators for the datasets\n number of samples in each subset of the database\n \"\"\"\n # read data from files\n train_images, train_labels, test_images, test_labels = _read_data_sets(db_dir, source_url)\n\n if one_hot:\n train_labels = _dense_to_one_hot(train_labels, num_classes=10)\n test_labels = _dense_to_one_hot(test_labels, num_classes=10)\n\n if vector_fmt:\n train_images = np.reshape(train_images, newshape=(-1, 784))\n test_images = np.reshape(test_images, newshape=(-1, 784))\n\n # separate the validation data\n if not 0 <= validation_size <= len(train_images):\n raise ValueError(\n 'Validation size should be between 0 and {}. 
Received: {}.'.format(len(train_images), validation_size)\n )\n\n validation_images = train_images[:validation_size]\n validation_labels = train_labels[:validation_size]\n train_images = train_images[validation_size:]\n train_labels = train_labels[validation_size:]\n\n number_samples = {'train': len(train_labels), 'validation': len(validation_labels), 'test': len(test_labels)}\n\n # create training dataset\n train_db = tf.data.Dataset.from_tensor_slices((train_images, train_labels))\n train_db = train_db.shuffle(number_samples['train']).repeat()\n train_db = train_db.batch(batch_size)\n # prefetch data\n train_db = train_db.prefetch(1)\n\n # create validation dataset\n valid_db = tf.data.Dataset.from_tensor_slices((validation_images, validation_labels))\n valid_db = valid_db.batch(number_samples['validation'])\n\n # create test dataset\n test_db = tf.data.Dataset.from_tensor_slices((test_images, test_labels))\n test_db = test_db.batch(number_samples['test'])\n\n if dataset_fmt:\n return train_db, valid_db, test_db, number_samples\n\n # define the iterator and different initializers\n iterator = tf.data.Iterator.from_structure(train_db.output_types, train_db.output_shapes)\n\n images, labels = iterator.get_next()\n\n train_init_op = iterator.make_initializer(train_db)\n valid_init_op = iterator.make_initializer(valid_db)\n test_init_op = iterator.make_initializer(test_db)\n\n init_op = {'train': train_init_op, 'validation': valid_init_op, 'test': test_init_op}\n\n return images, labels, init_op, number_samples\n\n\nclass MNISTDataset:\n def __init__(self):\n self._train_images = None\n self._train_labels = None\n self._validation_images = None\n self._validation_labels = None\n self._test_images = None\n self._test_labels = None\n self._number_samples = {'train': 0, 'validation': 0, 'test': 0}\n\n self._seed = None\n self._index_pos = 0\n self._shuffled_index = [0]\n\n @property\n def number_samples(self):\n return self._number_samples\n\n @property\n def train_data(self):\n return self._train_images, self._train_labels\n\n @property\n def validation(self):\n return self._validation_images, self._validation_labels\n\n @property\n def test_data(self):\n return self._test_images, self._test_labels\n\n def _prepare_samples(self, db_dir, vector_fmt, one_hot, validation_size):\n # read data from files\n self._train_images, self._train_labels, self._test_images, self._test_labels = _read_data_sets(\n db_dir, source_url=DEFAULT_SOURCE_URL\n )\n\n if one_hot:\n self._train_labels = _dense_to_one_hot(self._train_labels, num_classes=10)\n self._test_labels = _dense_to_one_hot(self._test_labels, num_classes=10)\n\n if vector_fmt:\n self._train_images = np.reshape(self._train_images, newshape=(-1, 784))\n self._test_images = np.reshape(self._test_images, newshape=(-1, 784))\n\n # separate the validation data\n if not 0 <= validation_size <= len(self._train_images):\n raise ValueError(\n 'Validation size should be between 0 and {}. 
Received: {}.'.format(\n len(self._train_images), validation_size\n )\n )\n\n self._validation_images = self._train_images[:validation_size]\n self._validation_labels = self._train_labels[:validation_size]\n self._train_images = self._train_images[validation_size:]\n self._train_labels = self._train_labels[validation_size:]\n\n self._number_samples = {\n 'train': len(self._train_labels),\n 'validation': len(self._validation_labels),\n 'test': len(self._test_labels)\n }\n\n def _reset_shuffled_index(self):\n np.random.seed(self._seed)\n\n self._shuffled_index = np.arange(0, self._number_samples['train'])\n np.random.shuffle(self._shuffled_index)\n self._index_pos = 0\n # update seed for reproducibility and avoiding conflicts with other rand calls\n self._seed = np.random.randint(1000, 1000000)\n\n def create_dataset(self, db_dir, vector_fmt=False, one_hot=True, validation_size=5000, seed=None):\n self._seed = np.random.randint(1000, 1000000) if (seed is None) else seed\n\n # read database samples from file or download them if necessary\n self._prepare_samples(db_dir, vector_fmt, one_hot, validation_size)\n self._reset_shuffled_index()\n\n def next_batch(self, batch_size):\n if (self._index_pos + batch_size) >= self._number_samples['train']:\n self._reset_shuffled_index()\n\n index = self._shuffled_index[self._index_pos:(self._index_pos + batch_size)]\n self._index_pos += batch_size\n\n return self._train_images[index], self._train_labels[index]\n", "id": "11932885", "language": "Python", "matching_score": 5.015338897705078, "max_stars_count": 0, "path": "QuantizerTest/datasets/mnist_dataset.py" }, { "content": "\"\"\"\n CIFAR-10 data set.\n See http://www.cs.toronto.edu/~kriz/cifar.html.\n\"\"\"\n\nimport os\nimport shutil\nimport urllib.request\nimport tarfile\nimport pickle\n\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.python.platform import gfile\nimport numpy as np\n\nCIFAR_FILENAME = 'cifar-10-python.tar.gz'\nCIFAR_DOWNLOAD_URL = 'https://www.cs.toronto.edu/~kriz/' + CIFAR_FILENAME\nCIFAR_LOCAL_FOLDER = 'cifar-10-batches-py'\n\n# dimensions of original CIFAR10 images\nHEIGHT = 32\nWIDTH = 32\nDEPTH = 3\n\n\nclass Cifar10Dataset:\n def __init__(self, db_settings: dict = {}):\n self._num_classes = 10\n self._label_names = None\n self._source_url = db_settings.get('source-url', CIFAR_DOWNLOAD_URL)\n self._data_dir = db_settings.get('database-dir', '')\n self._one_hot = db_settings.get('one-hot', False)\n self._output_dim = db_settings.get('output-dimension', (24, 24))\n self._augment_training = db_settings.get('augment-training', True)\n\n self._train_images, self._train_labels = None, None\n self._test_images, self._test_labels = None, None\n self._num_samples = {'train': 0, 'test': 0}\n\n def _download_and_extract(self):\n # download CIFAR-10 and unzip it if not already downloaded.\n if not gfile.Exists(self._data_dir):\n gfile.MakeDirs(self._data_dir)\n\n filepath = os.path.join(self._data_dir, CIFAR_FILENAME)\n\n if not gfile.Exists(filepath):\n urllib.request.urlretrieve(self._source_url, filepath)\n\n with gfile.GFile(filepath) as f:\n size = f.size()\n print('Successfully downloaded', CIFAR_FILENAME, size, 'bytes.')\n\n tarfile.open(filepath, 'r:gz').extractall(self._data_dir)\n\n source = os.path.join(self._data_dir, CIFAR_LOCAL_FOLDER)\n files = os.listdir(source)\n for f in files:\n shutil.move(os.path.join(source, f), self._data_dir)\n\n os.rmdir(source)\n\n def _read_data_from_files(self, filenames):\n images = None\n labels = None\n for filename in filenames:\n with 
tf.gfile.Open(filename, 'rb') as f:\n data_dict = pickle.load(f, encoding='bytes')\n\n img = data_dict[b'data']\n img = np.reshape(img, (-1, 3, 32, 32))\n img = np.transpose(img, axes=(0, 2, 3, 1))\n lbl = np.array(data_dict[b'labels'])\n\n if images is None:\n images = img\n labels = lbl\n else:\n images = np.concatenate((images, img), axis=0)\n labels = np.concatenate((labels, lbl), axis=0)\n\n if self._one_hot:\n # convert labels to one-hot vectors\n num_labels = labels.shape[0]\n index_offset = np.arange(num_labels) * self._num_classes\n tmp = np.zeros((num_labels, self._num_classes))\n tmp.flat[index_offset + labels.ravel()] = 1\n labels = tmp\n\n return images, labels\n\n def _read_label_names(self):\n filename = os.path.join(self._data_dir, 'batches.meta')\n with tf.gfile.GFile(filename, 'rb') as f:\n data_dict = pickle.load(f, encoding='bytes')\n\n self._label_names = data_dict[b'label_names']\n\n def _read_data_sets(self, subset):\n if subset == 'train':\n filenames = [os.path.join(self._data_dir, 'data_batch_%d' % i) for i in range(1, 6)]\n elif subset == 'test':\n filenames = [os.path.join(self._data_dir, 'test_batch')]\n else:\n raise ValueError('not a valid subset is provided.')\n\n files_exist = np.all([os.path.exists(fname) for fname in filenames])\n if not files_exist:\n self._download_and_extract()\n\n images, labels = self._read_data_from_files(filenames)\n\n return images, labels\n\n def _load_from_file(self, subset=('train', 'test')):\n self._read_label_names()\n\n if 'train' in subset:\n self._train_images, self._train_labels = self._read_data_sets('train')\n self._num_samples['train'] = self._train_labels.shape[0]\n\n if 'test' in subset:\n self._test_images, self._test_labels = self._read_data_sets('test')\n self._num_samples['test'] = self._test_labels.shape[0]\n\n def _augment_training_image(self, image, label):\n # Randomly crop a [height, width] section of the image.\n if (self._output_dim[0] >= HEIGHT) or (self._output_dim[1] >= WIDTH):\n image = tf.image.resize_with_crop_or_pad(image, self._output_dim[0] + 8, self._output_dim[1] + 8)\n\n image = tf.image.random_crop(image, [self._output_dim[0], self._output_dim[1], 3])\n\n # Randomly flip the image horizontally.\n image = tf.image.random_flip_left_right(image)\n\n image = tf.image.random_brightness(image, max_delta=0.2)\n image = tf.image.random_contrast(image, lower=0.2, upper=1.8)\n\n # Subtract off the mean and divide by the variance of the pixels.\n image = tf.image.per_image_standardization(image)\n\n # Set the shapes of tensors.\n image.set_shape([self._output_dim[0], self._output_dim[1], 3])\n\n return image, label\n\n def _prepare_test_image(self, image, label):\n # Crop the central [height, width] section of the image.\n image = tf.image.resize_with_crop_or_pad(image, self._output_dim[0], self._output_dim[1])\n\n # Subtract off the mean and divide by the variance of the pixels.\n image = tf.image.per_image_standardization(image)\n\n # Set the shapes of tensors.\n image.set_shape([self._output_dim[0], self._output_dim[1], 3])\n\n return image, label\n\n def _create_training_dataset(self, batch_size, num_parallel_calls):\n assert self._train_images is not None, 'training images has not been loaded from file.'\n\n train_db = tf.data.Dataset.from_tensor_slices((self._train_images, self._train_labels))\n train_db = train_db.shuffle(self._num_samples['train']).repeat()\n\n if self._augment_training:\n train_db = train_db.map(self._augment_training_image, num_parallel_calls=num_parallel_calls)\n else:\n 
train_db = train_db.map(self._prepare_test_image, num_parallel_calls=num_parallel_calls)\n\n train_db = train_db.batch(batch_size, drop_remainder=True)\n # prefetch data\n train_db = train_db.prefetch(1)\n\n return train_db\n\n def _create_test_dataset(self, num_parallel_calls):\n assert self._test_images is not None, 'test images has not been loaded from file.'\n\n test_db = tf.data.Dataset.from_tensor_slices((self._test_images, self._test_labels))\n test_db = test_db.repeat()\n\n test_db = test_db.map(self._prepare_test_image, num_parallel_calls=num_parallel_calls)\n test_db = test_db.batch(batch_size=self._num_samples['test'])\n\n return test_db\n\n def create_dataset(self, subset={'train', 'test'}, batch_size=1, num_parallel_calls=16):\n databases = {}\n self._load_from_file(subset)\n\n if 'train' in subset:\n databases['train'] = self._create_training_dataset(batch_size, num_parallel_calls)\n\n if 'test' in subset:\n databases['test'] = self._create_test_dataset(num_parallel_calls)\n\n assert databases != {}, 'no valid subset of CIFAR10 database is provided.'\n\n # get the types and shapes of the database outputs\n db_types = next(iter(databases.items()))[1].output_types\n db_shapes = next(iter(databases.items()))[1].output_shapes\n\n # define the iterator and generate different initializers\n iterator = tf.data.Iterator.from_structure(db_types, db_shapes)\n \n initializer_op = {}\n for k in databases.keys():\n # define different initializers\n initializer_op[k] = iterator.make_initializer(databases[k])\n \n image_batch, label_batch = iterator.get_next(name='input-data')\n\n return image_batch, label_batch, initializer_op\n\n def get_number_samples(self, subset='train'):\n assert subset in ['train', 'test'], 'no valid subset of dataset is provided!'\n return self._num_samples[subset]\n\n def get_label_texts(self):\n return self._label_names\n", "id": "4611227", "language": "Python", "matching_score": 3.967400312423706, "max_stars_count": 0, "path": "QuantizerTest/datasets/cifar10_dataset.py" }, { "content": "import numpy as np\nimport pickle\n\ndefault_folder = 'D:/Database/CIFAR10/'\n\n\nclass Cifar10DataSet(object):\n def __init__(self, database_folder=None):\n self._database_folder = database_folder\n\n if self._database_folder is None:\n self._database_folder = default_folder\n\n self._index_pos = 0\n self._shuffled_index = [0]\n self._training_number = 0\n self._train_x = None\n self._train_label = None\n\n self._test_number = 0\n self._test_x = None\n self._test_label = None\n\n f = open(self._database_folder + 'batches.meta', 'rb')\n d = pickle.load(f)\n f.close()\n self._class_names = d['label_names']\n\n @property\n def class_names(self):\n return self._class_names\n\n @property\n def training_number(self):\n return self._training_number\n\n def initialize_training_data(self):\n self._training_number = 0\n self._train_x = None\n self._train_label = None\n for i in range(5):\n f = open(self._database_folder + '/data_batch_' + str(i + 1), 'rb')\n d = pickle.load(f, encoding='latin1')\n f.close()\n\n _im = d['data'].reshape([-1, 3072])\n _im = _im.astype(float) / 255\n _r = _im[:, :1024].reshape([-1, 32, 32])\n _g = _im[:, 1024:2048].reshape([-1, 32, 32])\n _b = _im[:, 2048:].reshape([-1, 32, 32])\n im = np.stack((_r, _g, _b), axis=3)\n\n if self._train_x is None:\n self._train_x = im\n self._train_label = d['labels']\n else:\n self._train_x = np.concatenate((self._train_x, im), axis=0)\n self._train_label = np.concatenate((self._train_label, d['labels']), axis=0)\n\n 
self._training_number = self._train_x.shape[0]\n self._shuffled_index = np.arange(0, self._training_number)\n np.random.shuffle(self._shuffled_index)\n return self._training_number\n\n def initialize_test_data(self):\n f = open(self._database_folder + '/test_batch', 'rb')\n d = pickle.load(f, encoding='latin1')\n f.close()\n\n _im = d['data'].reshape([-1, 3072])\n _im = _im.astype(float) / 255\n _r = _im[:, :1024].reshape([-1, 32, 32])\n _g = _im[:, 1024:2048].reshape([-1, 32, 32])\n _b = _im[:, 2048:].reshape([-1, 32, 32])\n\n self._test_x = np.stack((_r, _g, _b), axis=3)\n self._test_label = [int(l) for l in d['labels']]\n self._test_number = self._test_x.shape[0]\n\n return self._test_number\n\n def get_training_batch(self, batch=100, one_hot=True):\n if type(batch) is int:\n index = self._shuffled_index[self._index_pos:(self._index_pos + batch)]\n self._index_pos += batch\n if self._index_pos >= self._training_number:\n # update the shuffled index\n self._index_pos = 0\n self._shuffled_index = np.arange(0, self._training_number)\n np.random.shuffle(self._shuffled_index)\n elif (type(batch) is list) or (type(batch) is np.ndarray):\n index = batch\n else:\n raise ValueError('Invalid input for batch.')\n\n num_samples = len(index)\n train_images = self._train_x[index, :, :, :]\n if one_hot:\n train_labels = np.zeros((num_samples, 10))\n train_labels[np.arange(0, num_samples), self._train_label[index]] = 1\n else:\n train_labels = self._train_label[index]\n\n return train_images, train_labels\n\n def get_test_data(self, one_hot=True):\n if one_hot:\n test_labels = np.zeros((self._test_number, 10))\n test_labels[np.arange(0, self._test_number), self._test_label] = 1\n else:\n test_labels = self._test_label\n\n return self._test_x, test_labels\n\n\nclass CIFAR10AugmentedDatabase(Cifar10DataSet):\n def __init__(self, database_folder=None, image_width=24, image_height=24):\n super(CIFAR10AugmentedDatabase, self).__init__(database_folder)\n self._image_width = image_width\n self._image_height = image_height\n self._augmented_x = None\n\n def initialize_training_data(self):\n super(CIFAR10AugmentedDatabase, self).initialize_training_data()\n self.reset_augmented_data()\n\n def reset_augmented_data(self):\n self._augmented_x = np.zeros(\n shape=(self._training_number, self._image_height, self._image_width, self._train_x.shape[3]),\n dtype=np.float32)\n\n for idx in range(self._training_number):\n # random crop\n crop_u = np.random.randint(0, 32 - self._image_height)\n crop_b = crop_u + self._image_height\n crop_l = np.random.randint(0, 32 - self._image_width)\n crop_r = crop_l + self._image_width\n # random flip\n if np.random.randint(0, 2) == 0:\n self._augmented_x[idx, :, :, :] = self._train_x[idx, crop_u:crop_b, crop_l:crop_r, :]\n else:\n self._augmented_x[idx, :, :, :] = self._train_x[idx, crop_u:crop_b,\n np.arange(crop_r - 1, crop_l - 1, -1), :]\n\n # image standardization\n mean_img = np.mean(self._augmented_x, axis=(1, 2))\n thr = 1.0 / np.sqrt(self._image_height * self._image_width)\n std_img = np.maximum(thr, np.std(self._augmented_x, axis=(1, 2)))\n self._augmented_x = (self._augmented_x - mean_img[:, np.newaxis, np.newaxis, :]) / std_img[:, np.newaxis,\n np.newaxis, :]\n # shuffle\n self._index_pos = 0\n self._shuffled_index = np.arange(0, self._training_number)\n np.random.shuffle(self._shuffled_index)\n\n def get_training_batch(self, batch=100, one_hot=True, noise=0.0):\n if type(batch) is int:\n if self._index_pos >= (self._training_number - batch):\n self.reset_augmented_data() # 
reset the augmented data and index\n\t\t\n index = self._shuffled_index[self._index_pos:(self._index_pos + batch)]\n self._index_pos += batch\n elif (type(batch) is list) or (type(batch) is np.ndarray):\n index = batch\n else:\n raise ValueError('Invalid input for batch.')\n\n num_samples = len(index)\n if one_hot:\n train_labels = np.zeros((num_samples, 10))\n train_labels[np.arange(0, num_samples), self._train_label[index]] = 1\n else:\n train_labels = self._train_label[index]\n\n train_images = self._augmented_x[index, :, :, :]\n # add noise\n train_images += np.random.normal(0, 1.0, train_images.shape) * noise\n\n return train_images, train_labels\n\n def get_test_data(self, one_hot=True):\n if one_hot:\n test_labels = np.zeros((self._test_number, 10))\n test_labels[np.arange(0, self._test_number), self._test_label] = 1\n else:\n test_labels = self._test_label\n\n # crop the center of the image to make the images of the desired size\n margin_u = (self._test_x.shape[1] - self._image_height) // 2\n margin_d = margin_u + self._image_height\n margin_l = (self._test_x.shape[2] - self._image_width) // 2\n margin_r = margin_l + self._image_width\n test_images = self._test_x[:, margin_u:margin_d, margin_l:margin_r, :]\n\n # image standardization\n mean_img = np.mean(test_images, axis=(1, 2))\n thr = 1.0 / np.sqrt(self._image_height * self._image_width)\n std_img = np.maximum(thr, np.std(test_images, axis=(1, 2)))\n test_images = (test_images - mean_img[:, np.newaxis, np.newaxis, :]) / std_img[:, np.newaxis, np.newaxis, :]\n\n return test_images, test_labels\n", "id": "8665555", "language": "Python", "matching_score": 0.5051229596138, "max_stars_count": 0, "path": "QuantizerTest/datasets/cifar10_dataset_np.py" }, { "content": "\"\"\"\n Implementation of the paper 'ATOMO: Communication-efficient Learning via Atomic Sparsification'\n This is mainly based on the code available at https://github.com/hwang595/ATOMO\n Since the basic (transform domain) was not available, I implemented Alg. 
1.\n\"\"\"\n\nimport numpy as np\nimport scipy.linalg as sla\n\n\nclass atomo_quantizer:\n def __init__(self, rank, spectral_method=True, T=None):\n self._spectral = spectral_method\n self._rank = rank\n self._T = T\n\n def quantize(self, X, reconstructed=True):\n if self._spectral:\n return self._spectral_atomo(X, reconstructed)\n else:\n return self._transform_atomo(X, reconstructed)\n\n def _spectral_atomo(self, X, reconstructed):\n orig_shape = X.shape\n if X.ndim != 2:\n X = _resize_to_2d(X)\n\n u, s, vT = sla.svd(X, full_matrices=False)\n\n i, probs = _sample_svd(s, self._rank)\n u = u[:, i]\n s = s[i] / probs\n vT = vT[i, :]\n\n if reconstructed:\n xh = np.dot(np.dot(u, np.diag(s)), vT)\n Xh = np.reshape(xh, newshape=orig_shape)\n return Xh\n else:\n return u, s, vT\n\n def _transform_atomo(self, X, reconstructed):\n \"\"\"\n Original ATOMO formulation\n It assumes that transform matrix is orthonormal.\n \"\"\"\n\n x = np.reshape(X, -1)\n coeffs = np.matmul(self._T.T, x)\n abs_c = np.abs(coeffs)\n sort_idx = np.argsort(abs_c)[::-1]\n i, probs = _atomo_probabilities(abs_c[sort_idx], self._rank)\n i = sort_idx[i]\n coeffs = coeffs[i] / probs\n\n if reconstructed:\n xh = np.matmul(self._T[:, i], coeffs)\n Xh = np.reshape(xh, newshape=X.shape)\n return Xh\n else:\n return i, coeffs, probs\n\n\ndef _resize_to_2d(x):\n \"\"\"\n x.shape > 2\n If x.shape = (a, b, *c), assumed that each one of (a, b) pairs has relevant information in c.\n \"\"\"\n shape = x.shape\n if x.ndim == 1:\n n = x.shape[0]\n return x.reshape((n // 2, 2))\n\n if all([s == 1 for s in shape[2:]]):\n return x.reshape((shape[0], shape[1]))\n\n # each of (a, b) has related features\n x = x.reshape((shape[0], shape[1], -1))\n # stack those related features into a tall matrix\n x_tmp = x.reshape((shape[0] * shape[1], -1))\n tmp_shape = x_tmp.shape\n return x_tmp.reshape((int(tmp_shape[0] / 2), int(tmp_shape[1] * 2)))\n\n\ndef _sample_svd(s, rank=0):\n if s[0] < 1e-6:\n return [0], np.array([1.0])\n probs = s / s[0] if rank == 0 else rank * s / s.sum()\n for i, p in enumerate(probs):\n if p > 1:\n probs[i] = 1\n sampled_idx = []\n sample_probs = []\n for i, p in enumerate(probs):\n #if np.random.rand() < p:\n # random sampling from bernulli distribution\n if np.random.binomial(1, p):\n sampled_idx += [i]\n sample_probs += [p]\n rank_hat = len(sampled_idx)\n if rank_hat == 0: # or (rank != 0 and np.abs(rank_hat - rank) >= 3):\n return _sample_svd(s, rank=rank)\n return np.array(sampled_idx, dtype=int), np.array(sample_probs)\n\n\ndef _atomo_probabilities(coeffs, s):\n \"\"\"\n Implementation of Alg. 
1 in the paper.\n It is assumed that coeffs is a 1D array of sorted absolute values of the atomic representations.\n Parameters:\n coeffs (numpy 1d array) : input sort(|C|)\n s (float) : sparsity budget\n \"\"\"\n\n n = len(coeffs)\n scale = np.sum(coeffs) + 1e-12\n probs = np.zeros(n)\n for i in range(n):\n # scale is np.sum(coeffs[i:])\n p = coeffs[i] * s / scale\n if p <= 1:\n probs[i:] = s * coeffs[i:] / scale\n break\n else:\n probs[i] = 1\n s -= 1\n\n # update the scale for the next iteration\n scale = scale - coeffs[i]\n\n sampled_idx = []\n sample_probs = []\n for i, p in enumerate(probs):\n if np.random.binomial(1, p):\n sampled_idx += [i]\n sample_probs += [p]\n\n return np.array(sampled_idx, dtype=int), np.array(sample_probs)\n", "id": "1870441", "language": "Python", "matching_score": 0.28133219480514526, "max_stars_count": 0, "path": "CompressionError/models/quantizers/atomo.py" }, { "content": "import os\nimport numpy as np\nimport scipy.io as sio\n\nDEFAULT_FOLDER = 'DataBase/HadamardMatrix/'\n\ndef load_hadamard_matrix(n, folder_name=DEFAULT_FOLDER):\n \"\"\"\n Loads or creates Hadamard matrix with given input dimention n \n and normalizes it such that H'H=I\n \"\"\"\n \n fname = os.path.join(folder_name, 'H%d.mat' % n)\n if not os.path.exists(fname):\n if n == 1:\n H = np.array([1], dtype=np.float32)\n elif n % 2 != 0:\n H = None\n else:\n H = load_hadamard_matrix(n//2, folder_name)\n if H is not None:\n H = np.kron(np.array([[1, 1], [1, -1]]), H) / np.sqrt(2)\n else:\n data = sio.loadmat(fname)\n H = data['H'].astype(np.float32) / np.sqrt(n)\n \n return H\n", "id": "12819310", "language": "Python", "matching_score": 0.09813075512647629, "max_stars_count": 0, "path": "SimpleMSE/np/hadamard.py" }, { "content": "# import source_coding.arithmetic_codec as codec\nimport numpy as np\n\n\n# =============================================================================\n# compute entropy for approximate bit-rate calculations\ndef compute_entropy(x, alphabet_size=None):\n if alphabet_size is None:\n min_a = np.min(x)\n max_a = np.max(x)\n alphabet_size = max_a - min_a + 1\n else:\n min_a = 0\n\n if alphabet_size <= 0:\n return 0.\n \n p = np.zeros(alphabet_size)\n for n in range(alphabet_size):\n p[n] = np.count_nonzero(x == (min_a + n))\n\n h = -np.dot(p, np.log2((p + np.finfo(float).eps) / x.size))\n return h\n\n\n# def adaptive_ac_bit_rate(x, alphabet_size):\n# code = codec.adaptive_encoder(input=np.reshape(x, -1).astype(np.uint8), alphabet_size=alphabet_size)\n# return code.size * 8\n", "id": "563876", "language": "Python", "matching_score": 1.1966406106948853, "max_stars_count": 0, "path": "CompressionError/models/quantizers/compression.py" }, { "content": "\"\"\"\n '1-Bit Stochastic Gradient Descent and its Application to Data-Parallel Distributed Training of Speech DNNs',\n Seide, et. al. 2014\n\"\"\"\n\nimport numpy as np\nimport itertools\n\ndef _onebit_sign_quantizer(w):\n q = np.zeros(w.shape, dtype=np.int)\n q[w > 0] = 1\n\n centers = np.zeros(2)\n\n # find the centers\n sum_q1 = np.count_nonzero(q)\n sum_q0 = q.size - np.count_nonzero(q)\n centers[1] = np.sum(w * q) / (sum_q1 + np.finfo(float).eps)\n centers[0] = np.sum(w * (1 - q)) / (sum_q0 + np.finfo(float).eps)\n\n return q, centers\n\nclass onebit_quantizer:\n def __init__(self):\n self._residue = 0\n\n def quantize(self, X, reconstructed=True):\n \"\"\"\n Quantizing the given matrix with only 1 bit. The threshold is fixed to zero and the reconstruction values are computed\n to minimize the MSE. 
The quantization error is returned, too.\n :param W: input data (vector or ndarray) to quantize\n :return: quantized values, centers, quantization error\n \"\"\"\n Y = X + self._residue\n if Y.ndim == 1:\n Q, centers = _onebit_sign_quantizer(Y)\n Xh = centers[Q]\n\n else:\n Q = np.zeros(Y.shape, dtype=np.int)\n centers = np.zeros((Y.shape[0], 2), dtype=np.float32)\n Xh = np.zeros_like(Y)\n # if W is an nd array, process each column separately\n for n, w in enumerate(Y):\n q, center = _onebit_sign_quantizer(w)\n Q[n, :] = q\n centers[n, :] = center\n Xh[n, :] = center[q]\n\n self._residue = Y - Xh\n \n if reconstructed:\n return Xh\n else:\n return Q, centers\n\n def dequantize(self, Q, centers):\n \"\"\"\n Dequantize from the given 1-bit quantization and the reconstruction values.\n :param Q: input quantized values\n :param centers: centers of the quantization bins (reconstruction points)\n :return: reconstructed values\n \"\"\"\n\n if Q.ndim == 1:\n X = centers[Q]\n else:\n X = np.zeros(Q.shape)\n for n, q, c in zip(itertools.count(), Q, centers):\n X[n] = c[q]\n\n return X\n\n def reset(self):\n self._residue = 0", "id": "12391572", "language": "Python", "matching_score": 2.91394305229187, "max_stars_count": 0, "path": "CompressionError/models/quantizers/onebit_quantizer.py" }, { "content": "\"\"\"\n '1-Bit Stochastic Gradient Descent and its Application to Data-Parallel Distributed Training of Speech DNNs',\n Seide, et al. 2014\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\n\n\ndef quantize(W):\n \"\"\"\n Quantizing the given matrix with only 1 bit. The threshold is fixed to zero and the reconstruction values are computed\n to minimize the MSE.\n Parameters:\n W : input data (vector or ndarray) to quantize\n \"\"\"\n\n W_shape = W.get_shape().as_list()\n W_size = np.prod(W_shape) + 2e-10\n\n # variable to store the residual signal\n residue = tf.Variable(tf.zeros(shape=W_shape), dtype=tf.float32, trainable=False)\n\n W = W + residue\n Qp = tf.cast(W > 0, tf.float32)\n Qn = 1 - Qp\n num_p = tf.reduce_sum(Qp, axis=0, keepdims=True) + 1e-10\n\n W_positive = tf.multiply(W, Qp)\n W_negative = tf.multiply(W, Qn)\n centers_positive = tf.reduce_sum(W_positive, axis=0, keepdims=True) / num_p\n centers_negative = tf.reduce_sum(W_negative, axis=0, keepdims=True) / (W_size - num_p)\n\n # compute the quantization error\n Wh = centers_positive * Qp + centers_negative * Qn\n new_residue = W - Wh\n with tf.control_dependencies([Wh, new_residue]):\n update_residue = residue.assign(new_residue)\n\n return Qp, centers_positive, centers_negative, Wh, update_residue, residue\n\n\ndef dequantize(Q, cp, cn):\n \"\"\"\n Dequantize from the given 1-bit quantization and the reconstruction values.\n Parameters:\n Q : input quantized values (+/- 1)\n cp: center of the quantization bins for positive values\n cn: center of the quantization bins for negative values\n \"\"\"\n\n Qn = 1 - Q\n Wh = cp * Q + cn * Qn\n\n return Wh\n", "id": "5755803", "language": "Python", "matching_score": 2.2590034008026123, "max_stars_count": 0, "path": "QuantizerTest/models/quantizers/tf_implementation/onebit_quantizer.py" }, { "content": "\"\"\"\n Implementation of CS-based quantizer with error feedback\n Note that the seed of the graph must be set as well (tf.set_random_seed(.))\n \n Because of limitations of the shuffle operator\n 1- transpose of T is applied to the buckets \n 2- all buckets use the same shuffled T\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as 
tfp\n\n_rademacher_seed = 63509\n_shuffle_seed = 18574\n\n\ndef quantize(W, T, num_levels, seed, use_Rademacher=True, shuffle=False, error_feedback=True, beta=1.0):\n if num_levels == 0:\n # for sign-based quantization\n sign_quantizer = True\n num_levels = 0.5\n else:\n sign_quantizer = False\n\n bucket_size = T.shape[0]\n W_shape = W.get_shape().as_list()\n r_shape = [np.prod(W_shape) // bucket_size, T.shape[0]]\n u_shape = [np.prod(W_shape) // bucket_size, T.shape[1]]\n\n if error_feedback:\n # variable to store the residual signal\n residue = tf.Variable(tf.zeros(shape=W_shape), dtype=tf.float32, trainable=False)\n else:\n residue = 0\n\n # transform to be applied\n T = tf.constant(T, dtype=tf.float32)\n\n # random dither and rademacher generator\n u = tf.random.uniform(u_shape, minval=-0.5, maxval=0.5, dtype=tf.float32, seed=seed)\n r = tfp.math.random_rademacher(shape=r_shape, dtype=tf.float32, seed=seed + _rademacher_seed)\n\n # random shuffling the ROWS of T\n if shuffle:\n S = tf.random.shuffle(T, seed=seed + _shuffle_seed)\n else:\n S = T\n\n # A- Quantization\n # 1- add residue to the input data\n Y = W + beta * residue\n\n # 2- reshape to the bucket\n y = tf.reshape(Y, shape=(-1, bucket_size))\n\n # 3- apply the transform\n if use_Rademacher:\n y = tf.multiply(y, r)\n\n y = tf.matmul(y, S)\n\n # 4- normalize y to become in [-num_levels, num_levels]\n max_y = tf.reduce_max(tf.abs(y), axis=1, keepdims=True) + 1e-12\n scale = max_y / num_levels\n y = y / scale\n\n # 5- generate dither, add it to y and then quantize\n if sign_quantizer:\n q = tf.cast((y + u) > 0, tf.float32)\n else:\n q = tf.round(y + u)\n\n # B- Dequantization and saving residue\n # 1- dequantize\n yh = (q - u) * scale\n\n # 2- inverse of the transform\n yh = tf.matmul(yh, tf.transpose(S))\n if use_Rademacher:\n yh = tf.multiply(yh, r)\n\n # 3- reshape\n Wh = tf.reshape(yh, shape=W_shape)\n\n # 4- compute and save residual signal\n if error_feedback:\n new_residue = Y - Wh\n with tf.control_dependencies([Wh, new_residue]):\n update_residue = residue.assign(new_residue)\n else:\n update_residue = tf.no_op()\n\n return q, scale, Wh, update_residue, residue\n\n\ndef dequantize(q, scale, W_shape, T, seed, use_Rademacher=False, shuffle=True):\n bucket_size = T.shape[0]\n r_shape = [np.prod(W_shape) // bucket_size, T.shape[0]]\n u_shape = [np.prod(W_shape) // bucket_size, T.shape[1]]\n\n # transform to be applied\n T = tf.constant(T, dtype=tf.float32)\n\n # random dither and rademacher generator\n u = tf.random.uniform(u_shape, minval=-0.5, maxval=0.5, dtype=tf.float32, seed=seed)\n r = tfp.math.random_rademacher(shape=r_shape, dtype=tf.float32, seed=seed + _rademacher_seed)\n\n # random shuffling the ROWS of T\n if shuffle:\n S = tf.random.shuffle(T, seed=seed + _shuffle_seed)\n else:\n S = T\n\n # 1- dequantize\n yh = (q - u) * scale\n\n # 2- inverse of the transform\n yh = tf.matmul(yh, tf.transpose(S))\n if use_Rademacher:\n yh = tf.multiply(yh, r)\n\n # 3- reshape\n Yh = tf.reshape(yh, shape=W_shape)\n\n return Yh\n", "id": "9534092", "language": "Python", "matching_score": 4.064499855041504, "max_stars_count": 0, "path": "QuantizerTest/models/quantizers/tf_implementation/cs_quantizer_shuffled.py" }, { "content": "\"\"\"\n Implementation of CS-based quantizer with error feedback\n Note that the seed of the graph must be set as well (tf.set_random_seed(.))\n\n Because of limitatiosn of the shuffle oeprator\n 1- transpsoe of T is applied to the buckets\n 2- all buckets use the same shuffled T\n\"\"\"\n\nimport numpy 
as np\n\n\nclass cs_quantizer:\n def __init__(self, T, num_levels, feedback=False, beta=0):\n self._T = T\n self._num_levels = num_levels\n self._feedback = feedback\n self._beta = beta\n self._bucket_size = T.shape[0]\n self._residue = 0\n\n if not self._feedback:\n self._beta = 0\n\n if self._num_levels == 0:\n self._sign_quantizer = True\n self._num_levels = 0.5\n else:\n self._sign_quantizer = False\n\n def quantize(self, X, reconstructed=True):\n X_shape = X.shape\n r_shape = [X.size // self._bucket_size, self._T.shape[0]]\n u_shape = [X.size // self._bucket_size, self._T.shape[1]]\n\n # random dither and rademacher generator\n u = np.random.uniform(low=-0.5, high=0.5, size=u_shape)\n r = 2 * np.random.randint(low=0, high=2, size=r_shape) - 1\n\n # A- Quantization\n # 1- add residue to the input data\n Y = X + self._beta * self._residue\n\n # 2- reshape to the bucket\n y = np.reshape(Y, newshape=(-1, self._bucket_size))\n\n # 3- apply the transform\n y = np.matmul(y * r, self._T)\n\n # 4- normalize y to become in [-num_levels, num_levels]\n max_y = np.amax(np.abs(y), axis=1, keepdims=True) + 1e-12\n scale = max_y / self._num_levels\n y = y / scale\n\n # 5- generate dither, add it to y and then quantize\n if self._sign_quantizer:\n q = ((y + u) > 0).astype(np.float)\n else:\n q = np.round(y + u)\n\n # B- Dequantization and saving residue\n # 1- dequantize\n yh = (q - u) * scale\n\n # 2- inverse of the transform\n yh = np.matmul(yh, self._T.T)\n yh = yh * r\n\n # 3- reshape\n Xh = np.reshape(yh, newshape=X_shape)\n\n # 4- compute and save residual signal\n self._residue = Y - Xh\n\n if reconstructed:\n return Xh\n else:\n return q, scale\n\n def reset(self):\n self._residue = 0\n", "id": "6889668", "language": "Python", "matching_score": 2.813354015350342, "max_stars_count": 0, "path": "CompressionError/models/quantizers/cs_quantizer.py" }, { "content": "\"\"\"\n Implementation of dithered transformed quantizer\n\"\"\"\n\nimport tensorflow as tf\nimport tensorflow_probability as tfp\n\n_rademacher_seed = 63509\n\n\n# =============================================================================\n# dithered quantization\ndef quantize(W, H, num_levels, seed):\n if num_levels == 0:\n # for sign-based quantization\n sign_quantizer = True\n num_levels = 0.5\n else:\n sign_quantizer = False\n\n bucket_size = H.shape[0]\n H = tf.constant(H, dtype=tf.float32)\n\n # reshape to the bucket\n w = tf.reshape(W, shape=[-1, bucket_size])\n w_shape = tf.shape(w)\n\n # generate random signals: dither signals and Rademacher variables\n u = tf.random.uniform(shape=w_shape, minval=-0.5, maxval=0.5, dtype=tf.float32, seed=seed)\n d = tfp.math.random_rademacher(shape=w_shape, dtype=tf.float32, seed=seed + _rademacher_seed)\n\n # apply the random transform on w\n w = tf.multiply(d, w)\n w = tf.matmul(w, H)\n\n # normalize w to become in [-num_levels, num_levels]\n max_w = tf.reduce_max(tf.abs(w), axis=1, keepdims=True) + 1e-12\n scale = max_w / num_levels\n y = w / scale\n\n # generate dither, add it to y and then quantize\n if sign_quantizer:\n q = tf.cast((y + u) > 0, tf.float32)\n else:\n q = tf.round(y + u)\n\n wh = (q - u) * scale\n\n # dequantize operations\n # apply transform again\n wh = tf.matmul(wh, tf.transpose(H))\n wh = tf.multiply(wh, d)\n\n Wh = tf.reshape(wh, shape=tf.shape(W))\n\n return q, scale, Wh\n\n\ndef dequantize(q, scale, H, seed):\n\n H = tf.constant(H, dtype=tf.float32)\n\n w_shape = tf.shape(q)\n\n # generate random signals: dither signals and Rademacher variables\n u = 
tf.random.uniform(shape=w_shape, minval=-0.5, maxval=0.5, dtype=tf.float32, seed=seed)\n d = tfp.math.random_rademacher(shape=w_shape, dtype=tf.float32, seed=seed + _rademacher_seed)\n\n w = (q - u) * scale\n\n # apply transform again\n w = tf.matmul(w, tf.transpose(H))\n w = tf.multiply(w, d)\n\n return w\n", "id": "1247994", "language": "Python", "matching_score": 4.297994613647461, "max_stars_count": 0, "path": "QuantizerTest/models/quantizers/tf_implementation/dithered_transform_quantizer.py" }, { "content": "\"\"\"\n Implementation of dithered quantizer\n\"\"\"\n\nimport tensorflow as tf\n\n\n# =============================================================================\n# dithered quantization\ndef quantize(W, num_levels, bucket_size, seed):\n if num_levels == 0:\n # for sign-based quantization\n sign_quantizer = True\n num_levels = 0.5\n else:\n sign_quantizer = False\n\n # reshape to the bucket\n w = tf.reshape(W, shape=[-1, bucket_size])\n w_shape = tf.shape(w)\n\n # generate random signals: dither signals and Rademacher variables\n u = tf.random.uniform(shape=w_shape, minval=-0.5, maxval=0.5, dtype=tf.float32, seed=seed)\n\n # normalize w to become in [-num_levels, num_levels]\n max_w = tf.reduce_max(tf.abs(w), axis=1, keepdims=True) + 1e-12\n scale = max_w / num_levels\n y = w / scale\n\n # generate dither, add it to y and then quantize\n if sign_quantizer:\n q = tf.cast((y + u) > 0, tf.float32)\n else:\n q = tf.round(y + u)\n\n wh = (q - u) * scale\n\n # dequantize operations\n Wh = tf.reshape(wh, shape=tf.shape(W))\n\n return q, scale, Wh\n\n\ndef dequantize(q, scale, seed):\n \"\"\"\n dequantize the received quantized values, usign the bucket size d and scales\n :param Q: quantized values\n :param scale: scale to multiply to the quantized values to reconstruct the original data\n :param d: bucket size\n :return: ndarray of the same shape as Q, dequantized values\n \"\"\"\n\n w_shape = tf.shape(q)\n\n # generate random dither signals\n u = tf.random.uniform(shape=w_shape, minval=-0.5, maxval=0.5, dtype=tf.float32, seed=seed)\n\n w = (q - u) * scale\n\n return w", "id": "919132", "language": "Python", "matching_score": 3.8231215476989746, "max_stars_count": 0, "path": "QuantizerTest/models/quantizers/tf_implementation/dithered_quantizer.py" }, { "content": "import numpy as np\nimport tensorflow as tf\n\n\ndef quantize(W, num_levels, bucket_size, seed):\n \"\"\"\n the input tensor is reshaped into vector form and divided into buckets of length d. it uses maximum value of the vector as the scaling parameter for quantization. 
The output scale is such that by multiplying it with quantized values, the points will be reconstructed.\n :param W: input tensor to be quantizer\n :param bucket_size: bucket size\n :param num_levels: number of levels for quantizing W, output will be in the range [-num_levels, ..., +num_levels]\n :return: quantized values and the scale\n \"\"\"\n\n w = tf.reshape(W, shape=[-1, bucket_size])\n w_shape = tf.shape(w)\n\n # 1- normalize w to become in [-num_levels, num_levels]\n max_w = tf.reduce_max(tf.abs(w), axis=1, keepdims=True) + 1e-12\n scale = max_w / num_levels\n y = w / scale\n\n # 2- generate dither, add it to y and then quantize\n u = tf.random.stateless_uniform(shape=w_shape, minval=-0.5, maxval=0.5, dtype=tf.float32, seed=[seed, 0])\n # an integer number in the range -num_levels, ..., num_levels\n q = tf.cast(tf.round(y + u), tf.int8)\n\n return q, scale\n\n\ndef dequantize(q, scale, num_levels, bucket_size, seed):\n \"\"\"\n dequantize the received quantized values, usign the bucket size d and scales\n :param Q: quantized values\n :param scale: scale to multiply to the quantized values to reconstruct the original data\n :param bucket_size: bucket size\n :return: ndarray of the same shape as Q, dequantized values\n \"\"\"\n\n w_shape = tf.shape(q)\n\n u = tf.random.stateless_uniform(shape=w_shape, minval=-0.5, maxval=0.5, dtype=tf.float32, seed=[seed, 0])\n w = tf.multiply((q - u), scale)\n\n return w", "id": "539939", "language": "Python", "matching_score": 4.755506992340088, "max_stars_count": 3, "path": "quantizers/tf_dithered_quantizer.py" }, { "content": "\"\"\"\n Implementation of the paper\n <NAME>, <NAME>, <NAME>, and <NAME>,\n 'QSGD: Communication-Efficient SGD via Gradient Quantization and Encoding', NIPS 2017\n\n QSGD quantizer:\n q, scale = _qsgd_quantizer(x, s, seed=None, order=np.inf):\n\n Dequantizer:\n y = scale * q / s\n\"\"\"\n\nimport tensorflow as tf\n\n\ndef quantize(W, num_levels, bucket_size):\n \"\"\"\n quantize input tensor W using QSGD method. the input tensor is reshaped into vecot form and divided into buckets of\n length d. it used maximum value of the vector as the scaling parameter for quantization. 
The output scale is such that\n by multiplying it with quantized values, the points will be reconstructed.\n :param W: input tensor to be quantizer\n :param d: bucket size\n :param num_levels: number of levels for quantizing |W|\n :return: quantized values and the scale\n \"\"\"\n\n w = tf.reshape(W, shape=[-1, bucket_size])\n w_shape = tf.shape(w)\n\n # 1- normalize w to become in [-num_levels, num_levels]\n max_w = tf.reduce_max(tf.abs(w), axis=1, keepdims=True) + 1e-12\n scale = max_w / num_levels\n y = w / scale\n\n # 2- generate dither, add it to y and then quantize\n u = tf.random.uniform(shape=w_shape, minval=-0.5, maxval=0.5, dtype=tf.float32)\n # an integer number in the range -num_levels, ..., num_levels\n q = tf.round(y + u)\n\n # dequantize\n wh = q * scale\n\n Wh = tf.reshape(wh, shape=tf.shape(W))\n \n return q, scale, Wh\n \n\ndef dequantize(q, scale):\n \"\"\"\n dequantize the received quantized values, usign the bucket size d and scales\n :param q: quantized values\n :param scale: scale to multiply to the quantized values to reconstruct the original data\n :return: ndarray of the same shape as Q, dequantized values\n \"\"\"\n\n w = q * scale\n\n return w\n\n", "id": "5861693", "language": "Python", "matching_score": 3.8042666912078857, "max_stars_count": 0, "path": "QuantizerTest/models/quantizers/tf_implementation/qsg_quantizer.py" }, { "content": "\"\"\"\n Implementation of the paper\n <NAME>, <NAME>, <NAME>, and <NAME>,\n 'QSGD: Communication-Efficient SGD via Gradient Quantization and Encoding', NIPS 2017\n\n QSGD quantizer:\n q, scale = _qsgd_quantizer(x, s, seed=None, order=np.inf):\n\n Dequantizer:\n y = scale * q / s\n\"\"\"\n\nimport numpy as np\n\n\nclass qsg_quantizer:\n def __init__(self, bucket_size, num_levels):\n self._bucket_size = bucket_size\n self._num_levels = num_levels\n\n def quantize(self, X, reconstructed=True):\n \"\"\"\n quantize input tensor W using QSGD method. the input tensor is reshaped into vecot form and divided into buckets\n of length d. it used maximum value of the vector as the scaling parameter for quantization. 
The output scale is\n such that by multiplying it with quantized values, the points will be reconstructed.\n :param W: input tensor to be quantizer\n :param d: bucket size\n :param num_levels: number of levels for quantizing |W|\n :return: quantized values and the scale\n \"\"\"\n\n if self._bucket_size is None:\n self._bucket_size = X.size\n\n if X.size % self._bucket_size != 0:\n raise ValueError('the number of variables must be divisible by the bucket size (d).')\n\n w = np.reshape(X, newshape=(-1, self._bucket_size))\n norm_w = np.linalg.norm(w, ord=np.inf, axis=1) + np.finfo(float).eps\n\n # 1- normalize w\n sign_w = np.sign(w)\n y = np.abs(w) / norm_w[:, np.newaxis]\n\n # 2- initial quantization (q0(y) = l where y is in [l/s, (l+1)/s)\n q0 = np.floor(y * self._num_levels) # an integer number in the range 0, 1, ..., s\n # d is the normalized distance of each point to the left boundary of the quantization interval\n d = self._num_levels * y - q0\n\n # 3- create random binary numbers, b_i = 0 with probability (1-d) and b_i = 1 with probability d\n b = np.zeros(shape=w.shape)\n b[np.random.random(size=w.shape) < d] = 1\n\n q = sign_w * (q0 + b)\n scale = norm_w / self._num_levels\n\n if reconstructed:\n wh = q * scale[:, np.newaxis]\n Xh = np.reshape(wh, newshape=X.shape)\n\n return Xh\n else:\n Q = np.reshape(q, newshape=X.shape).astype(np.int)\n\n return Q, scale\n\n def dequantize(self, Q, scale):\n \"\"\"\n dequantize the received quantized values, usign the bucket size d and scales\n :param Q: quantized values\n :param scale: scale to multiply to the quantized values to reconstruct the original data\n :param d: bucket size\n :return: ndarray of the same shape as Q, dequantized values\n \"\"\"\n\n if Q.size % self._bucket_size != 0:\n raise ValueError('the number of variables must be divisible by the bucket size (d).')\n\n if self._bucket_size == Q.size:\n Xh = scale[0] * Q\n else:\n q = np.reshape(Q, (-1, self._bucket_size))\n w = q * scale[:, np.newaxis]\n\n Xh = np.reshape(w, newshape=Q.shape)\n\n return Xh\n\n def reset(self):\n return\n", "id": "9034755", "language": "Python", "matching_score": 5.266594886779785, "max_stars_count": 0, "path": "SimpleMSE/np/quantizers/qsg_quantizer.py" }, { "content": "\"\"\"\n Implementation of transformed dithered quantizer one dimensional quantizer\n\"\"\"\n\nimport numpy as np\n\n\nclass dt_quantizer:\n def __init__(self, T, num_levels):\n self._T = T\n self._num_levels = num_levels\n self._bucket_size = T.shape[0]\n\n def quantize(self, X, reconstructed=True):\n \"\"\"\n quantize input tensor W using QSG method. the input tensor is reshaped into vector form and divided into buckets of\n length d. it used maximum value of the vector as the scaling parameter for quantization. 
The output scale is such that\n by multiplying it with quantized values, the points will be reconstructed.\n :param W: input tensor to be quantizer\n :param num_levels: number of levels for quantizing W, output will be in the range [-num_levels, ..., +num_levels]\n :param d: bucket size\n :return: quantized values and the scale\n \"\"\"\n\n if X.size % self._bucket_size != 0:\n raise ValueError('the number of variables must be divisible by the bucket size (d).')\n\n if self._num_levels == 0:\n # 1-bit (sign) dithered quantization\n return self._onebit_quantizer(X, reconstructed)\n\n w = np.reshape(X, newshape=(-1, self._bucket_size))\n w = np.matmul(w, self._T)\n\n # 1- normalize x\n scale = np.linalg.norm(w, ord=np.inf, axis=1) / self._num_levels + np.finfo(float).eps\n\n y = w / scale[:, np.newaxis]\n\n # 2- generate dither, add it to y and then quantize\n u = np.random.uniform(-0.5, 0.5, size=y.shape)\n q = np.around(y + u) # an integer number in the range -s, ..., -1, 0, 1, ..., s\n\n if reconstructed:\n w = (q - u) * scale[:, np.newaxis]\n w = np.matmul(w, self._T.T)\n Xh = np.reshape(w, newshape=X.shape)\n return Xh\n\n else:\n Q = np.reshape(q, newshape=X.shape).astype(int)\n return Q, scale\n\n\n def dequantize(self, Q, scale):\n \"\"\"\n dequantize the received quantized values, usign the bucket size d and scales\n :param Q: quantized values\n :param scale: scale to multiply to the quantized values to reconstruct the original data\n :param d: bucket size\n :return: ndarray of the same shape as Q, dequantized values\n \"\"\"\n\n if Q.size % self._bucket_size != 0:\n raise ValueError('the number of variables must be divisible by the bucket size (d).')\n\n q = np.reshape(Q, (-1, self._bucket_size))\n u = np.random.uniform(-0.5, 0.5, size=q.shape)\n w = (q - u) * scale[:, np.newaxis]\n w = np.matmul(w, self._T.T)\n Xh = np.reshape(w, newshape=Q.shape)\n\n return Xh\n\n def _onebit_quantizer(self, X, reconstructed):\n \"\"\"\n quantize input tensor W using QSG method. the input tensor is reshaped into vector form and divided into buckets of\n length d. it used maximum value of the vector as the scaling parameter for quantization. 
The output scale is such that\n by multiplying it with quantized values, the points will be reconstructed.\n :param W: input tensor to be quantizer\n :param d: bucket size\n :return: quantized values and the scale\n \"\"\"\n\n w = np.reshape(X, newshape=(-1, self._bucket_size))\n w = np.matmul(w, self._T)\n\n # 1- normalize x\n scale = np.linalg.norm(w, ord=np.inf, axis=1) + np.finfo(float).eps\n\n y = w / scale[:, np.newaxis]\n\n # 2- generate dither, add it to y and then quantize\n u = np.random.uniform(-1., 1., size=y.shape)\n q = np.sign(y + u) # +/- 1\n q[q == 0] = 1\n\n if reconstructed:\n w = (q - u) * scale[:, np.newaxis]\n w = np.matmul(w, self._T.T)\n Xh = np.reshape(w, newshape=X.shape)\n return Xh\n \n else:\n Q = np.reshape(q, newshape=X.shape).astype(int)\n return Q, scale\n", "id": "6333862", "language": "Python", "matching_score": 1.2252721786499023, "max_stars_count": 0, "path": "CompressionError/models/quantizers/dithered_transform_quantizer.py" }, { "content": "\"\"\"\n Implementation of different indirect quantization algorithms\n\"\"\"\n\nimport numpy as np\nimport scipy.linalg as sla\nimport quadprog\nimport distributed_training.optimum_quantizer as opt_quantizer\n\n\nclass DeterministicISGQuantizer:\n \"\"\"\n implementation of the indirect quantization, G=X' Y.\n naive: X and Y are quantized independently according to their expected distribution\n mv: First, X and Y are quantized independently according to their expected distribution.\n Next, the reconstruction points are optimized to minimize the error |G-X'Y|\n mvu: First, X and Y are quantized independently according to their expected distribution.\n Then, the reconstruction points are optimized such that |G-X'Y| is minimized subject to sum(G-X'Y)=0\n \"\"\"\n\n # initialize the quantizer engine, the supported quantization levels and data models\n def __init__(self, num_levels=(2, 4, 8), models=('sn', 'sfn', 'u', 'su'), sparsity_thr=1e-6):\n self._num_levels = num_levels\n self._models = models\n self._quantizers = {}\n for t in self._models:\n q = opt_quantizer.OptimumQuantizer()\n q.initialize_quantizer(model=t, num_levels=self._num_levels, sparsity_thr=sparsity_thr)\n self._quantizers[t] = q\n\n # quantize the input signals based on their distribution model, number of quantization levels and method\n def quantize(self, X, Y, model=('sfn', 'sn'), num_levels=(2, 2), method='naive', opt_iterations=1):\n\n qX, cX = self._quantize(X, model[0], num_levels[0])\n qY, cY = self._quantize(Y, model[1], num_levels[1])\n\n if method == 'mv':\n # optimize the centers of the bins for the minimum variance indirect quantizer\n G = np.matmul(X.transpose(), Y)\n # optimize for the centers of the quantizers\n for _ in range(opt_iterations):\n X_hat = cX[qX]\n optimize_centers_mviq(A=X_hat.transpose(), B=G, Q=qY, centers=cY, keep_sparsity=(model[0][0] == 's'))\n\n Y_hat = cY[qY]\n optimize_centers_mviq(\n A=Y_hat.transpose(), B=G.transpose(), Q=qX, centers=cX, keep_sparsity=(model[1][0] == 's')\n )\n\n elif method == 'mvu':\n # optimize the centers of the bins for the minimum variance unbiased indirect quantizer\n G = np.matmul(X.transpose(), Y)\n # optimize for the centers of the quantizers\n for _ in range(opt_iterations):\n X_hat = cX[qX]\n optimize_centers_mvuiq(A=X_hat.transpose(), B=G, Q=qY, centers=cY, keep_sparsity=(model[0][0] == 's'))\n\n Y_hat = cY[qY]\n optimize_centers_mvuiq(\n A=Y_hat.transpose(), B=G.transpose(), Q=qX, centers=cX, keep_sparsity=(model[1][0] == 's')\n )\n\n return qX, cX, qY, cY\n\n def 
_quantize(self, X, model, num_levels):\n # 1- if necessary, normalize x\n if model in ('uniform', 'u', 'sparse-uniform', 'su'):\n scale = 1.0\n else:\n scale = sla.norm(X) / np.sqrt(np.count_nonzero(np.abs(X) > 1e-10) + 1e-12)\n\n y = X / scale\n qX, cX = self._quantizers[model].quantize(y, num_levels)\n cX = scale * cX\n\n return qX, cX\n\n\n# =============================================================================\ndef optimize_centers_mviq(A, B, Q, centers, keep_sparsity=True):\n \"\"\" minimize reconstruction error after weighting by matrix A\n min_{c_i} \\|A.(\\sum_i Q_i c_i) - B\\|_F^2\n \"\"\"\n num_levels = len(centers)\n thr = sla.norm(A) * 1e-6\n\n # 1- compute A*(Q==i) and store it. find the non-empty quantization bins in the process\n valid_idx = []\n AQ = [np.zeros(1) for _ in range(num_levels)]\n for i in range(num_levels):\n AQ[i] = np.matmul(A, Q == i)\n\n if (sla.norm(AQ[i]) >= thr) and ((centers[i] != 0) or not keep_sparsity):\n # check whether the i-th bin has any effect on the quantization performance and\n # do not consider sparse values (center=0)\n valid_idx += [i]\n\n if not valid_idx:\n return\n\n # 2- find the optimum reconstruction points for the non-empty quantization bins\n # 2.a- create matrix M, used in the optimization problem\n num_valid = len(valid_idx)\n M = np.zeros(shape=(num_valid, num_valid))\n e = np.zeros(shape=num_valid)\n for r in range(num_valid):\n for c in range(r, num_valid):\n # np.trace(np.matmul(AQ[valid_idx[c]].transpose(), AQ[valid_idx[r]]))\n M[r, c] = np.sum(AQ[valid_idx[c]] * AQ[valid_idx[r]])\n M[c, r] = M[r, c]\n\n # np.trace(np.matmul(B.transpose(), AQ[valid_idx[r]]))\n e[r] = np.sum(AQ[valid_idx[r]] * B)\n\n # 2.b- solve for Mx=e\n v = sla.lstsq(M, e)[0]\n\n # 3- copy the found center points\n centers[valid_idx] = v\n\n return centers\n\n\ndef optimize_centers_mvuiq(A, B, Q, centers, keep_sparsity=True):\n \"\"\" minimize reconstruction error after weighting by matrix A and make it unbiased\n min_{c_i} \\|A.(\\sum_i Q_i c_i) - B\\|_F^2 such that sum(B-A(\\sum_i Q_i c_i)) = 0\n \"\"\"\n num_levels = len(centers)\n thr = sla.norm(A) * 1e-6\n\n # 1- compute A*(Q==i) and store it. 
find the non-empty quantization bins in the process\n valid_idx = []\n AQ = [np.zeros(1) for _ in range(num_levels)]\n for i in range(num_levels):\n AQ[i] = np.matmul(A, Q == i)\n\n if (sla.norm(AQ[i]) >= thr) and ((centers[i] != 0) or not keep_sparsity):\n # check whether the i-th bin has any effect on the quantization performance and\n # do not consider sparse values (center=0)\n valid_idx += [i]\n\n if not valid_idx:\n return\n\n # 2- find the optimum reconstruction points for the non-empty quantization bins\n # 2.a- create matrix M, used in the optimization problem\n num_valid = len(valid_idx)\n d = np.sum(B)\n f = np.zeros(num_valid)\n M = np.zeros(shape=(num_valid, num_valid))\n e = np.zeros(shape=num_valid)\n\n for r in range(num_valid):\n f[r] = np.sum(AQ[valid_idx[r]])\n for c in range(r, num_valid):\n # trace(AQ[valid_idx[c]].T @ AQ[valid_idx[r]])\n M[r, c] = np.sum(AQ[valid_idx[c]] * AQ[valid_idx[r]])\n M[c, r] = M[r, c]\n\n # trace(B.T @ AQ[valid_idx[r]])\n e[r] = np.sum(AQ[valid_idx[r]] * B)\n\n # 2.b- solve for min |Mx-e| such that fx=d\n if num_valid == 0:\n v = 0\n elif num_valid == 1:\n v = d / f[0]\n elif num_valid == 2:\n # for the special binary case, the solution can be found easily\n scale = sla.norm(f) + 1e-12\n f /= scale\n d /= scale\n u = np.array([-f[1], f[0]])\n a = (e - d * M.dot(f)).dot(u) / (M.dot(u).dot(u) + 1e-12)\n v = d * f + a * u\n else:\n # use quadratic programming (Goldfarb-Idnani algorithm) to solve the problem\n d = np.array([d]).astype(np.float)\n f = np.reshape(f, newshape=(-1, 1))\n v = quadprog.solve_qp(M, e, f, d, 1)[0]\n\n # 3- copy the found center points\n centers[valid_idx] = v\n\n return centers\n\n\n# =============================================================================\nclass DitheredISGQuantizer:\n \"\"\"\n implementation of the indirect quantization, G=X' Y, using random dithered quantization\n \"\"\"\n def set_seed(self, seed):\n np.random.seed(seed)\n\n # dithered quantization\n def quantize(self, W, num_levels=2, sparse=True, bucket_size=None):\n \"\"\"\n the input tensor is reshaped into vector form and divided into buckets of length d.\n it uses maximum value of the vector as the scaling parameter for quantization.\n The output scale is such that by multiplying it with quantized values, the points will be reconstructed.\n :param W: input tensor to be quantizer\n :param bucket_size: bucket size\n :param num_levels: number of levels for quantizing W, output will be in the range\n [-num_levels, ..., +num_levels]\n :return: quantized values and the scale\n \"\"\"\n\n if bucket_size is None:\n bucket_size = W.size\n\n if W.size % bucket_size != 0:\n raise ValueError('the number of variables must be divisible by the bucket size.')\n\n w = np.reshape(W, newshape=(-1, bucket_size))\n\n # 1- normalize w to become in [-num_levels, num_levels]\n max_w = np.amax(np.abs(w), axis=1) + 1e-12\n scale = max_w / num_levels\n y = w / scale[:, np.newaxis]\n\n # 2- generate dither, add it to y and then quantize\n u = np.random.uniform(-0.5, 0.5, size=y.shape)\n # an integer number in the range -num_levels or 0, ..., num_levels\n q = np.around(y + u).astype(np.int8)\n\n Q = np.reshape(q, newshape=W.shape)\n if sparse:\n # quantize 0 values separately\n Q[np.abs(W) < 1e-12] = num_levels + 1\n\n return Q, scale\n\n def dequantize(self, Q, scale, num_levels=2, sparse=True, bucket_size=None):\n \"\"\"\n dequantize the received quantized values, usign the bucket size d and scales\n :param Q: quantized values\n :param scale: scale to multiply to the 
quantized values to reconstruct the original data\n :param bucket_size: bucket size\n :return: ndarray of the same shape as Q, dequantized values\n \"\"\"\n\n if bucket_size is None:\n bucket_size = Q.size\n\n if Q.size % bucket_size != 0:\n raise ValueError('the number of variables must be divisible by the bucket size.')\n\n if bucket_size == Q.size:\n u = np.random.uniform(-0.5, 0.5, size=Q.shape)\n W = scale[0] * (Q - u)\n else:\n q = np.reshape(Q, (-1, bucket_size))\n u = np.random.uniform(-0.5, 0.5, size=q.shape)\n w = (q - u) * scale[:, np.newaxis]\n\n W = np.reshape(w, newshape=Q.shape)\n\n # check for the sparse dequantization\n if sparse:\n W[Q == (num_levels + 1)] = 0\n\n return W\n", "id": "10916074", "language": "Python", "matching_score": 3.0888681411743164, "max_stars_count": 3, "path": "quantizers/isgq_quantizer.py" }, { "content": "\"\"\"\n desinging optimum quantizers for different probability distributions.\n\"\"\"\n\nimport itertools\nimport numpy as np\nimport scipy.stats as stat\n\n\nclass OptimumQuantizer:\n def __init__(self):\n self._model = None\n self._valid_levels = None\n self._quantizer_bins = None\n self._quantizer_centers = None\n\n def initialize_quantizer(self, model, num_levels=(2, 4, 8), sparsity_thr=1e-4, x=None):\n self._model = model\n self._valid_levels = np.array(num_levels)\n self._quantizer_bins = [None] * len(self._valid_levels)\n self._quantizer_centers = [None] * len(self._valid_levels)\n\n if model == 'normal' or model == 'n':\n self._initialize_normal_quantizer()\n elif model == 'sparse-normal' or model == 'sn':\n self._initialize_sparse_normal_quantizer(sparsity_thr)\n elif model == 'folded-normal' or model == 'fn':\n self._initialize_folded_normal_quantizer()\n elif model == 'sparse-folded-normal' or model == 'sfn':\n self._initialize_sparse_folded_normal_quantizer(sparsity_thr)\n elif model == 'uniform' or model == 'u':\n self._initialize_uniform_quantizer()\n elif model == 'sparse-uniform' or model == 'su':\n self._initialize_sparse_uniform_quantizer(sparsity_thr)\n elif model == 'empirical' or model == 'e':\n self._initialize_empirical_quantizer(x)\n else:\n raise ValueError('Unknown data distribution model!')\n\n\n def quantize(self, x, num_levels):\n if num_levels not in self._valid_levels:\n raise ValueError('Quantizer for the given number of levels has not been initialized.')\n\n q_idx = np.where(self._valid_levels == num_levels)[0][0]\n\n q = np.digitize(x, self._quantizer_bins[q_idx])\n \n return q, self._quantizer_centers[q_idx]\n\n\n def dequantize(self, q, num_levels):\n if num_levels not in self._valid_levels:\n raise ValueError('Quantizer for the given number of levels has not been initialized.')\n\n q_idx = np.where(self._valid_levels == num_levels)[0][0]\n x = self._quantizer_centers[q_idx][q]\n\n return x\n\n # =========================================================================\n # using Lloyd-Max algorithm, find the optimum quantizer for different distributions\n def _initialize_normal_quantizer(self):\n s = np.sqrt(2*np.pi)\n\n max_iterations = 1000\n for n, num_levels in enumerate(self._valid_levels):\n # initialize quantizer's thresholds and centers\n bins = np.linspace(-1, 1, num_levels + 1)\n centers = (bins[1:] + bins[:-1]) / 2\n bins = bins[1:-1]\n\n for _ in range(max_iterations):\n old_centers = centers.copy()\n cdf_x = stat.norm.cdf(bins)\n exp_x = -np.exp(-bins**2 / 2) / s\n\n # a- updating centers\n centers[0] = exp_x[0] / cdf_x[0]\n centers[1:-1] = (exp_x[1:] - exp_x[0:-1]) / (cdf_x[1:] - cdf_x[0:-1])\n 
centers[-1] = -exp_x[-1] / (1-cdf_x[-1])\n\n # b- update bins\n bins = (centers[:-1] + centers[1:]) / 2\n\n # c- check for convergence\n if np.max(np.abs(centers - old_centers)) < 1e-3:\n break\n\n self._quantizer_bins[n] = bins\n self._quantizer_centers[n] = centers\n\n def _initialize_sparse_normal_quantizer(self, thr):\n s = np.sqrt(2*np.pi)\n\n max_iterations = 1000\n for n, num_levels in enumerate(self._valid_levels):\n # initialize quantizer's thresholds and centers\n K = 1 + num_levels // 2\n bins = np.linspace(thr, 1, K)\n bins = np.concatenate((np.linspace(-1, -thr, K), np.linspace(thr, 1, K)))\n centers = (bins[1:] + bins[:-1]) / 2\n bins = bins[1:-1]\n\n for _ in range(max_iterations):\n old_centers = centers.copy()\n cdf_x = stat.norm.cdf(bins)\n exp_x = -np.exp(-bins**2 / 2) / s\n\n # a- updating centers\n centers[0] = exp_x[0] / cdf_x[0]\n centers[1:-1] = (exp_x[1:] - exp_x[0:-1]) / (cdf_x[1:] - cdf_x[0:-1])\n centers[-1] = -exp_x[-1] / (1-cdf_x[-1])\n\n # b- update bins\n bins = (centers[:-1] + centers[1:]) / 2\n bins[K - 2] = -thr\n bins[K - 1] = thr\n\n # c- check for convergence\n if np.max(np.abs(centers - old_centers)) < 1e-3:\n break\n\n self._quantizer_bins[n] = bins\n self._quantizer_centers[n] = centers\n\n def _initialize_folded_normal_quantizer(self):\n s = np.sqrt(2 / np.pi)\n\n max_iterations = 1000\n for n, num_levels in enumerate(self._valid_levels):\n # initialize quantizer's thresholds and centers\n bins = np.linspace(0, 1, num_levels + 1)\n centers = (bins[1:] + bins[:-1]) / 2\n bins = bins[1:-1]\n\n for _ in range(max_iterations):\n old_centers = centers.copy()\n cdf_x = 2 * stat.norm.cdf(bins) - 1\n mean_x = s * (1 - np.exp(-bins**2 / 2))\n\n # a- updating centers\n centers[0] = mean_x[0] / cdf_x[0]\n centers[1:-1] = (mean_x[1:] - mean_x[0:-1]) / (cdf_x[1:] - cdf_x[0:-1])\n centers[-1] = (s - mean_x[-1]) / (1-cdf_x[-1])\n\n # b- update bins\n bins = (centers[:-1] + centers[1:]) / 2\n\n # c- check for convergence\n if np.max(np.abs(centers - old_centers)) < 1e-3:\n break\n\n self._quantizer_bins[n] = bins\n self._quantizer_centers[n] = centers\n\n def _initialize_sparse_folded_normal_quantizer(self, thr):\n s = np.sqrt(2 / np.pi)\n\n max_iterations = 1000\n for n, num_levels in enumerate(self._valid_levels):\n # initialize quantizer's thresholds and centers\n bins = np.linspace(thr, 1, num_levels + 1)\n centers = np.concatenate(([0], (bins[1:] + bins[:-1]) / 2))\n bins = bins[:-1]\n\n for _ in range(max_iterations):\n old_centers = centers.copy()\n cdf_x = 2 * stat.norm.cdf(bins) - 1\n mean_x = s * (1 - np.exp(-bins**2 / 2))\n\n # a- updating centers\n centers[1:-1] = (mean_x[1:] - mean_x[0:-1]) / (cdf_x[1:] - cdf_x[0:-1])\n centers[-1] = (s - mean_x[-1]) / (1-cdf_x[-1])\n\n # b- update bins\n bins = (centers[:-1] + centers[1:]) / 2\n bins[0] = thr\n\n # c- check for convergence\n if np.max(np.abs(centers - old_centers)) < 1e-3:\n break\n\n self._quantizer_bins[n] = bins\n self._quantizer_centers[n] = centers\n\n def _initialize_uniform_quantizer(self):\n for n, num_levels in enumerate(self._valid_levels):\n bins = np.linspace(0, 1, num_levels + 1)\n centers = (bins[1:] + bins[:-1]) / 2\n bins = bins[1:-1]\n\n self._quantizer_bins[n] = bins\n self._quantizer_centers[n] = centers\n \n def _initialize_sparse_uniform_quantizer(self, thr):\n for n, num_levels in enumerate(self._valid_levels):\n bins = np.linspace(thr, 1, num_levels + 1)\n bins = np.concatenate(([-thr], bins))\n centers = (bins[1:] + bins[:-1]) / 2\n bins = bins[1:-1]\n\n 
self._quantizer_bins[n] = bins\n self._quantizer_centers[n] = centers\n\n def _initialize_empirical_quantizer(self, X):\n x = np.reshape(X, newshape=-1)\n min_x = np.min(x)\n max_x = np.max(x)\n\n for n, num_levels in enumerate(self._valid_levels):\n # initialize bins\n bins = np.linspace(min_x, max_x, num_levels + 1)\n centers = (bins[:-1] + bins[1:]) / 2\n bins = bins[1:-1]\n\n for _ in range(1000):\n centers_old = centers.copy()\n # quantize input vector\n q = np.digitize(x, bins)\n _optimize_centers_average(x, q, centers, num_levels)\n bins = (centers[1:] + centers[:-1]) / 2\n\n if np.max(np.abs(centers - centers_old)) < 1e-3:\n break\n\n self._quantizer_bins[n] = bins\n self._quantizer_centers[n] = centers\n\n\n# =============================================================================\n# optimize quantizer's reconstruction points by averaging the points in each bin\ndef _optimize_centers_average(w, q, center, num_levels):\n for n in range(num_levels):\n if n in q:\n center[n] = np.mean(w[q == n])\n", "id": "5038609", "language": "Python", "matching_score": 2.8159844875335693, "max_stars_count": 0, "path": "CompressionError/models/quantizers/optimum_quantizer.py" } ]
4.033134
totallynotvaishnav
[ { "content": "import logging\nfrom smtplib import SMTPRecipientsRefused\n\nfrom django.conf import settings\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template.loader import render_to_string\nfrom django.utils.html import strip_tags\n\nfrom . import settings as app_settings\n\nlogger = logging.getLogger(__name__)\n\n\ndef send_email(subject, body_text, body_html, recipients, extra_context={}):\n\n mail = EmailMultiAlternatives(\n subject=subject,\n body=strip_tags(body_text),\n from_email=settings.DEFAULT_FROM_EMAIL,\n to=recipients,\n )\n\n if app_settings.OPENWISP_HTML_EMAIL and body_html:\n context = dict(\n subject=subject,\n message=body_html,\n logo_url=app_settings.OPENWISP_EMAIL_LOGO,\n )\n context.update(extra_context)\n\n html_message = render_to_string(\n app_settings.OPENWISP_EMAIL_TEMPLATE,\n context=context,\n )\n mail.attach_alternative(html_message, 'text/html')\n try:\n mail.send()\n except SMTPRecipientsRefused as err:\n logger.warning(f'SMTP recipients refused: {err.recipients}')\n", "id": "8188044", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "openwisp_utils/admin_theme/email.py" } ]
0
thomasehuang
[ { "content": "import calendar\nimport requests\nimport time\nfrom typing import Optional\n\ndef get_termination_time():\n # type: () -> Optional[float]\n resp = requests.get('http://169.254.169.254/latest/meta-data/spot/instance-action')\n if resp.status_code != 200:\n return None\n\n action = resp.json()\n\n if action['action'] == 'hibernate':\n return None\n\n return calendar.timegm(time.strptime(action['time'], '%Y-%m-%dT%H:%M:%SZ'))\n", "id": "10431934", "language": "Python", "matching_score": 0.9850293397903442, "max_stars_count": 105, "path": "aws/aws_utils/spot_checker.py" }, { "content": "import calendar\nimport datetime\nimport json\nimport os\nimport subprocess\nimport time\nfrom typing import Any, Dict, List, Tuple\n\ndef utc_to_local_time(dt):\n # type: (datetime.datetime) -> datetime.datetime\n # https://stackoverflow.com/a/13287083\n timestamp = calendar.timegm(dt.timetuple())\n local_dt = datetime.datetime.fromtimestamp(timestamp)\n return local_dt.replace(microsecond=dt.microsecond)\n\ndef format_instance(instance):\n # type: (Dict[str, Any]) -> str\n name = 'Unnamed'\n try:\n name_tag = next(tag for tag in instance.get('Tags', {}) if tag['Key'] == 'Name')\n name = name_tag['Value']\n except StopIteration:\n pass\n\n instance_id = instance['InstanceId']\n instance_type = instance['InstanceType']\n launch_time = instance['LaunchTime']\n key_name = instance['KeyName']\n ip_addr = instance['PublicIpAddress']\n is_spot = bool(instance.get('SpotInstanceRequestId'))\n\n launch_datetime_utc = datetime.datetime.strptime(launch_time, '%Y-%m-%dT%H:%M:%S.%fZ')\n local_launch_time = utc_to_local_time(launch_datetime_utc).strftime('%m/%d/%Y %H:%M:%S')\n\n identifiers = [\n name,\n instance_id,\n instance_type,\n local_launch_time,\n ip_addr,\n 'Spot' if is_spot else 'On Demand',\n ]\n\n return ' :: '.join(identifiers)\n\nclass AwsInstance(object):\n def __init__(self, identity, require_pem=False):\n # type: (str, bool) -> None\n\n self.identity = identity\n self.pem_key = os.path.expanduser('~/.ssh/{}.pem'.format(identity))\n if require_pem and not os.path.exists(os.path.expanduser(self.pem_key)):\n raise ValueError('Cannot create an AWS instance without the key at {}'.format(self.pem_key))\n\n def get_running_instances(self):\n # type: () -> List[Dict[str, Any]]\n\n args = ['aws', 'ec2', 'describe-instances', '--filters', 'Name=instance-state-name,Values=pending,running']\n\n if self.identity:\n args.append('Name=key-name,Values={}'.format(self.identity))\n\n output = subprocess.check_output(args)\n parsed_out = json.loads(output)\n\n # flatten output\n instances = [instance\n for reservation in parsed_out['Reservations']\n for instance in reservation['Instances']]\n\n def sort_key_of_instance(instance):\n # type: (Dict[str, Any]) -> Tuple[time.struct_time, str]\n return (time.strptime(instance['LaunchTime'], '%Y-%m-%dT%H:%M:%S.%fZ'), instance['InstanceId'])\n\n return sorted(instances, key=sort_key_of_instance)\n", "id": "4047044", "language": "Python", "matching_score": 3.4459521770477295, "max_stars_count": 105, "path": "aws/aws_utils/instance_utils.py" }, { "content": "#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nimport argparse\nimport base64\nimport json\nimport os\nimport subprocess\nimport sys\nimport time\nfrom typing import Any, Dict, Optional\nfrom aws_utils.instance_utils import format_instance, AwsInstance\nimport command_queue\n\n# Ithemal runs on Python 2 mostly\ntry:\n input = raw_input\nexcept NameError:\n pass\n\n_DIRNAME = 
os.path.abspath(os.path.dirname(__file__))\n\nclass InstanceMaker(AwsInstance):\n def __init__(self, identity, name, instance_type, db, force, no_connect, spot, queue_name):\n # type: (str, str, str, str, bool, bool, Optional[int], str) -> None\n super(InstanceMaker, self).__init__(identity, require_pem=True)\n self.name = name\n self.instance_type = instance_type\n self.db = db\n self.force = force\n self.no_connect = no_connect\n self.spot = spot\n self.queue_name = queue_name\n\n def start_instance(self):\n # type: () -> None\n\n if not self.force:\n running_instances = self.get_running_instances()\n if running_instances:\n print('You already have {} running instances:'.format(len(running_instances)))\n for instance in running_instances:\n print(format_instance(instance))\n try:\n res = input('Would you still like to continue? (y/n) ').lower()[0]\n except KeyboardInterrupt:\n print('Not creating a new instance')\n return\n\n if res[0] != 'y':\n print('Not creating a new instance')\n return\n\n name = 'Ithemal'\n if self.name:\n name += ': {}'.format(self.name)\n\n block_device_mappings = [{\"DeviceName\": \"/dev/xvda\", \"Ebs\": {\"VolumeSize\": 16}}]\n iam_profile_name = 'ithemal-ec2'\n iam_profile_struct = {'Name': iam_profile_name}\n\n if self.spot:\n launch_specification = {\n 'InstanceType': self.instance_type,\n 'SecurityGroupIds': ['sg-0780fe1760c00d96d'],\n 'BlockDeviceMappings': block_device_mappings,\n 'KeyName': self.identity,\n 'ImageId': 'ami-0b59bfac6be064b78',\n 'IamInstanceProfile': iam_profile_struct,\n }\n run_com = lambda com: json.loads(subprocess.check_output(com))['SpotInstanceRequests'][0]\n com = [\n 'aws', 'ec2', 'request-spot-instances',\n '--launch-specification', json.dumps(launch_specification)\n ]\n if self.spot > 0:\n com.extend(['--block-duration-minutes', str(self.spot * 60)])\n output = run_com(com)\n print('Submitted spot instance request')\n\n try:\n while 'InstanceId' not in output:\n print('\\rWaiting for spot request to be fulfilled ({})...'.format(\n output['Status']['Code']\n ), end=' ' * 20 + '\\r')\n\n time.sleep(1)\n output = run_com([\n 'aws', 'ec2', 'describe-spot-instance-requests',\n '--spot-instance-request-ids', output['SpotInstanceRequestId'],\n ])\n\n except (KeyboardInterrupt, SystemExit):\n subprocess.check_call([\n 'aws', 'ec2', 'cancel-spot-instance-requests',\n '--spot-instance-request-ids', output['SpotInstanceRequestId'],\n ])\n sys.exit(1)\n\n print() # clear status message\n\n instance_id = output['InstanceId']\n # set the name, since spot instances don't let us do that in the creation request\n subprocess.check_call([\n 'aws', 'ec2', 'create-tags',\n '--resources', instance_id,\n '--tags', 'Key=Name,Value=\"{}\"'.format(name)\n ])\n else:\n args = [\n 'aws', 'ec2', 'run-instances',\n '--instance-type', self.instance_type,\n '--key-name', self.identity,\n '--image-id', 'ami-0b59bfac6be064b78',\n '--tag-specifications', 'ResourceType=\"instance\",Tags=[{{Key=\"Name\",Value=\"{}\"}}]'.format(name),\n '--security-group-ids', 'sg-0780fe1760c00d96d',\n '--block-device-mappings', json.dumps(block_device_mappings),\n '--iam-instance-profile', json.dumps(iam_profile_struct),\n ]\n output = subprocess.check_output(args)\n parsed_output = json.loads(output)\n instance = parsed_output['Instances'][0]\n instance_id = instance['InstanceId']\n\n print('Started instance! 
Waiting for connection...')\n\n subprocess.check_call(['aws', 'ec2', 'wait', 'instance-running', '--instance-ids', instance_id])\n\n instance = next(instance for instance in self.get_running_instances() if instance['InstanceId'] == instance_id)\n ssh_address = 'ec2-user@{}'.format(instance['PublicDnsName'])\n\n # wait for SSH to actually become available\n while subprocess.call(['ssh', '-oStrictHostKeyChecking=no', '-i', self.pem_key, ssh_address, 'exit'],\n stdout=open(os.devnull, 'w'),\n stderr=open(os.devnull, 'w'),\n ):\n time.sleep(1)\n\n\n git_root = subprocess.check_output(['git', 'rev-parse', '--show-toplevel'], cwd=_DIRNAME).strip()\n ls_files = subprocess.Popen(['git', 'ls-files'], cwd=git_root, stdout=subprocess.PIPE)\n tar = subprocess.Popen(['tar', 'Tcz', '-'], cwd=git_root, stdin=ls_files.stdout, stdout=subprocess.PIPE)\n\n aws_credentials = json.loads(subprocess.check_output(['aws', 'ecr', 'get-authorization-token']).strip())\n authorization_datum = aws_credentials['authorizationData'][0]\n aws_authorization = base64.b64decode(authorization_datum['authorizationToken'])\n aws_authorization_user = aws_authorization[:aws_authorization.index(':')]\n aws_authorization_token = aws_authorization[aws_authorization.index(':')+1:]\n aws_endpoint = authorization_datum['proxyEndpoint']\n\n region = subprocess.check_output(['aws', 'configure', 'get', 'region']).strip()\n\n mysql_credentials_dict = json.loads(subprocess.check_output(['aws', 'secretsmanager', 'get-secret-value', '--secret-id', 'ithemal/mysql-{}'.format(self.db)]).strip())\n mysql_credentials = json.loads(mysql_credentials_dict['SecretString'])\n mysql_user = mysql_credentials['username']\n mysql_password = mysql_credentials['password']\n mysql_host = mysql_credentials['host']\n mysql_port = mysql_credentials['port']\n\n initialization_command = 'mkdir ithemal; cd ithemal; cat | tar xz; aws/aws_utils/remote_setup.sh {}'.format(' '.join(map(str, [\n aws_authorization_user,\n aws_authorization_token,\n aws_endpoint,\n mysql_user,\n mysql_password,\n mysql_host,\n mysql_port,\n region,\n ])))\n\n ssh = subprocess.Popen(['ssh', '-oStrictHostKeyChecking=no', '-i', self.pem_key, ssh_address, initialization_command],\n stdin=tar.stdout)\n ls_files.wait()\n tar.wait()\n ssh.wait()\n\n if self.queue_name:\n self.start_queue_on_instance(instance, ssh_address)\n\n if not self.no_connect:\n os.execlp(sys.executable, sys.executable, os.path.join(_DIRNAME, 'connect_instance.py'), self.identity, instance['InstanceId'])\n\n def start_queue_on_instance(self, instance, ssh_address):\n # type: (Dict[str, Any], str) -> None\n\n subprocess.check_call([\n 'aws', 'ec2', 'create-tags',\n '--resources', instance['InstanceId'],\n '--tags', 'Key=QueueName,Value=\"{}\"'.format(self.queue_name)\n ])\n\n queue_url = command_queue.queue_url_of_name(self.queue_name)\n\n subprocess.check_call([\n 'ssh', '-i', self.pem_key, ssh_address,\n 'sudo docker exec -u ithemal -dit ithemal bash -lc \"~/ithemal/aws/aws_utils/queue_process.py --kill {}\"'.format(queue_url)\n ])\n\ndef main():\n # type: () -> None\n\n parser = argparse.ArgumentParser(description='Create an AWS instance to run Ithemal')\n parser.add_argument('identity', help='Key identity to create with')\n parser.add_argument('-n', '--name', help='Name to start the container with', default=None)\n parser.add_argument('-t', '--type', help='Instance type to start (default: t2.large)', default='t2.large')\n parser.add_argument('-f', '--force', help='Make a new instance without worrying about old 
instances', default=False, action='store_true')\n parser.add_argument('-nc', '--no-connect', help='Don\\'t connect to the instance after it is started', default=False, action='store_true')\n parser.add_argument('-q', '--queue', metavar='QUEUE_NAME', help='Perform actions consumed from given queue')\n\n spot_group = parser.add_mutually_exclusive_group()\n spot_group.add_argument('--spot-reserved', '-sr', help='Start a spot instance, reserved for a specific duration (between 1 and 6 hours)', type=int, dest='spot', metavar='DURATION')\n spot_group.add_argument('--spot-preempt', '-sp', help='Start a spot instance, preemptable', action='store_const', const=-1, dest='spot')\n\n group = parser.add_mutually_exclusive_group()\n group.add_argument('--prod-ro-db', help='Use the read-only prod database (default)', action='store_true')\n group.add_argument('--prod-db', help='Use the writeable prod database', action='store_true')\n group.add_argument('--dev-db', help='Use the development database', action='store_true')\n\n args = parser.parse_args()\n\n if args.prod_db:\n db = 'prod'\n elif args.dev_db:\n db = 'dev'\n else:\n db = 'prod-ro'\n\n # spot can be either unspecified, -1 (for preemptible), or between 1 and 6\n if args.spot not in (None, -1, 1, 2, 3, 4, 5, 6):\n print('Spot duration must be between 1 and 6 hours')\n return\n\n instance_maker = InstanceMaker(args.identity, args.name, args.type, db, args.force, args.no_connect, args.spot, args.queue)\n instance_maker.start_instance()\n\nif __name__ == '__main__':\n main()\n", "id": "8009273", "language": "Python", "matching_score": 4.552530765533447, "max_stars_count": 105, "path": "aws/start_instance.py" }, { "content": "#!/usr/bin/env python\n\nimport argparse\nimport subprocess\nimport os\nimport sys\nfrom typing import Any, Dict, List\n\nfrom aws_utils.instance_utils import format_instance, AwsInstance\nimport connect_instance\n\n_DIRNAME = os.path.abspath(os.path.dirname(__file__))\n_GITROOT = os.path.abspath(subprocess.check_output(['git', 'rev-parse', '--show-toplevel'], cwd=_DIRNAME).strip())\n\nclass InstanceSynchronizer(connect_instance.InstanceConnectorABC):\n def __init__(self, identity, direction, files):\n # type: (str, str, List[str]) -> None\n\n super(InstanceSynchronizer, self).__init__(identity, require_pem=True)\n if direction not in ('to', 'from'):\n raise ValueError('Direction \"{}\" must be either \"to\" or \"from\"'.format(direction))\n self.direction = direction\n\n files = list(map(os.path.abspath, files))\n if not os.path.commonprefix(files + [_GITROOT]) == _GITROOT:\n raise ValueError('All files must be inside of the Ithemal directory!')\n\n files = list(map(lambda x: os.path.relpath(x, _GITROOT), files))\n\n self.files = files\n\n def connect_to_instance(self, instance):\n # type: (Dict[str, Any]) -> None\n\n ssh_address = 'ec2-user@{}'.format(instance['PublicDnsName'])\n\n if self.direction == 'to':\n ssh_command = 'cd ithemal; cat | tar --warning=no-unknown-keyword -xz'\n\n tar = subprocess.Popen(['tar', 'cz'] + self.files, cwd=_GITROOT, stdout=subprocess.PIPE)\n ssh = subprocess.Popen(['ssh', '-oStrictHostKeyChecking=no', '-i', self.pem_key, ssh_address, ssh_command], stdin=tar.stdout)\n elif self.direction == 'from':\n ssh_command = 'cd ithemal; tar cz {}'.format(' '.join(map(lambda f: \"'{}'\".format(f), self.files)))\n\n ssh = subprocess.Popen(['ssh', '-oStrictHostKeyChecking=no', '-i', self.pem_key, ssh_address, ssh_command], stdout=subprocess.PIPE)\n tar = subprocess.Popen(['tar', 'xz'] + self.files, 
cwd=_GITROOT, stdin=ssh.stdout)\n\n tar.wait()\n ssh.wait()\n\n\ndef main():\n # type: () -> None\n parser = argparse.ArgumentParser(description='Synchronize files in the Ithemal directory to a running AWS EC2 instance')\n\n direction_group = parser.add_mutually_exclusive_group(required=True)\n direction_group.add_argument('--to', help='Send files to the instance', default=False, action='store_true')\n direction_group.add_argument('--from', help='Pull files from the instance', default=False, action='store_true')\n\n parser.add_argument('identity', help='Identity to use to connect')\n parser.add_argument('--all', help='Synchronize with all instances', default=False, action='store_true')\n parser.add_argument('file', help='Files to synchronize', nargs='+')\n args = parser.parse_args()\n\n if args.to:\n direction = 'to'\n else:\n direction = 'from'\n\n synchronizer = InstanceSynchronizer(args.identity, direction, args.file)\n\n if args.all:\n for instance in synchronizer.get_running_instances():\n synchronizer.connect_to_instance(instance)\n else:\n connect_instance.interactively_connect_to_instance(synchronizer)\n\nif __name__ == '__main__':\n main()\n", "id": "4780429", "language": "Python", "matching_score": 3.7398877143859863, "max_stars_count": 105, "path": "aws/synchronize_files.py" }, { "content": "#!/usr/bin/env python\n\nfrom abc import ABCMeta, abstractmethod\nimport argparse\nimport subprocess\nimport os\nimport sys\nfrom typing import Any, Dict, List, Union\n\nfrom aws_utils.instance_utils import format_instance, AwsInstance\n\n# Ithemal runs on Python 2 mostly\ntry:\n input = raw_input\nexcept NameError:\n pass\n\nclass InstanceConnectorABC(AwsInstance):\n __metaclass__ = ABCMeta\n\n @abstractmethod\n def connect_to_instance(self, instance):\n # type: (Dict[str, Any]) -> None\n return NotImplemented\n\nclass InstanceConnector(InstanceConnectorABC):\n def __init__(self, identity, host, root, com):\n # type: (str, str, bool, List[str]) -> None\n super(InstanceConnector, self).__init__(identity, require_pem=True)\n self.host = host\n self.root = root\n self.com = com\n\n def connect_to_instance(self, instance):\n # type: (Dict[str, Any]) -> None\n ssh_address = 'ec2-user@{}'.format(instance['PublicDnsName'])\n ssh_args = ['ssh', '-X', '-i', self.pem_key, '-t', ssh_address]\n\n if self.com:\n conn_com = \"bash -lc '{}'\".format(' '.join(self.com).replace(\"'\", r\"\\'\"))\n else:\n conn_com = \"bash -lc '~/ithemal/aws/aws_utils/tmux_attach.sh || /home/ithemal/ithemal/aws/aws_utils/tmux_attach.sh'\"\n\n if self.host:\n ssh_args.append(conn_com)\n else:\n if self.root:\n user = 'root'\n else:\n user = 'ithemal'\n ssh_args.append('sudo docker exec -u {} -it ithemal {}'.format(user, conn_com))\n\n os.execvp('ssh', ssh_args)\n sys.exit(1)\n\ndef list_instances(instances):\n # type: (List[Dict[str, Any]]) -> None\n if not instances:\n print('No instances running!')\n return\n\n for i, instance in enumerate(instances):\n print('{}) {}'.format(i + 1, format_instance(instance)))\n\n\ndef interactively_connect_to_instance(aws_instances):\n # type: (InstanceConnectorABC) -> None\n while True:\n instances = aws_instances.get_running_instances()\n if not instances:\n print('No instances to connect to!')\n return\n elif len(instances) == 1:\n aws_instances.connect_to_instance(instances[0])\n return\n\n list_instances(instances)\n\n try:\n res = input('Enter a number to connect to that instance, or \"q\" to exit: ')\n except KeyboardInterrupt:\n return\n except EOFError:\n return\n\n if 
res[0].lower() == 'q':\n return\n else:\n try:\n index_to_connect = int(res)\n except ValueError:\n print('\"{}\" is not an integer.'.format(res))\n continue\n\n if index_to_connect < 1 or index_to_connect > len(instances):\n print('{} is not between 1 and {}.'.format(index_to_connect, len(instances)))\n continue\n\n instance = instances[index_to_connect - 1]\n aws_instances.connect_to_instance(instance)\n\n return\n\ndef connect_to_instance_id_or_index(aws_instances, id_or_index):\n # type: (InstanceConnectorABC, str) -> None\n instances = aws_instances.get_running_instances()\n\n if len(instances) == 0:\n print('No instances to connect to!')\n\n try:\n idx = int(id_or_index)\n if idx <= 0 or idx > len(instances):\n print('Provided index must be in the range [{}, {}]'.format(1, len(instances)))\n return\n\n aws_instances.connect_to_instance(instances[idx - 1])\n except ValueError:\n pass\n\n possible_instances = [instance for instance in instances if instance['InstanceId'].startswith(id_or_index)]\n if len(possible_instances) == 0:\n raise ValueError('{} is not a valid instance ID or index'.format(id_or_index))\n elif len(possible_instances) == 1:\n aws_instances.connect_to_instance(possible_instances[0])\n else:\n raise ValueError('Multiple instances have ambiguous identifier prefix {}'.format(id_or_index))\n\ndef main():\n # type: () -> None\n\n parser = argparse.ArgumentParser(description='Connect to a running AWS EC2 instance')\n\n user_group = parser.add_mutually_exclusive_group()\n user_group.add_argument('--host', help='Connect directly to the host', default=False, action='store_true')\n user_group.add_argument('--root', help='Connect to root in the Docker instance', default=False, action='store_true')\n user_group.add_argument('--list', help='Just list the instances, rather than connecting', default=False, action='store_true')\n\n parser.add_argument('identity', help='Identity to use to connect')\n parser.add_argument('instance_id', help='Instance IDs to manually connect to', nargs='?', default=None)\n parser.add_argument('--com', help='Command to run (uninteractive)', nargs='+')\n args = parser.parse_args()\n\n aws_instances = InstanceConnector(args.identity, args.host, args.root, args.com)\n\n if args.list:\n list_instances(aws_instances.get_running_instances())\n return\n\n if args.instance_id:\n connect_to_instance_id_or_index(aws_instances, args.instance_id)\n else:\n interactively_connect_to_instance(aws_instances)\n\nif __name__ == '__main__':\n main()\n", "id": "12321611", "language": "Python", "matching_score": 2.750631809234619, "max_stars_count": 105, "path": "aws/connect_instance.py" }, { "content": "#!/usr/bin/env python\n\nimport argparse\nimport subprocess\nfrom typing import Any, Dict, List, Optional\n\nfrom aws_utils.instance_utils import format_instance, AwsInstance\n\n# Ithemal runs on Python 2 mostly\ntry:\n input = raw_input\nexcept NameError:\n pass\n\nclass InstanceKiller(AwsInstance):\n def __init__(self, identity, force):\n # type: (str, bool) -> None\n super(InstanceKiller, self).__init__(identity)\n self.force = force\n\n def kill_instances(self, instances_to_kill):\n # type: (List[Dict[str, Any]]) -> None\n if not instances_to_kill:\n return\n\n if not self.force:\n print('Will kill the following instance{}:'.format('s' if len(instances_to_kill) else ''))\n for instance in instances_to_kill:\n if isinstance(instance, str):\n print(instance)\n else:\n print(format_instance(instance))\n\n try:\n res = input('Proceed? 
(y/n) ')[0].lower()\n except KeyboardInterrupt:\n print('Not killing.')\n return\n\n if res != 'y':\n print('Not killing.')\n return\n\n instance_ids = [instance if isinstance(instance, str) else instance['InstanceId'] for instance in instances_to_kill]\n args = ['aws', 'ec2', 'terminate-instances', '--instance-ids'] + instance_ids\n subprocess.check_call(args)\n\n\ndef interactively_kill_instances(instance_killer):\n # type: (InstanceKiller) -> None\n\n while True:\n instances = instance_killer.get_running_instances()\n if not instances:\n print('No instances to kill!')\n return\n\n print('Active instances:')\n for i, instance in enumerate(instances):\n print('{}) {}'.format(i + 1, format_instance(instance)))\n\n try:\n res = input('Enter a number to kill that instance, \"a\" to kill all, or \"q\" to exit: ')\n except KeyboardInterrupt:\n return\n except EOFError:\n return\n\n if res[0].lower() == 'q':\n return\n elif res[0].lower() == 'a':\n instance_killer.kill_instances(instances)\n else:\n try:\n index_to_kill = int(res)\n except ValueError:\n print('\"{}\" is not an integer.'.format(res))\n continue\n\n if index_to_kill < 1 or index_to_kill > len(instances):\n print('{} is not between 1 and {}.'.format(index_to_kill, len(instances) + 1))\n continue\n\n instance_to_kill = instances[index_to_kill - 1]\n\n instance_killer.kill_instances([instance_to_kill])\n\n\ndef kill_all_instances(instance_killer):\n # type: (InstanceKiller) -> None\n\n instances = instance_killer.get_running_instances()\n if not instances:\n print('No instances to kill!')\n return\n\n instance_killer.kill_instances(instances)\n\ndef main():\n # type: () -> None\n\n parser = argparse.ArgumentParser(description='Kill running AWS EC2 instances')\n parser.add_argument('-a', '--all', help='Kill all running instances by default', default=False, action='store_true')\n parser.add_argument('-f', '--force', help=\"Don't ask for confirmation\", default=False, action='store_true')\n parser.add_argument('identity', help='Key identity to filter by')\n parser.add_argument('instance_id', help='Instance IDs to manually kill', nargs='*', default=[])\n args = parser.parse_args()\n\n instance_killer = InstanceKiller(args.identity, args.force)\n\n if args.instance_id:\n instance_killer.kill_instances(args.instance_id)\n elif args.all:\n kill_all_instances(instance_killer)\n else:\n interactively_kill_instances(instance_killer)\n\n\nif __name__ == '__main__':\n main()\n", "id": "10418108", "language": "Python", "matching_score": 2.75886869430542, "max_stars_count": 105, "path": "aws/stop_instance.py" }, { "content": "#!/usr/bin/env python\n\nimport argparse\nimport atexit\nimport aws_utils.queue_process\nimport curses\nimport json\nimport os\nimport start_instance\nimport subprocess\nimport sys\ntry:\n import urlparse\nexcept ImportError:\n import urrlib.parse as urlparse\nfrom typing import Optional\nimport tempfile\nfrom typing import Any, Dict, List\n\n# Ithemal runs on Python 2 mostly\ntry:\n input = raw_input\nexcept NameError:\n pass\n\n_DIRNAME = os.path.dirname(os.path.abspath(__file__))\n\ndef queue_url_of_name(queue_name):\n # type: (str) -> Optional[str]\n proc = subprocess.Popen(\n ['aws', 'sqs', 'get-queue-url', '--queue-name', queue_name + '.fifo'],\n stdout=subprocess.PIPE,\n stderr=open('/dev/null', 'w'),\n )\n if proc.wait() or not proc.stdout:\n return None\n\n output = json.load(proc.stdout)\n return output['QueueUrl']\n\ndef create_queue(identity, queue, instance_type, instance_count, ignore_exists, no_spot):\n # 
type: (str, str, str, int, bool, bool) -> None\n\n queue_exists = queue_url_of_name(queue)\n if queue_exists and not ignore_exists:\n print('Queue {} already exists!'.format(queue))\n return\n\n queue_url = json.loads(subprocess.check_output([\n 'aws', 'sqs', 'create-queue',\n '--queue-name', queue + '.fifo',\n '--attributes', json.dumps({'FifoQueue': 'true'}),\n ]))['QueueUrl']\n\n procs = []\n for idx in range(instance_count):\n # keep outputs from last process to get an idea of spot instance stability\n if idx == instance_count - 1:\n stdout = None\n stderr = None\n else:\n stdout = open('/dev/null', 'w')\n stderr = open('/dev/null', 'w')\n\n procs.append(subprocess.Popen(\n [\n os.path.join(_DIRNAME, 'start_instance.py'),\n identity, '-f', '--no-connect',\n '-t', instance_type,\n '--name', '{} Queue Processor'.format(queue),\n '--queue', queue,\n ] + ([] if no_spot else ['--spot-preempt']),\n stdout=stdout,\n stderr=stderr,\n ))\n\n try:\n for proc in procs:\n proc.wait()\n except (KeyboardInterrupt, SystemExit):\n for proc in procs:\n proc.terminate()\n if not queue_exists:\n kill_queue(queue)\n\ndef send_messages(queue, com):\n # type: (str, str) -> None\n\n url = queue_url_of_name(queue)\n if not url:\n print('Queue {} doesn\\'t exist!'.format(queue))\n return\n\n if com:\n aws_utils.queue_process.send_message(url, ' '.join(com))\n else:\n try:\n while True:\n if sys.stdin.isatty():\n com = input('com> ')\n else:\n com = input()\n aws_utils.queue_process.send_message(url, com)\n except (EOFError, KeyboardInterrupt):\n pass\n\ndef kill_queue(queue):\n # type: (str) -> None\n\n url = queue_url_of_name(queue)\n if not url:\n print('Queue {} doesn\\'t exist!'.format(queue))\n return\n\n subprocess.check_call([\n 'aws', 'sqs', 'delete-queue',\n '--queue-url', url,\n ])\n\ndef running_of_queue(identity, queue):\n # type: (str, str) -> None\n\n def has_queue_tag(instance):\n # type: (Dict[str, Any]) -> bool\n\n if 'Tags' not in instance:\n return False\n\n for tag in instance['Tags']:\n if tag['Key'] == 'QueueName' and tag['Value'] == queue:\n return True\n return False\n\n instances_json = json.loads(subprocess.check_output(['aws', 'ec2', 'describe-instances', '--filters', 'Name=instance-state-name,Values=pending,running']))\n instances = [i for res in instances_json['Reservations'] for i in res['Instances'] if has_queue_tag(i)]\n\n for instance in instances:\n out = subprocess.check_output([\n os.path.join(_DIRNAME, 'connect_instance.py'), identity, instance['InstanceId'],\n '--com', os.path.join('${ITHEMAL_HOME}', 'aws', 'aws_utils', 'get_running_queue_command.sh')\n ], stderr=open('/dev/null', 'w')).strip()\n if out:\n print('{} || {}'.format(instance['InstanceId'], out))\n\n\ndef preview_queue(queue):\n # type: (str) -> None\n\n url = queue_url_of_name(queue)\n if not url:\n print('Queue {} doesn\\'t exist!'.format(queue))\n return\n\n output = subprocess.check_output([\n 'aws', 'sqs', 'receive-message',\n '--queue-url', url,\n '--visibility-timeout', '0',\n '--max-number-of-messages', '10',\n ])\n\n if not output:\n print('No messages in queue!')\n return\n\n messages = json.loads(output)['Messages']\n\n for message in messages:\n print('> {}'.format(message['Body']))\n\ndef manage_queue(queue):\n # type: (str) -> None\n\n url_ = queue_url_of_name(queue)\n if not url_:\n print('Queue {} doesn\\'t exist!'.format(queue))\n return\n else:\n url = url_\n\n messages = [] # type: List[Dict[str, Any]]\n\n def reset_messages():\n # type: () -> None\n if not messages:\n return\n\n with 
tempfile.NamedTemporaryFile(suffix='.json', bufsize=0) as f:\n json.dump([{\n 'Id': message['MessageId'],\n 'ReceiptHandle': message['ReceiptHandle'],\n 'VisibilityTimeout': 0,\n } for message in messages], f)\n\n output = subprocess.check_output([\n 'aws', 'sqs', 'change-message-visibility-batch',\n '--queue-url', url,\n '--entries', 'file://{}'.format(f.name),\n ])\n\n def get_messages():\n # type: () -> List[Dict[str, Any]]\n reset_messages()\n\n output = subprocess.check_output([\n 'aws', 'sqs', 'receive-message',\n '--queue-url', url,\n '--visibility-timeout', '30',\n '--max-number-of-messages', '10',\n ])\n\n if not output:\n return []\n else:\n return json.loads(output)['Messages']\n\n messages = get_messages()\n\n stdscr = curses.initscr()\n curses.noecho()\n curses.cbreak()\n stdscr.keypad(1)\n\n def cleanup():\n # type: () -> None\n curses.nocbreak()\n stdscr.keypad(0);\n curses.echo()\n curses.endwin()\n reset_messages()\n\n atexit.register(cleanup)\n\n selected_idx = 0\n\n while True:\n if not messages:\n return\n\n stdscr.erase()\n stdscr.addstr(0, 0, 'Queue Management mode. Arrow keys to move, x to delete an item, q to exit', curses.A_STANDOUT)\n stdscr.addstr(selected_idx + 2, 0, '>')\n\n (maxy, maxx) = stdscr.getmaxyx()\n for i, message in enumerate(messages):\n body = message['Body'][:maxx-5] + (message['Body'][maxx-5:] and '...')\n stdscr.addstr(i + 2, 2, body)\n stdscr.refresh()\n\n c = stdscr.getch()\n if c == ord('x'):\n message = messages[selected_idx]\n\n stdscr.erase()\n stdscr.addstr(0, 0, 'Really delete the following item? (y/n)')\n stdscr.addstr(1, 0, message['Body'])\n stdscr.refresh()\n\n while True:\n c = stdscr.getch()\n if c == ord('y'):\n subprocess.check_call(['aws', 'sqs', 'delete-message', '--queue-url', url, '--receipt-handle', message['ReceiptHandle']])\n selected_idx = 0\n break\n elif c == ord('n'):\n break\n\n messages = get_messages()\n elif c == ord('q'):\n break\n elif c == curses.KEY_DOWN:\n selected_idx = min(selected_idx + 1, len(messages) - 1)\n elif c == curses.KEY_UP:\n selected_idx = max(selected_idx - 1, 0)\n\ndef list_queues():\n # type: () -> None\n output = subprocess.check_output(['aws', 'sqs', 'list-queues'])\n if not output:\n print('No running queues!')\n return\n\n queues = json.loads(output)['QueueUrls']\n\n def parse_url(url):\n # type: (str) -> str\n full_name = urlparse.urlparse(url).path.split('/')[-1]\n suffix = '.fifo'\n if full_name.endswith(suffix):\n name = full_name[:-len(suffix)]\n else:\n name = full_name\n\n return '{} ({})'.format(name, url)\n\n print('\\n'.join(map(parse_url, queues)))\n\ndef main():\n # type: () -> None\n parser = argparse.ArgumentParser(description='Manage AWS SQS queues and their associated workers')\n\n def add_queue_arg(sp):\n # type: (argparse.ArgumentParser) -> None\n sp.add_argument('queue_name', help='Queue to manage')\n\n subparsers = parser.add_subparsers(dest='subparser')\n\n list_parser = subparsers.add_parser('list', help='List AWS queues')\n\n create_parser = subparsers.add_parser('create', help='Create AWS queues')\n create_parser.add_argument('identity', help='Identity to use to connect')\n add_queue_arg(create_parser)\n create_parser.add_argument('-c', '--count', help='Number of queue processors to create', default=4, type=int)\n create_parser.add_argument('-t', '--type', help='Instance type to start (default: t2.large)', default='t2.large')\n create_parser.add_argument('--ignore-exists', help='Fork instances regardless of if the queue exists or not', action='store_true', 
default=False)\n create_parser.add_argument('--no-spot', help='Start on-demand instead of spot instances', action='store_true', default=False)\n\n send_parser = subparsers.add_parser('send', help='Send messages to AWS queues')\n add_queue_arg(send_parser)\n send_parser.add_argument('com', nargs='*', help='Command to send (if empty, read lines from stdin)')\n\n kill_parser = subparsers.add_parser('kill', help='Kill AWS queue')\n add_queue_arg(kill_parser)\n\n preview_parser = subparsers.add_parser('preview', help='Preview an AWS queue')\n add_queue_arg(preview_parser)\n\n manage_parser = subparsers.add_parser('manage', help='Manage an AWS queue')\n add_queue_arg(manage_parser)\n\n running_parser = subparsers.add_parser('running', help='Get commands currently running on an AWS queue')\n running_parser.add_argument('identity', help='Identity to use to connect')\n add_queue_arg(running_parser)\n\n args = parser.parse_args()\n\n if args.subparser == 'list':\n list_queues()\n return\n\n if args.subparser == 'create':\n create_queue(args.identity, args.queue_name, args.type, args.count, args.ignore_exists, args.no_spot)\n elif args.subparser == 'send':\n send_messages(args.queue_name, args.com)\n elif args.subparser == 'kill':\n kill_queue(args.queue_name)\n elif args.subparser == 'preview':\n preview_queue(args.queue_name)\n elif args.subparser == 'manage':\n manage_queue(args.queue_name)\n elif args.subparser == 'running':\n running_of_queue(args.identity, args.queue_name)\n else:\n raise ValueError('Unrecognized subparser {}'.format(args.subparser))\n\nif __name__ == '__main__':\n main()\n", "id": "4202998", "language": "Python", "matching_score": 3.257240056991577, "max_stars_count": 105, "path": "aws/command_queue.py" }, { "content": "#!/usr/bin/env python\n\nimport argparse\nimport json\nimport os\nimport requests\nimport spot_checker\nimport subprocess\nimport sys\nimport tempfile\nimport threading\nimport time\nimport traceback\nimport uuid\nfrom typing import NoReturn, Optional\n\ndef send_message(queue_url, com):\n # type: (str, str) -> None\n ''' Utility to send a message directly to a URL without checking\n '''\n\n subprocess.check_call([\n 'aws', 'sqs', 'send-message',\n '--queue-url', queue_url,\n '--message-body', com,\n '--message-group-id', 'none',\n '--message-deduplication-id', str(uuid.uuid4()),\n ])\n\ndef kill_instance(instance_id):\n # type: (str) -> None\n subprocess.call(['aws', 'ec2', 'terminate-instances', '--instance-ids', instance_id])\n sys.exit(1)\n\ncurr_com = None # type: Optional[str]\n\ndef watch_for_instance_death(queue_url, instance_id):\n # type: (str, str) -> None\n global curr_com\n\n while True:\n death_time = spot_checker.get_termination_time()\n if not death_time:\n time.sleep(60)\n continue\n\n # sleep until 10s before instance termination\n sleep_dur = death_time - time.time() - 10\n if sleep_dur > 0:\n time.sleep(sleep_dur)\n\n death_message = ':skull_and_crossbones: Spot instance {} dying :skull_and_crossbones:'.format(instance_id)\n\n if curr_com:\n send_message(queue_url, curr_com)\n death_message += '\\nRe-queueing {}'.format(curr_com)\n\n subprocess.call([os.path.join(os.environ['ITHEMAL_HOME'], 'aws', 'ping_slack.py'), death_message])\n return\n\ndef process_queue(instance_id, queue_url, kill_on_fail):\n # type: (str, str, bool) -> None\n global curr_com\n\n t = threading.Thread(target=watch_for_instance_death, args=(queue_url, instance_id))\n t.daemon = True\n t.start()\n\n log_file = open('/tmp/queue_log', 'a+', 1)\n\n while True:\n try:\n 
output = subprocess.check_output(['aws', 'sqs', 'receive-message', '--queue-url', queue_url, '--wait-time-seconds', '20'])\n except subprocess.CalledProcessError:\n if kill_on_fail:\n kill_instance(instance_id)\n else:\n return\n\n if not output:\n continue\n\n messages = json.loads(output)\n message = messages['Messages'][0]\n subprocess.check_call(['aws', 'sqs', 'delete-message', '--queue-url', queue_url, '--receipt-handle', message['ReceiptHandle']])\n\n curr_com = message['Body']\n com = message['Body']\n\n mk_tmp_file = lambda suffix: open(os.path.join('/tmp', '{}.{}'.format(\n message['MessageId'],\n suffix\n )), 'w')\n\n with mk_tmp_file('stdout') as stdout, mk_tmp_file('stderr') as stderr:\n log_file.write(com + '\\n')\n log_file.write('--> stdout: {}, stderr: {}\\n\\n'.format(stdout.name, stderr.name))\n proc = subprocess.Popen(\n message['Body'],\n shell=True,\n cwd=os.environ['ITHEMAL_HOME'],\n stdout=stdout,\n stderr=stderr\n )\n proc.wait()\n\n if not proc.returncode:\n # if process executed successfully, delete stderr file\n os.unlink(stderr.name)\n\n curr_com = None\n\n if proc.returncode:\n error_msg = 'Command `{}` failed with exit code {} on instance {}'.format(message['Body'], proc.returncode, instance_id)\n subprocess.call([os.path.join(os.environ['ITHEMAL_HOME'], 'aws', 'ping_slack.py'), error_msg])\n\ndef main():\n # type: () -> None\n parser = argparse.ArgumentParser(description='Indefinitely pull messages from a given AWS queue')\n parser.add_argument('queue_url', help='The AWS SQS queue URL to pull from')\n parser.add_argument('--kill', help='Kill the instance if a queue pull fails', action='store_true', default=False)\n args = parser.parse_args()\n\n instance_id = requests.get('http://169.254.169.254/latest/meta-data/instance-id').text\n\n try:\n process_queue(instance_id, args.queue_url, args.kill)\n except:\n error_msg = 'Error on instance {}:\\n```{}```'.format(instance_id, traceback.format_exc())\n subprocess.call([os.path.join(os.environ['ITHEMAL_HOME'], 'aws', 'ping_slack.py'), error_msg])\n\nif __name__ == '__main__':\n main()\n", "id": "4709549", "language": "Python", "matching_score": 2.931206464767456, "max_stars_count": 105, "path": "aws/aws_utils/queue_process.py" }, { "content": "#!/usr/bin/env python\n\nimport argparse\nimport base64\nimport json\nimport subprocess\nimport urllib2\nimport os\nfrom typing import Optional\n\n# I don't really want to factor this out so pls don't use this\nWEBHOOK_URL = base64.b64decode('<KEY>\nSLACK_USERNAME = 'AWS'\nSLACK_ICON = 'https://raw.githubusercontent.com/quintessence/slack-icons/e9e141f0a119759ca4d59e0b788fc9375c9b2678/images/amazon-web-services-slack-icon.png'\n\n# map from IAM key to Slack user ID\nUSER_MAP = {\n 'renda': 'UCJ98TMB8',\n 'charithm': 'UB59J5BHR',\n 'charithm-mac': 'UB59J5BHR',\n 'mcarbin': 'U7QK3FX88',\n}\n\ndef get_starting_user():\n # type: () -> Optional[str]\n '''Get the IAM key (user) that started this AWS instance, or None if this is not an AWS instance\n '''\n proc = subprocess.Popen(\n ['/usr/bin/curl', '--silent', '--connect-timeout', '1', 'http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key'],\n stdout=subprocess.PIPE,\n stderr=open(os.devnull, 'w'),\n )\n proc.wait()\n if proc.returncode:\n return None\n\n stdout, _ = proc.communicate()\n stdout = stdout.strip()\n return stdout.split()[2]\n\ndef send_message(message):\n # type: (str) -> None\n ''' Send the given message to slack\n '''\n payload = {\n 'text': message,\n 'username': SLACK_USERNAME,\n 'icon_url': 
SLACK_ICON,\n }\n\n urllib2.urlopen(urllib2.Request(WEBHOOK_URL, json.dumps(payload)))\n\ndef main():\n # type: () -> None\n parser = argparse.ArgumentParser(description='Ping a user in the #aws-notifications channel on Slack')\n parser.add_argument('--user', default=None, help='User to ping (default: user that started instance on AWS)')\n parser.add_argument('message', help='Message to send')\n\n # behave like 'echo', and just concatenate all args (undocumented)\n args, unknown_args = parser.parse_known_args()\n\n message = args.message\n for unknown_arg in unknown_args:\n message += ' ' + unknown_arg\n\n user = args.user or get_starting_user()\n\n # if there is a user, taf them\n if user:\n message = '<@{}>: {}'.format(\n USER_MAP[user],\n message,\n )\n\n send_message(message)\n\nif __name__ == '__main__':\n main()\n", "id": "1973931", "language": "Python", "matching_score": 1.205525517463684, "max_stars_count": 105, "path": "aws/ping_slack.py" }, { "content": "from flask import Flask, request, send_from_directory, render_template, has_request_context\nfrom flask.logging import default_handler\nfrom logging.config import dictConfig\nimport tempfile\nimport subprocess\nimport os\nimport logging\nimport sys\n\ndictConfig({\n 'version': 1,\n 'formatters': {'default': {\n 'format': '[%(asctime)s] %(levelname)s in %(module)s: %(message)s',\n }},\n 'handlers': {'file': {\n 'class': 'logging.FileHandler',\n 'filename': os.path.join(os.path.expanduser('~'), 'apithemal_logs'),\n 'formatter': 'default'\n }, 'console': {\n 'class': 'logging.StreamHandler',\n 'stream': 'ext://sys.stderr',\n 'formatter': 'default'\n }},\n 'root': {\n 'level': 'DEBUG',\n 'handlers': ['file']\n }\n})\n\napp = Flask(__name__)\n\[email protected]_request\ndef log_request_info():\n app.logger.debug('Remote: %s', request.remote_addr)\n app.logger.debug('Url: %s', request.url)\n app.logger.debug('Headers: %s', request.headers)\n\n try:\n code = '\\n'.join(map(strip_comment, map(str.strip, request.form['code'].encode('utf-8').strip().split('\\n'))))\n model = request.form['model'].encode('utf-8').strip()\n\n app.logger.debug('Code: %s', code)\n app.logger.debug('Model: %s', model)\n except:\n pass\n\[email protected]('/')\ndef index():\n return render_template('index.html', code_text=None, code_hteml=None, prediction=None, error=None)\n\ndef strip_comment(line):\n if ';' in line:\n return line[:line.index(';')]\n return line\n\[email protected]('/predict', methods=['GET', 'POST'])\ndef predict():\n if request.method == 'GET':\n return index()\n\n code = '\\n'.join(map(strip_comment, map(str.strip, request.form['code'].encode('utf-8').strip().split('\\n'))))\n model = request.form['model'].encode('utf-8').strip()\n\n try:\n prediction = get_prediction_of_code(code, model)\n error = None\n except ValueError as v:\n prediction = None\n error = v.args[0]\n\n return render_template(\n 'index.html',\n code_text=code,\n code_html=code.replace('\\n', '<br>'),\n prediction=prediction,\n error=(error and error.replace('\\n', '<br>')),\n last_model=model,\n )\n\n\ndef get_prediction_of_code(code, model):\n _, fname = tempfile.mkstemp()\n success, as_intel_output = intel_compile(code, fname)\n if not success:\n success, as_att_output = att_compile(code, fname)\n if not success:\n success, nasm_output = nasm_compile(code, fname)\n\n if not success:\n if os.path.exists(fname):\n os.unlink(fname)\n raise ValueError('Could not assemble code.\\nAssembler outputs:\\n\\n{}'.format('\\n\\n'.join([\n 'as (Intel syntax): 
{}'.format(as_intel_output[1]),\n 'as (AT&T syntax): {}'.format(as_att_output[1]),\n 'nasm: {}'.format(nasm_output[1]),\n ])))\n\n try:\n return '{:.3f}'.format(float(subprocess.check_output([\n 'python',\n '/home/ithemal/ithemal/learning/pytorch/ithemal/predict.py',\n '--model', '/home/ithemal/ithemal/learning/pytorch/saved/{}.dump'.format(model),\n '--model-data', '/home/ithemal/ithemal/learning/pytorch/saved/{}.mdl'.format(model),\n '--files', fname\n ]).strip()) / 100)\n except:\n if os.path.exists(fname):\n os.unlink(fname)\n raise ValueError('Ithemal failed to predict timing of code')\n\n\ndef intel_compile(code, output):\n p = subprocess.Popen(['as', '-o', output], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n c = '''\n .text\n .global main\n main:\n .intel_syntax noprefix\n\n mov ebx, 111\n .byte 0x64, 0x67, 0x90\n\n {}\n\n mov ebx, 222\n .byte 0x64, 0x67, 0x90\n '''.format(code)\n output = p.communicate(c)\n p.wait()\n return p.returncode == 0, output\n\ndef att_compile(code, output):\n p = subprocess.Popen(['as', '-o', output], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n c = '''\n .text\n .global main\n main:\n\n movq $111, %ebx\n .byte 0x64, 0x67, 0x90\n\n {}\n\n mov $222, %ebx\n .byte 0x64, 0x67, 0x90\n '''.format(code)\n output = p.communicate(c)\n p.wait()\n return p.returncode == 0, output\n\ndef nasm_compile(code, output):\n tmp = tempfile.NamedTemporaryFile()\n with open(tmp.name, 'w+') as f:\n f.write('''\n\tSECTION .text\n global main\n main:\n\n mov ebx, 111\n db 0x64, 0x67, 0x90\n\n {}\n\n mov ebx, 222\n db 0x64, 0x67, 0x90\n '''.format(code))\n\n p = subprocess.Popen(['nasm', '-o', output, tmp.name], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n output = p.communicate()\n p.wait()\n tmp.close()\n return p.returncode == 0, output\n", "id": "6040637", "language": "Python", "matching_score": 2.0811069011688232, "max_stars_count": 105, "path": "apithemal/apithemal.py" }, { "content": "import pytest\nimport os\nimport subprocess\nimport glob\nfrom conftest import *\n\n\n@dynamorio\nclass TestDynamoRIO:\n\n drexec = os.environ['DYNAMORIO_HOME'] + '/bin64/drrun'\n\n def test_dynamorio_installation(self):\n\n proc = subprocess.Popen([TestDynamoRIO.drexec,'--','ls'],stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n drout, _ = proc.communicate()\n proc = subprocess.Popen(['ls'],stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n normalout, _ = proc.communicate()\n\n assert drout == normalout\n\n @ithemal\n def test_drclient_static(self):\n\n static_client = os.environ['ITHEMAL_HOME'] + '/data_collection/build/bin/libstatic.so'\n\n files = glob.glob('/tmp/static_*')\n cmd = ['rm','-f'] + files\n proc = subprocess.Popen(cmd)\n proc.communicate()\n proc = subprocess.Popen([TestDynamoRIO.drexec,'-c',static_client,'3','7','1','gcc','none','/tmp','--','ls'],stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n drout, _ = proc.communicate()\n proc = subprocess.Popen(['ls'],stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n normalout, _ = proc.communicate()\n\n assert drout == normalout\n assert len(glob.glob('/tmp/static_*')) == 1\n\n @ithemal\n def test_created_sql_file(self):\n\n files = glob.glob('/tmp/static_*')\n\n assert len(files) == 1\n\n #check one line of the SQL file to see if it makes sense\n with open(files[0],'r') as f:\n\n line = f.readline()\n assert \"INSERT INTO config (compiler, flags, arch)\" in line\n line = f.readline()\n assert \"INSERT INTO code\" in line\n line = f.readline()\n assert 
\"UPDATE code SET\" in line\n\n\n\n", "id": "4187386", "language": "Python", "matching_score": 1.7972371578216553, "max_stars_count": 105, "path": "testing/test_dynamorio.py" }, { "content": "import pytest\nimport os\nimport subprocess\nimport glob\nimport re\nfrom shutil import copyfile\n\ndynamorio = pytest.mark.skipif('DYNAMORIO_HOME' not in os.environ.keys(),\n reason=\"DYNAMORIO_HOME not set\")\n\nithemal = pytest.mark.skipif('ITHEMAL_HOME' not in os.environ.keys(),\n reason=\"ITHEMAL_HOME not set\")\n\[email protected](scope=\"module\")\ndef db_config():\n\n if not os.path.exists('test_data/db_config.cfg'):\n copyfile('test_data/example_config.cfg','test_data/db_config.cfg')\n\n config = dict()\n with open('test_data/db_config.cfg','r') as f:\n for line in f:\n found = re.search('([a-zA-Z\\-]+) *= *\\\"*([a-zA-Z0-9#\\./]+)\\\"*', line)\n if found:\n config[found.group(1)] = found.group(2)\n\n return config\n\n\n\n\n", "id": "11587586", "language": "Python", "matching_score": 1.6341036558151245, "max_stars_count": 105, "path": "testing/conftest.py" }, { "content": "import pytest\nimport os\nimport subprocess\nimport glob\nfrom conftest import *\nimport common_libs.utilities as ut\nimport mysql.connector\n\n\n@ithemal\nclass TestDatabase:\n\n def test_connectivity(self,db_config):\n\n assert 'password' in db_config.keys()\n assert 'user' in db_config.keys()\n assert 'port' in db_config.keys()\n\n cnx = ut.create_connection(user=db_config['user'],password=db_config['password'],port=db_config['port'],database=None)\n assert cnx != None\n\n def test_connectivity_from_config(self):\n\n cnx = ut.create_connection_from_config('test_data/db_config.cfg')\n assert cnx != None\n\n def test_create_database(self,db_config):\n\n create_script = os.environ['ITHEMAL_HOME'] + '/data_export/scripts/create_and_populate_db.sh'\n schema = os.environ['ITHEMAL_HOME'] + '/data_export/schemas/mysql_schema.sql'\n\n proc = subprocess.call(['bash',create_script,'test_data/db_config.cfg','testIthemal',schema,'test_data'])\n #_ = proc.communicate()\n\n cnx = ut.create_connection(user=db_config['user'],password=db_config['password'],port=db_config['port'],database='testIthemal')\n assert cnx != None\n\n sql = 'select count(*) from code'\n\n rows = ut.execute_query(cnx, sql, True)\n\n assert len(rows) == 1\n assert len(rows[0]) == 1\n\n assert rows[0][0] == 3287\n\n", "id": "2820927", "language": "Python", "matching_score": 3.766569137573242, "max_stars_count": 105, "path": "testing/test_database.py" }, { "content": "import pytest\nimport os\nimport subprocess\nimport glob\nfrom conftest import *\nimport common_libs.utilities as ut\nimport mysql.connector\nimport urllib\nimport time\n\ndatabase = '--database=test_costmodel'\nconfig = '--config=test_data/db_config.cfg'\narch = '--arch=2'\n\nhome = os.environ['ITHEMAL_HOME']\nscript = home + '/learning/pytorch/ithemal/run_ithemal.py'\nsavedata = home + '/learning/pytorch/inputs/data/time_skylake_test.data'\nembedfile = home + '/learning/pytorch/inputs/embeddings/code_delim.emb'\nsavemodel = home + '/learning/pytorch/inputs/models/test_skylake.mdl'\n\n\ndef wait_timeout(proc, seconds):\n \"\"\"Wait for a process to finish, or raise exception after timeout\"\"\"\n start = time.time()\n end = start + seconds\n interval = 30\n\n while True:\n result = proc.poll()\n if result is not None:\n return result\n if time.time() >= end:\n proc.kill()\n return None\n time.sleep(interval)\n\n\n@ithemal\nclass TestIthemal:\n\n def test_create_ithemal_database(self):\n\n 
urllib.urlretrieve (\"http://web.mit.edu/charithm/www/test_costmodel.sql\", \"test_data/test_costmodel.sql\")\n assert os.path.exists('test_data/test_costmodel.sql')\n\n default_file = 'test_data/db_config.cfg'\n cnx = ut.create_connection_from_config(default_file)\n assert cnx\n\n ut.execute_query(cnx,'drop database if exists test_costmodel',False)\n cnx_none = ut.create_connection_from_config(default_file,'test_costmodel')\n assert cnx_none == None\n\n ut.execute_query(cnx,'create database if not exists test_costmodel',False)\n cnx.close()\n\n cnx = ut.create_connection_from_config(default_file,'test_costmodel')\n assert cnx\n\n sql = open('test_data/test_costmodel.sql').read()\n\n for line in sql.split(';'):\n print line\n ut.execute_query(cnx,line,False,True)\n cnx.commit()\n\n rows = ut.execute_query(cnx,'select count(*) from code',True)\n assert rows[0][0] == 100000\n\n def test_savedata(self):\n\n\n args = ['python',script, config, database, arch, '--mode=save','--savedatafile=' + savedata]\n proc = subprocess.Popen(args,stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)\n stdout, stderr = proc.communicate()\n\n print stdout\n success = False\n for line in stdout.split('\\n'):\n if line == 'timing values registered for 78179 items':\n success = True\n\n assert success\n\n @pytest.mark.skip\n def test_training(self):\n\n args = ['python',script, '--mode=train','--savedatafile=' + savedata, '--savefile=' + savemodel, '--embedfile=' + embedfile, '--embmode=none']\n\n proc = subprocess.Popen(args,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n wait_timeout(proc,300)\n\n output = []\n for line in proc.stdout:\n output.append(line.decode())\n\n print(\"\".join(output))\n\n assert False\n\n\n", "id": "11859153", "language": "Python", "matching_score": 4.822404384613037, "max_stars_count": 105, "path": "testing/test_ithemal.py" }, { "content": "import pytest\nimport os\nimport subprocess\nimport glob\nfrom conftest import *\nimport common_libs.utilities as ut\nimport mysql.connector\n\n\n@ithemal\nclass TestStats:\n\n def test_getbenchmarks(self):\n\n script = os.environ['ITHEMAL_HOME'] + '/learning/pytorch/stats/getbenchmarks.py'\n database = '--database=testIthemal'\n config = '--config=test_data/db_config.cfg'\n arch = '--arch=1'\n\n args = ['python',script, database, config, arch]\n proc = subprocess.Popen(args,stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)\n stdout, stderr = proc.communicate()\n\n\n success = False\n for line in stdout.split('\\n'):\n if line == 'Total 44 2934 0':\n success = True\n\n assert success\n\n", "id": "8431278", "language": "Python", "matching_score": 2.007089376449585, "max_stars_count": 105, "path": "testing/test_stats.py" }, { "content": "import sys\nimport os\nsys.path.append(os.path.join(os.environ['ITHEMAL_HOME'], 'learning', 'pytorch'))\n\nimport argparse\nimport common_libs.utilities as ut\nimport data.data_cost as dt\nimport torch\nfrom typing import Optional\n\ndef save_data(savefile, arch, format, database=None, config=None):\n # type: (str, int, str, Optional[str], Optional[str]) -> None\n\n if config is None:\n cnx = ut.create_connection(database=database)\n else:\n cnx = ut.create_connection_from_config(database=database, config_file=config)\n\n data = dt.DataInstructionEmbedding()\n data.extract_data(cnx, format, ['code_id','code_intel'])\n data.get_timing_data(cnx, arch)\n\n torch.save(data.raw_data, savefile)\n\ndef main():\n # type: () -> None\n parser = argparse.ArgumentParser('Save data from SQL to disk')\n 
parser.add_argument('dest', help='Location to save the data to')\n parser.add_argument('--format', default='text', help='Format to save data in')\n parser.add_argument('--arch', type=int, help='Architecture of data to pull', required=True)\n parser.add_argument('--database', help='Database to pull from (if not default)')\n parser.add_argument('--config', help='Database configuration to use (if not deafult)')\n\n args = parser.parse_args()\n\n save_data(args.dest, args.arch, args.format, database=args.database, config=args.config)\n\nif __name__ == '__main__':\n main()\n", "id": "4700762", "language": "Python", "matching_score": 1.203892707824707, "max_stars_count": 105, "path": "learning/pytorch/save_data.py" }, { "content": "#!/usr/bin/env python\n\nfrom gevent import monkey; monkey.patch_all()\n\nimport argparse\nimport functools\nimport subprocess\nimport gevent\nimport os\nimport tempfile\n\n_IACA_HEADER = \"7f454c4602010100000000000000000001003e000100000000000000000000000000000000000000100100000000000000000000400000000000400004000100bb6f000000646790\"\n_IACA_TAIL = \"bbde000000646790\"\n\n_LLVM_BODY = ''' .text\n .att_syntax\n .globl main\nmain:\n # LLVM-MCA-BEGIN test\n{}\n # LLVM-MCA-END test\n '''\n\n_IACA = os.path.join(os.environ['ITHEMAL_HOME'], 'timing_tools', 'iaca-bin')\n_LLVM = os.path.join(os.environ['ITHEMAL_HOME'], 'timing_tools', 'llvm-build', 'bin', 'llvm-mca')\n_DISASSMBLER = os.path.join(os.environ['ITHEMAL_HOME'], 'data_collection', 'disassembler', 'build', 'disassemble')\n\n\ndef time_llvm_base(arch, verbose, code):\n with tempfile.NamedTemporaryFile() as f:\n disassembler = subprocess.Popen([_DISASSMBLER, '-att'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n (output, _) = disassembler.communicate(code)\n f.write(_LLVM_BODY.format(output))\n f.flush()\n output = subprocess.check_output([_LLVM, '-march=x86', '-mcpu={}'.format(arch), f.name])\n if verbose:\n print(output)\n return output\n\ndef time_llvm_cycles(arch, verbose, code):\n output = time_llvm_base(arch, verbose, code)\n total_cycles_line = output.split('\\n')[5]\n cycles = total_cycles_line.split()[2]\n return float(cycles)\n\ndef time_llvm_rthroughput(arch, verbose, code):\n output = time_llvm_base(arch, verbose, code)\n total_cycles_line = output.split('\\n')[11]\n cycles = total_cycles_line.split()[2]\n return float(cycles) * 100\n\ndef time_iaca(arch, verbose, code):\n with tempfile.NamedTemporaryFile() as f:\n f.write('{}{}{}'.format(_IACA_HEADER, code, _IACA_TAIL).decode('hex'))\n f.flush()\n output = subprocess.check_output([_IACA, '-arch', arch, '-reduceout', f.name])\n if verbose:\n print(output)\n txput_line = output.split('\\n')[3]\n txput = txput_line.split()[2]\n return float(txput) * 100\n\ndef time_code_ids(code_ids, timer):\n # get code\n mysql = subprocess.Popen(['mysql', '-N'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n (out, _) = mysql.communicate('SELECT code_id, code_raw FROM code WHERE code_id IN ({});\\n'.format(','.join(map(str, code_ids))))\n jobs = {int(code_id): gevent.spawn(timer, code_raw) for (code_id, code_raw) in map(str.split, out.rstrip().split('\\n'))}\n gevent.joinall(jobs.values(), timeout=240)\n return {code_id: jobs[code_id].value for code_id in jobs}\n\niaca_kind = (2, time_iaca, {'haswell': 'HSW', 'broadwell': 'BDW', 'skylake': 'SKL'})\nllvm_kind_cycles = (3, time_llvm_cycles, {'haswell': 'haswell', 'broadwell': 'broadwell', 'skylake': 'skylake', 'nehalem': 'nehalem', 'ivybridge': 'ivybridge'})\nllvm_kind_rthroughput = (5, time_llvm_rthroughput, {'haswell': 
'haswell', 'broadwell': 'broadwell', 'skylake': 'skylake', 'nehalem': 'nehalem', 'ivybridge': 'ivybridge'})\n\n_kind_map = {\n 'iaca': iaca_kind,\n 'llvm-cycles': llvm_kind_cycles,\n 'llvm-rthroughput': llvm_kind_rthroughput,\n}\n\n_arch_map = {\n 'haswell': 1,\n 'skylake': 2,\n 'broadwell': 3,\n 'nehalem': 4,\n 'ivybridge': 5,\n}\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('arch', type=str)\n parser.add_argument('kind')\n parser.add_argument('--insert', action='store_true', default=False)\n parser.add_argument('--verbose', action='store_true', default=False)\n parser.add_argument('code_id', type=int, nargs='+')\n args = parser.parse_args()\n\n (kind_id, timer_func, arch_dict) = _kind_map[args.kind]\n arch_id = _arch_map[args.arch]\n timer = functools.partial(timer_func, arch_dict[args.arch], args.verbose)\n times = time_code_ids(args.code_id, timer)\n\n mysql = subprocess.Popen(['mysql'], stdin=subprocess.PIPE)\n values = ','.join(map(str, ((code_id, arch_id, kind_id, speed) for (code_id, speed) in times.items() if speed is not None)))\n if args.insert:\n print('Inserting {}'.format(len(times)))\n mysql.communicate('INSERT INTO time (code_id, arch_id, kind_id, cycle_count) VALUES {};\\n'.format(values))\n else:\n print(times)\n\n\nif __name__ == '__main__':\n main()\n", "id": "9584445", "language": "Python", "matching_score": 1.7639435529708862, "max_stars_count": 105, "path": "timing_tools/test_code_id.py" }, { "content": "#!/usr/bin/env python3\n\nfrom matplotlib import pyplot as plt\nfrom typing import List, NamedTuple, Union, Optional, Tuple\nimport argparse\nimport numpy as np\nimport os\nimport re\nimport scipy.ndimage.filters\nimport subprocess\nimport time\nimport matplotlib\nmatplotlib.rcParams.update({'font.size': 20})\n\nTrainMeasurement = NamedTuple('TrainMeasurement', [\n ('experiment_name', str),\n ('epochs', List[int]),\n ('times', List[float]),\n ('losses', List[float]),\n ('trainers', List[int]),\n])\n\nTestMeasurement = NamedTuple('TestMeasurement', [\n ('experiment_name', str),\n ('times', List[float]),\n ('losses', List[float]),\n])\n\n_DIRNAME = os.path.abspath(os.path.dirname(__file__))\n\n_TRAIN = 'Train'\n_TEST = 'Test'\n\ndef plot_measurements(train_measurements, test_measurements, has_finished, train_blur, test_blur, plot_trainers, raw_x, save, norm_epoch, min_y, max_y, validation):\n # type: (List[TrainMeasurement], List[TestMeasurement], List[bool], float, float, bool, bool, Optional[str], bool) -> None\n\n def get_times_and_losses(measurement, blur):\n # type: (Union[TrainMeasurement, TestMeasurement], float) -> Tuple[np.array, np.array]\n times = np.array(measurement.times) / 3600\n if blur > 0:\n losses = scipy.ndimage.filters.gaussian_filter1d(measurement.losses, blur)\n else:\n losses = measurement.losses\n if raw_x:\n return np.arange(len(losses)), losses\n else:\n return times, losses\n\n plt.title('Loss over time')\n fig = plt.figure(1, figsize=(12.8, 9.6), dpi=100)\n loss_ax = fig.gca()\n if plot_trainers:\n trainer_ax = loss_ax.twinx()\n trainer_ax.set_ylim([1, 6])\n trainer_ax.set_ylabel('Number of running trainers')\n else:\n trainer_ax = None\n\n\n if norm_epoch:\n loss_ax.set_xlabel('Epochs')\n else:\n loss_ax.set_xlabel('Time in hours')\n\n loss_ax.set_ylim([min_y, max_y])\n loss_ax.set_ylabel('Loss')\n\n for idx, (train_measurement, test_measurement, finished) in enumerate(zip(train_measurements, test_measurements, has_finished)):\n color = 'C{}'.format(idx)\n name = test_measurement.experiment_name\n train_times, 
train_losses = get_times_and_losses(train_measurement, train_blur)\n test_times, test_losses = get_times_and_losses(test_measurement, test_blur)\n\n ep_advance = np.where(np.diff(train_measurement.epochs))[0] + 1\n\n new_test_times = np.empty_like(test_times)\n\n max_tr = train_times.max()\n\n if norm_epoch:\n prev = 0\n prev_x = 0\n for k, idx in enumerate(ep_advance):\n x = train_times[idx]\n idxs = (test_times >= prev_x) & (test_times < x)\n old_tests = test_times[idxs]\n new_test_times[idxs] = (old_tests - prev_x) / (x - prev_x) + k\n train_times[prev:idx] = np.linspace(k, k+1, idx - prev)\n prev = idx\n prev_x = x\n\n idxs = (test_times >= prev_x)\n old_tests = test_times[idxs]\n new_test_times[idxs] = (old_tests - prev_x) / (max_tr - prev_x) + len(ep_advance)\n train_times[prev:] = np.linspace(len(ep_advance), len(ep_advance)+1, len(train_times) - prev)\n test_times = new_test_times\n else:\n for idx in ep_advance:\n x = train_times[idx]\n y = train_losses[idx]\n loss_ax.plot([x,x], [y - 0.005, y + 0.005], color=color)\n\n loss_ax.plot(train_times, train_losses, label='{} train loss'.format(name), color=color)\n if len(test_times) > 0:\n loss_ax.plot(test_times, test_losses, linestyle='--', label='{} {} loss'.format(name, 'validation' if validation else 'test'), color=color)\n\n if finished: # or True:\n loss_ax.scatter(train_times[-1:], train_losses[-1:], marker='x', color=color)\n\n if trainer_ax is not None:\n trainer_ax.plot(train_times, train_measurement.trainers, label='{} trainers'.format(name), color=color)\n\n\n loss_ax.legend()\n\n if save:\n plt.savefig(save)\n else:\n plt.show()\n\ndef synchronize_experiment_files(experiment_name):\n # type: (str) -> Tuple[str, List[str], List[bool]]\n\n match = re.match(r'^(?P<experiment_name>.*?)(:?\\+(?P<time_count>\\d+))?$', experiment_name)\n if match is None:\n raise ValueError('Unrecognized format: {}'.format(experiment_name))\n\n experiment_name = match.group('experiment_name')\n if match.group('time_count'):\n time_count = max(int(match.group('time_count')), 1)\n else:\n time_count = 1\n\n try:\n output = subprocess.check_output(['aws', 's3', 'ls', 's3://ithemal-experiments/{}/'.format(experiment_name)]).strip()\n except subprocess.CalledProcessError:\n raise ValueError('Unknown experiment {}'.format(experiment_name))\n\n if isinstance(output, bytes):\n output = output.decode('utf8') # type: ignore\n\n splits = [line.strip().split() for line in output.split('\\n')]\n times = [split[1][:-1] for split in splits if split[0] == 'PRE']\n\n experiment_times = sorted(times)[-time_count:]\n has_finished = [] # type: List[bool]\n\n for experiment_time in experiment_times:\n subprocess.check_call(['aws', 's3', 'sync', 's3://ithemal-experiments/{}/{}'.format(experiment_name, experiment_time),\n os.path.join(_DIRNAME, 'data', experiment_name, experiment_time),\n '--exclude', '*', '--include', 'loss_report.log'])\n\n subprocess.check_call(['aws', 's3', 'sync', 's3://ithemal-experiments/{}/{}/checkpoint_reports'.format(experiment_name, experiment_time),\n os.path.join(_DIRNAME, 'data', experiment_name, experiment_time, 'checkpoint_reports')])\n\n has_validation_results_code = subprocess.call(\n ['aws', 's3', 'ls', 's3://ithemal-experiments/{}/{}/validation_results.txt'.format(experiment_name, experiment_time)],\n stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'),\n )\n\n has_finished.append(has_validation_results_code == 0)\n\n return experiment_name, experiment_times, has_finished\n\ndef extract_train_measurement(experiment_name, 
user_provided_name, experiment_time):\n # type: (str, str) -> TrainMeasurement\n\n fname = os.path.join(_DIRNAME, 'data', experiment_name, experiment_time, 'loss_report.log')\n\n epochs = []\n times = []\n losses = []\n trainers = []\n\n with open(fname) as f:\n for line in f.readlines():\n split = line.split()\n\n epochs.append(int(split[0]))\n times.append(float(split[1]))\n losses.append(float(split[2]))\n trainers.append(int(split[3]))\n\n return TrainMeasurement(\n user_provided_name,\n np.array(epochs),\n np.array(times),\n np.array(losses),\n np.array(trainers),\n )\n\ndef extract_test_measurement(experiment_name, user_provided_name, experiment_time):\n # type: (str, str) -> TestMeasurement\n\n checkpoint_fname_pat = re.compile('(?P<time>\\d+\\.\\d+).report')\n\n times = []\n losses = []\n checkpoint_reports_dir = os.path.join(_DIRNAME, 'data', experiment_name, experiment_time, 'checkpoint_reports')\n\n for checkpoint_report in os.listdir(checkpoint_reports_dir):\n checkpoint_report = os.path.basename(checkpoint_report)\n\n match = checkpoint_fname_pat.search(checkpoint_report)\n\n if not match:\n raise ValueError('Invalid checkpoint report name {} (in {}/{})'.format(checkpoint_report, experiment_name, experiment_time))\n\n elapsed_time = float(match.group('time'))\n\n with open(os.path.join(checkpoint_reports_dir, checkpoint_report)) as f:\n line = f.readlines()[-1]\n loss = float(line[1:line.index(']')])\n times.append(elapsed_time)\n losses.append(loss)\n\n times = np.array(times)\n losses = np.array(losses)\n sorted_idxs = np.argsort(times)\n times = times[sorted_idxs]\n losses = losses[sorted_idxs]\n\n return TestMeasurement(user_provided_name, times, losses)\n\ndef get_measurements(experiments, names):\n # type: (List[str], List[str]) -> Tuple[List[TrainMeasurement], List[TestMeasurement], List[bool]]\n\n train_measurements = [] # type: List[TrainMeasurement]\n test_measurements = [] # type: List[TestMeasurement]\n has_finished = [] # type: List[bool]\n\n if not names:\n names = experiments\n\n assert len(names) == len(experiments)\n\n for experiment_name, user_name in zip(experiments, names):\n name, experiment_times, finished = synchronize_experiment_files(experiment_name)\n has_finished.extend(finished)\n for experiment_time in experiment_times:\n train_measurements.append(extract_train_measurement(name, user_name, experiment_time))\n test_measurements.append(extract_test_measurement(name, user_name, experiment_time))\n\n return train_measurements, test_measurements, has_finished\n\ndef main():\n # type: () -> None\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--train-blur', type=float, default=25)\n parser.add_argument('--test-blur', type=float, default=0.5)\n parser.add_argument('--min-y', type=float, default=0.0)\n parser.add_argument('--max-y', type=float, default=0.4)\n parser.add_argument('experiments', nargs='+')\n parser.add_argument('--names', nargs='+')\n parser.add_argument('--trainers', default=False, action='store_true')\n parser.add_argument('--no-test', default=False, action='store_true')\n parser.add_argument('--raw-x', default=False, action='store_true')\n parser.add_argument('--sort', default=False, action='store_true')\n parser.add_argument('--validation', default=False, action='store_true')\n parser.add_argument('--norm-epoch', default=False, action='store_true')\n parser.add_argument('--shortest-trainer', default=False, action='store_true')\n parser.add_argument('--save')\n\n args = parser.parse_args()\n\n train_measurements, 
test_measurements, has_finished = get_measurements(args.experiments, args.names)\n\n if args.no_test:\n test_measurements = list(TestMeasurement(m.experiment_name, [], []) for m in test_measurements)\n\n if args.sort:\n idxs = np.argsort([-np.mean(m.losses[len(m.losses)//2:]) for m in train_measurements])\n train_measurements = [train_measurements[i] for i in idxs]\n test_measurements = [test_measurements[i] for i in idxs]\n has_finished = [has_finished[i] for i in idxs]\n\n if args.shortest_trainer:\n shortest_epoch = min(measurement.epochs[-1] for measurement in train_measurements)\n for tridx, (tr, te) in enumerate(zip(train_measurements, test_measurements)):\n try:\n cut_idx = next(i for (i, e) in enumerate(tr.epochs) if e > shortest_epoch)\n except StopIteration:\n continue\n\n train_measurements[tridx] = TrainMeasurement(\n tr.experiment_name,\n tr.epochs[:cut_idx],\n tr.times[:cut_idx],\n tr.losses[:cut_idx],\n tr.trainers[:cut_idx],\n )\n\n cut_time = train_measurements[tridx].times[-1]\n\n try:\n cut_idx = next(i for (i, t) in enumerate(te.times) if t > cut_time)\n except StopIteration:\n continue\n\n test_measurements[tridx] = TestMeasurement(\n te.experiment_name,\n te.times[:cut_idx],\n te.losses[:cut_idx],\n )\n\n plot_measurements(train_measurements, test_measurements, has_finished, args.train_blur, args.test_blur, args.trainers, args.raw_x, args.save, args.norm_epoch, args.min_y, args.max_y, args.validation)\n\nif __name__ == '__main__':\n main()\n", "id": "2805631", "language": "Python", "matching_score": 1.9916424751281738, "max_stars_count": 105, "path": "learning/pytorch/loss_reports/plot.py" }, { "content": "from os import listdir\nfrom os.path import isfile, join\nimport re\nimport argparse\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--path',action='store',default='text',type=str)\n args = parser.parse_args(sys.argv[1:])\n\n mypath = args.path\n\n files = [join(mypath,f) for f in listdir(mypath) if (isfile(join(mypath, f)) and 'static' in f)]\n\n lines = 0\n nonempty = 0\n valid = 0\n\n for file in files:\n print file\n with open(file, 'r') as f:\n for line in f:\n lines += 1\n code = re.search('\\'([0-9]+[0-9,]+)\\'', line)\n if code != None:\n codeline = code.group(1)\n nonempty += 1\n ok = True\n for token in codeline.split(','):\n if token != '' and int(token) > 2000:\n ok = False\n break\n if ok:\n valid += 1\n\n print lines, nonempty, valid\n\n\n\n", "id": "4393277", "language": "Python", "matching_score": 1.0160794258117676, "max_stars_count": 105, "path": "data_export/scripts/data_linter.py" }, { "content": "from os import listdir\nfrom os.path import isfile, join\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport sys\nimport utilities as ut\nfrom tqdm import tqdm\nimport subprocess\nimport os\nimport re\nimport time\nimport argparse\n\n\ndef wait_timeout(proc, seconds):\n \"\"\"Wait for a process to finish, or raise exception after timeout\"\"\"\n start = time.time()\n end = start + seconds\n interval = min(seconds / 1000.0, .10)\n\n while True:\n result = proc.poll()\n if result is not None:\n return result\n if time.time() >= end:\n proc.kill()\n return None\n time.sleep(interval)\n\n\nclass PMCValue:\n\n def __init__(self, value):\n self.value = value\n self.count = 1\n\nclass PMC:\n\n def __init__(self, name):\n self.name = name\n self.values = []\n self.mode = None\n self.percentage = 10\n\n def add_value(self, nvalue):\n\n added = False\n for val in self.values:\n if val.value 
== 0:\n val.value = 1e-3\n if (abs(val.value - nvalue) * 100.0 / val.value) < self.percentage:\n val.value = (val.value * val.count + nvalue) / (val.count + 1)\n val.count += 1\n added = True\n break\n\n if not added:\n val = PMCValue(nvalue)\n self.values.append(val)\n\n def set_mode(self):\n\n max_count = 0\n\n for val in self.values:\n if val.count > max_count:\n self.mode = val.value\n max_count = val.count\n\nclass PMCCounters:\n\n def __init__(self,line):\n names = line.split()\n #print names\n self.counters = list()\n for name in names:\n self.counters.append(PMC(name))\n\n def add_to_counters(self, line):\n values = line.split()\n #print values\n\n if len(values) != len(self.counters):\n return\n\n for i, value in enumerate(values):\n self.counters[i].add_value(int(value))\n\n def set_modes(self):\n\n for counter in self.counters:\n counter.set_mode()\n\n def get_value(self, name):\n\n for counter in self.counters:\n if name == counter.name:\n return counter.mode\n return None\n\n\ndef insert_time_value(cnx,code_id, time, arch):\n\n sql = 'INSERT INTO times (code_id, arch, kind, time) VALUES(' + str(code_id) + ',' + str(arch) + ',\\'iaca\\',' + str(time) + ')'\n ut.execute_query(cnx, sql, False)\n cnx.commit()\n\n\ndef check_error(line):\n\n errors = ['error','fault','Error']\n\n for error in errors:\n if error in line:\n return True\n return False\n\nif __name__ == '__main__':\n\n\n #command line arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('--arch',action='store',type=int,required=True)\n parser.add_argument('--cpu',action='store',type=str,required=True)\n\n parser.add_argument('--database',action='store',type=str,required=True)\n parser.add_argument('--user',action='store', type=str, required=True)\n parser.add_argument('--password',action='store', type=str, required=True)\n parser.add_argument('--port',action='store', type=int, required=True)\n\n parser.add_argument('--subd',action='store',type=str,default='')\n parser.add_argument('--tp',action='store',type=bool,default=False)\n parser.add_argument('--start',action='store',type=int)\n parser.add_argument('--end',action='store',type=int)\n\n args = parser.parse_args(sys.argv[1:])\n\n cnx = ut.create_connection(database=args.database, user=args.user, password=<PASSWORD>, port=args.port)\n sql = 'SELECT code_att, code_id from code'\n rows = ut.execute_query(cnx, sql, True)\n print len(rows)\n\n iaca_home = os.environ['ITHEMAL_HOME'] + '/timing_tools/iaca/'\n os.chdir(iaca_home + args.subd)\n\n lines = []\n start_line = -1\n with open('test.s','r') as f:\n lines = f.readlines()\n for i, line in enumerate(lines):\n rep = re.search('.*\\.rept.*', line)\n if rep != None:\n start_line = i\n break\n\n print start_line\n\n total = 0\n errors = 0\n except_errors = 0\n success = 0\n not_finished = 0\n\n total_time = 0\n\n\n start = int(len(rows) * 0.8)\n\n for row in tqdm(rows[start:]):\n\n if row[0] == None:\n continue\n\n if args.start and args.end:\n if row[1] < args.start or row[1] > args.end:\n continue\n\n splitted = row[0].split('\\n')\n write_lines = [line for line in lines]\n\n written = 0\n final_bb = []\n for i, line in enumerate(splitted):\n if line != '':\n final_bb.append(line + '\\n')\n write_lines.insert(start_line + 1 + i, line + '\\n')\n written += 1\n\n #for line in final_bb:\n # print line\n\n #written = 1\n if written > 0:\n total += 1\n with open('out.s','w+') as f:\n f.writelines(write_lines)\n proc = subprocess.Popen(['gcc','-c','-o','test.o','out.s'], stdout=subprocess.PIPE, 
stderr=subprocess.PIPE)\n result = wait_timeout(proc, 120)\n\n error_comp = False\n\n if result != None:\n\n try:\n for line in iter(proc.stderr.readline, ''):\n print line\n if check_error(line):\n error_comp = True\n break\n for line in iter(proc.stdout.readline, ''):\n print line\n if check_error(line):\n error_comp = True\n break\n except:\n error_comp = True\n\n else:\n error_comp = True\n\n if error_comp:\n errors += 1\n continue\n\n #print 'comp succesful'\n\n proc = subprocess.Popen(['./iaca','-arch',args.cpu,'-reduceout','test.o'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n start_time = time.time()\n result = wait_timeout(proc, 10)\n end_time = time.time()\n\n if result != None:\n\n error_lines = False\n for line in iter(proc.stderr.readline, ''):\n print line\n if check_error(line):\n error_lines = True\n break\n\n if error_lines == False:\n success += 1\n for line in iter(proc.stdout.readline, ''):\n found = re.search('Block Throughput: ([0-9]+\\.?[0-9]*) Cycles.*',line)\n if found:\n #print found.group(0)\n cycles = float(found.group(1))\n if cycles != 0:\n total_time += end_time - start_time\n print cycles\n if not args.tp:\n insert_time_value(cnx, row[1], cycles, args.arch)\n break\n else:\n for line in final_bb:\n print line[:-1]\n errors += 1\n\n else:\n print 'error not completed'\n not_finished += 1\n\n print total, success, errors, not_finished, except_errors, total_time\n\n cnx.close()\n", "id": "4542447", "language": "Python", "matching_score": 7.318202018737793, "max_stars_count": 105, "path": "timing_tools/timing/getiacatiming.py" }, { "content": "from os import listdir\nfrom os.path import isfile, join\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport sys\nimport common_libs.utilities as ut\nfrom tqdm import tqdm\nimport subprocess\nimport os\nimport re\nimport time\nimport argparse\n\n\ndef wait_timeout(proc, seconds):\n \"\"\"Wait for a process to finish, or raise exception after timeout\"\"\"\n start = time.time()\n end = start + seconds\n interval = min(seconds / 1000.0, .25)\n\n while True:\n result = proc.poll()\n if result is not None:\n return result\n if time.time() >= end:\n proc.kill()\n return None\n time.sleep(interval)\n\ndef fix_reg_names(line):\n # nasm recognizes, for instance, r14d rather than r14l\n regs = [('r%dl'%x, 'r%dd'%x) for x in range(8, 16)]\n for old, new in regs:\n line = line.replace(old, new)\n return line\n\ndef remove_unrecog_words(line):\n\n words = ['ptr', '<rel>']\n\n for word in words:\n line = line.replace(word,'')\n return line\n\ndef add_memory_prefix(line):\n mem = re.search('.*\\[(.*)\\].*', line)\n if (mem != None and\n re.match('.*(rsp|rbp|esp|ebp)', mem.group(1)) is None and\n not line.strip().startswith('lea')):\n index = mem.span(1)[0]\n line = line[:index] + 'UserData + ' + line[index:]\n return line\n\n\ndef insert_time_value(cnx,code_id, time, arch, ttable):\n\n sql = 'INSERT INTO ' + ttable + ' (code_id, arch, kind, time) VALUES(' + str(code_id) + ',' + str(arch) + ',\\'actual\\',' + str(time) + ')'\n ut.execute_query(cnx, sql, False)\n cnx.commit()\n\ndef insert_col_values(cnx, cols, values, code_id, arch, ttable):\n\n for i in range(len(values[0])):\n \n colstr = ''\n valuestr = ''\n\n for j, col in enumerate(cols): \n if j != len(cols) - 1:\n colstr += col + ', '\n valuestr += str(values[j][i]) + ', '\n else:\n colstr += col\n valuestr += str(values[j][i])\n \n\n sql = 'INSERT INTO ' + ttable + ' (code_id, arch, kind,' + colstr + ') VALUES(' + str(code_id) + ',' + str(arch) 
+ ',\\'actual\\',' + valuestr + ')'\n print sql\n ut.execute_query(cnx, sql, False)\n cnx.commit()\n\n\nclass PMCValue:\n\n def __init__(self, value):\n self.value = value\n self.count = 1\n\nclass PMC:\n\n def __init__(self, name):\n self.name = name\n self.values = []\n\n self.mod_values = []\n self.mode = None\n self.percentage = 5\n\n def add_value(self, nvalue):\n\n self.values.append(nvalue)\n\n added = False\n for val in self.mod_values:\n if val.value == 0:\n val.value = 1e-3\n if (abs(val.value - nvalue) * 100.0 / val.value) < self.percentage:\n val.value = (val.value * val.count + nvalue) / (val.count + 1)\n val.count += 1\n added = True\n break\n\n if not added:\n val = PMCValue(nvalue)\n self.mod_values.append(val)\n \n def set_mode(self):\n\n max_count = 0\n\n for val in self.mod_values:\n if val.count > max_count:\n self.mode = val.value\n max_count = val.count\n\nclass PMCCounters:\n\n def __init__(self,line):\n names = line.split()\n #print names\n self.counters = list()\n for name in names:\n self.counters.append(PMC(name))\n\n def add_to_counters(self, line):\n values = line.split()\n #print values\n\n if len(values) != len(self.counters):\n return\n\n for i, value in enumerate(values):\n self.counters[i].add_value(int(value))\n\n def set_modes(self):\n\n for counter in self.counters:\n counter.set_mode()\n\n def get_value(self, name):\n\n for counter in self.counters:\n if name == counter.name:\n return counter.values\n return None\n\n def get_mode(self, name):\n\n for counter in self.counters:\n if name == counter.name:\n return counter.mode\n return None\n\ndef check_error(line):\n\n errors = ['error','fault']\n warnings = ['warning']\n\n for error in errors:\n for warning in warnings:\n if error in line and not warning in line:\n return True\n return False\n\nif __name__ == '__main__':\n\n\n #command line arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('--arch',action='store',type=int,required=True)\n\n parser.add_argument('--database',action='store',type=str,required=True)\n parser.add_argument('--user',action='store', type=str, required=True)\n parser.add_argument('--password',action='store', type=str, required=True)\n parser.add_argument('--port',action='store', type=int, required=True)\n parser.add_argument('--ctable',action='store',type=str, required=True)\n parser.add_argument('--ttable',action='store',type=str, required=True)\n parser.add_argument('--limit',action='store',type=int, default=None)\n parser.add_argument('--tp',action='store',type=bool,default=False)\n\n args = parser.parse_args(sys.argv[1:])\n\n cnx = ut.create_connection(database=args.database, user=args.user, password=args.password, port=args.port)\n sql = 'SELECT code_intel, code_id from ' + args.ctable\n rows = ut.execute_query(cnx, sql, True)\n print len(rows)\n\n harness_dir = os.environ['ITHEMAL_HOME'] + '/timing_tools/harness'\n os.chdir(harness_dir)\n\n total = 0\n errors = 0\n except_errors = 0\n success = 0\n not_finished = 0\n\n\n total_time = 0.0\n total_bbs = 0\n\n # do a dry run to figure out measurement overhead\n with open('bb.nasm', 'w') as f:\n f.close()\n proc = subprocess.Popen('./a64-out.sh', stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n result = wait_timeout(proc, 10)\n startHeading = False\n startTimes = False\n counters = None\n for i, line in enumerate(iter(proc.stdout.readline, '')):\n if 'Clock' in line and startTimes == False and startHeading == False: #still didn't start collecting the actual timing data\n startHeading = True\n if startHeading 
== True:\n counters = PMCCounters(line)\n startTimes = True\n startHeading = False\n elif startTimes == True:\n counters.add_to_counters(line)\n assert counters is not None\n counters.set_modes()\n overhead = counters.get_mode('Core_cyc')\n print 'OVERHEAD =', overhead\n\n for row in rows:\n\n if row[0] == None:\n continue\n\n splitted = row[0].split('\\n')\n\n written = 0\n final_bb = []\n for i, line in enumerate(splitted):\n if line != '':\n line = remove_unrecog_words(line + '\\n')\n line = fix_reg_names(line)\n final_bb.append(line)\n written += 1\n\n\n\n if written > 0:\n total += 1\n with open('bb.nasm','w+') as f:\n f.writelines(final_bb)\n proc = subprocess.Popen('./a64-out.sh', stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n start_time = time.time()\n result = wait_timeout(proc, 10)\n end_time = time.time()\n\n if result != None:\n\n print final_bb\n\n try:\n error_lines = False\n for line in iter(proc.stderr.readline, ''):\n if check_error(line):\n print 'error ' + line\n error_lines = True\n break\n\n if error_lines == False:\n startHeading = False\n startTimes = False\n counters = None\n for i, line in enumerate(iter(proc.stdout.readline, '')):\n print line\n if 'Clock' in line and startTimes == False and startHeading == False: #still didn't start collecting the actual timing data\n startHeading = True\n if startHeading == True:\n #print 'headings ' + line\n counters = PMCCounters(line)\n startTimes = True\n startHeading = False\n elif startTimes == True:\n #print 'values ' + line\n counters.add_to_counters(line)\n if counters != None:\n\n names = ['Core_cyc', 'L1_read_misses', 'L1_write_misses', 'iCache_misses', 'Context_switches']\n columns = ['time', 'l1drmisses', 'l1dwmisses', 'l1imisses', 'conswitch']\n\n values = []\n aval_cols = []\n\n for i, name in enumerate(names):\n vs = counters.get_value(name)\n if vs != None:\n values.append(vs)\n aval_cols.append(columns[i])\n if name == 'Core_cyc':\n for j, v in enumerate(values[-1]):\n values[-1][j] -= overhead\n print aval_cols, values\n\n if not args.tp:\n insert_col_values(cnx, aval_cols, values, row[1], args.arch, args.ttable)\n \n total_time += end_time - start_time\n total_bbs += 1\n print float(total_bbs)/total_time\n success += 1\n else:\n for line in final_bb:\n print line[:-1]\n errors += 1\n except Exception as e:\n print e\n print 'exception occurred'\n except_errors += 1\n\n else:\n print 'error not completed'\n not_finished += 1\n\n if args.limit != None:\n if success == args.limit:\n break\n\n print total, success, errors, not_finished, except_errors\n\n\n print overhead\n cnx.close()\n", "id": "4611927", "language": "Python", "matching_score": 0.874411940574646, "max_stars_count": 105, "path": "timing_tools/timing/gettiming.py" }, { "content": "\"\"\"Cityscapes.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport numpy as np\n\nfrom tensor2tensor.data_generators import problem\nfrom tensor2tensor.data_generators import generator_utils\nfrom tensor2tensor.data_generators import image_utils\nfrom tensor2tensor.layers import common_image_attention as cia\nfrom tensor2tensor.layers import common_layers\nfrom tensor2tensor.layers import modalities\nfrom tensor2tensor.models import image_transformer\nfrom tensor2tensor.utils import registry\nfrom tensor2tensor.utils import t2t_model\n\nimport tensorflow as tf\n\nCITYSCAPES_IMAGE_SIZE = 256\nTRAIN_DIR = 'train_img'\nVAL_DIR = 'val_img'\n\[email protected]_hparams\ndef 
imagetransformer_cityscape_uncond():\n hp = image_transformer.imagetransformerpp_base_5l_8h_big_uncond_dr00_dan_g_bs1()\n # hp.bottom[\"targets\"] = modalities.image_channel_embeddings_bottom\n return hp\n\ndef image_generator(data_dir, training, size=CITYSCAPES_IMAGE_SIZE):\n train_prefix = TRAIN_DIR\n eval_prefix = VAL_DIR\n prefix = train_prefix if training else eval_prefix\n images_filepath = os.path.join(data_dir, prefix)\n image_files = tf.gfile.Glob(images_filepath + \"/*\")\n height = size\n width = size\n const_label = 0\n for filename in image_files:\n with tf.gfile.Open(filename, \"rb\") as f:\n encoded_image = f.read()\n yield {\n \"image/encoded\": [encoded_image],\n \"image/format\": [\"png\"],\n \"image/class/label\": [const_label],\n \"image/height\": [height],\n \"image/width\": [width]\n }\n\[email protected]_problem\nclass ImageCityscapes(image_utils.ImageProblem):\n\n @property\n def num_channels(self):\n return 3\n\n @property\n def is_small(self):\n return False\n\n @property\n def num_classes(self):\n return 30\n\n @property\n def train_shards(self):\n return 30\n\n @property\n def dev_shards(self):\n return 5\n\n def generate_data(self, data_dir, tmp_dir, task_id=-1):\n generator_utils.generate_dataset_and_shuffle(\n self.generator(data_dir, tmp_dir, True),\n self.training_filepaths(data_dir, self.train_shards, shuffled=True),\n self.generator(data_dir, tmp_dir, False),\n self.dev_filepaths(data_dir, self.dev_shards, shuffled=True))\n\n def generator(self, data_dir, tmp_dir, is_training):\n if is_training:\n return image_generator(\n tmp_dir, int(True), size=CITYSCAPES_IMAGE_SIZE)\n else:\n return image_generator(\n tmp_dir, int(False), size=CITYSCAPES_IMAGE_SIZE)\n\n def preprocess_example(self, example, mode, unused_hparams):\n example[\"inputs\"].set_shape([CITYSCAPES_IMAGE_SIZE,\n CITYSCAPES_IMAGE_SIZE, 3])\n # example[\"inputs\"] = tf.to_int64(example[\"inputs\"])\n example[\"inputs\"] = tf.to_float(example[\"inputs\"])\n return example\n", "id": "844799", "language": "Python", "matching_score": 1.4903554916381836, "max_stars_count": 0, "path": "usr_dir/data_generators/cityscapes.py" }, { "content": "from . 
import data_generators", "id": "12834010", "language": "Python", "matching_score": 0.03754342347383499, "max_stars_count": 0, "path": "usr_dir/__init__.py" }, { "content": "import torch\nimport torch.nn as nn\nimport random\n\nclass MeanPredictor(nn.Module):\n def __init__(self, data):\n super(MeanPredictor, self).__init__()\n self._throwaway_param = nn.Linear(1, 1)\n self.mean = sum(datum.y for datum in data.train) / float(len(data.train))\n print('we gonna predict {}'.format(self.mean))\n\n def remove_refs(self, arg):\n pass\n\n def forward(self, datum):\n return torch.tensor([self.mean]).squeeze()\n\nclass RandomPredictor(nn.Module):\n def __init__(self, data):\n super(RandomPredictor, self).__init__()\n self._throwaway_param = nn.Linear(1, 1)\n self.data = data\n\n def remove_refs(self, arg):\n pass\n\n def forward(self, datum):\n guess = random.choice(self.data.train).y\n return torch.tensor([guess]).squeeze()\n", "id": "10486866", "language": "Python", "matching_score": 0.8561623096466064, "max_stars_count": 105, "path": "learning/pytorch/models/baselines.py" }, { "content": "import torch\nfrom typing import Any, Dict\n\ndef dump_shared_params(module):\n # type: (torch.nn.Module) -> Dict[str, Any]\n return {\n name: param.data.share_memory_().storage()._share_filename_()\n for (name, param) in module.named_parameters()\n }\n\ndef load_shared_params(module, params):\n # type: (torch.nn.Module, Dict[str, Any]) -> None\n\n for (name, param) in module.named_parameters():\n storage = torch.Storage._new_shared_filename(*params[name])\n param.data = torch.Tensor(storage).view(param.data.shape)\n", "id": "1006544", "language": "Python", "matching_score": 0.39344263076782227, "max_stars_count": 105, "path": "learning/pytorch/models/model_utils.py" }, { "content": "'''ZMQ, Pickle, and NamedTuples are together broken enough that you\ncan't have the __main__ module declare a NamedTuple which is sent through ZMQ.\n\nTo remedy that, we declare all NamedTuples here\n\n'''\n\nfrom ithemal_utils import BaseParameters, TrainParameters\nfrom typing import Any, Dict, List, Iterator, Tuple, Type, Union, NamedTuple\n\nTrainerInitializeReq = NamedTuple('TrainerInitializeReq', [\n ('rank', int),\n])\nTrainerInitializeResp = NamedTuple('TrainerInitializeResp', [\n ('base_params', BaseParameters),\n ('train_params', TrainParameters),\n])\n\nTrainerDataReq = NamedTuple('TrainerDataReq', [\n ('rank', int),\n])\nTrainerDataResp = NamedTuple('TrainerDataResp', [\n ('model_tensor_params', Any),\n ('trainer_tensor_params', Any),\n])\n\n# ------------------------------\n\nTrainerStepReq = NamedTuple('TrainerStepReq', [\n ('rank', int),\n])\nWaitResp = NamedTuple('WaitResp', [])\nKillResp = NamedTuple('KillResp', [])\nSetLrResp = NamedTuple('SetLrResp', [\n ('new_lr', float),\n])\nShuffleDataResp = NamedTuple('ShuffleDataResp', [\n ('random_state', object),\n])\nRunTrainerResp = NamedTuple('RunTrainerResp', [\n ('partition', Tuple[int, int]),\n])\n\n# ------------------------------\n\nTrainerLossReq = NamedTuple('TrainerLossReq', [\n ('rank', int),\n ('loss', float),\n ('n_items', int),\n])\nTrainerLossResp = NamedTuple('TrainerLossResp', [])\n\n# ------------------------------\n\nTrainerDeathReq = NamedTuple('TrainerDeathReq', [\n ('rank', int),\n ('partition_remainder', Tuple[int, int]),\n])\nTrainerDeathResp = NamedTuple('TrainerDeathResp', [])\n", "id": "7625975", "language": "Python", "matching_score": 3.81791090965271, "max_stars_count": 105, "path": "learning/pytorch/training_messages.py" }, { "content": 
"#!/usr/bin/env python\n\nimport sys\nimport os\nsys.path.append(os.path.join(os.environ['ITHEMAL_HOME'], 'learning', 'pytorch'))\n\nimport models.graph_models as md\nimport models.losses as ls\nimport models.train as tr\nimport data.data_cost as dt\nfrom experiments.experiment import Experiment\nimport utils.messages as messages\nfrom mpconfig import MPConfig\nfrom utils import *\nfrom training_messages import *\nfrom ithemal_utils import *\n\nimport atexit\nimport collections\nfrom enum import Enum\nimport time\nimport torch\nfrom typing import Any, Dict, List, Iterator, Tuple, Type, Union, NamedTuple, TypeVar\nimport zmq\nfrom tqdm import tqdm\nimport subprocess\nimport random\nimport uuid\n\n\n# ------------------------------- TRAINER STATE ---------------------------------\n\nclass TrainerState(Enum):\n UNINITIALIZED = 0\n LOADING_DATA = 1\n READY_FOR_EPOCH = 2\n READY_FOR_DATA = 3\n DEAD = 4\n\n# ------------------------------- LOSS REPORTING --------------------------------\n\nclass LossReporter(object):\n def __init__(self, experiment, n_datapoints, trainer):\n # type: (Experiment, int, tr.Train) -> None\n\n self.experiment = experiment\n self.n_datapoints = n_datapoints\n self.trainer = trainer\n\n self.start_time = time.time()\n self.ema_loss = 1.0\n self.running_trainers = 0\n self.epoch_no = 0\n self.total_processed_items = 0\n self.epoch_processed_items = 0\n\n self.last_report_time = 0.0\n self.last_save_time = 0.0\n\n self.root_path = experiment.experiment_root_path()\n\n try:\n os.makedirs(self.root_path)\n except OSError:\n pass\n\n # line buffered\n self.loss_report_file = open(os.path.join(self.root_path, 'loss_report.log'), 'w', 1)\n\n self.pbar = tqdm(desc=self.format_loss(), total=self.n_datapoints)\n\n def format_loss(self):\n # type: () -> str\n\n return 'Epoch {}, Loss: {:.2}'.format(\n self.epoch_no,\n self.ema_loss,\n )\n\n def start_epoch(self, epoch_no, n_trainers):\n # type: (int, int) -> None\n\n self.epoch_no = epoch_no\n self.running_trainers = n_trainers\n self.epoch_processed_items = 0\n\n self.pbar.close()\n self.pbar = tqdm(desc=self.format_loss(), total=self.n_datapoints)\n\n def report_items(self, n_items, loss):\n # type: (int, float) -> None\n\n eps = 0.00025 * n_items\n\n self.ema_loss = self.ema_loss * (1 - eps) + loss * eps\n self.epoch_processed_items += n_items\n self.total_processed_items += n_items\n\n desc = self.format_loss()\n self.pbar.set_description(desc)\n self.pbar.update(n_items)\n\n def report_trainer_death(self):\n # type: () -> None\n\n self.running_trainers -= 1\n self.pbar.write('Trainer died! 
Down to {} trainers'.format(self.running_trainers))\n\n def _report_loss(self, t):\n # type: (float) -> None\n\n message = '\\t'.join(map(str, (\n self.epoch_no,\n t - self.start_time,\n self.ema_loss,\n self.running_trainers\n )))\n self.loss_report_file.write(message + '\\n')\n\n def _checkpoint_trainer(self, t):\n # type: (float) -> None\n\n checkpoint_fname = self.experiment.checkpoint_file_name(t - self.start_time)\n self.trainer.save_checkpoint(\n self.epoch_no, 0, checkpoint_fname,\n runtime=t - self.start_time,\n ep_proc_instances=self.epoch_processed_items,\n total_proc_instances=self.total_processed_items,\n )\n\n def report(self):\n # type: () -> None\n\n t = time.time()\n if t - self.last_report_time > 10:\n self._report_loss(t)\n self.last_report_time = t\n\n if t - self.last_save_time > 10*60:\n self._checkpoint_trainer(t)\n self.last_save_time = t\n\n def finish(self):\n # type: () -> None\n\n self.pbar.close()\n print('Finishing training')\n\n t = time.time()\n self._report_loss(t)\n self._checkpoint_trainer(t)\n\n self.trainer.save_checkpoint(\n self.epoch_no,\n 0,\n os.path.join(self.root_path, 'trained.mdl')\n )\n\n resultfile = os.path.join(self.root_path, 'validation_results.txt')\n self.trainer.validate(resultfile)\n\n# ------------------------------ DATA PARTITIONING ------------------------------\ndef get_partition_splits_from_distr(n_datapoints, n_trainers, split_distr):\n # type: (int, int, List[float]) -> Iterator[Tuple[int, int]]\n\n assert abs(sum(split_distr) - 1) < 1e-4\n assert all(elem >= 0 for elem in split_distr)\n\n idx = 0\n for frac in split_distr:\n split_size = int((n_datapoints / n_trainers) * frac)\n for tr in range(n_trainers):\n yield (idx, idx + split_size)\n idx += split_size\n yield (idx, n_datapoints)\n\ndef get_partition_splits_from_size(n_datapoints, split_size):\n # type: (int, int) -> Iterator[Tuple[int, int]]\n\n for i in range(0, n_datapoints, split_size):\n yield (i, i + split_size)\n\ndef get_partitions(n_datapoints, train_params):\n # type: (int, TrainParameters) -> List[Tuple[int, int]]\n\n split = train_params.split\n\n if isinstance(split, int):\n return list(get_partition_splits_from_size(n_datapoints, split))\n else:\n return list(get_partition_splits_from_distr(n_datapoints, train_params.trainers, split))\n\n\n# ---------------------------- TRAINER CONSTRUCTION -----------------------------\n\ndef load_trainer(base_params, train_params, model, data):\n # type: (BaseParameters, TrainParameters, md.AbstractGraphModule, dt.DataCost) -> tr.Train\n\n return tr.Train(\n model, data, tr.PredictionType.REGRESSION, ls.mse_loss, 1,\n batch_size=train_params.batch_size, clip=None, opt=train_params.optimizer,\n lr=train_params.initial_lr, weight_decay=train_params.weight_decay,\n predict_log=base_params.predict_log, momentum=train_params.momentum,\n nesterov=train_params.nesterov,\n )\n\n# -------------------------------- COORDINATION ---------------------------------\n\ndef get_socket_url(identifier):\n # type: (str) -> str\n\n return 'ipc:///tmp/{}.socket'.format(identifier)\n\ndef run_training_coordinator(base_params, train_params):\n # type: (BaseParameters, TrainParameters) -> None\n\n torch.multiprocessing.set_sharing_strategy('file_system')\n expt = Experiment(train_params.experiment_name, train_params.experiment_time, base_params.data)\n\n socket_identifier = str(uuid.uuid4())\n\n context = zmq.Context()\n socket = context.socket(zmq.REP)\n socket.bind(get_socket_url(socket_identifier))\n\n def send_msg(msg):\n # type: 
(Union[object, List[object]]) -> None\n if isinstance(msg, list):\n socket.send_pyobj(msg)\n else:\n socket.send_pyobj([msg])\n\n # fork off trainers\n procs = []\n mp_config = MPConfig(train_params.threads)\n trainer_states = {} # type: Dict[int, TrainerState]\n\n def all_in_state(state):\n # type: (TrainerState) -> bool\n return all(trainer_states[rank] == state for rank in trainer_states)\n\n with mp_config:\n for idx in range(train_params.trainers):\n trainer_states[idx] = TrainerState.UNINITIALIZED\n mp_config.set_env(idx)\n procs.append(subprocess.Popen([sys.executable, __file__, socket_identifier, str(idx)]))\n\n @atexit.register\n def cleanup_procs():\n # type: () -> None\n print('cleaning up trainers')\n for proc in procs:\n proc.terminate()\n\n while not all_in_state(TrainerState.LOADING_DATA):\n msg = socket.recv_pyobj()\n if isinstance(msg, TrainerInitializeReq):\n send_msg(TrainerInitializeResp(\n base_params,\n train_params,\n ))\n trainer_states[msg.rank] = TrainerState.LOADING_DATA\n elif isinstance(msg, TrainerDataReq):\n send_msg(WaitResp())\n else:\n raise ValueError('Unexpected message {}'.format(msg))\n\n data = load_data(base_params)\n model = load_model(base_params, data)\n\n dump_model_and_data(model, data, os.path.join(expt.experiment_root_path(), 'predictor.dump'))\n\n trainer = load_trainer(base_params, train_params, model, data)\n\n while not all_in_state(TrainerState.READY_FOR_EPOCH):\n msg = socket.recv_pyobj()\n if isinstance(msg, TrainerDataReq):\n send_msg(TrainerDataResp(\n model.dump_shared_params(),\n trainer.dump_shared_params(),\n ))\n trainer_states[msg.rank] = TrainerState.READY_FOR_EPOCH\n elif isinstance(msg, TrainerStepReq):\n send_msg(WaitResp())\n else:\n raise ValueError('Unexpected message {}'.format(msg))\n\n current_lr = train_params.initial_lr\n loss_reporter = LossReporter(expt, len(data.train), trainer)\n\n for epoch_no in range(train_params.epochs):\n if train_params.decay_trainers:\n n_trainers = max(1, train_params.trainers - epoch_no)\n else:\n n_trainers = train_params.trainers\n\n loss_reporter.start_epoch(epoch_no + 1, n_trainers)\n\n # start exactly n_trainers trainers, kill the rest\n n_started_trainers = 0\n while not all_in_state(TrainerState.READY_FOR_DATA):\n msg = socket.recv_pyobj()\n\n if isinstance(msg, TrainerStepReq):\n if trainer_states[msg.rank] == TrainerState.READY_FOR_EPOCH:\n if n_started_trainers >= n_trainers:\n send_msg(KillResp())\n del trainer_states[msg.rank]\n else:\n send_msg([ShuffleDataResp(random.getstate()), SetLrResp(current_lr)])\n trainer_states[msg.rank] = TrainerState.READY_FOR_DATA\n n_started_trainers += 1\n else:\n send_msg([WaitResp()])\n else:\n raise ValueError('Unexpected message {}'.format(msg))\n\n # shuffle data locally to permute random state\n random.shuffle(data.train)\n\n # get partitions\n partitions = get_partitions(len(data.train), train_params)\n partition_idx = 0\n\n # run until all done with epoch or dead\n while not all(trainer_states[rank] in (TrainerState.READY_FOR_EPOCH, TrainerState.DEAD) for rank in trainer_states):\n msg = socket.recv_pyobj()\n\n if trainer_states[msg.rank] == TrainerState.DEAD:\n send_msg(WaitResp())\n elif isinstance(msg, TrainerStepReq):\n if partition_idx < len(partitions):\n trainer_states[msg.rank] = TrainerState.READY_FOR_DATA\n send_msg(RunTrainerResp(partitions[partition_idx]))\n partition_idx += 1\n else:\n send_msg(WaitResp())\n trainer_states[msg.rank] = TrainerState.READY_FOR_EPOCH\n elif isinstance(msg, TrainerLossReq):\n 
send_msg(TrainerLossResp())\n loss_reporter.report_items(msg.n_items, msg.loss)\n elif isinstance(msg, TrainerDeathReq):\n send_msg(TrainerDeathResp())\n loss_reporter.report_trainer_death()\n trainer_states[msg.rank] = TrainerState.DEAD\n if msg.partition_remainder[0] < msg.partition_remainder[1]:\n partitions.append(msg.partition_remainder)\n else:\n raise ValueError('Unexpected Message {}'.format(msg))\n\n loss_reporter.report()\n\n if all_in_state(TrainerState.DEAD):\n break\n\n # reset states\n for rank in trainer_states:\n trainer_states[rank] = TrainerState.READY_FOR_EPOCH\n\n # decay LR if necessary\n if train_params.decay_lr or (train_params.weird_lr and epoch_no > 0):\n current_lr /= train_params.lr_decay_rate\n\n loss_reporter.finish()\n\n# ----------------------------------- WORKER ------------------------------------\n\ndef run_training_worker(identifier, rank):\n # type: (str, int) -> None\n\n print('creating socket...')\n context = zmq.Context()\n socket = context.socket(zmq.REQ)\n print('connecting to coordinator')\n socket.connect(get_socket_url(identifier))\n\n def send_msg(msg):\n # type: (Any) -> None\n socket.send_pyobj(msg)\n\n def recv_msgs():\n # type: () -> List[Any]\n return socket.recv_pyobj()\n\n def recv_one():\n # type: () -> Any\n resp = recv_msgs()\n assert len(resp) == 1\n return resp[0]\n\n T = TypeVar('T')\n def send_and_get_one(msg, typ):\n # type: (Any, Type[T]) -> T\n send_msg(msg)\n res = recv_one()\n assert isinstance(res, typ)\n return res\n\n initialize_params = send_and_get_one(TrainerInitializeReq(rank), TrainerInitializeResp)\n base_params = initialize_params.base_params\n train_params = initialize_params.train_params\n\n data = load_data(base_params)\n model = load_model(base_params, data)\n trainer = load_trainer(base_params, train_params, model, data)\n\n data_params = None\n while not isinstance(data_params, TrainerDataResp):\n send_msg(TrainerDataReq(rank))\n data_params = recv_one()\n\n model.load_shared_params(data_params.model_tensor_params)\n trainer.load_shared_params(data_params.trainer_tensor_params)\n\n loss_report_freq = 10\n losses = [] # type: List[Tuple[float, int]]\n\n def report_loss(msg):\n # type: (messages.Message) -> None\n if isinstance(msg, messages.TrainerDeathMessage):\n send_and_get_one(TrainerDeathReq(rank, msg.remaining_partition), TrainerDeathResp)\n elif isinstance(msg, messages.LossReportMessage):\n losses.append((msg.loss, msg.n_items))\n if len(losses) > loss_report_freq:\n avg_loss = sum(l[0] for l in losses) / len(losses)\n n_items = sum(l[1] for l in losses)\n losses[:] = []\n\n send_and_get_one(TrainerLossReq(rank, avg_loss, n_items), TrainerLossResp)\n else:\n raise ValueError('Unexpected message {}'.format(msg))\n\n print('starting train loop')\n while True:\n send_msg(TrainerStepReq(rank))\n msgs = recv_msgs()\n\n for msg in msgs:\n if isinstance(msg, WaitResp):\n time.sleep(1)\n continue\n elif isinstance(msg, KillResp):\n print('Trainer {} dying'.format(rank))\n return\n elif isinstance(msg, ShuffleDataResp):\n random.setstate(msg.random_state)\n random.shuffle(data.train)\n random.seed()\n continue\n elif isinstance(msg, SetLrResp):\n trainer.set_lr(msg.new_lr)\n continue\n elif isinstance(msg, RunTrainerResp):\n trainer(rank, msg.partition, report_loss)\n continue\n else:\n raise ValueError('Unexpected message {}'.format(msg))\n\ndef main():\n # type: () -> None\n\n assert len(sys.argv) == 3, 'Must be passed exactly two parameters: socket ID, rank'\n run_training_worker(sys.argv[1], 
int(sys.argv[2]))\n\nif __name__ == '__main__':\n main()\n", "id": "3058483", "language": "Python", "matching_score": 5.273841381072998, "max_stars_count": 105, "path": "learning/pytorch/training.py" }, { "content": "import sys\nimport os\nsys.path.append(os.path.join(os.environ['ITHEMAL_HOME'], 'learning', 'pytorch'))\n\nimport torch\nimport torch.nn as nn\nfrom enum import Enum\nimport common_libs.utilities as ut\nimport data.data_cost as dt\nimport torch.autograd as autograd\nimport torch.optim as optim\nimport math\nimport numpy as np\nimport os\nimport gc\nimport psutil\nfrom tqdm import tqdm\nimport time\nimport torch\nfrom torch import nn\nimport utils.messages as messages\nimport random\nfrom typing import Any, Callable, Dict, IO, List, Optional, Tuple\n\nfrom . import model_utils\n\ndef memReport():\n # type: () -> None\n num_obj = 0\n for obj in gc.get_objects():\n if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):\n num_obj += 1\n print 'num_obj ' + str(num_obj)\n\ndef cpuStats():\n # type: () -> None\n print(sys.version)\n print(psutil.cpu_percent())\n print(psutil.virtual_memory()) # physical memory usage\n pid = os.getpid()\n py = psutil.Process(pid)\n memoryUse = py.memory_info()[0] / 2. ** 30 # memory use in GB...I think\n print('memory GB:', memoryUse)\n\nclass PredictionType(Enum):\n CLASSIFICATION = 1\n REGRESSION = 2\n\nclass OptimizerType(Enum):\n ADAM_PRIVATE = 1\n ADAM_SHARED = 2\n SGD = 3\n\nclass Train():\n\n \"\"\"\n Performs training and validation for the models listed above\n \"\"\"\n\n def __init__(self,\n model,\n data,\n typ,\n loss_fn,\n num_losses,\n batch_size = 1000,\n tolerance = 25.,\n lr = 0.001,\n momentum = 0.9,\n nesterov=False,\n clip = 2.,\n opt = OptimizerType.SGD,\n weight_decay = 0.,\n predict_log = False,\n ):\n # type: (nn.Module, dt.Data, PredictionType, Callable[[torch.tensor, torch.tensor], torch.tensor], int, int, float, float, float, bool, Optional[float], OptimizerType, float, bool) -> None\n\n self.model = model\n self.typ = typ\n self.data = data\n self.lr = lr\n self.clip = clip\n self.predict_log = predict_log\n self.opt_type = opt\n\n if opt == OptimizerType.SGD:\n self.optimizer = optim.SGD(self.model.parameters(), lr=lr, momentum=momentum, weight_decay=weight_decay, nesterov=nesterov)\n elif opt == OptimizerType.ADAM_PRIVATE or opt == OptimizerType.ADAM_SHARED:\n self.optimizer = optim.Adam(self.model.parameters(), weight_decay=weight_decay, lr=lr)\n if opt == OptimizerType.ADAM_SHARED:\n for param in self.optimizer.param_groups[0]['params']:\n param.share_memory_()\n else:\n raise ValueError('unknown optimizer...')\n\n #training parameters\n self.partition = (0, len(self.data.train))\n\n self.batch_size = batch_size\n\n #correctness\n self.tolerance = tolerance\n\n #for classification\n self.correct = 0\n\n #functions\n self.loss_fn = loss_fn\n self.num_losses = num_losses\n\n self.rank = 0\n self.last_save_time = 0\n\n def dump_shared_params(self):\n # type: () -> Dict[str, object]\n\n if self.opt_type == OptimizerType.ADAM_SHARED:\n return model_utils.dump_shared_params(self.optimizer)\n else:\n return {}\n\n def load_shared_params(self, params):\n # type: (Dict[str, object]) -> None\n\n if self.opt_type == OptimizerType.ADAM_SHARED:\n model_utils.load_shared_params(self.optimizer, params)\n\n \"\"\"\n Print routines for predicted and target values.\n \"\"\"\n def print_final(self,f,x,y):\n # type: (IO[str], np.array, np.array) -> None\n if x.shape != ():\n size = x.shape[0]\n for i in 
range(size):\n f.write('%f,%f ' % (x[i],y[i]))\n f.write('\\n')\n else:\n f.write('%f,%f\\n' % (x,y))\n\n def print_max(self,f,x,y):\n # type: (IO[str], np.array, np.array) -> None\n x = torch.argmax(x)\n y = torch.argmax(y)\n\n f.write('%d,%d\\n' % (x.item(),y.item()))\n\n \"\"\"\n correct example counting functions\n \"\"\"\n def correct_classification(self,x,y):\n # type: (torch.tensor, torch.tensor) -> None\n\n x = torch.argmax(x) + 1\n y = torch.argmax(y) + 1\n\n percentage = torch.abs(x - y) * 100.0 / y\n\n if percentage < self.tolerance:\n self.correct += 1\n\n def correct_regression(self,x,y):\n # type: (torch.tensor, torch.tensor) -> None\n\n if x.shape != ():\n x = x[-1]\n y = y[-1]\n\n percentage = torch.abs(x - y) * 100.0 / (y + 1e-3)\n\n if percentage < self.tolerance:\n self.correct += 1\n\n def save_checkpoint(self, epoch, batch_num, filename, **rest):\n # type: (int, int, str, **Any) -> None\n\n state_dict = {\n 'epoch': epoch,\n 'batch_num': batch_num,\n 'model': self.model.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n }\n\n for (k, v) in rest.items():\n state_dict[k] = v\n\n # ensure directory exists\n try:\n os.makedirs(os.path.dirname(filename))\n except OSError:\n pass\n\n torch.save(state_dict, filename)\n\n def load_checkpoint(self, filename):\n # type: (str) -> Dict[str, Any]\n\n state_dict = torch.load(filename)\n self.model.load_state_dict(state_dict['model'])\n\n try:\n self.optimizer.load_state_dict(state_dict['optimizer'])\n except ValueError:\n print('Couldnt load optimizer!')\n\n return state_dict\n\n def __call__(self, rank, partition, report_loss_fn=None):\n # type: (int, Tuple[int, int], Optional[Callable[[messages.Message], None]]) -> None\n self.rank = rank\n self.partition = partition\n self.train(report_loss_fn=report_loss_fn)\n\n def get_target(self, datum):\n # type: (dt.DataItem) -> torch.tensor\n target = torch.FloatTensor([datum.y]).squeeze()\n if self.predict_log:\n target.log_()\n return target\n\n \"\"\"\n Training loop - to do make the average loss for general\n \"\"\"\n\n def train(self, report_loss_fn=None):\n # type: (Optional[Callable[[messages.Message], None]]) -> None\n\n (partition_start, partition_end) = self.partition\n\n def report_trainer_death(idx):\n # type: (int) -> None\n\n if report_loss_fn is not None:\n report_loss_fn(messages.TrainerDeathMessage(\n (idx + self.batch_size, partition_end),\n ))\n\n for idx in range(partition_start, partition_end, self.batch_size):\n batch_loss_sum = np.zeros(self.num_losses)\n self.correct = 0\n\n self.optimizer.zero_grad()\n loss_tensor = torch.FloatTensor([0]).squeeze()\n batch = self.data.train[idx:idx+self.batch_size]\n\n if not batch:\n continue\n\n for datum in batch:\n output = self.model(datum)\n\n if torch.isnan(output).any():\n report_trainer_death(idx)\n return\n\n #target as a tensor\n target = self.get_target(datum)\n\n #get the loss value\n if self.loss_fn:\n losses_opt = self.loss_fn(output, target)\n\n if self.predict_log and self.loss_fn:\n losses_rep = self.loss_fn(output.exp(), target.exp())\n else:\n losses_rep = losses_opt\n\n #check how many are correct\n if self.typ == PredictionType.CLASSIFICATION:\n self.correct_classification(output, target)\n elif self.typ == PredictionType.REGRESSION:\n self.correct_regression(output, target)\n\n #accumulate the losses\n for class_idx, (loss_opt, loss_rep) in enumerate(zip(losses_opt, losses_rep)):\n loss_tensor += loss_opt\n l = loss_rep.item()\n batch_loss_sum[class_idx] += l\n\n batch_loss_avg = batch_loss_sum / 
len(batch)\n\n #propagate gradients\n loss_tensor.backward()\n\n #clip the gradients\n if self.clip is not None:\n torch.nn.utils.clip_grad_norm(self.model.parameters(), self.clip)\n\n for param in self.model.parameters():\n if param.grad is None:\n continue\n\n if torch.isnan(param.grad).any():\n report_trainer_death(idx)\n return\n\n #optimizer step to update parameters\n self.optimizer.step()\n\n # get those tensors out of here!\n for datum in batch:\n self.model.remove_refs(datum)\n\n if report_loss_fn is not None:\n report_loss_fn(messages.LossReportMessage(\n self.rank,\n batch_loss_avg[0],\n len(batch),\n ))\n\n def set_lr(self, lr):\n # type: (float) -> None\n self.lr = lr\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = self.lr\n\n \"\"\"\n Validation with a test set\n \"\"\"\n\n def validate(self, resultfile, loadfile=None):\n # type: (str, Optional[str]) -> Tuple[List[List[float]], List[List[float]]]\n if loadfile is not None:\n print 'loaded from checkpoint for validation...'\n self.load_checkpoint(loadfile)\n\n f = open(resultfile,'w')\n\n self.correct = 0\n average_loss = [0] * self.num_losses\n actual = []\n predicted = []\n\n for j, item in enumerate(tqdm(self.data.test)):\n\n #print len(item.x)\n output = self.model(item)\n # 583 post process output\n output = self.data.inverse_label_transform(output)\n target = self.get_target(item)\n\n if self.predict_log:\n output.exp_()\n target.exp_()\n\n #get the target and predicted values into a list\n if self.typ == PredictionType.CLASSIFICATION:\n actual.append((torch.argmax(target) + 1).data.numpy().tolist())\n predicted.append((torch.argmax(output) + 1).data.numpy().tolist())\n else:\n actual.append(target.data.numpy().tolist())\n predicted.append(output.data.numpy().tolist())\n\n # self.print_final(f, output, target)\n f.write('%s,%f,%f\\n' % (item.function.name, target, output))\n\n losses = self.loss_fn(output, target)\n if self.typ == PredictionType.CLASSIFICATION:\n self.correct_classification(output, target)\n else:\n self.correct_regression(output, target)\n\n #accumulate the losses\n loss = torch.zeros(1)\n for c,l in enumerate(losses):\n loss += l\n average_loss[c] = (average_loss[c] * j + l.item()) / (j + 1)\n\n # if j % (len(self.data.test) / 100) == 0:\n # p_str = str(j) + ' '\n # for av in average_loss:\n # p_str += str(av) + ' '\n # p_str += str(self.correct) + ' '\n # print p_str\n\n #remove refs; so the gc remove unwanted tensors\n self.model.remove_refs(item)\n\n # for loss in average_loss:\n # f.write('loss - %f\\n' % (loss))\n # f.write('%f,%f\\n' % (self.correct, len(self.data.test)))\n\n print('Average loss: %f, Num correct: %d, Num data: %d' % (\n average_loss[0], self.correct, len(self.data.test)))\n f.close()\n\n return (actual, predicted)\n", "id": "8129895", "language": "Python", "matching_score": 4.169249057769775, "max_stars_count": 0, "path": "learning/pytorch/models/train.py" }, { "content": "from typing import NamedTuple, Tuple, Union\n\nLossReportMessage = NamedTuple('LossReportMessage', [\n ('rank', int),\n ('loss', float),\n ('n_items', int),\n])\n\nEpochAdvanceMessage = NamedTuple('EpochAdvanceMessage', [\n ('epoch', int),\n ('n_trainers', int),\n])\n\nTrainerDeathMessage = NamedTuple('TrainerDeathMessage', [\n ('remaining_partition', Tuple[int, int]),\n])\n\nMessage = Union[LossReportMessage, EpochAdvanceMessage, TrainerDeathMessage]\n", "id": "9913070", "language": "Python", "matching_score": 0.7681208252906799, "max_stars_count": 105, "path": 
"learning/pytorch/utils/messages.py" }, { "content": "#!/usr/bin/env python\n\nimport argparse\nimport experiment\nimport subprocess\nimport os\nfrom typing import List, NamedTuple, Optional\n\nReportParameters = NamedTuple('ReportParameters', [\n ('remote_model_uri', str),\n ('local_model_uri', str),\n ('remote_report_uri', str),\n ('local_report_uri', str),\n])\n\nclass Benchmarker(object):\n def __init__(self, name, time, checkpoint=None):\n # type: (str, str, Optional[str]) -> None\n self.name = name\n self.time = time\n self.checkpoint = checkpoint\n\n def get_checkpoint_report_params(self, expt, iaca_only):\n # type: (experiment.Experiment, bool) -> ReportParameters\n assert self.checkpoint is not None\n\n checkpoint_dir = expt.checkpoint_file_dir()\n experiment.mkdir(checkpoint_dir)\n\n s3_checkpoint_path = os.path.join(\n expt.name,\n expt.time,\n 'checkpoints',\n '{}.mdl'.format(self.checkpoint)\n )\n\n remote_model_uri = experiment.get_s3_url(experiment.EXPERIMENT_BUCKET, s3_checkpoint_path)\n local_model_uri = os.path.join(checkpoint_dir, '{}.mdl'.format(self.checkpoint))\n\n if iaca_only:\n report_name = '{}_iaca_only.report'.format(self.checkpoint)\n else:\n report_name = '{}.report'.format(self.checkpoint)\n\n s3_checkpoint_report_path = os.path.join(expt.name, expt.time, 'checkpoint_reports', report_name)\n remote_report_uri = experiment.get_s3_url(experiment.EXPERIMENT_BUCKET, s3_checkpoint_report_path)\n local_report_uri = os.path.join(checkpoint_dir, report_name)\n\n return ReportParameters(\n remote_model_uri=remote_model_uri,\n local_model_uri=local_model_uri,\n remote_report_uri=remote_report_uri,\n local_report_uri=local_report_uri,\n )\n\n def get_trained_report_params(self, expt, iaca_only):\n # type: (experiment.Experiment, bool) -> ReportParameters\n expt_root_dir = expt.experiment_root_path()\n experiment.mkdir(expt_root_dir)\n\n remote_model_uri = experiment.get_s3_url(experiment.EXPERIMENT_BUCKET, os.path.join(\n expt.name,\n expt.time,\n 'trained.mdl'\n ))\n local_model_uri = os.path.join(expt_root_dir, 'trained.mdl')\n\n if iaca_only:\n report_name = 'trained_iaca_only.report'\n else:\n report_name = 'trained.report'\n\n s3_report_path = os.path.join(expt.name, expt.time, report_name)\n remote_report_uri = experiment.get_s3_url(experiment.EXPERIMENT_BUCKET, s3_report_path)\n local_report_uri = os.path.join(expt_root_dir, report_name)\n\n return ReportParameters(\n remote_model_uri=remote_model_uri,\n local_model_uri=local_model_uri,\n remote_report_uri=remote_report_uri,\n local_report_uri=local_report_uri,\n )\n\n def benchmark(self, iaca_only):\n # type: (bool) -> None\n\n expt = experiment.Experiment.make_experiment_from_name_and_time(self.name, self.time)\n expt.download_data()\n\n if self.checkpoint:\n report_params = self.get_checkpoint_report_params(expt, iaca_only)\n else:\n report_params = self.get_trained_report_params(expt, iaca_only)\n\n subprocess.check_call(['aws', 's3', 'cp', report_params.remote_model_uri, report_params.local_model_uri])\n\n validate_args = ['validate', '--load-file', report_params.local_model_uri]\n if iaca_only:\n validate_args.append('--iaca-only')\n\n with open(report_params.local_report_uri, 'w', 1) as f:\n subprocess.check_call(\n expt.get_ithemal_command_root()\n + expt.base_args\n + validate_args,\n stdout=f\n )\n\n\n subprocess.check_call(['aws', 's3', 'cp', report_params.local_report_uri, report_params.remote_report_uri])\n\ndef main():\n # type: () -> None\n parser = argparse.ArgumentParser(description='Get the test 
performance of a given experiment checkpoint')\n parser.add_argument('name', help='The name of the experiment')\n parser.add_argument('time', help='The time the experiment was run')\n parser.add_argument('--checkpoint', help='The time of the checkpoint. Leave blank to run on trained model')\n parser.add_argument('--iaca-only', help='Whether to test on purely IACA data', action='store_true', default=False)\n\n args = parser.parse_args()\n\n benchmarker = Benchmarker(args.name, args.time, args.checkpoint)\n benchmarker.benchmark(args.iaca_only)\n\nif __name__ == '__main__':\n main()\n", "id": "1376947", "language": "Python", "matching_score": 3.9199657440185547, "max_stars_count": 105, "path": "learning/pytorch/experiments/benchmarker.py" }, { "content": "#!/usr/bin/env python\n\nimport argparse\nimport datetime\nimport json\nimport os\nimport subprocess\nimport sys\nimport urlparse\nimport tempfile\nimport time\nimport traceback\nfrom typing import Any, Dict, List, NamedTuple, Optional\n\n_DIRNAME = os.path.abspath(os.path.dirname(__file__))\nPYTHON = sys.executable\n\ntry:\n ITHEMAL_HOME = os.environ['ITHEMAL_HOME']\nexcept:\n # as a backup (e.g. on Alex's computer) set ITHEMAL_HOME as a function of the gitroot\n ITHEMAL_HOME = subprocess.check_output(['git', 'rev-parse', '--show-toplevel'], cwd=_DIRNAME).strip()\n\nPYTORCH_HOME = os.path.join(ITHEMAL_HOME, 'learning', 'pytorch')\n\nEXPERIMENT_BUCKET = 'ithemal-experiments'\nDATASET_BUCKET = 'ithemal-datasets'\nCHECKPOINT_QUEUE = 'checkpoint_queue'\n\nDEBUG = False\n\n_BENCHMARK_CHECKPOINT = os.path.join(\n '${ITHEMAL_HOME}', # whatever ITHEMAL_HOME is on remote machine\n 'learning', 'pytorch', 'experiments', 'benchmarker.py'\n)\n\ndef debug_print(params):\n # type: (List[str]) -> None\n if DEBUG:\n print(' '.join(params))\n\ndef get_s3_url(bucket, path):\n # type: (str, str) -> str\n return urlparse.urlunsplit(['s3', bucket, path, '', ''])\n\ndef mkdir(directory):\n # type: (str) -> None\n try:\n os.makedirs(directory)\n except OSError:\n pass\n\nclass Experiment(object):\n def __init__(self, name, time, data, base_args=[], train_args=[]):\n # type: (str, str, str, List[str], List[str]) -> None\n self.name = name\n self.time = time\n self.data = os.path.basename(data)\n self.base_args = list(map(str, base_args))\n self.train_args = list(map(str, train_args))\n self.proc = None # type: Optional[subprocess.Popen]\n\n @staticmethod\n def make_experiment_from_name_and_time(experiment_name, experiment_time):\n # type: (str, str) -> Experiment\n remote_config_file_url = get_s3_url(EXPERIMENT_BUCKET, os.path.join(experiment_name, experiment_time, 'config.json'))\n local_config_file_url = os.path.join(PYTORCH_HOME, 'saved', experiment_name, experiment_time, 'config.json')\n subprocess.check_call(['aws', 's3', 'cp', remote_config_file_url, local_config_file_url])\n return Experiment.make_experiment_from_config_file(local_config_file_url, experiment_time=experiment_time)\n\n @staticmethod\n def make_experiment_from_name(experiment_name):\n # type: (str) -> Experiment\n experiment_time = datetime.datetime.fromtimestamp(time.time()).isoformat()\n remote_config_file_url = get_s3_url(EXPERIMENT_BUCKET, os.path.join(experiment_name, 'config.json'))\n local_config_file_url = os.path.join(PYTORCH_HOME, 'saved', experiment_name, experiment_time, 'config.json')\n subprocess.check_call(['aws', 's3', 'cp', remote_config_file_url, local_config_file_url])\n return Experiment.make_experiment_from_config_file(local_config_file_url, 
experiment_time=experiment_time)\n\n @staticmethod\n def make_experiment_from_config_file(config_file, experiment_time=None):\n # type: (str, Optional[str]) -> Experiment\n\n with open(config_file) as f:\n config = json.load(f)\n\n if experiment_time is None:\n start_time = datetime.datetime.fromtimestamp(time.time()).isoformat()\n else:\n start_time = experiment_time\n\n return Experiment(\n config['name'],\n start_time,\n config['dataset'],\n config.get('base_args', []),\n config.get('train_args', []),\n )\n\n def config_of_experiment(self):\n # type: () -> Dict[str, Any]\n\n return {\n 'name': self.name,\n 'dataset': self.data,\n 'base_args': self.base_args,\n 'train_args': self.train_args,\n }\n\n def experiment_root_path(self):\n # type: () -> str\n return os.path.join(PYTORCH_HOME, 'saved', self.name, self.time)\n\n def checkpoint_file_dir(self):\n # type: () -> str\n return os.path.join(self.experiment_root_path(), 'checkpoints')\n\n def checkpoint_file_name(self, run_time):\n # type: (float) -> str\n return os.path.join(self.checkpoint_file_dir(), '{}.mdl'.format(run_time))\n\n def s3_root_path(self):\n # type: () -> str\n return get_s3_url(EXPERIMENT_BUCKET, os.path.join(self.name, self.time))\n\n def get_ithemal_command_root(self):\n # type: () -> List[str]\n return [\n PYTHON, os.path.join(PYTORCH_HOME, 'ithemal', 'run_ithemal.py'),\n '--data', os.path.join(PYTORCH_HOME, 'saved', self.data),\n ]\n\n def get_params(self):\n # type: () -> List[str]\n return self.get_ithemal_command_root() + self.base_args + [\n 'train',\n '--experiment-name', self.name,\n '--experiment-time', self.time,\n ] + self.train_args\n\n def download_data(self):\n # type: () -> None\n # download the data if not present on this machine\n data_url = get_s3_url(DATASET_BUCKET, '')\n # sync is smarter than cp, but only works on directories: tell it to only sync that one file\n sync_args = ['aws', 's3', 'sync', data_url, os.path.join(PYTORCH_HOME, 'saved'), '--exclude', '*', '--include', self.data]\n debug_print(sync_args)\n subprocess.check_call(sync_args)\n\n def start_experiment(self):\n # type: () -> None\n self.download_data()\n root = self.experiment_root_path()\n mkdir(root)\n\n params = self.get_params()\n\n with open(os.path.join(root, 'config.json'), 'w') as f:\n json.dump(self.config_of_experiment(), f)\n\n with open(os.path.join(root, 'cmdline'), 'w') as f:\n f.write(' '.join(params))\n\n debug_print(params)\n # open proc, line buffer stdout\n\n self.proc = subprocess.Popen(params, stdout=open(os.path.join(root, 'stdout'), 'w', 1))\n\n def enqueue_checkpoints(self, checkpoint_times):\n # type: (List[str]) -> None\n\n for checkpoint_time in checkpoint_times:\n command_param = ' '.join([_BENCHMARK_CHECKPOINT, self.name, self.time, '--checkpoint', checkpoint_time])\n params = [\n os.path.join(ITHEMAL_HOME, 'aws', 'command_queue.py'),\n 'send', CHECKPOINT_QUEUE, command_param\n ]\n\n debug_print(params)\n subprocess.call(params, stdout=open('/dev/null', 'w'))\n\n def sync_all(self):\n # type: () -> None\n params = ['aws', 's3', 'sync', self.experiment_root_path(), self.s3_root_path()]\n debug_print(params)\n subprocess.check_call(params)\n\n def run_and_sync(self):\n # type: () -> bool\n\n self.start_experiment()\n proc = self.proc\n if proc is None:\n raise Exception('Process not created!')\n\n s3_bucket_checkpoint_path = os.path.join(self.s3_root_path(), 'checkpoints')\n checkpoint_path = self.checkpoint_file_dir()\n mkdir(checkpoint_path)\n\n def sync():\n # type: () -> None\n\n # sync checkpoints, 
capturing the new checkpoints and enqueuing them for validation\n params = ['aws', 's3', 'sync', '--no-progress', checkpoint_path, s3_bucket_checkpoint_path]\n debug_print(params)\n checkpoints_output = subprocess.check_output(params).strip()\n if checkpoints_output:\n print('Checkpoints Output: \"{}\", split: \"{}\"'.format(checkpoints_output, checkpoints_output.strip().split('\\n')))\n checkpoint_files = [line.split()[1] for line in checkpoints_output.split('\\n')]\n checkpoint_times = [os.path.basename(fname)[:-len('.mdl')] for fname in checkpoint_files]\n self.enqueue_checkpoints(checkpoint_times)\n\n self.sync_all()\n\n while proc.poll() is None:\n sync()\n time.sleep(60)\n sync()\n\n for iaca_only in (True, False):\n args = [_BENCHMARK_CHECKPOINT, self.name, self.time]\n if iaca_only:\n args.append('--iaca-only')\n params = [\n os.path.join(ITHEMAL_HOME, 'aws', 'command_queue.py'),\n 'send', CHECKPOINT_QUEUE, ' '.join(args),\n ]\n\n debug_print(params)\n subprocess.call(params, stdout=open('/dev/null', 'w'))\n\n\n params = [os.path.join(ITHEMAL_HOME, 'aws', 'ping_slack.py'), 'Experiment {}_{} finished with exit code {}'.format(\n self.name,\n self.time,\n proc.returncode,\n )]\n debug_print(params)\n subprocess.check_call(params)\n\n return proc.returncode == 0\n\ndef main():\n # type: () -> None\n parser = argparse.ArgumentParser(description='Run experiments, syncing with AWS')\n parser.add_argument('experiment', help='Experiment name or file to run')\n args = parser.parse_args()\n\n if os.path.exists(args.experiment):\n experiment = Experiment.make_experiment_from_config_file(args.experiment)\n else:\n experiment = Experiment.make_experiment_from_name(args.experiment)\n\n try:\n success = experiment.run_and_sync()\n except:\n success = False\n # catch literally anything (including KeyboardInterrupt, SystemExit)\n traceback.print_exc()\n\n if experiment.proc is not None:\n try:\n print('Terminating Ithemal process!')\n experiment.proc.terminate()\n experiment.proc.wait()\n except KeyboardInterrupt:\n print('Force killing Ithemal')\n experiment.proc.kill()\n finally:\n print('Synchronizing files...')\n experiment.sync_all()\n\n if not success:\n sys.exit(1)\n\nif __name__ == '__main__':\n main()\n", "id": "4604539", "language": "Python", "matching_score": 2.108152389526367, "max_stars_count": 105, "path": "learning/pytorch/experiments/experiment.py" }, { "content": "import sys\nimport os\nsys.path.append(os.path.join(os.environ['ITHEMAL_HOME'], 'learning', 'pytorch'))\n\nimport argparse\nimport datetime\nimport random\nimport torch\n\nimport models.graph_models as md\nimport models.train as tr\nimport training\nfrom data.data_extend import DataExtend\nfrom experiments.experiment import Experiment\nfrom ithemal_utils import *\nfrom models.ithemal_extend import RNNExtend, GraphNN, ReductionType\n\n\ndef get_parser():\n parser = argparse.ArgumentParser()\n\n # data\n parser.add_argument('--data', required=True, help='The data file to load from')\n parser.add_argument('--embed-size', help='The size of embedding to use (default: 256)', default=256, type=int)\n parser.add_argument('--hidden-size', help='The size of hidden layer to use (default: 256)', default=256, type=int)\n parser.add_argument('--no-mem', help='Remove all instructions with memory', default=False, action='store_true')\n\n parser.add_argument('--use-rnn', action='store_true', default=False)\n parser.add_argument('--no-residual', default=False, action='store_true', help='Don\\'t use a residual model in Ithemal')\n 
parser.add_argument('--no-dag-rnn', default=False, action='store_true', help='Don\\'t use the DAG-RNN model in Ithemal')\n\n parser.add_argument('--use-scaling', action='store_true', help='Whether to scale model output', default=False)\n parser.add_argument('--scale-amount', type=float, default=1000., help='Amount to scale by')\n\n parser.add_argument('--use-freq', action='store_true', help='Whether to use block frequency', default=False)\n #\n\n sp = parser.add_subparsers(dest='subparser')\n\n # train\n train = sp.add_parser('train', help='Train an ithemal model')\n train.add_argument('--experiment-name', required=True, help='Name of the experiment to run')\n train.add_argument('--experiment-time', required=True, help='Time the experiment was started at')\n train.add_argument('--load-file', help='Start by loading the provided model')\n train.add_argument('--test', action='store_true', help='Test mode', default=False)\n\n train.add_argument('--batch-size', type=int, default=4, help='The batch size to use in train')\n train.add_argument('--epochs', type=int, default=3, help='Number of epochs to run for')\n train.add_argument('--trainers', type=int, default=1, help='Number of trainer processes to use')\n train.add_argument('--threads', type=int, default=4, help='Total number of PyTorch threads to create per trainer')\n train.add_argument('--decay-trainers', action='store_true', default=False, help='Decay the number of trainers at the end of each epoch')\n train.add_argument('--weight-decay', type=float, default=0, help='Coefficient of weight decay (L2 regularization) on model')\n train.add_argument('--initial-lr', type=float, default=0.1, help='Initial learning rate')\n train.add_argument('--decay-lr', action='store_true', default=False, help='Decay the learning rate at the end of each epoch')\n train.add_argument('--momentum', type=float, default=0.9, help='Momentum parameter for SGD')\n train.add_argument('--nesterov', action='store_true', default=False, help='Use Nesterov momentum')\n train.add_argument('--weird-lr', action='store_true', default=False, help='Use unusual LR schedule')\n train.add_argument('--lr-decay-rate', default=1.2, help='LR division rate', type=float)\n #\n\n # GraphNN\n dag_nonlinearity_group = parser.add_mutually_exclusive_group()\n dag_nonlinearity_group.add_argument('--dag-relu-nonlinearity', action='store_const', const=md.NonlinearityType.RELU, dest='dag_nonlinearity')\n dag_nonlinearity_group.add_argument('--dag-tanh-nonlinearity', action='store_const', const=md.NonlinearityType.TANH, dest='dag_nonlinearity')\n dag_nonlinearity_group.add_argument('--dag-sigmoid-nonlinearity', action='store_const', const=md.NonlinearityType.SIGMOID, dest='dag_nonlinearity')\n parser.set_defaults(dag_nonlinearity=None)\n parser.add_argument('--dag-nonlinearity-width', help='The width of the final nonlinearity (default: 128)', default=128, type=int)\n parser.add_argument('--dag-nonlinear-before-max', action='store_true', default=False)\n\n dag_reduction_group = parser.add_mutually_exclusive_group()\n dag_reduction_group.add_argument('--dag-add-reduction', action='store_const', const=ReductionType.ADD, dest='dag_reduction')\n dag_reduction_group.add_argument('--dag-max-reduction', action='store_const', const=ReductionType.MAX, dest='dag_reduction')\n dag_reduction_group.add_argument('--dag-mean-reduction', action='store_const', const=ReductionType.MEAN, dest='dag_reduction')\n dag_reduction_group.add_argument('--dag-weighted-reduction', action='store_const', 
const=ReductionType.WEIGHTED, dest='dag_reduction')\n dag_reduction_group.add_argument('--dag-attention-reduction', action='store_const', const=ReductionType.ATTENTION, dest='dag_reduction')\n parser.set_defaults(dag_reduction=ReductionType.MAX)\n #\n\n # optimizer\n optimizer_group = train.add_mutually_exclusive_group()\n optimizer_group.add_argument('--adam-private', action='store_const', const=tr.OptimizerType.ADAM_PRIVATE, dest='optimizer', help='Use Adam with private moments',\n default=tr.OptimizerType.ADAM_PRIVATE)\n optimizer_group.add_argument('--adam-shared', action='store_const', const=tr.OptimizerType.ADAM_SHARED, dest='optimizer', help='Use Adam with shared moments')\n optimizer_group.add_argument('--sgd', action='store_const', const=tr.OptimizerType.SGD, dest='optimizer', help='Use SGD')\n #\n\n return parser\n\n\ndef get_base_parameters(args):\n base_params = BaseParameters(\n data=args.data,\n embed_mode=None,\n embed_file=None,\n random_edge_freq=None,\n predict_log=None,\n no_residual=args.no_residual,\n no_dag_rnn=args.no_dag_rnn,\n dag_reduction=args.dag_reduction,\n edge_ablation_types=None,\n embed_size=args.embed_size,\n hidden_size=args.hidden_size,\n linear_embeddings=None,\n use_rnn=args.use_rnn,\n rnn_type=None,\n rnn_hierarchy_type=None,\n rnn_connect_tokens=None,\n rnn_skip_connections=None,\n rnn_learn_init=None,\n no_mem=args.no_mem,\n linear_dependencies=None,\n flat_dependencies=None,\n dag_nonlinearity=args.dag_nonlinearity,\n dag_nonlinearity_width=args.dag_nonlinearity_width,\n dag_nonlinear_before_max=args.dag_nonlinear_before_max,\n )\n return base_params\n\n\ndef get_train_parameters(args):\n train_params = TrainParameters(\n experiment_name=args.experiment_name,\n experiment_time=args.experiment_time,\n load_file=args.load_file,\n batch_size=args.batch_size,\n trainers=args.trainers,\n threads=args.threads,\n decay_trainers=args.decay_trainers,\n weight_decay=args.weight_decay,\n initial_lr=args.initial_lr,\n decay_lr=args.decay_lr,\n epochs=args.epochs,\n split=None,\n optimizer=args.optimizer,\n momentum=args.momentum,\n nesterov=args.nesterov,\n weird_lr=args.weird_lr,\n lr_decay_rate=args.lr_decay_rate,\n )\n return train_params\n\n\ndef load_data(params, args):\n # type: (BaseParameters) -> dt.DataCost\n # TODO (thomaseh): finish dataloader\n data = DataExtend(params.data, params.use_rnn, args.use_freq)\n # assert False\n\n return data\n\n\ndef load_model(params, args):\n # type: (BaseParameters) -> md.AbstractGraphModule\n if params.use_rnn:\n rnn_params = md.RnnParameters(\n embedding_size=params.embed_size,\n hidden_size=params.hidden_size,\n num_classes=1,\n connect_tokens=False, # NOT USED\n skip_connections=False, # NOT USED\n hierarchy_type='MULTISCALE', # NOT USED\n rnn_type='LSTM', # NOT USED\n learn_init=True, # NOT USED\n )\n model = RNNExtend(rnn_params, args)\n else:\n model = GraphNN(\n embedding_size=params.embed_size, hidden_size=params.hidden_size,\n num_classes=1, use_residual=not params.no_residual,\n use_dag_rnn=not params.no_dag_rnn, reduction=params.dag_reduction,\n nonlinear_type=params.dag_nonlinearity,\n nonlinear_width=params.dag_nonlinearity_width,\n nonlinear_before_max=params.dag_nonlinear_before_max,\n )\n\n return model\n\n\ndef get_save_directory(exp_name, exp_time):\n now = datetime.datetime.now()\n timestamp = '%d%02d%02d%02d%02d%02d' % (\n now.year, now.month, now.day, now.hour, now.minute, now.second)\n save_path = os.path.join(\n 'learning/pytorch/saved', exp_name, exp_time, 'checkpoints', timestamp)\n return 
save_path\n\n\ndef train(data, model, base_params, train_params, save_dir):\n trainer = training.load_trainer(base_params, train_params, model, data)\n expt = Experiment(\n train_params.experiment_name, train_params.experiment_time,\n base_params.data)\n loss_reporter = training.LossReporter(expt, len(data.train), trainer)\n def report_loss_fn(msg):\n loss_reporter.report_items(msg.n_items, msg.loss)\n\n for epoch_no in range(train_params.epochs):\n loss_reporter.start_epoch(epoch_no + 1, 0)\n random.shuffle(data.train)\n trainer.train(report_loss_fn=report_loss_fn)\n loss_reporter.report()\n # 583 set how often to save models\n if epoch_no % 5 == 0:\n save_file = os.path.join(save_dir, 'epoch_%03d.mdl' % (epoch_no+1,))\n trainer.save_checkpoint(epoch_no, -1, save_file)\n test(trainer, save_dir)\n save_file = os.path.join(save_dir, 'epoch_final.mdl')\n trainer.save_checkpoint(epoch_no, -1, save_file)\n\n return trainer\n\n\ndef test(trainer, save_dir):\n trainer.validate(os.path.join(save_dir, 'results.csv'))\n\n\ndef main():\n # type: () -> None\n args = get_parser().parse_args()\n\n base_params = get_base_parameters(args)\n\n if args.subparser == 'train':\n train_params = get_train_parameters(args)\n\n # load data and model\n print('Loading data and setting up model...')\n data = load_data(base_params, args)\n model = load_model(base_params, args)\n\n if not args.test:\n # train\n print('Training...')\n save_dir = get_save_directory(\n train_params.experiment_name, train_params.experiment_time)\n trainer = train(data, model, base_params, train_params, save_dir)\n else:\n trainer = training.load_trainer(base_params, train_params, model, data)\n trainer.load_checkpoint(args.load_file)\n save_dir = '/'.join(args.load_file.split('/')[:-1])\n\n # test\n print('Testing...')\n test(trainer, save_dir)\n else:\n raise ValueError('Unknown mode \"{}\"'.format(args.subparser))\n\nif __name__ == '__main__':\n main()\n", "id": "2426202", "language": "Python", "matching_score": 7.432061672210693, "max_stars_count": 0, "path": "learning/pytorch/run_ithemal_extend.py" }, { "content": "import sys\nimport os\nsys.path.append(os.path.join(os.environ['ITHEMAL_HOME'], 'learning', 'pytorch'))\n\nimport argparse\nimport time\nimport torch\nimport torch.multiprocessing as mp\ntorch.backends.cudnn.enabled = False\nfrom utils import messages\nimport models.losses as ls\nimport models.train as tr\nfrom tqdm import tqdm\nfrom mpconfig import MPConfig\nfrom typing import Callable, List, Optional, Iterator, Tuple, NamedTuple, Union\nimport random\nimport Queue\nfrom ithemal_utils import *\nimport training\nimport pandas as pd\nimport common_libs.utilities as ut\n\ndef graph_model_benchmark(base_params, benchmark_params):\n # type: (BaseParameters, BenchmarkParameters) -> None\n data = load_data(base_params)\n model = load_model(base_params, data)\n\n train = tr.Train(\n model, data, tr.PredictionType.REGRESSION, ls.mse_loss, 1,\n batch_size=benchmark_params.batch_size, clip=None, opt=tr.OptimizerType.ADAM_PRIVATE, lr=0.01,\n )\n\n model.share_memory()\n\n mp_config = MPConfig(benchmark_params.threads)\n partition_size = benchmark_params.examples // benchmark_params.trainers\n\n processes = []\n\n start_time = time.time()\n\n with mp_config:\n for rank in range(benchmark_params.trainers):\n mp_config.set_env(rank)\n\n partition = (rank * partition_size, (rank + 1) * partition_size)\n\n p = mp.Process(target=train, args=(rank, partition))\n p.daemon = True\n p.start()\n processes.append(p)\n\n for p in processes:\n 
p.join()\n\n end_time = time.time()\n print('Time to process {} examples: {} seconds'.format(\n benchmark_params.examples,\n end_time - start_time,\n ))\n\ndef graph_model_validate(base_params, model_file, iaca_only):\n # type: (BaseParameters, str, bool) -> None\n data = load_data(base_params)\n if iaca_only:\n cnx = ut.create_connection()\n legal_code_ids = set(\n pd.read_sql('SELECT time_id, code_id FROM times WHERE kind=\"iaca\"', cnx)\n .set_index('time_id')\n .code_id\n )\n data.test = [datum for datum in data.test if datum.code_id in legal_code_ids]\n model = load_model(base_params, data)\n\n train = tr.Train(\n model, data, tr.PredictionType.REGRESSION, ls.mse_loss, 1,\n batch_size=1000, clip=None, predict_log=base_params.predict_log,\n )\n\n resultfile = os.environ['ITHEMAL_HOME'] + '/learning/pytorch/results/realtime_results.txt'\n (actual, predicted) = train.validate(resultfile=resultfile, loadfile=model_file)\n\ndef graph_model_dump(base_params, model_file):\n # type: (BaseParameters, str) -> None\n data = load_data(base_params)\n model = load_model(base_params, data)\n dump_model_and_data(model, data, model_file)\n\ndef main():\n # type: () -> None\n parser = argparse.ArgumentParser()\n\n # data arguments\n parser.add_argument('--data', required=True, help='The data file to load from')\n parser.add_argument('--embed-mode', help='The embedding mode to use (default: none)', default='none')\n parser.add_argument('--embed-file', help='The embedding file to use (default: code_delim.emb)',\n default=os.path.join(os.environ['ITHEMAL_HOME'], 'learning', 'pytorch', 'inputs', 'embeddings', 'code_delim.emb'))\n parser.add_argument('--embed-size', help='The size of embedding to use (default: 256)', default=256, type=int)\n parser.add_argument('--hidden-size', help='The size of hidden layer to use (default: 256)', default=256, type=int)\n parser.add_argument('--no-mem', help='Remove all instructions with memory', default=False, action='store_true')\n\n # edge/misc arguments\n parser.add_argument('--random-edge-freq', type=float, default=0.0, help='The fraction of instructions to add an additional random forward edge to (can be >1)')\n parser.add_argument('--no-residual', default=False, action='store_true', help='Don\\'t use a residual model in Ithemal')\n parser.add_argument('--no-dag-rnn', default=False, action='store_true', help='Don\\'t use the DAG-RNN model in Ithemal')\n parser.add_argument('--predict-log', action='store_true', default=False, help='Predict the log of the time')\n parser.add_argument('--linear-embeddings', action='store_true', default=False, help='Use linear embeddings instead of LSTM')\n\n parser.add_argument('--use-rnn', action='store_true', default=False)\n rnn_type_group = parser.add_mutually_exclusive_group()\n rnn_type_group.add_argument('--rnn-normal', action='store_const', const=md.RnnType.RNN, dest='rnn_type')\n rnn_type_group.add_argument('--rnn-lstm', action='store_const', const=md.RnnType.LSTM, dest='rnn_type')\n rnn_type_group.add_argument('--rnn-gru', action='store_const', const=md.RnnType.GRU, dest='rnn_type')\n parser.set_defaults(rnn_type=md.RnnType.LSTM)\n\n rnn_hierarchy_type_group = parser.add_mutually_exclusive_group()\n rnn_hierarchy_type_group.add_argument('--rnn-token', action='store_const', const=md.RnnHierarchyType.NONE, dest='rnn_hierarchy_type')\n rnn_hierarchy_type_group.add_argument('--rnn-dense', action='store_const', const=md.RnnHierarchyType.DENSE, dest='rnn_hierarchy_type')\n rnn_hierarchy_type_group.add_argument('--rnn-multiscale', 
action='store_const', const=md.RnnHierarchyType.MULTISCALE, dest='rnn_hierarchy_type')\n rnn_hierarchy_type_group.add_argument('--rnn-linear-model', action='store_const', const=md.RnnHierarchyType.LINEAR_MODEL, dest='rnn_hierarchy_type')\n rnn_hierarchy_type_group.add_argument('--rnn-mop', action='store_const', const=md.RnnHierarchyType.MOP_MODEL, dest='rnn_hierarchy_type')\n parser.set_defaults(rnn_hierarchy_type=md.RnnHierarchyType.MULTISCALE)\n\n parser.add_argument('--rnn-skip-connections', action='store_true', default=False)\n parser.add_argument('--rnn-learn-init', action='store_true', default=False)\n parser.add_argument('--rnn-connect-tokens', action='store_true', default=False)\n\n dag_nonlinearity_group = parser.add_mutually_exclusive_group()\n dag_nonlinearity_group.add_argument('--dag-relu-nonlinearity', action='store_const', const=md.NonlinearityType.RELU, dest='dag_nonlinearity')\n dag_nonlinearity_group.add_argument('--dag-tanh-nonlinearity', action='store_const', const=md.NonlinearityType.TANH, dest='dag_nonlinearity')\n dag_nonlinearity_group.add_argument('--dag-sigmoid-nonlinearity', action='store_const', const=md.NonlinearityType.SIGMOID, dest='dag_nonlinearity')\n parser.set_defaults(dag_nonlinearity=None)\n parser.add_argument('--dag-nonlinearity-width', help='The width of the final nonlinearity (default: 128)', default=128, type=int)\n parser.add_argument('--dag-nonlinear-before-max', action='store_true', default=False)\n\n data_dependency_group = parser.add_mutually_exclusive_group()\n data_dependency_group.add_argument('--linear-dependencies', action='store_true', default=False)\n data_dependency_group.add_argument('--flat-dependencies', action='store_true', default=False)\n\n dag_reduction_group = parser.add_mutually_exclusive_group()\n dag_reduction_group.add_argument('--dag-add-reduction', action='store_const', const=md.ReductionType.ADD, dest='dag_reduction')\n dag_reduction_group.add_argument('--dag-max-reduction', action='store_const', const=md.ReductionType.MAX, dest='dag_reduction')\n dag_reduction_group.add_argument('--dag-mean-reduction', action='store_const', const=md.ReductionType.MEAN, dest='dag_reduction')\n dag_reduction_group.add_argument('--dag-attention-reduction', action='store_const', const=md.ReductionType.ATTENTION, dest='dag_reduction')\n parser.set_defaults(dag_reduction=md.ReductionType.MAX)\n\n def add_edge_ablation(ablation):\n # type: (EdgeAblationType) -> None\n parser.add_argument('--{}'.format(ablation.value), action='append_const', dest='edge_ablations', const=ablation)\n\n add_edge_ablation(EdgeAblationType.TRANSITIVE_REDUCTION)\n add_edge_ablation(EdgeAblationType.TRANSITIVE_CLOSURE)\n add_edge_ablation(EdgeAblationType.ADD_LINEAR_EDGES)\n add_edge_ablation(EdgeAblationType.ONLY_LINEAR_EDGES)\n add_edge_ablation(EdgeAblationType.NO_EDGES)\n\n sp = parser.add_subparsers(dest='subparser')\n\n train = sp.add_parser('train', help='Train an ithemal model')\n train.add_argument('--experiment-name', required=True, help='Name of the experiment to run')\n train.add_argument('--experiment-time', required=True, help='Time the experiment was started at')\n train.add_argument('--load-file', help='Start by loading the provided model')\n\n train.add_argument('--batch-size', type=int, default=4, help='The batch size to use in train')\n train.add_argument('--epochs', type=int, default=3, help='Number of epochs to run for')\n train.add_argument('--trainers', type=int, default=4, help='Number of trainer processes to use')\n 
train.add_argument('--threads', type=int, default=4, help='Total number of PyTorch threads to create per trainer')\n train.add_argument('--decay-trainers', action='store_true', default=False, help='Decay the number of trainers at the end of each epoch')\n train.add_argument('--weight-decay', type=float, default=0, help='Coefficient of weight decay (L2 regularization) on model')\n train.add_argument('--initial-lr', type=float, default=0.1, help='Initial learning rate')\n train.add_argument('--decay-lr', action='store_true', default=False, help='Decay the learning rate at the end of each epoch')\n train.add_argument('--momentum', type=float, default=0.9, help='Momentum parameter for SGD')\n train.add_argument('--nesterov', action='store_true', default=False, help='Use Nesterov momentum')\n train.add_argument('--weird-lr', action='store_true', default=False, help='Use unusual LR schedule')\n train.add_argument('--lr-decay-rate', default=1.2, help='LR division rate', type=float)\n\n split_group = train.add_mutually_exclusive_group()\n split_group.add_argument(\n '--split-dist', action='store_const', const=[0.5, 0.25, 0.125, .0625, .0625],\n help='Split data partitions between trainers via a distribution',\n )\n split_group.add_argument('--split-size', type=int, help='Partitions of a fixed size')\n\n optimizer_group = train.add_mutually_exclusive_group()\n optimizer_group.add_argument('--adam-private', action='store_const', const=tr.OptimizerType.ADAM_PRIVATE, dest='optimizer', help='Use Adam with private moments',\n default=tr.OptimizerType.ADAM_PRIVATE)\n optimizer_group.add_argument('--adam-shared', action='store_const', const=tr.OptimizerType.ADAM_SHARED, dest='optimizer', help='Use Adam with shared moments')\n optimizer_group.add_argument('--sgd', action='store_const', const=tr.OptimizerType.SGD, dest='optimizer', help='Use SGD')\n\n benchmark = sp.add_parser('benchmark', help='Benchmark train performance of an Ithemal setup')\n benchmark.add_argument('--n-examples', type=int, default=1000, help='Number of examples to use in benchmark')\n benchmark.add_argument('--trainers', type=int, default=4, help='Number of trainer processes to use')\n benchmark.add_argument('--threads', type=int, default=4, help='Total number of PyTorch threads to create per trainer')\n benchmark.add_argument('--batch-size', type=int, default=4, help='The batch size to use in train')\n\n validate = sp.add_parser('validate', help='Get performance of a dataset')\n validate.add_argument('--load-file', help='File to load the model from')\n validate.add_argument('--iaca-only', help='Only report accuracy on IACA datapoints', action='store_true', default=False)\n\n dump = sp.add_parser('dump', help='Dump the dataset to a file')\n dump.add_argument('--dump-file', help='File to dump the model to', required=True)\n\n args = parser.parse_args()\n\n base_params = BaseParameters(\n data=args.data,\n embed_mode=args.embed_mode,\n embed_file=args.embed_file,\n random_edge_freq=args.random_edge_freq,\n predict_log=args.predict_log,\n no_residual=args.no_residual,\n no_dag_rnn=args.no_dag_rnn,\n dag_reduction=args.dag_reduction,\n edge_ablation_types=args.edge_ablations or [],\n embed_size=args.embed_size,\n hidden_size=args.hidden_size,\n linear_embeddings=args.linear_embeddings,\n use_rnn=args.use_rnn,\n rnn_type=args.rnn_type,\n rnn_hierarchy_type=args.rnn_hierarchy_type,\n rnn_connect_tokens=args.rnn_connect_tokens,\n rnn_skip_connections=args.rnn_skip_connections,\n rnn_learn_init=args.rnn_learn_init,\n no_mem=args.no_mem,\n 
linear_dependencies=args.linear_dependencies,\n flat_dependencies=args.flat_dependencies,\n dag_nonlinearity=args.dag_nonlinearity,\n dag_nonlinearity_width=args.dag_nonlinearity_width,\n dag_nonlinear_before_max=args.dag_nonlinear_before_max,\n )\n\n if args.subparser == 'train':\n if args.split_dist:\n split = args.split_dist\n else:\n split = args.split_size or 1000\n\n train_params = TrainParameters(\n experiment_name=args.experiment_name,\n experiment_time=args.experiment_time,\n load_file=args.load_file,\n batch_size=args.batch_size,\n trainers=args.trainers,\n threads=args.threads,\n decay_trainers=args.decay_trainers,\n weight_decay=args.weight_decay,\n initial_lr=args.initial_lr,\n decay_lr=args.decay_lr,\n epochs=args.epochs,\n split=split,\n optimizer=args.optimizer,\n momentum=args.momentum,\n nesterov=args.nesterov,\n weird_lr=args.weird_lr,\n lr_decay_rate=args.lr_decay_rate,\n )\n training.run_training_coordinator(base_params, train_params)\n\n elif args.subparser == 'validate':\n graph_model_validate(base_params, args.load_file, args.iaca_only)\n\n elif args.subparser == 'dump':\n graph_model_dump(base_params, args.dump_file)\n\n elif args.subparser == 'benchmark':\n benchmark_params = BenchmarkParameters(\n batch_size=args.batch_size,\n trainers=args.trainers,\n threads=args.threads,\n examples=args.n_examples,\n )\n graph_model_benchmark(base_params, benchmark_params)\n\n else:\n raise ValueError('Unknown mode \"{}\"'.format(args.subparser))\n\nif __name__ == '__main__':\n main()\n", "id": "3125986", "language": "Python", "matching_score": 7.833711624145508, "max_stars_count": 105, "path": "learning/pytorch/run_ithemal.py" }, { "content": "import sys\nimport os\nsys.path.append(os.path.join(os.environ['ITHEMAL_HOME'], 'learning', 'pytorch'))\n\nfrom enum import Enum\nimport torch\nfrom typing import Any, Callable, List, Optional, Iterator, Tuple, NamedTuple, Union\n\nimport data.data_cost as dt\nimport models.graph_models as md\nimport models.train as tr\n\nclass EdgeAblationType(Enum):\n TRANSITIVE_REDUCTION = 'transitive-reduction'\n TRANSITIVE_CLOSURE = 'transitive-closure'\n ADD_LINEAR_EDGES = 'add-linear-edges'\n ONLY_LINEAR_EDGES = 'only-linear-edges'\n NO_EDGES = 'no-edges'\n\nBaseParameters = NamedTuple('BaseParameters', [\n ('data', str),\n ('embed_mode', str),\n ('embed_file', str),\n ('random_edge_freq', float),\n ('predict_log', bool),\n ('no_residual', bool),\n ('no_dag_rnn', bool),\n ('dag_reduction', md.ReductionType),\n ('edge_ablation_types', List[EdgeAblationType]),\n ('embed_size', int),\n ('hidden_size', int),\n ('linear_embeddings', bool),\n ('use_rnn', bool),\n ('rnn_type', md.RnnType),\n ('rnn_hierarchy_type', md.RnnHierarchyType),\n ('rnn_connect_tokens', bool),\n ('rnn_skip_connections', bool),\n ('rnn_learn_init', bool),\n ('no_mem', bool),\n ('linear_dependencies', bool),\n ('flat_dependencies', bool),\n ('dag_nonlinearity', md.NonlinearityType),\n ('dag_nonlinearity_width', int),\n ('dag_nonlinear_before_max', bool),\n])\n\nTrainParameters = NamedTuple('TrainParameters', [\n ('experiment_name', str),\n ('experiment_time', str),\n ('load_file', Optional[str]),\n ('batch_size', int),\n ('trainers', int),\n ('threads', int),\n ('decay_trainers', bool),\n ('weight_decay', float),\n ('initial_lr', float),\n ('decay_lr', bool),\n ('epochs', int),\n ('split', Union[int, List[float]]),\n ('optimizer', tr.OptimizerType),\n ('momentum', float),\n ('nesterov', bool),\n ('weird_lr', bool),\n ('lr_decay_rate', float),\n])\n\nBenchmarkParameters = 
NamedTuple('BenchmarkParameters', [\n ('batch_size', int),\n ('trainers', int),\n ('threads', int),\n ('examples', int),\n])\n\nPredictorDump = NamedTuple('PredictorDump', [\n ('model', md.AbstractGraphModule),\n ('dataset_params', Any),\n])\n\n\ndef ablate_data(data, edge_ablation_types, random_edge_freq):\n # type: (dt.DataCost, List[EdgeAblationType], float) -> None\n\n for edge_ablation_type in edge_ablation_types:\n if edge_ablation_type == EdgeAblationType.TRANSITIVE_REDUCTION:\n for data_item in data.data:\n data_item.block.transitive_reduction()\n elif edge_ablation_type == EdgeAblationType.TRANSITIVE_CLOSURE:\n for data_item in data.data:\n data_item.block.transitive_closure()\n elif edge_ablation_type == EdgeAblationType.ADD_LINEAR_EDGES:\n for data_item in data.data:\n data_item.block.linearize_edges()\n elif edge_ablation_type == EdgeAblationType.ONLY_LINEAR_EDGES:\n for data_item in data.data:\n data_item.block.remove_edges()\n data_item.block.linearize_edges()\n elif edge_ablation_type == EdgeAblationType.NO_EDGES:\n for data_item in data.data:\n data_item.block.remove_edges()\n\n if random_edge_freq > 0:\n for data_item in data.data:\n data_item.block.random_forward_edges(random_edge_freq / len(data_item.block.instrs))\n\ndef load_data(params):\n # type: (BaseParameters) -> dt.DataCost\n data = dt.load_dataset(params.data)\n\n def filter_data(filt):\n # type: (Callable[[dt.DataItem], bool]) -> None\n data.data = [d for d in data.data if filt(d)]\n data.train = [d for d in data.train if filt(d)]\n data.test = [d for d in data.test if filt(d)]\n\n if params.no_mem:\n filter_data(lambda d: not d.block.has_mem())\n\n ablate_data(data, params.edge_ablation_types, params.random_edge_freq)\n\n if params.linear_dependencies:\n filter_data(lambda d: d.block.has_linear_dependencies())\n\n if params.flat_dependencies:\n filter_data(lambda d: d.block.has_no_dependencies())\n\n return data\n\ndef load_model(params, data):\n # type: (BaseParameters, dt.DataCost) -> md.AbstractGraphModule\n\n if params.use_rnn:\n rnn_params = md.RnnParameters(\n embedding_size=params.embed_size,\n hidden_size=params.hidden_size,\n num_classes=1,\n connect_tokens=params.rnn_connect_tokens,\n skip_connections=params.rnn_skip_connections,\n hierarchy_type=params.rnn_hierarchy_type,\n rnn_type=params.rnn_type,\n learn_init=params.rnn_learn_init,\n )\n model = md.RNN(rnn_params)\n else:\n model = md.GraphNN(embedding_size=params.embed_size, hidden_size=params.hidden_size, num_classes=1,\n use_residual=not params.no_residual, linear_embed=params.linear_embeddings,\n use_dag_rnn=not params.no_dag_rnn, reduction=params.dag_reduction,\n nonlinear_type=params.dag_nonlinearity, nonlinear_width=params.dag_nonlinearity_width,\n nonlinear_before_max=params.dag_nonlinear_before_max,\n )\n\n model.set_learnable_embedding(mode=params.embed_mode, dictsize=628 or max(data.hot_idx_to_token) + 1)\n\n return model\n\ndef dump_model_and_data(model, data, fname):\n # type: (md.AbstractGraphMode, dt.DataCost, str) -> None\n try:\n os.makedirs(os.path.dirname(fname))\n except OSError:\n pass\n torch.save(PredictorDump(\n model=model,\n dataset_params=data.dump_dataset_params(),\n ), fname)\n\ndef load_model_and_data(fname):\n # type: (str) -> (md.AbstractGraphMode, dt.DataCost)\n dump = torch.load(fname)\n data = dt.DataInstructionEmbedding()\n data.read_meta_data()\n data.load_dataset_params(dump.dataset_params)\n return (dump.model, data)\n", "id": "7528925", "language": "Python", "matching_score": 4.143949508666992, 
"max_stars_count": 105, "path": "learning/pytorch/ithemal_utils.py" }, { "content": "import sys\nimport os\nsys.path.append(os.path.join(os.environ['ITHEMAL_HOME'], 'learning', 'pytorch'))\n\nfrom enum import Enum, unique\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport common_libs.utilities as ut\nimport data.data_cost as dt\nimport torch.autograd as autograd\nimport torch.optim as optim\nimport math\nimport numpy as np\nfrom typing import Any, Callable, Dict, List, NamedTuple, Optional, Union, Tuple\nfrom . import model_utils\n\nclass AbstractGraphModule(nn.Module):\n\n def __init__(self, embedding_size, hidden_size, num_classes):\n # type: (int, int, int) -> None\n super(AbstractGraphModule, self).__init__()\n\n self.embedding_size = embedding_size\n self.num_classes = num_classes\n self.hidden_size = hidden_size\n\n def set_learnable_embedding(self, mode, dictsize, seed = None):\n # type: (str, int, Optional[int]) -> None\n\n self.mode = mode\n\n if mode != 'learnt':\n embedding = nn.Embedding(dictsize, self.embedding_size)\n\n if mode == 'none':\n print 'learn embeddings form scratch...'\n initrange = 0.5 / self.embedding_size\n embedding.weight.data.uniform_(-initrange, initrange)\n self.final_embeddings = embedding\n elif mode == 'seed':\n print 'seed by word2vec vectors....'\n embedding.weight.data = torch.FloatTensor(seed)\n self.final_embeddings = embedding\n elif mode == 'learnt':\n print 'using learnt word2vec embeddings...'\n self.final_embeddings = seed\n else:\n print 'embedding not selected...'\n exit()\n\n def dump_shared_params(self):\n # type: () -> Dict[str, Any]\n return model_utils.dump_shared_params(self)\n\n def load_shared_params(self, params):\n # type: (Dict[str, Any]) -> None\n model_utils.load_shared_params(self, params)\n\n def load_checkpoint_file(self, fname):\n self.load_state_dict(torch.load(fname)['model'])\n\n def load_state_dict(self, state_dict):\n model_dict = self.state_dict()\n new_model_dict = {k: v for (k, v) in state_dict.items() if k in model_dict}\n model_dict.update(new_model_dict)\n super(AbstractGraphModule, self).load_state_dict(model_dict)\n\n def init_hidden(self):\n # type: () -> Tuple[nn.Parameter, nn.Parameter]\n\n return (\n nn.Parameter(torch.zeros(1, 1, self.hidden_size, requires_grad=True)),\n nn.Parameter(torch.zeros(1, 1, self.hidden_size, requires_grad=True)),\n )\n\n def remove_refs(self, item):\n # type: (dt.DataItem) -> None\n pass\n\n@unique\nclass ReductionType(Enum):\n MAX = 0\n ADD = 1\n MEAN = 2\n ATTENTION = 3\n\n@unique\nclass NonlinearityType(Enum):\n RELU = 0\n SIGMOID = 1\n TANH = 2\n\nclass GraphNN(AbstractGraphModule):\n\n def __init__(self, embedding_size, hidden_size, num_classes, use_residual=True, linear_embed=False, use_dag_rnn=True, reduction=ReductionType.MAX, nonlinear_width=128, nonlinear_type=NonlinearityType.RELU, nonlinear_before_max=False):\n # type: (int, int, int, bool, bool, bool, ReductionType, int, NonlinearityType, bool) -> None\n super(GraphNN, self).__init__(embedding_size, hidden_size, num_classes)\n\n assert use_residual or use_dag_rnn, 'Must use some type of predictor'\n\n self.use_residual = use_residual\n self.linear_embed = linear_embed\n self.use_dag_rnn = use_dag_rnn\n\n #lstm - input size, hidden size, num layers\n self.lstm_token = nn.LSTM(self.embedding_size, self.hidden_size)\n self.lstm_ins = nn.LSTM(self.hidden_size, self.hidden_size)\n\n # linear weight for instruction embedding\n self.opcode_lin = nn.Linear(self.embedding_size, self.hidden_size)\n 
self.src_lin = nn.Linear(self.embedding_size, self.hidden_size)\n self.dst_lin = nn.Linear(self.embedding_size, self.hidden_size)\n # for sequential model\n self.opcode_lin_seq = nn.Linear(self.embedding_size, self.hidden_size)\n self.src_lin_seq = nn.Linear(self.embedding_size, self.hidden_size)\n self.dst_lin_seq = nn.Linear(self.embedding_size, self.hidden_size)\n\n #linear layer for final regression result\n self.linear = nn.Linear(self.hidden_size,self.num_classes)\n\n self.nonlinear_1 = nn.Linear(self.hidden_size, nonlinear_width)\n self.nonlinear_2 = nn.Linear(nonlinear_width, self.num_classes)\n\n #lstm - for sequential model\n self.lstm_token_seq = nn.LSTM(self.embedding_size, self.hidden_size)\n self.lstm_ins_seq = nn.LSTM(self.hidden_size, self.hidden_size)\n self.linear_seq = nn.Linear(self.hidden_size, self.num_classes)\n\n self.reduction_typ = reduction\n self.attention_1 = nn.Linear(self.hidden_size, self.hidden_size // 2)\n self.attention_2 = nn.Linear(self.hidden_size // 2, 1)\n\n self.nonlinear_premax_1 = nn.Linear(self.hidden_size, self.hidden_size * 2)\n self.nonlinear_premax_2 = nn.Linear(self.hidden_size * 2, self.hidden_size)\n\n self.nonlinear_seq_1 = nn.Linear(self.hidden_size, nonlinear_width)\n self.nonlinear_seq_2 = nn.Linear(nonlinear_width, self.num_classes)\n\n self.use_nonlinear = nonlinear_type is not None\n\n if nonlinear_type == NonlinearityType.RELU:\n self.final_nonlinearity = torch.relu\n elif nonlinear_type == NonlinearityType.SIGMOID:\n self.final_nonlinearity = torch.sigmoid\n elif nonlinear_type == NonlinearityType.TANH:\n self.final_nonlinearity = torch.tanh\n\n self.nonlinear_before_max = nonlinear_before_max\n\n def reduction(self, items):\n # type: (List[torch.tensor]) -> torch.tensor\n if len(items) == 0:\n return self.init_hidden()[0]\n elif len(items) == 1:\n return items[0]\n\n def binary_reduction(reduction):\n # type: (Callable[[torch.tensor, torch.tensor], torch.tensor]) -> torch.tensor\n final = items[0]\n for item in items[1:]:\n final = reduction(final, item)\n return final\n\n stacked_items = torch.stack(items)\n\n if self.reduction_typ == ReductionType.MAX:\n return binary_reduction(torch.max)\n elif self.reduction_typ == ReductionType.ADD:\n return binary_reduction(torch.add)\n elif self.reduction_typ == ReductionType.MEAN:\n return binary_reduction(torch.add) / len(items)\n elif self.reduction_typ == ReductionType.ATTENTION:\n preds = torch.stack([self.attention_2(torch.relu(self.attention_1(item))) for item in items])\n probs = F.softmax(preds, dim=0)\n print('{}, {}, {}'.format(\n probs.shape,\n stacked_items.shape,\n stacked_items * probs\n ))\n return (stacked_items * probs).sum(dim=0)\n else:\n raise ValueError()\n\n def remove_refs(self, item):\n # type: (dt.DataItem) -> None\n\n for instr in item.block.instrs:\n if instr.lstm != None:\n del instr.lstm\n if instr.hidden != None:\n del instr.hidden\n instr.lstm = None\n instr.hidden = None\n instr.tokens = None\n\n def init_bblstm(self, item):\n # type: (dt.DataItem) -> None\n\n self.remove_refs(item)\n for i, instr in enumerate(item.block.instrs):\n tokens = item.x[i]\n if self.mode == 'learnt':\n instr.tokens = [self.final_embeddings[token] for token in tokens]\n else:\n instr.tokens = self.final_embeddings(torch.LongTensor(tokens))\n\n def create_graphlstm(self, block):\n # type: (ut.BasicBlock) -> torch.tensor\n\n leaves = block.find_leaves()\n\n leaf_hidden = []\n for leaf in leaves:\n hidden = self.create_graphlstm_rec(leaf)\n leaf_hidden.append(hidden[0].squeeze())\n\n 
if self.nonlinear_before_max:\n leaf_hidden = [\n self.nonlinear_premax_2(torch.relu(self.nonlinear_premax_1(h)))\n for h in leaf_hidden\n ]\n\n return self.reduction(leaf_hidden)\n\n def get_instruction_embedding_linear(self, instr, seq_model):\n # type: (ut.Instruction, bool) -> torch.tensor\n\n if seq_model:\n opcode_lin = self.opcode_lin_seq\n src_lin = self.src_lin_seq\n dst_lin = self.dst_lin_seq\n else:\n opcode_lin = self.opcode_lin\n src_lin = self.src_lin\n dst_lin = self.dst_lin\n\n opc_embed = instr.tokens[0]\n src_embed = instr.tokens[2:2+len(instr.srcs)]\n dst_embed = instr.tokens[-1-len(instr.dsts):-1]\n\n opc_hidden = opcode_lin(opc_embed)\n\n src_hidden = torch.zeros(self.embedding_size)\n for s in src_embed:\n src_hidden = torch.max(F.relu(src_lin(s)))\n\n dst_hidden = torch.zeros(self.embedding_size)\n for d in dst_embed:\n dst_hidden = torch.max(F.relu(dst_lin(d)))\n\n return (opc_hidden + src_hidden + dst_hidden).unsqueeze(0).unsqueeze(0)\n\n\n def get_instruction_embedding_lstm(self, instr, seq_model):\n # type: (ut.Instruction, bool) -> torch.tensor\n if seq_model:\n lstm = self.lstm_token_seq\n else:\n lstm = self.lstm_token\n\n _, hidden = lstm(instr.tokens.unsqueeze(1), self.init_hidden())\n return hidden[0]\n\n def get_instruction_embedding(self, instr, seq_model):\n # type: (ut.Instruction, bool) -> torch.tensor\n if self.linear_embed:\n return self.get_instruction_embedding_linear(instr, seq_model)\n else:\n return self.get_instruction_embedding_lstm(instr, seq_model)\n\n def create_graphlstm_rec(self, instr):\n # type: (ut.Instruction) -> torch.tensor\n\n if instr.hidden != None:\n return instr.hidden\n\n parent_hidden = [self.create_graphlstm_rec(parent) for parent in instr.parents]\n\n if len(parent_hidden) > 0:\n hs, cs = list(zip(*parent_hidden))\n in_hidden_ins = (self.reduction(hs), self.reduction(cs))\n else:\n in_hidden_ins = self.init_hidden()\n\n ins_embed = self.get_instruction_embedding(instr, False)\n\n out_ins, hidden_ins = self.lstm_ins(ins_embed, in_hidden_ins)\n instr.hidden = hidden_ins\n\n return instr.hidden\n\n def create_residual_lstm(self, block):\n # type: (ut.BasicBlock) -> torch.tensor\n\n ins_embeds = autograd.Variable(torch.zeros(len(block.instrs),self.embedding_size))\n for i, ins in enumerate(block.instrs):\n ins_embeds[i] = self.get_instruction_embedding(ins, True).squeeze()\n\n ins_embeds_lstm = ins_embeds.unsqueeze(1)\n\n _, hidden_ins = self.lstm_ins_seq(ins_embeds_lstm, self.init_hidden())\n\n seq_ret = hidden_ins[0].squeeze()\n\n return seq_ret\n\n def forward(self, item):\n # type: (dt.DataItem) -> torch.tensor\n\n self.init_bblstm(item)\n\n final_pred = torch.zeros(self.num_classes).squeeze()\n\n if self.use_dag_rnn:\n graph = self.create_graphlstm(item.block)\n if self.use_nonlinear and not self.nonlinear_before_max:\n final_pred += self.nonlinear_2(self.final_nonlinearity(self.nonlinear_1(graph))).squeeze()\n else:\n final_pred += self.linear(graph).squeeze()\n\n if self.use_residual:\n sequential = self.create_residual_lstm(item.block)\n if self.use_nonlinear:\n final_pred += self.nonlinear_seq_2(self.final_nonlinearity(self.nonlinear_seq_1(sequential))).squeeze()\n else:\n final_pred += self.linear(sequential).squeeze()\n\n return final_pred.squeeze()\n\n@unique\nclass RnnHierarchyType(Enum):\n NONE = 0\n DENSE = 1\n MULTISCALE = 2\n LINEAR_MODEL = 3\n MOP_MODEL = 4\n\n@unique\nclass RnnType(Enum):\n RNN = 0\n LSTM = 1\n GRU = 2\n\nRnnParameters = NamedTuple('RnnParameters', [\n ('embedding_size', int),\n 
('hidden_size', int),\n ('num_classes', int),\n ('connect_tokens', bool),\n ('skip_connections', bool),\n ('learn_init', bool),\n ('hierarchy_type', RnnHierarchyType),\n ('rnn_type', RnnType),\n])\n\n\nclass RNN(AbstractGraphModule):\n\n def __init__(self, params):\n # type: (RnnParameters) -> None\n super(RNN, self).__init__(params.embedding_size, params.hidden_size, params.num_classes)\n\n self.params = params\n\n if params.rnn_type == RnnType.RNN:\n self.token_rnn = nn.RNN(self.embedding_size, self.hidden_size)\n self.instr_rnn = nn.RNN(self.hidden_size, self.hidden_size)\n elif params.rnn_type == RnnType.LSTM:\n self.token_rnn = nn.LSTM(self.embedding_size, self.hidden_size)\n self.instr_rnn = nn.LSTM(self.hidden_size, self.hidden_size)\n elif params.rnn_type == RnnType.GRU:\n self.token_rnn = nn.GRU(self.embedding_size, self.hidden_size)\n self.instr_rnn = nn.GRU(self.hidden_size, self.hidden_size)\n else:\n raise ValueError('Unknown RNN type {}'.format(params.rnn_type))\n\n self._token_init = self.rnn_init_hidden()\n self._instr_init = self.rnn_init_hidden()\n\n self.linear = nn.Linear(self.hidden_size, self.num_classes)\n\n def rnn_init_hidden(self):\n # type: () -> Union[Tuple[nn.Parameter, nn.Parameter], nn.Parameter]\n\n hidden = self.init_hidden()\n\n # for h in hidden:\n # torch.nn.init.kaiming_uniform_(h)\n\n if self.params.rnn_type == RnnType.LSTM:\n return hidden\n else:\n return hidden[0]\n\n def get_token_init(self):\n # type: () -> torch.tensor\n if self.params.learn_init:\n return self._token_init\n else:\n return self.rnn_init_hidden()\n\n def get_instr_init(self):\n # type: () -> torch.tensor\n if self.params.learn_init:\n return self._instr_init\n else:\n return self.rnn_init_hidden()\n\n def pred_of_instr_chain(self, instr_chain, save_embed):\n # type: (torch.tensor) -> torch.tensor\n _, final_state_packed = self.instr_rnn(instr_chain, self.get_instr_init())\n if self.params.rnn_type == RnnType.LSTM:\n final_state = final_state_packed[0]\n else:\n final_state = final_state_packed\n\n if save_embed is not None:\n embed = final_state.squeeze()\n save_path = os.path.join(\n os.path.dirname(save_embed),\n os.path.basename('.'.join(save_embed.split('.')[:-1]) + '.embed'))\n torch.save(embed, save_path)\n\n return self.linear(final_state.squeeze()).squeeze()\n\n\n def forward(self, item, save_embed=None):\n # type: (dt.DataItem) -> torch.tensor\n\n token_state = self.get_token_init()\n\n token_output_map = {} # type: Dict[ut.Instruction, torch.tensor]\n token_state_map = {} # type: Dict[ut.Instruction, torch.tensor]\n\n for instr, token_inputs in zip(item.block.instrs, item.x):\n if not self.params.connect_tokens:\n token_state = self.get_token_init()\n\n if self.params.skip_connections and self.params.hierarchy_type == RnnHierarchyType.NONE:\n for parent in instr.parents:\n parent_state = token_state_map[parent]\n\n if self.params.rnn_type == RnnType.LSTM:\n token_state = (\n token_state[0] + parent_state[0],\n token_state[1] + parent_state[1],\n )\n else:\n token_state = token_state + parent_state\n\n tokens = self.final_embeddings(torch.LongTensor(token_inputs)).unsqueeze(1)\n output, state = self.token_rnn(tokens, token_state)\n token_output_map[instr] = output\n token_state_map[instr] = state\n\n if self.params.hierarchy_type == RnnHierarchyType.NONE:\n final_state_packed = token_state_map[item.block.instrs[-1]]\n\n if self.params.rnn_type == RnnType.LSTM:\n final_state = final_state_packed[0]\n else:\n final_state = final_state_packed\n return 
self.linear(final_state.squeeze()).squeeze()\n\n instr_chain = torch.stack([token_output_map[instr][-1] for instr in item.block.instrs])\n\n if self.params.hierarchy_type == RnnHierarchyType.DENSE:\n instr_chain = torch.stack([state for instr in item.block.instrs for state in token_output_map[instr]])\n elif self.params.hierarchy_type == RnnHierarchyType.LINEAR_MODEL:\n return sum(\n self.linear(st).squeeze()\n for st in instr_chain\n )\n elif self.params.hierarchy_type == RnnHierarchyType.MOP_MODEL:\n preds = torch.stack([\n self.pred_of_instr_chain(torch.stack([token_output_map[instr][-1] for instr in instrs]))\n for instrs in item.block.paths_of_block()\n ])\n return torch.max(preds)\n\n return self.pred_of_instr_chain(instr_chain, save_embed)\n\nclass Fasthemal(AbstractGraphModule):\n def __init__(self, embedding_size, hidden_size, num_classes):\n # type: (int, int, int) -> None\n super(Fasthemal, self).__init__(embedding_size, hidden_size, num_classes)\n self.token_rnn = nn.LSTM(self.embedding_size, self.hidden_size)\n self.instr_rnn = nn.LSTM(self.hidden_size, self.hidden_size)\n self.linear = nn.Linear(self.hidden_size, self.num_classes)\n\n def forward(self, item):\n # type: (dt.DataItem) -> torch.tensor\n\n embeds = []\n\n for token_inputs in item.x:\n tokens = self.final_embeddings(torch.LongTensor(token_inputs)).unsqueeze(1)\n _, (token_state, _) = self.token_rnn(tokens)\n embeds.append(token_state.squeeze(1))\n\n z = torch.stack(embeds)\n _, instr_state = self.instr_rnn(z)\n\n return self.linear(instr_state[0].squeeze()).squeeze()\n", "id": "3041481", "language": "Python", "matching_score": 7.311338424682617, "max_stars_count": 0, "path": "learning/pytorch/models/graph_models.py" }, { "content": "import sys\nimport os\nsys.path.append(os.path.join(os.environ['ITHEMAL_HOME'], 'learning', 'pytorch'))\nfrom enum import Enum, unique\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport models.graph_models as md\nfrom graph_models import AbstractGraphModule\n\n\nclass RNNExtend(AbstractGraphModule):\n\n def __init__(self, params, args):\n # type: (RnnParameters) -> None\n super(RNNExtend, self).__init__(params.embedding_size, params.hidden_size, params.num_classes)\n\n self.params = params\n self.args = args\n\n # assuming LSTM for now\n self.bb_rnn = nn.LSTM(self.embedding_size, self.hidden_size)\n\n self._bb_init = self.rnn_init_hidden()\n\n self.linear = nn.Linear(self.hidden_size, self.num_classes)\n\n def rnn_init_hidden(self):\n # type: () -> Union[Tuple[nn.Parameter, nn.Parameter], nn.Parameter]\n return self.init_hidden()\n\n def get_bb_init(self):\n # type: () -> torch.tensor\n return self._bb_init\n\n def forward(self, batch):\n # type: (dt.DataItem) -> torch.tensor\n # embed size should be (# bbs, batch size, hidden size)\n embed = torch.stack(batch.x).unsqueeze(1)\n _, final_state_packed = self.bb_rnn(embed, self.get_bb_init())\n final_state = final_state_packed[0]\n pred = self.linear(final_state.squeeze()).squeeze()\n if self.args.use_scaling:\n pred = pred * self.args.scale_amount\n\n return pred\n\n\n@unique\nclass ReductionType(Enum):\n MAX = 0\n ADD = 1\n MEAN = 2\n ATTENTION = 3\n WEIGHTED = 4\n\n\nclass GraphNN(AbstractGraphModule):\n\n def __init__(self, embedding_size, hidden_size, num_classes, use_residual=True, use_dag_rnn=True, reduction=ReductionType.MAX, nonlinear_width=128, nonlinear_type=md.NonlinearityType.RELU, nonlinear_before_max=False):\n # type: (int, int, int, bool, bool, bool, ReductionType, int, NonlinearityType, 
bool) -> None\n super(GraphNN, self).__init__(embedding_size, hidden_size, num_classes)\n\n assert use_residual or use_dag_rnn, 'Must use some type of predictor'\n\n self.use_residual = use_residual\n self.use_dag_rnn = use_dag_rnn\n\n #lstm - input size, hidden size, num layers\n self.lstm_token = nn.LSTM(self.embedding_size, self.hidden_size)\n self.lstm_ins = nn.LSTM(self.hidden_size, self.hidden_size)\n\n #linear layer for final regression result\n self.linear = nn.Linear(self.hidden_size,self.num_classes)\n\n self.nonlinear_1 = nn.Linear(self.hidden_size, nonlinear_width)\n self.nonlinear_2 = nn.Linear(nonlinear_width, self.num_classes)\n\n #lstm - for sequential model\n self.lstm_ins_seq = nn.LSTM(self.hidden_size, self.hidden_size)\n self.linear_seq = nn.Linear(self.hidden_size, self.num_classes)\n\n self.reduction_typ = reduction\n self.attention_1 = nn.Linear(self.hidden_size, self.hidden_size // 2)\n self.attention_2 = nn.Linear(self.hidden_size // 2, 1)\n\n self.nonlinear_premax_1 = nn.Linear(self.hidden_size, self.hidden_size * 2)\n self.nonlinear_premax_2 = nn.Linear(self.hidden_size * 2, self.hidden_size)\n\n self.nonlinear_seq_1 = nn.Linear(self.hidden_size, nonlinear_width)\n self.nonlinear_seq_2 = nn.Linear(nonlinear_width, self.num_classes)\n\n self.use_nonlinear = nonlinear_type is not None\n\n if nonlinear_type == md.NonlinearityType.RELU:\n self.final_nonlinearity = torch.relu\n elif nonlinear_type == md.NonlinearityType.SIGMOID:\n self.final_nonlinearity = torch.sigmoid\n elif nonlinear_type == md.NonlinearityType.TANH:\n self.final_nonlinearity = torch.tanh\n\n self.nonlinear_before_max = nonlinear_before_max\n\n def reduction(self, items, weights=None):\n # type: (List[torch.tensor]) -> torch.tensor\n if len(items) == 0:\n return self.init_hidden()[0]\n elif len(items) == 1:\n return items[0]\n\n def binary_reduction(reduction, weights=None):\n # type: (Callable[[torch.tensor, torch.tensor], torch.tensor]) -> torch.tensor\n if weights is None:\n final = items[0]\n for item in items[1:]:\n final = reduction(final, item)\n else:\n final = items[0] * weights[0]\n for i, item in enumerate(items[1:]):\n final = torch.add(final, item * weights[i])\n return final\n\n stacked_items = torch.stack(items)\n\n if self.reduction_typ == ReductionType.MAX:\n return binary_reduction(torch.max)\n elif self.reduction_typ == ReductionType.ADD:\n return binary_reduction(torch.add)\n elif self.reduction_typ == ReductionType.MEAN:\n return binary_reduction(torch.add) / len(items)\n elif self.reduction_typ == ReductionType.ATTENTION:\n preds = torch.stack([self.attention_2(torch.relu(self.attention_1(item))) for item in items])\n probs = F.softmax(preds, dim=0)\n # print('{}, {}, {}'.format(\n # probs.shape,\n # stacked_items.shape,\n # stacked_items * probs\n # ))\n return (stacked_items * probs).sum(dim=0)\n elif self.reduction_typ == ReductionType.WEIGHTED:\n return binary_reduction(torch.add, weights)\n else:\n raise ValueError()\n\n def remove_refs(self, item):\n # type: (dt.DataItem) -> None\n for bblock in item.function.bblocks:\n if bblock.lstm != None:\n del bblock.lstm\n if bblock.hidden != None:\n del bblock.hidden\n bblock.lstm = None\n bblock.hidden = None\n\n def init_funclstm(self, item):\n # type: (dt.DataItem) -> None\n self.remove_refs(item)\n\n def create_graphlstm(self, function):\n # type: (ut.BasicBlock) -> torch.tensor\n leaves = function.find_leaves()\n\n leaf_hidden = []\n for leaf in leaves:\n hidden = self.create_graphlstm_rec(leaf)\n 
leaf_hidden.append(hidden[0].squeeze())\n\n if self.nonlinear_before_max:\n leaf_hidden = [\n self.nonlinear_premax_2(torch.relu(self.nonlinear_premax_1(h)))\n for h in leaf_hidden\n ]\n\n return self.reduction(leaf_hidden)\n\n def create_graphlstm_rec(self, bblock):\n # type: (ut.Instruction) -> torch.tensor\n if bblock.hidden != None:\n return bblock.hidden\n\n parent_hidden = [self.create_graphlstm_rec(parent) for parent in bblock.parents]\n weights = bblock.parents_probs \\\n if self.reduction_typ == ReductionType.WEIGHTED else None\n\n if len(parent_hidden) > 0:\n hs, cs = list(zip(*parent_hidden))\n in_hidden_ins = (\n self.reduction(hs, weights=weights),\n self.reduction(cs, weights=weights),\n )\n else:\n in_hidden_ins = self.init_hidden()\n\n if bblock.embed is None:\n hidden_ins = in_hidden_ins\n else:\n out_ins, hidden_ins = self.lstm_ins(\n bblock.embed.unsqueeze(0).unsqueeze(0), in_hidden_ins)\n bblock.hidden = hidden_ins\n\n return bblock.hidden\n\n def create_residual_lstm(self, function):\n # type: (ut.BasicBlock) -> torch.tensor\n ins_embeds_lstm = function.get_embedding().unsqueeze(1)\n\n _, hidden_ins = self.lstm_ins_seq(ins_embeds_lstm, self.init_hidden())\n\n seq_ret = hidden_ins[0].squeeze()\n\n return seq_ret\n\n def forward(self, item):\n # type: (dt.DataItem) -> torch.tensor\n self.init_funclstm(item)\n\n final_pred = torch.zeros(self.num_classes).squeeze()\n\n if self.use_dag_rnn:\n graph = self.create_graphlstm(item.function)\n if self.use_nonlinear and not self.nonlinear_before_max:\n final_pred += self.nonlinear_2(self.final_nonlinearity(self.nonlinear_1(graph))).squeeze()\n else:\n final_pred += self.linear(graph).squeeze()\n\n if self.use_residual:\n sequential = self.create_residual_lstm(item.function)\n if self.use_nonlinear:\n final_pred += self.nonlinear_seq_2(self.final_nonlinearity(self.nonlinear_seq_1(sequential))).squeeze()\n else:\n final_pred += self.linear(sequential).squeeze()\n\n return final_pred.squeeze()\n\n\nclass BasicBlock:\n\n def __init__(self, embed, name='', bb_id=None):\n self.embed = embed\n self.name = name\n self.bb_id = bb_id\n\n self.parents = []\n self.children = []\n self.parents_probs = []\n self.children_probs = []\n\n #for lstms\n self.lstm = None\n self.hidden = None\n\n def print_bb(self):\n print('#####')\n print('Function: %s, BasicBlock %d' % (self.name, self.bb_id))\n print('')\n if len(self.parents) > 0:\n print('Parents:')\n for i, parent in enumerate(self.parents):\n print(parent.__str__() + ', Edge Prob = %.2f' % (self.parents_probs[i]))\n print('')\n if len(self.children) > 0:\n print('Children:')\n for i, child in enumerate(self.children):\n print(child.__str__() + ', Edge Prob = %.2f' % (self.children_probs[i]))\n print('')\n print('#####')\n\n def __str__(self):\n return 'Function: %s, BasicBlock %d: %d parents and %d children' % (\n self.name, self.bb_id, len(self.parents), len(self.children))\n\n\nclass Function:\n\n def __init__(self, bblocks, name='', block_freq=None):\n self.bblocks = bblocks\n self.name = name\n self.block_freq = block_freq\n\n def num_bblocks(self):\n return len(self.bblocks)\n\n def print_function(self):\n for bblock in self.bblocks:\n bblock.print_bb()\n\n def get_embedding(self):\n return torch.stack(\n [bb.embed for bb in self.bblocks if bb.embed is not None])\n\n def linearize_edges(self):\n for fst, snd in zip(self.bblocks, self.bblocks[1:]):\n if snd not in fst.children:\n fst.children.append(snd)\n if fst not in snd.parents:\n snd.parents.append(fst)\n\n def find_roots(self):\n 
roots = []\n for bblock in self.bblocks:\n if len(bblock.parents) == 0:\n roots.append(bblock)\n return roots\n\n def find_leaves(self):\n leaves = []\n for bblock in self.bblocks:\n if len(bblock.children) == 0:\n leaves.append(bblock)\n return leaves\n", "id": "12726572", "language": "Python", "matching_score": 3.7687642574310303, "max_stars_count": 0, "path": "learning/pytorch/models/ithemal_extend.py" }, { "content": "#this file contains models that I have tried out for different tasks, which are reusable\n#plus it has the training framework for those models given data - each model has its own data requirements\n\nimport numpy as np\nimport common_libs.utilities as ut\nimport random\nimport torch.nn as nn\nimport torch.autograd as autograd\nimport torch.optim as optim\nimport torch\nimport math\n\n\nclass ModelAbs(nn.Module):\n\n \"\"\"\n Abstract model without the forward method.\n\n lstm for processing tokens in sequence and linear layer for output generation\n lstm is a uni-directional single layer lstm\n\n num_classes = 1 - for regression\n num_classes = n - for classifying into n classes\n\n \"\"\"\n\n def __init__(self, hidden_size, embedding_size, num_classes):\n\n super(ModelAbs, self).__init__()\n self.hidden_size = hidden_size\n self.name = 'should be overridden'\n\n #numpy array with batchsize, embedding_size\n self.embedding_size = embedding_size\n self.num_classes = num_classes\n\n #lstm - input size, hidden size, num layers\n self.lstm_token = nn.LSTM(self.embedding_size, self.hidden_size)\n\n #hidden state for the rnn\n self.hidden_token = self.init_hidden()\n\n #linear layer for regression - in_features, out_features\n self.linear = nn.Linear(self.hidden_size, self.num_classes)\n\n def init_hidden(self):\n return (autograd.Variable(torch.zeros(1, 1, self.hidden_size)),\n autograd.Variable(torch.zeros(1, 1, self.hidden_size)))\n\n\n #this is to set learnable embeddings\n def set_learnable_embedding(self, mode, dictsize, seed = None):\n\n self.mode = mode\n\n if mode != 'learnt':\n embedding = nn.Embedding(dictsize, self.embedding_size)\n\n if mode == 'none':\n print 'learn embeddings form scratch...'\n initrange = 0.5 / self.embedding_size\n embedding.weight.data.uniform_(-initrange, initrange)\n self.final_embeddings = embedding\n elif mode == 'seed':\n print 'seed by word2vec vectors....'\n embedding.weight.data = torch.FloatTensor(seed)\n self.final_embeddings = embedding\n else:\n print 'using learnt word2vec embeddings...'\n self.final_embeddings = seed\n\n #remove any references you may have that inhibits garbage collection\n def remove_refs(self, item):\n return\n\nclass ModelSequentialRNN(ModelAbs):\n\n \"\"\"\n Prediction at every hidden state of the unrolled rnn.\n\n Input - sequence of tokens processed in sequence by the lstm\n Output - predictions at the every hidden state\n\n uses lstm and linear setup of ModelAbs\n each hidden state is given as a seperate batch to the linear layer\n\n \"\"\"\n\n def __init__(self, hidden_size, embedding_size, num_classes, intermediate):\n super(ModelSequentialRNN, self).__init__(hidden_size, embedding_size, num_classes)\n if intermediate:\n self.name = 'sequential RNN intermediate'\n else:\n self.name = 'sequential RNN'\n self.intermediate = intermediate\n\n def forward(self, item):\n\n self.hidden_token = self.init_hidden()\n\n #convert to tensor\n if self.mode == 'learnt':\n acc_embeds = []\n for token in item.x:\n acc_embeds.append(self.final_embeddings[token])\n embeds = torch.FloatTensor(acc_embeds)\n else:\n embeds = 
self.final_embeddings(torch.LongTensor(item.x))\n\n\n #prepare for lstm - seq len, batch size, embedding size\n seq_len = embeds.shape[0]\n embeds_for_lstm = embeds.unsqueeze(1)\n\n #lstm outputs\n #output, (h_n,c_n)\n #output - (seq_len, batch = 1, hidden_size * directions) - h_t for each t final layer only\n #h_n - (layers * directions, batch = 1, hidden_size) - h_t for t = seq_len\n #c_n - (layers * directions, batch = 1, hidden_size) - c_t for t = seq_len\n\n #lstm inputs\n #input, (h_0, c_0)\n #input - (seq_len, batch, input_size)\n\n lstm_out, self.hidden_token = self.lstm_token(embeds_for_lstm, self.hidden_token)\n\n if self.intermediate:\n #input to linear - seq_len, hidden_size (seq_len is the batch size for the linear layer)\n #output - seq_len, num_classes\n values = self.linear(lstm_out[:,0,:].squeeze()).squeeze()\n else:\n #input to linear - hidden_size\n #output - num_classes\n values = self.linear(self.hidden_token[0].squeeze()).squeeze()\n\n return values\n\nclass ModelHierarchicalRNN(ModelAbs):\n\n \"\"\"\n Prediction at every hidden state of the unrolled rnn for instructions.\n\n Input - sequence of tokens processed in sequence by the lstm but seperated into instructions\n Output - predictions at the every hidden state\n\n lstm predicting instruction embedding for sequence of tokens\n lstm_ins processes sequence of instruction embeddings\n linear layer process hidden states to produce output\n\n \"\"\"\n\n def __init__(self, hidden_size, embedding_size, num_classes, intermediate):\n super(ModelHierarchicalRNN, self).__init__(hidden_size, embedding_size, num_classes)\n\n self.hidden_ins = self.init_hidden()\n self.lstm_ins = nn.LSTM(self.hidden_size, self.hidden_size)\n\n if intermediate:\n self.name = 'hierarchical RNN intermediate'\n else:\n self.name = 'hierarchical RNN'\n self.intermediate = intermediate\n\n def copy(self, model):\n\n self.linear = model.linear\n self.lstm_token = model.lstm_token\n self.lstm_ins = model.lstm_ins\n\n def forward(self, item):\n\n self.hidden_token = self.init_hidden()\n self.hidden_ins = self.init_hidden()\n\n ins_embeds = autograd.Variable(torch.zeros(len(item.x),self.embedding_size))\n for i, ins in enumerate(item.x):\n\n if self.mode == 'learnt':\n acc_embeds = []\n for token in ins:\n acc_embeds.append(self.final_embeddings[token])\n token_embeds = torch.FloatTensor(acc_embeds)\n else:\n token_embeds = self.final_embeddings(torch.LongTensor(ins))\n\n #token_embeds = torch.FloatTensor(ins)\n token_embeds_lstm = token_embeds.unsqueeze(1)\n out_token, hidden_token = self.lstm_token(token_embeds_lstm,self.hidden_token)\n ins_embeds[i] = hidden_token[0].squeeze()\n\n ins_embeds_lstm = ins_embeds.unsqueeze(1)\n\n out_ins, hidden_ins = self.lstm_ins(ins_embeds_lstm, self.hidden_ins)\n\n if self.intermediate:\n values = self.linear(out_ins[:,0,:]).squeeze()\n else:\n values = self.linear(hidden_ins[0].squeeze()).squeeze()\n\n return values\n\n\n\nclass ModelHierarchicalRNNRelational(ModelAbs):\n\n def __init__(self, embedding_size, num_classes):\n super(ModelHierarchicalRNNRelational, self).__init__(embedding_size, num_classes)\n\n self.hidden_ins = self.init_hidden()\n self.lstm_ins = nn.LSTM(self.hidden_size, self.hidden_size)\n\n self.linearg1 = nn.Linear(2 * self.hidden_size, self.hidden_size)\n self.linearg2 = nn.Linear(self.hidden_size, self.hidden_size)\n\n\n def forward(self, item):\n\n self.hidden_token = self.init_hidden()\n self.hidden_ins = self.init_hidden()\n\n ins_embeds = 
autograd.Variable(torch.zeros(len(item.x),self.hidden_size))\n for i, ins in enumerate(item.x):\n\n if self.mode == 'learnt':\n acc_embeds = []\n for token in ins:\n acc_embeds.append(self.final_embeddings[token])\n token_embeds = torch.FloatTensor(acc_embeds)\n else:\n token_embeds = self.final_embeddings(torch.LongTensor(ins))\n\n #token_embeds = torch.FloatTensor(ins)\n token_embeds_lstm = token_embeds.unsqueeze(1)\n out_token, hidden_token = self.lstm_token(token_embeds_lstm,self.hidden_token)\n ins_embeds[i] = hidden_token[0].squeeze()\n\n ins_embeds_lstm = ins_embeds.unsqueeze(1)\n\n out_ins, hidden_ins = self.lstm_ins(ins_embeds_lstm, self.hidden_ins)\n\n seq_len = len(item.x)\n\n g_variable = autograd.Variable(torch.zeros(self.hidden_size))\n\n for i in range(seq_len):\n for j in range(i,seq_len):\n\n concat = torch.cat((out_ins[i].squeeze(),out_ins[j].squeeze()),0)\n g1 = nn.functional.relu(self.linearg1(concat))\n g2 = nn.functional.relu(self.linearg2(g1))\n\n g_variable += g2\n\n\n output = self.linear(g_variable)\n\n return output\n\n\nclass ModelSequentialRNNComplex(nn.Module):\n\n \"\"\"\n Prediction using the final hidden state of the unrolled rnn.\n\n Input - sequence of tokens processed in sequence by the lstm\n Output - the final value to be predicted\n\n we do not derive from ModelAbs, but instead use a bidirectional, multi layer\n lstm and a deep MLP with non-linear activation functions to predict the final output\n\n \"\"\"\n\n def __init__(self, embedding_size):\n super(ModelFinalHidden, self).__init__()\n\n self.name = 'sequential RNN'\n self.hidden_size = 256\n self.embedding_size = embedding_size\n\n self.layers = 2\n self.directions = 1\n self.is_bidirectional = (self.directions == 2)\n self.lstm_token = torch.nn.LSTM(input_size = self.embedding_size,\n hidden_size = self.hidden_size,\n num_layers = self.layers,\n bidirectional = self.is_bidirectional)\n self.linear1 = nn.Linear(self.layers * self. 
directions * self.hidden_size, self.hidden_size)\n self.linear2 = nn.Linear(self.hidden_size,1)\n self.hidden_token = self.init_hidden()\n\n def init_hidden(self):\n return (autograd.Variable(torch.zeros(self.layers * self.directions, 1, self.hidden_size)),\n autograd.Variable(torch.zeros(self.layers * self.directions, 1, self.hidden_size)))\n\n def forward(self, item):\n\n self.hidden_token = self.init_hidden()\n\n #convert to tensor\n if self.mode == 'learnt':\n acc_embeds = []\n for token in item.x:\n acc_embeds.append(self.final_embeddings[token])\n embeds = torch.FloatTensor(acc_embeds)\n else:\n embeds = self.final_embeddings(torch.LongTensor(item.x))\n\n\n #prepare for lstm - seq len, batch size, embedding size\n seq_len = embeds.shape[0]\n embeds_for_lstm = embeds.unsqueeze(1)\n\n lstm_out, self.hidden_token = self.lstm_token(embeds_for_lstm, self.hidden_token)\n\n f1 = nn.functional.relu(self.linear1(self.hidden_token[0].squeeze().view(-1)))\n f2 = self.linear2(f1)\n return f2\n", "id": "35232", "language": "Python", "matching_score": 2.225501537322998, "max_stars_count": 105, "path": "learning/pytorch/models/rnn_models.py" }, { "content": "import torch\nfrom models.graph_models import RnnParameters\nfrom models.ithemal_extend import RNNExtend\n\nif __name__ == '__main__':\n rnn_params = RnnParameters(\n embedding_size=256,\n hidden_size=256,\n num_classes=1,\n connect_tokens=False, # NOT USED\n skip_connections=False, # NOT USED\n hierarchy_type='MULTISCALE', # NOT USED\n rnn_type='LSTM', # NOT USED\n learn_init=True, # NOT USED\n )\n model = RNNExtend(rnn_params)\n test_input = torch.rand(5, 1, 256)\n test_output = model(test_input)\n print(test_output.item())\n", "id": "7289669", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "learning/pytorch/test_rnnextend.py" }, { "content": "from . 
import cityscapes", "id": "6463179", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "usr_dir/data_generators/__init__.py" }, { "content": "import csv\nimport json\nimport os\nimport random\nimport torch\nimport numpy as np\n\nfrom models.ithemal_extend import BasicBlock, Function\n\nclass DataItem:\n\n def __init__(self, x, y, function, code_id):\n self.x = x\n self.y = y\n self.function = function\n self.code_id = code_id\n\n\nclass DataExtend(object):\n\n def __init__(self, data_path, use_rnn=True, use_freq=False):\n self.load_data(data_path, use_rnn, use_freq)\n\n def load_data(self, data_path, use_rnn, use_freq):\n self.data, self.train, self.test = [], [], []\n data = []\n with open(os.path.join(data_path, 'labels.csv')) as csvfile:\n csvreader = csv.reader(csvfile, delimiter=' ', quotechar='|')\n first_flag = True\n for row in csvreader:\n if first_flag:\n first_flag = False\n continue\n row = row[0].split(',')\n data.append(row)\n for d in data:\n # get path to function basic blocks\n func_path = os.path.join(data_path, d[0])\n # get list of embedding paths\n bbs = [bb for bb in os.listdir(func_path) if bb.endswith('.embed')]\n bbs.sort(key=lambda x: int(x.strip('.embed')))\n # load basic block embeddings\n x = [torch.load(os.path.join(func_path, bb)) for bb in bbs]\n if len(x) == 0:\n continue\n # create DataItem\n if use_rnn:\n # regular RNN\n self.data.append(\n DataItem(x, float(d[1]), Function([], d[0], None), None))\n else:\n # GraphNN\n # filter out the one example that breaks the code\n if func_path == 'data/bbs/cbench-telecom-gsm/rpe/RPE_grid_positioning':\n continue\n # create BasicBlock objects for each block\n basicblocks = []\n basicblocks_d = {}\n for bb in bbs:\n embed = torch.load(os.path.join(func_path, bb))\n bb_id = int(bb.strip('.embed'))\n basicblocks.append(BasicBlock(embed, d[0], bb_id))\n basicblocks_d[bb_id] = basicblocks[-1]\n # read CFG file\n cfg = json.load(\n open(os.path.join(func_path, 'CFG_collapsed.json')))\n if use_freq:\n # read block frequency file\n block_freq = json.load(\n open(os.path.join(func_path,\n 'block_execution_counts.json')))\n else:\n block_freq = None\n # set children, parents, and edge probs for each basic block\n for basicblock in basicblocks_d.values():\n for dest in cfg[str(basicblock.bb_id)]:\n if len(dest) == 0 or dest[0] not in basicblocks_d:\n continue\n basicblock.children.append(basicblocks_d[dest[0]])\n basicblock.children_probs.append(dest[1])\n basicblocks_d[dest[0]].parents.append(basicblock)\n basicblocks_d[dest[0]].parents_probs.append(dest[1])\n self.data.append(\n DataItem(\n x, float(d[1]),\n Function(basicblocks, d[0], block_freq), None))\n # split data into train and val\n idx = int(len(self.data) * 0.8)\n self.train = self.data[:idx]\n # apply transformation to labels\n # self.get_train_stats()\n for ex in self.train:\n ex.y = self.transform_label(ex.y)\n\n self.test = self.data[idx:]\n\n def get_train_stats(self):\n train_ys = [ex.y for ex in self.train]\n self.train_mean = np.mean(train_ys)\n self.train_std = np.std(train_ys)\n\n def transform_label(self, y):\n # return (y - self.train_mean) / self.train_std\n return np.log(y)\n\n def inverse_label_transform(self, y):\n # return y * self.train_std + self.train_mean\n return torch.exp(y)\n", "id": "3047185", "language": "Python", "matching_score": 0.6098576784133911, "max_stars_count": 0, "path": "learning/pytorch/data/data_extend.py" }, { "content": "# coding=utf-8\n# Copyright 2019 The Tensor2Tensor Authors.\n#\n# Licensed under 
the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"PPO binary over a gym env.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\n\nfrom absl import app\nfrom absl import flags\nimport jax\nfrom jax.config import config\nfrom tensor2tensor.trax import layers\nfrom tensor2tensor.trax.rlax import ppo\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\"env_name\", None, \"Name of the environment to make.\")\nflags.DEFINE_string(\"t2t_gym_env\", None, \"Name of the T2TGymEnv to make.\")\nflags.DEFINE_integer(\"epochs\", 100, \"Number of epochs to run for.\")\nflags.DEFINE_integer(\"random_seed\", 0, \"Random seed.\")\nflags.DEFINE_integer(\"batch_size\", 32, \"Batch of trajectories needed.\")\nflags.DEFINE_integer(\"num_optimizer_steps\", 100, \"Number of optimizer steps.\")\nflags.DEFINE_integer(\"boundary\", 20,\n \"We pad trajectories at integer multiples of this number.\")\nflags.DEFINE_integer(\"max_timestep\", None,\n \"If set to an integer, maximum number of time-steps in a \"\n \"trajectory.\")\nflags.DEFINE_float(\"policy_and_value_net_learning_rate\", 1e-3, \"Learning rate.\")\nflags.DEFINE_float(\"policy_net_learning_rate\", 3e-4,\n \"Learning rate for the policy net only.\")\nflags.DEFINE_float(\"value_net_learning_rate\", 1e-3,\n \"Learning rate for the value net only.\")\nflags.DEFINE_boolean(\"jax_debug_nans\", False,\n \"Setting to true will help to debug nans and disable jit.\")\nflags.DEFINE_boolean(\"disable_jit\", False, \"Setting to true will disable jit.\")\nflags.DEFINE_boolean(\"combined_policy_and_value_function\", False,\n \"If True there is a single network that determines policy\"\n \"and values.\")\nflags.DEFINE_integer(\"flatten_non_batch_time_dims\", False,\n \"If true, we flatten except the first two dimensions.\")\n\n\ndef common_layers():\n cur_layers = []\n if FLAGS.flatten_non_batch_time_dims:\n cur_layers = [layers.Div(divisor=255.0), layers.Flatten(num_axis_to_keep=2)]\n return cur_layers + [layers.Dense(16), layers.Relu(),\n layers.Dense(4), layers.Relu()]\n\n\ndef run_training_loop():\n \"\"\"Run the PPO training loop.\"\"\"\n\n policy_net_fun = None\n value_net_fun = None\n policy_and_value_net_fun = None\n policy_optimizer_fun = None\n value_optimizer_fun = None\n policy_and_value_optimizer_fun = None\n\n if FLAGS.combined_policy_and_value_function:\n policy_and_value_net_fun = functools.partial(\n ppo.policy_and_value_net, bottom_layers=common_layers())\n policy_and_value_optimizer_fun = get_optimizer_fun(\n FLAGS.policy_and_value_net_learning_rate)\n else:\n policy_net_fun = functools.partial(ppo.policy_net,\n bottom_layers=common_layers())\n value_net_fun = functools.partial(ppo.value_net,\n bottom_layers=common_layers())\n policy_optimizer_fun = get_optimizer_fun(FLAGS.policy_net_learning_rate)\n value_optimizer_fun = get_optimizer_fun(FLAGS.value_net_learning_rate)\n\n ppo.training_loop(\n env_name=FLAGS.env_name,\n epochs=FLAGS.epochs,\n policy_net_fun=policy_net_fun,\n 
value_net_fun=value_net_fun,\n policy_and_value_net_fun=policy_and_value_net_fun,\n policy_optimizer_fun=policy_optimizer_fun,\n value_optimizer_fun=value_optimizer_fun,\n policy_and_value_optimizer_fun=policy_and_value_optimizer_fun,\n batch_size=FLAGS.batch_size,\n num_optimizer_steps=FLAGS.num_optimizer_steps,\n boundary=FLAGS.boundary,\n max_timestep=FLAGS.max_timestep,\n random_seed=FLAGS.random_seed)\n\n\ndef get_optimizer_fun(learning_rate):\n return functools.partial(ppo.optimizer_fun, step_size=learning_rate)\n\n\ndef main(argv):\n del argv\n\n if FLAGS.jax_debug_nans:\n config.update(\"jax_debug_nans\", True)\n\n if FLAGS.jax_debug_nans or FLAGS.disable_jit:\n with jax.disable_jit():\n run_training_loop()\n else:\n run_training_loop()\n\nif __name__ == \"__main__\":\n app.run(main)\n", "id": "1506297", "language": "Python", "matching_score": 0.8909597396850586, "max_stars_count": 0, "path": "tensor2tensor/trax/rlax/ppo_main.py" }, { "content": "import os\nfrom typing import Any, Optional, Tuple\n\nclass MPConfig(object):\n THREADS_KEY = \"OMP_NUM_THREADS\"\n AFFINITY_KEY = \"KMP_AFFINITY\"\n\n # PyTorch starts 2 of its own threads for each trainer, so we actually want to start 2 fewer threads\n PYTORCH_THREAD_OFFSET = 2\n\n\n def __init__(self, threads):\n # type: (int) -> None\n assert 2 <= threads\n\n self.threads = threads\n self.saved_env = None # type: Optional[Tuple[Optional[str], Optional[str]]]\n\n def __enter__(self):\n # type: () -> None\n threads = os.environ.get(MPConfig.THREADS_KEY)\n affinity = os.environ.get(MPConfig.AFFINITY_KEY)\n\n self.saved_env = (threads, affinity)\n\n def set_env(self, trainer_id):\n # type: (int) -> None\n\n # set the OMP config, to get threads on sequential CPUs, ideally on the same socket\n os.environ[MPConfig.THREADS_KEY] = str(self.threads - MPConfig.PYTORCH_THREAD_OFFSET)\n os.environ[MPConfig.AFFINITY_KEY] = ','.join(map(str, [\n 'verbose',\n 'granularity=fine',\n 'compact',\n '1',\n trainer_id * self.threads\n ]))\n\n def __exit__(self,exc_type, exc_value, traceback):\n # type: (Any, Any, Any) -> None\n assert self.saved_env\n\n (threads, affinity) = self.saved_env\n\n if threads is not None:\n os.environ[MPConfig.THREADS_KEY] = threads\n\n if affinity is not None:\n os.environ[MPConfig.AFFINITY_KEY] = affinity\n\n self.saved_env = None\n", "id": "8911991", "language": "Python", "matching_score": 0.5623299479484558, "max_stars_count": 105, "path": "learning/pytorch/mpconfig.py" }, { "content": "import os\nimport sys\nimport subprocess\n\ndef execute(command):\n\tprocess = subprocess.Popen(args=command, stdout=subprocess.PIPE, shell=True)\n\treturn process.communicate()[0]\n\n\ndef extract_sections(filename):\n\tsection_info = execute(\"objdump -h \" + filename).strip().split('\\n')\n\tsection_list = []\n\tsection_vmas = {}\n\tfor line in section_info:\n\t\tinfo = line.strip().split()\n\t\tif len(info) < 7:\n\t\t\tcontinue\n\t\t\n\t\tsection_name = info[1]\n\t\tif not(section_name == \".text\" or section_name.startswith(\".text.\")):\n\t\t\tcontinue\t\n\t\ttry:\n\t\t\tsection_size = int(info[2], 16)\n\t\t\tsection_offset = int(info[5], 16)\n\t\t\tsection_vma = int(info[3], 16)\n\t\texcept ValueError as verr:\n\t\t\tcontinue\n\t\tsection_list += [(section_name, section_size, section_offset)]\n\t\tsection_vmas[section_name] = section_vma\n\treturn (section_list, section_vmas)\n\ndef extract_symbols(filename, section_vma):\n\tsymbols_info = execute(\"objdump -t \" + filename).strip().split('\\n')\n\tsymbol_list = []\n\tfor line in 
symbols_info:\n\t\tinfo = line.strip().split()\n\t\tif len(info) < 6:\n\t\t\tcontinue\n\t\tif info[2] != \"F\":\n\t\t\tcontinue\n\t\tsection_name = info[3]\n\t\tif not (section_name == \".text\" or section_name.startswith(\".text.\")):\n\t\t\tcontinue\n\t\t\t\n\t\ttry:\n\t\t\tsymbol_offset_in_section = int(info[0], 16) - section_vma[section_name]\n\t\t\tsymbol_size = int(info[4], 16)\n\t\texcept ValueError as verr:\n\t\t\tcontinue\n\t\tsymbol_name = info[5]\n\t\tif symbol_name == \".hidden\" and len(info) > 6:\n\t\t\tsymbol_name = info[6]\n\t\tsymbol_list += [(symbol_name, section_name, symbol_offset_in_section, symbol_size)]\n\n\treturn symbol_list\t\t\t\n\n\ndef create_bin(filename, outfilename, sections_list):\n\tinput_file = open(filename, \"rb\")\n\toutput_file = open(outfilename, \"wb\")\n\trunning_offset = 0\n\tsection_offsets = {}\n\tfor section in sections_list:\n\t\ttry:\n\t\t\tinput_file.seek(section[2])\n\t\t\tsection_data = input_file.read(section[1])\n\t\t\tif len(section_data) != section[1]:\n\t\t\t\traise ValueError(\"Inadequte length in input file\")\n\t\t\toutput_file.write(section_data)\n\t\t\tsection_offsets[section[0]] = running_offset\n\t\t\trunning_offset += section[1]\n\t\texcept Exception as e:\n\t\t\tprint \"Error while copying section \" + section[0] + \" to output file\"\n\t\t\tprint e\n\t\t\texit(-1)\n\n\tinput_file.close()\n\toutput_file.close()\n\t\t\n\treturn section_offsets\n\t\t\t\n\ndef create_metadata(metadata_filename, symbol_list, section_offsets):\n\tmetadata_file = open(metadata_filename, \"w\")\n\tfor symbol in symbol_list:\n\t\ttry:\n\t\t\tactual_offset = section_offsets[symbol[1]] + symbol[2]\n\t\texcept Exception as e:\n\t\t\tprint \"Error while creating metadata for function \" + symbol[0] \n\t\t\tprint e\n\t\t\texit(-1)\n\t\tmetadata_file.write(symbol[0] + \"\\t\" + str(actual_offset) + \"\\t\" + str(symbol[3]) + \"\\n\")\n\t\n\n\ndef main():\n\tif len(sys.argv) < 4:\n\t\tprint \"Usage: \" + sys.argv[0] + \" <input filename> <output binary filename> <output metadata filename>\"\n\t\texit(-1)\n\tinput_filename = sys.argv[1]\n\toutput_filename = sys.argv[2]\n\toutput_metadata_filename = sys.argv[3]\n\n\tsection_list, section_vma = extract_sections(input_filename)\n\tsymbol_list = extract_symbols(input_filename, section_vma)\n\tsection_offsets = create_bin(input_filename, output_filename, section_list)\n\tcreate_metadata(output_metadata_filename, symbol_list, section_offsets)\n\tprint \"DONE\"\n\nif __name__ == \"__main__\":\n\tmain()\n", "id": "2294005", "language": "Python", "matching_score": 0.16568756103515625, "max_stars_count": 105, "path": "data_collection/static/extract.py" }, { "content": "import collections\nimport mysql.connector\nimport struct\nimport sys\nfrom mysql.connector import errorcode\nimport random\nimport re\nimport os\nimport tempfile\nfrom typing import Dict, FrozenSet, Optional, Tuple, Union\n\n#mysql specific functions\ndef create_connection(database=None, user=None, password=None, port=None):\n args = {}\n\n option_files = list(filter(os.path.exists, map(os.path.abspath, map(os.path.expanduser, [\n '/etc/my.cnf',\n '~/.my.cnf',\n ]))))\n\n if option_files:\n args['option_files'] = option_files\n if database:\n args['database'] = database\n if user:\n args['user'] = user\n if password:\n args['password'] = password\n if port:\n args['port'] = port\n\n cnx = None\n try:\n cnx = mysql.connector.connect(**args)\n except mysql.connector.Error as err:\n if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:\n print(\"Something is 
wrong with your user name or password\")\n elif err.errno == errorcode.ER_BAD_DB_ERROR:\n print(\"Database does not exist\")\n else:\n print(err)\n\n return cnx\n\ndef get_mysql_config(filename):\n\n config = dict()\n with open(filename,'r') as f:\n for line in f:\n found = re.search('([a-zA-Z\\-]+) *= *\\\"*([a-zA-Z0-9#\\./]+)\\\"*', line)\n if found:\n config[found.group(1)] = found.group(2)\n return config\n\n\ndef create_connection_from_config(config_file, database=None):\n\n config = get_mysql_config(config_file)\n cnx = create_connection(user=config['user'],password=config['password'],port=config['port'],database=database)\n return cnx\n\ndef execute_many(cnx, sql, values):\n cur = cnx.cursor(buffered=True)\n cur.executemany(sql, values)\n\n\ndef execute_query(cnx, sql, fetch, multi=False):\n cur = cnx.cursor(buffered=True)\n cur.execute(sql,multi)\n if fetch:\n return cur.fetchall()\n else:\n return None\n\n#data reading function\ndef get_data(cnx, format, cols, limit=None):\n try:\n cur = cnx.cursor(buffered=True)\n\n #code column is mandatory\n columns = 'code_token'\n for col in cols:\n columns += ',' + col\n columns += ''\n\n sql = 'SELECT ' + columns + ' FROM code'\n if limit is not None:\n sql += ' LIMIT {}'.format(limit)\n\n print sql\n data = list()\n cur.execute(sql)\n print cur.rowcount\n row = cur.fetchone()\n while row != None:\n item = list()\n code = list()\n if format == 'text':\n for value in row[0].split(','):\n if value != '':\n code.append(int(value))\n elif format == 'bin':\n if len(row[0]) % 2 != 0:\n row = cur.fetchone()\n continue\n for i in range(0,len(row[0]),2):\n slice = row[0][i:i+2]\n convert = struct.unpack('h',slice)\n code.append(int(convert[0]))\n\n item.append(code)\n for i in range(len(cols)):\n item.append(row[i + 1])\n data.append(item)\n row = cur.fetchone()\n except Exception as e:\n print e\n else:\n return data\n\n\n#dynamorio specific encoding details - tokenizing\ndef get_opcode_opnd_dict(opcode_start, opnd_start):\n sym_dict = dict()\n\n filename = os.environ['ITHEMAL_HOME'] + '/common/inputs/encoding.h'\n\n with open(filename,'r') as f:\n opcode_num = opcode_start\n opnd_num = opnd_start\n for line in f:\n opcode_re = re.search('/\\*.*\\*/.*OP_([a-zA-Z_0-9]+),.*', line)\n if opcode_re != None:\n sym_dict[opcode_num] = opcode_re.group(1)\n opcode_num = opcode_num + 1\n opnd_re = re.search('.*DR_([A-Za-z_0-9]+),.*', line)\n if opnd_re != None:\n sym_dict[opnd_num] = opnd_re.group(1)\n opnd_num = opnd_num + 1\n f.close()\n\n return sym_dict\n\ndef read_offsets():\n offsets_filename = os.environ['ITHEMAL_HOME'] + '/common/inputs/offsets.txt'\n offsets = list()\n with open(offsets_filename,'r') as f:\n for line in f:\n for value in line.split(','):\n offsets.append(int(value))\n f.close()\n assert len(offsets) == 5\n return offsets\n\ndef get_sym_dict():\n # type: Tuple[Dict[int, str], int]\n\n offsets = read_offsets()\n sym_dict = get_opcode_opnd_dict(opcode_start = offsets[0],opnd_start = offsets[1])\n\n sym_dict[offsets[2]] = 'int_immed'\n sym_dict[offsets[3]] = 'float_immed'\n\n return sym_dict, offsets[4]\n\n_REGISTER_ALIASES = (\n {'REG_RAX', 'REG_EAX', 'REG_AX', 'REG_AH', 'REG_AL'},\n {'REG_RBX', 'REG_EBX', 'REG_BX', 'REG_BH', 'REG_BL'},\n {'REG_RCX', 'REG_ECX', 'REG_CX', 'REG_CH', 'REG_CL'},\n {'REG_RDX', 'REG_EDX', 'REG_DX', 'REG_DH', 'REG_DL'},\n {'REG_RSP', 'REG_ESP', 'REG_SP'},\n {'REG_RBP', 'REG_EBP', 'REG_BP'},\n {'REG_RSI', 'REG_ESI', 'REG_SI'},\n {'REG_RDI', 'REG_EDI', 'REG_DI'},\n {'REG_R8', 'REG_R8D', 'REG_R8W', 'REG_8L'},\n 
{'REG_R9', 'REG_R9D', 'REG_R9W', 'REG_9L'},\n {'REG_R10', 'REG_R10D', 'REG_R10W', 'REG_10L'},\n {'REG_R11', 'REG_R11D', 'REG_R11W', 'REG_11L'},\n {'REG_R12', 'REG_R12D', 'REG_R12W', 'REG_12L'},\n {'REG_R13', 'REG_R13D', 'REG_R13W', 'REG_13L'},\n {'REG_R14', 'REG_R14D', 'REG_R14W', 'REG_14L'},\n {'REG_R15', 'REG_R15D', 'REG_R15W', 'REG_15L'},\n)\n_REGISTER_ALIAS_MAP = {reg: regset for regset in _REGISTER_ALIASES for reg in regset}\ndef _get_canonical_operand(op):\n return _REGISTER_ALIAS_MAP.get(_global_sym_dict.get(op, None), op)\n\n_REGISTER_CLASSES = tuple(map(frozenset, (\n {'REG_RAX', 'REG_RCX', 'REG_RDX', 'REG_RBX', 'REG_RSP', 'REG_RBP', 'REG_RSI',\n 'REG_RDI', 'REG_R8', 'REG_R9', 'REG_R10', 'REG_R11', 'REG_R12', 'REG_R13',\n 'REG_R14', 'REG_R15'},\n {'REG_EAX', 'REG_ECX', 'REG_EDX', 'REG_EBX', 'REG_ESP', 'REG_EBP', 'REG_ESI',\n 'REG_EDI', 'REG_R8D', 'REG_R9D', 'REG_R10D', 'REG_R11D', 'REG_R12D', 'REG_R13D',\n 'REG_R14D', 'REG_R15D'},\n {'REG_AX', 'REG_CX', 'REG_DX', 'REG_BX', 'REG_SP', 'REG_BP', 'REG_SI',\n 'REG_DI', 'REG_R8W', 'REG_R9W', 'REG_R10W', 'REG_R11W', 'REG_R12W', 'REG_R13W',\n 'REG_R14W', 'REG_R15W'},\n {'REG_AL', 'REG_CL', 'REG_DL', 'REG_BL', 'REG_AH', 'REG_CH', 'REG_DH',\n 'REG_BH', 'REG_R8L', 'REG_R9L', 'REG_R10L', 'REG_R11L', 'REG_R12L', 'REG_R13L',\n 'REG_R14L', 'REG_R15L'},\n)))\n\ndef get_register_class(reg):\n # type: Union[str, int] -> Optional[FrozenSet[str]]\n\n if isinstance(reg, int):\n reg = _global_sym_dict.get(reg)\n\n for cls in _REGISTER_CLASSES:\n if reg in cls:\n return cls\n\n return None\n\ndef get_name(val,sym_dict,mem_offset):\n if val >= mem_offset:\n return 'mem_' + str(val - mem_offset)\n elif val < 0:\n return 'delim'\n else:\n return sym_dict[val]\n\ndef get_percentage_error(predicted, actual):\n\n errors = []\n for pitem, aitem in zip(predicted, actual):\n\n if type(pitem) == list:\n pitem = pitem[-1]\n aitem = aitem[-1]\n\n error = abs(float(pitem) - float(aitem)) * 100.0 / float(aitem)\n\n errors.append(error)\n\n return errors\n\n_global_sym_dict, _global_mem_start = get_sym_dict()\n_global_sym_dict_rev = {v:k for (k, v) in _global_sym_dict.items()}\n\n#calculating static properties of instructions and basic blocks\nclass Instruction:\n\n def __init__(self, opcode, srcs, dsts, num):\n self.opcode = opcode\n self.num = num\n self.srcs = srcs\n self.dsts = dsts\n self.parents = []\n self.children = []\n\n #for lstms\n self.lstm = None\n self.hidden = None\n self.tokens = None\n\n def clone(self):\n return Instruction(self.opcode, self.srcs[:], self.dsts[:], self.num)\n\n def print_instr(self):\n print self.num, self.opcode, self.srcs, self.dsts\n num_parents = [parent.num for parent in self.parents]\n num_children = [child.num for child in self.children]\n print num_parents, num_children\n\n def __str__(self):\n return self.intel\n\n def has_mem(self):\n return any(operand >= _global_mem_start for operand in self.srcs + self.dsts)\n\n def is_idempotent(self):\n return len(set(self.srcs) & set(self.dsts)) == 0\n\nclass InstructionReplacer(object):\n def __init__(self, regexp_intel, replacement_intel,\n replacement_srcs, replacement_dsts):\n self.regexp_intel = re.compile(regexp_intel)\n self.replacement_intel = replacement_intel\n self.replacement_srcs = replacement_srcs\n self.replacement_dsts = replacement_dsts\n\n def replace(self, instr, unused_registers):\n if instr.has_mem():\n return None\n\n match = self.regexp_intel.match(instr.intel)\n if match is None:\n return None\n\n unused_set = None\n for operand in instr.dsts:\n op_cls = 
get_register_class(operand)\n if op_cls is None:\n continue\n\n m_unused_set = op_cls & unused_registers\n if unused_set is not None:\n assert unused_set == m_unused_set, 'Did not expect mix of operand types'\n\n unused_set = m_unused_set\n\n if not unused_set:\n return None\n\n unused = list(unused_set)\n unused_intel = list(map(lambda x: x[x.rindex('_')+1:].lower(), unused))\n unused_token = list(map(_global_sym_dict_rev.get, unused))\n\n new_instr = instr.clone()\n new_instr.intel = self.replacement_intel.format(\n unused=unused_intel,\n **match.groupdict()\n )\n\n new_instr.srcs = list(map(int, map(lambda x: x.format(\n srcs=instr.srcs,\n dsts=instr.dsts,\n unused=unused_token,\n ), self.replacement_srcs)))\n\n new_instr.dsts = list(map(int, map(lambda x: x.format(\n srcs=instr.srcs,\n dsts=instr.dsts,\n unused=unused_token,\n ), self.replacement_dsts)))\n\n return new_instr\n\ndef _two_way_replacer(opcode):\n return InstructionReplacer(\n r'{}\\s+(?P<op1>\\w+),\\s+(?P<op2>\\w+)'.format(opcode),\n r'{} {{unused[0]}}, {{op2}}'.format(opcode),\n ['{srcs[0]}', '{unused[0]}'],\n ['{unused[0]}'],\n )\n\ndef _three_way_replacer(opcode):\n return InstructionReplacer(\n r'{}\\s+(?P<op1>\\w+),\\s+(?P<op2>\\w+),\\s+(?P<op3>\\w+)'.format(opcode),\n r'{} {{unused[0]}}, {{op2}}, {{op3}}'.format(opcode),\n ['{srcs[0]}', '{srcs[1]}'],\n ['{unused[0]}'],\n )\n\nreplacers = (\n _two_way_replacer('add'), _two_way_replacer('sub'), _two_way_replacer('and'),\n _two_way_replacer('or'), _two_way_replacer('xor'), _two_way_replacer('shl'),\n _two_way_replacer('shr'), _two_way_replacer('sar'),\n _three_way_replacer('imul'),\n)\n\n\nclass BasicBlock:\n\n def __init__(self, instrs):\n self.instrs = instrs\n self.span_values = [0] * len(self.instrs)\n\n def num_instrs(self):\n return len(self.instrs)\n\n def num_span(self, instr_cost):\n\n for i in range(len(self.instrs)):\n self.span_rec(i, instr_cost)\n\n if len(self.instrs) > 0:\n return max(self.span_values)\n else:\n return 0\n\n def print_block(self):\n for instr in self.instrs:\n instr.print_instr()\n\n\n def span_rec(self, n, instr_cost):\n\n if self.span_values[n] != 0:\n return self.span_values[n]\n\n src_instr = self.instrs[n]\n span = 0\n dsts = []\n for dst in src_instr.dsts:\n dsts.append(dst)\n\n for i in range(n + 1, len(self.instrs)):\n dst_instr = self.instrs[i]\n for dst in dsts:\n found = False\n for src in dst_instr.srcs:\n if(dst == src):\n ret = self.span_rec(i, instr_cost)\n if span < ret:\n span = ret\n found = True\n break\n if found:\n break\n dsts = list(set(dsts) - set(dst_instr.dsts)) #remove dead destinations\n\n if src_instr.opcode in instr_cost:\n cost = instr_cost[src_instr.opcode]\n else:\n src_instr.print_instr()\n cost = 1\n\n #assert cost == 1\n\n self.span_values[n] = span + cost\n return self.span_values[n]\n\n\n def find_uses(self, n):\n\n instr = self.instrs[n]\n for dst in map(_get_canonical_operand, instr.dsts):\n for i in range(n + 1, len(self.instrs), 1):\n dst_instr = self.instrs[i]\n if dst in map(_get_canonical_operand, dst_instr.srcs):\n if not dst_instr in instr.children:\n instr.children.append(dst_instr)\n if dst in map(_get_canonical_operand, dst_instr.dsts): #value becomes dead here\n break\n\n def find_defs(self, n):\n\n instr = self.instrs[n]\n for src in map(_get_canonical_operand, instr.srcs):\n for i in range(n - 1, -1, -1):\n src_instr = self.instrs[i]\n if src in map(_get_canonical_operand, src_instr.dsts):\n if not src_instr in instr.parents:\n instr.parents.append(src_instr)\n break\n\n def 
create_dependencies(self):\n\n for n in range(len(self.instrs)):\n self.find_defs(n)\n self.find_uses(n)\n\n def get_dfs(self):\n dfs = collections.defaultdict(set)\n\n for instr in self.instrs[::-1]:\n frontier = {instr}\n while frontier:\n n = frontier.pop()\n if n in dfs:\n dfs[instr] |= dfs[n]\n continue\n\n for c in n.children:\n if c in dfs[instr] or c in frontier:\n continue\n frontier.add(c)\n dfs[instr].add(n)\n\n return dfs\n\n def transitive_closure(self):\n dfs = self.get_dfs()\n for instr in self.instrs:\n transitive_children = set(n for c in instr.children for n in dfs[c])\n instr.children = list(transitive_children)\n for child in instr.children:\n if instr not in child.parents:\n child.parents.append(instr)\n\n def transitive_reduction(self):\n dfs = self.get_dfs()\n for instr in self.instrs:\n\n transitively_reachable_children = set()\n for child in instr.children:\n transitively_reachable_children |= dfs[child] - {child}\n\n for child in transitively_reachable_children:\n if child in instr.children:\n instr.children.remove(child)\n child.parents.remove(instr)\n\n def random_forward_edges(self, frequency):\n '''Add forward-facing edges at random to the instruction graph.\n\n There are n^2/2 -1 considered edges (where n is the number of\n instructions), so to add 5 edges in expectation, one would\n provide frequency=5/(n^2/2-1)\n\n '''\n n_edges_added = 0\n for head_idx, head_instr in enumerate(self.instrs[:-1]):\n for tail_instr in self.instrs[head_idx+1:]:\n if random.random() < frequency:\n if tail_instr not in head_instr.children:\n head_instr.children.append(tail_instr)\n tail_instr.parents.append(head_instr)\n n_edges_added += 1\n\n return n_edges_added\n\n def remove_edges(self):\n for instr in self.instrs:\n instr.parents = []\n instr.children = []\n\n def linearize_edges(self):\n for fst, snd in zip(self.instrs, self.instrs[1:]):\n if snd not in fst.children:\n fst.children.append(snd)\n if fst not in snd.parents:\n snd.parents.append(fst)\n\n def find_roots(self):\n roots = []\n for instr in self.instrs:\n if len(instr.parents) == 0:\n roots.append(instr)\n return roots\n\n def find_leaves(self):\n leaves = []\n for instr in self.instrs:\n if len(instr.children) == 0:\n leaves.append(instr)\n\n return leaves\n\n def gen_reorderings(self, single_perm=False):\n self.create_dependencies()\n\n def _gen_reorderings(prefix, schedulable_instructions, mem_q):\n mem_q = mem_q[:]\n has_pending_mem = any(instr.has_mem() for instr in schedulable_instructions)\n has_activated_mem = mem_q and all(parent in prefix for parent in mem_q[0].parents)\n\n if has_activated_mem and not has_pending_mem:\n schedulable_instructions.append(mem_q.pop(0))\n\n if len(schedulable_instructions) == 0:\n return [prefix]\n\n reorderings = []\n def process_index(i):\n instr = schedulable_instructions[i]\n # pop this instruction\n rest_scheduleable_instructions = schedulable_instructions[:i] + schedulable_instructions[i+1:]\n rest_prefix = prefix + [instr]\n\n # add all activated children\n for child in instr.children:\n if all(parent in rest_prefix for parent in child.parents):\n if not child.has_mem():\n rest_scheduleable_instructions.append(child)\n\n reorderings.extend(_gen_reorderings(rest_prefix, rest_scheduleable_instructions, mem_q))\n\n if single_perm:\n process_index(random.randrange(len(schedulable_instructions)))\n else:\n for i in range(len(schedulable_instructions)):\n process_index(i)\n\n return reorderings\n\n return _gen_reorderings(\n [],\n [i for i in self.find_roots() if not 
i.has_mem()],\n [i for i in self.instrs if i.has_mem()],\n )\n\n def sample_reordering(self):\n # TODO: THIS VIOLATES FALSE DEPENDENCIES\n prefix = []\n enabled = []\n enabled_mem = []\n some_mem_enabled = False\n\n def is_enabled(i):\n return all(p in prefix for p in instr.parents)\n\n for instr in self.instrs:\n if len(instr.parents) == 0:\n if instr.has_mem():\n if not some_mem_enabled:\n enabled.append(instr)\n some_mem_enabled = True\n else:\n enabled_mem.append(instr)\n else:\n enabled.append(instr)\n\n while enabled or enabled_mem:\n to_schedule = random.randrange(len(enabled))\n instr = enabled.pop(to_schedule)\n prefix.append(instr)\n if instr.has_mem():\n if enabled_mem:\n enabled.append(enabled_mem.pop(0))\n else:\n some_mem_enabled = False\n for ch in instr.children:\n if is_enabled(ch):\n if ch.has_mem():\n if some_mem_enabled:\n enabled_mem.append(ch)\n else:\n enabled.append(ch)\n some_mem_enabled = True\n else:\n enabled.append(ch)\n\n return prefix\n\n def paths_of_block(self):\n # type: () -> List[List[ut.Instruction]]\n def paths_of_instr(i, parents):\n # type: (ut.Instruction, List[ut.Instruction]) -> List[List[ut.Instruction]]\n new_parents = parents + [i]\n if i.children:\n return sum((paths_of_instr(c, new_parents) for c in i.children), [])\n else:\n return [new_parents]\n\n return sum((paths_of_instr(i, []) for i in self.find_roots()), [])\n\n def draw(self, to_file=False, file_name=None, view=True):\n if to_file and not file_name:\n file_name = tempfile.NamedTemporaryFile(suffix='.gv').name\n\n from graphviz import Digraph\n\n dot = Digraph()\n for instr in self.instrs:\n dot.node(str(id(instr)), str(instr))\n for child in instr.children:\n dot.edge(str(id(instr)), str(id(child)))\n\n if to_file:\n dot.render(file_name, view=view)\n return dot, file_name\n else:\n return dot\n\n def has_mem(self):\n return any(map(Instruction.has_mem, self.instrs))\n\n def has_no_dependencies(self):\n return all(len(i.parents) == 0 and len(i.children) == 0 for i in self.instrs)\n\n def has_linear_dependencies(self):\n if len(self.instrs) <= 1:\n return True\n\n return (\n len(self.instrs[0].children) == 1 and\n all(len(i.parents) == 1 and len(i.children) == 1 for i in self.instrs[1:-1]) and\n len(self.instrs[-1].parents) == 1\n )\n\ndef generate_duplicates(instrs, max_n_dups):\n for idx in range(len(instrs) - 1, -1, -1):\n instr = instrs[idx]\n unused_regs = unused_registers_at_point(instrs, idx)\n for replacer in replacers:\n res = replacer.replace(instr, unused_regs)\n if res is None:\n continue\n\n augmentations = []\n new_instrs = instrs[:]\n for i in range(max_n_dups):\n unused_regs = unused_registers_at_point(new_instrs, idx)\n aug_instr = replacer.replace(instr, unused_regs)\n if not aug_instr:\n break\n new_instrs.insert(idx, aug_instr)\n augmentations.append(new_instrs[:])\n\n return augmentations\n\n return []\n\n\ndef unused_registers_at_point(instrs, idx):\n if idx < 0 or idx > len(instrs):\n raise ValueError('{} is not a valid index'.format(idx))\n\n unused_regs = set()\n for cls in _REGISTER_CLASSES:\n unused_regs |= cls\n for instr in instrs[idx:]:\n for src in instr.srcs:\n unused_regs -= {_global_sym_dict.get(src)}\n for dst in instr.dsts:\n unused_regs -= {_global_sym_dict.get(dst)}\n\n return unused_regs\n\n\ndef create_basicblock(tokens):\n\n opcode = None\n srcs = []\n dsts = []\n mode = 0\n\n mode = 0\n instrs = []\n for item in tokens:\n if item == -1:\n mode += 1\n if mode > 2:\n mode = 0\n instr = Instruction(opcode,srcs,dsts,len(instrs))\n 
instrs.append(instr)\n opcode = None\n srcs = []\n dsts = []\n continue\n else:\n if mode == 0:\n opcode = item\n elif mode == 1:\n srcs.append(item)\n else:\n dsts.append(item)\n\n block = BasicBlock(instrs)\n return block\n\n\nif __name__ == \"__main__\":\n cnx = create_connection()\n cur = cnx.cursor(buffered = True)\n\n sql = 'SELECT code_id, code_token from code where program = \\'2mm\\' and rel_addr = 4136'\n\n cur.execute(sql)\n\n rows = cur.fetchall()\n\n sym_dict, mem_start = get_sym_dict()\n\n for row in rows:\n print row[0]\n code = []\n for val in row[1].split(','):\n if val != '':\n code.append(get_name(int(val),sym_dict,mem_start))\n print code\n\n\n sql = 'SELECT time from times where code_id = ' + str(rows[0][0])\n cur.execute(sql)\n rows = cur.fetchall()\n\n times = [int(t[0]) for t in rows]\n print sorted(times)\n", "id": "12171759", "language": "Python", "matching_score": 3.9259300231933594, "max_stars_count": 105, "path": "common/common_libs/utilities.py" }, { "content": "#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nimport sys\nimport os\nsys.path.append(os.path.join(os.environ['ITHEMAL_HOME'], 'learning', 'pytorch'))\n\n# this script doesn't need matplotlib; this line fixes interactive use of this script.\nimport matplotlib\nmatplotlib.use('Agg')\n\nimport argparse\nimport common_libs.utilities as ut\nimport data_cost as dt\nimport os\nimport time\nimport torch\nfrom tqdm import tqdm\nfrom typing import Any, Dict, Iterable, List, Optional, Set, Sequence\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle # type: ignore\n\n_DIRNAME = os.path.abspath(os.path.dirname(__file__))\n_DATA_DIR = os.path.join(_DIRNAME, os.pardir, 'inputs', 'augmentations')\n_DEFAULT_DUP_TEMPLATE = os.path.join(_DIRNAME, 'eu_dup_template.json')\n\n_time_str = None # type: Optional[str]\ndef time_str(): # type: () -> str\n global _time_str\n if _time_str is None:\n _time_str = time.strftime('%Y-%m-%d.%H-%M-%S')\n return _time_str\n\ndef save_object(obj, name): # type: (Any, str) -> None\n with open(os.path.join(_DATA_DIR, '{}_{}.pkl'.format(name, time_str())), 'wb') as f:\n pickle.dump(obj, f)\n\ndef execute_sql(commands): # type: (List[str]) -> None\n cnx = ut.create_connection()\n for com in commands:\n ut.execute_query(cnx, com, False)\n cnx.commit()\n\n\ndef read_dataset(data_file, embedding_file): # type: (str, str) -> dt.DataInstructionEmbedding\n data = dt.DataInstructionEmbedding()\n\n data.raw_data = torch.load(data_file)\n data.set_embedding(embedding_file)\n data.read_meta_data()\n data.prepare_data()\n data.generate_datasets()\n return data\n\n\nAugmentationMap = Dict[dt.DataItem, Iterable[Sequence[ut.Instruction]]]\n\ndef gen_permutations(\n full_data,\n desired_n_perms=None,\n max_block_size=None,\n min_perms_per_block=None,\n max_perms_per_block=None\n):\n # type: (dt.DataInstructionEmbedding, Optional[int], Optional[int], Optional[int], Optional[int]) -> AugmentationMap\n data = set(full_data.data)\n perms = {} # type: AugmentationMap\n\n n_perms_gen = 0\n\n pbar = tqdm(total=(desired_n_perms or len(data)))\n while data and (desired_n_perms is None or n_perms_gen < desired_n_perms):\n datum = data.pop()\n block = datum.block\n if max_block_size and len(block.instrs) > max_block_size:\n continue\n if max_perms_per_block:\n reorderings = set() # type: Set[Sequence[ut.Instruction]]\n n_tries = 0\n while len(reorderings) < max_perms_per_block and n_tries < max_perms_per_block * 2:\n m_reorderings = block.gen_reorderings(single_perm=True)\n 
assert len(m_reorderings) == 1\n reorderings.add(tuple(m_reorderings[0]))\n n_tries += 1\n else:\n reorderings = set(map(tuple, block.gen_reorderings()))\n if min_perms_per_block and len(reorderings) < min_perms_per_block:\n continue\n perms[datum] = reorderings\n n_perms_gen += len(reorderings)\n pbar.update(len(reorderings) if desired_n_perms else 1)\n pbar.close()\n\n return perms\n\ndef gen_duplicated_instructions(full_data, max_dups):\n # type: (dt.DataInstructionEmbedding, int) -> AugmentationMap\n\n data = set(full_data.data)\n perms = {} # type: AugmentationMap\n\n pbar = tqdm(total=len(data))\n while data:\n datum = data.pop()\n block = datum.block\n reorderings = ut.generate_duplicates(block.instrs, max_dups)\n if reorderings:\n perms[datum] = reorderings\n pbar.update(1)\n pbar.close()\n\n return perms\n\ndef gen_sql_commands_of_augs(augs, table_name): # type: (AugmentationMap, str) -> List[str]\n sql_commands = []\n sql_commands.append('''CREATE TABLE {} (\n aug_id int(32) NOT NULL AUTO_INCREMENT,\n code_id int(32) NOT NULL,\n code_intel TEXT NOT NULL,\n code_token TEXT NOT NULL,\n PRIMARY KEY (aug_id),\n CONSTRAINT {}_idfk_1 FOREIGN KEY (code_id) REFERENCES code(code_id)\n );'''.format(table_name, table_name))\n\n def format_insert_command(values): # List[str] -> str\n return 'INSERT INTO {} (code_id, code_intel, code_token) VALUES ({});'.format(\n table_name,\n ','.join(values),\n )\n\n for dataitem in tqdm(augs):\n for aug in augs[dataitem]:\n tokens = []\n for i in aug:\n tokens.append(i.opcode)\n tokens.append(-1)\n tokens.extend(i.srcs)\n tokens.append(-1)\n tokens.extend(i.dsts)\n tokens.append(-1)\n\n values = [\n str(dataitem.code_id),\n \"'{}'\".format('\\n'.join(i.intel for i in aug)),\n \"'{}'\".format(','.join(map(str, tokens))),\n ]\n sql_commands.append(format_insert_command(values))\n\n return sql_commands\n\ndef main(): # type: () -> None\n parser = argparse.ArgumentParser(description='Supplement dataset')\n parser.add_argument('--data', type=str, required=True, help='Block data file to use (e.g. inputs/data/time_skylake.data')\n parser.add_argument('--embedding', type=str, required=True, help='Token embedding file to use (e.g. inputs/embeddings/code_delim.emb)')\n parser.add_argument('--table-name', type=str, required=True, help='Table to write augmentations to (will be freshly created)')\n\n parser.add_argument('--execute-sql', action='store_true', default=False)\n parser.add_argument('--store-sql', action='store_true', default=False)\n parser.add_argument('--optimize-sql', action='store_true', default=False)\n\n subparsers = parser.add_subparsers(dest='command')\n\n perms_parser = subparsers.add_parser('permutations')\n perms_parser.add_argument('--desired-n-perms', default='all')\n perms_parser.add_argument('--max-block-size', type=int, default=None, help='Maximum block size to attempt to generate permutations for. 
Default none')\n perms_parser.add_argument('--min-perms-per-block', type=int, default=None, help='Minimum number of permutations to include when generating permutations (otherwise throw out block)')\n perms_parser.add_argument('--max-perms-per-block', type=int, default=None, help='Maximum numnber of permutations to include when generating permuations.')\n\n ports_parser = subparsers.add_parser('ports')\n ports_parser.add_argument('--dup-template', type=str, default=_DEFAULT_DUP_TEMPLATE)\n ports_parser.add_argument('--max-dups', type=int, default=10, help='Max number of times to duplicate a given instruction')\n\n args = parser.parse_args()\n\n data = read_dataset(args.data, args.embedding)\n\n if args.command == 'permutations':\n if args.desired_n_perms == 'all':\n desired_n_perms = None\n elif args.desired_n_perms == 'equal':\n desired_n_perms = len(data.data)\n else:\n desired_n_perms = int(args.desired_n_perms)\n\n augs = gen_permutations(\n data,\n desired_n_perms=desired_n_perms,\n max_block_size=args.max_block_size,\n min_perms_per_block=args.min_perms_per_block,\n max_perms_per_block=args.max_perms_per_block,\n )\n else:\n augs = gen_duplicated_instructions(data, args.max_dups)\n\n sql_commands = gen_sql_commands_of_augs(augs, args.table_name)\n\n if args.optimize_sql:\n sql_commands.insert(0, 'SET autocommit=0;')\n sql_commands.insert(1, 'SET unique_checks=0;')\n sql_commands.insert(2, 'SET foreign_key_checks=0;')\n sql_commands.append('COMMIT;')\n sql_commands.append('SET unique_checks=1;')\n sql_commands.append('SET foreign_key_checks=1;')\n sql_commands.append('SET autocommit=1;')\n\n if args.store_sql:\n with open(os.path.join(_DATA_DIR, 'table_{}.sql'.format(time_str())), 'w') as f:\n print('\\n'.join(sql_commands), file=f)\n\n if args.execute_sql:\n execute_sql(sql_commands)\n\nif __name__ == '__main__':\n main()\n", "id": "10925652", "language": "Python", "matching_score": 2.722658157348633, "max_stars_count": 105, "path": "learning/pytorch/data/augmentation.py" }, { "content": "import numpy as np\nimport random\nimport torch.nn as nn\nimport torch.autograd as autograd\nimport torch.optim as optim\nimport torch\nfrom tqdm import tqdm\nfrom .data import Data\nimport matplotlib.pyplot as plt\nimport statistics\nimport pandas as pd\nimport xml.etree.ElementTree as ET\nimport itertools\n\nimport sys\nsys.path.append('..')\n\nimport common_libs.utilities as ut\n\n\nclass DataItem:\n\n def __init__(self, x, y, block, code_id):\n self.x = x\n self.y = y\n self.block = block\n self.code_id = code_id\n\nclass DataInstructionEmbedding(Data):\n\n def __init__(self):\n super(DataInstructionEmbedding, self).__init__()\n self.token_to_hot_idx = {}\n self.hot_idx_to_token = {}\n self.data = []\n\n def dump_dataset_params(self):\n return (self.token_to_hot_idx, self.hot_idx_to_token)\n\n def load_dataset_params(self, params):\n (self.token_to_hot_idx, self.hot_idx_to_token) = params\n\n def prepare_data(self, progress=True, fixed=False):\n def hot_idxify(elem):\n if elem not in self.token_to_hot_idx:\n if fixed:\n # TODO: this would be a good place to implement UNK tokens\n raise ValueError('Ithemal does not yet support UNK tokens!')\n self.token_to_hot_idx[elem] = len(self.token_to_hot_idx)\n self.hot_idx_to_token[self.token_to_hot_idx[elem]] = elem\n return self.token_to_hot_idx[elem]\n\n if progress:\n iterator = tqdm(self.raw_data)\n else:\n iterator = self.raw_data\n\n for (code_id, timing, code_intel, code_xml) in iterator:\n block_root = ET.fromstring(code_xml)\n instrs = []\n 
raw_instrs = []\n curr_mem = self.mem_start\n for _ in range(1): # repeat for duplicated blocks\n # handle missing or incomplete code_intel\n split_code_intel = itertools.chain((code_intel or '').split('\\n'), itertools.repeat(''))\n for (instr, m_code_intel) in zip(block_root, split_code_intel):\n raw_instr = []\n opcode = int(instr.find('opcode').text)\n raw_instr.extend([opcode, '<SRCS>'])\n srcs = []\n for src in instr.find('srcs'):\n if src.find('mem') is not None:\n raw_instr.append('<MEM>')\n for mem_op in src.find('mem'):\n raw_instr.append(int(mem_op.text))\n srcs.append(int(mem_op.text))\n raw_instr.append('</MEM>')\n srcs.append(curr_mem)\n curr_mem += 1\n else:\n raw_instr.append(int(src.text))\n srcs.append(int(src.text))\n\n raw_instr.append('<DSTS>')\n dsts = []\n for dst in instr.find('dsts'):\n if dst.find('mem') is not None:\n raw_instr.append('<MEM>')\n for mem_op in dst.find('mem'):\n raw_instr.append(int(mem_op.text))\n # operands used to calculate dst mem ops are sources\n srcs.append(int(mem_op.text))\n raw_instr.append('</MEM>')\n dsts.append(curr_mem)\n curr_mem += 1\n else:\n raw_instr.append(int(dst.text))\n dsts.append(int(dst.text))\n\n raw_instr.append('<END>')\n raw_instrs.append(list(map(hot_idxify, raw_instr)))\n instrs.append(ut.Instruction(opcode, srcs, dsts, len(instrs)))\n instrs[-1].intel = m_code_intel\n\n block = ut.BasicBlock(instrs)\n block.create_dependencies()\n datum = DataItem(raw_instrs, timing, block, code_id)\n self.data.append(datum)\n\ndef load_dataset(data_savefile=None, arch=None, format='text'):\n data = DataInstructionEmbedding()\n\n if data_savefile is None:\n if arch is None:\n raise ValueError('Must provide one of data_savefile or arch')\n\n cnx = ut.create_connection()\n data.extract_data(cnx, format, ['code_id','code_intel'])\n data.get_timing_data(cnx, arch)\n else:\n data.raw_data = torch.load(data_savefile)\n\n data.read_meta_data()\n data.prepare_data()\n data.generate_datasets()\n\n return data\n", "id": "697860", "language": "Python", "matching_score": 2.680016279220581, "max_stars_count": 105, "path": "learning/pytorch/data/data_cost.py" }, { "content": "#main data file\n\nimport numpy as np\nimport common_libs.utilities as ut\nimport random\nimport torch.nn as nn\nimport torch.autograd as autograd\nimport torch.optim as optim\nimport torch\nimport matplotlib.pyplot as plt\n\n\nclass Data(object):\n\n \"\"\"\n Main data object which extracts data from a database, partition it and gives out batches.\n\n \"\"\"\n\n\n def __init__(self): #copy constructor\n self.percentage = 80\n self.costs = dict()\n\n def extract_data(self, cnx, format, fields):\n print 'extracting data'\n self.raw_data = ut.get_data(cnx, format, fields)\n self.fields = fields\n\n def read_meta_data(self):\n\n self.sym_dict,_ = ut.get_sym_dict()\n self.offsets = ut.read_offsets()\n\n self.opcode_start = self.offsets[0]\n self.operand_start = self.offsets[1]\n self.int_immed = self.offsets[2]\n self.float_immed = self.offsets[3]\n self.mem_start = self.offsets[4]\n\n for i in range(self.opcode_start, self.mem_start):\n self.costs[i] = 1\n\n\n def generate_costdict(self, maxnum):\n for i in range(self.opcode_start, self.mem_start):\n self.costs[i] = np.random.randint(1,maxnum)\n\n def prepare_data(self):\n pass\n\n def generate_datasets(self):\n size = len(self.data)\n split = (size * self.percentage) // 100\n self.train = self.data[:split]\n self.test = self.data[(split + 1):]\n print 'train ' + str(len(self.train)) + ' test ' + str(len(self.test))\n\n\n def 
generate_batch(self, batch_size, partition=None):\n if partition is None:\n partition = (0, len(self.train))\n\n # TODO: this seems like it would be expensive for a large data set\n (start, end) = partition\n population = range(start, end)\n selected = random.sample(population,batch_size)\n\n self.batch = []\n for index in selected:\n self.batch.append(self.train[index])\n\n def plot_histogram(self, data):\n\n ys = list()\n for item in data:\n ys.append(item.y)\n\n plt.hist(ys, min(max(ys), 1000))\n plt.show()\n", "id": "8405397", "language": "Python", "matching_score": 1.2626229524612427, "max_stars_count": 105, "path": "learning/pytorch/data/data.py" }, { "content": "import matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport sys\nimport utilities as ut\nimport numpy as np\nimport random\n\n\n\ndef plot_histogram(filename, values, maxvalue, xlabel, ylabel, title):\n plt.figure()\n plt.hist(self.values, bins=maxvalue, range=(0,maxvalue), edgecolor='black', linewidth=0.3)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(title)\n plt.savefig(filename, bbox_inches='tight')\n plt.close()\n\ndef plot_line_graphs(filename, losses, legend, ylabel='loss', xlabel='batch', title='Learning Curves', xmin = None, xmax = None, ymin = None, ymax = None):\n plt.figure()\n for loss, label in zip(losses, legend):\n y = loss\n x = np.arange(len(loss))\n h = plt.plot(x,y, '.-', linewidth=1, markersize=2, label=label)\n\n plt.legend()\n plt.ylabel(ylabel)\n plt.xlabel(xlabel)\n plt.title(title)\n\n cur_xmin, cur_xmax = plt.xlim()\n cur_ymin, cur_ymax = plt.ylim()\n\n if xmin != None and cur_xmin < xmin:\n plt.xlim(xmin = xmin)\n if ymin != None and cur_ymin < ymin:\n plt.ylim(ymin = ymin)\n if xmax != None and cur_xmax > xmax:\n plt.xlim(xmax = xmax)\n if ymax != None and cur_ymax > ymax:\n plt.ylim(ymax = ymax)\n plt.savefig(filename)\n plt.close()\n\n\n\nif __name__ == '__main__':\n\n ys = []\n labels = ['graph1', 'graph2']\n\n for _ in range(2):\n y = []\n for i in range(random.randint(1,100)):\n y.append(random.randint(0,100))\n ys.append(y)\n\n plot_line_graphs('test.png',ys,labels, xmin=0, xmax=50, ymin=0, ymax=40)\n\n\n\n\n\n\n", "id": "8069515", "language": "Python", "matching_score": 1.016080617904663, "max_stars_count": 105, "path": "common/common_libs/graphs.py" }, { "content": "import torch\nimport torch.nn as nn\nimport sys\nsys.path.append('..')\nimport common_libs.utilities as ut\nimport torch.autograd as autograd\nimport torch.optim as optim\nimport math\nimport numpy as np\n\n\"\"\"\nmse loss normalized\n\"\"\"\ndef mse_loss(output,target):\n\n\n loss_fn = nn.MSELoss(reduction = 'none')\n loss = torch.sqrt(loss_fn(output, target)) / (target + 1e-3)\n loss = torch.mean(loss)\n\n return [loss]\n\n\"\"\"\nmse loss + margin rank loss\n\"\"\"\n\ndef mse_loss_plus_rank_loss(output,target):\n\n cost = output\n target_cost = target\n\n if output.size()[0] > 1:\n inter = output[:-1]\n inter_1 = output[1:]\n else: #emulate no rank loss\n inter = torch.ones(1)\n inter_1 = 2 * torch.ones(1)\n\n target_rank = torch.ones(inter.size())\n\n loss_mse = nn.MSELoss(reduction = 'none')\n loss1 = torch.sqrt(loss_mse(cost, target_cost)) / (target_cost + 1e-3)\n loss1 = torch.mean(loss1)\n\n loss_rank = nn.MarginRankingLoss()\n loss2 = loss_rank(inter_1, inter, target_rank)\n\n return [loss1, loss2]\n\n\n\"\"\"\nsoftmax cross entropy loss with weights\n\"\"\"\ndef cross_entropy_loss_with_weights(output,target):\n\n outputs = nn.functional.softmax(output,0)\n\n nz = 
torch.nonzero(target)[0][0]\n\n mean = nz\n std = nz * 0.05\n weight_points = range(target.shape[0])\n\n normal = torch.distributions.normal.Normal(mean, std)\n weight_values = torch.exp(normal.log_prob(torch.FloatTensor(weight_points)))\n weight_values = weight_values / torch.sum(weight_values)\n\n target = weight_values * 100 #just scaling the weights\n\n loss = nn.functional.binary_cross_entropy(outputs,target)\n return [loss]\n\n\n\"\"\"\nsoftmax cross entropy loss for classification\n\"\"\"\ndef cross_entropy_loss(output,target):\n\n outputs = nn.functional.softmax(output,0)\n\n loss = nn.functional.binary_cross_entropy(outputs,target)\n return [loss]\n\n", "id": "10995473", "language": "Python", "matching_score": 1.0430412292480469, "max_stars_count": 0, "path": "learning/pytorch/models/losses.py" }, { "content": "import setuptools\n\nsetuptools.setup(\n name=\"common_libs\",\n version=\"0.0.1\",\n author=\"<NAME>\",\n author_email=\"<EMAIL>\",\n description=\"common libs used by ithemal\",\n packages=['common_libs']\n)\n", "id": "9629785", "language": "Python", "matching_score": 0.13449311256408691, "max_stars_count": 105, "path": "common/setup.py" } ]
2.044098
Gender-Analysis-of-STEM
[ { "content": "import twint\n\nc = twint.Config()\nc.Since = \"2021-02-01\"\nc.Until = \"2021-03-14\"\nc.Search = '(mulher OR mulheres OR garotinha OR garotas OR menina OR garotas) AND \\\n ((engenheira OR cientista OR arquiteta OR programação OR biologa) OR \\\n (engenharia OR ciência OR stem)) OR \\\n (química OR bioquimica OR astrofísica OR astronomía OR eletrônica OR \\\n mecânica OR computação)'\nc.Lang = \"pt\"\nc.Store_csv = True\nc.Output = \"./Query3_2021_pt.csv\"\ntwint.run.Search(c)\n", "id": "12173049", "language": "Python", "matching_score": 4.063791275024414, "max_stars_count": 0, "path": "queries/portuguese/query3_pt.py" }, { "content": "import twint\n\nc = twint.Config()\nc.Since = \"2021-02-01\"\nc.Until = \"2021-03-14\"\nc.Search = '(mulher OR mulheres OR garotinha OR garotas OR menina OR garotas) AND \\\n ((engenheira OR cientista OR arquiteta OR programação OR biologa) OR \\\n (engenharia OR ciência OR stem)) OR \\\n (tecnología OR software OR aeronáutica OR aeronaval OR \\\n OR metais OR mining OR agronomia OR automóvel)'\nc.Lang = \"pt\"\nc.Store_csv = True\nc.Output = \"./Query3.2_2021_pt.csv\"\ntwint.run.Search(c)\n", "id": "4793130", "language": "Python", "matching_score": 1.5802199840545654, "max_stars_count": 0, "path": "queries/portuguese/query32_pt.py" }, { "content": "import twint\n\nc = twint.Config()\nc.Since = \"2019-02-01\"\nc.Until = \"2019-03-14\"\nc.Search = \"(mujer OR mujeres OR niña OR niñas OR chica OR chicas) AND \\\n((ingeniera OR científica OR arquitecta OR programadora OR bióloga) OR \\\n(ingeniería OR ciencia OR stem)) OR \\\n(tecnología OR software OR metalurgía OR minería OR agronomía OR automotriz)\"\nc.Lang = \"es\"\nc.Store_csv = True\nc.Output = \"./Query3.2_2019.csv\"\ntwint.run.Search(c)\n", "id": "12759758", "language": "Python", "matching_score": 3.8331401348114014, "max_stars_count": 0, "path": "queries/spanish/query32_sp.py" }, { "content": "import twint\n\nc = twint.Config()\nc.Since = \"2019-02-01\"\nc.Until = \"2019-03-14\"\nc.Search = '(mujer OR mujeres OR niña OR niñas OR chica OR chicas) AND \\\n((ingeniera OR científica OR arquitecta OR programadora OR bióloga) OR \\\n(ingeniería OR ciencia OR stem)) OR \\\n(química OR bioquímica OR ecología OR geología OR astrofísica OR astronomía OR electronica OR \\\nmécanica OR computación)'\nc.Lang = \"es\"\nc.Store_csv = True\nc.Output = \"./Query3_2021.csv\"\ntwint.run.Search(c)\n", "id": "12717408", "language": "Python", "matching_score": 3.0484018325805664, "max_stars_count": 0, "path": "queries/spanish/query3_sp.py" }, { "content": "import twint\n\nc = twint.Config()\nc.Since = \"2021-02-01\"\nc.Until = \"2021-03-14\"\nc.Search = \"(mujer OR mujeres OR niña OR niñas OR chica OR chicas) AND \\\n((ingeniera OR científica OR arquitecta OR programadora OR bióloga) OR \\\n(ingeniería OR ciencia OR stem)) OR \\\n(biología OR TIC OR arquitectura)\"\nc.Lang = \"es\"\nc.Store_csv = True\nc.Output = \"/Users/marianafernandez/Documents/FashionableBID/Gender-Analysis-of-STEM/data-raw/Query2_2021.csv\"\ntwint.run.Search(c)\n", "id": "1876741", "language": "Python", "matching_score": 4.337407112121582, "max_stars_count": 0, "path": "queries/spanish/query2_sp.py" }, { "content": "import twint\n\nc = twint.Config()\nc.Since = \"2021-02-01\"\nc.Until = \"2021-03-14\"\nc.Search = \"(mujer OR mujeres OR niña OR niñas OR chica OR chicas) AND \\\n ((ingeniera OR científica OR arquitecta OR programadora OR bióloga) OR \\\n (ingeniería OR ciencia OR stem)) OR \\\n (matemáticas) OR \\\n (#WomenInSTEM OR 
#WomenInTech OR #MujeresEnTI OR #MujeresEnIT OR #GirlsInTech)\"\nc.Lang = \"es\"\nc.Store_csv = True\nc.Output = \"./Query1_2021.csv\"\ntwint.run.Search(c)\n", "id": "1593675", "language": "Python", "matching_score": 0.16165189445018768, "max_stars_count": 0, "path": "queries/spanish/query1_sp.py" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"zero-shot-cross-lingual.ipynb\n\nAutomatically generated by Colaboratory.\n\nOriginal file is located at\n https://colab.research.google.com/drive/1017sr8pqkIJD6SapxmzWIRSSf9zEu9WB\n\n# Zero Cross-lingual Topic Modeling\n\n### Fashionable to Be Dumb? A Gender Analysis of STEM Discourse in Latin American Social Media\n\nWe are going to use our Zero-Shot Topic Model to get the topics out of a collections of articles you will upload here. Then, we are going to predict the topics of unseen documents in an unseen language exploiting the multilingual capabilities of Multilingual BERT.\n\n### Install libraries\n\"\"\"\n\n# Commented out IPython magic to ensure Python compatibility.\n# %%capture\n# !pip install contextualized-topic-models\n\n# Commented out IPython magic to ensure Python compatibility.\n# %%capture\n# !pip install pandas==1.2.5\n\n# Commented out IPython magic to ensure Python compatibility.\n# %%capture\n# !pip install pyldavis\n\n\"\"\"### Import libraries\"\"\"\n\nfrom contextualized_topic_models.models.ctm import ZeroShotTM\nfrom contextualized_topic_models.utils.data_preparation import TopicModelDataPreparation\nfrom contextualized_topic_models.utils.preprocessing import WhiteSpacePreprocessing\nimport nltk\n\nimport pandas as pd\n\n\"\"\"### Load Data\"\"\"\n\nds_esp = pd.read_csv(\"/content/ds_esp_2019_2021.csv\")\n\nds_esp[\"final_tweet\"] = ds_esp[\"final_tweet\"].astype(str)\n\n\"\"\"### Preprocessing\n\nWe need text without punctuation to build the bag of word. Also, we might want only to have the most frequent words inside the BoW. 
Too many words might not help.\n\"\"\"\n\n# Commented out IPython magic to ensure Python compatibility.\n# %%capture\n# nltk.download('stopwords')\n\ndocuments = ds_esp[\"final_tweet\"].to_numpy().flatten()\nsp = WhiteSpacePreprocessing(documents, stopwords_language=['spanish','english'])\npreprocessed_documents, unpreprocessed_corpus, vocab = sp.preprocess()\n\npreprocessed_documents[:2]\n#unpreprocessed_corpus[:2]\nvocab[:10]\n\n\"\"\"### Export to be used by OCTIS\n\n#### Corpus\n\"\"\"\n\nwith open('corpus.csv', 'w') as f:\n for item in preprocessed_documents:\n f.write(\"%s\\n\" % item)\n\n\"\"\"#### Vocabulary\"\"\"\n\nvocab = open(\"vocabulary.txt\", \"r\")\n#print(vocab.read()) \ntype(vocab)\n\npreprocessed_documents = pd.read_csv(\"/content/corpus.csv\")\npreprocessed_documents\n\n\"\"\"### Topic Model Preparation\"\"\"\n\ntp = TopicModelDataPreparation(\"distiluse-base-multilingual-cased\")\n#tp = TopicModelDataPreparation(\"paraphrase-multilingual-mpnet-base-v2\")\n\ntraining_dataset = tp.fit(text_for_contextual=unpreprocessed_corpus, text_for_bow=preprocessed_documents)\n\nwith open('vocabulary_fit2.txt', 'w') as f:\n for item in vocab:\n f.write(\"%s\\n\" % item)\n\nlen(tp.vocab)\n\n\"\"\"### Training our Zero-Shot Contextualized Topic Model\"\"\"\n\nvocab = pd.read_csv(\"/content/vocab_fit.txt\")\nlen(vocab)\n\nctm = ZeroShotTM(bow_size=len(tp.vocab),\n contextual_size=512, \n n_components=10, \n num_epochs=30,\n model_type = \"prodLDA\",\n activation = \"softplus\",\n dropout = 0.6528626452408228,\n hidden_sizes=(100,100,100)\n )\nctm.fit(training_dataset) # run the model\n\n\"\"\"### Topics\"\"\"\n\nctm.get_topic_lists(10)\ndf_topics = pd.DataFrame(ctm.get_topic_lists(30))\ndf_topics.to_csv(\"Words_per_Topic_30.csv\", index = False)\n\n\"\"\"### Visualization\"\"\"\n\nlda_vis_data = ctm.get_ldavis_data_format(tp.vocab, training_dataset, n_samples=10)\n\nimport pyLDAvis as vis\nstem_pd = vis.prepare(**lda_vis_data)\nvis.display(stem_pd)\n\nvis.save_html(stem_pd, 'ctm_optimizado2.html')\n\n\"\"\"### Topic Prediction\"\"\"\n\ntopics_predictions = ctm.get_thetas(training_dataset, n_samples=10) # get all the topic predictions\n\ntopics1 = ctm.get_predicted_topics(dataset=training_dataset, n_samples=10)\ntopics2 = ctm.get_topic_lists()\n\ntopics3 = ctm.get_doc_topic_distribution(training_dataset)\n\ntopic4 = ctm.get_most_likely_topic(topics3)\n\nprint(len(preprocessed_documents))\nprint(len(unpreprocessed_corpus))\nd = {'Tweet':unpreprocessed_corpus,'Topic':topics1}\ndf = pd.DataFrame(d)\ndf\n\n\"\"\"### Merge Dataset\"\"\"\n\ndf['Topic_distribution'] = topics3.tolist()\ndf\n\ndf['topic_labels'] = df[\"Topic\"].apply(lambda row: ctm.get_topic_lists(10)[row-1])\ndf\n\ndf.to_csv(\"Topic_database_topic_prediction.csv\", index = False)\n\ndf_empty = pd.DataFrame({'Topic_distribution' : []})\n\ndf_empty['Topic_distribution'] = topics3.tolist()\ndf_empty.to_csv(\"Topic_distribution.csv\", index=False)\n\n\"\"\"## Imports\n\n### Unseen languages\n\"\"\"\n\ndf_pt = pd.read_csv(\"ds_pt_2019_2021.csv\")\ndf_pt\n\nportuguese_documents = df_pt[\"final_tweet\"].to_numpy().flatten()\nportuguese_documents\n\ntp = TopicModelDataPreparation(\"distiluse-base-multilingual-cased\")\n\ntesting_dataset = tp.transform(portuguese_documents) # create dataset for the testset\n\n\"\"\"### Topic Prediction\"\"\"\n\n# n_sample how many times to sample the distribution (see the documentation)\nportuguese_topics_predictions = ctm.get_thetas(testing_dataset, n_samples=10) # get all the topic 
predictions\n\nimport numpy as np\n\ntopic_number = np.argmax(portuguese_topics_predictions[0]) # get the topic id of the first document\nctm.get_topic_lists(10)[topic_number]\n\nimport numpy as np\n\naux = []\n\nfor elements in portuguese_topics_predictions:\n aux.append(np.argmax(elements))\nlen(aux)\n\ntopics7 = ctm.get_predicted_topics(dataset=testing_dataset, n_samples=10)\ntopics8 = ctm.get_topic_lists()\n\ntopics9 = ctm.get_doc_topic_distribution(testing_dataset)\n\ntopics9\n\nd = {'Tweet':portuguese_documents,'Topic':aux}\ndf_p = pd.DataFrame(d)\ndf_p\n\ndf_p['topic_labels'] = df_p[\"Topic\"].apply(lambda row: ctm.get_topic_lists(10)[row - 1])\n\n## Merge topic prediction\ndf_p['Topic_distribution'] = topics9.tolist()\ndf_p\n\ndf_p.to_csv(\"Topic_database_topic_prediction_pt.csv\", index = False)\n\ndf_empty2 = pd.DataFrame({'Topic_distribution' : []})\n\ndf_empty2['Topic_distribution'] = topics9.tolist()\ndf_empty2\ndf_empty2.to_csv(\"Topic_distribution_pt.csv\", index=False)\n\ndf_empty2\n\n\"\"\"### Save Model\"\"\"\n\nctm.save(models_dir=\"./\")\n\n\"\"\"## Visualizations\"\"\"\n\nctm.get_wordcloud(9,n_words=40,background_color=\"white\")\n\ntp_w_m = ctm.get_topic_word_matrix()\ndf_tp = pd.DataFrame(tp_w_m)\ndf_tp.to_csv(\"Topic_word_matrix.csv\", index = False)\n\ntopics = ctm.get_topics()\nd = {'Topics':topics}\ntopics_df = pd.DataFrame(d)\ntopics_df.to_csv(\"Topics.csv\", index=False)\n\nctm.get_most_likely_topic(doc_topic_distribution=topics3)\n\nimport json\nres = json.load(open(\"result.json\",'r'))\nres.keys()\n\nres['x_iters']['activation']\n\nres[\"f_val\"]\n\nimport matplotlib.pyplot as plt\n\nplt.plot(res[\"f_val\"])\n\n\"\"\"### Load Model\"\"\"\n\nctm = ZeroShotTM(bow_size=len(content_list),\n contextual_size=512, \n n_components=10, \n num_epochs=30,\n model_type = \"prodLDA\",\n activation = \"softplus\",\n dropout = 0.6528626452408228,\n hidden_sizes=(100,100,100)\n )\n\nctm.load(\"/content/test\", epoch=29)\n\nmy_file = open(\"vocabulary.txt\", \"r\")\n\ncontent = my_file.read()\n\ncontent_list = content.split(\"\\n\")\n\nmy_file.close()\n\nprint(content_list)\n\nlen(content_list)", "id": "3700492", "language": "Python", "matching_score": 5.724959850311279, "max_stars_count": 0, "path": "topic-model/zero_shot_cross_lingual.py" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"optimize-octis.ipynb\n\nAutomatically generated by Colaboratory.\n\nOriginal file is located at\n https://colab.research.google.com/drive/1kJ5SbYOefCHo3VU_eQ-D2Y1kTB118eih\n\n### Install Libraries\n\"\"\"\n\n# Commented out IPython magic to ensure Python compatibility.\n# %%capture\n# !pip install octis\n\n\"\"\"### Import libraries\"\"\"\n\nfrom octis.models.CTM import CTM\nfrom octis.dataset.dataset import Dataset\nfrom octis.optimization.optimizer import Optimizer\nfrom skopt.space.space import Real, Categorical, Integer\nfrom octis.evaluation_metrics.coherence_metrics import Coherence\nfrom octis.models.LDA import LDA\nfrom octis.evaluation_metrics.diversity_metrics import TopicDiversity\nfrom octis.evaluation_metrics.coherence_metrics import Coherence\n\n\"\"\"### Load Data\"\"\"\n\nfrom octis.dataset.dataset import Dataset\ndataset = Dataset()\ndataset.load_custom_dataset_from_folder(\"STEM\")\n\n\"\"\"### Load Topic Model\"\"\"\n\nmodel = CTM(num_topics=10,\n num_epochs=30,\n inference_type='zeroshot', \n bert_model=\"distiluse-base-multilingual-cased\")\n\n\"\"\"### Define Evaluation Metrics\"\"\"\n\n#Coherence\nnpmi = 
Coherence(texts=dataset.get_corpus())\n\n#Diversity\ntopic_diversity = TopicDiversity(topk=10)\n\nsearch_space = {\"num_layers\": Categorical({1, 2, 3}), \n \"num_neurons\": Categorical({100, 200, 300}),\n \"activation\": Categorical({'relu', 'softplus'}), \n \"dropout\": Real(0.0, 0.95)\n}\n\noptimization_runs=30\nmodel_runs=1\n\noptimizer=Optimizer()\noptimization_result = optimizer.optimize(\n model, dataset, npmi, search_space, number_of_call=optimization_runs, \n model_runs=model_runs, save_models=True, \n extra_metrics=None, # to keep track of other metrics\n plot_best_seen=True, plot_model=True, plot_name=\"B0_plot\",\n save_path='results2/test_ctm//')\n\n\"\"\"### Save Results of Optimization\"\"\"\n\noptimization_result.save_to_csv(\"results_ctm.csv\")\n\n\"\"\"### Analysis of the result\"\"\"\n\nimport json\nres = json.load(open(\"results2/test_ctm/result.json\",'r'))\nres.keys()\n\nres['x_iters']['activation']\nres[\"f_val\"]\n\nimport matplotlib.pyplot as plt\n\nplt.plot(res[\"f_val\"])\n\n\"\"\"### Get hyperparameters\"\"\"\n\nmodel.hyperparameters", "id": "6330834", "language": "Python", "matching_score": 0.3044741451740265, "max_stars_count": 0, "path": "optimization/optimize_octis.py" }, { "content": "import subprocess\n\n# Using readlines()\nfile1 = open('/Users/marianafernandez/Documents/FashionableBID/Gender-Analysis-of-STEM/data-raw/communities/2021.txt', 'r')\nLines = file1.readlines()\n\ncount = 0\n# Strips the newline character\nfor line in Lines:\n count += 1\n\n command = \"twarc2 search --archive --start-time '2021-02-01' --end-time '2021-03-14' 'conversation_id:{} OR (url:{} is:quote)' /Users/marianafernandez/Documents/FashionableBID/Gender-Analysis-of-STEM/data-raw/communities/2021/{}.jsonl\".format(line.strip(), line.strip(), line.strip())\n\n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n process.wait()\n\nprint(process.returncode)\n", "id": "4557249", "language": "Python", "matching_score": 4.328153133392334, "max_stars_count": 0, "path": "extraction-conversations/runner2021.py" }, { "content": "import subprocess\n \n# Using readlines()\nfile1 = open('2019.txt', 'r')\nLines = file1.readlines()\n \ncount = 0\n# Strips the newline character\nfor line in Lines:\n count += 1\n\n command = \"twarc2 search --archive --start-time '2019-02-01' --end-time '2019-03-14' 'conversation_id:{} OR (url:{} is:quote)' ./2019/{}.jsonl\".format(line.strip(), line.strip(), line.strip())\n\n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n process.wait()\n\n#print(process.returncode)", "id": "8946895", "language": "Python", "matching_score": 3.36083984375, "max_stars_count": 0, "path": "extraction-conversations/runner2019.py" } ]
3.59699
nhhughes
[ { "content": "#! /usr/bin/env python\n__author__ = 'nathan'\n\nimport networkx as nx\nimport sys\nimport time\nfrom Queue import PriorityQueue\nimport numpy as np\n\n\ndef main():\n print '\\n', \"*\" * 80\n s = \"* Welcome to the EEROS IQP Team Community Analysis Tool\"\n s += \" \" * (79 - len(s))\n s += \"*\"\n print s\n print \"*\" * 80, \"\\n\"\n if len(sys.argv) != 3:\n print \"Invalid Arguments!\"\n print \"Usage: ./Analysis u|r Repository-Name\"\n exit(1)\n if not (sys.argv[1] == 'r' or sys.argv[1] == 'u'):\n print \"Invalid Arguments!\"\n print \"Usage: ./Analysis u|r Repository-Name\"\n exit(1)\n repo = sys.argv[2]\n\n tree = clean_up_commit_tree(get_commit_tree_json(repo))\n\n if sys.argv[1] == 'r':\n process_days(tree, repo)\n else:\n pass\n\n print \"Finished analysis!\"\n print\n\n\ndef clean_up_commit_tree(tree):\n new_tree = nx.DiGraph()\n for i in tree.nodes():\n new_tree.add_node(tree.node[i]['hexsha'], author=tree.node[i]['author'],\n date=tree.node[i]['authored_date'], diff=tree.node[i]['diff'])\n for i in tree.edges():\n new_tree.add_edge(tree.node[i[0]]['hexsha'], tree.node[i[1]]['hexsha'])\n return new_tree\n\n\ndef get_commit_tree_json(repo):\n import json\n from networkx.readwrite import json_graph\n\n with open('./data/' + repo + ':commits', 'r') as f:\n data = f.read()\n json_data = json.loads(data)\n return json_graph.node_link_graph(json_data)\n\n\ndef process_days(tree, repo, edge_tolerance=0.5, init_time_val=5, deprecation_val=1):\n total_graph = nx.Graph()\n\n health_values = []\n commit_count = []\n actor_count = []\n closeness_values = []\n\n degrees = tree.in_degree()\n root = filter(lambda x: degrees[x] == 0, degrees)[0]\n traverse_graph(root, tree, total_graph, health_values, commit_count, actor_count, closeness_values,\n edge_tolerance=edge_tolerance, init_time_val=init_time_val, deprecation_val=deprecation_val)\n\n ending_date = max(map(lambda x: time.mktime(time.strptime(tree.node[x]['date'], \"%Y-%m-%dT%H:%M:%SZ\")), tree))\n finalize_graph(ending_date, total_graph)\n store_all_results_json(total_graph, health_values, commit_count, actor_count, closeness_values, repo)\n\n\ndef traverse_graph(root, graph, total_graph, health_values, commit_count, actor_count, closeness_values,\n edge_tolerance=0.5, init_time_val=5, deprecation_val=1):\n file_diffs = {}\n traverse_count = 0\n frontier = PriorityQueue()\n visited = set([])\n frontier.put((graph.node[root]['date'], (root, None)))\n while not frontier.empty():\n curr, parent = frontier.get()[1]\n traverse_count += 1\n do_something(curr, graph, total_graph, parent, health_values, file_diffs, commit_count, actor_count,\n closeness_values, edge_tolerance=edge_tolerance, init_time_val=init_time_val,\n deprecation_val=deprecation_val)\n if curr not in visited:\n visited.add(curr)\n for child in graph[curr]:\n if child not in visited:\n frontier.put((graph.node[child]['date'], (child, curr)))\n\n\ndef do_something(root, tree, total_graph, parent, health_values, file_diffs, commit_count, actor_count,\n closeness_values, edge_tolerance=0.5, init_time_val=5, deprecation_val=1):\n update_actors(tree, total_graph, root)\n date = tree.node[root]['date']\n date = time.mktime(time.strptime(date, \"%Y-%m-%dT%H:%M:%SZ\"))\n if parent:\n update_edge(total_graph, root, parent, tree, file_diffs, edge_tolerance=edge_tolerance,\n init_time_val=init_time_val, deprecation_val=deprecation_val)\n for node in total_graph.nodes():\n importance = 0.\n for edge in total_graph[node]:\n importance += 
total_graph[node][edge]['weight']\n total_graph.node[node]['importance'][date] = importance\n health_values.append((date, nx.estrada_index(total_graph)))\n closeness_values.append((date, np.mean(nx.closeness_centrality(total_graph).values())))\n actor_count.append((date, len(total_graph)))\n if len(commit_count) > 0:\n commit_count.append((date, commit_count[-1][1] + 1))\n else:\n commit_count.append((date, 1))\n\n\ndef update_actors(tree, actors, root):\n author = tree.node[root]['author']\n if author not in actors:\n date = tree.node[root]['date']\n to_update_with = time.mktime(time.strptime(date, \"%Y-%m-%dT%H:%M:%SZ\"))\n actors.add_node(author, name=author, entrance=to_update_with, importance={})\n\n\ndef update_edge(actors, child, parent, tree, file_diffs, edge_tolerance=0.5, init_time_val=5, deprecation_val=1):\n child_diff = tree.node[child]['diff']\n parent_diff = tree.node[parent]['diff']\n\n date = tree.node[child]['date']\n to_update_with = time.mktime(time.strptime(date, \"%Y-%m-%dT%H:%M:%SZ\"))\n\n child_author = tree.node[child]['author']\n parent_author = tree.node[parent]['author']\n\n if child_author == parent_author:\n return\n\n strong_interactions = get_direct_interactions(child_diff, parent_diff)\n weak_interactions, files_edited = get_indirect_interactions(child_diff, file_diffs)\n update_file_diffs(file_diffs, files_edited, child_author, init_time_val=init_time_val,\n deprecation_val=deprecation_val)\n\n for author in weak_interactions:\n if author != child_author:\n if author != parent_author:\n weight = weak_interactions[author] * 0.1\n else:\n weight = weak_interactions[author] * 0.1 + strong_interactions * 0.5\n update_weight_values(weight, to_update_with, parent_author, child_author, actors,\n edge_tolerance=edge_tolerance)\n\n\ndef get_direct_interactions(child_diff, parent_diff):\n strong_interactions = 0\n lines_edited = {}\n\n for diff_piece in child_diff:\n\n info = diff_piece.split(\":\")\n if info[0] != info[1] and info[0] == 'dev/null':\n pass\n else:\n if int(info[3]) > int(info[5]):\n local_lines_edited = set(range(int(info[2]) + int(info[3])))\n else:\n local_lines_edited = set(range(int(info[4]) + int(info[5])))\n lines_edited[info[1]] = local_lines_edited\n\n for diff_piece in parent_diff:\n\n info = diff_piece.split(\":\")\n if info[0] != info[1] and info[1] == 'dev/null':\n pass\n else:\n file_edited = info[0]\n if int(info[3]) > int(info[5]):\n local_lines_edited = range(int(info[2]) + int(info[3]))\n else:\n local_lines_edited = range(int(info[4]) + int(info[5]))\n if file_edited in lines_edited:\n for i in local_lines_edited:\n if i in lines_edited[file_edited]:\n strong_interactions += 1\n break\n\n return strong_interactions\n\n\ndef get_indirect_interactions(child_diffs, file_edits):\n indirect_authors = {}\n files_edited = []\n for difference in child_diffs:\n info = difference.split(\":\")\n if info[0] == 'dev/null':\n pass\n elif info[0] in file_edits:\n files_edited.append(info[0])\n for author in file_edits[info[0]].keys():\n if author in indirect_authors:\n indirect_authors[author] += 1\n else:\n indirect_authors[author] = 1\n else:\n files_edited.append(info[0])\n\n return indirect_authors, files_edited\n\n\ndef update_file_diffs(file_diffs, files_edited, child_author, init_time_val=5, deprecation_val=1):\n for file_object in files_edited:\n if file_object in file_diffs:\n file_diffs[file_object][child_author] = init_time_val\n else:\n file_diffs[file_object] = {child_author: init_time_val}\n for file_object in file_diffs:\n for 
author in file_diffs[file_object]:\n file_diffs[file_object][author] -= deprecation_val\n\n\ndef update_weight_values(weight, date, parent_author, child_author, actors, edge_tolerance=0.5):\n if parent_author not in actors[child_author]:\n actors.add_edge(parent_author, child_author, weights={date: weight}, weight=weight,\n start=[date], end=[], current=True)\n else:\n if actors[child_author][parent_author]['current']:\n new_weight = actors[child_author][parent_author]['weight'] + weight\n actors[parent_author][child_author]['weights'][date] = new_weight\n actors[parent_author][child_author]['weight'] = new_weight\n else:\n if abs(weight) > edge_tolerance:\n actors[parent_author][child_author]['current'] = True\n actors[parent_author][child_author]['start'].append(date)\n new_weight = actors[child_author][parent_author]['weight'] + weight\n actors[parent_author][child_author]['weights'][date] = new_weight\n actors[parent_author][child_author]['weight'] = new_weight\n\n\ndef finalize_graph(end_date, actors):\n for edge in actors.edges():\n if len(actors[edge[0]][edge[1]]['start']) > len(actors[edge[0]][edge[1]]['end']):\n actors[edge[0]][edge[1]]['end'].append(end_date)\n if len(actors[edge[1]][edge[0]]['start']) > len(actors[edge[1]][edge[0]]['end']):\n actors[edge[1]][edge[0]]['end'].append(end_date)\n\n\ndef store_all_results_json(total_graph, health_values, commit_count, actor_count, closeness_values, repo):\n from networkx.readwrite import json_graph\n import json\n\n total_graph.graph['health'] = health_values\n total_graph.graph['commits'] = commit_count\n total_graph.graph['actors'] = actor_count\n total_graph.graph['closeness'] = closeness_values\n data = json_graph.node_link_data(total_graph)\n with open('./data/' + repo + ':actors', 'w') as f:\n json.dump(data, f)\n\n\nif __name__ == '__main__':\n main()\n", "id": "4438623", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "Analysis.py" }, { "content": "#! 
/usr/bin/env python\n__author__ = 'nathan'\n\nimport sys\nimport re\nimport requests\nimport getpass\nimport git\nimport shutil\nimport os\nimport time\nimport subprocess\nimport tempfile\n\n\nfrom progressbar.widgets import Bar\nfrom progressbar import ProgressBar\nimport networkx as nx\nfrom collections import deque\n\n\ndef main():\n print_welcome()\n user, repo, command = parse_arguments(sys.argv)\n\n if command == 'r':\n create_new_record(repo, user)\n elif command == 'd':\n delete_records(repo)\n else:\n update_records(repo, user)\n\n\ndef print_welcome():\n print '\\n', \"*\" * 80\n s = \"* Welcome to the EEROS IQP Team Repository Analysis Tool\"\n s += \" \" * (79 - len(s))\n s += \"*\"\n print s\n print \"*\" * 80, \"\\n\"\n\n\ndef parse_arguments(arg_info):\n user = \"\"\n repo = \"\"\n command = \"\"\n\n usage_warning = (\"Invalid Arguments!\\n\" + \"Usage: ./GitParserRest.py u|rUser Repository-Name or \\n\" + \" \" * 7\n + \"./GitParserrest.py d Repository-Name\\n\")\n\n if len(arg_info) < 2:\n print usage_warning\n exit(1)\n else:\n command = arg_info[1]\n if command != 'u' and command != 'r' and command != 'd':\n print usage_warning\n exit(1)\n else:\n if command == 'r' or command == 'u':\n if len(arg_info) != 4:\n print usage_warning\n exit(1)\n user = arg_info[2]\n repo = arg_info[3]\n else:\n if len(arg_info) != 3:\n print usage_warning\n exit(1)\n repo = arg_info[2]\n\n return user, repo, command\n\n\ndef create_new_record(repo, user):\n print \"Requesting all forks of the repository\", repo, \"hosted by\", user\n username = raw_input(\"Please enter your Github username: \")\n password = <PASSWORD>.<PASSWORD>()\n auth_key = (username, password)\n forks = get_all_forks_from_repository(user, repo, auth_key)\n\n clone_urls = get_commit_urls(forks, user, repo)\n directory = clone_all_repositories(clone_urls)\n repositories = get_cloned_repositories(directory)\n\n commit_tree = create_commit_tree_structures_for_forks(repositories)\n\n update_with_diffs_from_clones(commit_tree)\n commit_tree = get_attributes_from_nodes(commit_tree)\n\n export_json(commit_tree, repo)\n # cleaning up\n print \"Removing temporary files and cleaning up...\"\n print\n shutil.rmtree(directory)\n\n\ndef get_all_forks_from_repository(user, repository_name, auth_key):\n urls = []\n url = 'https://api.github.com/repos/' + user + '/' + repository_name + '/forks'\n try:\n r = requests.get(url,\n auth=auth_key).json()\n urls = [i[u'forks_url'] for i in r]\n except (KeyError, TypeError):\n print \"Invalid Password!\"\n exit(1)\n return urls\n\n\ndef get_commit_urls(forks, user, repository_name):\n repos = [u'https://api.github.com/repos/' + user + u'/' + repository_name + u'/commits']\n p = re.compile('(\\S+/{2})(\\S+/)')\n progress = ProgressBar(widgets=['Creating Clone Urls', Bar()])\n\n if len(forks) > 0:\n for fork in progress(forks):\n matches = p.findall(fork)[0]\n repos.append(matches[0] + matches[1] + 'commits')\n else:\n progress.start()\n progress.finish()\n return repos\n\n\ndef clone_all_repositories(urls):\n clone_urls = []\n p = re.compile('(.*/{2})(.*\\.com/[^/]+)(/[^/]+)(/[^/]+)(/[^/]+$)')\n for url in urls:\n clone_urls.append(p.sub('\\g<1>github.com\\g<3>\\g<4>.git', url))\n temp_path = tempfile.mkdtemp(dir=os.getcwd())\n os.chdir(temp_path)\n progress = ProgressBar(widgets=['Cloning repositories', Bar()])\n for url in progress(clone_urls):\n os.chdir(tempfile.mkdtemp(dir=temp_path))\n git.Git().clone(url)\n os.chdir(temp_path)\n return temp_path\n\n\ndef 
get_cloned_repositories(directory):\n directories = os.listdir(directory)\n repositories = []\n progress = ProgressBar(widgets=['Getting Cloned Repository File Paths', Bar()])\n for directory in progress(directories):\n curr_directory = os.getcwd() + '/' + directory\n repo_name = os.listdir(curr_directory)[0]\n repositories.append(git.Repo(curr_directory + '/' + repo_name))\n return repositories\n\n\ndef create_commit_tree_structures_for_forks(repositories):\n\n progress = ProgressBar(widgets=['Creating commit history for forks', Bar()])\n tree = nx.DiGraph()\n root_index = -1\n for repository in progress(repositories):\n\n commits = []\n origin = repository.remote('origin')\n\n for branch in origin.refs:\n commits.append(branch.commit)\n\n for commit in commits:\n temp_root_index = update_tree(tree, commit, repository.git_dir)\n if root_index == -1 and temp_root_index != -1:\n root_index = temp_root_index\n\n tree.graph['root_index'] = root_index\n\n return tree\n\n\ndef update_tree(tree, root, repository_path):\n queue = deque()\n queue.append(root)\n visited = set([])\n index = -1\n while len(queue) != 0:\n node = queue.popleft()\n if node not in visited:\n visited.add(node)\n if len(node.parents) != 0:\n if node not in tree:\n tree.add_node(node, repo_path=repository_path)\n for parent in node.parents:\n if parent not in tree:\n tree.add_node(parent, repo_path=repository_path)\n tree.add_edge(parent, node)\n if parent not in visited:\n queue.append(parent)\n else:\n if node in tree:\n index = tree.nodes().index(node)\n else:\n tree.add_node(node, repo_path=repository_path)\n index = len(tree)\n return index\n\n\ndef update_with_diffs_from_clones(tree):\n progress = ProgressBar(widgets=['Processing Diff Information', Bar()], maxval=len(tree)).start()\n root_index = tree.graph['root_index']\n\n queue = deque()\n parent = tree.nodes()[root_index]\n tree.node[parent]['diff'] = ''\n queue.append(parent)\n visited = set()\n\n while len(queue) > 0:\n parent = queue.popleft()\n if parent not in visited:\n visited.add(parent)\n if progress.currval < len(tree):\n progress.update(progress.currval+1)\n for commit in tree[parent]:\n if commit not in visited:\n queue.append(commit)\n diff_result = ''\n try:\n if os.getcwd() != tree.node[commit]['repo_path']:\n os.chdir(tree.node[commit]['repo_path'])\n p = subprocess.Popen([\"git\", \"diff\", parent.hexsha, commit.hexsha], stdout=subprocess.PIPE)\n diff_result = p.communicate()[0]\n diff_result = diff_result.decode('utf8', 'ignore')\n diff_result = process_diff(diff_result)\n except UnicodeDecodeError as e:\n print e\n exit(1)\n\n tree.node[commit]['diff'] = diff_result\n\n progress.finish()\n\n\ndef process_diff(diff_text):\n p = re.compile('(-{3} .*)\\n(\\+{3} .*)\\n(@{2}.*@{2})')\n matches = p.findall(diff_text)\n diff_strings = [make_diff_string(i) for i in matches]\n return diff_strings\n\n\ndef make_diff_string(diff_set):\n s = \"\"\n s += diff_set[0][5:] + \":\"\n s += diff_set[1][5:]\n changes = diff_set[2]\n p = re.compile('(\\d+),(\\d+)')\n results = p.findall(changes)\n if len(results) != 2:\n s += \":0:0:0:0\"\n else:\n s += \":\" + results[0][0] + \":\" + results[0][1] + \":\" + results[1][0] + \":\" + results[1][1]\n return s\n\n\ndef get_attributes_from_nodes(tree):\n progress = ProgressBar(widgets=['Parsing Commit Information', Bar()], maxval=len(tree) + len(tree.edges())).start()\n\n new_graph = nx.DiGraph()\n for node in tree.nodes():\n diff = ''\n if 'diff' in tree.node[node]:\n diff = tree.node[node]['diff']\n\n committed_date = 
node.committed_date\n time.timezone = int(node.committer_tz_offset)\n committed_date = time.gmtime(committed_date)\n\n authored_date = node.authored_date\n time.timezone = int(node.author_tz_offset)\n authored_date = time.gmtime(authored_date)\n\n attributes = dict(authored_date=parse_time(committed_date), author=node.author.email,\n committer=node.committer.email, committed_date=parse_time(authored_date),\n message=node.message, diff=diff, hexsha=node.hexsha)\n new_graph.add_node(node.hexsha, **attributes)\n progress.update(progress.currval + 1)\n for edge in tree.edges():\n new_graph.add_edge(edge[0].hexsha, edge[1].hexsha)\n progress.update(progress.currval + 1)\n progress.finish()\n return new_graph\n\n\ndef parse_time(time_value):\n return time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time_value)\n\n\ndef export_json(commit_tree, repo):\n from networkx.readwrite import json_graph\n import json\n data = json_graph.node_link_data(commit_tree)\n os.chdir(os.path.expanduser(\"~/eeros_iqp/eeros-community-analysis-website\"))\n with open(\"./data/\" + repo + \":commits\", 'w') as f:\n json.dump(data, f)\n\n\ndef delete_records(repo):\n\n print \"Deleting Old Records\"\n file_name = './data/' + repo + \":commits\"\n p = subprocess.Popen(['rm', file_name], stderr=subprocess.PIPE)\n if len(p.communicate()[1]) == 0:\n print \"Finished deletion of Records!\"\n print\n else:\n print \"No Records exist for that repository!\"\n print\n\n\ndef update_records(repo, user):\n if not os.path.exists(os.getcwd() + \"data/\" + repo + \":commits\"):\n print \"No Records exists for the specified repository! Run with r flag instead\"\n print\n exit()\n\n print \"Requesting all forks of the repository\", repo, \"hosted by\", user\n username = raw_input(\"Please enter your Github username: \")\n password = <PASSWORD>()\n auth_key = (username, password)\n forks = get_all_forks_from_repository(user, repo, auth_key)\n forks.append(u'https://api.github.com/repos/' + user + u'/' + repo + u'/forks')\n\n print \"TBD\"\n\n # get most recent commit for each branch and fork using neo4j\n # old_commits = get_commits_from_database(gdb, repo)\n\n # use rest api to check if most recent commit for branches and forks is more up to date\n # if so grab most recent commits\n # updated_commits, changed = get_commits_from_forks(forks, old_commits, auth_key, user, repo)\n\n # if changed:\n #\n # update_neo4j_with_care(updated_commits, gdb, repo+\":commits\", repo+\":child\")\n #\n # print \"Finished updating database!\"\n # print\n\n # else:\n #\n # print \"Repositories are up to date!\"\n # print\n\n\ndef get_commits_from_database(database, repository_name):\n # q = \"match (node:`\" + repository_name + \":commits`) return node\"\n # commits = database.query(q, returns=client.Node)\n commit_dictionary = {}\n # for i in commits:\n # commit_dictionary[i[0].properties['hexsha']] = i[0]\n return commit_dictionary\n\n\n# TODO remove parents from attributes before returning\ndef get_commits_from_forks(forks, old_commits, auth_key, user, repo):\n new_commits = []\n graphs = []\n for fork in forks:\n graph = nx.DiGraph()\n branch_request_url = fork[:-5] + \"git/refs/heads\"\n commit_request_url = fork[:-5] + \"git/commits\"\n branches = requests.get(branch_request_url, auth=auth_key)\n for branch in branches.json():\n if branch[u'object'][u'sha'] not in old_commits:\n new_commits.append(branch[u'object'][u'sha'])\n\n diff_url_base = 'http://github.com/' + user + '/' + repo + '/commit/'\n for commit in new_commits:\n queue = deque()\n 
queue.append(commit)\n while len(queue) > 0:\n to_examine = queue.popleft()\n request_url = commit_request_url + '/' + to_examine\n commit_info = requests.get(request_url, auth=auth_key).json()\n parents = commit_info['parents']\n for parent in parents:\n if parent['sha'] not in old_commits:\n queue.append(parent['sha'])\n\n attributes = dict(authored_date=commit_info['author']['date'], author=commit_info['author']['email'],\n committer=commit_info['committer']['email'],\n committed_date=commit_info['committer']['date'], message=commit_info['message'],\n hexsha=to_examine, parents=[i['sha'] for i in parents])\n\n diff_file = requests.get(diff_url_base + to_examine + \".diff\").text\n attributes['diff'] = diff_file\n graph.add_node(attributes['hexsha'], **attributes)\n for node in graph.nodes():\n for parent in graph.node[node]['parents']:\n graph.add_edge(parent, node)\n graphs.append(graph)\n if len(new_commits) == 0:\n return None, False\n else:\n if len(graphs) > 1:\n return nx.compose_all(graphs), True\n else:\n return graphs[0], True\n\n\n\n\nif __name__ == '__main__':\n main()", "id": "8284097", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "GitParserRest.py" } ]
0
vpothukuchi
[ { "content": "#!/usr/bin/env python3\n\nimport adafruit_platformdetect\n\ndetector = adafruit_platformdetect.Detector()\n\nprint(\"Chip id: \", detector.chip.id)\nprint(\"Board id: \", detector.board.id)\nprint()\n\nprint(\"Is this a DragonBoard 410c?\", detector.board.DRAGONBOARD_410C)\nprint(\"Is this a Pi 3B+?\", detector.board.RASPBERRY_PI_3B_PLUS)\nprint(\"Is this a Pi 4B?\", detector.board.RASPBERRY_PI_4B)\nprint(\"Is this a 40-pin Raspberry Pi?\", detector.board.any_raspberry_pi_40_pin)\nprint(\"Is this a Raspberry Pi Compute Module?\", detector.board.any_raspberry_pi_cm)\nprint(\"Is this a BeagleBone Board?\", detector.board.any_beaglebone)\nprint(\"Is this a Giant Board?\", detector.board.GIANT_BOARD)\nprint(\"Is this a Coral Dev Board?\", detector.board.CORAL_EDGE_TPU_DEV)\nprint(\"Is this a Coral Dev Board Mini?\", detector.board.CORAL_EDGE_TPU_DEV_MINI)\nprint(\"Is this a MaaXBoard?\", detector.board.MAAXBOARD)\nprint(\"Is this a MaaXBoard Mini?\", detector.board.MAAXBOARD_MINI)\nprint(\"Is this a SiFive Unleashed? \", detector.board.SIFIVE_UNLEASHED)\nprint(\"Is this a PYNQ Board?\", detector.board.PYNQ_Z1 | detector.board.PYNQ_Z2)\nprint(\"Is this a Rock Pi board?\", detector.board.any_rock_pi_board)\nprint(\"Is this a NanoPi board?\", detector.board.any_nanopi)\nprint(\"Is this a Clockwork Pi board?\", detector.board.any_clockwork_pi_board)\nprint(\"Is this an embedded Linux system?\", detector.board.any_embedded_linux)\nprint(\"Is this a generic Linux PC?\", detector.board.GENERIC_LINUX_PC)\nprint(\"Is this a UDOO Bolt?\", detector.board.UDOO_BOLT)\nprint(\"Is this an ASUS Tinker Board?\", detector.board.ASUS_TINKER_BOARD)\nprint(\"Is this an STM32MP1 Board?\", detector.board.any_stm32mp1)\nprint(\n \"Is this an OS environment variable special case?\",\n detector.board.FTDI_FT232H\n | detector.board.FTDI_FT2232H\n | detector.board.MICROCHIP_MCP2221\n | detector.board.BINHO_NOVA\n | detector.board.GREATFET_ONE\n | detector.board.PICO_U2IF\n | detector.board.FEATHER_U2IF\n | detector.board.ITSYBITY_U2IF\n | detector.board.MACROPAD_U2IF\n | detector.board.QTPY_U2IF\n | detector.board.QT2040_TRINKEY_U2IF,\n)\n\nif detector.board.any_raspberry_pi:\n print(\"Raspberry Pi detected.\")\n\nif detector.board.any_jetson_board:\n print(\"Jetson platform detected.\")\n\nif detector.board.any_pynq_board:\n print(\"PYNQ platform detected.\")\n\nif detector.board.any_orange_pi:\n print(\"Orange Pi detected.\")\n\nif detector.board.any_odroid_40_pin:\n print(\"Odroid detected.\")\n\nif detector.board.any_onion_omega_board:\n print(\"Onion Omega detected.\")\n\nif detector.board.any_pine64_board:\n print(\"Pine64 device detected.\")\n\nif detector.board.any_rock_pi_board:\n print(\"Rock Pi device detected.\")\n\nif detector.board.any_clockwork_pi:\n print(\"Clockwork Pi device detected.\")\n\nif detector.board.any_asus_tinker_board:\n print(\"ASUS Tinker Board device detected.\")\n\nif detector.board.any_coral_board:\n print(\"Coral device detected.\")\n\nif detector.board.any_lubancat:\n print(\"LubanCat detected.\")\n", "id": "2513046", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "bin/detect.py" } ]
0
wojtekj89
[ { "content": "import requests\nfrom core import settings\n\ndef get_movie(title):\n \"\"\"Getting movie details from OMDB API\"\"\"\n params = {\n 't': title,\n 'apikey': settings.OMDB_API_KEY\n }\n\n r = requests.get(settings.OMDB_URL, params=params)\n response = r.json()\n\n if not r.ok:\n raise requests.exceptions(r.status_code, 'OMDB API error')\n\n else:\n response = r.json()\n if response['Response'] == 'False':\n \"\"\" When OMDB API can't find a movie status code is 200 \"\"\"\n raise (requests.exceptions.HTTPError(404, response['Error']))\n else:\n return response", "id": "2246469", "language": "Python", "matching_score": 1.44513738155365, "max_stars_count": 0, "path": "src/api/services.py" }, { "content": "from core import settings\nimport requests\nfrom decimal import Decimal\nfrom dateutil.parser import parse\nfrom bs4 import BeautifulSoup\n\nclass ECBScraper:\n\n def __init__(self):\n self.url = settings.ECB_URL\n\n def read_data(self, currency):\n r = requests.get(self.url.format(currency))\n\n if not r.status_code == 200:\n return\n # TODO: handle scraper error\n\n parser = BeautifulSoup(response, 'lxml')\n item = parser.find('item')\n\n return self.parse_response(item)\n\n def parse_response(self, item):\n stats = item.find('cb:statistics')\n currency = stats.find('cb:targetcurrency').text\n rate = stats.find('cb:value').text\n date = parse(item.find('dc:date').text)\n\n return {\n 'currency': currency,\n 'rate': float(rate),\n 'date': date\n }\n \n def read_archive_data(self, currency):\n r = requests.get(self.url.format(currency))\n\n if not r.status_code == 200:\n return\n # TODO: handle scraper error\n \n parser = BeautifulSoup(r.content, 'lxml')\n items = parser.find_all('item')\n rates = list()\n for item in items:\n rates.append(self.parse_response(item))\n \n return rates", "id": "6802108", "language": "Python", "matching_score": 2.5461952686309814, "max_stars_count": 0, "path": "src/api/services.py" }, { "content": "from __future__ import absolute_import, unicode_literals\n\nfrom celery import shared_task\nfrom celery.signals import celeryd_init\n\nfrom core import settings\nfrom api.models import ExchangeRate\nfrom api.services import ECBScraper\n\n\n@celeryd_init.connect\ndef at_start(sender, **k):\n \"\"\" Getting archive data on start of the app \"\"\"\n get_history_exchange_rates.delay()\n\n@shared_task\ndef get_exchange_rates():\n currencies = settings.CURRENCIES\n scraper = ECBScraper()\n\n for currency in currencies:\n data = scraper.read_data(currency)\n rate = ExchangeRate(currency=data.get('currency'), rate=float(data.get('rate')), date=data.get('date'))\n\n try:\n rate.save()\n except Exception as e:\n # TODO: handle error in saving\n print('saving error ', str(e))\n pass\n \n return\n\n@shared_task\ndef get_history_exchange_rates():\n currencies = settings.CURRENCIES\n scraper = ECBScraper()\n\n for currency in currencies:\n data = scraper.read_archive_data(currency)\n\n for item in data:\n rate = ExchangeRate(currency=item.get('currency'), rate=float(item.get('rate')), date=item.get('date'))\n\n try:\n rate.save()\n except Exception as e:\n # TODO: handle error in saving\n print('saving error ', str(e))\n pass\n \n return", "id": "4271501", "language": "Python", "matching_score": 2.5401151180267334, "max_stars_count": 0, "path": "src/api/tasks.py" }, { "content": "from __future__ import absolute_import, unicode_literals\nimport os\nfrom celery import Celery\nfrom celery.schedules import crontab\n\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 
'core.settings')\n\napp = Celery('ecb_feed')\napp.config_from_object('django.conf:settings', namespace='CELERY')\napp.autodiscover_tasks()\n\napp.conf.beat_schedule = {\n 'rates_daily': {\n 'task': 'api.tasks.get_exchange_rates',\n 'schedule': crontab(hour=15, minute=5)\n }\n}\n\[email protected](bind=True)\ndef debug_task(self):\n print('Request: {0!r}'.format(self.request))", "id": "1656357", "language": "Python", "matching_score": 0.8804444074630737, "max_stars_count": 0, "path": "src/core/celery.py" }, { "content": "from django.conf.urls import url\n\nfrom .views import AllRates, TodayRates\n\nurlpatterns = [\n url(r'all', AllRates.as_view(), name='all_rates_list'),\n url(r'today', TodayRates.as_view(), name='today_rates_list')\n]", "id": "5694978", "language": "Python", "matching_score": 1.203677773475647, "max_stars_count": 0, "path": "src/api/urls.py" }, { "content": "from django.conf.urls import url\n\nfrom rest_framework.routers import SimpleRouter\n\nfrom .views import MoviesView, CommentsView, TopView\n\nrouter = SimpleRouter()\nrouter.register(r'movies', MoviesView)\nrouter.register(r'comments', CommentsView, base_name='comments')\nurlpatterns = [\n url(r'top', TopView.as_view())\n]\n\nurlpatterns += router.urls\n", "id": "501070", "language": "Python", "matching_score": 0.7177223563194275, "max_stars_count": 0, "path": "src/api/urls.py" }, { "content": "from rest_framework.generics import ListAPIView\nfrom rest_framework.response import Response\nfrom rest_framework.status import HTTP_404_NOT_FOUND\n\nfrom .models import ExchangeRate\n\nfrom .serializers import ExchangeRateSerializer\n\nimport datetime\n\nclass AllRates(ListAPIView):\n serializer_class = ExchangeRateSerializer\n\n def get_queryset(self):\n currency = self.request.query_params.get('currency')\n\n if not currency:\n return ExchangeRate.objects.all()\n\n qs = ExchangeRate.objects.filter(currency=str(currency).upper())\n # TODO: handle when filtering with wrong currency\n\n return qs\n\nclass TodayRates(ListAPIView):\n serializer_class = ExchangeRateSerializer\n\n def get_queryset(self):\n today = datetime.datetime.now()\n qs = ExchangeRate.objects.filter(date=today.date())\n\n if not qs:\n yesterday = today - datetime.timedelta(days = 1)\n qs = ExchangeRate.objects.filter(date=yesterday.date())\n\n return qs\n", "id": "8195731", "language": "Python", "matching_score": 2.914576292037964, "max_stars_count": 0, "path": "src/api/views.py" }, { "content": "from rest_framework.serializers import ModelSerializer\n\nfrom .models import ExchangeRate\n\n\nclass ExchangeRateSerializer(ModelSerializer):\n class Meta:\n model = ExchangeRate\n fields = ('currency', 'rate', 'date')", "id": "1586341", "language": "Python", "matching_score": 1.424970269203186, "max_stars_count": 0, "path": "src/api/serializers.py" }, { "content": "from rest_framework.mixins import ListModelMixin, CreateModelMixin\nfrom rest_framework.viewsets import GenericViewSet\nfrom rest_framework.status import HTTP_400_BAD_REQUEST, HTTP_409_CONFLICT, HTTP_201_CREATED, HTTP_503_SERVICE_UNAVAILABLE\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom django.db import IntegrityError\nfrom django.db.models import Case, F, Sum, When\nfrom django.db.models.functions import DenseRank, Coalesce\nfrom django.db.models.fields import IntegerField\nfrom django.db.models.expressions import Window\n\nfrom django.shortcuts import get_object_or_404\n\nfrom rest_framework.filters import SearchFilter, OrderingFilter\n\nfrom .models 
import Movie, Comment\nfrom .serializers import MovieSerializer, MovieRequestSerializer, CommentSerializer\n\nfrom .services import get_movie\n\nimport datetime\n\n\nclass MoviesView(ListModelMixin, GenericViewSet):\n queryset = Movie.objects.all()\n serializer_class = MovieSerializer\n filter_backends = (SearchFilter, OrderingFilter)\n search_fields = ('title', 'genre')\n ordering_fields = ('title', 'year')\n\n def create(self, request, *args, **kwargs):\n request = MovieRequestSerializer(data=request.data)\n\n if not request.is_valid():\n return Response(request.errors, status=HTTP_400_BAD_REQUEST)\n\n title = request.data['title']\n\n try:\n omdb_data = get_movie(title)\n except:\n return Response({'error': 'OMDB API error'}, status=HTTP_503_SERVICE_UNAVAILABLE)\n\n movie = Movie(\n title=omdb_data['Title'],\n year=omdb_data['Year'],\n genre=omdb_data['Genre'],\n country=omdb_data['Country'],\n plot=omdb_data['Plot']\n )\n\n if Movie.objects.filter(title=omdb_data['Title']).exists():\n return Response({'error': 'Movie already exists'}, status=HTTP_409_CONFLICT)\n\n try:\n movie.save()\n except:\n return Response({'error': ''}, status=HTTP_409_CONFLICT)\n\n return Response(omdb_data, status=HTTP_201_CREATED)\n\n\nclass CommentsView(ListModelMixin, GenericViewSet, CreateModelMixin):\n serializer_class = CommentSerializer\n\n def get_queryset(self):\n \"\"\"Optionally filter to `movie_id` passed in querystring.\"\"\"\n queryset = Comment.objects.all()\n movie_id = self.request.query_params.get('movie_id', None)\n if movie_id is not None:\n try:\n movie_id = int(movie_id)\n except ValueError:\n raise exceptions.ValidationError({\n 'movie_id': [\n 'Incorrect movie_id type. Expected int.',\n ],\n })\n movie = get_object_or_404(Movie, id=movie_id)\n queryset = queryset.filter(movie_id=movie)\n return queryset\n\n\nclass TopView(APIView):\n queryset = Movie.objects.all()\n serializer_class = MovieSerializer\n\n def get(self, request):\n start = str(request.data.get('start'))\n end = str(request.data.get('end'))\n\n if not start or not end:\n return Response({'msg': 'Missing params'}, status=HTTP_400_BAD_REQUEST)\n\n try:\n start = datetime.datetime.strptime(start, '%d-%m-%Y')\n end = datetime.datetime.strptime(end, '%d-%m-%Y')\n except:\n return Response({'msg': 'Invalid date format, expected DD-MM-YYYY'}, status=HTTP_400_BAD_REQUEST)\n\n movies_query = Movie.objects.annotate(\n comments=Coalesce(\n Sum(Case(\n When(comment__timestamp__range=[start, end], then=1),\n output_field=IntegerField()\n )),\n 0\n ),\n rank=Window(\n expression=DenseRank(),\n order_by=F('comments').desc()\n )\n ).order_by('-comments', 'id')\n\n movies = [{\n 'id': movie.id,\n 'comments': movie.comments,\n 'rank': movie.rank\n } for movie in movies_query\n ]\n\n return Response(movies)\n", "id": "3660770", "language": "Python", "matching_score": 3.8223607540130615, "max_stars_count": 0, "path": "src/api/views.py" }, { "content": "from rest_framework.serializers import ModelSerializer, PrimaryKeyRelatedField, Serializer, CharField\nfrom .models import Movie, Comment\n\n\nclass MovieSerializer(ModelSerializer):\n class Meta:\n model = Movie\n fields = ('id', 'title', 'year', 'genre', 'country', 'plot')\n read_only_fields = ('year', 'genre', 'country', 'plot')\n\n\nclass MovieRequestSerializer(Serializer):\n \"\"\"Checking if POST request is valid \"\"\"\n title = CharField(max_length=100, required=True)\n\n def create(self, data):\n return {'title': data.get('title')}\n\n def update(self, instance, data):\n 
instance['title'] = data.get('title', instance['title'])\n\n\nclass CommentSerializer(ModelSerializer):\n movie_id = PrimaryKeyRelatedField(queryset=Movie.objects.all())\n\n class Meta:\n model = Comment\n fields = ('id', 'movie_id', 'text')\n", "id": "1722621", "language": "Python", "matching_score": 2.147564649581909, "max_stars_count": 0, "path": "src/api/serializers.py" }, { "content": "from django.db.models import Model, CharField, FloatField, DateField\n\nclass ExchangeRate(Model):\n currency = CharField(max_length=10)\n rate = FloatField()\n date = DateField()\n ", "id": "1426014", "language": "Python", "matching_score": 2.8383395671844482, "max_stars_count": 0, "path": "src/api/models.py" }, { "content": "from django.db.models import Model, CharField, ForeignKey, TextField, DateTimeField, CASCADE\n\nclass Movie(Model):\n title = CharField(max_length=100)\n year = CharField(max_length=25)\n genre = CharField(max_length=50)\n country = CharField(max_length=50)\n plot = TextField()\n\n def __str__(self):\n return self.title\n\nclass Comment(Model):\n movie_id = ForeignKey(Movie, on_delete=CASCADE)\n text = TextField()\n timestamp = DateTimeField(auto_now_add=True)\n", "id": "4069140", "language": "Python", "matching_score": 0.19642673432826996, "max_stars_count": 0, "path": "src/api/models.py" }, { "content": "from django.test import TestCase\n\nfrom unittest.mock import Mock, patch\n\nfrom requests.exceptions import RequestException\n\nfrom api.services import get_movie\n\nclass OMDBTest(TestCase):\n\n def test_real_request(self):\n r = get_movie('django')\n\n self.assertEqual(r['Title'], 'Django')\n\n @patch('api.services.requests.get')\n def test_exception(self, mock_service):\n \"\"\" Test if OMDB error is thrown \"\"\"\n mock_service.side_effect = RequestException()\n with self.assertRaises(RequestException):\n get_movie('django')\n", "id": "7426565", "language": "Python", "matching_score": 2.3408236503601074, "max_stars_count": 0, "path": "src/api/tests/test_omdb_service.py" }, { "content": "from django.test import TestCase\n\nfrom mixer.backend.django import mixer\n\nclass TestModels(TestCase):\n\n def test_movie_model(self):\n movie = mixer.blend('api.Movie')\n self.assertGreaterEqual(movie.pk, 1)\n\n def test_comment_model(self):\n comment = mixer.blend('api.Comment')\n self.assertGreaterEqual(comment.pk, 1)\n\n def test_movie_string_representation(self):\n movie = mixer.blend('api.Movie')\n self.assertEqual(movie.title, str(movie))", "id": "10067116", "language": "Python", "matching_score": 2.0198850631713867, "max_stars_count": 0, "path": "src/api/tests/test_models.py" }, { "content": "from django.test import RequestFactory, TestCase\n\nfrom mixer.backend.django import mixer\nfrom api.views import MoviesView, CommentsView, TopView\n\n\nclass MoviesListTest(TestCase):\n\n def setUp(self):\n self.factory = RequestFactory()\n self.movie1 = mixer.blend('api.Movie')\n self.movie2 = mixer.blend('api.Movie')\n\n def test_movies(self):\n request = self.factory.get('/movies')\n response = MoviesView.as_view({'get': 'list'})(request)\n self.assertEqual(response.status_code, 200)\n payload = response.data\n self.assertEqual(len(payload), 2)\n self.assertTrue(str(self.movie1) in str(payload))\n\nclass MoviesCreateTest(TestCase):\n \n def setUp(self):\n self.factory = RequestFactory()\n self.movie2 = mixer.blend('api.Movie', title=\"Existing\")\n self.title = 'mock'\n \n def test_add_movie(self):\n request = self.factory.post('/movies', data={'title': self.title})\n response = 
MoviesView.as_view({\"post\": \"create\"})(request)\n self.assertEqual(response.status_code, 201)\n payload = response.data\n self.assertTrue(self.title in str(payload).lower())\n \n def test_add_existing_movie(self):\n request = self.factory.post('/movies', data={'title': \"Existing\"})\n response = MoviesView.as_view({\"post\": \"create\"})(request)\n self.assertEqual(response.status_code, 409)\n payload = response.data\n self.assertEqual(len(payload), 1)\n\n def test_add_without_title(self):\n request = self.factory.post('/movies', data={'asd': self.title})\n response = MoviesView.as_view({\"post\": \"create\"})(request)\n self.assertEqual(response.status_code, 400)\n\n def test_create_existing_short_title(self):\n movie = mixer.blend('api.Movie', title=\"Game of Thrones\")\n request = self.factory.post('/movies', data={'title': \"Game\"})\n response = MoviesView.as_view({\"post\": \"create\"})(request)\n self.assertEqual(response.status_code, 409)\n\n \nclass MovieCreateAndGetTest(TestCase):\n\n def setUp(self):\n self.factory = RequestFactory()\n self.title = \"Test\"\n\n def test_add_and_read(self):\n request = self.factory.get('/movies')\n response = MoviesView.as_view({'get': 'list'})(request)\n self.assertEqual(response.status_code, 200)\n payload = response.data\n self.assertEqual(len(payload), 0)\n\n request = self.factory.post('/movies', data={'title': self.title})\n response = MoviesView.as_view({\"post\": \"create\"})(request)\n self.assertEqual(response.status_code, 201)\n\n request = self.factory.get('/movies')\n response = MoviesView.as_view({'get': 'list'})(request)\n self.assertEqual(response.status_code, 200)\n payload = response.data\n self.assertEqual(len(payload), 1)\n self.assertTrue(self.title in str(payload))\n\nclass CommentListTest(TestCase):\n\n def setUp(self):\n self.factory = RequestFactory()\n self.comment1 = mixer.blend('api.Comment')\n self.movie = mixer.blend('api.Movie')\n self.comment2 = mixer.blend('api.Comment', movie_id=self.movie)\n\n def test_all_comments(self):\n request = self.factory.get('/comments')\n response = CommentsView.as_view({'get': 'list'})(request)\n self.assertEqual(response.status_code, 200)\n payload = response.data\n self.assertEqual(len(payload), 2)\n\n def test_filtered_comments(self):\n request = self.factory.get('/comments?movie_id=' + str(self.movie.id))\n response = CommentsView.as_view({'get': 'list'})(request)\n self.assertEqual(response.status_code, 200)\n payload = response.data\n self.assertEqual(len(payload), 1)\n\nclass CommentCreateTest(TestCase):\n\n def setUp(self):\n self.factory = RequestFactory()\n self.movie1 = mixer.blend('api.Movie')\n\n def test_add_comment(self):\n request = self.factory.post('/comments', data={'movie_id': self.movie1.id, 'text': 'comment1'})\n response = CommentsView.as_view({\"post\": \"create\"})(request)\n self.assertEqual(response.status_code, 201)\n\n def test_add_comment_when_no_movie(self):\n request = self.factory.post('/comments', data={'movie_id': 111, 'text': 'comment1'})\n response = CommentsView.as_view({\"post\": \"create\"})(request)\n self.assertEqual(response.status_code, 400)\n \n def test_incorrect_id_type(self):\n request = self.factory.post('/comments', data={'movie_id': \"wrong\", 'text': 'comment1'})\n response = CommentsView.as_view({\"post\": \"create\"})(request)\n self.assertEqual(response.status_code, 400)\n\n def test_invalid_comment(self):\n request = self.factory.post('/comments', data={'movie_id': self.movie1, 'text': ''})\n response = 
CommentsView.as_view({\"post\": \"create\"})(request)\n self.assertEqual(response.status_code, 400)\n\n request = self.factory.post('/comments', data={'text': 'asd'})\n response = CommentsView.as_view({\"post\": \"create\"})(request)\n self.assertEqual(response.status_code, 400)\n\nclass TopListView(TestCase):\n\n def setUp(self):\n self.factory = RequestFactory()\n self.movie = mixer.blend('api.Movie')\n \n def test_missing_params(self):\n request = self.factory.get('/top', data={'start': \"\"})\n response = TopView.as_view()(request)\n self.assertEqual(response.status_code, 400)\n\n request = self.factory.get('/top', data={'end': \"\"})\n response = TopView.as_view()(request)\n self.assertEqual(response.status_code, 400)\n\n def test_invalid_format(self):\n request = self.factory.get('/top', data={'start': \"sdfdsf\", 'end': \"sdfdsfsdf\"})\n response = TopView.as_view()(request)\n self.assertEqual(response.status_code, 400)\n\n request = self.factory.get('/top', data={'start': \"06-06-2000\", 'end': \"2000-06-06\"})\n response = TopView.as_view()(request)\n self.assertEqual(response.status_code, 400)\n\n ## This test is crashing\n # def test_valid_request(self):\n # request = self.factory.get('/top', data={'start': \"06-12-1999\", 'end': \"01-01-2000\"})\n # response = TopView.as_view()(request)\n # print(response.data)\n # self.assertEqual(response.status_code, 200)", "id": "2232557", "language": "Python", "matching_score": 2.730973243713379, "max_stars_count": 0, "path": "src/api/tests/test_views.py" }, { "content": "from django.test import TestCase\nfrom .services import ECBScraper\n\nclass ScraperTest(TestCase):\n def setUp(self):\n self.s = ECBScraper()\n \n def test_reading_data(self):\n data = self.s.read_data('usd')\n print(data)\n self.assertTrue('USD' in str(data))\n", "id": "10952080", "language": "Python", "matching_score": 0.8891100287437439, "max_stars_count": 0, "path": "src/api/tests.py" } ]
2.083725
Dantali0n
[ { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom radloggerpy.hacking import checks\n\nfrom radloggerpy.tests import base\n\n\nclass TestHacking(base.TestCase):\n \"\"\"Hacking allows to define additional flake8 linting rules\n\n The hacking library is maintained by OpenStack and allows to define\n additional flake8 rules for code linting. These test cases assert that\n these additional flake8 rules work as intended.\n \"\"\"\n\n def setUp(self):\n super(TestHacking, self).setUp()\n\n def test_no_redundant_import_alias_offending(self):\n \"\"\"Settings an alias to the name of the import is not allowed\"\"\"\n\n offending_line = \"from X import Y as Y\"\n generator = checks.no_redundant_import_alias(offending_line)\n\n self.assertEqual(0, next(generator)[0])\n\n def test_no_redundant_import_alias_allowed(self):\n \"\"\"Any alias name that is not the exact of the import is allowed\"\"\"\n\n allowed_line = \"from X import Y as Z\"\n generator = checks.no_redundant_import_alias(allowed_line)\n\n self.assertRaises(StopIteration, next, generator)\n\n def test_check_builtins_gettext_offending(self):\n \"\"\"Not allowed as it uses native gettext function\"\"\"\n\n filename = \"radloggerpy/radlogger.py\"\n lines = [\n \"from X import Y as Y\",\n \"def function():\",\n \" print(_('builtin gettext'))\"\n ]\n\n generator = checks.check_builtins_gettext(\n \"logical_line\", [' _'], filename, lines, False)\n\n self.assertEqual(0, next(generator)[0])\n\n def test_check_builtins_gettext_allowed(self):\n \"\"\"Allowed as _i18n import ensure native gettext is overridden\"\"\"\n\n filename = \"radloggerpy/radlogger.py\"\n lines = [\n \"from radloggerpy._i18n import _\",\n \"def function():\",\n \" print(_('builtin gettext'))\"\n ]\n\n generator = checks.check_builtins_gettext(\n \"logical_line\", [' _'], filename, lines, False)\n\n self.assertRaises(StopIteration, next, generator)\n\n def test_check_oslo_i18n_wrapper_offending(self):\n \"\"\"Not allowed as i18n import should lead with _\"\"\"\n\n logical_line = \"from radloggerpy.i18n import _\"\n filename = \"radloggerpy/foo/bar.py\"\n\n generator = checks.check_oslo_i18n_wrapper(\n logical_line, filename, False)\n\n self.assertEqual(0, next(generator)[0])\n\n def test_check_oslo_i18n_wrapper_allowed(self):\n \"\"\"Allowed as i18n imort has leading _\"\"\"\n\n logical_line = \"from radloggerpy._i18n import _\"\n filename = \"radloggerpy/foo/bar.py\"\n\n generator = checks.check_oslo_i18n_wrapper(\n logical_line, filename, False)\n\n self.assertRaises(StopIteration, next, generator)\n\n def test_check_log_warn_deprecated_offending(self):\n logical_line = \"LOG.warn('example')\"\n filename = \"radloggerpy/foo/bar.py\"\n\n generator = checks.check_log_warn_deprecated(\n logical_line, filename)\n\n self.assertEqual(0, next(generator)[0])\n\n def test_check_log_warn_deprecated_allowed(self):\n logical_line = \"LOG.warning('example')\"\n filename = \"radloggerpy/foo/bar.py\"\n\n 
generator = checks.check_log_warn_deprecated(\n logical_line, filename)\n\n self.assertRaises(StopIteration, next, generator)\n\n def test_check_assert_is_instance_offending(self):\n logical_line = \"self.assertTrue(isinstance(observed, Type))\"\n filename = \"radloggerpy/tests/bar.py\"\n\n generator = checks.check_assert_is_instance(\n logical_line, filename)\n\n self.assertEqual(0, next(generator)[0])\n\n def test_check_assert_is_instance_allowed(self):\n logical_line = \"assertIsInstance(observed, type)\"\n filename = \"radloggerpy/tests/bar.py\"\n\n generator = checks.check_assert_is_instance(\n logical_line, filename)\n\n self.assertRaises(StopIteration, next, generator)\n\n def test_check_assert_empty_offending(self):\n logical_line = \"self.assertEqual(measured, [])\"\n filename = \"radloggerpy/tests/bar.py\"\n\n generator = checks.check_assert_empty(\n logical_line, filename)\n\n self.assertEqual(0, next(generator)[0])\n\n def test_check_assert_empty_allowed(self):\n logical_line = \"self.assertEqual([], measured)\"\n filename = \"radloggerpy/tests/bar.py\"\n\n generator = checks.check_assert_empty(\n logical_line, filename)\n\n self.assertRaises(StopIteration, next, generator)\n\n def test_check_assert_false_offending(self):\n logical_line = \"assertEqual(False, observed)\"\n filename = \"radloggerpy/tests/bar.py\"\n\n generator = checks.check_assert_false(\n logical_line, filename)\n\n self.assertEqual(0, next(generator)[0])\n\n def test_check_assert_false_allowed(self):\n logical_line = \"assertFalse(observed)\"\n filename = \"radloggerpy/tests/bar.py\"\n\n generator = checks.check_assert_false(\n logical_line, filename)\n\n self.assertRaises(StopIteration, next, generator)\n\n def test_check_assert_true_offending(self):\n logical_line = \"assertEqual(True, observed)\"\n filename = \"radloggerpy/tests/bar.py\"\n\n generator = checks.check_assert_true(\n logical_line, filename)\n\n self.assertEqual(0, next(generator)[0])\n\n def test_check_assert_true_allowed(self):\n logical_line = \"assertTrue(observed)\"\n filename = \"radloggerpy/tests/bar.py\"\n\n generator = checks.check_assert_false(\n logical_line, filename)\n\n self.assertRaises(StopIteration, next, generator)\n\n def test_check_python3_no_iteritems_offending(self):\n logical_line = \"input.iteritems()\"\n\n generator = checks.check_python3_no_iteritems(logical_line)\n\n self.assertEqual(0, next(generator)[0])\n\n def test_check_python3_no_iteritems_allowed(self):\n logical_line = \"six.iteritems(input)\"\n\n generator = checks.check_python3_no_iteritems(logical_line)\n\n self.assertRaises(StopIteration, next, generator)\n\n def test_check_no_basestring_offending(self):\n logical_line = \"self.assertIsInstance(basestring, object)\"\n\n generator = checks.check_no_basestring(logical_line)\n\n self.assertEqual(0, next(generator)[0])\n\n def test_check_no_basestring_allowed(self):\n logical_line = \"self.assertIsInstance(six.string_types, object)\"\n\n generator = checks.check_no_basestring(logical_line)\n\n self.assertRaises(StopIteration, next, generator)\n\n def test_check_python3_xrange_offending(self):\n logical_line = \"xrange(1, 2, 3)\"\n\n generator = checks.check_python3_xrange(logical_line)\n\n self.assertEqual(0, next(generator)[0])\n\n def test_check_python3_xrange_allowed(self):\n logical_line = \"range.range(1, 2)\"\n\n generator = checks.check_python3_xrange(logical_line)\n\n self.assertRaises(StopIteration, next, generator)\n\n def test_check_assert_called_once_with_offending(self):\n logical_line = 
\"m_mocked.asserthascalled(1, 2)\"\n filename = \"radloggerpy/tests/bar.py\"\n\n generator = checks.check_assert_called_once_with(\n logical_line, filename)\n\n self.assertEqual(0, next(generator)[0])\n\n def test_check_assert_called_once_with_offending_two(self):\n logical_line = \"m_mocked.calledoncewith(1, 2)\"\n filename = \"radloggerpy/tests/bar.py\"\n\n generator = checks.check_assert_called_once_with(\n logical_line, filename)\n\n self.assertEqual(0, next(generator)[0])\n\n def test_check_assert_called_once_with_allowed(self):\n logical_line = \"m_mocked.assert_called_once_with(1, 2)\"\n filename = \"radloggerpy/tests/bar.py\"\n\n generator = checks.check_assert_called_once_with(\n logical_line, filename)\n\n self.assertRaises(StopIteration, next, generator)\n\n def test_no_translate_debug_logs_offending(self):\n logical_line = \"LOG.debug(_('Shikato ga nai~'))\"\n filename = \"radloggerpy/tests/bar.py\"\n\n generator = checks.no_translate_debug_logs(\n logical_line, filename)\n\n self.assertEqual(0, next(generator)[0])\n\n def test_no_translate_debug_logs_allowed(self):\n logical_line = \"LOG.warning(_('Shikato ga nai~'))\"\n filename = \"radloggerpy/tests/bar.py\"\n\n generator = checks.no_translate_debug_logs(\n logical_line, filename)\n\n self.assertRaises(StopIteration, next, generator)\n", "id": "110560", "language": "Python", "matching_score": 3.4334919452667236, "max_stars_count": 0, "path": "radloggerpy/tests/hacking/test_hacking.py" }, { "content": "# Copyright (c) 2014 OpenStack Foundation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport os\nimport re\n\nfrom hacking import core\n\n# Guidelines for writing new hacking checks\n#\n# - Use only for Watcher specific tests. OpenStack general tests\n# should be submitted to the common 'hacking' module.\n# - Pick numbers in the range N3xx. 
Find the current test with\n# the highest allocated number and then pick the next value.\n# - Keep the test method code in the source file ordered based\n# on the N3xx value.\n# - List the new rule in the top level HACKING.rst file\n\n_all_log_levels = {\n 'reserved': '_', # this should never be used with a log unless\n # it is a variable used for a log message and\n # a exception\n 'error': '_LE',\n 'info': '_LI',\n 'warning': '_LW',\n 'critical': '_LC',\n 'exception': '_LE',\n}\n_all_hints = set(_all_log_levels.values())\n\n\nlog_warn = re.compile(r\"(.)*LOG\\.(warn)\\(\\s*('|\\\"|_)\")\nre_redundant_import_alias = re.compile(r\".*import (.+) as \\1$\")\n\n\[email protected]\ndef no_translate_debug_logs(logical_line, filename):\n \"\"\"Check for 'LOG.debug(_(' and 'LOG.debug(_Lx('\n\n * This check assumes that 'LOG' is a logger.\n \"\"\"\n for hint in _all_hints:\n if logical_line.startswith(\"LOG.debug(%s(\" % hint):\n yield(0, \"N319 Don't translate debug level logs\")\n\n\[email protected]\ndef check_assert_called_once_with(logical_line, filename):\n # Try to detect nonexistent mock methods like:\n # assertCalledOnceWith\n # assert_has_called\n # called_once_with\n if 'radloggerpy/tests/' in filename:\n if '.assert_called_once_with(' in logical_line:\n return\n uncased_line = logical_line.lower().replace('_', '')\n\n check_calls = ['.calledoncewith']\n if any(x for x in check_calls if x in uncased_line):\n msg = (\"N322: Possible use of no-op mock method. \"\n \"please use assert_called_once_with.\")\n yield (0, msg)\n\n if '.asserthascalled' in uncased_line:\n msg = (\"N322: Possible use of no-op mock method. \"\n \"please use assert_has_calls.\")\n yield (0, msg)\n\n\[email protected]\ndef check_python3_xrange(logical_line):\n if re.search(r\"\\bxrange\\s*\\(\", logical_line):\n yield(0, \"N325: Do not use xrange. Use range, or six.moves.range for \"\n \"large loops.\")\n\n\[email protected]\ndef check_no_basestring(logical_line):\n if re.search(r\"\\bbasestring\\b\", logical_line):\n msg = (\"N326: basestring is not Python3-compatible, use \"\n \"six.string_types instead.\")\n yield(0, msg)\n\n\[email protected]\ndef check_python3_no_iteritems(logical_line):\n if re.search(r\".*\\.iteritems\\(\\)\", logical_line):\n msg = (\"N327: Use six.iteritems() instead of dict.iteritems().\")\n yield(0, msg)\n\n\[email protected]\ndef check_assert_true(logical_line, filename):\n if 'radloggerpy/tests/' in filename:\n if re.search(r\"assertEqual\\(\\s*True,[^,]*(,[^,]*)?\\)\", logical_line):\n msg = (\"N328: Use assertTrue(observed) instead of \"\n \"assertEqual(True, observed)\")\n yield (0, msg)\n\n\[email protected]\ndef check_assert_false(logical_line, filename):\n if 'radloggerpy/tests/' in filename:\n if re.search(r\"assertEqual\\(\\s*False,[^,]*(,[^,]*)?\\)\", logical_line):\n msg = (\"N328: Use assertFalse(observed) instead of \"\n \"assertEqual(False, observed)\")\n yield (0, msg)\n\n\[email protected]\ndef check_assert_empty(logical_line, filename):\n if 'radloggerpy/tests/' in filename:\n msg = (\"N330: Use assertEqual(*empty*, observed) instead of \"\n \"assertEqual(observed, *empty*). 
*empty* contains \"\n \"{}, [], (), set(), '', \\\"\\\"\")\n empties = r\"(\\[\\s*\\]|\\{\\s*\\}|\\(\\s*\\)|set\\(\\s*\\)|'\\s*'|\\\"\\s*\\\")\"\n reg = r\"assertEqual\\(([^,]*,\\s*)+?%s\\)\\s*$\" % empties\n if re.search(reg, logical_line):\n yield (0, msg)\n\n\[email protected]\ndef check_assert_is_instance(logical_line, filename):\n if 'radloggerpy/tests/' in filename:\n if re.search(r\"assertTrue\\(\\s*isinstance\\(\\s*[^,]*,\\s*[^,]*\\)\\)\",\n logical_line):\n msg = (\"N331: Use assertIsInstance(observed, type) instead \"\n \"of assertTrue(isinstance(observed, type))\")\n yield (0, msg)\n\n\[email protected]\ndef check_log_warn_deprecated(logical_line, filename):\n \"\"\"LOG.warn is deprecated but still possible\n\n N333(watcher/foo.py): LOG.warn(\"example\")\n Okay(watcher/foo.py): LOG.warning(\"example\")\n \"\"\"\n\n msg = \"N333: Use LOG.warning due to compatibility with py3\"\n if log_warn.match(logical_line):\n yield (0, msg)\n\n\[email protected]\ndef check_oslo_i18n_wrapper(logical_line, filename, noqa):\n \"\"\"Check for radloggerpy.i18n usage.\n\n N340(radloggerpy/foo/bar.py): from radloggerpy.i18n import _\n Okay(radloggerpy/foo/bar.py): from radloggerpy._i18n import _\n Okay(radloggerpy/foo/bar.py): from radloggerpy.i18n import _ # noqa\n \"\"\"\n\n if noqa:\n return\n\n split_line = logical_line.split()\n modulename = os.path.normpath(filename).split('/')[0]\n bad_i18n_module = '%s.i18n' % modulename\n\n if len(split_line) > 1 and split_line[0] in ('import', 'from'):\n if (split_line[1] == bad_i18n_module or\n modulename !=\n 'radloggerpy' and split_line[1]\n in ('radloggerpy.i18n', 'radloggerpy._i18n')):\n msg = (\"N340: %(found)s is found. Use %(module)s._i18n instead.\"\n % {'found': split_line[1], 'module': modulename})\n yield (0, msg)\n\n\[email protected]\ndef check_builtins_gettext(logical_line, tokens, filename, lines, noqa):\n \"\"\"Check usage of builtins gettext _().\n\n N341(radloggerpy/foo.py): _('foo')\n Okay(radloggerpy/i18n.py): _('foo')\n Okay(radloggerpy/_i18n.py): _('foo')\n Okay(radloggerpy/foo.py): _('foo') # noqa\n \"\"\"\n\n if noqa:\n return\n\n modulename = os.path.normpath(filename).split('/')[0]\n\n if '%s/tests' % modulename in filename:\n return\n\n if os.path.basename(filename) in ('i18n.py', '_i18n.py'):\n return\n\n token_values = [t[1] for t in tokens]\n i18n_wrapper = '%s._i18n' % modulename\n\n if '_' in token_values:\n i18n_import_line_found = False\n for line in lines:\n split_line = [elm.rstrip(',') for elm in line.split()]\n if (len(split_line) > 1 and split_line[0] == 'from' and\n split_line[1] == i18n_wrapper and\n '_' in split_line):\n i18n_import_line_found = True\n break\n if not i18n_import_line_found:\n msg = (\"N341: _ from python builtins module is used. \"\n \"Use _ from %s instead.\" % i18n_wrapper)\n yield (0, msg)\n\n\[email protected]\ndef no_redundant_import_alias(logical_line):\n \"\"\"Checking no redundant import alias.\n\n N342(radloggerpy/foo.py): from X import Y as Y\n Okay(radloggerpy/foo.py): from X import Y as Z\n \"\"\"\n\n if re.match(re_redundant_import_alias, logical_line):\n yield(0, \"N342: No redundant import alias.\")\n", "id": "961205", "language": "Python", "matching_score": 1.6207548379898071, "max_stars_count": 0, "path": "radloggerpy/hacking/checks.py" }, { "content": "# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport gettext\nimport os\nfrom unittest import mock\n\nimport locale\nimport oslo_i18n\nfrom oslo_i18n._gettextutils import _BABEL_ALIASES\nfrom oslo_i18n import _locale\n\nimport radloggerpy\nfrom radloggerpy import _i18n\nfrom radloggerpy._i18n import DOMAIN\n\nfrom radloggerpy.tests import base\n\n\nclass Testi18n(base.TestCase):\n\n def setUp(self):\n super().setUp()\n\n def test_domain(self):\n self.assertEqual(_i18n.DOMAIN, _i18n._translators.domain)\n\n @mock.patch.object(oslo_i18n._gettextutils, '_AVAILABLE_LANGUAGES')\n @mock.patch.object(os, 'environ')\n def test_translate_nl(self, m_environ, m_languages_get):\n m_languages_get.return_value = None\n m_environ.get.side_effect = [\n 'nl', 'nl', 'nl', 'nl', 'radloggerpy/locale'\n ]\n\n m_translated_nl = _i18n._(\"RadLoggerPy opstarten met PID %s\")\n m_untranslated = _i18n._(\"Starting RadLoggerPy service on PID %s\")\n\n self.assertEqual(m_translated_nl,\n _i18n.translate(m_untranslated, 'nl_NL'))\n\n def test_get_available_languages(self):\n m_languages = ['en_US']\n\n self.assertEqual(m_languages, _i18n.get_available_languages())\n\n @mock.patch.object(oslo_i18n._gettextutils, '_AVAILABLE_LANGUAGES')\n @mock.patch.object(os, 'environ')\n def test_get_available_languages_real(self, m_environ, m_languages_get):\n \"\"\"Ensure all languages are registered if the localedir is set\n\n :param m_environ: patch os.environ.get to fake RADLOGGERPY_LOCALEDIR\n :param m_languages_get: reset _factory _AVAILALE_LANGUAGES variable\n :return:\n \"\"\"\n m_languages_get.return_value = None\n m_environ.get.return_value = 'radloggerpy/locale'\n\n m_languages = ['en_US']\n locale_identifiers = set(locale.windows_locale.values())\n localedir = os.environ.get(\n _locale.get_locale_dir_variable_name(DOMAIN))\n\n m_locale = radloggerpy.__path__[0] + '/locale'\n m_locale_dirs = [o for o in os.listdir(m_locale)\n if os.path.isdir(os.path.join(m_locale, o))]\n\n for m_locale_dir in m_locale_dirs:\n m_languages.extend(language for language in locale_identifiers\n if m_locale_dir in language)\n\n m_languages.extend(\n alias for alias, _ in _BABEL_ALIASES.items() if gettext.find(\n DOMAIN, localedir=localedir, languages=[alias])\n )\n\n self.assertItemsEqual(m_languages, _i18n.get_available_languages())\n", "id": "6040643", "language": "Python", "matching_score": 0.6527732610702515, "max_stars_count": 0, "path": "radloggerpy/tests/test_i18n.py" }, { "content": "#!/usr/bin/env python3\n#\n# Copyright (c) 2015-2017 Valve Corporation\n# Copyright (c) 2015-2017 LunarG, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Author: <NAME> 
<<EMAIL>>\n\nimport os\nimport subprocess\nimport sys\nfrom glob import glob\n\nif __name__ == '__main__':\n if (len(sys.argv) != 3):\n print(\"Usage: %s <VLLF_DIR> <VULKAN_HEADERS_INCLUDE_DIR\" % sys.argv[0])\n sys.exit(os.EX_USAGE)\n\n # Write commit ID to output header file\n with open(\"Android.mk\", \"w\") as header_file:\n\n # File Comment\n file_comment = '# *** THIS FILE IS GENERATED - DO NOT EDIT ***\\n'\n file_comment += '# See vlf_makefile_generator.py for modifications\\n'\n\n header_file.write(file_comment)\n # Copyright Notice\n copyright = '\\n'\n copyright += '# Copyright (c) 2015-2017 The Khronos Group Inc.\\n'\n copyright += '# Copyright (c) 2015-2017 Valve Corporation\\n'\n copyright += '# Copyright (c) 2015-2017 LunarG, Inc.\\n'\n copyright += '# Copyright (c) 2015-2017 Google Inc.\\n'\n copyright += '#\\n'\n copyright += '# Licensed under the Apache License, Version 2.0 (the \"License\");\\n'\n copyright += '# you may not use this file except in compliance with the License.\\n'\n copyright += '# You may obtain a copy of the License at\\n'\n copyright += '#\\n'\n copyright += '# http://www.apache.org/licenses/LICENSE-2.0\\n'\n copyright += '#\\n'\n copyright += '# Unless required by applicable law or agreed to in writing, software\\n'\n copyright += '# distributed under the License is distributed on an \"AS IS\" BASIS,\\n'\n copyright += '# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n'\n copyright += '# See the License for the specific language governing permissions and\\n'\n copyright += '# limitations under the License.\\n'\n copyright += '#\\n'\n copyright += '# Author: <NAME> <<EMAIL>>\\n'\n copyright += '#\\n\\n'\n header_file.write(copyright)\n\n # Destination directory is first (and only) arg\n vlf_dir = sys.argv[1]\n include_dir = sys.argv[2]\n include_dir = os.path.normpath(include_dir)\n\n # Get list of subdirectories in layer_factory (dest) dir\n layer_factory_path = \"%s/*/\" % vlf_dir\n layer_factory_dirs = glob(layer_factory_path)\n\n contents = ''\n\n # Output makefile target section for each factory layer in dest sub-directory\n for layer_factory_path in layer_factory_dirs:\n factory_layer = os.path.basename(os.path.normpath(layer_factory_path))\n contents += '\\n'\n contents += 'include $(CLEAR_VARS)\\n'\n contents += 'LOCAL_MODULE := VkLayer_%s\\n' % factory_layer\n contents += 'LOCAL_SRC_FILES += $(LAYER_DIR)/include/layer_factory.cpp\\n'\n contents += 'LOCAL_SRC_FILES += $(LVL_DIR)/layers/xxhash.c\\n'\n # Add *.cpp files (if any) to makefile dependencies\n for path, subdirs, files in os.walk(factory_layer):\n for file in files:\n if '.cpp' in file:\n contents += 'LOCAL_SRC_FILES += $(LOCAL_PATH)/$(SRC_DIR)/layer_factory/%s/%s\\n' & (factory_layer, file)\n contents += 'LOCAL_C_INCLUDES += %s\\n' % include_dir\n contents += 'LOCAL_C_INCLUDES += $(LOCAL_PATH)/$(LAYER_DIR)/include\\n'\n contents += 'LOCAL_C_INCLUDES += $(LOCAL_PATH)/$(LVL_DIR)/layers\\n'\n contents += 'LOCAL_C_INCLUDES += $(LOCAL_PATH)/$(LVL_DIR)/layers/generated\\n'\n contents += 'LOCAL_C_INCLUDES += $(LOCAL_PATH)/$(SRC_DIR)/layer_factory/%s\\n' % factory_layer\n contents += 'LOCAL_C_INCLUDES += $(LOCAL_PATH)/$(LVL_DIR)/loader\\n'\n contents += 'LOCAL_STATIC_LIBRARIES += layer_utils\\n'\n contents += 'LOCAL_CPPFLAGS += -std=c++11 -DVK_PROTOTYPES -Wall -Werror -Wno-unused-function -Wno-unused-const-variable\\n'\n contents += 'LOCAL_CPPFLAGS += -DVK_USE_PLATFORM_ANDROID_KHR -DVK_ENABLE_BETA_EXTENSIONS -fvisibility=hidden\\n'\n contents += 'LOCAL_LDLIBS := 
-llog\\n'\n contents += 'LOCAL_LDFLAGS += -Wl,-Bsymbolic\\n'\n contents += 'LOCAL_LDFLAGS += -Wl,--exclude-libs,ALL\\n'\n contents += 'include $(BUILD_SHARED_LIBRARY)\\n'\n\n\n header_file.write(contents)\n\n", "id": "5572068", "language": "Python", "matching_score": 2.238971471786499, "max_stars_count": 579, "path": "scripts/vlf_makefile_generator.py" }, { "content": "#!/usr/bin/env python3\n# Copyright (c) 2020-2021 LunarG, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Author: <NAME> <<EMAIL>>\n\"\"\"\nSome objects and functions to run tests for the VK_LAYER_LUNARG_device_simulation layer.\nThis script will be modified in the future when LunarG/VulkanTools is updated to use the\nCI test harness written by <NAME>. It is intended to be usable by VulkanTools\ndevelopers in their own repository clones, as well as in external CI using GitHub Actions\nand internal CI using Jenkins.\n\nOnly desktop Vulkan is currently supported; considerable redesign would be\nrequired to be able to do these exercises on Android.\n\"\"\"\n\nimport argparse\nimport json\nimport logging\nimport os\nimport subprocess\nimport sys\n\nlog = logging.getLogger(__name__)\n\ndef standalone_run(command_list, env=None):\n \"\"\"This utility is used for external execution of binaries.\n It is encapsulated here and passed as a parameter to the\n higher-level execution functions because a caller (e.g.\n internal CI) may prefer to use their own execution function.\n This function always returns the generated stdout and stderr\n of the executed process so the caller can examine them.\"\"\"\n p = subprocess.run(command_list, check=True, capture_output=True, env=env)\n return p.stdout, p.stderr\n\n# Our tests will have vulkaninfo output with some expected values and\n# a lot of values that we don't care about (and that are sensitive to the test\n# environment, like the SDK version or the graphics driver version), e.g.\n# a list of all extensions... When we compare our generated output with\n# expected output, we really just want to make sure the values we need\n# are present, and ignore everything else.\n#\n# We could implement this with a custom dict class that alters the\n# comparison operator. But for now we're implementing our own recursive\n# dictionary comparisons and list comparisons (because we likely have lists\n# of dictionaries).\n\ndef compare_lists(list_1, list_2, debug_indent=\"\"):\n \"\"\"\n A custom method for comparing lists.\n \"\"\"\n log.debug('%sComparing lists...', debug_indent)\n\n if len(list_1) != len(list_2):\n log.debug(\"%sLists unequal lengths! 
%d vs %d\", debug_indent, len(list_1), len(list_2))\n return False\n for value_1, value_2 in zip(list_1, list_2):\n if not check_types_and_compare_values(value_1,\n value_2,\n debug_indent + \"\\t\"):\n return False\n\n return True\n\n\ndef is_dict_subset(dict_1, dict_2, debug_indent=\"\"):\n \"\"\"\n A custom method for comparing dictionaries.\n Dictionaries do not have to be completely equal.\n dict_1 simply needs to be a subset contained within dict_2.\n \"\"\"\n log.debug(\"%sComparing dictionaries...\", debug_indent)\n\n for k in dict_1:\n try:\n if not check_types_and_compare_values(dict_1[k],\n dict_2[k],\n debug_indent + \"\\t\"):\n return False\n except KeyError:\n return False\n\n return True\n\n\ndef check_types_and_compare_values(struct_1, struct_2, debug_indent=\"\"):\n \"\"\"\n Does some error checking before passing the structs onto appropriate compare functions.\n \"\"\"\n\n if isinstance(struct_1, list) and isinstance(struct_2, list):\n return compare_lists(struct_1, struct_2, debug_indent)\n\n if isinstance(struct_1, dict) and isinstance(struct_2, dict):\n return is_dict_subset(struct_1, struct_2, debug_indent)\n\n if struct_1 != struct_2:\n log.debug(\"%sPrimitive comparison of %s and %s failed!\", debug_indent, struct_1, struct_2)\n return False\n\n return True\n\nclass DevsimLayerTestException(Exception):\n \"\"\"The exception class for this module.\"\"\"\n\ndef checkPortabilityPresent(json_object):\n \"\"\"The list of supported test cases differs when a device supports\n native portability, or whether emulated portability must be used.\n This function returns True if the passed json_object (which is\n a JSON-parsed vulkaninfo output) shows that native portability\n is present.\"\"\"\n extensions = json_object.get(\"ArrayOfVkExtensionProperties\")\n return extensions is not None and \"VK_KHR_portability_subset\" in [x[\"extensionName\"] for x in extensions]\n\n\n# All test cases are subclasses of BaseTest.\nclass BaseTest:\n \"\"\"\n A class that implements and defines some base functions and a base initializer for devsim tests.\n The devsim_input_files are a list of filenames; if they're relative, they're relative to\n the same directory where this script resides. 
Absolute paths may also be provided.\n The expected_vulkaninfo_output can be a dictionary containing subset vulkaninfo\n output to compare against, or can specify a relative or absolute filename that contains\n a more extensive set of vulkaninfo output.\n\n This test requires that a valid VK_LAYER_PATH and (for Linux) LD_LIBRARY_PATH\n are already set up in the enviroment.\n \"\"\"\n\n # A subclass should override this with the proper list of input\n # files for that particular class.\n devsim_input_files = []\n\n # The output may either be the name of a file in the same\n # directory as this script, or an inline object that specifies\n # a subset of the JSON object that vulkaninfo outputs.\n vulkaninfo_output = None\n\n # A test case should override this if it needs to call vulkaninfo\n # with something other than the default arguments.\n vulkaninfo_args = ['--json']\n\n # These class variables are used to set up the test environment.\n # Some are not yet exercised, but are included for completeness\n # and eventual expansion.\n instance_layers = \"VK_LAYER_LUNARG_device_simulation\"\n debug_enable = 0\n exit_on_error = 0\n portability = 0\n extension_list = 0\n memory_flags = 0\n\n def __init__(self,\n vulkaninfo_path,\n run=standalone_run):\n\n self.vulkaninfo_path = vulkaninfo_path\n self.run = run\n\n # Create the list of absolute paths we'll need for VK_DEVSIM_FILENAME.\n devsim_absolute_files = []\n for devsim_file in self.devsim_input_files:\n full_path = os.path.abspath(devsim_file)\n devsim_absolute_files.append(full_path)\n if not os.path.isfile(full_path):\n log.debug('devsim input file %s (normalized %s) does not exist',\n devsim_file, full_path)\n log.debug(\"devsim absolute files: %s\", devsim_absolute_files)\n\n # This dictionary of environment values will be added to the run environment\n # for this test case.\n self.test_environment = {\n \"VK_INSTANCE_LAYERS\": str(self.instance_layers),\n \"VK_DEVSIM_DEBUG_ENABLE\": str(self.debug_enable),\n \"VK_DEVSIM_EXIT_ON_ERROR\": str(self.exit_on_error),\n \"VK_DEVSIM_FILENAME\": os.pathsep.join(devsim_absolute_files),\n \"VK_DEVSIM_EMULATE_PORTABILITY_SUBSET_EXTENSION\": str(self.portability),\n \"VK_DEVSIM_MODIFY_EXTENSION_LIST\": str(self.extension_list),\n \"VK_DEVSIM_MODIFY_MEMORY_FLAGS\": str(self.memory_flags),\n }\n\n # Get the expected subset dictionary output for use later.\n if isinstance(self.vulkaninfo_output, dict):\n self.expected_vulkaninfo_output = self.vulkaninfo_output\n else:\n output_file = os.path.abspath(self.vulkaninfo_output)\n if not os.path.isfile(output_file):\n raise DevsimLayerTestException(\"expected output file does not exist: {}\".format(output_file))\n\n with open(output_file) as f:\n self.expected_vulkaninfo_output = json.load(f)\n\n # We need to be able to pass json_object for subclasses, even\n # though it is not used in the base class.\n # pylint: disable=unused-argument,no-self-use\n def checkJson(self, json_object):\n \"\"\"Tests that need to also directly examine the vulkaninfo output\n will override this function to do their additional checks. 
By\n default this function doesn't do any additional checking, making\n it appropriate for most of the tests.\"\"\"\n return True\n # pylint: enable=unused-argument,no-self-use\n\n def check(self):\n \"\"\"\n Base checker function that returns whether the test passed or failed.\n Compares output of the \"run\" function to an \"answer\" file.\n If the run output contains the values in the answer file, the test is considered passed.\n \"\"\"\n command_list = [self.vulkaninfo_path] + self.vulkaninfo_args\n\n # Prepare the environment needed for running this instance.\n # We want to make sure that we don't modify the environment\n # for any of the other test instances.\n env = os.environ.copy()\n env.update(self.test_environment)\n\n stdout, stderr = self.run(command_list, env=env)\n log.debug(\"stderr from %s: %s\", command_list, stderr)\n actual_vulkaninfo_output = json.loads(stdout)\n return (\n check_types_and_compare_values(self.expected_vulkaninfo_output,\n actual_vulkaninfo_output) and\n self.checkJson(actual_vulkaninfo_output)\n )\n\nclass DevsimTestReadMultipleInputFiles(BaseTest):\n \"\"\"\n Tests devsim layer's ability to read in multiple input files and\n properly modify device queries.\n \"\"\"\n devsim_input_files = [\n \"devsim_test2_in1.json\",\n \"devsim_test2_in2.json\",\n \"devsim_test2_in4.json\",\n \"devsim_test2_in5.json\"\n ]\n vulkaninfo_output = \"devsim_test2_gold.json\"\n\nclass PortabilityExtensionPresentEmulationOffTest(BaseTest):\n \"\"\"\n Tests if the VK_KHR_portability_subset extension is available on the device without emulation.\n \"\"\"\n devsim_input_files = [\n \"devsim_dummy_in.json\"\n ]\n vulkaninfo_output = {}\n\n def checkJson(self, json_object):\n return not checkPortabilityPresent(json_object)\n\nclass PortabilityExtensionPresentEmulationOnTest(BaseTest):\n \"\"\"\n Tests if the VK_KHR_portability_subset extension is available on the device with\n devsim emulation.\n \"\"\"\n devsim_input_files = [\n \"devsim_dummy_in.json\"\n ]\n vulkaninfo_output = {}\n portability = 1\n\n def checkJson(self, json_object):\n return checkPortabilityPresent(json_object)\n\nclass PortabilityNonEmulatedTest(BaseTest):\n \"\"\"\n Tests if devsim can modify portability queries for the device when\n the VK_KHR_portability_subset extension is supported by the device.\n \"\"\"\n devsim_input_files = [\n \"portability_test.json\"\n ]\n vulkaninfo_output = \"portability_test_gold.json\"\n vulkaninfo_args = ['--portability']\n\nclass PortabilityEmulatedTest(BaseTest):\n \"\"\"\n Tests if devsim can modify protablity queries for the device when\n the VK_KHR_portability_subset extension is emulated by devsim.\n \"\"\"\n devsim_input_files = [\n \"portability_test.json\"\n ]\n vulkaninfo_output = \"portability_test_gold.json\"\n vulkaninfo_args = ['--portability']\n portability = 1\n\n\ndef RunTests(vulkaninfo_path, run=standalone_run):\n \"\"\"Run all appropriate test cases for the current configuration.\n The caller can pass in their own custom run() function if\n desired.\"\"\"\n\n # We always run these devsim tests.\n test_cases = [\n DevsimTestReadMultipleInputFiles(vulkaninfo_path, run=run),\n ]\n\n # First check to see whether portability is present on the current\n # device. 
The list of test cases will be different.\n stdout, stderr = run([vulkaninfo_path, '--json'])\n log.debug(\"checking if portability is present: stderr=%s\", stderr)\n if checkPortabilityPresent(json.loads(stdout)):\n log.info(\"Testing devsim and native portability\")\n test_cases.extend([\n PortabilityNonEmulatedTest(vulkaninfo_path, run=run),\n # Emulation should not override if it detects that native\n # portability is present, so the Emulated Test should also pass.\n PortabilityEmulatedTest(vulkaninfo_path, run=run),\n ])\n else:\n log.info(\"Testing devsim and emulated portability\")\n test_cases.extend([\n PortabilityExtensionPresentEmulationOffTest(vulkaninfo_path, run=run),\n PortabilityExtensionPresentEmulationOnTest(vulkaninfo_path, run=run),\n PortabilityEmulatedTest(vulkaninfo_path, run=run),\n ])\n\n # Run all the established test cases.\n pass_count = 0\n fail_count = 0\n for test_case in test_cases:\n test_case_name = test_case.__class__.__name__\n log.info(\"RUNNING TEST %s\", test_case_name)\n passed = test_case.check()\n if passed:\n log.info(\"PASS %s\", test_case_name)\n pass_count += 1\n else:\n log.error(\"FAIL %s\", test_case_name)\n fail_count += 1\n\n if fail_count == 0:\n log.info(\"All %d tests PASS\", pass_count)\n return True\n\n log.error(\"%d test%s FAIL, %d test%s PASS\",\n fail_count, \"\" if fail_count == 1 else \"\",\n pass_count, \"\" if pass_count == 1 else \"\")\n return False\n\ndef main():\n \"\"\"\n Main function to run the basic tests.\n \"\"\"\n\n logging.basicConfig(level=logging.INFO)\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"vulkan_tools_install_dir\",\n help=\"Path to the install directory of the KhronosGroup/Vulkan-Tools repo.\"\n )\n parser.add_argument(\n \"--dir\",\n help=\"Specify the directory to run the tests in. Defaults to current working directory.\",\n dest=\"directory\",\n default=os.getcwd()\n )\n\n args = parser.parse_args()\n\n vulkaninfo_path = args.vulkan_tools_install_dir\n working_directory = os.path.abspath(args.directory)\n\n starting_working_directory = os.getcwd()\n os.chdir(working_directory)\n \n\n if sys.platform.startswith(\"win32\"):\n vulkaninfo_path = os.path.join(vulkaninfo_path, \"bin\", \"vulkaninfo.exe\")\n elif sys.platform.startswith(\"linux\"):\n vulkaninfo_path = os.path.join(vulkaninfo_path, \"bin\", \"vulkaninfo\")\n elif sys.platform.startswith(\"darwin\"):\n vulkaninfo_path = os.path.join(vulkaninfo_path, \"vulkaninfo\", \"vulkaninfo\")\n\n RunTests(vulkaninfo_path)\n\n os.chdir(starting_working_directory)\n\n\nif __name__ == '__main__':\n main()\n", "id": "5685831", "language": "Python", "matching_score": 2.3645904064178467, "max_stars_count": 579, "path": "tests/devsim_layer_tests.py" }, { "content": "# Copyright 2010 United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration.\n# All Rights Reserved.\n# Copyright 2012 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom oslo_config import cfg\nfrom oslo_log import _options\nfrom oslo_log import log\n\nfrom radloggerpy import config\nfrom radloggerpy import version\n\nLOG = log.getLogger(__name__)\nCONF = config.CONF\n\n\"\"\"Handles service like methods such as setting the correct log levels and\nparsing command line arguments.\"\"\"\n\n_DEFAULT_LOG_LEVELS = ['sqlalchemy=WARN', 'stevedore=INFO', 'iso8601=WARN',\n 'requests=WARN']\n\n\ndef setup_config_and_logging(argv=(), conf=cfg.CONF):\n \"\"\"register logging config options and parse commandline arguments\"\"\"\n log.register_options(conf)\n\n parse_args(argv)\n # Set log levels for external libraries\n cfg.set_defaults(_options.log_opts,\n default_log_levels=_DEFAULT_LOG_LEVELS)\n log.setup(conf, 'radloggerpy')\n # Write all configuration options and values to log\n conf.log_opt_values(LOG, log.DEBUG)\n\n\ndef parse_args(argv, default_config_files=None, default_config_dirs=None):\n \"\"\"Load information into config and allow program arguments to override\"\"\"\n default_config_files = (default_config_files or\n cfg.find_config_files(project='RadLoggerPy'))\n default_config_dirs = (default_config_dirs or\n cfg.find_config_dirs(project='RadLoggerPy'))\n cfg.CONF(argv[1:],\n project='RadLoggerPy',\n version=version.version_info.release_string(),\n default_config_dirs=default_config_dirs,\n default_config_files=default_config_files)\n\n\ndef list_opts():\n \"\"\"Required method by opts for oslo-config-generator\"\"\"\n return []\n", "id": "5387803", "language": "Python", "matching_score": 2.2059738636016846, "max_stars_count": 0, "path": "radloggerpy/config/config.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom oslo_log import log\n\nfrom radloggerpy import config\nfrom radloggerpy.tests import base\nfrom radloggerpy import version\n\nLOG = log.getLogger(__name__)\nCONF = config.CONF\n\n\nclass TestConfFixture(base.TestCase):\n \"\"\"Test conf fixture resetting config between tests\"\"\"\n\n def setUp(self):\n super(TestConfFixture, self).setUp()\n\n # store the value for the filename database option\n self.filename_opts = [i for i in config.database.DATABASE_OPTS\n if i.name == 'filename'][0]\n\n def test_cfg_reset_part_one(self):\n self.assertEqual(self.filename_opts.default,\n CONF.database.filename)\n CONF.database.filename = 'part_one'\n self.assertEqual('part_one', CONF.database.filename)\n\n def test_cfg_reset_part_two(self):\n self.assertEqual(self.filename_opts.default,\n CONF.database.filename)\n CONF.database.filename = 'part_two'\n self.assertEqual('part_two', CONF.database.filename)\n\n def test_cfg_parse_args_one(self):\n version_default = version.version_info.release_string()\n self.assertEqual(version_default, CONF.version)\n CONF.version = 'args_one'\n self.assertEqual('args_one', CONF.version)\n\n def test_cfg_parse_args_two(self):\n version_default = version.version_info.release_string()\n self.assertEqual(version_default, CONF.version)\n CONF.version = 'args_two'\n self.assertEqual('args_two', CONF.version)\n", "id": "36627", "language": "Python", "matching_score": 2.006941795349121, "max_stars_count": 0, "path": "radloggerpy/tests/config/test_conf_fixture.py" }, { "content": "# Copyright 2010-2011 OpenStack Foundation\n# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom oslo_config import cfg\nfrom oslo_log import log\nfrom oslotest import base\nimport testscenarios\n\nfrom radloggerpy.tests import conf_fixture\n\n\nCONF = cfg.CONF\ntry:\n log.register_options(CONF)\nexcept cfg.ArgsAlreadyParsedError:\n pass\nCONF.set_override('use_stderr', False)\n\n\nclass BaseTestCase(testscenarios.WithScenarios, base.BaseTestCase):\n \"\"\"Test base class.\"\"\"\n\n def setUp(self):\n super().setUp()\n\n # Use this fixture if class variables are changed so they get patched\n # back to the default value afterwards. 
This fixture also works for\n # ensuring singletons are patched back to default state.\n #\n # self.p_example = mock.patch.object(\n # file, 'Class',\n # new=file.Class)\n # self.m_example = self.p_example.start()\n # self.addCleanup(self.p_example.stop)\n #\n # This binds methods from the fixture to their respective class\n # has to be done for every method but only for python 2.7 so we can\n # almost get rid of this.\n # self.m_example.get_method = self.m_example.__get__(\n # self.m_example, file.Class)\n\n self.addCleanup(cfg.CONF.reset)\n\n\nclass TestCase(BaseTestCase):\n \"\"\"Test case base class for all unit tests.\"\"\"\n\n def setUp(self):\n super().setUp()\n self.useFixture(conf_fixture.ConfReloadFixture())\n # self.useFixture(conf_fixture.ConfFixture(cfg.CONF))\n", "id": "4479228", "language": "Python", "matching_score": 0.4980754554271698, "max_stars_count": 0, "path": "radloggerpy/tests/base.py" }, { "content": "# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom oslo_config import cfg\n\ndevices = cfg.OptGroup(name='devices',\n title='Configuration Options for measuring devices')\n\nDEVICES_OPTS = [\n cfg.IntOpt('concurrent_worker_amount',\n default=-1,\n min=-1,\n help='Number of concurrent workers to use in addition to the '\n 'main thread. Setting this to -1 means the value will be'\n 'based on nproc which returns the number of available'\n 'concurrent threads (not cores).'),\n cfg.IntOpt('minimal_polling_delay',\n default=1000,\n min=0,\n help='Minimum time in between a device pulling for data, going '\n 'to sleep and pulling for data again. Value is expressed '\n 'in milliseconds (1/1000 second).'),\n cfg.BoolOpt('restart_on_error',\n default=True,\n help='Should the device manager restart devices upon '\n 'encountering an error.'),\n cfg.IntOpt('max_consecutive_error',\n default=3,\n min=-1,\n help='Maximum amount of consecutive device restarts without '\n 'the device returning any measurements. -1 for unlimited.'\n 'This option is ignored when restart_on_error is false.')\n]\n\n\ndef register_opts(conf):\n conf.register_group(devices)\n conf.register_opts(DEVICES_OPTS, group=devices)\n\n\ndef list_opts():\n return [(devices, DEVICES_OPTS)]\n", "id": "9165338", "language": "Python", "matching_score": 2.952707052230835, "max_stars_count": 0, "path": "radloggerpy/config/devices.py" }, { "content": "# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom oslo_config import cfg\n\ndatabase = cfg.OptGroup(name='database',\n title='Configuration Options for database')\n\nDATABASE_OPTS = [\n cfg.StrOpt('filename',\n default='radlogger.sqlite',\n help='Name for database file')\n]\n\n\ndef register_opts(conf):\n conf.register_group(database)\n conf.register_opts(DATABASE_OPTS, group=database)\n\n\ndef list_opts():\n return [(database, DATABASE_OPTS)]\n", "id": "12501054", "language": "Python", "matching_score": 1.925092339515686, "max_stars_count": 0, "path": "radloggerpy/config/database.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport inspect\nfrom unittest import mock\n\nfrom oslo_config import cfg\nfrom oslo_log import log\nfrom radloggerpy import config\n\nfrom radloggerpy.common import dynamic_import as di\nfrom radloggerpy.config import opts\nfrom radloggerpy.tests import base\n\nLOG = log.getLogger(__name__)\nCONF = config.CONF\n\n\nclass TestConfOpts(base.TestCase):\n \"\"\"Test opts file to generate configuration samples\"\"\"\n\n def setUp(self):\n super(TestConfOpts, self).setUp()\n\n def test_module_names_complete(self):\n \"\"\"Test that all config files are property included in __init__.py\"\"\"\n\n # opts and cfg should not be included in init even though they are\n # modules\n remove = ['opts', 'cfg']\n\n # get all attributes of the config directory\n path = config.__path__[0]\n names = dir(config)\n modules = list()\n\n # for every attribute that is a module add it to modules\n for name in names:\n if inspect.ismodule(getattr(config, name, None)):\n modules.append(name)\n\n # remove the not to be included modules\n for r in remove:\n modules.remove(r)\n\n # ensure that opts._list_module_names() gets all modules properly\n self.assertEqual(modules, di.list_module_names(path, remove))\n\n def test_module_import(self):\n \"\"\"Assert correct import of config modules based on string name\"\"\"\n\n path = 'radloggerpy.config'\n\n self.assertEqual(\n [config.devices],\n di.import_modules(['devices'], path, opts.LIST_OPTS_FUNC_NAME))\n\n class FakeOpts(object):\n \"\"\"Simulate options module since list_opts won't distinguish\"\"\"\n fgroup = cfg.OptGroup(name='example_group',\n title='Example')\n\n fopts = [cfg.IntOpt('example_opts')]\n\n @staticmethod\n def list_opts():\n return [(TestConfOpts.FakeOpts.fgroup,\n TestConfOpts.FakeOpts.fopts)]\n\n @mock.patch.object(opts, 'import_modules')\n def test_list_opts(self, m_modules):\n \"\"\"Test that with the FakeOpts the expected available config options\"\"\"\n\n m_modules.return_value = [self.FakeOpts]\n\n self.assertEqual([(self.FakeOpts.fgroup, self.FakeOpts.fopts)],\n opts.list_opts())\n", "id": "11445740", "language": "Python", "matching_score": 3.958397388458252, "max_stars_count": 0, "path": "radloggerpy/tests/config/test_opts.py" }, { "content": "# Copyright 2016 OpenStack 
Foundation\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nThis is the single point of entry to generate the sample configuration\nfile for Watcher. It collects all the necessary info from the other modules\nin this package. It is assumed that:\n\n* every other module in this package has a 'list_opts' function which\n return a dict where\n * the keys are strings which are the group names\n * the value of each key is a list of config options for that group\n* the watcher.conf package doesn't have further packages with config options\n* this module is only used in the context of sample file generation\n\"\"\"\n\nimport os\n\nfrom radloggerpy.common.dynamic_import import import_modules\nfrom radloggerpy.common.dynamic_import import list_module_names\n\nLIST_OPTS_FUNC_NAME = \"list_opts\"\n\n\ndef list_opts():\n \"\"\"Grouped list of all the radloggerpy-specific configuration options\n\n :return: A list of ``(group, [opt_1, opt_2])`` tuple pairs, where ``group``\n is either a group name as a string or an OptGroup object.\n \"\"\"\n opts = list()\n package_path = os.path.dirname(os.path.abspath(__file__))\n module_names = list_module_names(package_path, ['opts'])\n imported_modules = import_modules(\n module_names, 'radloggerpy.config', LIST_OPTS_FUNC_NAME)\n for mod in imported_modules:\n opts.extend(mod.list_opts())\n return opts\n", "id": "5776449", "language": "Python", "matching_score": 2.2256011962890625, "max_stars_count": 0, "path": "radloggerpy/config/opts.py" }, { "content": "# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nfrom radloggerpy.common.dynamic_import import import_modules\nfrom radloggerpy.common.dynamic_import import list_module_names\nfrom radloggerpy.database.declarative_base import base\nfrom radloggerpy.database import models\n\n\ndef create_database_tables(engine):\n \"\"\"Creates the database table using the specified engine\"\"\"\n tables = _list_tables()\n base.metadata.create_all(bind=engine, tables=tables)\n\n\ndef _list_tables():\n \"\"\"Collection of all the sqlalchemy model __table__ objects\n\n :return: A Collection of ``__table__`` objects from sqlalchemy models.\n \"\"\"\n tables = list()\n\n modules = list()\n # create module_name and expected class tuples from list_module_names\n # if the module file is account_types the expected class is AccountTypes.\n for module_name in list_module_names(models.__path__[0]):\n modules.append((module_name, module_name.title().replace('_', '')))\n\n imported_modules = import_modules(\n modules, models.__name__, fetch_attribute=True)\n for module, attribute in imported_modules:\n # Access the modules class and subsequent __table__\n tables.append(getattr(module, attribute).__table__)\n return tables\n", "id": "4979103", "language": "Python", "matching_score": 3.645272970199585, "max_stars_count": 0, "path": "radloggerpy/database/create_database.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport importlib\nimport pkgutil\nfrom unittest import mock\n\n\nfrom oslo_log import log\nfrom radloggerpy import config\n\nfrom sqlalchemy import create_engine, text\n\nfrom radloggerpy.common.dynamic_import import import_modules\nfrom radloggerpy.common.dynamic_import import list_module_names\nfrom radloggerpy.database import create_database as cd\nfrom radloggerpy.database import models\nfrom radloggerpy.database.models import device\n\nfrom radloggerpy.tests import base\n\nLOG = log.getLogger(__name__)\nCONF = config.CONF\n\n\nclass TestCreateDatabase(base.TestCase):\n\n def setUp(self):\n super(TestCreateDatabase, self).setUp()\n\n self.m_path = models.__path__[0]\n\n def test_create_tables(self):\n m_engine = create_engine('sqlite:///:memory:', echo=True)\n cd.create_database_tables(m_engine)\n\n # connect to the in memory database and query the number of tables\n m_conn = m_engine.connect()\n m_text = text(\"SELECT name FROM sqlite_master WHERE type='table'\")\n result = m_conn.execute(m_text).fetchall()\n m_conn.close()\n\n # assert that as many tables as models were created.\n # WARNING: this will break when there are many-to-many relationships\n self.assertEqual(\n len(list_module_names(self.m_path)), len(result))\n\n @mock.patch.object(cd, 'import_modules')\n @mock.patch.object(cd, 'list_module_names')\n def test_list_tables(self, m_list_modules, m_import_models):\n \"\"\"Test list_tables list generation by accessing tuples\"\"\"\n\n # return a list with the supposed names of modules\n m_list_modules.return_value = ['a', 'b']\n\n # create mocked classes with the __table__ attribute\n m_a = mock.Mock(__table__='value1')\n m_b = mock.Mock(__table__='value2')\n\n # return tuples were the right side contains a string to access the\n # attribute of the left object. 
This object should have the __table__\n # attribute.\n m_import_models.return_value = [\n (mock.Mock(A=m_a), 'A'), (mock.Mock(B=m_b), 'B')]\n\n result = cd._list_tables()\n\n # assert _list_tables called list_model and import_models\n m_list_modules.assert_called_once_with(self.m_path)\n m_import_models.assert_called_once_with([('a', 'A'), ('b', 'B')],\n 'radloggerpy.database.models',\n fetch_attribute=True)\n\n self.assertEqual(['value1', 'value2'], result)\n\n def test_modules_names(self):\n \"\"\"Test that all model files are properly discovered\"\"\"\n\n # get all modules of the model directory using the directory __path__\n modules = list()\n for __, modname, ispkg in pkgutil.iter_modules(\n path=[models.__path__[0]]):\n modules.append(modname)\n\n # ensure that _list_model_names() gets all modules properly\n self.assertEqual(modules, list_module_names(self.m_path))\n\n def test_module_import(self):\n \"\"\"Assert correct import of model with returned tuple\"\"\"\n\n self.assertEqual(\n [(device, 'Device')], import_modules([('device', 'Device')],\n 'radloggerpy.database.models',\n fetch_attribute=True))\n\n @mock.patch.object(importlib, 'import_module')\n def test_module_import_exception(self, m_importlib):\n \"\"\"Assert raising exception on failed import of model\"\"\"\n m_module = ('fake_model', 'FakeModel')\n\n # ensure mocked return does not have attribute FakeModel\n m_importlib.return_value = object()\n\n self.assertRaises(AttributeError, import_modules, [m_module],\n self.m_path, fetch_attribute=True)\n", "id": "8724114", "language": "Python", "matching_score": 3.4891767501831055, "max_stars_count": 0, "path": "radloggerpy/tests/database/test_create_database.py" }, { "content": "# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport importlib\nimport pkgutil\n\nfrom oslo_log import log\nfrom radloggerpy import config\n\nLOG = log.getLogger(__name__)\nCONF = config.CONF\n\n\"\"\"Collection of methods which detect and import modules for a directory\"\"\"\n\n\ndef list_module_names(package_path, excludes=[]):\n \"\"\"Gather a collection of modules from the specified path with excludes\n\n :param package_path: Absolute path to 'package' directory.\n :param excludes: Collection of strings excluded if found in package_path\n :return: Collection of modules represented as strings.\n \"\"\"\n module_names = []\n for __, modname, ispkg in pkgutil.iter_modules(path=[package_path]):\n if modname in excludes or ispkg:\n LOG.debug(\"Exclude {} in list_module_names from {}\"\n .format(modname, package_path))\n else:\n module_names.append(modname)\n return module_names\n\n\ndef import_modules(modules, path, attribute=None, fetch_attribute=False):\n \"\"\"Import and return modules from a path if they have a given attribute\n\n :param modules: Collection of modules to import or collection of tuples\n containing (module, attribute).\n :param path: import path to get modules from\n :param attribute: attribute to filter modules by or None if tuples are used\n :param fetch_attribute: True to create module, attribute tuples, False to\n only return modules\n :exception AttributeError: When attribute does not exist for a given module\n :exception ImportError: If path + module does not exist\n :return: Collection of imported modules or collection of\n (module, attribute) tuples when fetch_attribute is True.\n \"\"\"\n imported_modules = []\n\n # If attribute is not set modules should be collection of tuples\n if not attribute:\n module_filters = modules\n else:\n # Generate a list of tuples were each module_name is associated with\n # the specified attribute.\n module_filters = list()\n for module_name in modules:\n module_filters.append((module_name, attribute))\n\n for modname, attrib in module_filters:\n mod = importlib.import_module(path + '.' + modname)\n if not hasattr(mod, attrib):\n msg = \"The module '%s.%s' should have a '%s' \"\\\n \"attribute.\" % (path, modname, attrib)\n raise AttributeError(msg)\n elif fetch_attribute:\n imported_modules.append((mod, attrib))\n else:\n imported_modules.append(mod)\n return imported_modules\n", "id": "6779471", "language": "Python", "matching_score": 0.24966847896575928, "max_stars_count": 0, "path": "radloggerpy/common/dynamic_import.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2020 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom copy import copy\nfrom datetime import datetime\nfrom unittest import mock\n\nfrom cliff.lister import Lister\n\nfrom radloggerpy.cli.argument import Argument\nfrom radloggerpy.cli.v1.measurement import measurement_list as mc\nfrom radloggerpy.database.objects.device import DeviceObject\n\nfrom radloggerpy.tests import base\n\n\nclass TestDeviceList(base.TestCase):\n\n def setUp(self):\n super(TestDeviceList, self).setUp()\n\n def test_arguments_base(self):\n bases = copy(mc.MeasurementList.__bases__)\n f_bases = tuple(base for base in bases if base != Lister)\n\n m_base = mock.patch.object(\n mc.MeasurementList, '__bases__', f_bases)\n with m_base:\n m_base.is_local = True\n t_device = mc.MeasurementList()\n t_device.register_arguments(mock.Mock())\n\n self.assertTrue('--device' in t_device._arguments.keys())\n self.assertTrue('--name' in t_device._arguments.keys())\n\n # ensure that is_local on the patch does not modify the actual bases\n self.assertEqual(bases, mc.MeasurementList.__bases__)\n\n @mock.patch.object(mc, 'super')\n def test_arguments(self, m_super):\n m_super.return_value = mock.Mock(\n arguments={'--device': Argument(), '--name': Argument()}\n )\n\n bases = copy(mc.MeasurementList.__bases__)\n f_bases = tuple(base for base in bases if base != Lister)\n\n m_base = mock.patch.object(\n mc.MeasurementList, '__bases__', f_bases)\n with m_base:\n m_base.is_local = True\n t_device = mc.MeasurementList()\n t_device.register_arguments(mock.Mock())\n\n m_super.assert_called_once()\n\n self.assertTrue('--device' in t_device.arguments.keys())\n self.assertTrue('--name' in t_device.arguments.keys())\n\n # ensure that is_local on the patch does not modify the actual bases\n self.assertEqual(bases, mc.MeasurementList.__bases__)\n\n @mock.patch.object(mc, 'super')\n def test_parser(self, m_super):\n\n m_parser = mock.Mock()\n m_super.return_value.get_parser.return_value = m_parser\n\n # remove ShowOne from the DeviceShow inheritance\n bases = copy(mc.MeasurementList.__bases__)\n f_bases = tuple(base for base in bases if base != Lister)\n\n m_base = mock.patch.object(\n mc.MeasurementList, '__bases__', f_bases)\n with m_base:\n m_base.is_local = True\n t_device = mc.MeasurementList()\n\n t_device._add_interfaces = mock.Mock()\n t_device._add_implementations = mock.Mock()\n t_device.register_arguments = mock.Mock()\n\n t_device.get_parser(\"test\")\n\n t_device._add_interfaces.assert_not_called()\n t_device._add_implementations.assert_not_called()\n\n t_device.register_arguments.assert_called_once_with(m_parser)\n\n # ensure that is_local on the patch does not modify the actual bases\n self.assertEqual(bases, mc.MeasurementList.__bases__)\n\n @mock.patch.object(mc, 'MeasurementObject')\n def test_take_action(self, m_dev_obj):\n\n # remove ShowOne from the DeviceShow inheritance\n bases = copy(mc.MeasurementList.__bases__)\n f_bases = tuple(base for base in bases if base != Lister)\n\n m_args = mock.Mock()\n m_args._get_kwargs.return_value = {}\n\n m_mod_dev = mock.Mock()\n m_mod_dev.id = 1337\n m_mod_dev.timestamp = datetime.utcnow()\n m_mod_dev.cpm = 12\n m_mod_dev.svh = 0.12\n m_mod_dev.device = DeviceObject(**{'id': 1})\n\n m_dev_obj.find.return_value = [m_mod_dev]\n\n m_base = mock.patch.object(\n mc.MeasurementList, '__bases__', f_bases)\n with m_base:\n m_base.is_local = True\n t_device = mc.MeasurementList()\n\n t_device.app = mock.Mock()\n\n t_result = 
t_device.take_action(m_args)\n\n self.assertEqual(t_result[1][0][0], m_mod_dev.timestamp)\n self.assertEqual(t_result[1][0][1], m_mod_dev.device.id)\n self.assertEqual(t_result[1][0][2], m_mod_dev.cpm)\n self.assertEqual(t_result[1][0][3], m_mod_dev.svh)\n\n # ensure that is_local on the patch does not modify the actual bases\n self.assertEqual(bases, mc.MeasurementList.__bases__)\n\n @mock.patch.object(mc, 'DeviceObject')\n @mock.patch.object(mc, 'MeasurementObject')\n def test_take_action_device(self, m_dev_obj, m_obj):\n\n # remove ShowOne from the DeviceShow inheritance\n bases = copy(mc.MeasurementList.__bases__)\n f_bases = tuple(base for base in bases if base != Lister)\n\n m_args = mock.Mock()\n m_args._get_kwargs.return_value = {'device': 1, 'name': 'test'}\n\n m_mock = mock.Mock()\n m_obj.return_value = m_mock\n\n m_dev_obj.find.return_value = [mock.Mock()]\n\n m_base = mock.patch.object(\n mc.MeasurementList, '__bases__', f_bases)\n with m_base:\n m_base.is_local = True\n t_device = mc.MeasurementList()\n\n t_device.app = mock.Mock()\n\n t_device.take_action(m_args)\n m_dev_obj.assert_called_once_with(**{'device': m_mock})\n\n # ensure that is_local on the patch does not modify the actual bases\n self.assertEqual(bases, mc.MeasurementList.__bases__)\n\n @mock.patch.object(mc, 'MeasurementObject')\n def test_take_action_none(self, m_dev_obj):\n\n # remove ShowOne from the DeviceShow inheritance\n bases = copy(mc.MeasurementList.__bases__)\n f_bases = tuple(base for base in bases if base != Lister)\n\n m_args = mock.Mock()\n m_args._get_kwargs.return_value = {}\n\n m_dev_obj.find.return_value = []\n\n m_base = mock.patch.object(\n mc.MeasurementList, '__bases__', f_bases)\n with m_base:\n m_base.is_local = True\n t_device = mc.MeasurementList()\n\n t_device.app = mock.Mock()\n\n self.assertRaises(RuntimeWarning, t_device.take_action, m_args)\n\n # ensure that is_local on the patch does not modify the actual bases\n self.assertEqual(bases, mc.MeasurementList.__bases__)\n", "id": "7951116", "language": "Python", "matching_score": 5.784298419952393, "max_stars_count": 0, "path": "radloggerpy/tests/cli/measurement/test_measurement_list.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2020 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom copy import copy\nfrom unittest import mock\n\nfrom cliff.lister import Lister\n\nfrom radloggerpy.cli.v1.device import device_list as dl\nfrom radloggerpy.device.device_manager import DeviceManager as dm\n\nfrom radloggerpy.tests import base\nfrom radloggerpy.types.device_interfaces import DeviceInterfaces\nfrom radloggerpy.types.device_types import DeviceTypes\n\n\nclass TestDeviceList(base.TestCase):\n\n def setUp(self):\n super(TestDeviceList, self).setUp()\n\n @mock.patch.object(dl, 'super')\n def test_arguments(self, m_super):\n m_super.return_value = mock.Mock(arguments={})\n\n bases = copy(dl.DeviceList.__bases__)\n f_bases = tuple(base for base in bases if base != Lister)\n\n m_base = mock.patch.object(\n dl.DeviceList, '__bases__', f_bases)\n with m_base:\n m_base.is_local = True\n t_device = dl.DeviceList()\n t_device.register_arguments(mock.Mock())\n\n m_super.assert_called_once()\n\n self.assertEqual(0, len(t_device._arguments))\n\n # ensure that is_local on the patch does not modify the actual bases\n self.assertEqual(bases, dl.DeviceList.__bases__)\n\n @mock.patch.object(dl, 'super')\n def test_parser(self, m_super):\n\n m_parser = mock.Mock()\n m_super.return_value.get_parser.return_value = m_parser\n\n # remove ShowOne from the DeviceShow inheritance\n bases = copy(dl.DeviceList.__bases__)\n f_bases = tuple(base for base in bases if base != Lister)\n\n m_base = mock.patch.object(\n dl.DeviceList, '__bases__', f_bases)\n with m_base:\n m_base.is_local = True\n t_device = dl.DeviceList()\n\n t_device._add_interfaces = mock.Mock()\n t_device._add_implementations = mock.Mock()\n t_device.register_arguments = mock.Mock()\n\n t_device.get_parser(\"test\")\n\n t_device._add_interfaces.assert_called_once_with()\n t_device._add_implementations.assert_called_once_with()\n t_device.register_arguments.assert_called_once_with(m_parser)\n\n # ensure that is_local on the patch does not modify the actual bases\n self.assertEqual(bases, dl.DeviceList.__bases__)\n\n @mock.patch.object(dl, 'DeviceObject')\n def test_take_action(self, m_dev_obj):\n\n # remove ShowOne from the DeviceShow inheritance\n bases = copy(dl.DeviceList.__bases__)\n f_bases = tuple(base for base in bases if base != Lister)\n\n m_args = mock.Mock()\n m_args._get_kwargs.return_value = {}\n\n m_mod_dev = mock.Mock()\n m_mod_dev.id = 1\n m_mod_dev.name = 'test'\n m_mod_dev.type = DeviceTypes.AVERAGE\n m_mod_dev.interface = DeviceInterfaces.SERIAL\n m_mod_dev.implementation = dm.get_device_implementations()[0].NAME\n m_dev_obj.find.return_value = [m_mod_dev]\n\n m_base = mock.patch.object(\n dl.DeviceList, '__bases__', f_bases)\n with m_base:\n m_base.is_local = True\n t_device = dl.DeviceList()\n\n t_device.app = mock.Mock()\n\n t_result = t_device.take_action(m_args)\n self.assertEqual(t_result[1][0][0], m_mod_dev.id)\n self.assertEqual(t_result[1][0][1], m_mod_dev.name)\n self.assertEqual(t_result[1][0][2], m_mod_dev.type)\n self.assertEqual(t_result[1][0][3], m_mod_dev.interface)\n self.assertEqual(t_result[1][0][4], m_mod_dev.implementation)\n\n # ensure that is_local on the patch does not modify the actual bases\n self.assertEqual(bases, dl.DeviceList.__bases__)\n\n @mock.patch.object(dl, 'DeviceObject')\n def test_take_action_none(self, m_dev_obj):\n\n # remove ShowOne from the DeviceShow inheritance\n bases = copy(dl.DeviceList.__bases__)\n f_bases = tuple(base for base in bases if base != Lister)\n\n 
m_args = mock.Mock()\n m_args._get_kwargs.return_value = {}\n\n m_dev_obj.find.return_value = []\n\n m_base = mock.patch.object(\n dl.DeviceList, '__bases__', f_bases)\n with m_base:\n m_base.is_local = True\n t_device = dl.DeviceList()\n\n t_device.app = mock.Mock()\n\n self.assertRaises(RuntimeWarning, t_device.take_action, m_args)\n\n # ensure that is_local on the patch does not modify the actual bases\n self.assertEqual(bases, dl.DeviceList.__bases__)\n", "id": "10579952", "language": "Python", "matching_score": 3.9455318450927734, "max_stars_count": 0, "path": "radloggerpy/tests/cli/device/test_device_list.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2020 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom copy import copy\nfrom unittest import mock\n\nfrom cliff.command import Command\n\nfrom radloggerpy.cli.v1.device import device_add_serial\nfrom radloggerpy.device.device_manager import DeviceManager as dm\n\nfrom radloggerpy.tests import base\nfrom radloggerpy.types.device_interfaces import DeviceInterfaces\nfrom radloggerpy.types.device_types import DeviceTypes\nfrom radloggerpy.types.serial_bytesize import SerialBytesizeTypes\nfrom radloggerpy.types.serial_parity import SerialParityTypes\nfrom radloggerpy.types.serial_stopbit import SerialStopbitTypes\n\n\nclass TestDeviceAddSerial(base.TestCase):\n\n def setUp(self):\n super(TestDeviceAddSerial, self).setUp()\n\n def test_arguments_base(self):\n bases = copy(device_add_serial.DeviceAddSerial.__bases__)\n f_bases = tuple(base for base in bases if base != Command)\n\n m_base = mock.patch.object(\n device_add_serial.DeviceAddSerial, '__bases__', f_bases)\n with m_base:\n m_base.is_local = True\n t_device = device_add_serial.DeviceAddSerial()\n t_device.register_arguments(mock.Mock())\n\n self.assertTrue('name' in t_device._arguments.keys())\n self.assertTrue('implementation' in t_device._arguments.keys())\n\n # ensure that is_local on the patch does not modify the actual bases\n self.assertEqual(bases, device_add_serial.DeviceAddSerial.__bases__)\n\n @mock.patch.object(device_add_serial, 'super')\n def test_arguments(self, m_super):\n m_super.return_value = mock.Mock(arguments={})\n\n bases = copy(device_add_serial.DeviceAddSerial.__bases__)\n f_bases = tuple(base for base in bases if base != Command)\n\n m_base = mock.patch.object(\n device_add_serial.DeviceAddSerial, '__bases__', f_bases)\n with m_base:\n m_base.is_local = True\n t_device = device_add_serial.DeviceAddSerial()\n t_device.register_arguments(mock.Mock())\n\n m_super.assert_called_once()\n self.assertTrue('port' in t_device._arguments.keys())\n self.assertTrue('baudrate' in t_device._arguments.keys())\n\n # ensure that is_local on the patch does not modify the actual bases\n self.assertEqual(bases, device_add_serial.DeviceAddSerial.__bases__)\n\n @mock.patch.object(device_add_serial, 'super')\n def test_parser(self, m_super):\n\n m_parser = mock.Mock()\n m_super.return_value.get_parser.return_value = m_parser\n\n bases = 
copy(device_add_serial.DeviceAddSerial.__bases__)\n f_bases = tuple(base for base in bases if base != Command)\n\n m_base = mock.patch.object(\n device_add_serial.DeviceAddSerial, '__bases__', f_bases)\n with m_base:\n m_base.is_local = True\n t_device = device_add_serial.DeviceAddSerial()\n\n t_device._add_implementations = mock.Mock()\n t_device.register_arguments = mock.Mock()\n\n t_device.get_parser(\"test\")\n\n t_device._add_implementations.assert_called_once()\n t_device.register_arguments.assert_called_once_with(m_parser)\n\n # ensure that is_local on the patch does not modify the actual bases\n self.assertEqual(bases, device_add_serial.DeviceAddSerial.__bases__)\n\n @mock.patch.object(device_add_serial, 'Dm')\n @mock.patch.object(device_add_serial, 'SerialDeviceObject')\n def test_take_action(self, m_dev_obj, m_dm):\n\n m_dm.get_device_class.return_type = mock.Mock(TYPE=DeviceTypes.AVERAGE)\n\n bases = copy(device_add_serial.DeviceAddSerial.__bases__)\n f_bases = tuple(base for base in bases if base != Command)\n\n m_args = mock.Mock()\n m_args._get_kwargs.return_value = {'detailed': None}\n\n m_mod_dev = mock.Mock()\n m_mod_dev.id = 1\n m_mod_dev.name = 'test'\n m_mod_dev.type = DeviceTypes.AVERAGE\n m_mod_dev.interface = DeviceInterfaces.SERIAL\n m_mod_dev.implementation = dm.get_device_implementations()[0].NAME\n m_mod_dev.port = '/dev/ttyUSB0'\n m_mod_dev.baudrate = 9600\n m_mod_dev.bytesize = SerialBytesizeTypes.FIVEBITS\n m_mod_dev.parity = SerialParityTypes.PARITY_NONE\n m_mod_dev.stopbits = SerialStopbitTypes.STOPBITS_ONE\n m_mod_dev.timeout = None\n m_dev_obj.add.return_value = m_mod_dev\n\n m_base = mock.patch.object(\n device_add_serial.DeviceAddSerial, '__bases__', f_bases)\n with m_base:\n m_base.is_local = True\n t_device = device_add_serial.DeviceAddSerial()\n\n t_device.app = mock.Mock()\n\n t_result = t_device.take_action(m_args)\n self.assertEqual(t_result.id, m_mod_dev.id)\n self.assertEqual(t_result.name, m_mod_dev.name)\n self.assertEqual(t_result.type, m_mod_dev.type)\n self.assertEqual(t_result.interface, m_mod_dev.interface)\n self.assertEqual(t_result.implementation, m_mod_dev.implementation)\n self.assertEqual(t_result.port, m_mod_dev.port)\n self.assertEqual(t_result.baudrate, m_mod_dev.baudrate)\n self.assertEqual(t_result.bytesize, m_mod_dev.bytesize)\n self.assertEqual(t_result.parity, m_mod_dev.parity)\n self.assertEqual(t_result.stopbits, m_mod_dev.stopbits)\n self.assertEqual(t_result.timeout, m_mod_dev.timeout)\n\n # ensure that is_local on the patch does not modify the actual bases\n self.assertEqual(bases, device_add_serial.DeviceAddSerial.__bases__)\n\n @mock.patch.object(device_add_serial, 'Dm')\n @mock.patch.object(device_add_serial, 'SerialDeviceObject')\n def test_take_action_error(self, m_dev_obj, m_dm):\n\n m_dm.get_device_class.return_type = mock.Mock(TYPE=DeviceTypes.AVERAGE)\n\n bases = copy(device_add_serial.DeviceAddSerial.__bases__)\n f_bases = tuple(base for base in bases if base != Command)\n\n m_args = mock.Mock()\n m_args._get_kwargs.return_value = {'detailed': None}\n\n m_dev_obj.add.side_effect = RuntimeError()\n\n m_base = mock.patch.object(\n device_add_serial.DeviceAddSerial, '__bases__', f_bases)\n with m_base:\n m_base.is_local = True\n t_device = device_add_serial.DeviceAddSerial()\n\n t_device.app = mock.Mock()\n\n self.assertRaises(RuntimeError, t_device.take_action, m_args)\n\n # ensure that is_local on the patch does not modify the actual bases\n self.assertEqual(bases, 
device_add_serial.DeviceAddSerial.__bases__)\n", "id": "10636954", "language": "Python", "matching_score": 4.634878635406494, "max_stars_count": 0, "path": "radloggerpy/tests/cli/device/test_device_add_serial.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2020 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom copy import copy\nfrom unittest import mock\n\nfrom cliff.lister import Lister\n\nfrom radloggerpy.cli.argument import Argument\nfrom radloggerpy.cli.v1.device import device_list_serial as dl\nfrom radloggerpy.device.device_manager import DeviceManager as dm\n\nfrom radloggerpy.tests import base\nfrom radloggerpy.types.device_interfaces import DeviceInterfaces\nfrom radloggerpy.types.device_types import DeviceTypes\nfrom radloggerpy.types.serial_bytesize import SerialBytesizeTypes\nfrom radloggerpy.types.serial_parity import SerialParityTypes\nfrom radloggerpy.types.serial_stopbit import SerialStopbitTypes\n\n\nclass TestDeviceList(base.TestCase):\n\n def setUp(self):\n super(TestDeviceList, self).setUp()\n\n @mock.patch.object(dl, 'super')\n def test_arguments(self, m_super):\n m_super.return_value = mock.Mock(\n arguments={'--port': Argument(), '--interface': Argument()}\n )\n\n bases = copy(dl.DeviceListSerial.__bases__)\n f_bases = tuple(base for base in bases if base != Lister)\n\n m_base = mock.patch.object(\n dl.DeviceListSerial, '__bases__', f_bases)\n with m_base:\n m_base.is_local = True\n t_device = dl.DeviceListSerial()\n t_device.register_arguments(mock.Mock())\n\n m_super.assert_called_once()\n\n self.assertTrue('--port' in t_device.arguments.keys())\n self.assertFalse('--interface' in t_device.arguments.keys())\n\n # ensure that is_local on the patch does not modify the actual bases\n self.assertEqual(bases, dl.DeviceListSerial.__bases__)\n\n @mock.patch.object(dl, 'super')\n def test_parser(self, m_super):\n\n m_parser = mock.Mock()\n m_super.return_value.get_parser.return_value = m_parser\n\n # remove ShowOne from the DeviceShow inheritance\n bases = copy(dl.DeviceListSerial.__bases__)\n f_bases = tuple(base for base in bases if base != Lister)\n\n m_base = mock.patch.object(\n dl.DeviceListSerial, '__bases__', f_bases)\n with m_base:\n m_base.is_local = True\n t_device = dl.DeviceListSerial()\n\n t_device._add_interfaces = mock.Mock()\n t_device._add_implementations = mock.Mock()\n t_device.register_arguments = mock.Mock()\n\n t_device.get_parser(\"test\")\n\n t_device._add_interfaces.assert_not_called()\n\n t_device._add_implementations.assert_called_once_with(\n DeviceInterfaces.SERIAL)\n t_device.register_arguments.assert_called_once_with(m_parser)\n\n # ensure that is_local on the patch does not modify the actual bases\n self.assertEqual(bases, dl.DeviceListSerial.__bases__)\n\n @mock.patch.object(dl, 'SerialDeviceObject')\n def test_take_action(self, m_dev_obj):\n\n # remove ShowOne from the DeviceShow inheritance\n bases = copy(dl.DeviceListSerial.__bases__)\n f_bases = tuple(base for base in bases if base != Lister)\n\n m_args 
= mock.Mock()\n m_args._get_kwargs.return_value = {}\n\n m_mod_dev = mock.Mock()\n m_mod_dev.id = 1\n m_mod_dev.name = 'test'\n m_mod_dev.type = DeviceTypes.AVERAGE\n m_mod_dev.interface = DeviceInterfaces.SERIAL\n m_mod_dev.implementation = dm.get_device_implementations()[0].NAME\n m_mod_dev.port = '/dev/ttyUSB0'\n m_mod_dev.baudrate = 9600\n m_mod_dev.bytesize = SerialBytesizeTypes.FIVEBITS\n m_mod_dev.parity = SerialParityTypes.PARITY_NONE\n m_mod_dev.stopbits = SerialStopbitTypes.STOPBITS_ONE\n m_mod_dev.timeout = None\n m_dev_obj.find.return_value = [m_mod_dev]\n\n m_base = mock.patch.object(\n dl.DeviceListSerial, '__bases__', f_bases)\n with m_base:\n m_base.is_local = True\n t_device = dl.DeviceListSerial()\n\n t_device.app = mock.Mock()\n\n t_result = t_device.take_action(m_args)\n self.assertEqual(t_result[1][0][0], m_mod_dev.id)\n self.assertEqual(t_result[1][0][1], m_mod_dev.name)\n self.assertEqual(t_result[1][0][2], m_mod_dev.type)\n self.assertEqual(t_result[1][0][3], m_mod_dev.interface)\n self.assertEqual(t_result[1][0][4], m_mod_dev.implementation)\n self.assertEqual(t_result[1][0][5], m_mod_dev.port)\n self.assertEqual(t_result[1][0][6], m_mod_dev.baudrate)\n self.assertEqual(t_result[1][0][7], m_mod_dev.bytesize)\n self.assertEqual(t_result[1][0][8], m_mod_dev.parity)\n self.assertEqual(t_result[1][0][9], m_mod_dev.stopbits)\n self.assertEqual(t_result[1][0][10], m_mod_dev.timeout)\n\n # ensure that is_local on the patch does not modify the actual bases\n self.assertEqual(bases, dl.DeviceListSerial.__bases__)\n\n @mock.patch.object(dl, 'SerialDeviceObject')\n def test_take_action_none(self, m_dev_obj):\n\n # remove ShowOne from the DeviceShow inheritance\n bases = copy(dl.DeviceListSerial.__bases__)\n f_bases = tuple(base for base in bases if base != Lister)\n\n m_args = mock.Mock()\n m_args._get_kwargs.return_value = {}\n\n m_dev_obj.find.return_value = []\n\n m_base = mock.patch.object(\n dl.DeviceListSerial, '__bases__', f_bases)\n with m_base:\n m_base.is_local = True\n t_device = dl.DeviceListSerial()\n\n t_device.app = mock.Mock()\n\n self.assertRaises(RuntimeWarning, t_device.take_action, m_args)\n\n # ensure that is_local on the patch does not modify the actual bases\n self.assertEqual(bases, dl.DeviceListSerial.__bases__)\n", "id": "562293", "language": "Python", "matching_score": 4.746581077575684, "max_stars_count": 0, "path": "radloggerpy/tests/cli/device/test_device_list_serial.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2020 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom copy import copy\nfrom unittest import mock\n\nfrom cliff.show import ShowOne\nfrom sqlalchemy.orm.exc import MultipleResultsFound\n\nfrom radloggerpy.cli.v1.device import device_show\nfrom radloggerpy.device.device_manager import DeviceManager as dm\n\nfrom radloggerpy.tests import base\nfrom radloggerpy.types.device_interfaces import DeviceInterfaces\nfrom radloggerpy.types.device_interfaces import INTERFACE_CHOICES\nfrom radloggerpy.types.device_types import DeviceTypes\nfrom radloggerpy.types.serial_bytesize import SerialBytesizeTypes\nfrom radloggerpy.types.serial_parity import SerialParityTypes\nfrom radloggerpy.types.serial_stopbit import SerialStopbitTypes\n\n\nclass TestDeviceShow(base.TestCase):\n\n def setUp(self):\n super(TestDeviceShow, self).setUp()\n\n @mock.patch.object(device_show, 'super')\n def test_parser(self, m_super):\n\n m_parser = mock.Mock()\n m_super.return_value.get_parser.return_value = m_parser\n\n bases = copy(device_show.DeviceShow.__bases__)\n f_bases = tuple(base for base in bases if base != ShowOne)\n\n m_base = mock.patch.object(\n device_show.DeviceShow, '__bases__', f_bases)\n with m_base:\n m_base.is_local = True\n t_device = device_show.DeviceShow()\n\n t_device._add_interfaces = mock.Mock()\n t_device._add_implementations = mock.Mock()\n t_device.register_arguments = mock.Mock()\n\n t_device.get_parser(\"test\")\n\n t_device._add_interfaces.assert_called_once()\n t_device._add_implementations.assert_called_once()\n t_device.register_arguments.assert_called_once_with(m_parser)\n\n # ensure that is_local on the patch does not modify the actual bases\n self.assertEqual(bases, device_show.DeviceShow.__bases__)\n\n @mock.patch.object(device_show, 'DeviceObject')\n def test_take_action(self, m_dev_obj):\n\n bases = copy(device_show.DeviceShow.__bases__)\n f_bases = tuple(base for base in bases if base != ShowOne)\n\n m_args = mock.Mock()\n m_args._get_kwargs.return_value = {'detailed': None}\n\n m_mod_dev = mock.Mock()\n m_mod_dev.id = 1\n m_mod_dev.name = 'test'\n m_mod_dev.type = DeviceTypes.AVERAGE\n m_mod_dev.interface = DeviceInterfaces.SERIAL\n m_mod_dev.implementation = dm.get_device_implementations()[0].NAME\n m_dev_obj.find.return_value = m_mod_dev\n\n m_base = mock.patch.object(\n device_show.DeviceShow, '__bases__', f_bases)\n with m_base:\n m_base.is_local = True\n t_device = device_show.DeviceShow()\n\n t_device.app = mock.Mock()\n\n t_result = t_device.take_action(m_args)\n self.assertEqual(t_result[1][0], m_mod_dev.id)\n self.assertEqual(t_result[1][1], m_mod_dev.name)\n self.assertEqual(t_result[1][2], m_mod_dev.type)\n self.assertEqual(t_result[1][3], m_mod_dev.interface)\n self.assertEqual(t_result[1][4], m_mod_dev.implementation)\n\n # ensure that is_local on the patch does not modify the actual bases\n self.assertEqual(bases, device_show.DeviceShow.__bases__)\n\n @mock.patch.object(device_show, 'SerialDeviceObject')\n @mock.patch.object(device_show, 'DeviceObject')\n def test_take_action_details_serial(self, m_dev_obj, m_dev_ser_obj):\n\n bases = copy(device_show.DeviceShow.__bases__)\n f_bases = tuple(base for base in bases if base != ShowOne)\n\n m_args = mock.Mock()\n m_args._get_kwargs.return_value = {'detailed': True}\n\n m_mod_dev = mock.Mock()\n m_mod_dev.id = 1\n m_mod_dev.name = 'test'\n m_mod_dev.type = DeviceTypes.AVERAGE\n m_mod_dev.interface = INTERFACE_CHOICES[DeviceInterfaces.SERIAL]\n m_mod_dev.implementation 
= dm.get_device_implementations()[0].NAME\n\n m_dev_obj.find.return_value = m_mod_dev\n\n m_mod_ser_dev = mock.Mock()\n m_mod_ser_dev.port = '/dev/ttyUSB0'\n m_mod_ser_dev.baudrate = 9600\n m_mod_ser_dev.bytesize = SerialBytesizeTypes.FIVEBITS\n m_mod_ser_dev.parity = SerialParityTypes.PARITY_NONE\n m_mod_ser_dev.stopbits = SerialStopbitTypes.STOPBITS_ONE\n m_mod_ser_dev.timeout = None\n\n m_dev_ser_obj.find.return_value = m_mod_ser_dev\n\n m_base = mock.patch.object(\n device_show.DeviceShow, '__bases__', f_bases)\n with m_base:\n m_base.is_local = True\n t_device = device_show.DeviceShow()\n\n t_device.app = mock.Mock()\n\n t_result = t_device.take_action(m_args)\n self.assertEqual(t_result[1][0], m_mod_dev.id)\n self.assertEqual(t_result[1][1], m_mod_dev.name)\n self.assertEqual(t_result[1][2], m_mod_dev.type)\n self.assertEqual(t_result[1][3], m_mod_dev.interface)\n self.assertEqual(t_result[1][4], m_mod_dev.implementation)\n self.assertEqual(t_result[1][5], m_mod_ser_dev.port)\n self.assertEqual(t_result[1][6], m_mod_ser_dev.baudrate)\n self.assertEqual(t_result[1][7], m_mod_ser_dev.bytesize)\n self.assertEqual(t_result[1][8], m_mod_ser_dev.parity)\n self.assertEqual(t_result[1][9], m_mod_ser_dev.stopbits)\n self.assertEqual(t_result[1][10], m_mod_ser_dev.timeout)\n\n # ensure that is_local on the patch does not modify the actual bases\n self.assertEqual(bases, device_show.DeviceShow.__bases__)\n\n @mock.patch.object(device_show, 'DeviceObject')\n def test_take_action_none(self, m_dev_obj):\n\n bases = copy(device_show.DeviceShow.__bases__)\n f_bases = tuple(base for base in bases if base != ShowOne)\n\n m_args = mock.Mock()\n m_args._get_kwargs.return_value = {'detailed': None}\n\n m_dev_obj.find.return_value = None\n\n m_base = mock.patch.object(\n device_show.DeviceShow, '__bases__', f_bases)\n with m_base:\n m_base.is_local = True\n t_device = device_show.DeviceShow()\n\n t_device.app = mock.Mock()\n\n self.assertRaises(RuntimeWarning, t_device.take_action, m_args)\n\n # ensure that is_local on the patch does not modify the actual bases\n self.assertEqual(bases, device_show.DeviceShow.__bases__)\n\n @mock.patch.object(device_show, 'DeviceObject')\n def test_take_action_multiple(self, m_dev_obj):\n\n bases = copy(device_show.DeviceShow.__bases__)\n f_bases = tuple(base for base in bases if base != ShowOne)\n\n m_args = mock.Mock()\n m_args._get_kwargs.return_value = {'detailed': None}\n\n m_dev_obj.find.side_effect = MultipleResultsFound()\n\n m_base = mock.patch.object(\n device_show.DeviceShow, '__bases__', f_bases)\n with m_base:\n m_base.is_local = True\n t_device = device_show.DeviceShow()\n\n t_device.app = mock.Mock()\n\n self.assertRaises(RuntimeWarning, t_device.take_action, m_args)\n\n # ensure that is_local on the patch does not modify the actual bases\n self.assertEqual(bases, device_show.DeviceShow.__bases__)\n", "id": "7236790", "language": "Python", "matching_score": 5.743042469024658, "max_stars_count": 0, "path": "radloggerpy/tests/cli/device/test_device_show.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2020 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom copy import copy\nfrom unittest import mock\n\nfrom cliff.show import ShowOne\nfrom sqlalchemy.orm.exc import MultipleResultsFound\n\nfrom radloggerpy.cli.argument import Argument\nfrom radloggerpy.cli.v1.device import device_remove as dr\nfrom radloggerpy.device.device_manager import DeviceManager as dm\n\nfrom radloggerpy.tests import base\nfrom radloggerpy.types.device_interfaces import DeviceInterfaces\n\n\nclass TestDeviceRemove(base.TestCase):\n\n def setUp(self):\n super(TestDeviceRemove, self).setUp()\n\n @mock.patch.object(dr, 'super')\n def test_arguments(self, m_super):\n m_super.return_value = mock.Mock(\n arguments={\n '--id': Argument(),\n '--interface': Argument(),\n '--implementation': Argument()\n }\n )\n\n bases = copy(dr.DeviceRemove.__bases__)\n f_bases = tuple(base for base in bases if base != ShowOne)\n\n m_base = mock.patch.object(\n dr.DeviceRemove, '__bases__', f_bases)\n with m_base:\n m_base.is_local = True\n t_device = dr.DeviceRemove()\n t_device.register_arguments(mock.Mock())\n\n m_super.assert_called_once()\n\n self.assertTrue('--id' in t_device._arguments.keys())\n self.assertFalse('--implementation' in t_device._arguments.keys())\n self.assertFalse('--interface' in t_device._arguments.keys())\n\n # ensure that is_local on the patch does not modify the actual bases\n self.assertEqual(bases, dr.DeviceRemove.__bases__)\n\n @mock.patch.object(dr, 'super')\n def test_parser(self, m_super):\n\n m_parser = mock.Mock()\n m_super.return_value.get_parser.return_value = m_parser\n\n # remove ShowOne from the DeviceShow inheritance\n bases = copy(dr.DeviceRemove.__bases__)\n f_bases = tuple(base for base in bases if base != ShowOne)\n\n m_base = mock.patch.object(\n dr.DeviceRemove, '__bases__', f_bases)\n with m_base:\n m_base.is_local = True\n t_device = dr.DeviceRemove()\n\n t_device.register_arguments = mock.Mock()\n\n t_device.get_parser(\"test\")\n\n t_device.register_arguments.assert_called_once_with(m_parser)\n\n # ensure that is_local on the patch does not modify the actual bases\n self.assertEqual(bases, dr.DeviceRemove.__bases__)\n\n @mock.patch.object(dr, 'DeviceObject')\n def test_take_action(self, m_dev_obj):\n\n # remove ShowOne from the DeviceShow inheritance\n bases = copy(dr.DeviceRemove.__bases__)\n f_bases = tuple(base for base in bases if base != ShowOne)\n\n m_args = mock.Mock()\n m_args._get_kwargs.return_value = {\"id\": 1}\n\n m_mod_dev = mock.Mock()\n m_mod_dev.id = 1\n m_mod_dev.name = 'test'\n m_mod_dev.interface = DeviceInterfaces.SERIAL\n m_mod_dev.implementation = dm.get_device_implementations()[0].NAME\n m_dev_obj.delete.return_value = m_mod_dev\n\n m_base = mock.patch.object(\n dr.DeviceRemove, '__bases__', f_bases)\n with m_base:\n m_base.is_local = True\n t_device = dr.DeviceRemove()\n\n t_device.app = mock.Mock()\n\n t_result = t_device.take_action(m_args)\n self.assertEqual(t_result[1][0], m_mod_dev.id)\n self.assertEqual(t_result[1][1], m_mod_dev.name)\n self.assertEqual(t_result[1][2], m_mod_dev.interface)\n self.assertEqual(t_result[1][3], m_mod_dev.implementation)\n\n # ensure that is_local on the patch does 
not modify the actual bases\n self.assertEqual(bases, dr.DeviceRemove.__bases__)\n\n @mock.patch.object(dr, 'DeviceObject')\n def test_take_action_none(self, m_dev_obj):\n\n # remove ShowOne from the DeviceShow inheritance\n bases = copy(dr.DeviceRemove.__bases__)\n f_bases = tuple(base for base in bases if base != ShowOne)\n\n m_args = mock.Mock()\n m_args._get_kwargs.return_value = {'id': 1}\n\n m_dev_obj.delete.return_value = None\n\n m_base = mock.patch.object(\n dr.DeviceRemove, '__bases__', f_bases)\n with m_base:\n m_base.is_local = True\n t_device = dr.DeviceRemove()\n\n t_device.app = mock.Mock()\n\n self.assertRaises(RuntimeWarning, t_device.take_action, m_args)\n\n # ensure that is_local on the patch does not modify the actual bases\n self.assertEqual(bases, dr.DeviceRemove.__bases__)\n\n @mock.patch.object(dr, 'DeviceObject')\n def test_take_action_multiple(self, m_dev_obj):\n\n # remove ShowOne from the DeviceShow inheritance\n bases = copy(dr.DeviceRemove.__bases__)\n f_bases = tuple(base for base in bases if base != ShowOne)\n\n m_args = mock.Mock()\n m_args._get_kwargs.return_value = {'id': 1}\n\n m_dev_obj.delete.side_effect = MultipleResultsFound()\n\n m_base = mock.patch.object(\n dr.DeviceRemove, '__bases__', f_bases)\n with m_base:\n m_base.is_local = True\n t_device = dr.DeviceRemove()\n\n t_device.app = mock.Mock()\n\n self.assertRaises(RuntimeWarning, t_device.take_action, m_args)\n\n # ensure that is_local on the patch does not modify the actual bases\n self.assertEqual(bases, dr.DeviceRemove.__bases__)\n", "id": "848723", "language": "Python", "matching_score": 3.498032808303833, "max_stars_count": 0, "path": "radloggerpy/tests/cli/device/test_device_remove.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom sqlalchemy.orm.exc import MultipleResultsFound\n\nfrom radloggerpy._i18n import _\nfrom radloggerpy.cli.argument import Argument\nfrom radloggerpy.cli.v1.device.device_show import DeviceShow\nfrom radloggerpy.database.objects.serial_device import SerialDeviceObject\nfrom radloggerpy.types.device_interfaces import DeviceInterfaces\nfrom radloggerpy.types.serial_bytesize import BYTESIZE_CHOICES\nfrom radloggerpy.types.serial_parity import PARITY_CHOICES\nfrom radloggerpy.types.serial_stopbit import STOPBIT_CHOICES\n\n\nclass DeviceShowSerial(DeviceShow):\n \"\"\"Command to show information about devices\"\"\"\n\n _arguments = None\n\n @property\n def arguments(self):\n if self._arguments is None:\n # retrieve existing arguments from baseclass\n self._arguments = super().arguments\n self._arguments.update({\n '--port': Argument(\n help=\"Symbolic name of the serial port to be translated \"\n \"to the physical device, such as /dev/ttyUSB0 or \"\n \"COM1.\",\n default=None),\n '--baudrate': Argument(\n '-r', default=None,\n help=\"The speed at which the device sends data expressed \"\n \"in symbols per second (baud), typically 9600 Bd/s.\"\n ),\n '--bytesize': Argument(\n '-b', default=None, type=int,\n choices=BYTESIZE_CHOICES.values()),\n '--parity': Argument(\n '-p', default=None,\n choices=PARITY_CHOICES.values()),\n '---stopbits': Argument(\n '-s', default=None, type=float,\n choices=STOPBIT_CHOICES.values()),\n '--timeout': Argument('-t', default=None),\n })\n # remove interface argument as serial is predefined interface type\n if '--interface' in self._arguments:\n del self._arguments['--interface']\n return self._arguments\n\n def get_parser(self, program_name):\n parser = super(DeviceShow, self).get_parser(program_name)\n self._add_implementations(DeviceInterfaces.SERIAL)\n self.register_arguments(parser)\n return parser\n\n def take_action(self, parsed_args):\n args = dict(parsed_args._get_kwargs())\n device_obj = SerialDeviceObject(**args)\n\n try:\n data = SerialDeviceObject.find(\n self.app.database_session, device_obj, False)\n except MultipleResultsFound:\n raise RuntimeWarning(_(\"Multiple devices found\"))\n\n if data is None:\n raise RuntimeWarning(_(\"Device could not be found\"))\n\n fields = (\n 'id', 'name', 'measurement type', 'interface', 'implementation',\n 'port', 'baudrate', 'bytesize', 'parity', 'stopbits', 'timeout')\n values = (data.id, data.name, data.type, data.interface,\n data.implementation, data.port, data.baudrate, data.bytesize,\n data.parity, data.stopbits, data.timeout)\n\n return (fields, values)\n", "id": "7517829", "language": "Python", "matching_score": 6.808140277862549, "max_stars_count": 0, "path": "radloggerpy/cli/v1/device/device_show_serial.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2020 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom cliff.lister import Lister\n\nfrom radloggerpy._i18n import _\nfrom radloggerpy.cli.argument import Argument\nfrom radloggerpy.cli.v1.device.device import DeviceCommand\nfrom radloggerpy.database.objects.serial_device import SerialDeviceObject\nfrom radloggerpy.types.device_interfaces import DeviceInterfaces\nfrom radloggerpy.types.serial_bytesize import BYTESIZE_CHOICES\nfrom radloggerpy.types.serial_parity import PARITY_CHOICES\nfrom radloggerpy.types.serial_stopbit import STOPBIT_CHOICES\n\n\nclass DeviceListSerial(Lister, DeviceCommand):\n \"\"\"Command to show lists of serial devices\"\"\"\n\n _arguments = None\n\n @property\n def arguments(self):\n if self._arguments is None:\n self._arguments = super().arguments\n self._arguments.update({\n '--port': Argument(\n help=\"Symbolic name of the serial port to be translated \"\n \"to the physical device, such as /dev/ttyUSB0 or \"\n \"COM1.\",\n default=None),\n '--baudrate': Argument(\n '-r', default=None,\n help=\"The speed at which the device sends data expressed \"\n \"in symbols per second (baud), typically 9600 Bd/s.\"\n ),\n '--bytesize': Argument(\n '-b', default=None, type=int,\n choices=BYTESIZE_CHOICES.values()),\n '--parity': Argument(\n '-p', default=None,\n choices=PARITY_CHOICES.values()),\n '---stopbits': Argument(\n '-s', default=None, type=float,\n choices=STOPBIT_CHOICES.values()),\n '--timeout': Argument('-t', default=None),\n })\n if '--interface' in self._arguments:\n del self._arguments['--interface']\n return self._arguments\n\n def get_parser(self, program_name):\n parser = super(DeviceListSerial, self).get_parser(program_name)\n self._add_implementations(DeviceInterfaces.SERIAL)\n self.register_arguments(parser)\n return parser\n\n def take_action(self, parsed_args):\n args = dict(parsed_args._get_kwargs())\n device_obj = SerialDeviceObject(**args)\n\n data = SerialDeviceObject.find(\n self.app.database_session, device_obj, True)\n\n if len(data) == 0:\n raise RuntimeWarning(_(\"No devices found\"))\n\n fields = (\n 'id', 'name', 'measurement type', 'interface', 'implementation',\n 'port', 'baudrate', 'bytesize', 'parity', 'stopbits', 'timeout')\n values = []\n for result in data:\n value = (result.id, result.name, result.type, result.interface,\n result.implementation, result.port, result.baudrate,\n result.bytesize, result.parity, result.stopbits,\n result.timeout)\n values.append(value)\n\n return [fields, values]\n", "id": "11719454", "language": "Python", "matching_score": 5.9754862785339355, "max_stars_count": 0, "path": "radloggerpy/cli/v1/device/device_list_serial.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom cliff.command import Command\n\nfrom radloggerpy.cli.argument import Argument\nfrom radloggerpy.cli.v1.device.device_add import DeviceAddCommand\nfrom radloggerpy.database.objects.serial_device import SerialDeviceObject\nfrom radloggerpy.device.device_manager import DeviceManager as Dm\nfrom radloggerpy.types.device_interfaces import DeviceInterfaces\nfrom radloggerpy.types.device_interfaces import INTERFACE_CHOICES\nfrom radloggerpy.types.serial_bytesize import BYTESIZE_CHOICES\nfrom radloggerpy.types.serial_parity import PARITY_CHOICES\nfrom radloggerpy.types.serial_stopbit import STOPBIT_CHOICES\n\n\nclass DeviceAddSerial(Command, DeviceAddCommand):\n \"\"\"Command to add serial devices\"\"\"\n\n _arguments = None\n\n @property\n def arguments(self):\n if self._arguments is None:\n # retrieve existing arguments from baseclass\n self._arguments = super().arguments\n self._arguments.update({\n 'port': Argument(\n help=\"Symbolic name of the serial port to be translated \"\n \"to the physical device, such as /dev/ttyUSB0 or \"\n \"COM1.\"),\n 'baudrate': Argument(\n help=\"The speed at which the device sends data expressed \"\n \"in symbols per second (baud), typically 9600 Bd/s.\"),\n '--bytesize': Argument(\n '-b', default=8, type=int,\n choices=BYTESIZE_CHOICES.values()),\n '--parity': Argument(\n '-p', default=\"none\",\n choices=PARITY_CHOICES.values()),\n '---stopbits': Argument(\n '-s', default=1, type=float,\n choices=STOPBIT_CHOICES.values()),\n '--timeout': Argument('-t', default=None),\n })\n return self._arguments\n\n def get_parser(self, program_name):\n parser = super(DeviceAddSerial, self).get_parser(program_name)\n\n # Add implementations ensures only serial interface devices are shown\n # as valid parameter.\n self._add_implementations(DeviceInterfaces.SERIAL)\n\n self.register_arguments(parser)\n return parser\n\n def take_action(self, parsed_args):\n serial_obj = SerialDeviceObject(**dict(parsed_args._get_kwargs()))\n\n # Set the serial attribute as string, since get_device_class expects it\n # as retrieved when constructing objects from database\n serial_obj.interface = INTERFACE_CHOICES[DeviceInterfaces.SERIAL]\n\n # Get the class for the implementation and use it to set type\n # TODO(Dantali0n): Catch and raise errors (if any, should not be\n # possible due to parameter restrictions)\n implementation = Dm.get_device_class(serial_obj)\n serial_obj.type = implementation.TYPE\n\n return SerialDeviceObject.add(self.app.database_session, serial_obj)\n", "id": "1384764", "language": "Python", "matching_score": 4.875049114227295, "max_stars_count": 0, "path": "radloggerpy/cli/v1/device/device_add_serial.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom cliff.show import ShowOne\nfrom sqlalchemy.orm.exc import MultipleResultsFound\n\nfrom radloggerpy._i18n import _\nfrom radloggerpy.cli.argument import Argument\nfrom radloggerpy.cli.v1.device.device import DeviceCommand\nfrom radloggerpy.database.objects.device import DeviceObject\nfrom radloggerpy.database.objects.serial_device import SerialDeviceObject\nfrom radloggerpy.types.device_interfaces import DeviceInterfaces\nfrom radloggerpy.types.device_interfaces import INTERFACE_CHOICES\n\n\nclass DeviceShow(ShowOne, DeviceCommand):\n \"\"\"Command to show information about a device\"\"\"\n\n _arguments = None\n\n @property\n def arguments(self):\n if self._arguments is None:\n self._arguments = super().arguments\n self._arguments.update({\n '--detailed': Argument(\n '-d', help=\"Show details related to the specific device \"\n \"type if found.\",\n action=\"store_true\")})\n return self._arguments\n\n def get_parser(self, program_name):\n parser = super(DeviceShow, self).get_parser(program_name)\n self._add_interfaces()\n self._add_implementations()\n self.register_arguments(parser)\n return parser\n\n def take_action(self, parsed_args):\n args = dict(parsed_args._get_kwargs())\n device_obj = DeviceObject(**args)\n\n details = args['detailed']\n\n try:\n data = DeviceObject.find(\n self.app.database_session, device_obj, False)\n except MultipleResultsFound:\n raise RuntimeWarning(_(\"Multiple devices found\"))\n\n if data is None:\n raise RuntimeWarning(_(\"Device could not be found\"))\n\n fields = (\n 'id', 'name', 'measurement type', 'interface', 'implementation')\n values = (\n data.id, data.name, data.type, data.interface, data.implementation)\n\n if details and data.interface == \\\n INTERFACE_CHOICES[DeviceInterfaces.SERIAL]:\n data = SerialDeviceObject.find(\n self.app.database_session, device_obj, False)\n fields += ('port', 'baudrate', 'bytesize', 'parity',\n 'stopbits', 'timeout')\n values += (data.port, data.baudrate, data.bytesize, data.parity,\n data.stopbits, data.timeout)\n elif details and data.interface == \\\n INTERFACE_CHOICES[DeviceInterfaces.ETHERNET]:\n pass\n elif details and data.interface == \\\n INTERFACE_CHOICES[DeviceInterfaces.USB]:\n pass\n\n return (fields, values)\n", "id": "2748733", "language": "Python", "matching_score": 5.595580101013184, "max_stars_count": 0, "path": "radloggerpy/cli/v1/device/device_show.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom cliff.show import ShowOne\nfrom sqlalchemy.orm.exc import MultipleResultsFound\n\nfrom radloggerpy._i18n import _\nfrom radloggerpy.cli.v1.device.device import DeviceCommand\nfrom radloggerpy.database.objects.device import DeviceObject\n\n\nclass DeviceRemove(ShowOne, DeviceCommand):\n \"\"\"Command to remove device\"\"\"\n\n _arguments = None\n\n @property\n def arguments(self):\n if self._arguments is None:\n self._arguments = super().arguments\n if '--interface' in self._arguments:\n del self._arguments['--interface']\n if '--implementation' in self._arguments:\n del self._arguments['--implementation']\n return self._arguments\n\n def get_parser(self, program_name):\n parser = super(DeviceRemove, self).get_parser(program_name)\n self.register_arguments(parser)\n return parser\n\n def take_action(self, parsed_args):\n args = dict(parsed_args._get_kwargs())\n device_obj = DeviceObject(**args)\n\n if device_obj.id is None and device_obj.name is None:\n raise RuntimeWarning(\n _(\"Either the id or name must be specified to \"\n \"remove a device\"))\n\n try:\n data = DeviceObject.delete(\n self.app.database_session, device_obj, False)\n except MultipleResultsFound:\n raise RuntimeWarning(_(\"Multiple devices found\"))\n\n if data is None:\n raise RuntimeWarning(_(\"Device could not be found\"))\n\n fields = ('id', 'name', 'interface', 'implementation')\n values = (data.id, data.name, data.interface, data.implementation)\n\n self.app.LOG.info(_(\"Device removed successfully\"))\n return (fields, values)\n", "id": "3565784", "language": "Python", "matching_score": 3.122225284576416, "max_stars_count": 0, "path": "radloggerpy/cli/v1/device/device_remove.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2020 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom cliff.lister import Lister\n\nfrom radloggerpy._i18n import _\nfrom radloggerpy.cli.v1.measurement.measurement import MeasurementCommand\nfrom radloggerpy.database.objects.device import DeviceObject\nfrom radloggerpy.database.objects.measurement import MeasurementObject\n\n\nclass MeasurementList(Lister, MeasurementCommand):\n \"\"\"Command to show lists of measurements\"\"\"\n\n _arguments = None\n\n @property\n def arguments(self):\n if self._arguments is None:\n self._arguments = super().arguments\n return self._arguments\n\n def get_parser(self, program_name):\n parser = super(MeasurementList, self).get_parser(program_name)\n self.register_arguments(parser)\n return parser\n\n def take_action(self, parsed_args):\n args = dict(parsed_args._get_kwargs())\n\n if 'device' in args or 'name' in args:\n \"\"\"Set device for MeasurementObject if any device params are set\"\"\"\n dev_obj = DeviceObject()\n if args['device']:\n dev_obj.id = args['device']\n del args['device']\n if args['name']:\n dev_obj.name = args['name']\n del args['name']\n args['device'] = dev_obj\n\n measure_obj = MeasurementObject(**args)\n\n data = MeasurementObject.find(\n self.app.database_session, measure_obj, True)\n\n if len(data) == 0:\n raise RuntimeWarning(_(\"No measurements found\"))\n\n fields = ('timestamp', 'device', 'cpm', 'μSv/h')\n values = []\n for result in data:\n value = (result.timestamp, result.device.id, result.cpm,\n result.svh)\n values.append(value)\n\n return [fields, values]\n", "id": "2400236", "language": "Python", "matching_score": 4.789567947387695, "max_stars_count": 0, "path": "radloggerpy/cli/v1/measurement/measurement_list.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom cliff.lister import Lister\n\nfrom radloggerpy._i18n import _\nfrom radloggerpy.cli.v1.device.device import DeviceCommand\nfrom radloggerpy.database.objects.device import DeviceObject\n\n\nclass DeviceList(Lister, DeviceCommand):\n \"\"\"Command to show lists of devices\"\"\"\n\n _arguments = None\n\n @property\n def arguments(self):\n if self._arguments is None:\n self._arguments = super().arguments\n return self._arguments\n\n def get_parser(self, program_name):\n parser = super(DeviceList, self).get_parser(program_name)\n self._add_interfaces()\n self._add_implementations()\n self.register_arguments(parser)\n return parser\n\n def take_action(self, parsed_args):\n args = dict(parsed_args._get_kwargs())\n device_obj = DeviceObject(**args)\n\n data = DeviceObject.find(\n self.app.database_session, device_obj, True)\n\n if len(data) == 0:\n raise RuntimeWarning(_(\"No devices found\"))\n\n fields = ('id', 'name', 'measurement type', 'interface',\n 'implementation')\n values = []\n for result in data:\n value = (result.id, result.name, result.type, result.interface,\n result.implementation)\n values.append(value)\n\n return [fields, values]\n", "id": "4922151", "language": "Python", "matching_score": 0.9612828493118286, "max_stars_count": 0, "path": "radloggerpy/cli/v1/device/device_list.py" }, { "content": "# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nclass Argument:\n \"\"\"Small object to contain parameters for adding arguments to argparse\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize the object with potentially some arguments\n\n An example: `Argument(default=\"example\", nargs=\"?\")`\n\n Another: `Argument('-t', required=True, help=\"The type\")`\n\n :param args: unnamed arguments\n :param kwargs: named arguments\n \"\"\"\n\n self._args = list()\n for element in args:\n self._args.append(element)\n self._args = tuple(self._args)\n\n self._kwargs = dict()\n for key, value in kwargs.items():\n self._kwargs[key] = value\n\n def add_kwarg(self, key, value):\n \"\"\"Add an additional named arguments after object construction\n\n :param key: Key to add to the dictionary\n :type key: str\n :param value: used as value for the dictionary key\n :return: True if the item was added successfully, false if it existed\n already.\n :rtype: bool\n \"\"\"\n\n if self._kwargs.get(key) is not None:\n return False\n\n self._kwargs[key] = value\n return True\n\n def args(self):\n \"\"\"Return all unnamed arguments\n\n Use with * to pass as `*args` such as `*Argument.args()`\n \"\"\"\n\n return self._args\n\n def kwargs(self):\n \"\"\"Return all named arguments\n\n Use with ** to pass as `**kwargs` such as `**Argument.kwargs()`\n \"\"\"\n\n return self._kwargs\n", "id": "5210861", "language": "Python", "matching_score": 1.8626596927642822, "max_stars_count": 0, "path": "radloggerpy/cli/argument.py" }, { "content": "# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport abc\n\n\nclass ArgumentHelper(metaclass=abc.ABCMeta):\n \"\"\"Simplifies the adding of arguments when using argparse\n\n Implement this abstract class to simplify adding of argparse arguments.\n Add elements to the :py:attr:`arguments` dictionary were the key is the\n desired argument name and the value is an\n :py:class:`radloggerpy.cli.argument.Argument` instance.\n\n To register the arguments defined in :py:attr:`arguments` call the\n :py:func:`register_arguments` function passing the desired argparse\n ArgumentParser instance.\n\n The expected structure of :py:attr:`arguments` could look as follows:\n\n `{ 'name' : Argument(), '--interface' : Argument('-i', required=True) }`\n\n When the extend of certain Argument parameters is not known at compile time\n these parameters can be added using\n :py:func:`radloggerpy.cli.argument.Argument.add_kwarg`\n \"\"\"\n\n @property\n @abc.abstractmethod\n def arguments(self) -> dict:\n \"\"\"Dictionary property that must be implemented to contain arguments\"\"\"\n pass\n\n def register_arguments(self, parser):\n \"\"\"Register all arguments in :py:attr:`arguments` on the parser\n\n :param parser: argparse parser for command line strings\n :type parser: :py:class:`argparse.ArgumentParser`\n \"\"\"\n for key, value in self.arguments.items():\n parser.add_argument(key, *value.args(), **value.kwargs())\n", "id": "3525181", "language": "Python", "matching_score": 2.375032663345337, "max_stars_count": 0, "path": "radloggerpy/cli/argument_helper.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom radloggerpy.cli.argument import Argument\nfrom radloggerpy.cli.argument_helper import ArgumentHelper\n\nfrom radloggerpy.tests import base\n\n\nclass TestArgumentHelper(base.TestCase):\n\n class TestHelper(ArgumentHelper):\n\n arguments = {\n 'test': Argument(default=\"example\"),\n '--test': Argument('-t', required=True),\n }\n\n def setUp(self):\n super(TestArgumentHelper, self).setUp()\n\n def test_construct_helper(self):\n helper = TestArgumentHelper.TestHelper()\n m_parser = mock.Mock()\n\n helper.register_arguments(m_parser)\n\n m_parser.add_argument.assert_has_calls(\n [\n mock.call('test', default=\"example\"),\n mock.call('--test', '-t', required=True),\n ],\n any_order=True\n )\n", "id": "12337588", "language": "Python", "matching_score": 2.1159212589263916, "max_stars_count": 0, "path": "radloggerpy/tests/cli/test_argument_helper.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2020 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom radloggerpy.cli.argument import Argument\nfrom radloggerpy.cli.v1.device import device_helper\n\nfrom radloggerpy.tests import base\n\n\nclass TestDeviceHelper(base.TestCase):\n\n class TestDevHelper(device_helper.DeviceHelper):\n\n _arguments = None\n\n _implementation_key = 'impexp'\n\n @property\n def arguments(self):\n if self._arguments is None:\n self._arguments = dict()\n self._arguments.update({\n 'test': Argument('-t'),\n 'impexp': Argument('-i')\n })\n return self._arguments\n\n def setUp(self):\n super(TestDeviceHelper, self).setUp()\n\n def test_construct_helper(self):\n helper = TestDeviceHelper.TestDevHelper()\n m_parser = mock.Mock()\n\n helper.register_arguments(m_parser)\n\n m_parser.add_argument.assert_has_calls(\n [\n mock.call('test', '-t'),\n mock.call('impexp', '-i')\n ],\n any_order=True\n )\n\n @mock.patch.object(device_helper, 'DeviceManager')\n def test_add_implementation(self, m_dev_manager):\n helper = TestDeviceHelper.TestDevHelper()\n\n m_dev_manager.get_device_implementations.return_value = [\n mock.Mock(NAME='example')\n ]\n\n helper._add_implementations()\n\n self.assertTrue(\n 'example' in helper.arguments[\n helper._implementation_key].kwargs()['choices']\n )\n\n @mock.patch.object(device_helper, 'DeviceManager')\n def test_add_implementation_filter(self, m_dev_manager):\n helper = TestDeviceHelper.TestDevHelper()\n\n m_dev_manager.get_device_implementations.return_value = [\n mock.Mock(NAME='example1', INTERFACE='have'),\n mock.Mock(NAME='example2', INTERFACE='filter'),\n ]\n\n helper._add_implementations('have')\n\n self.assertTrue(\n 'example1' in helper.arguments[\n helper._implementation_key].kwargs()['choices']\n )\n\n self.assertFalse(\n 'example2' in helper.arguments[\n helper._implementation_key].kwargs()['choices']\n )\n", "id": "4153921", "language": "Python", "matching_score": 3.803839683532715, "max_stars_count": 0, "path": "radloggerpy/tests/cli/device/test_device_helper.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2020 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport abc\n\nfrom radloggerpy.cli.argument_helper import ArgumentHelper\nfrom radloggerpy.device.device_manager import DeviceManager\n\n\nclass DeviceHelper(ArgumentHelper, metaclass=abc.ABCMeta):\n \"\"\"Abstract helper for shared device interface\"\"\"\n\n # Should be overridden by child classes\n _implementation_key = ''\n\n def _add_implementations(self, device_interface=None):\n\n if device_interface is None:\n choices = [dev.NAME for dev in\n DeviceManager.get_device_implementations()]\n else:\n choices = [dev.NAME for dev in\n DeviceManager.get_device_implementations()\n if dev.INTERFACE == device_interface]\n\n self.arguments[self._implementation_key].add_kwarg(\n 'choices', choices\n )\n", "id": "6043706", "language": "Python", "matching_score": 2.0875260829925537, "max_stars_count": 0, "path": "radloggerpy/cli/v1/device/device_helper.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2020 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport abc\n\nfrom radloggerpy.cli.argument import Argument\nfrom radloggerpy.cli.argument_helper import ArgumentHelper\n\n\nclass MeasurementCommand(ArgumentHelper, metaclass=abc.ABCMeta):\n \"\"\"Abstract command to interface with measurements\"\"\"\n\n _arguments = None\n\n @property\n def arguments(self):\n if self._arguments is None:\n self._arguments = dict()\n self._arguments.update({\n '--device': Argument(\n '-d', type=int, help=\"Device id for associated \"\n \"measurements\"),\n '--name': Argument(\n '-n', help=\"Device name for associated \"\n \"measurements\")\n })\n return self._arguments\n", "id": "10468331", "language": "Python", "matching_score": 2.6521146297454834, "max_stars_count": 0, "path": "radloggerpy/cli/v1/measurement/measurement.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport abc\n\nfrom radloggerpy.cli.argument import Argument\nfrom radloggerpy.cli.v1.device.device_helper import DeviceHelper\nfrom radloggerpy.types.device_interfaces import INTERFACE_CHOICES\n\n\nclass DeviceCommand(DeviceHelper, metaclass=abc.ABCMeta):\n \"\"\"Abstract command to interface with devices\"\"\"\n\n _arguments = None\n\n _implementation_key = '--implementation'\n\n @property\n def arguments(self):\n if self._arguments is None:\n self._arguments = dict()\n self._arguments.update({\n '--id': Argument(\n '-i', help=\"Database id associated with this object\",\n type=int),\n '--name': Argument(\n '-n', help=\"Unique name to help identify this device.\"),\n '--interface': Argument(\n '-f', help=\"Type of interface to communicate with the \"\n \"radiation monitoring device.\"),\n '--implementation': Argument(\n '-m', help=\"The specific implementation of radiation \"\n \"monitor device. See documentation for \"\n \"supported models.\"),\n })\n return self._arguments\n\n def _add_interfaces(self):\n self.arguments['--interface'].add_kwarg(\n 'choices',\n INTERFACE_CHOICES.values()\n )\n", "id": "4876915", "language": "Python", "matching_score": 2.9635725021362305, "max_stars_count": 0, "path": "radloggerpy/cli/v1/device/device.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2020 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom radloggerpy.cli.v1.device import device\n\nfrom radloggerpy.tests import base\nfrom radloggerpy.types.device_interfaces import INTERFACE_CHOICES\n\n\nclass TestDeviceCommand(base.TestCase):\n\n class DevCommandExp(device.DeviceCommand):\n\n _arguments = None\n\n @property\n def arguments(self):\n if self._arguments is None:\n self._arguments = super().arguments\n return self._arguments\n\n def setUp(self):\n super(TestDeviceCommand, self).setUp()\n\n def test_add_interfaces(self):\n dev_command = TestDeviceCommand.DevCommandExp()\n\n dev_command._add_interfaces()\n\n self.assertItemsEqual(\n INTERFACE_CHOICES.values(), dev_command.arguments[\n '--interface'].kwargs()['choices']\n )\n", "id": "1347206", "language": "Python", "matching_score": 1.6750637292861938, "max_stars_count": 0, "path": "radloggerpy/tests/cli/device/test_device.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom radloggerpy.cli.argument import Argument\n\nfrom radloggerpy.tests import base\n\n\nclass TestArgument(base.TestCase):\n\n def setUp(self):\n super(TestArgument, self).setUp()\n\n def test_construct_args(self):\n arg = Argument('-t')\n\n self.assertEqual(1, len(arg.args()))\n self.assertEqual((\"-t\",), arg.args())\n\n def test_construct_kwargs(self):\n arg = Argument(default=\"test\", required=True)\n\n self.assertEqual(2, len(arg.kwargs()))\n self.assertEqual({\"default\": \"test\", \"required\": True}, arg.kwargs())\n\n def test_add_kwarg(self):\n arg = Argument()\n\n self.assertEqual(0, len(arg.kwargs()))\n\n arg.add_kwarg(\"test\", \"example\")\n self.assertEqual({\"test\": \"example\"}, arg.kwargs())\n\n def test_add_kwarg_duplicate(self):\n arg = Argument()\n\n self.assertEqual(0, len(arg.kwargs()))\n\n self.assertTrue(arg.add_kwarg(\"test\", \"example\"))\n self.assertFalse(arg.add_kwarg(\"test\", \"example2\"))\n self.assertEqual({\"test\": \"example\"}, arg.kwargs())\n\n def test_add_kwarg_none_str(self):\n arg = Argument()\n\n self.assertTrue(arg.add_kwarg(False, \"example\"))\n self.assertEqual({False: \"example\"}, arg.kwargs())\n", "id": "9476446", "language": "Python", "matching_score": 0.6111930012702942, "max_stars_count": 0, "path": "radloggerpy/tests/cli/test_argument.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom oslo_log import log\n\nfrom radloggerpy import config\nfrom radloggerpy.database.models.device import Device\nfrom radloggerpy.database.models.serial_device import SerialDevice\nfrom radloggerpy.database.objects.device import DeviceObject\nfrom radloggerpy.database.objects import serial_device as sd\n\nfrom radloggerpy.tests import base\nfrom radloggerpy.types.device_interfaces import DeviceInterfaces\nfrom radloggerpy.types.serial_bytesize import SerialBytesizeTypes\nfrom radloggerpy.types.serial_parity import SerialParityTypes\nfrom radloggerpy.types.serial_stopbit import SerialStopbitTypes\n\nLOG = log.getLogger(__name__)\nCONF = config.CONF\n\n\nclass TestSerialDeviceObject(base.TestCase):\n\n def setUp(self):\n super(TestSerialDeviceObject, self).setUp()\n\n def test_init(self):\n\n m_atribs = {\n \"port\": \"value1\",\n \"attributeskip\": \"none\",\n }\n\n test_obj = sd.SerialDeviceObject(**m_atribs)\n\n self.assertEqual(\"value1\", test_obj.port)\n self.assertIsNone(None, getattr(test_obj, \"attributeskip\", None))\n\n def test_filter(self):\n\n m_atribs = {\n \"port\": \"value1\",\n \"attributeskip\": \"none\",\n }\n\n test_obj = sd.SerialDeviceObject(**m_atribs)\n\n m_result = test_obj._filter(test_obj)\n\n self.assertEqual(\n {\"port\": \"value1\"}, m_result)\n\n def test_build_object_unset(self):\n\n test_obj = sd.SerialDeviceObject()\n test_obj._build_object()\n\n self.assertIsNone(None, test_obj.m_serial_device.port)\n self.assertIsNone(None, test_obj.m_serial_device.baudrate)\n self.assertIsNone(None, test_obj.m_serial_device.bytesize)\n self.assertIsNone(None, test_obj.m_serial_device.parity)\n self.assertIsNone(None, test_obj.m_serial_device.stopbits)\n self.assertIsNone(None, test_obj.m_serial_device.timeout)\n\n def test_build_object_values(self):\n\n m_atribs = {\n \"port\": \"/dev/ttyUSB0\",\n \"baudrate\": 115200,\n \"bytesize\": 8,\n \"parity\": \"odd\",\n \"stopbits\": 1,\n \"timeout\": 2\n }\n\n test_obj = sd.SerialDeviceObject(**m_atribs)\n test_obj._build_object()\n\n self.assertEqual(\"/dev/ttyUSB0\", test_obj.m_serial_device.port)\n self.assertEqual(115200, test_obj.m_serial_device.baudrate)\n self.assertEqual(\n SerialBytesizeTypes.EIGHTBITS, test_obj.m_serial_device.bytesize)\n self.assertEqual(\n SerialParityTypes.PARITY_ODD, test_obj.m_serial_device.parity)\n self.assertEqual(\n SerialStopbitTypes.STOPBITS_ONE, test_obj.m_serial_device.stopbits)\n self.assertEqual(\n 2, test_obj.m_serial_device.timeout)\n\n def test_build_object_keys(self):\n\n m_atribs = {\n \"port\": \"/dev/ttyUSB0\",\n \"baudrate\": 115200,\n \"bytesize\": SerialBytesizeTypes.EIGHTBITS,\n \"parity\": SerialParityTypes.PARITY_ODD,\n \"stopbits\": SerialStopbitTypes.STOPBITS_ONE,\n }\n\n test_obj = sd.SerialDeviceObject(**m_atribs)\n test_obj._build_object()\n\n self.assertEqual(\"/dev/ttyUSB0\", test_obj.m_serial_device.port)\n self.assertEqual(115200, test_obj.m_serial_device.baudrate)\n self.assertEqual(\n SerialBytesizeTypes.EIGHTBITS, test_obj.m_serial_device.bytesize)\n self.assertEqual(\n SerialParityTypes.PARITY_ODD, test_obj.m_serial_device.parity)\n self.assertEqual(\n SerialStopbitTypes.STOPBITS_ONE, test_obj.m_serial_device.stopbits)\n\n def test_build_attributes_none(self):\n\n test_obj = sd.SerialDeviceObject()\n test_obj.m_device = Device()\n test_obj.m_serial_device = SerialDevice()\n test_obj._build_attributes()\n\n 
self.assertIsNone(test_obj.port)\n self.assertIsNone(test_obj.baudrate)\n self.assertIsNone(test_obj.bytesize)\n self.assertIsNone(test_obj.parity)\n self.assertIsNone(test_obj.stopbits)\n self.assertIsNone(test_obj.timeout)\n\n def test_add(self):\n m_session = mock.Mock()\n\n # TODO(Dantali0n): change into setting attributes directly\n m_atribs = {\n \"id\": 1,\n \"port\": \"/dev/ttyUSB0\",\n \"baudrate\": 115200,\n \"bytesize\": SerialBytesizeTypes.EIGHTBITS,\n \"parity\": SerialParityTypes.PARITY_ODD,\n \"stopbits\": SerialStopbitTypes.STOPBITS_ONE,\n }\n\n test_obj = sd.SerialDeviceObject(**m_atribs)\n sd.SerialDeviceObject.add(m_session, test_obj)\n\n m_session.add.assert_has_calls(\n [\n mock.call(test_obj.m_device),\n mock.call(test_obj.m_serial_device),\n ],\n any_order=True\n )\n m_session.commit.assert_called_once()\n\n def test_add_error(self):\n m_session = mock.Mock()\n m_session.commit.side_effect = RuntimeError\n\n # TODO(Dantali0n): change into setting attributes directly\n m_atribs = {\n \"id\": 1,\n \"port\": \"/dev/ttyUSB0\",\n \"baudrate\": 115200,\n \"bytesize\": SerialBytesizeTypes.EIGHTBITS,\n \"parity\": SerialParityTypes.PARITY_ODD,\n \"stopbits\": SerialStopbitTypes.STOPBITS_ONE,\n }\n\n test_obj = sd.SerialDeviceObject(**m_atribs)\n self.assertRaises(\n RuntimeError, sd.SerialDeviceObject.add, m_session, test_obj)\n\n m_session.add.assert_has_calls(\n [\n mock.call(test_obj.m_device),\n mock.call(test_obj.m_serial_device),\n ],\n any_order=True\n )\n m_session.commit.assert_called_once()\n m_session.rollback.assert_called_once()\n\n def test_find_obj(self):\n\n \"\"\"Represents mocked device as it will be retrieved from db \"\"\"\n m_device = Device()\n m_device.id = 1\n m_device.name = \"value2\"\n m_device.interface = DeviceInterfaces.SERIAL\n m_device.implementation = mock.Mock(\n code=\"ArduinoGeigerPCB\", value=\"arduinogeigerpcb\")\n\n m_device_serial = SerialDevice()\n m_device_serial.port = \"/dev/ttyUSB0\"\n m_device_serial.baudrate = 115200\n m_device_serial.bytesize = SerialBytesizeTypes.EIGHTBITS\n m_device_serial.parity = SerialParityTypes.PARITY_ODD\n m_device_serial.stopbits = SerialStopbitTypes.STOPBITS_ONE\n\n m_device.serial = [m_device_serial]\n\n \"\"\"Setup query and session to return mocked device\"\"\"\n m_query = mock.Mock()\n m_session = mock.Mock()\n m_session.query.return_value.filter_by.return_value.\\\n join.return_value.filter_by.return_value = m_query\n m_query.one_or_none.return_value = m_device\n\n test_obj = sd.SerialDeviceObject(**{\"baudrate\": 115200})\n result_obj = sd.SerialDeviceObject.find(m_session, test_obj, False)\n\n self.assertEqual(1, result_obj.id)\n self.assertEqual(\"/dev/ttyUSB0\", result_obj.port)\n self.assertEqual(8, result_obj.bytesize)\n self.assertEqual(\"odd\", result_obj.parity)\n self.assertEqual(1, result_obj.stopbits)\n\n @mock.patch.object(sd, 'LOG')\n def test_find_obj_deprecated(self, m_log):\n\n \"\"\"Represents mocked device as it will be retrieved from db \"\"\"\n m_device = Device()\n m_device.id = 1\n m_device.name = \"value2\"\n m_device.interface = DeviceInterfaces.SERIAL\n m_device.implementation = mock.Mock(\n code=\"ArduinoGeigerPCB\", value=\"arduinogeigerpcb\")\n\n m_device_serial = SerialDevice()\n m_device_serial.port = \"/dev/ttyUSB0\"\n m_device_serial.baudrate = 115200\n m_device_serial.bytesize = SerialBytesizeTypes.EIGHTBITS\n m_device_serial.parity = SerialParityTypes.PARITY_ODD\n m_device_serial.stopbits = SerialStopbitTypes.STOPBITS_ONE\n\n m_device.serial = 
[m_device_serial]\n\n \"\"\"Setup query and session to return mocked device\"\"\"\n m_query = mock.Mock()\n m_session = mock.Mock()\n m_session.query.return_value.filter_by.return_value.\\\n join.return_value.filter_by.return_value = m_query\n m_query.one_or_none.return_value = m_device\n\n test_obj = DeviceObject(**{\"id\": 1})\n result_obj = sd.SerialDeviceObject.find(m_session, test_obj, False)\n\n self.assertEqual(1, result_obj.id)\n self.assertEqual(\"/dev/ttyUSB0\", result_obj.port)\n self.assertEqual(8, result_obj.bytesize)\n self.assertEqual(\"odd\", result_obj.parity)\n self.assertEqual(1, result_obj.stopbits)\n\n m_log.warning.assert_called_once()\n\n def test_find_obj_none(self):\n\n \"\"\"Setup query and session to return mocked device\"\"\"\n m_query = mock.Mock()\n m_session = mock.Mock()\n m_session.query.return_value.filter_by.return_value. \\\n join.return_value.filter_by.return_value = m_query\n m_query.one_or_none.return_value = None\n\n test_obj = sd.SerialDeviceObject(**{\"port\": \"/dev/ttyUSB0\"})\n result_obj = sd.SerialDeviceObject.find(m_session, test_obj, False)\n\n self.assertIsNone(result_obj)\n\n def test_find_obj_multiple(self):\n m_device1 = Device()\n m_device2 = Device()\n m_query = mock.Mock()\n m_session = mock.Mock()\n m_session.query.return_value.filter_by.return_value. \\\n join.return_value.filter_by.return_value = m_query\n\n m_query.all.return_value = [m_device1, m_device2]\n\n m_device1.id = 1\n m_device1.name = \"test1\"\n m_device1.interface = DeviceInterfaces.SERIAL\n m_device1.implementation = mock.Mock(\n code=\"ArduinoGeigerPCB\", value=\"arduinogeigerpcb\")\n\n m_device_serial1 = SerialDevice()\n m_device_serial1.port = \"/dev/ttyUSB0\"\n m_device_serial1.baudrate = 115200\n m_device_serial1.bytesize = SerialBytesizeTypes.EIGHTBITS\n m_device_serial1.parity = SerialParityTypes.PARITY_ODD\n m_device_serial1.stopbits = SerialStopbitTypes.STOPBITS_ONE\n\n m_device1.serial = [m_device_serial1]\n\n m_device2.id = 2\n m_device2.name = \"test2\"\n m_device2.interface = DeviceInterfaces.SERIAL\n m_device2.implementation = mock.Mock(\n code=\"ArduinoGeigerPCB\", value=\"arduinogeigerpcb\")\n\n m_device_serial2 = SerialDevice()\n m_device_serial2.port = \"/dev/ttyUSB2\"\n m_device_serial2.baudrate = 9600\n m_device_serial2.bytesize = SerialBytesizeTypes.SEVENBITS\n m_device_serial2.parity = SerialParityTypes.PARITY_EVEN\n m_device_serial2.stopbits = SerialStopbitTypes.STOPBITS_TWO\n\n m_device2.serial = [m_device_serial2]\n\n test_obj = sd.SerialDeviceObject(**{\"interface\": \"serial\"})\n result_obj = sd.SerialDeviceObject.find(m_session, test_obj, True)\n\n self.assertEqual(1, result_obj[0].id)\n self.assertEqual(\"test1\", result_obj[0].name)\n self.assertEqual(\"serial\", result_obj[0].interface)\n self.assertEqual(\"/dev/ttyUSB0\", result_obj[0].port)\n self.assertEqual(8, result_obj[0].bytesize)\n self.assertEqual(\"odd\", result_obj[0].parity)\n self.assertEqual(1, result_obj[0].stopbits)\n\n self.assertEqual(2, result_obj[1].id)\n self.assertEqual(\"test2\", result_obj[1].name)\n self.assertEqual(\"serial\", result_obj[1].interface)\n self.assertEqual(\"/dev/ttyUSB2\", result_obj[1].port)\n self.assertEqual(7, result_obj[1].bytesize)\n self.assertEqual(\"even\", result_obj[1].parity)\n self.assertEqual(2, result_obj[1].stopbits)\n\n def test_find_obj_multiple_none(self):\n m_query = mock.Mock()\n m_session = mock.Mock()\n m_session.query.return_value.filter_by.return_value. 
\\\n            join.return_value.filter_by.return_value = m_query\n\n        m_query.all.return_value = None\n\n        test_obj = sd.SerialDeviceObject(**{\"interface\": \"serial\"})\n        result_obj = sd.SerialDeviceObject.find(m_session, test_obj, True)\n\n        self.assertIsNone(result_obj)\n", "id": "408036", "language": "Python", "matching_score": 5.329638481140137, "max_stars_count": 0, "path": "radloggerpy/tests/database/objects/test_device_serial.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom oslo_log import log\n\n\nfrom radloggerpy import config\nfrom radloggerpy.database.models.device import Device\nfrom radloggerpy.database.objects.device import DeviceObject\nfrom radloggerpy.device.devices.arduino_geiger_pcb import ArduinoGeigerPcb\n\nfrom radloggerpy.tests import base\nfrom radloggerpy.types.device_interfaces import DeviceInterfaces\n\nLOG = log.getLogger(__name__)\nCONF = config.CONF\n\n\nclass TestDeviceObject(base.TestCase):\n\n    def setUp(self):\n        super(TestDeviceObject, self).setUp()\n\n    def test_init(self):\n\n        m_atribs = {\n            \"name\": \"value1\",\n            \"attributeskip\": \"none\",\n        }\n\n        test_obj = DeviceObject(**m_atribs)\n\n        self.assertEqual(\"value1\", test_obj.name)\n        self.assertIsNone(getattr(test_obj, \"attributeskip\", None))\n\n    def test_filter(self):\n\n        m_atribs = {\n            \"name\": \"value1\",\n            \"attributeskip\": \"none\",\n        }\n\n        test_obj = DeviceObject(**m_atribs)\n\n        m_result = test_obj._filter(test_obj)\n\n        self.assertEqual(\n            {\"name\": \"value1\"}, m_result)\n\n    def test_build_object_unset(self):\n\n        test_obj = DeviceObject()\n        test_obj._build_object()\n\n        self.assertIsNone(test_obj.m_device.id)\n        self.assertIsNone(test_obj.m_device.name)\n        self.assertIsNone(test_obj.m_device.interface)\n        self.assertIsNone(test_obj.m_device.implementation)\n\n    def test_build_object_values(self):\n\n        m_atribs = {\n            \"id\": 1,\n            \"name\": \"value1\",\n            \"interface\": \"serial\",\n            \"implementation\": \"ArduinoGeigerPCB\",\n        }\n\n        test_obj = DeviceObject(**m_atribs)\n        test_obj._build_object()\n\n        self.assertEqual(1, test_obj.m_device.id)\n        self.assertEqual(\"value1\", test_obj.m_device.name)\n        self.assertEqual(DeviceInterfaces.SERIAL, test_obj.m_device.interface)\n\n    def test_build_object_keys(self):\n\n        m_atribs = {\n            \"id\": 2,\n            \"name\": \"value2\",\n            \"interface\": DeviceInterfaces.SERIAL,\n            \"implementation\": \"ArduinoGeigerPCB\",\n        }\n\n        test_obj = DeviceObject(**m_atribs)\n        test_obj._build_object()\n\n        self.assertEqual(2, test_obj.m_device.id)\n        self.assertEqual(\"value2\", test_obj.m_device.name)\n        self.assertEqual(DeviceInterfaces.SERIAL, test_obj.m_device.interface)\n\n    def test_build_attributes_none(self):\n\n        test_obj = DeviceObject()\n        test_obj.m_device = Device()\n        test_obj._build_attributes()\n\n        self.assertIsNone(test_obj.id)\n        self.assertIsNone(test_obj.name)\n        self.assertIsNone(test_obj.interface)\n        self.assertIsNone(test_obj.implementation)\n\n    
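# Illustrative aside, not part of the original test_device.py: a minimal
# sketch of the round trip the surrounding tests exercise. DeviceObject
# accepts either the human-readable choice value or the enum member for
# "interface"; _build_object() normalises it onto the Device model and
# _build_attributes() maps it back to the readable value. The name value
# used here is an arbitrary example.
from radloggerpy.database.objects.device import DeviceObject
from radloggerpy.types.device_interfaces import DeviceInterfaces

obj = DeviceObject(**{"name": "monitor", "interface": "serial"})
obj._build_object()
assert obj.m_device.interface == DeviceInterfaces.SERIAL  # value -> enum member

obj._build_attributes()
assert obj.interface == "serial"  # enum member -> readable value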
def test_delete(self):\n m_session = mock.Mock()\n\n # TODO(Dantali0n): change into setting attributes directly\n m_atribs = {\n \"id\": 1,\n \"name\": \"test1\",\n \"interface\": DeviceInterfaces.SERIAL,\n \"implementation\": \"ArduinoGeigerPCB\",\n }\n\n m_query = mock.Mock()\n m_device = Device()\n m_device.id = 1\n m_device.name = \"test1\"\n m_device.interface = DeviceInterfaces.SERIAL\n m_device.implementation = mock.Mock(\n code=\"ArduinoGeigerPCB\", value=\"arduinogeigerpcb\")\n\n m_session.query.return_value.filter_by.return_value = m_query\n m_query.one_or_none.return_value = m_device\n\n test_obj = DeviceObject(**m_atribs)\n DeviceObject.delete(m_session, test_obj)\n\n m_session.delete.assert_has_calls(\n [\n mock.call(m_device),\n ],\n any_order=True\n )\n m_session.commit.assert_called_once()\n\n def test_delete_none(self):\n m_session = mock.Mock()\n\n # TODO(Dantali0n): change into setting attributes directly\n m_atribs = {\n \"id\": 1,\n \"name\": \"test1\",\n \"interface\": DeviceInterfaces.SERIAL,\n \"implementation\": \"ArduinoGeigerPCB\",\n }\n\n m_query = mock.Mock()\n m_device = Device()\n m_device.id = 1\n m_device.name = \"test1\"\n m_device.interface = DeviceInterfaces.SERIAL\n m_device.implementation = mock.Mock(\n code=\"ArduinoGeigerPCB\", value=\"arduinogeigerpcb\")\n\n m_session.query.return_value.filter_by.return_value = m_query\n m_query.one_or_none.return_value = None\n\n test_obj = DeviceObject(**m_atribs)\n DeviceObject.delete(m_session, test_obj)\n\n m_session.delete.assert_not_called()\n m_session.commit.assert_not_called()\n\n def test_delete_exception(self):\n m_session = mock.Mock()\n\n # TODO(Dantali0n): change into setting attributes directly\n m_atribs = {\n \"id\": 1,\n \"name\": \"test1\",\n \"interface\": DeviceInterfaces.SERIAL,\n \"implementation\": \"ArduinoGeigerPCB\",\n }\n\n m_query = mock.Mock()\n m_device = Device()\n m_device.id = 1\n m_device.name = \"test1\"\n m_device.interface = DeviceInterfaces.SERIAL\n m_device.implementation = mock.Mock(\n code=\"ArduinoGeigerPCB\", value=\"arduinogeigerpcb\")\n\n m_session.query.return_value.filter_by.return_value = m_query\n m_query.one_or_none.return_value = m_device\n\n m_session.commit.side_effect = RuntimeWarning\n\n test_obj = DeviceObject(**m_atribs)\n self.assertRaises(\n RuntimeWarning, DeviceObject.delete, m_session, test_obj)\n\n m_session.delete.assert_has_calls(\n [\n mock.call(m_device),\n ],\n any_order=True\n )\n m_session.commit.assert_called_once()\n m_session.rollback.assert_called_once()\n\n def test_delete_all(self):\n m_session = mock.Mock()\n\n # TODO(Dantali0n): change into setting attributes directly\n m_atribs = {\n \"id\": 1,\n \"name\": \"test1\",\n \"interface\": DeviceInterfaces.SERIAL,\n \"implementation\": \"ArduinoGeigerPCB\",\n }\n\n m_query = mock.Mock()\n m_device = Device()\n m_device.id = 1\n m_device.name = \"test1\"\n m_device.interface = DeviceInterfaces.SERIAL\n m_device.implementation = mock.Mock(\n code=\"ArduinoGeigerPCB\", value=\"arduinogeigerpcb\")\n\n m_session.query.return_value.filter_by.return_value = m_query\n m_query.all.return_value = [m_device]\n\n test_obj = DeviceObject(**m_atribs)\n DeviceObject.delete(m_session, test_obj, True)\n\n m_session.delete.assert_has_calls(\n [\n mock.call(m_device),\n ],\n any_order=True\n )\n m_session.commit.assert_called_once()\n\n def test_find_obj(self):\n\n \"\"\"Represents mocked device as it will be retrieved from db \"\"\"\n m_device = Device()\n m_device.id = 1\n m_device.name = \"test\"\n 
m_device.interface = DeviceInterfaces.SERIAL\n m_device.implementation = mock.Mock(\n code=\"ArduinoGeigerPCB\", value=\"arduinogeigerpcb\")\n\n \"\"\"Setup query and session to return mocked device\"\"\"\n m_query = mock.Mock()\n m_session = mock.Mock()\n m_session.query.return_value.filter_by.return_value = m_query\n m_query.one_or_none.return_value = m_device\n\n test_obj = DeviceObject(**{\"id\": 1})\n result_obj = DeviceObject.find(m_session, test_obj, False)\n\n self.assertEqual(1, result_obj.id)\n self.assertEqual(\"test\", result_obj.name)\n self.assertEqual(\"serial\", result_obj.interface)\n self.assertEqual(ArduinoGeigerPcb.NAME, result_obj.implementation)\n\n def test_find_obj_none(self):\n m_query = mock.Mock()\n m_session = mock.Mock()\n m_session.query.return_value.filter_by.return_value = m_query\n\n m_query.one_or_none.return_value = None\n\n test_obj = DeviceObject(**{\"id\": 1})\n result_obj = DeviceObject.find(m_session, test_obj, False)\n\n self.assertIsNone(result_obj)\n\n def test_find_obj_multiple(self):\n m_device1 = Device()\n m_device2 = Device()\n m_query = mock.Mock()\n m_session = mock.Mock()\n m_session.query.return_value.filter_by.return_value = m_query\n\n m_query.all.return_value = [m_device1, m_device2]\n\n m_device1.id = 1\n m_device1.name = \"test1\"\n m_device1.interface = DeviceInterfaces.SERIAL\n m_device1.implementation = mock.Mock(\n code=\"ArduinoGeigerPCB\", value=\"arduinogeigerpcb\")\n\n m_device2.id = 2\n m_device2.name = \"test2\"\n m_device2.interface = DeviceInterfaces.SERIAL\n m_device2.implementation = mock.Mock(\n code=\"ArduinoGeigerPCB\", value=\"arduinogeigerpcb\")\n\n test_obj = DeviceObject(**{\"interface\": \"serial\"})\n result_obj = DeviceObject.find(m_session, test_obj, True)\n\n self.assertEqual(1, result_obj[0].id)\n self.assertEqual(\"test1\", result_obj[0].name)\n self.assertEqual(\"serial\", result_obj[0].interface)\n self.assertEqual(ArduinoGeigerPcb.NAME, result_obj[0].implementation)\n\n self.assertEqual(2, result_obj[1].id)\n self.assertEqual(\"test2\", result_obj[1].name)\n self.assertEqual(\"serial\", result_obj[1].interface)\n self.assertEqual(ArduinoGeigerPcb.NAME, result_obj[1].implementation)\n\n def test_find_obj_multiple_none(self):\n m_query = mock.Mock()\n m_session = mock.Mock()\n m_session.query.return_value.filter_by.return_value = m_query\n\n m_query.all.return_value = None\n\n test_obj = DeviceObject(**{\"id\": 1})\n result_obj = DeviceObject.find(m_session, test_obj, True)\n\n self.assertIsNone(result_obj)\n", "id": "1426860", "language": "Python", "matching_score": 2.0319437980651855, "max_stars_count": 0, "path": "radloggerpy/tests/database/objects/test_device.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom oslo_log import log\n\nfrom radloggerpy._i18n import _\nfrom radloggerpy.database.models.device import Device\nfrom radloggerpy.database.models.serial_device import SerialDevice\nfrom radloggerpy.database.objects.device import DeviceObject\nfrom radloggerpy.types.device_interfaces import DeviceInterfaces\nfrom radloggerpy.types.serial_bytesize import BYTESIZE_CHOICES\nfrom radloggerpy.types.serial_parity import PARITY_CHOICES\nfrom radloggerpy.types.serial_stopbit import STOPBIT_CHOICES\n\nLOG = log.getLogger(__name__)\n\n\nclass SerialDeviceObject(DeviceObject):\n \"\"\"SerialDeviceObject\n\n TODO(Dantali0n): Write something nice about SerialDeviceObject\n \"\"\"\n\n \"Device serial model attributes\"\n port = None\n baudrate = None\n bytesize = None\n parity = None\n stopbits = None\n timeout = None\n\n m_serial_device = None\n\n def _build_object(self):\n super(SerialDeviceObject, self)._build_object()\n\n self.m_serial_device = SerialDevice()\n\n if self.port:\n self.m_serial_device.port = self.port\n if self.baudrate:\n self.m_serial_device.baudrate = self.baudrate\n\n if self.bytesize in BYTESIZE_CHOICES.keys():\n self.m_serial_device.bytesize = self.bytesize\n elif self.bytesize in BYTESIZE_CHOICES.values():\n index = list(BYTESIZE_CHOICES.values()).index(self.bytesize)\n self.m_serial_device.bytesize = \\\n list(BYTESIZE_CHOICES.keys())[index]\n\n if self.parity in PARITY_CHOICES.keys():\n self.m_serial_device.parity = self.parity\n elif self.parity in PARITY_CHOICES.values():\n index = list(PARITY_CHOICES.values()).index(self.parity)\n self.m_serial_device.parity = \\\n list(PARITY_CHOICES.keys())[index]\n\n if self.stopbits in STOPBIT_CHOICES.keys():\n self.m_serial_device.stopbits = self.stopbits\n elif self.stopbits in STOPBIT_CHOICES.values():\n index = list(STOPBIT_CHOICES.values()).index(self.stopbits)\n self.m_serial_device.stopbits = \\\n list(STOPBIT_CHOICES.keys())[index]\n\n if self.timeout:\n self.m_serial_device.timeout = self.timeout\n\n def _build_attributes(self):\n super(SerialDeviceObject, self)._build_attributes()\n\n if self.m_serial_device.port:\n self.port = self.m_serial_device.port\n if self.m_serial_device.baudrate:\n self.baudrate = self.m_serial_device.baudrate\n\n if self.m_serial_device.bytesize:\n self.bytesize = BYTESIZE_CHOICES[self.m_serial_device.bytesize]\n\n if self.m_serial_device.parity:\n self.parity = PARITY_CHOICES[self.m_serial_device.parity]\n\n if self.m_serial_device.stopbits:\n self.stopbits = STOPBIT_CHOICES[self.m_serial_device.stopbits]\n\n if self.m_serial_device.timeout:\n self.timeout = self.m_serial_device.timeout\n\n @staticmethod\n def add(session, reference):\n reference._build_object()\n\n session.add(reference.m_device)\n\n reference.m_serial_device.base_device = reference.m_device\n\n session.add(reference.m_serial_device)\n\n try:\n return session.commit()\n except Exception:\n session.rollback()\n # TODO(Dantali0n): These errors are horrendous for users to\n # understand an error abstraction is needed.\n raise\n\n @staticmethod\n def update(session, reference, base, allow_multiple=False):\n NotImplementedError()\n\n @staticmethod\n def delete(session, reference, allow_multiple=False):\n NotImplementedError()\n\n @staticmethod\n def find(session, reference, allow_multiple=True):\n reference._build_object()\n\n \"\"\"Only look for serial devices\"\"\"\n reference.m_device.interface = DeviceInterfaces.SERIAL\n\n 
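        # The lookup below is assembled in two stages: attributes stored on the
        # base Device row (name, interface, implementation, ...) end up in
        # base_filters, while serial specific attributes (port, baudrate,
        # parity, ...) are matched after joining the SerialDevice table.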
base_filters = reference._filter(reference.m_device)\n\n \"\"\"Check if reference is base or child type when setting filters\"\"\"\n if hasattr(reference, 'm_serial_device'):\n filters = reference._filter(reference.m_serial_device)\n else:\n LOG.warning(_(\"Reference should be of type SerialDeviceObject\"))\n filters = {}\n\n query = session.query(Device).filter_by(**base_filters)\\\n .join(SerialDevice).filter_by(**filters)\n\n if allow_multiple:\n results = query.all()\n\n if results is None:\n return None\n\n ret_results = list()\n for result in results:\n dev = SerialDeviceObject()\n dev.m_device = result\n dev.m_serial_device = result.serial[0]\n dev._build_attributes()\n ret_results.append(dev)\n\n return ret_results\n else:\n result = query.one_or_none()\n\n if result is None:\n return None\n\n dev = SerialDeviceObject()\n dev.m_device = result\n dev.m_serial_device = result.serial[0]\n dev._build_attributes()\n return dev\n\n @staticmethod\n def find_enabled(session):\n return SerialDeviceObject.find(\n session, SerialDeviceObject(**{'enabled': True}), True)\n\n @staticmethod\n def find_all(session, references):\n NotImplementedError()\n\n @staticmethod\n def add_all(session, references):\n NotImplementedError()\n", "id": "2101817", "language": "Python", "matching_score": 5.154079914093018, "max_stars_count": 0, "path": "radloggerpy/database/objects/serial_device.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom radloggerpy.database.models.device import Device\nfrom radloggerpy.database.objects.base import DatabaseObject\n# from radloggerpy.database.objects.serial_device import SerialDeviceObject\nfrom radloggerpy.types.device_interfaces import INTERFACE_CHOICES\nfrom radloggerpy.types.device_types import DEVICE_TYPE_CHOICES\n\n\nclass DeviceObject(DatabaseObject):\n \"\"\"device object with base model attributes\"\"\"\n\n id = None\n name = None\n enabled = None\n type = None\n interface = None\n implementation = None\n\n m_device = None\n\n def _build_object(self):\n self.m_device = Device()\n\n if self.id:\n self.m_device.id = self.id\n if self.name:\n self.m_device.name = self.name\n\n if self.enabled:\n self.m_device.enabled = self.enabled\n\n if self.type in DEVICE_TYPE_CHOICES.keys():\n self.m_device.type = self.type\n elif self.type in DEVICE_TYPE_CHOICES.values():\n index = list(DEVICE_TYPE_CHOICES.values()).index(self.type)\n self.m_device.type = list(DEVICE_TYPE_CHOICES.keys())[index]\n\n if self.interface in INTERFACE_CHOICES.keys():\n self.m_device.interface = self.interface\n elif self.interface in INTERFACE_CHOICES.values():\n index = list(INTERFACE_CHOICES.values()).index(self.interface)\n self.m_device.interface = list(INTERFACE_CHOICES.keys())[index]\n\n if self.implementation:\n self.m_device.implementation = self.implementation\n\n def _build_attributes(self):\n if self.m_device.id:\n self.id = self.m_device.id\n if self.m_device.name:\n self.name = self.m_device.name\n\n if self.m_device.enabled:\n self.enabled = self.m_device.enabled\n\n if self.m_device.type:\n self.type = DEVICE_TYPE_CHOICES[self.m_device.type]\n\n if self.m_device.interface:\n self.interface = INTERFACE_CHOICES[self.m_device.interface]\n\n if self.m_device.implementation:\n self.implementation = self.m_device.implementation.code\n\n @staticmethod\n def add(session, reference):\n NotImplementedError()\n\n @staticmethod\n def update(session, reference, base, allow_multiple=False):\n NotImplementedError()\n\n @staticmethod\n def delete(session, reference, allow_multiple=False):\n reference._build_object()\n\n filters = reference._filter(reference.m_device)\n query = session.query(Device).filter_by(**filters)\n\n if allow_multiple:\n results = query.all()\n\n if results is None:\n return None\n\n devs = list()\n\n for result in results:\n dev = DeviceObject()\n dev.m_device = result\n session.delete(result)\n dev._build_attributes()\n devs.append(dev)\n else:\n result = query.one_or_none()\n\n if result is None:\n return None\n\n dev = DeviceObject()\n dev.m_device = result\n dev._build_attributes()\n session.delete(result)\n\n try:\n session.commit()\n except Exception:\n session.rollback()\n # TODO(Dantali0n): These errors are horrendous for users to\n # understand an error abstraction is needed.\n raise\n\n if allow_multiple:\n return devs\n else:\n return dev\n\n @staticmethod\n def find(session, reference, allow_multiple=True):\n reference._build_object()\n\n filters = reference._filter(reference.m_device)\n query = session.query(Device).filter_by(**filters)\n\n if allow_multiple:\n results = query.all()\n\n if results is None:\n return None\n\n ret_results = list()\n for result in results:\n dev = DeviceObject()\n dev.m_device = result\n dev._build_attributes()\n ret_results.append(dev)\n\n return ret_results\n else:\n result = query.one_or_none()\n\n if result is None:\n return None\n\n dev = 
DeviceObject()\n dev.m_device = result\n dev._build_attributes()\n return dev\n\n @staticmethod\n def find_enabled(session):\n return DeviceObject.find(\n session, DeviceObject(**{'enabled': True}), True)\n\n # @staticmethod\n # def upgrade(session, reference):\n # \"\"\"Upgrade the basic DeviceObject to its specific interface object\"\"\"\n #\n # if reference.interface is DeviceInterfaces.SERIAL:\n # return SerialDeviceObject.find(session, reference)\n # elif reference.interface is DeviceInterfaces.ETHERNET:\n # raise NotImplementedError(_(\"Class EthernetDeviceObject not\"\n # \"implemented yet.\"))\n\n @staticmethod\n def find_all(session, references):\n NotImplementedError()\n\n @staticmethod\n def add_all(session, references):\n NotImplementedError()\n", "id": "9572007", "language": "Python", "matching_score": 2.585313320159912, "max_stars_count": 0, "path": "radloggerpy/database/objects/device.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport abc\nfrom threading import Condition\n\nfrom radloggerpy.database.objects.serial_device import SerialDeviceObject\nfrom radloggerpy.device import device\nfrom radloggerpy.types.device_interfaces import DeviceInterfaces\n\n\nclass SerialDevice(device.Device, metaclass=abc.ABCMeta):\n \"\"\"SerialDevice class for serial communication interface devices\n\n A SerialDevice is used for communication interfaces typically available\n such as `RS-232` or `RS-485`. If the device to support uses a COMx port on\n Windows or is listed in `/dev/tty*` on Linux this is the abstract class to\n implement.\n\n Devices implementing this class their settings are stored in the database\n with the :py:class:`radloggerpy.database.serial_device.SerialDevice`. if\n any additional information is required these can be stored using the\n :py:class:`radloggerpy.database.device_attribute.DeviceAttribute`.\n \"\"\"\n\n # TODO(Dantali0n): Do not refer to database models but to database\n # interfacing classes(add_devices(session, [device])).\n\n NAME = \"SerialDevice\"\n INTERFACE = DeviceInterfaces.SERIAL\n\n def __init__(self, info: SerialDeviceObject, condition: Condition):\n super(SerialDevice, self).__init__(info, condition)\n", "id": "8926928", "language": "Python", "matching_score": 3.063385486602783, "max_stars_count": 0, "path": "radloggerpy/device/device_interfaces/serial_device.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2020 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport abc\nfrom threading import Condition\n\nfrom radloggerpy.database.objects.device import DeviceObject\nfrom radloggerpy.device import device\nfrom radloggerpy.types.device_interfaces import DeviceInterfaces\n\n\nclass UsbDevice(device.Device, metaclass=abc.ABCMeta):\n \"\"\"UsbDevice base class\"\"\"\n\n NAME = \"UsbDevice\"\n INTERFACE = DeviceInterfaces.USB\n\n def __init__(self, info: DeviceObject, condition: Condition):\n super(UsbDevice, self).__init__(info, condition)\n", "id": "8373634", "language": "Python", "matching_score": 0.32297375798225403, "max_stars_count": 0, "path": "radloggerpy/device/device_interfaces/usb_device.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom sqlalchemy import Column, Integer, ForeignKey, String, Enum\nfrom sqlalchemy.orm import relationship\n\nfrom radloggerpy.database.declarative_base import base\nfrom radloggerpy.types.serial_bytesize import SerialBytesizeTypes\nfrom radloggerpy.types.serial_parity import SerialParityTypes\nfrom radloggerpy.types.serial_stopbit import SerialStopbitTypes\n\n\nclass SerialDevice(base):\n id = Column(Integer(), primary_key=True)\n base_id = Column(Integer, ForeignKey('device.id'))\n\n port = Column(String, unique=True)\n baudrate = Column(Integer())\n bytesize = Column(Enum(SerialBytesizeTypes))\n parity = Column(Enum(SerialParityTypes))\n stopbits = Column(Enum(SerialStopbitTypes))\n timeout = Column(Integer())\n\n base_device = relationship(\n \"Device\", back_populates=\"serial\", single_parent=True)\n", "id": "4688589", "language": "Python", "matching_score": 3.482982635498047, "max_stars_count": 0, "path": "radloggerpy/database/models/serial_device.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom sqlalchemy import Boolean\nfrom sqlalchemy import Column\nfrom sqlalchemy import Enum\nfrom sqlalchemy import Integer\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy import String\nfrom sqlalchemy_utils import ChoiceType\n\nfrom radloggerpy.database.declarative_base import base\nfrom radloggerpy.types.device_implementations import IMPLEMENTATION_CHOICES\nfrom radloggerpy.types.device_interfaces import DeviceInterfaces\nfrom radloggerpy.types.device_types import DeviceTypes\n\n\nclass Device(base):\n id = Column(Integer, primary_key=True)\n\n name = Column(String, unique=True)\n\n enabled = Column(Boolean, default=True, nullable=False)\n\n type = Column(Enum(DeviceTypes))\n \"\"\"Type stores redundant information that could be extrapolated from\n implementation, however, storing type allows for more efficient\n queries.\"\"\"\n\n interface = Column(Enum(DeviceInterfaces))\n \"\"\"Interface stores redundant information that could be extrapolated\n implementation, however, storing interface allows for more efficient\n queries.\"\"\"\n\n implementation = Column(ChoiceType(IMPLEMENTATION_CHOICES))\n\n attributes = relationship(\n \"DeviceAttribute\", back_populates=\"base_device\",\n cascade=\"all, delete-orphan\")\n ethernet = relationship(\n \"EthernetDevice\", back_populates=\"base_device\", single_parent=True,\n cascade=\"all, delete-orphan\")\n serial = relationship(\n \"SerialDevice\", back_populates=\"base_device\", single_parent=True,\n cascade=\"all, delete-orphan\")\n usb = relationship(\n \"UsbDevice\", back_populates=\"base_device\", single_parent=True,\n cascade=\"all, delete-orphan\")\n", "id": "12130609", "language": "Python", "matching_score": 3.1972503662109375, "max_stars_count": 0, "path": "radloggerpy/database/models/device.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2020 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom datetime import datetime\n\nfrom sqlalchemy import Column, Integer, ForeignKey, Float, DateTime\nfrom sqlalchemy.orm import relationship\n\nfrom radloggerpy.database.declarative_base import base\n\n\nclass Measurement(base):\n id = Column(Integer, primary_key=True)\n device_id = Column(Integer, ForeignKey('device.id'))\n timestamp = Column(DateTime, default=datetime.utcnow)\n\n cpm = Column(Integer, nullable=True)\n svh = Column(Float, nullable=True)\n\n bq = Column(Integer, nullable=True)\n cpkg = Column(Integer, nullable=True)\n gray = Column(Integer, nullable=True)\n\n base_device = relationship(\"Device\", single_parent=True)\n", "id": "7338414", "language": "Python", "matching_score": 1.1096497774124146, "max_stars_count": 0, "path": "radloggerpy/database/models/measurement.py" }, { "content": "# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom oslo_log import log\nfrom radloggerpy import config\n\nfrom radloggerpy._i18n import _\nfrom radloggerpy.models import timestamp\n\nLOG = log.getLogger(__name__)\nCONF = config.CONF\n\n\nclass RadiationReading(timestamp.TimeStamp):\n\n _cpm = 0\n\n def __init__(self):\n super().__init__()\n\n def set_cpm(self, cpm):\n \"\"\"Set the counts per minute to the new value\n\n :param cpm: Counts per minute\n :type cpm: int\n \"\"\"\n\n if cpm < 0:\n LOG.warning(_(\"RadiationReading can not have negative cpm\"))\n return\n\n self._cpm = cpm\n\n def get_cpm(self):\n \"\"\"Get the current counts per minute\n\n :return: The current internal counts per minute\n :rtype: int\n \"\"\"\n return self._cpm\n", "id": "6879466", "language": "Python", "matching_score": 2.5374369621276855, "max_stars_count": 0, "path": "radloggerpy/models/radiationreading.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom radloggerpy.models import radiationreading\nfrom radloggerpy.tests import base\n\n\nclass TestRadiationReadingModel(base.TestCase):\n\n def setUp(self):\n super(TestRadiationReadingModel, self).setUp()\n\n self.m_radiation_reading = radiationreading.RadiationReading()\n\n def test_no_instance_attributes(self):\n \"\"\"Test that the class has no instance variables\"\"\"\n\n test = radiationreading.RadiationReading()\n self.assertEqual(\n len(dir(radiationreading.RadiationReading)), len(dir(test)))\n\n def test_set_get(self):\n self.m_radiation_reading.set_cpm(24)\n self.assertEqual(24, self.m_radiation_reading.get_cpm())\n\n @mock.patch.object(radiationreading, 'LOG')\n def test_set_invalid(self, m_log):\n \"\"\"Set cpm to an invalid value and check it stays unchanged and logs\"\"\"\n\n self.m_radiation_reading.set_cpm(0)\n self.m_radiation_reading.set_cpm(-1)\n\n m_log.warning.assert_called_once()\n self.assertEqual(0, self.m_radiation_reading.get_cpm())\n", "id": "9023989", "language": "Python", "matching_score": 1.2107057571411133, "max_stars_count": 0, "path": "radloggerpy/tests/models/test_radiationreading.py" }, { "content": "# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport copy\nfrom threading import Condition\n\nfrom oslo_log import log\nfrom radloggerpy import config\n\nfrom readerwriterlock import rwlock\n\nfrom radloggerpy._i18n import _\nfrom radloggerpy.models.radiationreading import RadiationReading\n\nLOG = log.getLogger(__name__)\nCONF = config.CONF\n\n\nclass DeviceDataBuffer:\n \"\"\"Native list protected by locks for devices as data buffer\n\n Read and Write lock are used in reverse because CPython GIL allows multiple\n threads to add elements concurrently without the List entering an invalid\n state. When all elements are fetched and cleared the write lock is used\n because it has preference over the readers.\n\n All readings in the data buffer must be of type RadiationReading as\n enforced while calling add_elements.\n\n Adding readings will acquire and notify on the condition as this will wake\n up the DeviceManager.\n \"\"\"\n\n def __init__(self, condition: Condition):\n self.has_reading = False\n self.condition = condition\n self.data = list()\n self.rwlock = rwlock.RWLockRead()\n\n def add_readings(self, readings):\n \"\"\"Add the readings to the buffer\n\n Add all the readings to the buffer and remove any elements not of type\n :py:class: '~.RadiationReading'.\n\n :param readings: The readings to be added to the data buffer\n :type readings: List of :py:class: '~.RadiationReading' instances\n :return: True if the elements were successfully added False otherwise\n \"\"\"\n\n for e in readings:\n if not isinstance(e, RadiationReading):\n LOG.error(_(\"Element: %s, is not of type \"\n \"RadiationReading\") % e)\n readings.remove(e)\n\n lock = self.rwlock.gen_rlock()\n try:\n if lock.acquire():\n self.data.extend(readings)\n self.has_reading = True\n with self.condition:\n self.condition.notify()\n return True\n finally:\n lock.release()\n\n return False\n\n def has_readings(self):\n \"\"\"Indicate if the buffer is not empty\n\n :return: True if one or more entries in buffer, false otherwise\n \"\"\"\n return self.has_reading\n\n def fetch_clear_readings(self):\n \"\"\"Retrieve all the readings from the buffer and clear the buffer\n\n Gets a exclusive write lock to create a reference to current data\n and subsequently clears the internal buffer. Afterwards it returns\n the previous internal readings. If getting the exclusive write lock\n failed it will return None instead.\n\n :return: All the buffered readings available or None if the lock fails\n :rtype: List of :py:class: '~.RadiationReading' instances | None\n \"\"\"\n\n lock = self.rwlock.gen_wlock()\n try:\n if lock.acquire():\n self.has_reading = False\n ref = copy.copy(self.data)\n self.data.clear()\n return ref\n finally:\n lock.release()\n", "id": "9100719", "language": "Python", "matching_score": 3.1849026679992676, "max_stars_count": 0, "path": "radloggerpy/datastructures/device_data_buffer.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom oslo_log import log\nfrom radloggerpy import config\n\nfrom readerwriterlock import rwlock\n\nfrom radloggerpy.datastructures import device_data_buffer\nfrom radloggerpy.models.radiationreading import RadiationReading\nfrom radloggerpy.tests import base\n\nLOG = log.getLogger(__name__)\nCONF = config.CONF\n\n\nclass TestDeviceDataBuffer(base.TestCase):\n\n def setUp(self):\n super(TestDeviceDataBuffer, self).setUp()\n self.m_condition = mock.Mock()\n self.m_condition.__enter__ = mock.Mock()\n self.m_condition.__exit__ = mock.Mock()\n self.m_buffer = device_data_buffer.DeviceDataBuffer(self.m_condition)\n\n def test_add_readings_empty(self):\n \"\"\"Test that buffer remains empty when adding empty collection\"\"\"\n self.m_buffer.add_readings([])\n\n self.assertEqual([], self.m_buffer.fetch_clear_readings())\n\n def test_add_reading(self):\n \"\"\"Add single valid reading and assert it can be fetched\"\"\"\n m_reading = RadiationReading()\n\n self.m_buffer.add_readings([m_reading])\n\n readings = self.m_buffer.fetch_clear_readings()\n self.assertEqual([m_reading], readings)\n\n @mock.patch.object(device_data_buffer, 'LOG')\n def test_add_reading_invalid(self, m_log):\n \"\"\"Assert that invalid objects can not be added to the buffer\"\"\"\n m_add_readings = [RadiationReading(), object()]\n\n self.m_buffer.add_readings(m_add_readings)\n readings = self.m_buffer.fetch_clear_readings()\n\n m_log.error.assert_called_once()\n self.assertEqual(m_add_readings, readings)\n\n def test_add_reading_condition(self):\n m_condition = mock.Mock()\n m_condition.__enter__ = mock.Mock()\n m_condition.__exit__ = mock.Mock()\n buffer = device_data_buffer.DeviceDataBuffer(m_condition)\n\n m_reading = RadiationReading()\n buffer.add_readings([m_reading])\n\n m_condition.notify.assert_called_once()\n\n def test_clearing_buffer(self):\n \"\"\"Test that fetch_clear will remove previous readings\"\"\"\n m_reading = RadiationReading()\n\n self.m_buffer.add_readings([m_reading])\n\n self.assertEqual([m_reading], self.m_buffer.fetch_clear_readings())\n self.assertEqual([], self.m_buffer.fetch_clear_readings())\n\n @mock.patch.object(\n rwlock.RWLockRead, 'gen_rlock')\n def test_add_reading_lock(self, m_read):\n \"\"\"Simulate failed lock and correct add_readings return value\"\"\"\n m_read.return_value.acquire.return_value = False\n m_reading = RadiationReading()\n\n self.assertFalse(self.m_buffer.add_readings([m_reading]))\n\n @mock.patch.object(\n rwlock.RWLockRead, 'gen_wlock')\n def test_fetch_readings_lock(self, m_write):\n \"\"\"Simulate failed lock and correct fetch_x_readings return value\"\"\"\n m_write.return_value.acquire.side_effect = [False]\n m_reading = RadiationReading()\n\n self.assertTrue(self.m_buffer.add_readings([m_reading]))\n self.assertIsNone(self.m_buffer.fetch_clear_readings())\n", "id": "7500851", "language": "Python", "matching_score": 1.494070053100586, "max_stars_count": 0, "path": "radloggerpy/tests/datastructures/test_device_data_buffer.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the 
Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\nfrom threading import Condition\nfrom unittest import mock\n\nfrom concurrent.futures import ThreadPoolExecutor\nimport time\n\nfrom oslo_log import log\nfrom radloggerpy import config\nfrom radloggerpy.database.objects.device import DeviceObject\n\nfrom radloggerpy.device.device import Device\nfrom radloggerpy.models.radiationreading import RadiationReading\nfrom radloggerpy.tests import base\nfrom radloggerpy.types.device_states import DeviceStates\n\nLOG = log.getLogger(__name__)\nCONF = config.CONF\n\n\nclass TestDevice(base.TestCase):\n\n class FakeDevice(Device):\n \"\"\"Fake class to implement device for testing\"\"\"\n\n runner = True\n\n def __init__(self):\n m_dev = DeviceObject()\n m_condition = Condition()\n super(TestDevice.FakeDevice, self).__init__(m_dev, m_condition)\n\n def _init(self):\n self.runner = True\n\n def _run(self):\n \"\"\"Add RadiationReading element\"\"\"\n for i in range(2):\n self.data.add_readings([RadiationReading()])\n time.sleep(0.1)\n\n while self.runner:\n \"\"\"Keep running until stopped externally\"\"\"\n time.sleep(0.1)\n\n def stop(self):\n self.runner = False\n\n def is_stopping(self):\n return self.runner\n\n def setUp(self):\n super(TestDevice, self).setUp()\n\n def test_run_sequential(self):\n m_device = self.FakeDevice()\n m_device.runner = False\n m_device._run()\n\n self.assertEqual(2, len(m_device.get_data()))\n\n def test_run_runner(self):\n m_device = self.FakeDevice()\n self.assertEqual(DeviceStates.STOPPED, m_device.get_state())\n\n executor = ThreadPoolExecutor(max_workers=1)\n future = executor.submit(m_device.run)\n\n time.sleep(0.5)\n\n self.assertEqual(2, len(m_device.get_data()))\n self.assertFalse(future.done())\n\n m_device.runner = False\n time.sleep(0.5)\n self.assertTrue(future.done())\n self.assertEqual(DeviceStates.STOPPED, m_device.get_state())\n\n def test_run_error(self):\n m_device = self.FakeDevice()\n self.assertEqual(DeviceStates.STOPPED, m_device.get_state())\n\n m_error = RuntimeError()\n m_init = mock.Mock()\n m_init.side_effect = m_error\n m_device._init = m_init\n\n executor = ThreadPoolExecutor(max_workers=1)\n future = executor.submit(m_device.run)\n\n time.sleep(0.5)\n\n self.assertEqual(DeviceStates.ERROR, m_device.get_state())\n self.assertEqual(m_error, future.exception())\n\n def test_run_stop(self):\n m_device = self.FakeDevice()\n self.assertEqual(DeviceStates.STOPPED, m_device.get_state())\n\n executor = ThreadPoolExecutor(max_workers=1)\n future = executor.submit(m_device.run)\n\n time.sleep(0.5)\n\n self.assertEqual(2, len(m_device.get_data()))\n self.assertFalse(future.done())\n self.assertEqual(DeviceStates.RUNNING, m_device.get_state())\n\n m_device.stop()\n time.sleep(0.5)\n self.assertTrue(future.done())\n self.assertEqual(DeviceStates.STOPPED, m_device.get_state())\n\n def test_run_double(self):\n m_device = self.FakeDevice()\n self.assertEqual(DeviceStates.STOPPED, m_device.get_state())\n\n executor = ThreadPoolExecutor(max_workers=2)\n future = 
executor.submit(m_device.run)\n\n time.sleep(0.5)\n\n self.assertEqual(2, len(m_device.get_data()))\n self.assertFalse(future.done())\n self.assertEqual(DeviceStates.RUNNING, m_device.get_state())\n\n future2 = executor.submit(m_device.run)\n time.sleep(0.5)\n self.assertIsInstance(future2.exception(), RuntimeError)\n self.assertEqual(DeviceStates.RUNNING, m_device.get_state())\n\n m_device.stop()\n time.sleep(0.5)\n self.assertTrue(future.done())\n self.assertEqual(DeviceStates.STOPPED, m_device.get_state())\n\n def test_transition_double(self):\n \"\"\"Tests against statemachine being a static variable\"\"\"\n\n m_device1 = self.FakeDevice()\n self.assertEqual(DeviceStates.STOPPED, m_device1.get_state())\n\n m_error = RuntimeError()\n m_init = mock.Mock()\n m_init.side_effect = m_error\n m_device1._init = m_init\n\n m_device2 = self.FakeDevice()\n self.assertEqual(DeviceStates.STOPPED, m_device2.get_state())\n\n executor = ThreadPoolExecutor(max_workers=2)\n executor.submit(m_device1.run)\n executor.submit(m_device2.run)\n\n time.sleep(0.5)\n\n self.assertEqual(DeviceStates.ERROR, m_device1.get_state())\n self.assertEqual(DeviceStates.RUNNING, m_device2.get_state())\n\n m_device1.stop()\n m_device2.stop()\n\n time.sleep(0.5)\n self.assertEqual(DeviceStates.STOPPED, m_device2.get_state())\n\n def test_run_stop_run(self):\n m_device = self.FakeDevice()\n executor = ThreadPoolExecutor(max_workers=1)\n future = executor.submit(m_device.run)\n\n time.sleep(0.5)\n\n self.assertEqual(2, len(m_device.get_data()))\n self.assertFalse(future.done())\n\n m_device.stop()\n time.sleep(0.5)\n self.assertTrue(future.done())\n\n future = executor.submit(m_device.run)\n\n time.sleep(0.5)\n\n self.assertEqual(2, len(m_device.get_data()))\n self.assertFalse(future.done())\n\n m_device.stop()\n time.sleep(0.5)\n self.assertTrue(future.done())\n\n def test_init_error_run(self):\n m_device = self.FakeDevice()\n executor = ThreadPoolExecutor(max_workers=1)\n\n r_init = m_device._init\n m_error = RuntimeError()\n m_init = mock.Mock()\n m_init.side_effect = m_error\n m_device._init = m_init\n\n future = executor.submit(m_device.run)\n\n time.sleep(0.5)\n\n self.assertEqual(DeviceStates.ERROR, m_device.get_state())\n self.assertEqual(m_error, future.exception())\n\n m_device._init = r_init\n\n future = executor.submit(m_device.run)\n\n time.sleep(0.5)\n\n self.assertEqual(2, len(m_device.get_data()))\n self.assertFalse(future.done())\n\n m_device.stop()\n time.sleep(0.5)\n self.assertTrue(future.done())\n\n def test_run_error_run(self):\n m_device = self.FakeDevice()\n executor = ThreadPoolExecutor(max_workers=1)\n\n r_run = m_device._run\n m_error = RuntimeError()\n m_run = mock.Mock()\n m_run.side_effect = m_error\n m_device._run = m_run\n\n future = executor.submit(m_device.run)\n\n time.sleep(0.5)\n\n self.assertEqual(DeviceStates.ERROR, m_device.get_state())\n self.assertEqual(m_error, future.exception())\n\n m_device._run = r_run\n\n future = executor.submit(m_device.run)\n\n time.sleep(0.5)\n\n self.assertEqual(2, len(m_device.get_data()))\n self.assertFalse(future.done())\n\n m_device.stop()\n time.sleep(0.5)\n self.assertTrue(future.done())\n", "id": "1132168", "language": "Python", "matching_score": 4.0306010246276855, "max_stars_count": 0, "path": "radloggerpy/tests/device/test_device.py" }, { "content": "# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport abc\nfrom threading import Condition\n\nfrom typing import Type\nfrom typing import TypeVar\n\nfrom oslo_log import log\nfrom radloggerpy import config\n\nfrom radloggerpy._i18n import _\nfrom radloggerpy.database.objects.device import DeviceObject\nfrom radloggerpy.datastructures.device_data_buffer import DeviceDataBuffer\nfrom radloggerpy.device.device_state_machine import DeviceStateMachine\nfrom radloggerpy.types.device_states import DeviceStates\nfrom radloggerpy.types.device_types import DeviceTypes\n\nLOG = log.getLogger(__name__)\nCONF = config.CONF\n\n\nclass Device(metaclass=abc.ABCMeta):\n \"\"\"Abstract class all radiation monitoring devices should implement\"\"\"\n\n NAME = \"Device\"\n \"\"\"Each radiation monitoring device should have a unique name\"\"\"\n\n INTERFACE = None\n \"\"\"Each radiation monitoring device should use a specific interface\"\"\"\n\n TYPE = DeviceTypes.UNDEFINED\n \"\"\"Each radiation monitoring device should define its type\"\"\"\n\n _U = TypeVar('_U', bound=DeviceObject)\n \"\"\"Bound to :py:class:`radloggerpy.database.objects.device.DeviceObject`\"\"\"\n\n def __init__(self, info: Type[_U], condition: Condition):\n\n self.condition = condition\n self.info = info\n self.data = DeviceDataBuffer(self.condition)\n\n self._statemachine = DeviceStateMachine()\n\n @abc.abstractmethod\n def _init(self):\n \"\"\"Method to perform device initialization\n\n Devices are allowed to clear any flags or variables set when stop() was\n called previously inside of this method.\n \"\"\"\n\n @abc.abstractmethod\n def _run(self):\n \"\"\"Method to be called to run continuously in its own thread\n\n Devices should not return from this method unless the intent is for the\n device to stop retrieving data. Data can be gathered by either polling\n or using events / wait if the external system supports to do so.\n Timers may also be used, please be sure to honor:\n CONF.devices.minimal_polling_delay\n \"\"\"\n\n def run(self):\n \"\"\"Entry point for devices to initialize and start running\n\n Serves as the entry point for devices and calls _init and _run. 
In\n addition handles any required state transitions\n\n Any exception encountered will be raised so DeviceManager can handle it\n appropriately.\n \"\"\"\n\n if self._statemachine.get_state() is DeviceStates.ERROR:\n \"Recover device from error state\"\n LOG.info(_(\"Restarting {} device of implementation {} from \"\n \"previous error state.\")\n .format(self.info.name, self.info.implementation))\n self._statemachine.reset_state()\n elif self._statemachine.get_state() is not DeviceStates.STOPPED:\n \"Not logging a message here, DeviceManager can easily do that\"\n raise RuntimeError(_(\"Can not start same device {} multiple times\")\n .format(self.info.name))\n\n try:\n self._statemachine.transition(DeviceStates.INITIALIZING)\n self._init()\n except Exception:\n self._statemachine.transition(DeviceStates.ERROR)\n raise\n\n try:\n self._statemachine.transition(DeviceStates.RUNNING)\n self._run()\n except Exception:\n self._statemachine.transition(DeviceStates.ERROR)\n raise\n\n if self._statemachine.get_state() is DeviceStates.RUNNING:\n self._statemachine.transition(DeviceStates.STOPPED)\n\n @abc.abstractmethod\n def stop(self):\n \"\"\"Method when called that should halt operation of device asap\n\n Halting can be achieved by setting a variable and checking this\n variable inside a loop in the _run method. Other methods include using\n conditions to notify the _run method.\n \"\"\"\n\n @abc.abstractmethod\n def is_stopping(self):\n \"\"\"Should return true if in the progress of stopping false otherwise\n\n :return: True if stopping, false otherwise\n \"\"\"\n\n def get_state(self):\n \"\"\"Return the current statemachine state\"\"\"\n\n return self._statemachine.get_state()\n\n def has_data(self):\n \"\"\"Wrapper around internal buffer\"\"\"\n\n return self.data.has_readings()\n\n def get_data(self):\n \"\"\"Return a collection of radiation monitoring data if any is available\n\n Retrieves the currently stored collection of radiation monitoring data\n and subsequently clears it.\n\n :return: Collection of RadiationReading objects\n :rtype: List of :py:class: '~.RadiationReading' instances\n \"\"\"\n got_data = self.data.fetch_clear_readings()\n if got_data:\n return got_data\n else:\n LOG.error(_(\"Unable to retrieve data for: %s\") % self.NAME)\n return []\n", "id": "11798800", "language": "Python", "matching_score": 3.543755054473877, "max_stars_count": 0, "path": "radloggerpy/device/device.py" }, { "content": "# Copyright (c) 2021 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom oslo_log import log\nfrom radloggerpy import config\n\nfrom radloggerpy.common.state_machine import StateMachine\nfrom radloggerpy.types.device_states import DeviceStates\n\nLOG = log.getLogger(__name__)\nCONF = config.CONF\n\n\nclass DeviceStateMachine(StateMachine):\n \"\"\"State machine class for devices\n\n can be constructed without arguments\n \"\"\"\n\n POSSIBLE_STATES = DeviceStates.STOPPED\n \"\"\"Initial state and possible state types\"\"\"\n\n _transitions = {\n DeviceStates.STOPPED: {DeviceStates.INITIALIZING},\n DeviceStates.INITIALIZING: {DeviceStates.RUNNING, DeviceStates.ERROR},\n DeviceStates.RUNNING: {DeviceStates.STOPPED, DeviceStates.ERROR},\n DeviceStates.ERROR: {DeviceStates.STOPPED}\n }\n \"\"\"Possible states and subsequent transitions\"\"\"\n\n def __init__(self):\n super(DeviceStateMachine, self).__init__(self._transitions)\n", "id": "10544044", "language": "Python", "matching_score": 2.846869707107544, "max_stars_count": 0, "path": "radloggerpy/device/device_state_machine.py" }, { "content": "# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom enum import Enum\nfrom enum import unique\n\n\n@unique\nclass DeviceStates(Enum):\n \"\"\"Enum listing all possible device states\"\"\"\n STOPPED = 1\n INITIALIZING = 2\n RUNNING = 3\n ERROR = 4\n\n\nDEVICE_STATE_CHOICES = {\n DeviceStates.STOPPED: \"stopped\",\n DeviceStates.INITIALIZING: \"initializing\",\n DeviceStates.RUNNING: \"running\",\n DeviceStates.ERROR: \"error\",\n}\n", "id": "10384958", "language": "Python", "matching_score": 0.9895373582839966, "max_stars_count": 0, "path": "radloggerpy/types/device_states.py" }, { "content": "# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom enum import Enum\nfrom enum import unique\n\n\n@unique\nclass SerialStopbitTypes(Enum):\n \"\"\"Enum listing all possible supported types of serial stopbits\"\"\"\n STOPBITS_ONE = 1\n STOPBITS_ONE_POINT_FIVE = 1.5\n STOPBITS_TWO = 2\n\n\nSTOPBIT_CHOICES = {\n SerialStopbitTypes.STOPBITS_ONE: 1,\n SerialStopbitTypes.STOPBITS_ONE_POINT_FIVE: 1.5,\n SerialStopbitTypes.STOPBITS_TWO: 2\n}\n", "id": "9564156", "language": "Python", "matching_score": 1.84102463722229, "max_stars_count": 0, "path": "radloggerpy/types/serial_stopbit.py" }, { "content": "# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom enum import Enum\nfrom enum import unique\n\n\n@unique\nclass SerialBytesizeTypes(Enum):\n \"\"\"Enum listing all possible supported types of serial byte sizes\"\"\"\n FIVEBITS = 5\n SIXBITS = 6\n SEVENBITS = 7\n EIGHTBITS = 8\n\n\nBYTESIZE_CHOICES = {\n SerialBytesizeTypes.FIVEBITS: 5,\n SerialBytesizeTypes.SIXBITS: 6,\n SerialBytesizeTypes.SEVENBITS: 7,\n SerialBytesizeTypes.EIGHTBITS: 8,\n}\n", "id": "7247313", "language": "Python", "matching_score": 2.2871766090393066, "max_stars_count": 0, "path": "radloggerpy/types/serial_bytesize.py" }, { "content": "# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom enum import Enum\nfrom enum import unique\n\n\n@unique\nclass AccountTypes(Enum):\n \"\"\"Enum listing all possible supported types of accounts\"\"\"\n RADMON = 1\n GMCMAP = 2\n", "id": "3219273", "language": "Python", "matching_score": 0.558498203754425, "max_stars_count": 0, "path": "radloggerpy/types/account_types.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2020 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom enum import Enum\nfrom enum import unique\n\nfrom oslo_log import log\nfrom radloggerpy import config\n\nfrom radloggerpy.common.state_machine import StateMachine\nfrom radloggerpy.tests import base\n\nLOG = log.getLogger(__name__)\nCONF = config.CONF\n\n\n@unique\nclass DummyEnum(Enum):\n __order__ = 'EXP DUMMY'\n EXP = 1\n DUMMY = 2\n\n\nclass TestStateMachine(base.TestCase):\n\n dummy_transitions = {\n DummyEnum.EXP: {DummyEnum.EXP},\n DummyEnum.DUMMY: {DummyEnum.EXP, DummyEnum.DUMMY}\n }\n\n class DummyStateMachine(StateMachine):\n\n POSSIBLE_STATES = DummyEnum.EXP\n\n class FalseStateMachine(StateMachine):\n \"\"\"FalseStateMachine does not define POSSIBLE_STATES\"\"\"\n\n def setUp(self):\n super(TestStateMachine, self).setUp()\n\n def test_init_variable(self):\n m_machine = self.DummyStateMachine(self.dummy_transitions)\n\n self.assertEqual(DummyEnum.EXP, m_machine.get_state())\n\n def test_init_error(self):\n \"\"\"FalseStateMachine raise error on construction without states param\n\n Test that constructing a StateMachine object raises an error if both\n the POSSIBLE_STATES attribute and states parameter are None.\n \"\"\"\n\n self.assertRaises(RuntimeError, self.FalseStateMachine,\n self.dummy_transitions)\n\n def test_verify_error(self):\n\n transitions = {\n DummyEnum.EXP: {DummyEnum.EXP}\n }\n\n self.assertRaises(RuntimeError, self.DummyStateMachine, transitions)\n\n def test_verify_type_error(self):\n\n transitions = {\n DummyEnum.EXP: {None},\n DummyEnum.DUMMY: {}\n }\n\n self.assertRaises(RuntimeError, self.DummyStateMachine, transitions)\n\n def test_init_argument(self):\n m_machine = self.DummyStateMachine(\n self.dummy_transitions, DummyEnum.DUMMY)\n\n self.assertEqual(DummyEnum.DUMMY, m_machine.get_state())\n\n def test_transition(self):\n m_machine = self.DummyStateMachine(\n self.dummy_transitions, DummyEnum.DUMMY)\n\n m_machine.transition(DummyEnum.EXP)\n self.assertEqual(DummyEnum.EXP, m_machine.get_state())\n\n def test_transition_error(self):\n m_machine = self.DummyStateMachine(\n self.dummy_transitions, DummyEnum.EXP)\n\n self.assertRaises(\n RuntimeWarning, m_machine.transition, DummyEnum.DUMMY)\n\n def test_transition_type_error(self):\n m_machine = self.DummyStateMachine(\n self.dummy_transitions, DummyEnum.EXP)\n\n self.assertRaises(RuntimeWarning, m_machine.transition, None)\n\n def test_reset(self):\n m_machine = self.DummyStateMachine(\n self.dummy_transitions, DummyEnum.DUMMY)\n\n m_machine.transition(DummyEnum.EXP)\n m_machine.reset_state()\n self.assertEqual(DummyEnum.DUMMY, m_machine.get_state())\n", "id": "11528281", "language": "Python", "matching_score": 3.607738733291626, "max_stars_count": 0, "path": "radloggerpy/tests/common/test_state_machine.py" }, { "content": "# Copyright (c) 2020 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport abc\nimport enum\nfrom typing import Dict\nfrom typing import Optional\nfrom typing import Set\nfrom typing import Type\n\n\nfrom radloggerpy._i18n import _\n\nfrom oslo_log import log\nfrom radloggerpy import config\n\nLOG = log.getLogger(__name__)\nCONF = config.CONF\n\n\nclass StateMachine(metaclass=abc.ABCMeta):\n \"\"\"Abstract class to provide a state machine to any object\"\"\"\n\n _U = Dict[enum.Enum, Set[enum.Enum]]\n \"\"\"Transition dictionary structure type\"\"\"\n\n _state: enum.Enum\n \"\"\"Internal state object to maintain the current state\"\"\"\n\n _sclass: Type[enum.Enum]\n \"\"\"Enum class used for states\"\"\"\n\n POSSIBLE_STATES: Optional[enum.Enum] = None\n \"\"\"\n Set to an enum that has an item for all desired possible states; the initial\n value of this variable is the initial state.\n\n Subclasses implementing StateMachine should consider overriding this\n variable.\n \"\"\"\n\n transitions: _U = dict()\n \"\"\"\n Dictionary with sets as values where the key indicates the current state and\n the elements in the set describe valid transitions.\n\n Subclasses should not override this variable and instead rely on\n `super().__init__(transitions)`. This ensures that the structure of\n transitions is valid.\n \"\"\"\n\n def __init__(self, transitions: _U, states: enum.Enum = None):\n\n if states and isinstance(states, enum.Enum):\n self.POSSIBLE_STATES = states\n elif not isinstance(self.POSSIBLE_STATES, enum.Enum):\n raise RuntimeError(_(\"Neither POSSIBLE_STATES nor states are of \"\n \"type Enum\"))\n\n self._sclass = self.POSSIBLE_STATES.__class__\n\n self.transitions = transitions\n self._verify_transitions()\n\n self.reset_state()\n\n def _verify_transitions(self):\n \"\"\"Iterate the transitions dictionary and validate its completeness\"\"\"\n\n for t in self._sclass:\n if t not in self.transitions:\n raise RuntimeError(\n _(\"Not all states have required valid transition set\"))\n for s in self.transitions[t]:\n if not isinstance(s, self._sclass):\n raise RuntimeError(_(\"Not all members of transition set \"\n \"are of same type as state\"))\n\n def reset_state(self):\n \"\"\"Reset state to the initial state\"\"\"\n self._state = self.POSSIBLE_STATES\n\n def get_state(self):\n return self._state\n\n def transition(self, state: enum.Enum):\n \"\"\"Transition from the current state to a new desired state\n\n :param state: The new desired state\n :raises RuntimeWarning: This warning is raised when the new desired\n state requires an illegal transition\n \"\"\"\n\n if not isinstance(state, self._sclass):\n raise RuntimeWarning(\n _(\"State is not of same type as POSSIBLE_STATES\"))\n\n if state in self.transitions[self._state]:\n self._state = state\n else:\n raise RuntimeWarning(\n _(\"Transition from %(initial)s to %(to)s state is not valid\") %\n {'initial': self._state, 'to': state})\n", "id": "2853730", "language": "Python", "matching_score": 0.09000055491924286, "max_stars_count": 0, "path": "radloggerpy/common/state_machine.py" }, { "content": "# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n# Disable pep8 for this file\n# flake8: noqa\n\n\"\"\"Logo's to output to terminal when radlogger starts up\"\"\"\n\nTEXT = \"\"\"\n ____ _ _ ____ \n | _ \\ __ _ __| || | ___ __ _ __ _ ___ _ __ | _ \\ _ _ \n | |_) |/ _` | / _` || | / _ \\ / _` | / _` | / _ \\| '__|| |_) || | | | \n | _ <| (_| || (_| || |___| (_) || (_| || (_| || __/| | | __/ | |_| | \n |_| \\_\\\\\\__,_| \\__,_||_____|\\___/ \\__, | \\__, | \\___||_| |_| \\__, |\n |___/ |___/ |___/ \n\"\"\"\n\nLOGO = \"\"\"\n __svnnnnnnnnnns__, \n )noonvnnnnnnnnnnnoXoo, \n )vn> {nvnnnnnnnnnnnoo( \n )oos, _)nnnnnnnnnnoooow[ \n ajWZ#onnnnnnnnnnnnnoonomZ#6a \n _jQQQmmmg\\\"\\\"\\\"\\\"\\\"\\\"\\\"nnnooonXdZZZQQQa \n __uqWWWWBWBWWhnonoooooonooXZ#Z##QEvns;;.. \n %noSd#ZZZZZZZZZZmonnononoooqZZUZ#ZQkvvvn;;:: \n )nvnd#Z#Z#Z#Z#Z#ZZZ2onoooood#Z#Z#ZZQEvnvvn;=:; \n)vnn3#ZZ#ZUZ#ZZ#Z#Z#ZnoooooXUZZ#ZZUmQEvvnvns:;;:\nvnnnmXUZUZUZZ#ZUZZ#1owmZ#Zw2nX#ZUZmQ@Ivnvvvv;=:;\nnnnvZZ#Z#ZZ#Z#ZUmm\"\"mmmmmmmmr\"mmmQQVvvvvvnvn(;;;\nnnnnnnnnnnnnr` ....9VVVVVHV(...=:::::;;;:;:;;;;\nnnnnnnnnnnn( :::::::\"nnnv}~::;;;:;;;;;:;;;;;;;;\n)vnnnnnnnno ::::::::;;::::;=:;::;:;:;:;;;;;;;;.\n {nnnnnonoo .:::::::vvnvvnvn;;:;;;;;;;;;;;;;;=: \n {nnoooooo .:::::=vvvvnvvnvn<;;:;:;;;;;;;;=;- \n -\"{noooo .::::=vvvvnvvnvvnvs------------- \n .:::=vnvnvvnvvnvvvvs.... \n .::=nvnvvvnvvnvvnvnvi==. \n .:)nvnvvnvvnvvnvvnvvvn;. \n ;::+\\\"|{vnvvnvvnv\\\"\\\"\\\":;;. \n ::;;;;;:;;:;;;;;;;;=- \n ---:;;;;;;;:--- \"\"\"", "id": "6008948", "language": "Python", "matching_score": 1.2022643089294434, "max_stars_count": 0, "path": "radloggerpy/common/ascii_logo.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport hashlib\n\nfrom oslo_log import log\nfrom radloggerpy import config\n\nfrom radloggerpy.common import ascii_logo\n\nfrom radloggerpy.tests import base\n\nLOG = log.getLogger(__name__)\nCONF = config.CONF\n\n\nclass TestASCIILogo(base.TestCase):\n\n def setUp(self):\n super(TestASCIILogo, self).setUp()\n self.m_hash = hashlib.sha384()\n\n def test_text_hash(self):\n m_text_hex = 'b55e32b9d5317638f1f1e0c0aec328a8f94ae9d867240d539ff16d' \\\n '43493de90c89d43553b85174309c9e0f8d62148882'\n\n self.m_hash.update(ascii_logo.TEXT.encode('utf-8'))\n\n self.assertEqual(m_text_hex, self.m_hash.hexdigest())\n\n def test_logo_hash(self):\n m_logo_hex = 'e5394c5c4b95b4747cea38cd271019531418b3beec476ab6bcd0fe' \\\n 'ada6e93cbb447be6abac5494fe7813c92982f76a88'\n\n self.m_hash.update(ascii_logo.LOGO.encode('utf-8'))\n\n self.assertEqual(m_logo_hex, self.m_hash.hexdigest())\n", "id": "4553908", "language": "Python", "matching_score": 1.4865890741348267, "max_stars_count": 0, "path": "radloggerpy/tests/common/test_ascii_logo.py" }, { "content": "# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"Starter script for RadLoggerPy.\"\"\"\n\nimport os\nimport sys\nimport time\n\nfrom oslo_log import log\nfrom radloggerpy import config\n\nfrom radloggerpy._i18n import _\nfrom radloggerpy.common import ascii_logo\nfrom radloggerpy.common.first_time_run import FirstTimeRun\nfrom radloggerpy.config import config as configurator\nfrom radloggerpy.database import database_manager\nfrom radloggerpy.database.objects.serial_device import SerialDeviceObject\nfrom radloggerpy.device.device_manager import DeviceManager\n\nLOG = log.getLogger(__name__)\nCONF = config.CONF\n\nFirstTimeRun.add_check_task(\n database_manager.check_database_missing, database_manager.create_database)\n\n\ndef main():\n configurator.setup_config_and_logging(sys.argv, CONF)\n\n # Display logo's\n LOG.info(ascii_logo.TEXT + ascii_logo.LOGO)\n\n # Display pid\n LOG.info(_('Starting RadLoggerPy service on PID %s') % os.getpid())\n\n # Perform first time initialization if required\n FirstTimeRun()\n\n # Create database session for main thread\n sess = database_manager.create_session()\n\n # launch device manager\n manager = DeviceManager()\n\n devices = SerialDeviceObject.find_enabled(sess)\n for device in devices:\n manager.launch_device(device)\n\n # TODO(Dantali0n): Improve state checking and error handling\n while True:\n manager.check_devices()\n time.sleep(30)\n\n # close all database sessions that are still left open\n database_manager.close_lingering_sessions()\n", "id": "570916", "language": "Python", "matching_score": 4.239383697509766, "max_stars_count": 0, "path": "radloggerpy/radloggerpy.py" }, { "content": "# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom radloggerpy import config\n\nfrom cliff import app\nfrom cliff import commandmanager\nfrom cliff.complete import CompleteCommand\n\nfrom radloggerpy._i18n import _\nfrom radloggerpy.common import ascii_logo\nfrom radloggerpy.common.first_time_run import FirstTimeRun\nfrom radloggerpy.config.config import parse_args\nfrom radloggerpy.database import database_manager as dm\nfrom radloggerpy import version\n\nCONF = config.CONF\n\nFirstTimeRun.add_check_task(\n dm.check_database_missing, dm.create_database)\n\n\nclass RadLoggerShell(app.App):\n \"\"\"RadLoggerPy interactive command line interface\"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(\n description=self.__doc__.strip(),\n version=version.version_string,\n command_manager=commandmanager.CommandManager(\n 'radloggerpy.cli'),\n deferred_help=True,\n **kwargs\n )\n self.command_manager.add_command('complete', CompleteCommand)\n self.database_session = dm.create_session()\n\n def initialize_app(self, argv):\n # update configuration (sets CONF.version amongst others)\n parse_args(argv=())\n\n # Display logo\n self.LOG.info(ascii_logo.TEXT + ascii_logo.LOGO)\n\n # Perform first time initialization if required\n FirstTimeRun()\n\n # Display version\n self.LOG.info(_('Initializing radloggercli %s') % CONF.version)\n\n def prepare_to_run_command(self, cmd):\n self.LOG.debug('prepare_to_run_command %s', cmd.__class__.__name__)\n\n def clean_up(self, cmd, result, err):\n self.LOG.debug('clean_up %s', cmd.__class__.__name__)\n if err:\n self.LOG.debug('got an error: %s', err)\n\n def run(self, argv):\n try:\n super().run(argv)\n except Exception as e:\n self.LOG.error(_('Exception raised: %s'), str(e))\n", "id": "12283177", "language": "Python", "matching_score": 2.2522780895233154, "max_stars_count": 0, "path": "radloggerpy/cli/radlogger_shell.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom oslo_log import log\nfrom radloggerpy import config\n\nfrom radloggerpy.common import first_time_run\nfrom radloggerpy.common.first_time_run import FirstTimeRun\nfrom radloggerpy.tests import base\n\nLOG = log.getLogger(__name__)\nCONF = config.CONF\n\n\nclass TestFirstTimeRun(base.TestCase):\n\n def setUp(self):\n super(TestFirstTimeRun, self).setUp()\n\n self.p_tsk = mock.patch.object(\n FirstTimeRun, '_tasks',\n new=list())\n self.m_tasks = self.p_tsk.start()\n self.addCleanup(self.p_tsk.stop)\n\n self.p_chk = mock.patch.object(\n FirstTimeRun, '_checks',\n new=list())\n self.m_checks = self.p_chk.start()\n self.addCleanup(self.p_chk.stop)\n\n self.p_chk_tsk = mock.patch.object(\n FirstTimeRun, '_check_tasks',\n new=list())\n self.m_check_tasks = self.p_chk_tsk.start()\n self.addCleanup(self.p_chk_tsk.stop)\n\n @staticmethod\n def fake_check_true():\n return True\n\n @staticmethod\n def fake_check_false():\n return False\n\n @staticmethod\n def fake_task():\n pass\n\n def test_add_check(self):\n FirstTimeRun.add_check(self.fake_check_true)\n\n self.assertEqual(1, len(FirstTimeRun._checks))\n\n def test_add_check_fake(self):\n FirstTimeRun.add_check(True)\n\n self.assertEqual(0, len(FirstTimeRun._checks))\n\n def test_add_task(self):\n FirstTimeRun.add_task(self.fake_task)\n\n self.assertEqual(1, len(FirstTimeRun._tasks))\n\n def test_add_task_fake(self):\n FirstTimeRun.add_task(True)\n\n self.assertEqual(0, len(FirstTimeRun._tasks))\n\n def test_add_check_task(self):\n FirstTimeRun.add_check_task(self.fake_check_true, self.fake_task)\n\n self.assertEqual(1, len(FirstTimeRun._check_tasks))\n\n @mock.patch.object(first_time_run, 'LOG')\n def test_add_check_task_fake_check(self, m_log):\n FirstTimeRun.add_check_task(True, self.fake_task)\n\n self.assertEqual(0, len(FirstTimeRun._check_tasks))\n m_log.warning.assert_called_once()\n\n @mock.patch.object(first_time_run, 'LOG')\n def test_add_check_task_fake_task(self, m_log):\n FirstTimeRun.add_check_task(self.fake_check_true, True)\n\n self.assertEqual(0, len(FirstTimeRun._check_tasks))\n m_log.warning.assert_called_once()\n\n @mock.patch.object(first_time_run, 'LOG')\n def test_run_checks_error(self, m_log):\n m_run = FirstTimeRun()\n m_run._checks.append(True)\n\n self.assertFalse(m_run._run_checks())\n m_log.error.assert_called_once()\n\n def test_run_checks_false(self):\n m_run = FirstTimeRun()\n m_run.add_check(self.fake_check_false)\n\n self.assertFalse(m_run._run_checks())\n\n def test_run_checks_true(self):\n m_run = FirstTimeRun()\n m_run.add_check(self.fake_check_true)\n\n self.assertTrue(m_run._run_checks())\n\n def test_run_checks_all_false(self):\n m_run = FirstTimeRun()\n m_run.add_check(self.fake_check_false)\n m_run.add_check(self.fake_check_true)\n\n self.assertFalse(m_run._run_checks(all_to_init=True))\n\n def test_run_checks_all_true(self):\n m_run = FirstTimeRun()\n m_run.add_check(self.fake_check_true)\n m_run.add_check(self.fake_check_true)\n\n self.assertTrue(m_run._run_checks(all_to_init=True))\n\n def test_run_tasks(self):\n # Crate a mocked method\n m_method = mock.Mock()\n m_run = FirstTimeRun()\n\n m_run.add_task(self.fake_task)\n # Force mocked method into the tasks list\n m_run._tasks.append(m_method)\n\n m_run._run_tasks()\n\n m_method.assert_called_once()\n\n @mock.patch.object(first_time_run, 'LOG')\n def test_run_tasks_error(self, m_log):\n # Crate a mocked method\n 
m_method = mock.Mock()\n m_method.side_effect = Exception(\"Whoops\")\n m_run = FirstTimeRun()\n\n m_run.add_task(self.fake_task)\n # Force mocked method into the tasks list\n m_run._tasks.append(m_method)\n\n m_run._run_tasks()\n\n m_method.assert_called_once()\n m_log.error.assert_called_once()\n\n def test_run_check_tasks(self):\n # Create a mocked method\n m_method = mock.Mock()\n m_run = FirstTimeRun()\n\n # create CheckTask instance\n check_task = FirstTimeRun.CheckTask(self.fake_check_true, m_method)\n m_run._check_tasks.append(check_task)\n\n m_run._run_check_tasks()\n\n m_method.assert_called_once()\n\n @mock.patch.object(first_time_run, 'LOG')\n def test_run_check_tasks_except_check(self, m_log):\n m_run = FirstTimeRun()\n\n check_task = FirstTimeRun.CheckTask(True, self.fake_task)\n m_run._check_tasks.append(check_task)\n\n m_run._run_check_tasks()\n\n m_log.error.assert_called_once()\n\n @mock.patch.object(first_time_run, 'LOG')\n def test_run_check_tasks_except_task(self, m_log):\n # Create a mocked method\n m_method = mock.Mock()\n m_method.side_effect = Exception(\"Whoops\")\n m_run = FirstTimeRun()\n\n check_task = FirstTimeRun.CheckTask(self.fake_check_true, m_method)\n # Force mocked method into the tasks list\n m_run._check_tasks.append(check_task)\n\n m_run._run_check_tasks()\n\n m_method.assert_called_once()\n m_log.error.assert_called_once()\n\n def test_constructor(self):\n m_method = mock.Mock()\n\n self.assertEqual(0, len(FirstTimeRun._tasks))\n self.assertEqual(0, len(FirstTimeRun._checks))\n\n FirstTimeRun.add_check(self.fake_check_true)\n FirstTimeRun.add_check(self.fake_check_false)\n FirstTimeRun.add_task(self.fake_task)\n FirstTimeRun._tasks.append(m_method)\n\n FirstTimeRun()\n\n m_method.assert_called_once()\n", "id": "5360176", "language": "Python", "matching_score": 3.3417530059814453, "max_stars_count": 0, "path": "radloggerpy/tests/common/test_first_time_run.py" }, { "content": "# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport inspect\n\nfrom oslo_log import log\nfrom radloggerpy import config\n\nfrom radloggerpy._i18n import _\n\nLOG = log.getLogger(__name__)\nCONF = config.CONF\n\n\nclass FirstTimeRun:\n \"\"\"Handles service-wide first time initialization\n\n FirstTimeRun should not be instantiated until all checks and tasks have\n been added. The FirstTimeRun will perform checks and if necessary perform\n all tasks upon construction.\n\n Individual tasks should do basic checks before performing that task as it\n could otherwise potentially re-initialize. This is due to a limitation in\n FirstTimeRun where there is no correlation between tasks and checks. 
The\n result is that even though it can be determined that initialization is in\n order, it cannot be determined which check triggered it.\n\n Tasks and checks can be added globally by typing:\n FirstTimeRun.add_task(task)\n FirstTimeRun.add_check(check)\n\n If stronger coupling between tasks and checks is required use:\n FirstTimeRun.add_check_task(check, task)\n\n Since these can only be defined after the definition of class methods it\n might be necessary to add the statements to the bottom of the file. Take\n into account that these global statements are only executed if the file in\n which they reside is included in the main file.\n\n Alternatively, a separate file can be created to handle all these\n registrations. This has the advantage that all registrations can be observed\n in one overview. Additionally, it will provide cleaner imports since only\n the declaring file has all the combinations of imports. This has the\n potential to solve circular dependencies.\n\n Finally, if unused imports are undesired, all the registrations can be\n performed in the main file.\n \"\"\"\n\n class CheckTask:\n \"\"\"Wrapper for an associated check and task pair\"\"\"\n\n def __init__(self, check, task):\n self.check = check\n self.task = task\n\n task = None\n check = None\n\n # All checks that have associated tasks.\n _check_tasks = list()\n\n # All calls that will be made if first time init is required.\n _tasks = list()\n\n # All checks to be performed to determine if initialization is required.\n _checks = list()\n\n def __init__(self):\n \"\"\"Run all checks and if required all initialization tasks\"\"\"\n\n if self._run_checks():\n LOG.info(_(\"Performing first time initialization\"))\n self._run_tasks()\n\n self._run_check_tasks()\n\n def _run_check_tasks(self):\n \"\"\"Run each of the checks and tasks as a pair\"\"\"\n for check_task in self._check_tasks:\n try:\n if check_task.check():\n check_task.task()\n except Exception as e:\n LOG.error(_(\"Encountered error during execution of \"\n \"CheckTask: %s\") % e)\n\n def _run_tasks(self):\n \"\"\"Will try to execute all calls from the internal list\"\"\"\n\n for task in self._tasks:\n try:\n task()\n LOG.info(_(\"Ran task: %s\") % task)\n except Exception as e:\n LOG.error(_(\"Encountered error during first time \"\n \"initialization with task: %s\") % e)\n\n def _run_checks(self, all_to_init=False):\n \"\"\"Run all checks from the internal list\n\n :param all_to_init: True if all checks are required to init, False if\n one check is sufficient\n :return: True if first time init should be run, False otherwise\n \"\"\"\n\n # store return values for all checks\n values = list()\n\n for check in self._checks:\n try:\n values.append(check())\n except Exception as e:\n LOG.error(_(\"Encountered error while performing check for \"\n \"first time init: %s\") % e)\n\n has_true = False\n for v in values:\n if v:\n has_true = True\n if v and not all_to_init:\n return True\n elif not v and all_to_init:\n return False\n return has_true\n\n @staticmethod\n def _validate_check_task(obj):\n \"\"\"Validate the object as much as possible\n\n Has the limitation that it does not verify check() return type\n to be boolean.\n \"\"\"\n return inspect.ismethod(obj) or inspect.isfunction(obj)\n\n @staticmethod\n def add_check_task(check, task):\n if not FirstTimeRun._validate_check_task(check):\n LOG.warning(_(\"Check %s was not of type method\") % check)\n return\n if not FirstTimeRun._validate_check_task(task):\n LOG.warning(_(\"Task %s was not of type method\") % task)\n return\n 
FirstTimeRun._check_tasks.append(FirstTimeRun.CheckTask(check, task))\n\n @staticmethod\n def add_task(task):\n if FirstTimeRun._validate_check_task(task):\n FirstTimeRun._tasks.append(task)\n else:\n LOG.warning(_(\"Task %s was not of type method\") % task)\n\n @staticmethod\n def add_check(check):\n if FirstTimeRun._validate_check_task(check):\n FirstTimeRun._checks.append(check)\n else:\n LOG.warning(_(\"Check %s was not of type method\") % check)\n", "id": "5870825", "language": "Python", "matching_score": 0.34748178720474243, "max_stars_count": 0, "path": "radloggerpy/common/first_time_run.py" }, { "content": "# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport time\n\nfrom radloggerpy.models import base\n\n\nclass TimeStamp(base.BaseModel):\n\n _timestamp = 0\n\n def __init__(self):\n \"\"\"Performs essential initialization for TimeStamp model\"\"\"\n\n super().__init__()\n # auto generate timestamp upon instantiation.\n self._timestamp = time.time()\n\n def set_timestamp(self, timestamp):\n \"\"\"Set the internal timestamp to the passed timestamp in Epoch\n\n :param timestamp: The timestamp in Epoch such as from time.time()\n :type timestamp: float\n \"\"\"\n self._timestamp = timestamp\n\n def update_timestamp(self):\n \"\"\"Update the internal timestamp to the current time.time() Epoch\"\"\"\n\n self._timestamp = time.time()\n\n def get_timestamp(self):\n \"\"\"Retrieve and return the internal timestamp\n\n :return: Epoch, time.time() representation of current time\n :rtype: float\n \"\"\"\n return self._timestamp\n", "id": "3848590", "language": "Python", "matching_score": 1.9050346612930298, "max_stars_count": 0, "path": "radloggerpy/models/timestamp.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport time\nfrom unittest import mock\n\nfrom radloggerpy.models import timestamp\nfrom radloggerpy.tests import base\n\n\nclass TestTimeStampModel(base.TestCase):\n\n def setUp(self):\n super(TestTimeStampModel, self).setUp()\n\n self.m_timestamp = timestamp.TimeStamp()\n\n def test_no_instance_attributes(self):\n \"\"\"Test that the class has no instance variables\"\"\"\n\n self.assertEqual(\n len(dir(timestamp.TimeStamp)), len(dir(self.m_timestamp)))\n\n @mock.patch.object(time, 'time')\n def test_update_get(self, m_time):\n m_time.return_value = 0\n\n self.m_timestamp.update_timestamp()\n self.assertEqual(0, self.m_timestamp.get_timestamp())\n\n def test_set_get(self):\n self.m_timestamp.set_timestamp(1500)\n self.assertEqual(1500, self.m_timestamp.get_timestamp())\n", "id": "1714048", "language": "Python", "matching_score": 1.1276319026947021, "max_stars_count": 0, "path": "radloggerpy/tests/models/test_timestamp.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2020 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom copy import copy\nfrom unittest import mock\n\nfrom cliff.lister import Lister\n\nfrom radloggerpy.cli.v1.device import device_models\n\nfrom radloggerpy.tests import base\n\n\nclass TestDeviceModels(base.TestCase):\n\n def setUp(self):\n super(TestDeviceModels, self).setUp()\n\n class BaseDummy(object):\n pass\n\n @mock.patch.object(device_models, 'DeviceManager')\n def test_take_action(self, m_dm):\n\n bases = copy(device_models.DeviceModels.__bases__)\n f_bases = tuple(\n base for base in bases if base != Lister) + (self.BaseDummy, )\n\n m_dm.get_device_map.return_value = {\"test\": [mock.Mock(NAME=\"value\")]}\n\n m_base = mock.patch.object(\n device_models.DeviceModels, '__bases__', f_bases)\n with m_base:\n m_base.is_local = True\n t_device = device_models.DeviceModels()\n\n t_device.app = mock.Mock()\n\n t_result = t_device.take_action(None)\n self.assertEqual(\n t_result,\n ((\"interface\", \"implementation\"), [(\"test\", \"value\")])\n )\n\n # ensure that is_local on the patch does not modify the actual bases\n self.assertEqual(bases, device_models.DeviceModels.__bases__)\n", "id": "10007433", "language": "Python", "matching_score": 3.0763418674468994, "max_stars_count": 0, "path": "radloggerpy/tests/cli/device/test_device_models.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom cliff.lister import Lister\n\nfrom radloggerpy.device.device_manager import DeviceManager\n\n\nclass DeviceModels(Lister):\n \"\"\"Command to list available device interfaces and implementations\"\"\"\n\n def take_action(self, parsed_args):\n columns = (\"interface\", 'implementation')\n\n # Convert data from device_map\n map = DeviceManager.get_device_map()\n data = []\n for key, values in map.items():\n for value in values:\n data.append((key, value.NAME))\n\n return (columns, data)\n", "id": "6075977", "language": "Python", "matching_score": 1.9456982612609863, "max_stars_count": 0, "path": "radloggerpy/cli/v1/device/device_models.py" }, { "content": "# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom collections import OrderedDict\nimport multiprocessing\nfrom threading import Condition\nfrom typing import Type\nfrom typing import TypeVar\n\n\nfrom oslo_log import log\nfrom radloggerpy import config\n\nimport futurist\nfrom futurist import Future\n\nfrom radloggerpy._i18n import _\nfrom radloggerpy.common.dynamic_import import import_modules\nfrom radloggerpy.common.dynamic_import import list_module_names\nfrom radloggerpy.database.objects.device import DeviceObject\nfrom radloggerpy.device.device import Device\nfrom radloggerpy.device.device_exception import DeviceException\nfrom radloggerpy.device import device_interfaces as di\nfrom radloggerpy.device import devices as dev\nfrom radloggerpy.types.device_interfaces import INTERFACE_CHOICES_R\n\nLOG = log.getLogger(__name__)\nCONF = config.CONF\n\n\nclass ManagedDevice:\n \"\"\"Small data structure to keep track of running devices\"\"\"\n\n future: Future\n device: Device\n\n consecutive_errors: int = 0\n\n _I = TypeVar('_I', bound=Device)\n \"\"\"Bound to :py:class:`radloggerpy.device.device.Device`\"\"\"\n\n _U = TypeVar('_U', bound=Future)\n \"\"\"Bound to :py:class:`concurrent.futures._base.Future`\"\"\"\n\n def __init__(self, future: Type[_U], device: Type[_I]):\n self.future = future\n self.device = device\n\n\nclass DeviceManager:\n \"\"\"Factory for device creation and management\n\n The following theory of operation is not finalized and alternative\n solutions are not only welcome but encouraged:\n\n Each device is run on the threadpool and gets scheduled and descheduled\n in accordance to the number of concurrent workers. Devices are expected\n to check for data and subsequently return to sleep upon waking up. The\n amount of time in between wake-ups should be long enough to give other\n devices time to retrieve data but short enough to have relevant timing\n data.\n\n Devices are expected to run in a endless loop, upon returning they will\n NOT get automatically rescheduled back into the queue of the threadpool.\n Since all devices inherit the Device super class this class will provide\n methods to store data. 
The storage and retrieval methods for data can\n be assumed to be thread-safe by the device.\n\n The polling rate of DeviceManager to retrieve data from devices depends\n on the /systems/ used to store data permanently. Some online platforms\n do not allow specifying timestamps while uploading data. This in turn\n requires a high polling rate to be able to ensure measurements get\n uploaded with accurate time information.\n\n \"\"\"\n\n _DEVICE_MAP = None\n \"\"\"Private map of device types and corresponding implementations\"\"\"\n\n def __init__(self):\n num_workers = CONF.devices.concurrent_worker_amount\n\n if num_workers == -1:\n num_workers = multiprocessing.cpu_count()\n LOG.info(_(\"Configured device manager for %d workers\")\n % num_workers)\n\n self._condition = Condition()\n\n self._mng_devices = []\n \"List of ManagedDevice devices see :py:class:`ManagedDevice`\"\n self._threadpool = futurist.ThreadPoolExecutor(\n max_workers=num_workers)\n # self._threadpool = futurist.GreenThreadPoolExecutor(\n # max_workers=num_workers)\n\n self.get_device_map()\n\n _I = TypeVar('_I', bound=Device)\n \"\"\"Bound to :py:class:`radloggerpy.device.device.Device`\"\"\"\n\n _U = TypeVar('_U', bound=DeviceObject)\n \"\"\"Bound to :py:class:`radloggerpy.database.objects.device.DeviceObject`\"\"\"\n\n def launch_device(self, device_obj: Type[_U]):\n \"\"\"Submit the device and its parameter to the threadpool\n\n Submitted devices are maintained as ManagedDevice instances; this\n makes it possible to correlate a future with its corresponding device.\n \"\"\"\n\n dev_class = self.get_device_class(device_obj)\n dev_inst = dev_class(device_obj, self._condition)\n self._mng_devices.append(\n ManagedDevice(self._threadpool.submit(dev_inst.run), dev_inst)\n )\n\n def check_devices(self):\n \"\"\"Check the status of the devices and handle failures\n\n TODO(Dantali0n): This method should use the get_state method of devices\n instead of relying on the futures too much.\n \"\"\"\n\n removals = []\n for mng_device in self._mng_devices:\n future_exception = mng_device.future.exception()\n\n if type(future_exception) is not DeviceException:\n LOG.error(_(\"Unhandled Exception\"))\n\n if mng_device.future.done() and CONF.devices.restart_on_error:\n mng_device.future =\\\n self._threadpool.submit(mng_device.device.run)\n elif mng_device.future.done():\n removals.append(mng_device)\n\n # Clean up the managed devices that have run to completion\n for device in removals:\n self._mng_devices.remove(device)\n\n @staticmethod\n def _get_device_module(module):\n device_modules = []\n\n # discover the path for the module directory and the package\n package_path = module.__path__[0]\n package = module.__name__\n\n modules = list()\n for module_name in list_module_names(package_path):\n modules.append((module_name, module_name.title().replace('_', '')))\n\n imported_modules = import_modules(\n modules, package, fetch_attribute=True)\n for module, attribute in imported_modules:\n device_modules.append(getattr(module, attribute))\n\n return device_modules\n\n @staticmethod\n def get_device_interfaces():\n \"\"\"Return a collection of all device interfaces' abstract classes\n\n Access the abstract classes' INTERFACE to determine how they map to\n :py:class:`radloggerpy.types.device_interfaces.DeviceInterfaces`\n\n :return:\n :rtype:\n \"\"\"\n\n return DeviceManager._get_device_module(di)\n\n @staticmethod\n def get_device_implementations():\n \"\"\"Return a collection of all device implementations\n\n Access the implementations' 
INTERFACE to determine how they map to\n :py:class:`radloggerpy.types.device_interfaces.DeviceInterfaces`\n\n :return:\n :rtype:\n \"\"\"\n\n return DeviceManager._get_device_module(dev)\n\n @staticmethod\n def get_device_map():\n \"\"\"Return dictionary mapping device types to all concrete classes\n\n The map will only be generated the first time this method is called\n and is subsequently stored in :py:attr:`_DEVICE_MAP`.\n\n The dictionary structure follows the following schema:\n\n ``OrderedDict([(DeviceInterfaces.SERIAL,[devices.ArduinoGeigerPcb])])``\n\n :return: Ordered dictionary mapping DeviceInterface enums to concrete\n classes\n :rtype: OrderedDict with DeviceTypes as key and lists as values\n \"\"\"\n\n if DeviceManager._DEVICE_MAP is not None:\n return DeviceManager._DEVICE_MAP\n\n device_map = OrderedDict()\n\n d_interfaces = DeviceManager.get_device_interfaces()\n for d_interface in d_interfaces:\n device_map[d_interface.INTERFACE] = []\n\n implementations = DeviceManager.get_device_implementations()\n for implementation in implementations:\n device_map[implementation.INTERFACE].append(implementation)\n\n DeviceManager._DEVICE_MAP = device_map\n return DeviceManager._DEVICE_MAP\n\n @staticmethod\n def get_device_class(device_obj: Type[_U]) -> _I:\n \"\"\"Determines the matching device class for a device object\n\n The device object must specify a concrete implementation\n \"\"\"\n\n map = DeviceManager.get_device_map()\n interfaces_inverse = INTERFACE_CHOICES_R\n implementations = map[interfaces_inverse[device_obj.interface]]\n for x in implementations:\n if x.NAME == device_obj.implementation:\n return x\n", "id": "814769", "language": "Python", "matching_score": 5.378720760345459, "max_stars_count": 0, "path": "radloggerpy/device/device_manager.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport multiprocessing\nfrom unittest import mock\n\nfrom oslo_log import log\nfrom radloggerpy import config\nfrom radloggerpy.database.objects.device import DeviceObject\n\nfrom radloggerpy.device import device_interfaces as di\nfrom radloggerpy.device import device_manager as dm\nfrom radloggerpy.device import devices as dev\nfrom radloggerpy.tests import base\nfrom radloggerpy.types.device_implementations import IMPLEMENTATION_CHOICES\nfrom radloggerpy.types.device_interfaces import DeviceInterfaces\nfrom radloggerpy.types.device_interfaces import INTERFACE_CHOICES\nfrom radloggerpy.types.device_types import DeviceTypes\n\nLOG = log.getLogger(__name__)\nCONF = config.CONF\n\n\nclass TestDeviceManager(base.TestCase):\n\n def setUp(self):\n super(TestDeviceManager, self).setUp()\n\n @mock.patch.object(multiprocessing, 'cpu_count')\n def test_num_processors(self, m_cpu):\n m_cpu.return_value = 2\n\n self.m_dmanager = dm.DeviceManager()\n\n m_cpu.assert_called_once_with()\n\n @mock.patch.object(dm, 'futurist')\n def test_conf_workers(self, m_futurist):\n CONF.devices.concurrent_worker_amount = 2\n\n self.m_dmanager = dm.DeviceManager()\n\n m_futurist.ThreadPoolExecutor.\\\n assert_called_once_with(max_workers=2)\n\n @mock.patch.object(dm, 'import_modules')\n @mock.patch.object(dm, 'list_module_names')\n def test_get_device_module(self, m_list_names, m_import):\n m_path = 'path'\n m_package = 'package'\n m_name = 'test'\n m_class = 'Test'\n\n m_module = mock.Mock(__path__=[m_path], __name__=m_package)\n\n m_list_names.return_value = [m_name]\n\n m_result = mock.Mock(Test=True)\n m_import.return_value = [(m_result, m_class)]\n\n result = dm.DeviceManager._get_device_module(m_module)\n\n m_list_names.assert_called_once_with(m_path)\n m_import.assert_called_once_with(\n [(m_name, m_class)], m_package, fetch_attribute=True)\n\n self.assertEqual([True], result)\n\n @mock.patch.object(dm.DeviceManager, '_get_device_module')\n def test_get_device_interfaces(self, m_get_device_module):\n \"\"\"Assert get_device_interfaces called with correct module\"\"\"\n dm.DeviceManager.get_device_interfaces()\n\n m_get_device_module.assert_called_once_with(di)\n\n @mock.patch.object(dm.DeviceManager, '_get_device_module')\n def test_get_device_implementations(self, m_get_device_module):\n \"\"\"Assert get_device_implementations called with correct module\"\"\"\n dm.DeviceManager.get_device_implementations()\n\n m_get_device_module.assert_called_once_with(dev)\n\n def test_get_device_map_created_once(self):\n m_map = dm.DeviceManager.get_device_map()\n\n self.assertEqual(m_map, dm.DeviceManager.get_device_map())\n\n def test_get_device_map_implementations(self):\n m_map = dm.DeviceManager.get_device_map()\n\n choices = {x: False for (x, y) in IMPLEMENTATION_CHOICES}\n num_choices = 0\n\n for key, value in m_map.items():\n num_choices += len(value)\n for imp in value:\n if imp.NAME in choices:\n choices[imp.NAME] = True\n\n for x in choices:\n self.assertTrue(x)\n\n # This will break once an implementation supports multiple interfaces!\n self.assertEqual(num_choices, len(IMPLEMENTATION_CHOICES))\n\n @mock.patch.object(dm.DeviceManager, 'get_device_map')\n def test_get_device_class(self, m_get_device_map):\n \"\"\"Ensure class can be found for instances of DeviceObject\n\n This checks that instances of:\n :py:class:`radloggerpy.database.objects.device.DeviceObject` can have\n their corresponding class found 
by get_device_class.\n \"\"\"\n\n m_class = mock.Mock(NAME=\"test\")\n m_get_device_map.return_value = {\n DeviceInterfaces.SERIAL: [\n m_class,\n mock.Mock(NAME=\"different\")\n ]\n }\n\n # Create actual DeviceObject instead of mock as to not upset type\n # hinting.\n args = {\n \"implementation\": 'test',\n \"interface\": INTERFACE_CHOICES[DeviceInterfaces.SERIAL]\n }\n m_obj = DeviceObject(**args)\n\n self.assertEqual(m_class, dm.DeviceManager.get_device_class(m_obj))\n\n def test_device_implementations_name(self):\n \"\"\"Assert each concrete device implementation has a name\"\"\"\n implementations = dm.DeviceManager.get_device_implementations()\n\n for imp in implementations:\n self.assertIsNotNone(imp.NAME)\n\n def test_device_implementations_type(self):\n \"\"\"Assert each concrete device implementation has a type\"\"\"\n implementations = dm.DeviceManager.get_device_implementations()\n\n for imp in implementations:\n self.assertNotEqual(imp.TYPE, DeviceTypes.UNDEFINED)\n", "id": "8104810", "language": "Python", "matching_score": 2.5235531330108643, "max_stars_count": 0, "path": "radloggerpy/tests/device/test_device_manager.py" }, { "content": "# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\n# TODO(Dantali0n): Dd.get_device_implementations() causes circular import pls\n# fix.\n\n# from radloggerpy.device.device_manager import DeviceManager as Dm\n# IMPLEMENTATION_CHOICES = [(imp.NAME, imp.NAME.lower()) for imp in\n# Dm.get_device_implementations()]\n\nIMPLEMENTATION_CHOICES = [('ArduinoGeigerPCB', 'arduinogeigerpcb')]\n", "id": "4775828", "language": "Python", "matching_score": 0.03539024293422699, "max_stars_count": 0, "path": "radloggerpy/types/device_implementations.py" }, { "content": "# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nclass BaseModel:\n\n def __init__(self):\n \"\"\"Do not declare any attributes in models their constructor\n\n By declaring the attributes outside of the constructor it will be\n easier to see which attributes certain models have. Additionally, it\n will improve testing as the attributes can be AutoSpec=True by mock.\n Remember though that all attributes outside the constructor are\n statically accessible as well. 
The constructor can still be used to\n assign a proper value to the declared attributes.\n\n \"\"\"\n pass\n", "id": "5140427", "language": "Python", "matching_score": 1.0381377935409546, "max_stars_count": 0, "path": "radloggerpy/models/base.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom oslo_log import log\n\nfrom radloggerpy import config\n\nfrom radloggerpy.database.declarative_base import Base\nfrom radloggerpy.database.declarative_base import base as decl_base_inst\nfrom radloggerpy.tests import base\n\nLOG = log.getLogger(__name__)\nCONF = config.CONF\n\n\nclass TestDeclarativeBase(base.TestCase):\n\n # class FakeBase(object):\n # @declared_attr\n # def __tablename__(cls):\n # return cls.__name__.lower()\n\n def setUp(self):\n super(TestDeclarativeBase, self).setUp()\n\n # self.p_base = mock.patch.object(\n # declarative, 'declarative_base',\n # new=declarative.declarative_base)\n # self.m_decl_base = self.p_base.start()\n # self.addCleanup(self.p_base.stop)\n\n def test_base_cls_base(self):\n \"\"\"The sqlalchemy declarative_base passed base object\"\"\"\n\n self.assertEqual(Base.__doc__, decl_base_inst.__doc__)\n\n # @mock.patch.object(decl_base_module, 'base')\n # def test_base_tablename_lower(self, m_base_instance):\n # \"\"\"Assert that the baseclass tablename lower gets applied\"\"\"\n #\n # # Create a in memory sqlite database using a declarative_base\n # m_base = self.m_decl_base(cls=TestDeclarativeBase.FakeBase)\n # m_engine = create_engine('sqlite:///:memory:', echo=True)\n #\n # # This model wil be added to the declarative_base\n # class TestModel(m_base):\n # id = Column(Integer, primary_key=True)\n #\n # # Create all tables for the in memory database\n # m_base.metadata.create_all(bind=m_engine)\n #\n # # Create a ORM session and add an instance of model to the database\n # m_session = sessionmaker(bind=m_engine)\n # m_session = m_session()\n # m_model = TestModel()\n # m_session.add(m_model)\n # m_session.commit()\n # m_session.close()\n #\n # # Create a Core connection and fetch instances using plain SQL\n # m_conn = m_engine.connect()\n # m_text = text(\"SELECT * FROM testmodel\")\n # result = m_conn.execute(m_text).fetchall()\n # m_conn.close()\n #\n # # Check that the inserted element could be retrieved from the\n # # database using the lowercase name of the model class.\n # self.assertEqual(1, len(result))\n", "id": "673856", "language": "Python", "matching_score": 2.692861318588257, "max_stars_count": 0, "path": "radloggerpy/tests/database/test_declarative_base.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport os\nfrom unittest import mock\n\n\nfrom oslo_log import log\nfrom sqlalchemy import orm\n\nfrom radloggerpy import config\n\nfrom radloggerpy.database import create_database as cd\nfrom radloggerpy.database import database_manager as dbm\nfrom radloggerpy.tests import base\n\nLOG = log.getLogger(__name__)\nCONF = config.CONF\n\n\nclass TestDatabaseManager(base.TestCase):\n\n def setUp(self):\n super(TestDatabaseManager, self).setUp()\n\n self.p_file = mock.patch.object(\n os.path, 'isfile',\n new_callable=mock.PropertyMock)\n self.m_isfile = self.p_file.start()\n self.addCleanup(self.p_file.stop)\n\n self.p_database = mock.patch.object(\n dbm, 'database_exists',\n new_callable=mock.PropertyMock)\n self.m_database = self.p_database.start()\n self.addCleanup(self.p_database.stop)\n\n def test_create_engine(self):\n engine = dbm.create_engine(\"test.sqlite\")\n self.assertEqual(\"sqlite:///test.sqlite\", str(engine.url))\n\n @mock.patch.object(dbm, 'create_engine')\n def test_create_session(self, m_engine):\n m_engine.return_value = \"sqlite:///test.sqlite\"\n\n session = dbm.create_session()\n\n m_engine.assert_called_once()\n self.assertIsInstance(session, orm.Session)\n self.assertEqual(\"sqlite:///test.sqlite\", session.bind)\n\n @mock.patch.object(dbm, 'LOG')\n @mock.patch.object(dbm, 'create_engine')\n def test_create_session_error(self, m_engine, m_log):\n m_engine.side_effect = Exception()\n\n session = dbm.create_session()\n\n m_engine.assert_called_once()\n m_log.error.assert_called_once()\n self.assertIsNone(None, session)\n\n def test_check_database_missing(self):\n self.m_isfile.return_value = False\n\n self.assertTrue(dbm.check_database_missing())\n\n def test_check_database_exists(self):\n self.m_isfile.return_value = True\n self.m_database.return_value = True\n\n self.assertFalse(dbm.check_database_missing())\n\n def test_check_database_missing_exists(self):\n self.m_isfile.return_value = True\n self.m_database.return_value = False\n\n self.assertTrue(dbm.check_database_missing())\n\n @mock.patch.object(dbm, 'LOG')\n def test_check_database_missing_error(self, m_log):\n self.m_isfile.return_value = True\n self.m_database.side_effect = Exception()\n\n self.assertTrue(dbm.check_database_missing())\n m_log.warning.assert_called_once()\n\n @mock.patch.object(dbm, 'create_engine')\n @mock.patch.object(cd, 'create_database_tables')\n def test_create_database(self, m_create, m_engine):\n m_engine.return_value = mock.Mock()\n m_create.return_value = True\n\n dbm.create_database()\n\n m_create.assert_called_once_with(m_engine())\n\n @mock.patch.object(dbm, 'create_engine')\n @mock.patch.object(dbm, 'LOG')\n @mock.patch.object(cd, 'create_database_tables')\n def test_create_database_error(self, m_create, m_log, m_engine):\n m_engine.return_value = mock.Mock()\n m_create.side_effect = AssertionError()\n\n self.assertRaises(AssertionError, dbm.create_database)\n\n m_create.assert_called_once_with(m_engine())\n m_log.error.assert_called_once()\n", "id": "4502202", "language": "Python", "matching_score": 3.1377999782562256, "max_stars_count": 0, "path": 
"radloggerpy/tests/database/test_database_manager.py" }, { "content": "# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport os.path\n\nfrom oslo_log import log\nfrom radloggerpy import config\n\nimport sqlalchemy\nfrom sqlalchemy import orm\nfrom sqlalchemy_utils import database_exists\n\nfrom radloggerpy._i18n import _\nfrom radloggerpy.database import create_database as cd\n\nLOG = log.getLogger(__name__)\nCONF = config.CONF\n\n\"\"\"\nEnsure all models their tables and relationships are loaded before sessions\nare created. Otherwise SQLAlchemy will have troubling finding\nunimported relationshios.\n\"\"\"\nRUNTIME_TABLES = cd._list_tables()\n\n\ndef create_session():\n \"\"\"Create a session using the appropriate configuration\n\n :return: Returns an sqlalchemy session or None if a error occurred\n :rtype: Instance of :py:class: 'orm.Session'\n \"\"\"\n file = CONF.database.filename\n\n try:\n sess = orm.sessionmaker(bind=create_engine(file))\n return sess()\n except Exception as e:\n LOG.error(_(\"Failed to create session due to exception: %s\") % e)\n\n return None\n\n\ndef close_lingering_sessions():\n \"\"\"Closes all lingering sqlalchemy sessions\"\"\"\n orm.session.close_all_sessions()\n\n\ndef create_engine(database_name):\n \"\"\"Create the database engine with appropriate parameters\n\n This method should be used whenever sqlalchemy.create_engine is to be\n called. 
It ensures the same parameters are used across the application.\n\n :parameter database_name: base name of the database without sqlite://\n :type database_name: str\n :return: sqlalchemy engine instance\n :rtype: Instance of :py:class: 'sqlalchemy.engine.Engine`\n \"\"\"\n\n return sqlalchemy.create_engine(f\"sqlite:///{database_name}\")\n\n\ndef check_database_missing():\n \"\"\"Check if the database is missing, used for first time init\n\n :return: True if the database does not exist False if it does\n \"\"\"\n file = CONF.database.filename\n\n LOG.info(_(\"Checking if database: %s exists\") % file)\n\n if not os.path.isfile(file):\n LOG.warning(_(\"Database file does not exist in configured path\"))\n return True\n\n try:\n engine = create_engine(file)\n if not database_exists(engine.url):\n return True\n except Exception as e:\n LOG.warning(e)\n return True\n\n return False\n\n\ndef create_database():\n \"\"\"Create the database using sqlalchemy, used for first time init \"\"\"\n file = CONF.database.filename\n\n try:\n LOG.info(_(\"Creating database\"))\n engine = create_engine(file)\n LOG.info(_(\"Creating database tables\"))\n cd.create_database_tables(engine)\n except Exception as e:\n LOG.error(_(\"Failed to create database due to error: %s\") % e)\n raise e\n", "id": "7182589", "language": "Python", "matching_score": 1.7058095932006836, "max_stars_count": 0, "path": "radloggerpy/database/database_manager.py" }, { "content": "# Copyright (c) 2021 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom oslo_log import log\nfrom radloggerpy import config\n\nfrom radloggerpy.common.exception import RadLoggerPyException\n\nLOG = log.getLogger(__name__)\nCONF = config.CONF\n\n\nclass DeviceException(RadLoggerPyException):\n \"\"\"Exception to be used by devices indicating handled exceptions.\n\n Used by devices to halt the execution of that device while informing\n DeviceManager that the exception has already been handled / logged etc.\n \"\"\"\n", "id": "12373076", "language": "Python", "matching_score": 1.571564793586731, "max_stars_count": 0, "path": "radloggerpy/device/device_exception.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport errno\nimport serial\nfrom threading import Condition\nimport time\n\nfrom oslo_log import log\nfrom radloggerpy import config\n\nfrom radloggerpy._i18n import _\nfrom radloggerpy.database.objects.serial_device import SerialDeviceObject\nfrom radloggerpy.device.device_exception import DeviceException\nfrom radloggerpy.device.device_interfaces.serial_device import SerialDevice\nfrom radloggerpy.models.radiationreading import RadiationReading\nfrom radloggerpy.types.device_types import DeviceTypes\nfrom radloggerpy.types.serial_parity import PARITY_CHOICES_R\n\nLOG = log.getLogger(__name__)\nCONF = config.CONF\n\n\nclass ArduinoGeigerPcb(SerialDevice):\n \"\"\"\"\"\"\n\n NAME = \"ArduinoGeigerPCB\"\n\n TYPE = DeviceTypes.AVERAGE\n\n def __init__(self, info: SerialDeviceObject, condition: Condition):\n super(ArduinoGeigerPcb, self).__init__(info, condition)\n self.stop = False\n self.serial = None\n\n def _init(self):\n self.stop = False\n parity = PARITY_CHOICES_R[self.info.parity].value\n try:\n self.serial = serial.Serial(\n port=self.info.port, baudrate=self.info.baudrate,\n parity=parity, stopbits=self.info.stopbits,\n bytesize=self.info.bytesize)\n except serial.serialutil.SerialException as e:\n if e.errno == errno.EACCES:\n LOG.critical(_(\"Insufficient permissions \"\n \"to open device.\"))\n raise DeviceException\n elif e.errno == errno.ENOENT:\n LOG.critical(_(\"Device does not exist\"))\n raise DeviceException\n else:\n LOG.critical(_(\"Device error %d\") % e.errno)\n raise DeviceException\n\n def _run(self):\n string = \"\"\n while not self.stop:\n while self.serial.inWaiting() > 0:\n char = self.serial.read(1).decode(\"utf-8\")\n if char == '\\n':\n measure = RadiationReading()\n measure.set_cpm(int(string))\n self.data.append(measure)\n string = \"\"\n elif char == '\\r':\n pass\n else:\n string += char\n time.sleep(CONF.devices.minimal_polling_delay / 1000)\n\n # clear serial object when returning from _run\n self.serial = None\n\n def stop(self):\n self.stop = True\n\n def is_stopping(self):\n return self.stop\n", "id": "12474707", "language": "Python", "matching_score": 3.7143726348876953, "max_stars_count": 0, "path": "radloggerpy/device/devices/arduino_geiger_pcb.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom threading import Condition\nfrom unittest import mock\n\nfrom oslo_log import log\nfrom radloggerpy import config\n\nfrom radloggerpy.database.objects.serial_device import SerialDeviceObject\nfrom radloggerpy.device.devices import arduino_geiger_pcb as agpcb\nfrom radloggerpy.tests import base\nfrom radloggerpy.types.serial_parity import PARITY_CHOICES\nfrom radloggerpy.types.serial_parity import SerialParityTypes\n\nLOG = log.getLogger(__name__)\nCONF = config.CONF\n\n\nclass TestArduinoGeigerPcb(base.TestCase):\n\n def setUp(self):\n super(TestArduinoGeigerPcb, self).setUp()\n self.m_info = SerialDeviceObject()\n self.m_info.parity = PARITY_CHOICES[SerialParityTypes.PARITY_NONE]\n self.m_condition = Condition()\n\n def test_name(self):\n m_device = agpcb.ArduinoGeigerPcb(self.m_info, self.m_condition)\n self.assertEqual(agpcb.ArduinoGeigerPcb.NAME, m_device.NAME)\n\n @mock.patch.object(agpcb, 'serial')\n @mock.patch.object(agpcb, 'time')\n def test_run(self, m_time, m_serial):\n m_time.sleep.side_effect = [InterruptedError]\n m_waiting = mock.Mock()\n m_waiting.inWaiting.side_effect = [1, 0]\n m_waiting.read.return_value.decode.return_value = 'a'\n m_serial.Serial.return_value = m_waiting\n m_device = agpcb.ArduinoGeigerPcb(self.m_info, self.m_condition)\n\n self.assertRaises(InterruptedError, m_device.run)\n m_serial.Serial.assert_called_once()\n", "id": "10571600", "language": "Python", "matching_score": 2.8531858921051025, "max_stars_count": 0, "path": "radloggerpy/tests/device/devices/test_arduino_geiger_pcb.py" }, { "content": "# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom radloggerpy import radloggerpy\n\nfrom radloggerpy.tests import base\n\n\nclass TestRadloggerpy(base.TestCase):\n\n def setUp(self):\n super().setUp()\n\n self.p_configurator = mock.patch.object(\n radloggerpy, 'configurator')\n self.m_configurator = self.p_configurator.start()\n self.addCleanup(self.p_configurator.stop)\n\n self.p_database = mock.patch.object(\n radloggerpy, 'database_manager')\n self.m_database = self.p_database.start()\n self.addCleanup(self.p_database.stop)\n\n self.p_first_run = mock.patch.object(\n radloggerpy, 'FirstTimeRun')\n self.m_first_run = self.p_first_run.start()\n self.addCleanup(self.p_first_run.stop)\n\n # self.p_serial = mock.patch.object(\n # radloggerpy.serial, 'Serial')\n # self.m_serial = self.p_serial.start()\n # self.addCleanup(self.p_serial.stop)\n\n # @mock.patch.object(radloggerpy.time, 'sleep')\n # @mock.patch.object(radloggerpy, 'MeasurementObject')\n # def test_run_main(self, m_measurement, m_sleep):\n # m_sleep.side_effect = InterruptedError\n #\n # m_serial_instance = mock.Mock()\n # m_serial_instance.inWaiting.side_effect = [11, 1, 0]\n # m_serial_instance.read.side_effect = [\n # \"14\".encode('utf-8'),\n # '\\n'.encode('utf-8')\n # ]\n #\n # self.m_serial.return_value = m_serial_instance\n #\n # self.assertRaises(InterruptedError, radloggerpy.main)\n # m_sleep.assert_called_once()\n # m_measurement.add.assert_called_once()\n #\n # def test_run_main_err_no_device(self):\n # m_execption = SerialException\n # m_execption.errno = errno.EACCES\n # self.m_serial.side_effect = m_execption\n #\n # radloggerpy.main()\n #\n # self.m_first_run.assert_called_once()\n # self.m_database.close_lingering_sessions.assert_called_once()\n # self.m_configurator.setup_config_and_logging.assert_called_once()\n #\n # def test_run_main_err_access(self):\n # m_execption = SerialException\n # m_execption.errno = errno.ENOENT\n # self.m_serial.side_effect = m_execption\n #\n # radloggerpy.main()\n #\n # self.m_first_run.assert_called_once()\n # self.m_database.close_lingering_sessions.assert_called_once()\n # self.m_configurator.setup_config_and_logging.assert_called_once()\n #\n # def test_run_main_err_arbitrary(self):\n # m_execption = SerialException\n # m_execption.errno = errno.EFAULT\n # self.m_serial.side_effect = m_execption\n #\n # radloggerpy.main()\n #\n # self.m_first_run.assert_called_once()\n # self.m_database.close_lingering_sessions.assert_called_once()\n # self.m_configurator.setup_config_and_logging.assert_called_once()\n", "id": "7401834", "language": "Python", "matching_score": 1.6585248708724976, "max_stars_count": 0, "path": "radloggerpy/tests/test_radloggerpy.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2020 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom radloggerpy._i18n import _\nfrom radloggerpy.database.models.device import Device\nfrom radloggerpy.database.models.measurement import Measurement\nfrom radloggerpy.database.objects.base import DatabaseObject\nfrom radloggerpy.database.objects.device import DeviceObject\n\n\nclass MeasurementObject(DatabaseObject):\n \"\"\"Measurement object with base model attributes\n\n The device attribute can be set to an instance of\n :py:class:`radloggerpy.database.objects.device.DeviceObject` with any\n desired attribute set. When this is set it will be used by methods if\n applicable.\n \"\"\"\n\n id = None # Type: int\n timestamp = None # Type: datetime.datetime\n\n cpm = None # Type: int\n svh = None # Type: float\n\n device = None # Type: DeviceObject\n\n m_measurement = None # Type: Measurement\n\n def _build_object(self):\n self.m_measurement = Measurement()\n\n if self.id:\n self.m_measurement.id = self.id\n if self.timestamp:\n self.m_measurement.timestamp = self.timestamp\n\n if self.device:\n self.device._build_object()\n\n if self.cpm:\n self.m_measurement.cpm = self.cpm\n if self.svh:\n self.m_measurement.svh = self.svh\n\n def _build_attributes(self):\n if self.m_measurement.id:\n self.id = self.m_measurement.id\n if self.m_measurement.timestamp:\n self.timestamp = self.m_measurement.timestamp\n\n if self.m_measurement.base_device:\n dev_obj = DeviceObject()\n dev_obj.m_device = self.m_measurement.base_device\n dev_obj._build_attributes()\n self.device = dev_obj\n\n if self.m_measurement.cpm:\n self.cpm = self.m_measurement.cpm\n if self.m_measurement.svh:\n self.svh = self.m_measurement.svh\n\n @staticmethod\n def add(session, reference):\n reference._build_object()\n\n \"\"\"Measurement.device_id must be set to populate the field\"\"\"\n if reference.m_measurement.device_id is None \\\n and hasattr(reference.device, 'id') and reference.device.id:\n \"\"\"If no device_id is set find it through device id\"\"\"\n reference.m_measurement.device_id = reference.device.id\n elif reference.m_measurement.device_id is None and reference.device:\n \"\"\"If no device_id find it through device\"\"\"\n dev = DeviceObject.find(session, reference.device, False)\n if dev is None:\n raise RuntimeError(_(\"No associateable Device found\"))\n reference.m_measurement.device_id = dev.id\n\n session.add(reference.m_measurement)\n\n try:\n return session.commit()\n except Exception:\n session.rollback()\n # TODO(Dantali0n): These errors are horrendous for users to\n # understand an error abstraction is needed.\n raise\n\n @staticmethod\n def update(session, reference, base, allow_multiple=False):\n NotImplementedError()\n\n @staticmethod\n def delete(session, reference, allow_multiple=False):\n reference._build_object()\n\n filters = reference._filter(reference.m_measurement)\n query = session.query(Measurement).filter_by(**filters)\n\n if allow_multiple:\n results = query.all()\n\n if results is None:\n return None\n\n devs = list()\n\n for result in results:\n dev = MeasurementObject()\n dev.m_measurement = result\n session.delete(result)\n dev._build_attributes()\n devs.append(dev)\n else:\n result = query.one_or_none()\n\n if result is None:\n return None\n\n dev = MeasurementObject()\n dev.m_measurement = result\n dev._build_attributes()\n session.delete(result)\n\n try:\n session.commit()\n except Exception:\n session.rollback()\n # TODO(Dantali0n): These errors are horrendous for users to\n 
# understand an error abstraction is needed.\n raise\n\n if allow_multiple:\n return devs\n else:\n return dev\n\n @staticmethod\n def find(session, reference, allow_multiple=True):\n reference._build_object()\n\n filters = reference._filter(reference.m_measurement)\n query = session.query(Measurement).filter_by(**filters)\n\n if reference.device:\n dev_filter = reference.device._filter(\n reference.device.m_device)\n query = session.query(Measurement).filter_by(**filters)\\\n .join(Device).filter_by(**dev_filter)\n\n if allow_multiple:\n results = query.all()\n\n if results is None:\n return None\n\n ret_results = list()\n for result in results:\n dev = MeasurementObject()\n dev.m_measurement = result\n dev._build_attributes()\n ret_results.append(dev)\n\n return ret_results\n else:\n result = query.one_or_none()\n\n if result is None:\n return None\n\n dev = MeasurementObject()\n dev.m_measurement = result\n dev._build_attributes()\n return dev\n\n @staticmethod\n def find_all(session, references):\n NotImplementedError()\n\n @staticmethod\n def add_all(session, references):\n NotImplementedError()\n", "id": "9808457", "language": "Python", "matching_score": 2.618751287460327, "max_stars_count": 0, "path": "radloggerpy/database/objects/measurement.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\nfrom datetime import datetime\nfrom unittest import mock\n\nfrom oslo_log import log\n\nfrom radloggerpy import config\nfrom radloggerpy.database.models.device import Device\nfrom radloggerpy.database.models.measurement import Measurement\nfrom radloggerpy.database.objects import device\nfrom radloggerpy.database.objects import measurement as ms\n\nfrom radloggerpy.tests import base\n\nLOG = log.getLogger(__name__)\nCONF = config.CONF\n\n\nclass TestMeasurementObject(base.TestCase):\n\n def setUp(self):\n super(TestMeasurementObject, self).setUp()\n\n def test_init(self):\n\n m_atribs = {\n \"cpm\": 12,\n \"svh\": 0.045,\n \"skipme\": \"shouldnotexist\"\n }\n\n test_obj = ms.MeasurementObject(**m_atribs)\n\n self.assertEqual(12, test_obj.cpm)\n self.assertIsNone(None, getattr(test_obj, \"skipme\", None))\n\n def test_filter(self):\n\n m_atribs = {\n \"cpm\": 12,\n \"attributeskip\": \"none\",\n }\n\n test_obj = ms.MeasurementObject(**m_atribs)\n\n m_result = test_obj._filter(test_obj)\n\n self.assertEqual(\n {\"cpm\": 12}, m_result)\n\n def test_build_object_unset(self):\n\n test_obj = ms.MeasurementObject()\n test_obj._build_object()\n\n self.assertIsNone(None, test_obj.m_measurement.id)\n self.assertIsNone(None, test_obj.m_measurement.device_id)\n self.assertIsNone(None, test_obj.m_measurement.timestamp)\n self.assertIsNone(None, test_obj.m_measurement.base_device)\n self.assertIsNone(None, test_obj.m_measurement.cpm)\n self.assertIsNone(None, test_obj.m_measurement.svh)\n\n def test_build_object_values(self):\n\n m_device = mock.Mock()\n\n m_date = datetime.utcnow()\n\n m_atribs = {\n \"cpm\": 12,\n \"svh\": 0.0045,\n \"timestamp\": m_date,\n \"device\": m_device\n }\n\n test_obj = ms.MeasurementObject(**m_atribs)\n test_obj._build_object()\n\n self.assertEqual(12, test_obj.m_measurement.cpm)\n self.assertEqual(0.0045, test_obj.m_measurement.svh)\n self.assertEqual(m_date, test_obj.m_measurement.timestamp)\n m_device._build_object.assert_called_once()\n\n def test_build_attributes_none(self):\n\n test_obj = ms.MeasurementObject()\n test_obj.m_measurement = Measurement()\n test_obj._build_attributes()\n\n self.assertIsNone(test_obj.id)\n self.assertIsNone(test_obj.timestamp)\n self.assertIsNone(test_obj.device)\n self.assertIsNone(test_obj.cpm)\n self.assertIsNone(test_obj.svh)\n\n def test_add(self):\n m_session = mock.Mock()\n\n m_date = datetime.utcnow()\n\n # TODO(Dantali0n): change into setting attributes directly\n m_atribs = {\n \"cpm\": 12,\n \"svh\": 0.0045,\n \"timestamp\": m_date,\n \"device\": device.DeviceObject(**{'id': 1})\n }\n\n test_obj = ms.MeasurementObject(**m_atribs)\n ms.MeasurementObject.add(m_session, test_obj)\n\n m_session.add.assert_has_calls(\n [\n mock.call(test_obj.m_measurement),\n ],\n any_order=True\n )\n m_session.commit.assert_called_once()\n\n def test_add_find_device(self):\n m_session = mock.Mock()\n m_date = datetime.utcnow()\n\n # TODO(Dantali0n): change into setting attributes directly\n m_atribs = {\n \"cpm\": 12,\n \"svh\": 0.0045,\n \"timestamp\": m_date,\n \"device\": device.DeviceObject(**{'name': 'test'})\n }\n\n with mock.patch.object(ms, 'DeviceObject') as m_find:\n test_obj = ms.MeasurementObject(**m_atribs)\n\n m_dev = Device()\n m_dev.id = 1\n m_find.find.return_value = m_dev\n\n ms.MeasurementObject.add(m_session, test_obj)\n\n m_session.add.assert_has_calls(\n [\n mock.call(test_obj.m_measurement),\n ],\n any_order=True\n )\n 
m_session.commit.assert_called_once()\n\n m_find.find.assert_called_once()\n\n def test_add_find_device_error(self):\n m_session = mock.Mock()\n m_date = datetime.utcnow()\n\n # TODO(Dantali0n): change into setting attributes directly\n m_atribs = {\n \"cpm\": 12,\n \"svh\": 0.0045,\n \"timestamp\": m_date,\n \"device\": device.DeviceObject(**{'name': 'test'})\n }\n\n with mock.patch.object(ms, 'DeviceObject') as m_find:\n test_obj = ms.MeasurementObject(**m_atribs)\n m_find.find.return_value = None\n self.assertRaises(\n RuntimeError, ms.MeasurementObject.add, m_session, test_obj)\n\n m_find.find.assert_called_once()\n\n def test_add_error(self):\n m_session = mock.Mock()\n m_session.commit.side_effect = RuntimeError\n\n # TODO(Dantali0n): change into setting attributes directly\n m_atribs = {\n \"cpm\": 12,\n \"device\": device.DeviceObject(**{'id': 1})\n }\n\n test_obj = ms.MeasurementObject(**m_atribs)\n self.assertRaises(\n RuntimeError, ms.MeasurementObject.add, m_session, test_obj)\n\n m_session.add.assert_has_calls(\n [\n mock.call(test_obj.m_measurement),\n ],\n any_order=True\n )\n m_session.commit.assert_called_once()\n m_session.rollback.assert_called_once()\n\n def test_delete_obj(self):\n m_date = datetime.utcnow()\n\n \"\"\"Represents mocked device as it will be retrieved from db \"\"\"\n m_measurement = Measurement()\n m_measurement.id = 1\n m_measurement.timestamp = m_date\n m_measurement.cpm = 12\n m_measurement.svh = 0.0045\n m_measurement.base_device = Device()\n m_measurement.base_device.id = 1\n\n \"\"\"Setup query and session to return mocked device\"\"\"\n m_query = mock.Mock()\n m_session = mock.Mock()\n m_session.query.return_value.filter_by.return_value = m_query\n m_query.one_or_none.return_value = m_measurement\n\n test_obj = ms.MeasurementObject(\n **{\"device\": device.DeviceObject(**{'id': 1})})\n result_obj = ms.MeasurementObject.delete(m_session, test_obj, False)\n\n m_session.delete.assert_has_calls(\n [\n mock.call(m_measurement),\n ],\n any_order=True\n )\n m_session.commit.assert_called_once()\n\n self.assertEqual(1, result_obj.id)\n self.assertEqual(m_date, result_obj.timestamp)\n self.assertEqual(12, result_obj.cpm)\n self.assertEqual(0.0045, result_obj.svh)\n self.assertEqual(1, result_obj.device.id)\n\n def test_delete_obj_multiple(self):\n m_measurement_1 = Measurement()\n m_measurement_1.id = 1\n m_measurement_1.cpm = 12\n m_measurement_1.base_device = Device()\n m_measurement_1.base_device.id = 1\n\n m_measurement_2 = Measurement()\n m_measurement_2.id = 2\n m_measurement_2.cpm = 34\n m_measurement_2.base_device = Device()\n m_measurement_2.base_device.id = 1\n\n m_query = mock.Mock()\n m_session = mock.Mock()\n m_session.query.return_value.filter_by.return_value = m_query\n\n m_query.all.return_value = [m_measurement_1, m_measurement_2]\n\n test_obj = ms.MeasurementObject(\n **{\"device\": device.DeviceObject(**{'id': 1})})\n result_obj = ms.MeasurementObject.delete(m_session, test_obj, True)\n\n m_session.delete.assert_has_calls(\n [\n mock.call(m_measurement_1),\n mock.call(m_measurement_2),\n ],\n any_order=True\n )\n m_session.commit.assert_called_once()\n\n self.assertEqual(1, result_obj[0].id)\n self.assertEqual(12, result_obj[0].cpm)\n self.assertEqual(1, result_obj[0].device.id)\n\n self.assertEqual(2, result_obj[1].id)\n self.assertEqual(34, result_obj[1].cpm)\n self.assertEqual(1, result_obj[1].device.id)\n\n def test_find_obj(self):\n m_date = datetime.utcnow()\n\n \"\"\"Represents mocked device as it will be retrieved from db 
\"\"\"\n m_measurement = Measurement()\n m_measurement.id = 1\n m_measurement.timestamp = m_date\n m_measurement.cpm = 12\n m_measurement.svh = 0.0045\n m_measurement.base_device = Device()\n m_measurement.base_device.id = 1\n\n \"\"\"Setup query and session to return mocked device\"\"\"\n m_query = mock.Mock()\n m_session = mock.Mock()\n m_session.query.return_value.filter_by.return_value.\\\n join.return_value.filter_by.return_value = m_query\n m_query.one_or_none.return_value = m_measurement\n\n test_obj = ms.MeasurementObject(\n **{\"device\": device.DeviceObject(**{'id': 1})})\n result_obj = ms.MeasurementObject.find(m_session, test_obj, False)\n\n self.assertEqual(1, result_obj.id)\n self.assertEqual(m_date, result_obj.timestamp)\n self.assertEqual(12, result_obj.cpm)\n self.assertEqual(0.0045, result_obj.svh)\n self.assertEqual(1, result_obj.device.id)\n\n def test_find_obj_none(self):\n\n \"\"\"Setup query and session to return mocked device\"\"\"\n m_query = mock.Mock()\n m_session = mock.Mock()\n m_session.query.return_value.filter_by.return_value = m_query\n m_query.one_or_none.return_value = None\n\n test_obj = ms.MeasurementObject(**{\"id\": 1})\n result_obj = ms.MeasurementObject.find(m_session, test_obj, False)\n\n self.assertIsNone(result_obj)\n\n def test_find_obj_multiple(self):\n m_measurement_1 = Measurement()\n m_measurement_1.id = 1\n m_measurement_1.cpm = 12\n m_measurement_1.base_device = Device()\n m_measurement_1.base_device.id = 1\n\n m_measurement_2 = Measurement()\n m_measurement_2.id = 2\n m_measurement_2.cpm = 34\n m_measurement_2.base_device = Device()\n m_measurement_2.base_device.id = 1\n\n m_query = mock.Mock()\n m_session = mock.Mock()\n m_session.query.return_value.filter_by.return_value. \\\n join.return_value.filter_by.return_value = m_query\n\n m_query.all.return_value = [m_measurement_1, m_measurement_2]\n\n test_obj = ms.MeasurementObject(\n **{\"device\": device.DeviceObject(**{'id': 1})})\n result_obj = ms.MeasurementObject.find(m_session, test_obj, True)\n\n self.assertEqual(1, result_obj[0].id)\n self.assertEqual(12, result_obj[0].cpm)\n self.assertEqual(1, result_obj[0].device.id)\n\n self.assertEqual(2, result_obj[1].id)\n self.assertEqual(34, result_obj[1].cpm)\n self.assertEqual(1, result_obj[1].device.id)\n\n def test_find_obj_multiple_none(self):\n m_query = mock.Mock()\n m_session = mock.Mock()\n m_session.query.return_value.filter_by.return_value. \\\n join.return_value.filter_by.return_value = m_query\n\n m_query.all.return_value = None\n\n test_obj = ms.MeasurementObject(\n **{\"device\": device.DeviceObject(**{'id': 1})})\n result_obj = ms.MeasurementObject.find(m_session, test_obj, True)\n\n self.assertIsNone(result_obj)\n", "id": "8285784", "language": "Python", "matching_score": 3.5831944942474365, "max_stars_count": 0, "path": "radloggerpy/tests/database/objects/test_measurement.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom oslo_log import log\n\nfrom radloggerpy import config\n\nfrom radloggerpy.database.objects import base as base_obj\nfrom radloggerpy.tests import base\n\nLOG = log.getLogger(__name__)\nCONF = config.CONF\n\n\nclass TestDatabaseObject(base.TestCase):\n\n class ExampleDatabaseObject(base_obj.DatabaseObject):\n\n attribute1 = None\n attribute2 = None\n\n m_model = None\n\n def _build_object(self):\n pass\n\n def _build_attributes(self):\n pass\n\n @staticmethod\n def add(session, reference):\n pass\n\n @staticmethod\n def update(session, reference, base, allow_multiple=False):\n pass\n\n @staticmethod\n def delete(session, reference, allow_multiple=False):\n pass\n\n @staticmethod\n def find(session, reference, allow_multiple=True):\n pass\n\n @staticmethod\n def find_all(session, references):\n pass\n\n @staticmethod\n def add_all(session, references):\n pass\n\n def setUp(self):\n super(TestDatabaseObject, self).setUp()\n\n def test_init(self):\n\n m_atribs = {\n \"attribute1\": \"value1\",\n \"attribute2\": \"value2\",\n \"attributeskip\": \"none\",\n }\n\n test_obj = self.ExampleDatabaseObject(**m_atribs)\n\n self.assertEqual(\"value1\", test_obj.attribute1)\n self.assertEqual(\"value2\", test_obj.attribute2)\n self.assertIsNone(getattr(test_obj, \"attributeskip\", None))\n\n def test_filter(self):\n\n m_atribs = {\n \"attribute1\": \"value1\",\n \"attribute2\": \"value2\",\n \"attributeskip\": \"none\",\n }\n\n test_obj = self.ExampleDatabaseObject(**m_atribs)\n\n m_result = test_obj._filter(test_obj)\n\n self.assertEqual(\n {\"attribute1\": \"value1\", \"attribute2\": \"value2\"}, m_result)\n\n @mock.patch.object(base_obj, 'LOG')\n def test_filter_deprecate(self, m_log):\n\n m_atribs = {\n \"attribute1\": \"value1\",\n \"attribute2\": \"value2\",\n \"attributeskip\": \"none\",\n }\n\n test_obj = self.ExampleDatabaseObject(**m_atribs)\n\n m_result = test_obj._filter(test_obj, ignore=['attribute2'])\n\n m_log.warning.assert_called_once()\n\n self.assertEqual(\n {\"attribute1\": \"value1\"}, m_result)\n", "id": "10763681", "language": "Python", "matching_score": 3.2230143547058105, "max_stars_count": 0, "path": "radloggerpy/tests/database/objects/test_base.py" }, { "content": "# -*- encoding: utf-8 -*-\n# Copyright (c) 2019 Dantali0n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport abc\n\nfrom oslo_log import log\n\nfrom radloggerpy._i18n import _\nfrom radloggerpy.common.common import seq_but_not_str\n\nLOG = log.getLogger(__name__)\n\n\nclass DatabaseObject(metaclass=abc.ABCMeta):\n \"\"\"Abstract database object providing abstract CRUD interfaces\n\n When using SQLAlchemy database sessions all interactions with these\n sessions should be achieved using objects which implement\n :py:class:`~.DatabaseObject`. 
These objects provide CRUD methods to handle\n interactions, hiding the fact that many of the objects in the database\n consist of multiple models.\n\n As an example, to commit an object to the database one would call:\n ``DatabaseObject.add(session, object)``\n\n Classes implementing these interfaces should implement at least\n :py:func:`~add`, :py:func:`~update`, :py:func:`~delete` and\n :py:func:`~find`, however, also implementing :py:func:`~find_all` and\n :py:func:`~add_all` is preferred.\n\n All reference objects used as parameters by static methods should be\n instances of the implementing class itself. Likewise, find and find_all\n should only return objects which are instances of the class itself.\n\n Below is a demonstration of how interactions should look:\n ``dbo = DatabaseObject(**{field1: value1, field2: value2})``\n ``result = DatabaseObject.find(session, dbo)``\n ``print(result.field1)``\n\n Alternatively the fields can be set after the object is instantiated:\n ``dbo = DatabaseObject()``\n ``dbo.field1 = 'hello world'``\n ``result = DatabaseObject.find(session, dbo)``\n ``print(result.field1)``\n\n For models using enums or choicetypes the values should be set as object\n attributes while the keys should be used for internal models.\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Initialize the class attributes matching its arguments\n\n :param kwargs: named arguments\n \"\"\"\n\n for key, value in kwargs.items():\n if hasattr(self, key):\n setattr(self, key, value)\n\n @abc.abstractmethod\n def _build_object(self):\n \"\"\"Build the object with its given attributes for internal models\"\"\"\n pass\n\n @abc.abstractmethod\n def _build_attributes(self):\n \"\"\"Build the attributes for the given state of internal models\"\"\"\n pass\n\n @staticmethod\n def _filter(filter_object, ignore=[]):\n \"\"\"Filters the object depending on its set attributes\n\n Removes certain empty objects such as empty collections but not empty\n strings or byte arrays.\n \"\"\"\n\n if ignore:\n LOG.warning(_(\"Use of deprecated ignore parameter on database \"\n \"object filter!\"))\n\n return {key: name for (key, name) in vars(filter_object).items()\n if hasattr(filter_object.__class__, key) and\n (key not in ignore or (seq_but_not_str(key and key)))}\n\n @staticmethod\n @abc.abstractmethod\n def add(session, reference):\n \"\"\"Add the reference object to the database\n\n :param session: an active :py:class:`sqlalchemy.orm.session.Session`\n :param reference: add database entries based on this object\n \"\"\"\n pass\n\n @staticmethod\n @abc.abstractmethod\n def update(session, reference, base, allow_multiple=False):\n \"\"\"Find the reference(s) in the database and update with own state\n\n :param session: an active :py:class:`sqlalchemy.orm.session.Session`\n :param reference: the object with the desired changes\n :param base: current state of the object in the database\n :param allow_multiple: if updating multiple database items is allowed\n :raises MultipleResultsFound: if multiple results were found with\n allow_multiple as False of type\n :py:class:`sqlalchemy.orm.exc.MultipleResultsFound`\n \"\"\"\n pass\n\n @staticmethod\n @abc.abstractmethod\n def delete(session, reference, allow_multiple=False):\n \"\"\"Remove the object(s) that match the reference\n\n :param session: an active :py:class:`sqlalchemy.orm.session.Session`\n :param reference: remove database entries based on this object\n :param allow_multiple: if updating multiple database items is allowed\n :raises 
MultipleResultsFound: if multiple results were found with\n allow_multiple as False of type\n :py:class:`sqlalchemy.orm.exc.MultipleResultsFound`\n \"\"\"\n pass\n\n @staticmethod\n @abc.abstractmethod\n def find(session, reference, allow_multiple=True):\n \"\"\"Return object(s) that match the reference\n\n :param session: an active :py:class:`sqlalchemy.orm.session.Session`\n :param reference: find database results based on this object\n :param allow_multiple: if updating multiple database items is allowed\n :raises MultipleResultsFound: if multiple results were found with\n allow_multiple as False of type\n :py:class:`sqlalchemy.orm.exc.MultipleResultsFound`\n :return: A single object, list of objects or none, all objects will be\n instances of the class.\n \"\"\"\n pass\n\n @staticmethod\n @abc.abstractmethod\n def find_all(session, references):\n \"\"\"For every specified object find all its matching database objects\n\n :param session: an active :py:class:`sqlalchemy.orm.session.Session`\n :param references: find database results based on these objects\n :return: list of objects or none, all objects will be instances of the\n class.\n \"\"\"\n pass\n\n @staticmethod\n @abc.abstractmethod\n def add_all(session, references):\n \"\"\"Add all specified objects to the database\n\n :param session: an active :py:class:`sqlalchemy.orm.session.Session`\n :param references: add all these objects to the database\n \"\"\"\n pass\n", "id": "6920942", "language": "Python", "matching_score": 2.5156989097595215, "max_stars_count": 0, "path": "radloggerpy/database/objects/base.py" }, { "content": "from collections.abc import Sequence\n\n\ndef seq_but_not_str(obj):\n \"\"\"Determines if object is a collection but not a string or byte array\"\"\"\n return isinstance(obj, Sequence) and \\\n not isinstance(obj, (str, bytes, bytearray))\n", "id": "9604745", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "radloggerpy/common/common.py" }, { "content": "import xattr\n\ndef enable_csd_kernel():\n xattr.setxattr(\"test/test\", \"user.process.csd_read\", \"yes\")\n\nimport pdb; pdb.set_trace()\nf = open(\"test/test\", \"rb\")\nprint(f.read())\nf.close()\n", "id": "6083289", "language": "Python", "matching_score": 0, "max_stars_count": 19, "path": "python/magic-read.py" } ]
2.530495
yhyuan
[ { "content": "import sys\nreload(sys)\nsys.setdefaultencoding(\"latin-1\")\n\nfname = 'n18811799744.sql'\nwith open(fname) as f:\n\tcontent = f.readlines()\n\tcontent = filter(lambda x: len(x) > 100, content)\n\tcontent = map(lambda x: x.strip().split(\",\"), content)\n\tcontent = map(lambda x: \"Hourlys.insert({deviceId: '\" + x[0][14:25] + \"',uploadTime: new Date('\" + x[1].strip()[1:-1] + \"'), airtemp:\" + x[2].strip()[1:-1] + \", airhumidity:\" + x[3].strip()[1:-1]+ \", atmosphericpressure:\" + x[4].strip()[1:-1]+ \", soiltemp:\" + x[6].strip()[1:-1]+ \", soilhumidity:\" + x[7].strip()[1:-1]+ \", windspeed:\" + x[8].strip()[1:-1]+ \", rainfall:\" + x[11].strip()[1:-1] + \"});\", content)\n#\tIds = map(lambda x: \"Hourlys.insert({deviceId: '\" + x[14:26] + \"',uploadTime: \", content)\n\tprint \"\\r\\n\".join(content)\n", "id": "8509106", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "readme/convert.py" } ]
0
cheekynie
[ { "content": "import nuke\nimport nukescripts\nimport operator, math, os\nimport string\nimport random\n\n\n# Utilities for enhancing efficiency when interacting with Nuke's Directed Acyclic Graph\n\n\n# Register keyboard shortcuts and menu entries\nnuke.menu('Nuke').addCommand('Edit/Node/DAG/Move/Move Right', 'dag.move(4, 0)', 'alt+meta+Right')\nnuke.menu('Nuke').addCommand('Edit/Node/DAG/Move/Move Left', 'dag.move(-4, 0)', 'alt+meta+Left')\nnuke.menu('Nuke').addCommand('Edit/Node/DAG/Move/Move Up', 'dag.move(0, -4)', 'alt+meta+Up')\nnuke.menu('Nuke').addCommand('Edit/Node/DAG/Move/Move Down', 'dag.move(0, 4)', 'alt+meta+Down')\nnuke.menu('Nuke').addCommand('Edit/Node/DAG/Move/Move Right Big', 'dag.move(1, 0)', 'alt+meta+shift+Right')\nnuke.menu('Nuke').addCommand('Edit/Node/DAG/Move/Move Left Big', 'dag.move(-1, 0)', 'alt+meta+shift+Left')\nnuke.menu('Nuke').addCommand('Edit/Node/DAG/Move/Move Up Big', 'dag.move(0, -1)', 'alt+meta+shift+Up')\nnuke.menu('Nuke').addCommand('Edit/Node/DAG/Move/Move Down Big', 'dag.move(0, 1)', 'alt+meta+shift+Down')\n\nnuke.menu('Nuke').addCommand('Edit/Node/DAG/Scale/Scale Up Vertical', 'dag.scale(1, 2)', 'meta+shift++', shortcutContext=2)\nnuke.menu('Nuke').addCommand('Edit/Node/DAG/Scale/Scale Down Vertical', 'dag.scale(1, 0.5)', 'meta+shift+_', shortcutContext=2)\nnuke.menu('Nuke').addCommand('Edit/Node/DAG/Scale/Scale Up Horizontal', 'dag.scale(0, 2)', 'meta+=', shortcutContext=2)\nnuke.menu('Nuke').addCommand('Edit/Node/DAG/Scale/Scale Down Horizontal', 'dag.scale(0, 0.5)', 'meta+-', shortcutContext=2)\nnuke.menu('Nuke').addCommand('Edit/Node/DAG/Mirror Horizontal from Right', 'dag.scale(0, -1, pivot=\"min\")', 'meta+m')\nnuke.menu('Nuke').addCommand('Edit/Node/DAG/Mirror Horizontal from Left', 'dag.scale(0, -1, pivot=\"max\")', 'meta+shift+m')\nnuke.menu('Nuke').addCommand('Edit/Node/DAG/Mirror Vertical from Top', 'dag.scale(1, -1, pivot=\"max\")', 'ctrl+meta+alt+m')\nnuke.menu('Nuke').addCommand('Edit/Node/DAG/Mirror Vertical from Bottom', 'dag.scale(1, -1, pivot=\"min\")', 'ctrl+alt+meta+shift+m')\n\nnuke.menu('Nuke').addCommand('Edit/Node/DAG/Align/Left', 'dag.align(\"left\")', 'ctrl+shift+left', shortcutContext=2)\nnuke.menu('Nuke').addCommand('Edit/Node/DAG/Align/Right', 'dag.align(\"right\")', 'ctrl+shift+right', shortcutContext=2)\nnuke.menu('Nuke').addCommand('Edit/Node/DAG/Align/Up', 'dag.align(\"up\")', 'ctrl+shift+up', shortcutContext=2)\nnuke.menu('Nuke').addCommand('Edit/Node/DAG/Align/Down', 'dag.align(\"down\")', 'ctrl+shift+down', shortcutContext=2)\nnuke.menu('Nuke').addCommand('Edit/Node/DAG/Snap to Grid', 'dag.snap_to_grid()', 'alt+s', shortcutContext=2)\nnuke.menu('Nuke').addCommand('Edit/Node/DAG/Connect Selected to Closest', 'dag.connect_to_closest()', 'meta+shift+y', shortcutContext=2)\nnuke.menu('Nuke').addCommand('Edit/Node/DAG/Connect Closest to Selected', 'dag.connect_to_closest(direction=1)', 'alt+meta+shift+y', shortcutContext=2)\nnuke.menu('Nuke').addCommand('Edit/Node/DAG/Paste To Selected', 'dag.paste_to_selected()', 'alt+v', shortcutContext=2)\nnuke.menu('Nuke').addCommand('Edit/Node/DAG/Read from Write', 'dag.read_from_write()', 'alt+r', shortcutContext=2)\n\nnuke.menu('Nuke').addCommand('Edit/Select Similar/Select Similar Class', 'nuke.selectSimilar(nuke.MATCH_CLASS)', 'alt+meta+shift+s', shortcutContext=2)\nnuke.menu('Nuke').addCommand('Edit/Select Similar/Select Similar Color', 'nuke.selectSimilar(nuke.MATCH_COLOR)', 'alt+meta+shift+c', shortcutContext=2)\nnuke.menu('Nuke').addCommand('Edit/Select Similar/Select 
Similar Y Position', 'dag.select_similar_position(axis=1)', 'alt+meta+shift+v', shortcutContext=2)\nnuke.menu('Nuke').addCommand('Edit/Select Similar/Select Similar X Position', 'dag.select_similar_position(axis=0)', 'ctrl+alt+meta+shift+v', shortcutContext=2)\nnuke.menu('Nuke').addCommand('Edit/Select Upstream', 'dag.select_upstream(nuke.selectedNodes())', 'alt+meta+shift+u', shortcutContext=2)\nnuke.menu('Nuke').addCommand('Edit/Invert Selection', 'nuke.invertSelection()', 'alt+meta+shift+i', shortcutContext=2)\nnuke.menu('Nuke').addCommand('Edit/Select Connected Nodes', 'dag.select_connected(nuke.selectedNodes())', 'alt+meta+shift+o', shortcutContext=2)\nnuke.menu('Nuke').addCommand('Edit/Select Downstream', 'dag.select_downstream(nuke.selectedNodes())', 'alt+meta+shift+p', shortcutContext=2)\nnuke.menu('Nuke').addCommand('Edit/Select Unused Nodes', 'dag.select_unused(nuke.selectedNodes())', 'ctrl+alt+meta+shift+u', shortcutContext=2)\n\nnuke.menu('Nuke').addCommand('Edit/Node/DAG/Properties Panel Open', 'dag.open_panels()', 'a', shortcutContext=1)\nnuke.menu('Nuke').addCommand('Edit/Node/DAG/Properties Panel Close', 'dag.close_panels()', 'alt+a', shortcutContext=1)\n\nnuke.menu('Nuke').addCommand('Edit/Node/DAG/Sort By File Knob', 'dag.auto_place()', 'l', shortcutContext=2)\n\nnuke.menu('Nuke').addCommand('Edit/Node/Declone', 'dag.declone_nodes(nuke.selectedNodes())', 'alt+shift+k', shortcutContext=2)\nnuke.menu('Nuke').addCommand('File/Export Selected with Root Settings', 'dag.export_selected_nodes()', 'ctrl+shift+e', index=7)\nnuke.menu('Nuke').addCommand('File/Import Script', 'nukescripts.import_script()', 'ctrl+shift+i', index=8)\n\n\nnuke.menu('Nuke').addCommand('Edit/Node/Swap A - B', 'dag.swap_node()', 'shift+x')\nnuke.menu('Viewer').addCommand(\"Swap View\", \"dag.swap_view()\", \"shift+q\")\n\nnuke.menu('Nodes').addCommand( 'Transform/Transform', 'dag.create_transform()', 't')\n\nnuke.menu('Nodes').addCommand('Other/Create Dots', 'dag.create_dots()', 'alt+d', shortcutContext=2)\nnuke.menu('Nodes').addCommand('Other/Create Side Dots', 'dag.create_dots(side=True)', 'alt+shift+d', shortcutContext=2)\n\n\n# DAG Position Commands\nnuke.menu('Nuke').addCommand('Edit/Bookmark/Restore Position 1', 'nukescripts.bookmarks.quickRestore(1)', 'ctrl+1', shortcutContext=2)\nnuke.menu('Nuke').addCommand('Edit/Bookmark/Restore Position 2', 'nukescripts.bookmarks.quickRestore(2)', 'ctrl+2', shortcutContext=2)\nnuke.menu('Nuke').addCommand('Edit/Bookmark/Restore Position 3', 'nukescripts.bookmarks.quickRestore(3)', 'ctrl+3', shortcutContext=2)\nnuke.menu('Nuke').addCommand('Edit/Bookmark/Restore Position 4', 'nukescripts.bookmarks.quickRestore(4)', 'ctrl+4', shortcutContext=2)\nnuke.menu('Nuke').addCommand('Edit/Bookmark/Restore Position 5', 'nukescripts.bookmarks.quickRestore(5)', 'ctrl+5', shortcutContext=2)\nnuke.menu('Nuke').addCommand('Edit/Bookmark/Restore Position 6', 'nukescripts.bookmarks.quickRestore(6)', 'ctrl+6', shortcutContext=2)\nnuke.menu('Nuke').addCommand('Edit/Bookmark/Save Position 1', 'nukescripts.bookmarks.quickSave(1)', 'ctrl+shift+1', shortcutContext=2)\nnuke.menu('Nuke').addCommand('Edit/Bookmark/Save Position 2', 'nukescripts.bookmarks.quickSave(2)', 'ctrl+shift+2', shortcutContext=2)\nnuke.menu('Nuke').addCommand('Edit/Bookmark/Save Position 3', 'nukescripts.bookmarks.quickSave(3)', 'ctrl+shift+3', shortcutContext=2)\nnuke.menu('Nuke').addCommand('Edit/Bookmark/Save Position 4', 'nukescripts.bookmarks.quickSave(4)', 'ctrl+shift+4', 
shortcutContext=2)\nnuke.menu('Nuke').addCommand('Edit/Bookmark/Save Position 5', 'nukescripts.bookmarks.quickSave(5)', 'ctrl+shift+5', shortcutContext=2)\nnuke.menu('Nuke').addCommand('Edit/Bookmark/Save Position 6', 'nukescripts.bookmarks.quickSave(6)', 'ctrl+shift+6', shortcutContext=2)\n\n# # Hlink Nodes\n# nuke.menu('Nuke').addCommand('Edit/HLink Cut', 'dag.hlink_cut()', 'ctrl+x')\n# nuke.menu('Nuke').addCommand('Edit/HLink Copy', 'dag.hlink_copy()', 'ctrl+c')\n# nuke.menu('Nuke').addCommand('Edit/HLink Paste', 'dag.hlink_paste()', 'ctrl+v')\n# nuke.menu('Nuke').addCommand('Edit/HLink Create', 'dag.hlink_create()', 'alt+shift+p')\n# nuke.menu('Nuke').addCommand('Edit/Paste', 'nuke.nodePaste(\"%clipboard%\")', 'ctrl+shift+v', index=6)\n\nnuke.menu('Nodes').addCommand('Other/Create Pointer', 'dag.create_pointer()', 'alt+t')\n\n\n\n\n\n\n\n# Get the grid size from the preferences. Used as the default unit of movement.\ngrid = (int(nuke.toNode('preferences').knob('GridWidth').value()), int(nuke.toNode('preferences').knob('GridHeight').value()))\n\n\ndef unselect(nodes=None):\n # Unselect nodes\n if not nodes:\n nodes = nuke.allNodes(recurseGroups=True)\n if not isinstance(nodes, list):\n return\n _ = [n.setSelected(False) for n in nodes]\n\n\ndef select(nodes):\n # Select specified nodes\n if not isinstance(nodes, list):\n return\n _ = [n.setSelected(True) for n in nodes]\n\n\ndef get_parent(node):\n # return node's parent node, return nuke.root() if on the top level\n return nuke.toNode('.'.join(node.fullName().split('.')[:-1])) or nuke.root()\n\n\ndef get_topnode(node):\n\n # return the topnode of node\n return nuke.toNode(nuke.tcl('return [value [topnode {0}].name]'.format(node.fullName())))\n\n\ndef get_pos(node):\n # return 2d list of centered node positions\n if node.Class() == 'BackdropNode':\n return [node.xpos(), node.ypos()]\n else:\n return [node.xpos() + node.screenWidth()/2, node.ypos() + node.screenHeight()/2]\n\n\ndef set_pos(node, posx, posy):\n # Set node's position given a centered position based on screen width\n # param: pos - 2dim list of int node positions\n if node.Class() == 'BackdropNode':\n return node.setXYpos(int(posx), int(posy))\n else:\n return node.setXYpos(int(posx - node.screenWidth()/2), int(posy - node.screenHeight()/2))\n\n\ndef hide_panel():\n # Always hide control panels on node creation if node not in exceptions\n node = nuke.thisNode()\n exceptions = ['Roto', 'RotoPaint']\n if node.Class() not in exceptions:\n nuke.thisNode().showControlPanel()\n nuke.thisNode().hideControlPanel()\nnuke.addOnUserCreate(hide_panel)\n\n\ndef open_panels(nodes=None):\n # Open properties panels\n if not nodes:\n nodes = nuke.selectedNodes()\n ignored = ['Viewer']\n if len(nodes) > 10:\n if not nuke.ask('Continuing will open {0} properties panels. \\nAre you sure you want to continue?'.format(len(nodes))):\n return\n for node in nodes:\n if node.Class() not in ignored:\n # if node.shown():\n # if nclass in buggy:\n # # There is a bug with node.shown() for some node classes, where .shown()\n # # incorrectly returns true if it is hidden. 
Workaround by cutting node and undoing\n # nuke.Undo().begin()\n # nuke.delete(node)\n # nuke.Undo().end()\n # nuke.undo()\n # node.setSelected(True)\n # node.hideControlPanel()\n # else:\n node.showControlPanel()\n\n\ndef close_panels(nodes=None):\n # Close all properties panels\n if not nodes:\n nodes = nuke.allNodes(recurseGroups=True)\n for node in nodes:\n node.hideControlPanel()\n\n\ndef select_similar_position(axis=1):\n nodes = nuke.selectedNodes()\n if not nodes:\n return\n node = nodes[0]\n prev_selected = nodes[1:]\n threshold = 1\n unselect()\n select(prev_selected)\n if axis:\n same_pos_nodes = {n:n.xpos() for n in nuke.allNodes() if abs(n.ypos()- node.ypos()) < threshold}\n else:\n same_pos_nodes = {n:n.ypos() for n in nuke.allNodes() if abs(n.xpos()- node.xpos()) < threshold}\n sorted_nodes = sorted(same_pos_nodes.items(), key=operator.itemgetter(1))\n for n, pos in sorted_nodes:\n n.setSelected(True)\n\n\ndef snap_to_grid():\n # Snap selected nodes to grid\n nodes = nuke.selectedNodes()\n for node in nodes:\n nuke.autoplaceSnap(node)\n\n\ndef auto_place():\n # autoplace all selected\n nodes = nuke.selectedNodes()\n\n # Sort by file knob value if the nodes have one\n filenodes = {n: n['file'].getValue() for n in nodes if 'file' in n.knobs()}\n if filenodes:\n sorted_filenodes = sorted(filenodes.items(), key=operator.itemgetter(1))\n filenodes_pos = {n: [n.xpos(), n.ypos()] for n in nodes if 'file' in n.knobs()}\n ypos_sort = sorted(filenodes_pos.items(), key=lambda (k, v): v[1])\n xpos_sort = sorted(filenodes_pos.items(), key=lambda (k, v): v[0])\n start_pos = [xpos_sort[0][1][0], ypos_sort[0][1][1]]\n for node, filepath in sorted_filenodes:\n node.setXYpos(start_pos[0], start_pos[1])\n start_pos = (start_pos[0] + grid[0]*2, start_pos[1])\n\n # Normal autoplace for nodes without file knob\n normal_nodes = [n for n in nodes if 'file' not in n.knobs()]\n unselect()\n _ = [n.setSelected(True) for n in normal_nodes]\n nuke.autoplace_all()\n _ = [n.setSelected(True) for n in nodes]\n\n\ndef move(xvel, yvel):\n # Move selected nodes by specified number of grid lengths in x and y\n yvel *= 3\n nodes = nuke.selectedNodes()\n for node in nodes:\n node.setXYpos(int(node.xpos() + grid[0] * xvel), int(node.ypos() + grid[1] * yvel))\n\n\ndef get_closest_node(node):\n # Return the closest node to node\n distances = {}\n for n in nuke.allNodes():\n if n.name() == node.name():\n continue\n distance = math.sqrt( \n math.pow( (node.xpos() - n.xpos()), 2 ) + math.pow( (node.ypos() - n.ypos()), 2 )\n )\n distances[n.name()] = distance\n return nuke.toNode(min(distances, key=distances.get))\n\n\ndef connect_to_closest(direction=0):\n # Connect next available input of all selected nodes to the closest node\n for node in nuke.selectedNodes():\n closest = get_closest_node(node)\n if direction:\n closest.setInput(0, node)\n else:\n node.connectInput(0, closest)\n\n\ndef paste_to_selected():\n nodes = nuke.selectedNodes()\n all_nodes = nuke.allNodes()\n unselect()\n for node in nodes:\n node.setSelected(True)\n nuke.nodePaste('%clipboard')\n unselect()\n if not nodes:\n nuke.nodePaste('%clipboard')\n # Select pasted nodes\n select(all_nodes)\n nuke.invertSelection()\n \n\ndef align(direction):\n # Align nodes to the farthest outlier in the specified direction.\n # param: direction - one of: left | right | up | down\n\n nodes = nuke.selectedNodes()\n\n if len(nodes) < 2:\n return\n\n horizontally = ['left', 'right']\n vertically = ['up', 'down']\n\n if direction in horizontally:\n align = 0\n elif 
direction in vertically:\n align = 1\n else:\n print 'Error: invalid direction specified: {0}'.format(direction)\n return\n\n positions = {n: get_pos(n) for n in nodes}\n sorted_positions = sorted(positions.items(), key=lambda (k, v): v[align])\n if direction in ['down', 'right']:\n sorted_positions.reverse()\n target = sorted_positions[0]\n target_pos = target[1]\n\n offset = 0\n\n other_axis = abs(1 - align)\n\n sorted_other_axis = sorted(positions.items(), key=lambda (k, v): v[other_axis])\n\n nuke.Undo().begin()\n for i in range(len(sorted_other_axis)):\n node = sorted_other_axis[i][0]\n pos = sorted_other_axis[i][1]\n if i == 0: \n distance = 0\n overlapping = False\n prev_pos = pos\n else:\n prev_pos = sorted_other_axis[i-1][1]\n # Compare current node position to previous node position.\n # If difference is < overlap threshold, nodes are overlapping.\n distance = abs(pos[other_axis] + grid[other_axis] * offset - prev_pos[other_axis])\n overlap_threshold = [int(node.screenWidth() * 1.1), int(node.screenHeight() * 1.1)]\n overlapping = distance < overlap_threshold[other_axis]\n\n if overlapping:\n offset += 1\n\n new_pos = pos\n new_pos[other_axis] = int(pos[other_axis] + grid[other_axis] * offset)\n\n # Set value into sorted_other_axis also so we access the right value on the next loop\n sorted_other_axis[i][1][other_axis] = new_pos[other_axis]\n \n if align:\n set_pos(node, new_pos[other_axis], target_pos[align])\n else:\n set_pos(node, target_pos[align], new_pos[other_axis])\n i += 1\n nuke.Undo().end()\n\n\ndef scale(axis, scale, pivot='max'):\n # Scale selected nodes by factor of xscale, yscale\n # param: axis - one of 0 or 1 - x or y scale\n # param: float scale - factor to scale. 1 will do nothing. 2 will scale up 1 grid unit.\n # param: str pivot - where to scale from. 
One of min | max | center\n pivots = ['min', 'max', 'center']\n if pivot not in pivots:\n return\n nodes = nuke.selectedNodes()\n if len(nodes) < 2:\n return\n\n positions = {n: get_pos(n) for n in nodes}\n sort = sorted(positions.items(), key=lambda (k, v): v[axis])\n\n minpos = sort[0][1][axis]\n maxpos = sort[-1][1][axis]\n\n if pivot == 'max':\n pivot_pos = maxpos\n elif pivot == 'min':\n pivot_pos = minpos\n elif pivot == 'center':\n pivot_pos = (minpos - maxpos)/2 + minpos\n\n nuke.Undo().begin()\n for node, pos in positions.iteritems():\n if axis:\n new_pos = (pos[1] - pivot_pos) * scale + pivot_pos\n set_pos(node, pos[0], new_pos)\n if node.Class() == 'BackdropNode':\n bdpos = ((pos[1] + node['bdheight'].getValue()) - pivot_pos) * scale + pivot_pos - node.ypos()\n print pos[1]\n print new_pos\n print bdpos\n if scale > 0:\n node['bdheight'].setValue(bdpos)\n else:\n node.setXYpos(pos[0], int(new_pos-abs(bdpos)))\n else:\n new_pos = (pos[0] - pivot_pos) * scale + pivot_pos\n set_pos(node, new_pos, pos[1])\n if node.Class() == 'BackdropNode':\n bdpos = ((pos[0] + node['bdwidth'].getValue()) - pivot_pos) * scale + pivot_pos - node.xpos()\n if scale > 0:\n node['bdwidth'].setValue(bdpos)\n else:\n node.setXYpos(int(new_pos-abs(bdpos)), int(node.ypos()))\n nuke.Undo().end()\n\n\n\ndef copy_inputs(src, dst):\n # copy input connections from src node to dst node\n # number of inputs must be the same between nodes\n for j in range(dst.inputs()):\n dst.setInput(j, None)\n for i in range(src.inputs()):\n dst.setInput(i, src.input(i))\n\n\ndef declone(node):\n # Declone a single node\n if not node.clones():\n return\n parent = get_parent(node)\n parent.begin()\n node.setSelected(True)\n args = node.writeKnobs( nuke.WRITE_ALL | nuke.WRITE_USER_KNOB_DEFS |\n nuke.WRITE_NON_DEFAULT_ONLY | nuke.TO_SCRIPT)\n decloned_node = nuke.createNode(node.Class(), knobs=args, inpanel=False)\n copy_inputs(node, decloned_node)\n nuke.delete(node)\n parent.end()\n return decloned_node\n\n\ndef declone_nodes(nodes):\n # A better declone than the buggy default nukescripts.misc.declone()\n unselect()\n decloned_nodes = list()\n for node in nodes:\n decloned_nodes.append(declone(node))\n if decloned_nodes:\n # Restore selection\n _ = [n.setSelected(True) for n in decloned_nodes]\n\n\ndef export_selected_nodes():\n path = nuke.getFilename(\"Export Selected To:\")\n if not path:\n return\n nuke.nodeCopy(path)\n root = nuke.root()\n rootstring = root.writeKnobs(nuke.TO_SCRIPT | nuke.WRITE_USER_KNOB_DEFS)\n rootstring = \"%s\\nfirst_frame %d\\nlast_frame %d\" % (rootstring, root['first_frame'].value(), root['last_frame'].value())\n rootstring = \"%s\\nproxy_format \\\"%s\\\"\" % (rootstring, root['proxy_format'].toScript())\n rootstring = \"Root {\\n%s\\n}\" % rootstring\n noroot = open(path).read()\n with open(path, \"w+\") as f:\n f.write((rootstring + \"\\n\" + noroot))\n\n\n\n#--------------------------------------------------------------\n# Nuke Node Dependency Utilities\nif nuke.NUKE_VERSION_MAJOR > 11:\n connection_filter = nuke.INPUTS | nuke.HIDDEN_INPUTS | nuke.EXPRESSIONS | nuke.LINKINPUTS\nelse:\n connection_filter = nuke.INPUTS | nuke.HIDDEN_INPUTS | nuke.EXPRESSIONS\n\ndef find_root_nodes(node, results=[], remove_roots_with_inputs=True):\n # Find all root nodes of node. 
\n # If remove_roots_with_inputs: remove root nodes with an input (like Roto etc)\n for dependency in node.dependencies():\n if not dependency.dependencies():\n results.append(dependency)\n else:\n find_root_nodes(dependency, results)\n if remove_roots_with_inputs:\n results = [res for res in results if res.maxInputs() == 0]\n return results\n\n\ndef upstream(node, max_depth=-1, deps=set([])):\n if max_depth != 0:\n new_deps = set([n for n in nuke.dependencies(node, what=connection_filter) if n not in deps])\n deps |= new_deps\n for dep in new_deps:\n upstream(dep, max_depth-1, deps)\n return deps\n\n\ndef connected(nodes, upstream=True, downstream=True):\n # return all upstream and/or downstream nodes of node\n # based on nuke.overrides.selectConnectedNodes()\n all_deps = set()\n deps_list = nodes\n evaluate_all = True\n while deps_list:\n deps = []\n if upstream:\n deps += nuke.dependencies(deps_list, connection_filter)\n if downstream:\n deps += nuke.dependentNodes(connection_filter, deps_list, evaluate_all)\n evaluate_all = False\n deps_list = [d for d in deps if d not in all_deps and not all_deps.add(d)]\n return all_deps\n\ndef select_upstream(nodes):\n # Select all upstream dependencies of node\n deps = [n for n in connected(nodes, upstream=True, downstream=False)]\n select(deps)\n return deps\n\ndef select_downstream(nodes):\n # Select all downstream dependencies of node\n deps = [n for n in connected(nodes, upstream=False, downstream=True)]\n select(deps)\n return deps\n\ndef select_connected(nodes):\n # Select all nodes connected to node\n deps = [n for n in connected(nodes, upstream=True, downstream=True)]\n select(deps)\n return deps\n\ndef select_unused(nodes):\n # select all nodes that are not upstream or downstream of :param: nodes\n # Backdrops and dot nodes with a label are omitted.\n connected_nodes = [n for n in connected(nodes, upstream=True, downstream=True)]\n unused_nodes = [n for n in nuke.allNodes() if n not in connected_nodes and n.Class() != 'BackdropNode' and not (n.Class() == 'Dot' and n['label'].getValue())]\n unselect()\n select(unused_nodes)\n return unused_nodes\n\n\n\n\n\n\n# DAG Positions\n# Inspired by <NAME>'s sb_dagPosition.py https://www.bjorkvisuals.com/tools/the-foundrys-nuke/python\n# Using built-in nukescripts.bookmarks module now instead.\ndef save_dag_pos(preset):\n # Save current dag zoom and position as a preset on the active viewer\n zoom = nuke.zoom()\n pos = nuke.center()\n viewer = nuke.activeViewer()\n if not viewer:\n nuke.message('Error: please create a viewer to store the dag positions on...')\n return\n else:\n viewer = viewer.node()\n if 'dagpos' not in viewer.knobs():\n viewer.addKnob(nuke.String_Knob('dagpos', 'dagpos', '0,0,0:0,0,0:0,0,0:0,0,0:0,0,0:0,0,0:0,0,0:0,0,0:0,0,0:0,0,0'))\n dagpos_knob = viewer['dagpos']\n dagpos_knob.setFlag(nuke.STARTLINE)\n dagpos_knob.setEnabled(False)\n else:\n dagpos_knob = viewer['dagpos']\n dagpos_vals = dagpos_knob.getValue().split(':')\n dagpos_vals.pop(preset-1)\n new_dagpos = ','.join([str(zoom), str(pos[0]), str(pos[1])])\n dagpos_vals.insert(preset-1, new_dagpos)\n dagpos_knob.setValue(':'.join(dagpos_vals))\n\ndef load_dag_pos(preset):\n # Load dag zoom and position from specified preset number\n viewer = nuke.activeViewer()\n if not viewer:\n nuke.message('Error: please create a viewer to store the dag positions on...')\n return\n viewer = viewer.node()\n if 'dagpos' not in viewer.knobs():\n nuke.message('No preset positions created yet...')\n return\n dagpos_knob = viewer['dagpos']\n 
dagpos_vals = dagpos_knob.getValue().split(':')[preset-1]\n zoom, xpos, ypos = dagpos_vals.split(',')\n nuke.zoom(float(zoom), [float(xpos), float(ypos)])\n\n\n\n\n#----------------------------------------------------------------------------------\n# Hidden Input Link Nodes\n# This is no longer used in favor of the anchor / pointer workflow\n\ndef hidden_inputs_in_selection(nodes):\n return [n for n in nodes if 'hide_input' in n.knobs() and n['hide_input'].getValue()]\n\ndef set_hlink_knobs(nodes):\n # Add knob to track what node this node is connected to\n for node in hidden_inputs_in_selection(nodes):\n if not 'hlink_node' in node.knobs():\n node.addKnob(nuke.String_Knob('hlink_node', 'hlink_node'))\n input_node = node.input(0)\n if input_node:\n node['hlink_node'].setValue(input_node.fullName())\n else:\n node['hlink_node'].setValue('')\n\ndef hlink_copy():\n nodes = nuke.selectedNodes()\n if nodes:\n set_hlink_knobs(nodes)\n nuke.nodeCopy('%clipboard%')\n\ndef hlink_cut():\n hlink_copy()\n nukescripts.node_delete(popupOnError=True)\n\ndef hlink_paste():\n nuke.nodePaste('%clipboard%')\n for node in hidden_inputs_in_selection(nuke.selectedNodes()):\n if 'hlink_node' in node.knobs():\n target = nuke.toNode(node['hlink_node'].getValue())\n if target:\n node.setInput(0, target)\n\ndef hlink_create():\n # Creates an hlink node for each selected node\n nodes = nuke.selectedNodes()\n unselect()\n hlinks = []\n for node in nodes:\n hlink = nuke.createNode('Dot', 'hide_input 1 note_font_size 18', inpanel=False)\n hlinks.append(hlink)\n hlink.setInput(0, node)\n target_name = node.fullName()\n set_hlink_knobs([hlink])\n hlink['hlink_node'].setValue(target_name)\n label = hlink['label']\n target_label = node['label'].getValue()\n if node.Class() == 'Read':\n label.setValue(' | ' + node['label'].getValue() + '\\n' + os.path.basename(node['file'].getValue()))\n elif target_label:\n label.setValue(' | ' + target_label)\n else:\n label.setValue(' | ' + target_name)\n hlink.setXYpos(node.xpos() - grid[0]*2, node.ypos()-grid[1]*0)\n nuke.autoplaceSnap(hlink)\n _ = [n.setSelected(True) for n in hlinks]\n\n\n\ndef dec2hex(dec):\n hexcol = '%08x' % dec\n return '0x%02x%02x%02x' % (int(hexcol[0:2], 16), int(hexcol[2:4], 16), int(hexcol[4:6], 16))\n\n\n\ndef create_pointer():\n # Create an anchor / pointer set\n \n # Customization Options\n # Node class to use for anchor / pointer nodes. Defaults to NoOp but could be a Dot node if you prefer\n AP_CLASS = 'NoOp' \n\n # Displays an input / output icon on the node to visually differentiate it from the standard node class\n AP_ICON = True\n\n\n\n nodes = nuke.selectedNodes()\n if not nodes:\n return\n\n for target in nodes:\n upstream = [n for n in connected(nodes, upstream=True, downstream=False)]\n\n if len(upstream) > 5:\n if not nuke.ask('More than 5 upstream nodes. Are you sure you want to continue?'):\n return\n\n randstr = ''.join(random.choice(string.ascii_lowercase) for i in range(4))\n \n topnode = get_topnode(target)\n\n target_label = target['label'].getValue()\n\n # If topnode has a file knob, use that to set title\n # If it's a roto node, use the roto label\n if 'file' in topnode.knobs():\n pointer_title = os.path.basename(topnode['file'].getValue())\n if '.' 
in pointer_title:\n pointer_title = pointer_title.split('.')[0]\n elif topnode.Class() in ['Roto', 'RotoPaint'] and topnode['label'].getValue():\n pointer_title = topnode['label'].getValue()\n elif target_label:\n pointer_title = target_label\n else:\n pointer_title = ''\n\n topnode_color = topnode['tile_color'].value()\n\n if topnode_color == 0:\n # Get default color from prefs if node is not colored https://community.foundry.com/discuss/topic/103301/get-the-default-tile-color-from-preferences\n prefs = nuke.toNode('preferences')\n default_colors = {prefs['NodeColour{0:02d}Color'.format(i)].value(): prefs['NodeColourClass{0:02d}'.format(i)].value() for i in range(1, 14)}\n node_class = topnode.Class().lower()\n node_class = ''.join([i for i in node_class if not i.isdigit()])\n for color, classes in default_colors.items():\n if node_class in classes:\n topnode_color = color\n break\n if 'deep' in node_class:\n topnode_color = prefs['NodeColourDeepColor'].value()\n \n if len(nodes) == 1:\n # Only prompt the user for info if there is one selected node\n panel = nuke.Panel('Create Pointer')\n panel.addSingleLineInput('title', pointer_title)\n if panel.show():\n pointer_title = panel.value('title')\n else:\n return\n\n has_downstream = len(select_downstream(target)) > 0\n unselect()\n\n if not has_downstream:\n target.setSelected(True)\n\n # create anchor node\n\n anchor = nuke.createNode(AP_CLASS, 'name ___anchor_{0}{1}label \"<font size=7>\\[value title]\"'.format(randstr, ' icon Output.png ' if AP_ICON else ' '))\n anchor.addKnob(nuke.Tab_Knob('anchor_tab', 'anchor'))\n anchor.addKnob(nuke.String_Knob('title', 'title'))\n anchor['title'].setValue(pointer_title)\n anchor['tile_color'].setValue(topnode_color)\n anchor.setInput(0, target)\n anchor.setSelected(True)\n\n # create pointer node\n pointer = nuke.createNode(AP_CLASS, 'name ___pointer_{0} hide_input true{1}'.format(randstr, ' icon Input.png ' if AP_ICON else ''))\n pointer.addKnob(nuke.Tab_Knob('pointer_tab', 'pointer'))\n pointer.addKnob(nuke.String_Knob('target', 'target'))\n pointer['target'].setValue(anchor.fullName())\n pointer['label'].setValue('<font size=7> [if {[exists input.title]} {return [value input.title]}]')\n pointer.addKnob(nuke.PyScript_Knob('connect_to_target', 'connect'))\n pointer['connect_to_target'].setFlag(nuke.STARTLINE)\n pointer.addKnob(nuke.PyScript_Knob('zoom_to_target', 'zoom'))\n pointer.addKnob(nuke.PyScript_Knob('set_target', 'set target'))\n pointer['connect_to_target'].setValue('''n = nuke.thisNode()\nt = n['target'].getValue()\nif nuke.exists(t):\n tn = nuke.toNode(t)\n n.setInput(0, tn)''')\n pointer['zoom_to_target'].setValue('''t = nuke.thisNode()['target'].getValue()\nif nuke.exists(t):\n tn = nuke.toNode(t)\n nuke.zoom(2.0, [tn.xpos(), tn.ypos()])''')\n pointer['set_target'].setValue('''n = nuke.thisNode()\nsn = nuke.selectedNodes()\nif sn:\n t = sn[-1]\nn['target'].setValue(t.fullName())''')\n # set autolabel knob to execute python script to reconnect node to target.\n # it's a hack but it works to automatically reconnect the input without using knobChanged callbacks!\n # FYI, onCreate callback can not connect input 0 due to a nuke bug\n pointer['autolabel'].setValue('\"{0}\\\\n{1}\".format(nuke.thisNode().name(), nuke.thisNode()[\"label\"].evaluate()) if nuke.thisNode().setInput(0, nuke.toNode(nuke.thisNode()[\"target\"].getValue())) else \"\"')\n pointer.setXYpos(anchor.xpos(), anchor.ypos()+120)\n pointer['tile_color'].setValue(topnode_color)\n\n\ndef create_dots(side=False):\n # Create 
dot nodes\n nodes = nuke.selectedNodes()\n dots = list()\n for node in nodes:\n unselect()\n pos = get_pos(node)\n if not side:\n select([node])\n dot = nuke.createNode('Dot', inpanel=False)\n if side:\n set_pos(dot, pos[0] - grid[0], pos[1])\n dot.setInput(0, node)\n else:\n set_pos(dot, pos[0], pos[1] + grid[1]*2)\n dots.append(dot)\n unselect(dot)\n select(dots)\n if not nodes:\n dot = nuke.createNode('Dot', inpanel=False)\n\n\n\ndef create_transform():\n # Create a Transform or TransformGeo node depending on node type\n nodes = nuke.selectedNodes()\n if not nodes:\n nuke.createNode('Transform')\n return\n unselect()\n transform_nodes = list()\n for node in nodes:\n node.setSelected(True)\n if 'render_mode' in node.knobs():\n new_node = nuke.createNode('TransformGeo')\n if new_node:\n transform_nodes.append(new_node)\n else:\n new_node = nuke.createNode('Transform')\n if new_node:\n transform_nodes.append(new_node)\n unselect()\n select(transform_nodes)\n\n\ndef read_from_write():\n # Create read nodes from selected write nodes\n nodes = [n for n in nuke.selectedNodes() if 'file' in n.knobs()]\n excluded = ['Read', ]\n for node in nodes:\n if node.Class() in excluded:\n continue\n pos = get_pos(node)\n filepath = node['file'].getValue()\n if '[' in filepath:\n # contains tcl expression. use evaluate instead.\n filepath_eval = node['file'].evaluate()\n \n dirname = os.path.dirname(filepath)\n filename = os.path.basename(filepath)\n if '#' in filename:\n is_sequence = True\n filename_base = filename.split('#')[0]\n elif r'%' in filename:\n is_sequence = True\n filename_base = filename.split(r'%')[0]\n else:\n is_sequence = False\n if is_sequence:\n sequences = nuke.getFileNameList(dirname)\n for seq in sequences:\n if seq.startswith(filename_base):\n filepath = os.path.join(dirname, seq)\n break\n read = nuke.createNode('Read', 'file {{{0}}}'.format(filepath), inpanel=False)\n set_pos(read, pos[0], pos[1] + grid[1]*4)\n # match colorspace\n colorspace = node['colorspace'].value()\n if '(' in colorspace and ')' in colorspace:\n # parse out role\n colorspace = colorspace.split('(')[1].split(')')[0]\n read['colorspace'].setValue(colorspace)\n read['raw'].setValue(node['raw'].getValue())\n\n\n\n\n# Enhanced swap functionality.\ndef swap_node():\n nodes = nuke.selectedNodes()\n for node in nodes:\n if node.inputs() > 1:\n nukescripts.swapAB(node)\n if node.Class() == 'OCIOColorSpace':\n in_colorspace = node['in_colorspace'].value()\n out_colorspace = node['out_colorspace'].value()\n node['out_colorspace'].setValue(in_colorspace)\n node['in_colorspace'].setValue(out_colorspace)\n elif 'direction' in node.knobs():\n direction = node['direction']\n if direction.getValue() == 1:\n direction.setValue(0)\n else:\n direction.setValue(1)\n elif 'invert' in node.knobs():\n invert = node['invert']\n if invert.getValue() == 1:\n invert.setValue(0)\n else:\n invert.setValue(1)\n elif 'reverse' in node.knobs():\n reverse = node['reverse']\n if reverse.getValue() == 1:\n reverse.setValue(0)\n else:\n reverse.setValue(1)\n elif node.Class() == 'Colorspace':\n colorspace_in = node['colorspace_in'].value()\n colorspace_out = node['colorspace_out'].value()\n node['colorspace_out'].setValue(colorspace_in)\n node['colorspace_in'].setValue(colorspace_out)\n\ndef swap_view():\n views = nuke.views()\n if len(views) == 2:\n nuke.activeViewer().setView(views[1]) if nuke.activeViewer().view() == views[0] else nuke.activeViewer().setView(views[0])\n", "id": "12766439", "language": "Python", "matching_score": 0, 
"max_stars_count": 0, "path": "tools/python/nuketools/dag.py" } ]
0
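A small standalone sketch of the nearest-node selection used by get_closest_node() in the tools/python/nuketools/dag.py entry above. It runs outside of Nuke by standing in plain (x, y) tuples for what node.xpos()/node.ypos() would return; the node names and coordinates below are hypothetical, and only the distance-then-min logic mirrors the original.

    import math

    # Hypothetical stand-in data for DAG positions, mimicking node.xpos()/node.ypos().
    positions = {
        'Read1': (0, 0),
        'Blur1': (110, 48),
        'Grade1': (34, 12),
    }

    def closest_to(name):
        # Same rule as dag.get_closest_node(): Euclidean distance to every other
        # node, skipping the node itself, then min() over the distance dict.
        x, y = positions[name]
        distances = {
            other: math.hypot(x - ox, y - oy)
            for other, (ox, oy) in positions.items()
            if other != name
        }
        return min(distances, key=distances.get)

    assert closest_to('Read1') == 'Grade1'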
nOkuda
[ { "content": "\"\"\"Tests for ensuring that functions in pytesserae.score work properly\"\"\"\nimport pytesserae.score as score\n\n\nEPSILON = 1e-6\n\n\ndef test_vanilla_normal():\n \"\"\"Tries out vanilla under normal circumstances\"\"\"\n matching_terms = {'hoc', 'illud'}\n source_distance = 1\n target_distance = 1\n source_counts = {'hoc': 73, 'illud': 98}\n target_counts = {'hoc': 65, 'illud': 99}\n expected = 1.4191252668389973\n assert (\n score.vanilla(\n matching_terms, source_distance, target_distance, source_counts,\n target_counts\n ) - expected < EPSILON\n )\n\n\ndef test_get_two_lowest():\n \"\"\"Test _get_two_lowest when there are two matching terms\"\"\"\n matching_terms = {'a', 'b'}\n counts = {'a': 20, 'b': 25}\n term1, term2 = score._get_two_lowest(matching_terms, counts)\n assert term1 == 'a' and term2 == 'b'\n\n\ndef test_get_two_lowest_more():\n \"\"\"Test _get_two_lowest when there are more than two matching terms\"\"\"\n matching_terms = {'a', 'b', 'c'}\n counts = {'a': 20, 'b': 25, 'c': 30}\n term1, term2 = score._get_two_lowest(matching_terms, counts)\n assert term1 == 'a' and term2 == 'b'\n\n\ndef test_find_distance():\n \"\"\"Test find_distance when there are two matching terms\"\"\"\n matching_terms = {'a', 'b'}\n chunk = ['a', 'b']\n counts = {'a': 20, 'b': 25}\n distance = score.find_distance(matching_terms, chunk, counts)\n assert distance == 1\n\n\ndef test_find_distance_later():\n \"\"\"Test find_distance when there are two matching terms and closest\n distance not in first pair of found terms\"\"\"\n matching_terms = {'a', 'b'}\n chunk = ['a', 'c', 'b', 'a']\n counts = {'a': 20, 'b': 25}\n distance = score.find_distance(matching_terms, chunk, counts)\n assert distance == 1\n\n\ndef test_find_distance_more():\n \"\"\"Test find_distance when there are more than two matching terms\"\"\"\n matching_terms = {'a', 'b', 'c'}\n chunk = ['a', 'c', 'b']\n counts = {'a': 20, 'b': 25, 'c': 30}\n distance = score.find_distance(matching_terms, chunk, counts)\n assert distance == 2\n\n\ndef test_find_distance_one():\n \"\"\"Test find_distance when there is one matching term\"\"\"\n matching_terms = {'a'}\n chunk = ['a', 'b', 'a']\n counts = {'a': 20, 'b': 25}\n distance = score.find_distance(matching_terms, chunk, counts)\n assert distance == 2\n\n\ndef test_find_distance_one_later():\n \"\"\"Test find_distance when there is one matching term and closest distance\n not in first pair of found terms\"\"\"\n matching_terms = {'a'}\n chunk = ['a', 'b', 'a', 'a']\n counts = {'a': 20, 'b': 25}\n distance = score.find_distance(matching_terms, chunk, counts)\n assert distance == 1\n", "id": "12678262", "language": "Python", "matching_score": 2.8494486808776855, "max_stars_count": 0, "path": "tests/test_score.py" }, { "content": "\"\"\"Module for Tesserae scoring\"\"\"\nimport math\n\n\ndef vanilla(\n matching_terms,\n source_distance, target_distance,\n source_counts, target_counts,\n):\n \"\"\"Calculates the Tesserae score between source and target units\n\n Parameters\n ----------\n matching_terms : {str}\n A set of words found to match between the source and target\n source_distance, target_distance : int\n Distance between least frequent matching terms for source and\n target, respectively\n source_counts, target_counts : {str: int}\n A dictionary of word counts to consult in looking up frequency\n information\n\n Returns\n -------\n float\n The Tesserae score between source and target units\n\n Notes\n -----\n The scoring function is defined in [1]_.\n\n Note that 
frequency for some word x in some text y refers to the number of\n times x appears in y divided by the total number of tokens in y.\n\n score = ln (\n (\n sum([1/f(t) for t in matching_terms]) +\n sum([1/f(s) for s in matching_terms])\n )\n / (d_t + d_s)\n )\n * f(t) is the frequency of a matching term in the target\n * f(s) is the frequency of a matching term in the source\n * d_t = target_distance\n * d_s = source_distance\n\n References\n ----------\n .. [1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, \"Modeling\n the Scholars: Detecting Intertextuality through Enhanced Word-Level\n N-Gram Matching,\" Digital Scholarship in the Humanities vol. 30.4, pp.\n 503-515, 2014.\n\n Examples\n --------\n Begin with a unit from the source and from the target.\n\n >>> source_chunk = ['a', 'b']\n >>> target_chunk = ['a', 'c', 'b']\n\n Consider the terms that match across both units.\n\n >>> matching_terms = {'a', 'b'}\n\n Count statistics for source and target texts.\n\n >>> source_counts = {'a': 10, 'b': 50, 'c': 25}\n >>> target_counts = {'a': 4, 'b': 73, 'c': 15}\n\n Calculate distance information.\n\n >>> source_distance = score.find_distance(\n ... matching_terms, source_chunk, source_counts)\n >>> target_distance = score.find_distance(\n ... matching_terms, target_chunk, target_counts)\n\n Now score the units.\n\n >>> score.vanilla(\n ... matching_terms, source_distance, target_distance, source_counts,\n ... target_counts)\n 2.4411948928528475\n\n \"\"\"\n target_size = sum([v for v in target_counts.values()])\n source_size = sum([v for v in source_counts.values()])\n return math.log(\n (\n sum([1 / (target_counts[t]/target_size) for t in matching_terms]) +\n sum([1 / (source_counts[s]/source_size) for s in matching_terms])\n ) / (target_distance + source_distance)\n )\n\n\ndef _get_two_lowest(matching_terms, counts):\n \"\"\"Gets two lowest frequency matching terms\n\n Assumes that len(matching_terms) >= 2\n\n Parameters\n ----------\n matching_terms : {str}\n The set of matching words\n counts : {str: int}\n A dictionary of word counts for the text from which the chunk\n comes\n\n Returns\n -------\n lowest_term : str\n lowest frequency term\n next_lowest_term : str\n second lowest frequency term\n\n Examples\n --------\n >>> matching_terms = {'a', 'b'}\n >>> counts = {'a': 5, 'b': 10}\n >>> _get_two_lowest(matching_terms, counts)\n 'a', 'b'\n\n >>> matching_terms = {'a', 'b', 'c'}\n >>> counts = {'a': 5, 'b': 10, 'c': 1}\n >>> _get_two_lowest(matching_terms, counts)\n 'c', 'a'\n \"\"\"\n assert len(matching_terms) >= 2, 'not enough matching terms'\n match_tuple = tuple(matching_terms)\n # lowest and next_lowest are tuples of (term, count)\n lowest = (match_tuple[0], counts[match_tuple[0]])\n next_lowest = (match_tuple[1], counts[match_tuple[1]])\n if lowest[1] > next_lowest[1]:\n tmp = lowest\n lowest = next_lowest\n next_lowest = tmp\n for term in match_tuple[2:]:\n term_count = counts[term]\n if term_count < lowest[1]:\n next_lowest = lowest\n lowest = (term, term_count)\n elif term_count < next_lowest[1]:\n next_lowest = (term, term_count)\n return lowest[0], next_lowest[0]\n\n\ndef _get_indices(term, chunk):\n \"\"\"Get indices where term appears in chunk\n\n Parameters\n ----------\n term : str\n The token to look for in the `chunk`\n chunk : [str]\n A chunk of text in which to look for instances of `term`\n\n Returns\n -------\n [int]\n Indices in `chunk` where `term` was found\n\n Examples\n --------\n >>> term = 'a'\n >>> chunk = ['a', 'a', 'b', 'b', 'a']\n >>> _get_indices(term, 
chunk)\n [0, 1, 5]\n\n \"\"\"\n return [i for i, token in enumerate(chunk) if token == term]\n\n\ndef find_distance(matching_terms, chunk, counts):\n \"\"\"Calculates distance between matching terms in given chunk\n\n When there is only one matching term, the distance should be the smallest\n between instances of the matching term in chunk.\n\n Where there is more than one matching term, the distance should be the\n smallest between the instances of the lowest frequency matching term and\n the instances of the second lowest frequency matching term.\n\n Parameters\n ----------\n matching_terms : {str}\n The set of matching words\n chunk : [str]\n A chunk of text\n counts : {str: int}\n A dictionary of word counts for the text from which the chunk\n comes\n\n Returns\n -------\n int\n The distance between the two lowest frequency terms in the chunk\n\n Examples\n --------\n >>> matching_terms = {'a', 'b'}\n >>> chunk = ['a', 'b', 'c', 'd']\n >>> counts = {'a': 2, 'b': 2, 'c': 50, 'd': 25}\n >>> find_distance(matching_terms, chunk, counts)\n 1\n\n \"\"\"\n if len(matching_terms) == 1:\n # handle case where same term shows up multiple times in chunk\n term = tuple(matching_terms)[0]\n positions = _get_indices(term, chunk)\n # if this becomes a bottleneck, there is always numpy.diff\n inter_position_diffs = [\n j - i for i, j in zip(positions[:-1], positions[1:])]\n return min(inter_position_diffs)\n\n term1, term2 = _get_two_lowest(matching_terms, counts)\n term1_positions = _get_indices(term1, chunk)\n term2_positions = _get_indices(term2, chunk)\n # following lines might be improved with better algorithm\n min_dist = abs(term1_positions[0] - term2_positions[0])\n for pos1 in term1_positions:\n for pos2 in term2_positions:\n cur_dist = abs(pos2 - pos1)\n if cur_dist < min_dist:\n min_dist = cur_dist\n return min_dist\n", "id": "12402385", "language": "Python", "matching_score": 3.2243566513061523, "max_stars_count": 0, "path": "pytesserae/score.py" }, { "content": "\"\"\"Example for using pytesserae.score\"\"\"\nimport pytesserae.score as score\n\n\ndef _run():\n \"\"\"Example of how to score a match\"\"\"\n matching_terms = {'a', 'b'}\n source_counts = {'a': 10, 'b': 50, 'c': 25}\n target_counts = {'a': 4, 'b': 73, 'c': 15}\n source_chunk = ['a', 'b']\n target_chunk = ['a', 'c', 'b']\n source_distance = score.find_distance(\n matching_terms, source_chunk, source_counts)\n target_distance = score.find_distance(\n matching_terms, target_chunk, target_counts)\n match_score = score.vanilla(\n matching_terms, source_distance, target_distance, source_counts,\n target_counts)\n print('Calculated score:', match_score)\n\n\nif __name__ == '__main__':\n _run()\n", "id": "4007399", "language": "Python", "matching_score": 0.9032250046730042, "max_stars_count": 0, "path": "examples/example_score.py" }, { "content": "from setuptools import setup, find_packages\n\nsetup(\n name='pytesserae',\n version='0.0.1',\n description='Tesserae v5 sandbox',\n url='https://github.com/nOkuda/pytesserae',\n license='MIT',\n classifiers=[\n 'Development Status :: 1 - Science/Research',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.5',\n ],\n keywords='Tesserae',\n python_requires='~=3.5',\n packages=find_packages(exclude=['examples', 'tests', 'scripts']),\n install_requires=[\n 'regex',\n ],\n include_package_data=True,\n)\n", "id": "12096433", "language": "Python", "matching_score": 0.4404376745223999, "max_stars_count": 0, 
"path": "setup.py" }, { "content": "\"\"\"Tesserae queries\n\nThe functions in this directory keep track of how to execute queries and parse\nresults on multiple Tesserae versions.\n\"\"\"\n\nfrom . import data, v3, v4\n", "id": "9279056", "language": "Python", "matching_score": 1.0219509601593018, "max_stars_count": 0, "path": "tesserae_tester/__init__.py" }, { "content": "\"\"\"v3 Tesserae queries\n\nThe functions in this file keep track of how to execute queries and parse\nresults on v3 Tesserae.\n\"\"\"\nimport os\nimport re\nimport subprocess\nimport uuid\n\nimport tesserae_tester as tess\n\n\nSEARCH_BINS = {\n 'vanilla': 'read_table.pl'\n }\n\n\ndef get_query_results(v3path, query):\n \"\"\"Executes query in V3 and return results\"\"\"\n v3bin = os.path.join(v3path, 'cgi-bin')\n v3search = os.path.join(v3bin, SEARCH_BINS[query.searchtype])\n results_dir = '/tmp/'+'tess'+str(uuid.uuid4())\n subprocess.run([\n v3search,\n '--target', query.targettext,\n '--source', query.sourcetext,\n '--unit', query.unit,\n '--feature', query.feature,\n '--freq_basis', query.freq_basis,\n '--score', query.score,\n '--stop', query.stop,\n '--stbasis', query.stbasis,\n '--dist', query.dist,\n '--dibasis', query.dibasis,\n '--cutoff', query.cutoff,\n '--binary', results_dir], check=True)\n v3result = os.path.join(v3bin, 'read_bin.pl')\n results_file = '/tmp/'+'tess'+str(uuid.uuid4())\n with open(results_file, 'w') as ofh:\n subprocess.run([\n v3result,\n results_dir,\n '--export', 'tab'], stdout=ofh)\n # subprocess.run(['rm', '-rf', results_dir])\n result = tess.data.TesseraeResults('v3', [])\n with open(results_file) as ifh:\n stopwords = _parse_header(ifh)\n result.stopwords = stopwords\n for line in ifh:\n source_words, target_words, shared_words, score = _parse_line(line)\n result.container[\n tess.data.TesseraeMatch(source_words, target_words)] = \\\n tess.data.TesseraeData(shared_words, score)\n return result\n\n\ndef _parse_header(fh):\n \"\"\"Advances file handle past non-results lines\n\n Also returns stopwords list\n \"\"\"\n stopwords = []\n for line in fh:\n if line.startswith('# stopwords'):\n stopwords = line.strip().split()[3:]\n if line.startswith('\"RESULT\"'):\n return stopwords\n\n\ndef _parse_line(line):\n \"\"\"Extracts match information of tab delimited V3 results\n\n return value :: (source words, target words, shared words, score)\n \"\"\"\n entries = line.strip().split('\\t')\n return (\n tess.data.clean_words(entries[4]),\n tess.data.clean_words(entries[2]),\n entries[-2],\n float(entries[-1]))\n", "id": "12669518", "language": "Python", "matching_score": 4.772574424743652, "max_stars_count": 0, "path": "tesserae_tester/v3.py" }, { "content": "\"\"\"Data structures and functions for Tesserae comparisons\n\nEvery Tesserae version must implement a get_query_results function that returns\nan instance of TesseraeResults\n\"\"\"\nimport collections\nimport re\n\n\nclass TesseraeQuery(object):\n \"\"\"Holder class for Tesserae query parameters\"\"\"\n\n def __init__(self, searchtype, source, target):\n self.searchtype = searchtype\n if searchtype == 'vanilla':\n # simple text to text query\n self.targettext = target\n self.sourcetext = source\n # defaults for v3 according to read_table.pl\n self.unit = 'line'\n self.feature = 'stem'\n self.freq_basis = 'texts'\n self.score = 'feature'\n self.stop = '10'\n self.stbasis = 'corpus'\n self.dist = '999'\n self.dibasis = 'freq'\n self.cutoff = '0'\n else:\n raise NotImplementedError(\n 'No query implementation for '+searchtype)\n\n\nclass 
TesseraeResults(object):\n \"\"\"Holder class for Tesserae results\n\n self.container :: {(source words, target words): ((shared words), score)}\n \"\"\"\n\n def __init__(self, version, stopwords):\n self.version = version\n self.stopwords = stopwords\n self.container = {}\n\n\nTesseraeMatch = collections.namedtuple(\n 'TesseraeMatch', 'source_text target_text')\nTesseraeData = collections.namedtuple('TesseraeData', 'match_terms score')\n\n\nNON_ALPHA = re.compile('\\W+', re.UNICODE)\n\n\ndef clean_words(words):\n \"\"\"Normalize text across versions\n\n There seemed to be discrepancies in the way v3 and v4 handled punctuation,\n so we're going to ignore punctuation formatting differences.\n \"\"\"\n return ' '.join(NON_ALPHA.sub(' ', words).strip().split())\n", "id": "9379966", "language": "Python", "matching_score": 1.1678638458251953, "max_stars_count": 0, "path": "tesserae_tester/data.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n\nnotes:\n\n The following program is designed to create stop-word lists for texts being\n used in Tesserae. There are several methods, including methods for deriving\n longer and shorter stop-word lists. These methods are strictly for\n experimentation and are available if the need for them arises in the future\n\n However, the most important method is the following:\n ***This method produces the stop word list***\n \n tenstopwords(doc001, doc002)\n \n parameters: (file.tess, file.tess)\n This method takes two text files of the .tess format as its paramaters,\n and produces a list of ten stop words to be removed from the first\n document, doc001.\n \n\n Additional Methods:\n \n a)readfile(doc) - paramaters: (file.tess); returns a\n dictionary of word-frequency pairs; used by all other methods\n b)topten(frequencydictionary) - parameters:(word-frequency dictionary);\n returns a list of ten most frequent words from a word frequency dictionary\n c)topx(frequencydictionary, x) - parameters: (word-frequency dictionary\n , integer); returns a list of x-length of the most frequent words from the\n frequency dictionary\n d)tfidf(docmen1, docmen2) - parameters(file.tess, file.tess); produces\n tfidf dictionary of words in docmen1\n \n e)\n \n For information on the tfidf please see:\n https://en.wikipedia.org/wiki/Tf%E2%80%93idf\n \nCreated on Sat Oct 28 18:37:18 2017\n@author: mitchellkristie\n\"\"\"\nimport re\nimport math\n\n\"\"\"Reads .tess file; creates word-frequency dictionary\"\"\"\ndef readfile(doc):\n freqdict = dict()\n with open(doc, encoding='utf-8') as f:\n for line in f:\n line = line.lower()\n line = re.sub(\"^<.*\\>\", \"\", line) #removes tess line indexing\n line = re.sub(\"-\", \"\", line)\n line = re.sub(\"[^\\w\\s]\", \" \", line)\n line = line.rsplit() #returns list of the words in the line\n for word in line: #creates raw word count\n if word in freqdict.keys():\n freqdict[word] = freqdict[word] + 1\n else:\n freqdict[word] = 1\n wordcount = math.fsum(freqdict.values())\n for word in freqdict.keys(): #creates frequency dictionary\n freqdict[word] = (freqdict[word]/wordcount)\n\n return freqdict\n\n\"\"\"Reads frequency dictionary; returns top ten most frequent words\"\"\"\ndef topten(frequencydictionary):\n sort = sorted(frequencydictionary,\n key=frequencydictionary.__getitem__,\n reverse=True)\n return sort[0:10]\n\n\"\"\" Returns stop-word list of x length\"\"\"\ndef topx(frequencydictionary, x):\n sort = sorted(frequencydictionary,\n key=frequencydictionary.__getitem__,\n reverse=True)\n return 
sort[0:x]\n\n\"\"\"Reads two .tess files; Returns tfidf list of .tess file docmen1\"\"\" \ndef tfidf(docmen1, docmen2):\n freqdict1 = readfile(docmen1)\n freqdict2 = readfile(docmen2)\n tfidf1 = dict()\n for key in freqdict1:\n if key not in freqdict2:\n x = 1\n else:\n x = 2\n tfidf1[key] = (math.log((freqdict1.get(key)))) * math.log(1+(2/x))\n return tfidf1\n\n\"\"\"Returns list of ten stop words for doc01\"\"\"\ndef tenstopwords(doc01, doc02):\n return topten(tfidf(doc01, doc02))\n\n\"\"\"Returns list stop word list of x length, for doc001\"\"\"\ndef xstopwords(doc001, doc002, x):\n return topx(tfidf(doc001, doc002), x)\n", "id": "10777243", "language": "Python", "matching_score": 1.2189031839370728, "max_stars_count": 0, "path": "pytesserae/tfidf.py" }, { "content": "\"\"\"v4 Tesserae queries\"\"\"\nimport requests\n\nimport tesserae_tester as tess\n\n\nQUERY_FORM = 'author:\"{0}\" AND title:\"{1}\" AND parse_type:\"{2}\"'\n\n\ndef get_query_results(v4path, query):\n \"\"\"Executes query in V4 and return results\n\n Assumes that Solr is reachable on v4path\n \"\"\"\n s_author, s_title = query.sourcetext.split('.')\n t_author, t_title = query.targettext.split('.')\n s_author = ' '.join([a.capitalize() for a in s_author.split()])\n t_author = ' '.join([a.capitalize() for a in t_author.split()])\n s_title = ' '.join([a.capitalize() for a in s_title.split('_')])\n t_title = ' '.join([a.capitalize() for a in t_title.split('_')])\n params = {\n 'wt': 'python',\n 'tess.sq': QUERY_FORM.format(s_author, s_title, query.unit),\n 'tess.sf': 'text',\n 'tess.sfl': 'text',\n 'tess.tq': QUERY_FORM.format(t_author, t_title, query.unit),\n 'tess.tf': 'text',\n 'tess.tfl': 'text',\n 'tess.sw': query.stop,\n 'tess.cut': query.cutoff,\n 'tess.md': query.dist,\n 'tess.metric': query.dibasis,\n 'tess.sb': query.stbasis,\n 'tess.fb': query.freq_basis,\n 'tess.rc': 'false',\n 'start': '0',\n 'rows': '999999'\n }\n response = requests.get(v4path+'latin/compare', params=params)\n response.raise_for_status()\n return _solr_to_results(eval(str(response.text)))\n\n\ndef _solr_to_results(solr_result):\n \"\"\"Converts Solr result in TesseraeResults\"\"\"\n result = tess.data.TesseraeResults('v4', solr_result['stopList'])\n for match in solr_result['matches']:\n result.container[tess.data.TesseraeMatch(\n tess.data.clean_words(match['source']['fields']['text']),\n tess.data.clean_words(match['target']['fields']['text']))] = \\\n tess.data.TesseraeData(\n '; '.join(match['terms']), match['score'])\n return result\n", "id": "8537217", "language": "Python", "matching_score": 2.659846782684326, "max_stars_count": 0, "path": "tesserae_tester/v4.py" }, { "content": "\"\"\"Run comparison tests against configured Tesserae versions\"\"\"\nimport argparse\nimport json\nimport os\nimport urllib.request as request\n\nimport tesserae_tester as tess\n\n\ndef _parse_args():\n \"\"\"Parses arguments\"\"\"\n parser = argparse.ArgumentParser(\n description='Run Tesserae version comparison tests')\n parser.add_argument(\n 'config',\n help='Configuration file for comparison tests')\n parser.add_argument(\n 'outdir',\n help='Directory where output will be placed')\n return parser.parse_args()\n\n\ndef _report_setdiff(pairs, same_pairs, container, outdir, version, label):\n \"\"\"If there are items in the set difference, notifies user and dumps\"\"\"\n diff = pairs.difference(same_pairs)\n if diff:\n print('****{0} has unshared matches'.format(version))\n with open(os.path.join(outdir, label+'.'+version+'.out'), 'w') as ofh:\n for item in 
diff:\n ofh.write(str(item))\n ofh.write('\\n\\t')\n ofh.write(str(container[item]))\n ofh.write('\\n')\n\n\ndef compare(r1, r2, label, outdir):\n \"\"\"Compares results\n\n * r1, r2 :: TesseraeResults\n\n The score returned is the sum of the differences of scores between the same\n match pair.\n \"\"\"\n with open(os.path.join(outdir, label+'.results'), 'w') as ofh:\n r1_stop = {s for s in r1.stopwords}\n r2_stop = {s for s in r2.stopwords}\n same_stop = r1_stop.intersection(r2_stop)\n if len(same_stop) != len(r1_stop):\n ofh.write('****Stopword lists do not match (')\n ofh.write(str(len(r1.stopwords)) + ') (')\n ofh.write(str(len(r2.stopwords)) + ')\\n')\n for r1s, r2s in zip(sorted(r1.stopwords), sorted(r2.stopwords)):\n ofh.write('\\t'+r1s+'\\t'+r2s+'\\n')\n if len(r1.container) != len(r2.container):\n ofh.write('****Results do not have same number of matches\\n')\n ofh.write(str(len(r1.container))+' '+str(len(r2.container))+'\\n')\n r1_pairs = {k for k in r1.container}\n r2_pairs = {k for k in r2.container}\n same_pairs = r1_pairs.intersection(r2_pairs)\n ofh.write('####Number of matching matches '+str(len(same_pairs))+'\\n')\n _report_setdiff(\n r1_pairs, same_pairs, r1.container, outdir, r1.version, label)\n _report_setdiff(\n r2_pairs, same_pairs, r2.container, outdir, r2.version, label)\n total_diff = 0.0\n mismatches = []\n for pair in same_pairs:\n diff = abs(r1.container[pair].score - r2.container[pair].score)\n if diff:\n mismatches.append((diff, pair, r1.container[pair],\n r2.container[pair]))\n total_diff += diff\n if mismatches:\n mismatches.sort()\n with open(os.path.join(outdir, label+'.mismatches.out'), 'w') as ofh:\n for mm in mismatches:\n ofh.write(str(mm))\n ofh.write('\\n')\n ofh.write('####Total difference: '+str(total_diff)+'\\n')\n print('####Total difference: ', total_diff)\n return total_diff\n\n\ndef _get_queries():\n \"\"\"Gets queries to test\"\"\"\n result = {\n 'freqbasis': tess.data.TesseraeQuery(\n 'vanilla', 'ovid.ars_amatoria', 'martial.epigrams'),\n }\n result['freqbasis'].freq_basis = 'corpus'\n return result\n\ndef _get_all_queries():\n \"\"\"Gets queries to test\"\"\"\n result = {\n 'vanilla': tess.data.TesseraeQuery(\n 'vanilla', 'ovid.ars_amatoria', 'martial.epigrams'),\n 'phrase': tess.data.TesseraeQuery(\n 'vanilla', 'ovid.ars_amatoria', 'martial.epigrams'),\n 'stopsize': tess.data.TesseraeQuery(\n 'vanilla', 'ovid.ars_amatoria', 'martial.epigrams'),\n 'cutoff': tess.data.TesseraeQuery(\n 'vanilla', 'ovid.ars_amatoria', 'martial.epigrams'),\n 'dist': tess.data.TesseraeQuery(\n 'vanilla', 'ovid.ars_amatoria', 'martial.epigrams'),\n 'dibasis_span': tess.data.TesseraeQuery(\n 'vanilla', 'ovid.ars_amatoria', 'martial.epigrams'),\n 'dibasis_span_target': tess.data.TesseraeQuery(\n 'vanilla', 'ovid.ars_amatoria', 'martial.epigrams'),\n 'dibasis_span_source': tess.data.TesseraeQuery(\n 'vanilla', 'ovid.ars_amatoria', 'martial.epigrams'),\n 'dibasis_freq_target': tess.data.TesseraeQuery(\n 'vanilla', 'ovid.ars_amatoria', 'martial.epigrams'),\n 'dibasis_freq_source': tess.data.TesseraeQuery(\n 'vanilla', 'ovid.ars_amatoria', 'martial.epigrams'),\n 'stbasis_source': tess.data.TesseraeQuery(\n 'vanilla', 'ovid.ars_amatoria', 'martial.epigrams'),\n 'stbasis_target': tess.data.TesseraeQuery(\n 'vanilla', 'ovid.ars_amatoria', 'martial.epigrams'),\n 'stbasis_both': tess.data.TesseraeQuery(\n 'vanilla', 'ovid.ars_amatoria', 'martial.epigrams'),\n 'freqbasis': tess.data.TesseraeQuery(\n 'vanilla', 'ovid.ars_amatoria', 'martial.epigrams'),\n }\n 
result['phrase'].unit = 'phrase'\n result['stopsize'].stop = '50'\n result['cutoff'].cutoff = '8.1'\n result['dist'].dist = '5'\n result['dibasis_span'].dibasis = 'span'\n result['dibasis_span_target'].dibasis = 'span_target'\n result['dibasis_span_source'].dibasis = 'span_source'\n result['dibasis_freq_target'].dibasis = 'freq_target'\n result['dibasis_freq_source'].dibasis = 'freq_source'\n result['stbasis_source'].stbasis = 'source'\n result['stbasis_target'].stbasis = 'target'\n result['stbasis_both'].stbasis = 'both'\n result['freqbasis'].freq_basis = 'corpus'\n return result\n\n\ndef _run(args):\n \"\"\"Runs tests\"\"\"\n with open(args.config) as ifh:\n config = json.load(ifh)\n try:\n request.urlopen(config['v4path'])\n except:\n print('Cannot connect to v4')\n return\n queries = _get_queries()\n os.makedirs(args.outdir, exist_ok=True)\n for label, query in queries.items():\n v3results = tess.v3.get_query_results(config['v3path'], query)\n otherresults = tess.v4.get_query_results(config['v4path'], query)\n compare(v3results, otherresults, label, args.outdir)\n\n\nif __name__ == '__main__':\n _run(_parse_args())\n", "id": "3651091", "language": "Python", "matching_score": 3.4719252586364746, "max_stars_count": 0, "path": "run.py" }, { "content": "\"\"\"A template Python file for convenience\"\"\"\nimport argparse\n\n\ndef _parse_args():\n \"\"\"Parses command line arguments\"\"\"\n parser = argparse.ArgumentParser(description='Template file')\n parser.add_argument(\n 'required',\n help='an example required argument')\n return parser.parse_args()\n\n\ndef _run():\n \"\"\"Displays input command line arguments\"\"\"\n args = _parse_args()\n print('Required argument value:', args.required)\n\n\nif __name__ == '__main__':\n _run()\n", "id": "6341411", "language": "Python", "matching_score": 2.2072322368621826, "max_stars_count": 0, "path": "examples/template.py" }, { "content": "\"\"\"Example for using tokenize function\"\"\"\nimport argparse\nimport pytesserae.handler as handler\n\n\ndef _parse_args():\n parser = argparse.ArgumentParser(\n description='Normalizes and tokenizes a line from a .tess file')\n parser.add_argument(\n 'text',\n help='A .tess file')\n return parser.parse_args()\n\n\ndef _run():\n args = _parse_args()\n with open(args.text, 'r') as ifh:\n for line in ifh:\n print(handler.tokenize(line))\n\n\nif __name__ == '__main__':\n _run()\n", "id": "11166701", "language": "Python", "matching_score": 1.7104320526123047, "max_stars_count": 0, "path": "examples/example_tokenize.py" }, { "content": "\"\"\"Example of the match function\"\"\"\nimport pytesserae.matcher as matcher\n\n\ndef _run():\n source = ['a', 'b', 'c']\n target = ['b', 'c', 'd']\n print(matcher.match(source, target))\n\n\nif __name__ == '__main__':\n _run()\n", "id": "2468989", "language": "Python", "matching_score": 0.6330626010894775, "max_stars_count": 0, "path": "examples/example_match.py" }, { "content": "\"\"\"Lemmatization dictionary generation\n\nGenerates lemmatization data\n\n\"\"\"\nimport gzip\nimport json\nimport os\n\nimport pytesserae.norm\n\n\ndef _generate_lookup(csv_path, normalizer):\n \"\"\"Generates lookup dictionary as JSON\"\"\"\n result = {}\n with open(csv_path) as ifh:\n for line in ifh:\n entries = line.split(',')\n if len(entries) < 3:\n continue\n morphed = normalizer(entries[0])\n lemma = normalizer(entries[2])\n if morphed and lemma:\n if morphed in result:\n result[morphed][lemma] = True\n else:\n result[morphed] = {lemma: True}\n return result\n\n\ndef _run():\n 
\"\"\"Generates lemma dictionary file\"\"\"\n drop_point = os.path.abspath(os.path.join(\n os.path.dirname(os.path.realpath(__file__)), '..', 'pytesserae'))\n latin_lookup = _generate_lookup(\n 'la.lexicon.csv',\n pytesserae.norm.normalize_latin,\n )\n with gzip.open(os.path.join(\n drop_point, 'latin.lemma.json.gz'), 'wb') as ofh:\n ofh.write(json.dumps(latin_lookup).encode('utf-8'))\n greek_lookup = _generate_lookup(\n 'grc.lexicon.csv',\n pytesserae.norm.normalize_greek,\n )\n with gzip.open(os.path.join(\n drop_point, 'greek.lemma.json.gz'), 'wb') as ofh:\n ofh.write(json.dumps(greek_lookup).encode('utf-8'))\n\n\nif __name__ == '__main__':\n _run()\n", "id": "1506516", "language": "Python", "matching_score": 1.244431972503662, "max_stars_count": 0, "path": "scripts/lookup.py" }, { "content": "\"\"\"Test Latin normalization\"\"\"\nfrom pytesserae.norm import normalize_latin\n\n\ndef test_latin_normalization_simple():\n \"\"\"Tests Latin normalization when nothing should change\"\"\"\n word = 'amor'\n expected = 'amor'\n assert(expected == normalize_latin(word))\n\n\ndef test_latin_normalization_lower():\n \"\"\"Tests Latin normalization when capitalization occurs\"\"\"\n word = 'Amor'\n expected = 'amor'\n assert(expected == normalize_latin(word))\n\n\ndef test_latin_normalization_macron():\n \"\"\"Tests Latin normalization when macron occurs\"\"\"\n word = 'linguā'\n expected = 'lingua'\n assert(expected == normalize_latin(word))\n\n\ndef test_latin_normalization_j():\n \"\"\"Tests Latin normalization when j occurs\"\"\"\n word = 'jus'\n expected = 'ius'\n assert(expected == normalize_latin(word))\n\n\ndef test_latin_normalization_v():\n \"\"\"Tests Latin normalization when v occurs\"\"\"\n word = 'verum'\n expected = 'uerum'\n assert(expected == normalize_latin(word))\n\n\ndef test_latin_normalization_jv():\n \"\"\"Tests Latin normalization when both j and v occur\"\"\"\n word = 'juvo'\n expected = 'iuuo'\n assert(expected == normalize_latin(word))\n\n\ndef test_latin_normalization_numbers():\n \"\"\"Tests Latin normalization when a numeral occurs\"\"\"\n word = 'ab2'\n expected = 'ab'\n assert(expected == normalize_latin(word))\n\n\ndef test_latin_normalization_quotes():\n \"\"\"Tests Latin normalization when quotation marks occur\"\"\"\n word = '\"deus\"'\n expected = 'deus'\n assert(expected == normalize_latin(word))\n", "id": "6931511", "language": "Python", "matching_score": 1.1896220445632935, "max_stars_count": 0, "path": "tests/test_latin_norm.py" }, { "content": "\"\"\"Lemmatization functions\n\nGiven a word form, we would like to know its possible lemmata.\n\"\"\"\nimport gzip\nimport json\nimport os\n\nimport pytesserae.norm\n\n\ndef _get_data(language):\n \"\"\"Loads data for lemmatization\n\n Parameters\n ----------\n language : str\n The kind of lemma dictionary to load\n\n Returns\n -------\n {str: {str: bool}}\n A mapping of word forms to possible lemmata\n \"\"\"\n with gzip.open(os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n language+'.lemma.json.gz'), 'rb') as ifh:\n return json.loads(ifh.read().decode('utf-8'))\n\n\n_LATIN_LOOKUP = _get_data('latin')\n_GREEK_LOOKUP = _get_data('greek')\n\n\ndef lemmatize_latin(raw_word):\n \"\"\"Lemmatizes a token according to Latin lemmatization rules\n\n Parameters\n ----------\n raw_word : str\n A token extracted from raw text\n\n Returns\n -------\n [str]\n All possible lemmata for `raw_word`\n\n Examples\n --------\n >>> lemmatize_latin('Juvat')\n '[iuuo]'\n \"\"\"\n normed = 
pytesserae.norm.normalize_latin(raw_word)\n if normed in _LATIN_LOOKUP:\n return [w for w in _LATIN_LOOKUP[normed]]\n return []\n\n\ndef lemmatize_greek(raw_word):\n \"\"\"Lemmatizes a token according to Latin lemmatization rules\n\n Parameters\n ----------\n raw_word : str\n A token extracted from raw text\n\n Returns\n -------\n [str]\n All possible lemmata for `raw_word`\n\n Examples\n --------\n >>> lemmatize_greek('πέμψεις')\n '[πέμπω, πέμψις]'\n \"\"\"\n normed = pytesserae.norm.normalize_greek(raw_word)\n if normed in _GREEK_LOOKUP:\n return [w for w in _GREEK_LOOKUP[normed]]\n return []\n", "id": "4850038", "language": "Python", "matching_score": 2.9865288734436035, "max_stars_count": 0, "path": "pytesserae/lemma.py" }, { "content": "\"\"\"Normalization functions\n\nGiven a word form, we would like to process it into a canonical form against\nwhich we can compare equivalently with other words that have slightly different\nforms. For example, \"Ab\" and \"ab\" should match, despite capitalization\ndifferences.\n\n\"\"\"\nimport regex as re\nimport unicodedata\n\n\nDIGITS = re.compile(r'\\d')\nNONWORDS = re.compile(r'\\W')\nGRAVE = re.compile(r'\\u0300')\nSIGMA = re.compile(r'σ\\b')\n\n\ndef normalize_latin(raw_word):\n \"\"\"Normalizes a token according to Latin normalization rules\n\n Parameters\n ----------\n raw_word : str\n A token extracted from a raw text\n\n Returns\n -------\n str\n The Latin-normalized token\n\n Examples\n --------\n >>> normalize_latin('Juvat')\n 'iuuat'\n\n \"\"\"\n nfkd = unicodedata.normalize('NFKD', raw_word)\n lowercased = nfkd.lower()\n no_digits = DIGITS.sub('', lowercased)\n j_to_i = re.sub('j', 'i', no_digits)\n v_to_u = re.sub('v', 'u', j_to_i)\n return NONWORDS.sub('', v_to_u)\n\ndef normalize_greek(raw_word):\n \"\"\"Normalizes a token according to Greek normalization rules\n\n Parameters\n ----------\n raw_word : str\n A token extracted from a raw text\n\n Returns\n -------\n str\n The Greek-normalized token\n\n Examples\n --------\n >>> normalize_greek('σεμνοὺσ')\n 'σεμνούς'\n\n \"\"\"\n nfkd = unicodedata.normalize('NFKD', raw_word)\n lowercased = nfkd.lower()\n no_digits = DIGITS.sub('', lowercased)\n accented = GRAVE.sub('\\u0301', no_digits)\n sigmas = SIGMA.sub('ς', accented)\n return NONWORDS.sub('', sigmas)\n", "id": "2306252", "language": "Python", "matching_score": 1.167042851448059, "max_stars_count": 0, "path": "pytesserae/norm.py" }, { "content": "\"\"\"A function to prepare .tess texts for analysis\"\"\"\nimport re\n\n\ndef tokenize(line):\n \"\"\"Normalizes and tokenizes a line from a .tess file\n\n Capital letters are changed to lowercase, line numbering and\n extraneous whitespace is removed, and the result is given as\n a list of tokens.\n\n The tokenizer is intended to be applied to a line from a .tess\n file at a later point, e.g.:\n\n with open('example.tess', 'r') as text:\n for line in text:\n tokens = handler.tokenize(line)\n ...\n\n The resulting tokens can then be passed, along with that of\n another file's line, into the matching function, and the result\n of that used in the scoring process.\n\n Parameters\n ----------\n line : str\n Line from a .tess file to be cleaned up and tokenized\n\n Returns\n -------\n [str]\n A list containing each token in the line\n\n \"\"\"\n tokens = line.lower()\n # removes line indexing\n tokens = re.sub(r'^<.*\\>', '', tokens)\n tokens = re.sub(r'[\\-\\t\\n]', '', tokens)\n tokens = re.sub(r'[^\\w\\s]', '', tokens)\n # removes punctuation\n tokens = re.sub(r'[^A-Za-z ]', '', 
tokens)\n tokens = tokens.split(' ')\n return tokens\n", "id": "11788689", "language": "Python", "matching_score": 1.6371279954910278, "max_stars_count": 0, "path": "pytesserae/handler.py" }, { "content": "\"\"\"A function for finding matching tokens in two lists\"\"\"\n\n\ndef match(source, target):\n \"\"\"Returns matching tokens from two lists of words\n\n Parameters\n ----------\n source : [str]\n A list of tokens from the source text\n target : [str]\n A list of tokens from the target text, to be compared\n with the source\n\n Returns\n -------\n [str]\n A list of tokens common to both input lists\n \"\"\"\n return list(set(source).intersection(set((target))))\n", "id": "3278697", "language": "Python", "matching_score": 1.0109587907791138, "max_stars_count": 0, "path": "pytesserae/matcher.py" } ]
1.244432
lhiguer1
[ { "content": "import string\nimport random\nfrom hashlib import md5\n\nimport requests\n\n\nclass TempMail(object):\n \"\"\"\n API Wrapper for service which provides temporary email address.\n\n :param login: (optional) login for email address.\n :param domain: (optional) domain (from current available)\n for email address.\n :param api_domain: (optional) domain for temp-mail api.\n Default value is ``privatix-temp-mail-v1.p.mashape.com``.\n \"\"\"\n\n def __init__(self, api_key, login=None, domain=None, api_domain='privatix-temp-mail-v1.p.rapidapi.com'):\n self.login = login\n self.domain = domain\n self.api_domain = api_domain\n self.api_key = api_key\n\n def __repr__(self):\n return u'<TempMail [{0}]>'.format(self.get_email_address())\n\n @property\n def available_domains(self):\n \"\"\"\n Return list of available domains for use in email address.\n \"\"\"\n if not hasattr(self, '_available_domains'):\n url = 'https://{0}/request/domains/format/json/'.format(\n self.api_domain)\n req = requests.get(url, headers={\n 'x-rapidapi-host': self.domain,\n 'x-rapidapi-key': self.api_key\n })\n domains = req.json()\n setattr(self, '_available_domains', domains)\n return self._available_domains\n\n def generate_login(self, min_length=6, max_length=10, digits=True):\n \"\"\"\n Generate string for email address login with defined length and\n alphabet.\n\n :param min_length: (optional) min login length.\n Default value is ``6``.\n :param max_length: (optional) max login length.\n Default value is ``10``.\n :param digits: (optional) use digits in login generation.\n Default value is ``True``.\n \"\"\"\n chars = string.ascii_lowercase\n if digits:\n chars += string.digits\n length = random.randint(min_length, max_length)\n return ''.join(random.choice(chars) for x in range(length))\n\n def get_email_address(self):\n \"\"\"\n Return full email address from login and domain from params in class\n initialization or generate new.\n \"\"\"\n if self.login is None:\n self.login = self.generate_login()\n\n available_domains = self.available_domains\n if self.domain is None:\n self.domain = random.choice(available_domains)\n elif self.domain not in available_domains:\n raise ValueError('Domain not found in available domains!')\n return u'{0}{1}'.format(self.login, self.domain)\n\n def get_hash(self, email):\n \"\"\"\n Return md5 hash for given email address.\n\n :param email: email address for generate md5 hash.\n \"\"\"\n return md5(email.encode('utf-8')).hexdigest()\n\n def get_mailbox(self, email=None, email_hash=None):\n \"\"\"\n Return list of emails in given email address\n or dict with `error` key if mail box is empty.\n\n :param email: (optional) email address.\n :param email_hash: (optional) md5 hash from email address.\n \"\"\"\n if email is None:\n email = self.get_email_address()\n if email_hash is None:\n email_hash = self.get_hash(email)\n\n url = 'https://{0}/request/mail/id/{1}/format/json/'.format(\n self.api_domain, email_hash)\n req = requests.get(url, headers={\n \"X-Mashape-Key\": self.api_key,\n \"Accept\": \"application/json\"\n })\n return req.json()\n\n def delete_email(self, email, email_hash=None):\n \"\"\"\n Delete a given email in a given email address\n\n :param email: (optional) email address.\n :param email_hash: (optional) md5 hash from email address.\n \"\"\"\n if email_hash is None:\n email_hash = self.get_hash(email)\n\n url = 'https://{0}/request/delete/id/{1}/format/json/'.format(\n self.api_domain, email_hash)\n\n req = requests.get(url, headers={\n \"X-Mashape-Key\": 
self.api_key,\n \"Accept\": \"application/json\"\n })\n return req.json()\n\n def get_attachments(self, email, email_hash=None):\n \"\"\"\n Get attachments of a given email in a given email address\n\n :param email: (optional) email address.\n :param email_hash: (optional) md5 hash from email address.\n \"\"\"\n if email_hash is None:\n email_hash = self.get_hash(email)\n\n url = 'https://{0}/request/attachments/id/{1}/format/json/'.format(\n self.api_domain, email_hash)\n\n req = requests.get(url, headers={\n \"X-Mashape-Key\": self.api_key,\n \"Accept\": \"application/json\"\n })\n return req.json()\n\n def get_message(self, email, email_hash=None):\n \"\"\"\n Get a given email in a given email address\n\n :param email: (optional) email address.\n :param email_hash: (optional) md5 hash from email address.\n \"\"\"\n if email_hash is None:\n email_hash = self.get_hash(email)\n\n url = 'https://{0}/request/one_mail/id/{1}/format/json/'.format(\n self.api_domain, email_hash)\n\n req = requests.get(url, headers={\n \"X-Mashape-Key\": self.api_key,\n \"Accept\": \"application/json\"\n })\n return req.json()\n\n\n def source_message(self, email, email_hash=None):\n \"\"\"\n Source a given email in a given email address\n\n :param email: (optional) email address.\n :param email_hash: (optional) md5 hash from email address.\n \"\"\"\n if email_hash is None:\n email_hash = self.get_hash(email)\n\n url = 'https://{0}/request/source/id/{1}/format/json/'.format(\n self.api_domain, email_hash)\n\n req = requests.get(url, headers={\n \"X-Mashape-Key\": self.api_key,\n \"Accept\": \"application/json\"\n })\n return req.json()\n", "id": "10164862", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "tempMail2.py" } ]
0
sonbyj01
[ { "content": "import plotly.express as px\nimport sys\nimport plotly.graph_objects as go\nfrom pathlib import Path\n\n\ndef generatePlot3(fileAbs):\n current = Path.cwd()\n file = current.joinpath(fileAbs)\n xVal = list()\n yVal = list()\n zVal = list()\n\n with open(file, 'r') as f:\n lines = f.readlines()\n for line in lines:\n line = line.rstrip()\n splits = line.split(',')\n if len(splits) == 3:\n xVal.append(float(splits[0]))\n yVal.append(float(splits[1]))\n zVal.append(float(splits[2]))\n\n fig = px.scatter_3d(x=xVal, y=yVal, z=zVal)\n fig.write_image(str(Path.joinpath(file.parent, file.stem + \".PNG\")))\n\n\ndef generatePlot(fileAbs):\n current = Path.cwd()\n file = current.joinpath(fileAbs)\n xVal = list()\n yVal = list()\n\n with open(file, 'r') as f:\n lines = f.readlines()\n for line in lines:\n line = line.rstrip()\n splits = line.split(',')\n if len(splits) == 2:\n xVal.append(float(splits[0]))\n yVal.append(float(splits[1]))\n\n fig = px.scatter(x=xVal, y=yVal)\n fig.write_image(str(Path.joinpath(file.parent, file.stem + \".PNG\")))\n\n\ndef generateCombinePlot(fileAbsOrig, fileAbsNew):\n current = Path.cwd()\n fileOrig = current.joinpath(fileAbsOrig)\n fileNew = current.joinpath(fileAbsNew)\n\n xValOrig = list()\n yValOrig = list()\n\n xValNew = list()\n yValNew = list()\n\n with open(fileOrig, 'r') as f:\n lines = f.readlines()\n for line in lines:\n line = line.rstrip()\n splits = line.split(',')\n if len(splits) == 2:\n xValOrig.append(float(splits[0]))\n yValOrig.append(float(splits[1]))\n\n with open(fileNew, 'r') as f:\n lines = f.readlines()\n for line in lines:\n line = line.rstrip()\n splits = line.split(',')\n if len(splits) == 2:\n xValNew.append(float(splits[0]))\n yValNew.append(float(splits[1]))\n\n fig = go.Figure()\n fig.add_trace(go.Scatter(x=xValOrig,\n y=yValOrig,\n mode='markers'))\n fig.add_trace(go.Scatter(x=xValNew,\n y=yValNew,\n mode='lines+markers'))\n fig.show()\n fig.write_image(str(Path.joinpath(fileNew.parent, fileNew.stem + \".PNG\")))\n\ndef generateCombinePlot3(fileAbsOrig, fileAbsNew):\n current = Path.cwd()\n fileOrig = current.joinpath(fileAbsOrig)\n fileNew = current.joinpath(fileAbsNew)\n\n xValOrig = list()\n yValOrig = list()\n zValOrig = list()\n\n xValNew = list()\n yValNew = list()\n zValNew = list()\n\n with open(fileOrig, 'r') as f:\n lines = f.readlines()\n for line in lines:\n line = line.rstrip()\n splits = line.split(',')\n if len(splits) == 3:\n xValOrig.append(float(splits[0]))\n yValOrig.append(float(splits[1]))\n zValOrig.append(float(splits[2]))\n\n with open(fileNew, 'r') as f:\n lines = f.readlines()\n for line in lines:\n line = line.rstrip()\n splits = line.split(',')\n if len(splits) == 3:\n xValNew.append(float(splits[0]))\n yValNew.append(float(splits[1]))\n zValNew.append(float(splits[2]))\n\n fig = go.Figure()\n fig.add_trace(go.Scatter3d(x=xValOrig,\n y=yValOrig,\n z=zValOrig,\n mode='markers'))\n fig.add_trace(go.Scatter3d(x=xValNew,\n y=yValNew,\n z=zValNew,\n mode='markers'))\n fig.show()\n fig.write_image(str(Path.joinpath(fileNew.parent, fileNew.stem + \".PNG\")))\n\n\ndef main():\n if str(sys.argv[1]) == '3':\n generatePlot3(sys.argv[2])\n elif str(sys.argv[1]) == '4': #2d\n generateCombinePlot(sys.argv[2], sys.argv[3])\n elif str(sys.argv[1]) == '5': #3d\n generateCombinePlot3(sys.argv[2], sys.argv[3])\n else:\n generatePlot(sys.argv[2])\n\n\nif __name__ == '__main__':\n main()\n", "id": "3011762", "language": "Python", "matching_score": 1.4073950052261353, "max_stars_count": 0, "path": 
"plotter/graph.py" }, { "content": "#!/usr/bin/env python3\n\nimport plotly.graph_objects as go\nimport plotly\nimport pandas as pd\n\nold_data = pd.read_pickle('./weather_data.pickle')\n\n# create CSV file for readability\nold_data.to_csv('./weather_data.csv', encoding='utf-8')\n\n# generate plotly graph\ndate_time = []\ntemperature = []\ndew_point = []\nhumidity = []\nrainfall = []\n\nfor index, row in old_data.iterrows():\n date_time.append('{} {}, {}-{}:{}'.format(row['Month'],\n row['Day'],\n row['Year'],\n row['Hour'],\n row['Minute']))\n temperature.append(row['Temperature'])\n dew_point.append(row['Dew Point'])\n humidity.append(row['Humidity'])\n rainfall.append(row['Rainfall'])\n\nfig = go.Figure()\n\nfig.add_trace(\n go.Scatter(x=date_time, y=temperature, name='Temperature',\n line=dict(color='royalblue', width=2))\n)\nfig.add_trace(\n go.Scatter(x=date_time, y=dew_point, name='Dew Point',\n line=dict(color='firebrick', width=2))\n)\nfig.add_trace(\n go.Scatter(x=date_time, y=humidity, name='Humidity',\n line=dict(color='royalblue', width=2, dash='dot'))\n)\nfig.add_trace(\n go.Scatter(x=date_time, y=rainfall, name='Rainfall',\n line=dict(color='firebrick', width=2, dash='dot'))\n)\n\nfig.update_layout(\n updatemenus=[\n dict(\n active=0,\n buttons=list([\n dict(label='Weather Data',\n method='update',\n args=[{'visible': [True, True, True, True]},\n {'title': 'Weather Data'}]),\n dict(label='Temperature',\n method='update',\n args=[{'visible': [True, False, False, False]},\n {'title': 'Temperature'}]),\n dict(label='Dew Point',\n method='update',\n args=[{'visible': [False, True, False, False]},\n {'title': 'Dew Point'}]),\n dict(label='Humidity',\n method='update',\n args=[{'visible': [False, False, True, False]},\n {'title': 'Humidity'}]),\n dict(label='Rainfall',\n method='update',\n args=[{'visible': [False, False, False, True]},\n {'title': 'Rainfall'}]),\n dict(label='Temperature and Dew Point',\n method='update',\n args=[{'visible': [True, True, False, False]},\n {'title': 'Rainfall'}]),\n dict(label='Humidity and Rainfall',\n method='update',\n args=[{'visible': [False, False, True, True]},\n {'title': 'Humidity and Rainfall'}]),\n ])\n )\n ]\n)\n\nfig.show()\nplotly.offline.plot(fig, auto_open=False, show_link=False)\n\n# ---------------------------------------------\n\n# data = [go.Scatter(x=date_time, y=temperature, name='Temperature',\n# line=dict(color='royalblue', width=2)),\n# go.Scatter(x=date_time, y=dew_point, name='Dew Point',\n# line=dict(color='firebrick', width=2)),\n# go.Scatter(x=date_time, y=humidity, name='Humidity',\n# line=dict(color='royalblue', width=2, dash='dot')),\n# go.Scatter(x=date_time, y=rainfall, name='Rainfall',\n# line=dict(color='firebrick', width=2, dash='dot')),\n# ]\n#\n# update_menus = list(\n# [dict(active=-1,\n# buttons=list([\n# dict(label='Weather Data',\n# method='update',\n# args=[{'visible': [True, True, True, True]},\n# {'title': 'Weather Data'}]),\n# dict(label='Temperature',\n# method='update',\n# args=[{'visible': [True, False, False, False]},\n# {'title': 'Temperature'}]),\n# dict(label='Dew Point',\n# method='update',\n# args=[{'visible': [False, True, False, False]},\n# {'title': 'Dew Point'}]),\n# dict(label='Humidity',\n# method='update',\n# args=[{'visible': [False, False, True, False]},\n# {'title': 'Humidity'}]),\n# dict(label='Rainfall',\n# method='update',\n# args=[{'visible': [False, False, False, True]},\n# {'title': 'Rainfall'}]),\n# ]),\n# )\n# ]\n# )\n#\n# layout = dict(title='Weather Data', 
showlegend=True,\n# updatemenus=update_menus)\n#\n# fig = dict(data=data, layout=layout)\n#\n# plotly.offline.plot(fig, auto_open=True, show_link=False)\n\n# ---------------------------------------------\n\n# fig.add_trace(go.Scatter(x=date_time, y=temperature, name='Temperature',\n# line=dict(color='royalblue', width=2)))\n# fig.update_layout(title='Recorded Weather Data',\n# xaxis_title='[Month] [Day], [Year]-[Hour]:[Minute]',\n# yaxis_title='Temperature (F)')\n# fig.show()\n", "id": "4020117", "language": "Python", "matching_score": 2.826820135116577, "max_stars_count": 0, "path": "graph.py" }, { "content": "#!/usr/bin/env python3\n\n# Weather Scrapper program from wunderground\n# @sonbyj01\n\nfrom datetime import datetime\nfrom bs4 import BeautifulSoup\nimport sqlalchemy\nimport requests\nimport pandas as pd\nimport pymongo\n\n# URL of specific location where data will be pulled from\nURL = \"https://www.wunderground.com/weather/us/ny/manhasset/11030\"\n\n# \"database_dialect://user:password@host/database\"\nMONGODB_URL = 'mongodb://{{IP ADDRESS || HOST NAME}}:{{PORT}}/'\nMONGODB_DATABASE = 'weather_scrapping'\nMONGODB_COLLECTION = 'data'\nPOSTGRES_URL = 'postgresql://{{USERNAME}}:{{PASSWORD}}!@{{IP ADDRESS || HOST NAME}}:{{PORT}/{{DATABASE}}'\nPOSTGRES_TABLE = 'data'\n\n# specify storing method based on individual case\nSTORING_METHOD = {\n 'Pickle': False,\n 'MongoDB': True,\n 'Postgres': False\n}\n\n\n# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_dict.html\n# https://www.datasciencelearner.com/insert-pandas-dataframe-into-mongodb/\ndef _to_mongodb(df):\n client = pymongo.MongoClient(MONGODB_URL)\n db = client[MONGODB_DATABASE]\n collection = db[MONGODB_COLLECTION]\n\n df_dict = df.to_dict('records')\n collection.insert_one(df_dict[0])\n\n\n# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_sql.html\n# http://www.jan-langfellner.de/storing-a-pandas-dataframe-in-a-postgresql-database/\ndef _to_postgres(df):\n engine = sqlalchemy.create_engine(POSTGRES_URL)\n con = engine.connect()\n df.to_sql(POSTGRES_TABLE, con, if_exists='append')\n con.close()\n return\n\n\ndef _to_pickle_file(df):\n # first checks if there's existing pickle file\n try:\n old_data = pd.read_pickle('./weather_data.pickle')\n except FileNotFoundError as fnf:\n old_data = pd.DataFrame()\n\n # sees if there's previous data and append, otherwise move on\n if old_data.empty:\n old_data = df\n else:\n old_data = old_data.append(df, ignore_index=True)\n\n # stores into pickle file\n old_data.to_pickle('./weather_data.pickle')\n return\n\n\ndef store_information(df):\n if STORING_METHOD.get('Pickle'):\n _to_pickle_file(df)\n if STORING_METHOD.get('MongoDB'):\n _to_mongodb(df)\n if STORING_METHOD.get('Postgres'):\n _to_postgres(df)\n return\n\n\ndef gather_information():\n # requests for the web page\n response = requests.get(URL)\n soup = BeautifulSoup(response.text, 'html.parser')\n data = {}\n\n # gets current date and time\n now = datetime.now()\n data['Day'] = [now.strftime('%d')]\n data['Month'] = [now.strftime('%m')]\n data['Year'] = [now.strftime('%Y')]\n data['Hour'] = [now.strftime('%H')]\n data['Minute'] = [now.strftime('%M')]\n\n # retrieve temperature\n temperature_results = soup.find('span', class_=\"wu-value wu-value-to\")\n temperature = str(temperature_results.contents[0])\n data['Temperature'] = [temperature]\n # print('{}: {}'.format('Temperature', temp))\n\n # retrieve additional information\n additional_information = {'Pressure': 
'test-false wu-unit wu-unit-pressure ng-star-inserted',\n 'Visibility': 'test-false wu-unit wu-unit-distance ng-star-inserted',\n 'Dew Point': 'test-false wu-unit wu-unit-temperature ng-star-inserted',\n 'Humidity': 'test-false wu-unit wu-unit-humidity ng-star-inserted',\n 'Rainfall': 'test-false wu-unit wu-unit-rain ng-star-inserted',\n 'Snow Depth': 'test-false wu-unit wu-unit-snow ng-star-inserted'}\n for info in additional_information.keys():\n temp_results = soup.find('span', class_=additional_information[info])\n temp = str(temp_results.contents[3].contents[0])\n data[info] = [temp]\n\n # converts data into data frame\n store_information(pd.DataFrame(data))\n return\n\n\ndef main():\n gather_information()\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "3782122", "language": "Python", "matching_score": 2.442309617996216, "max_stars_count": 0, "path": "scrapper.py" }, { "content": "#!/usr/bin/env python3\n\nimport smtplib\nimport ssl\nimport random\nimport pickle\nimport time\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\n\n\nclass RandAndSend:\n def __init__(self, addresses_text=None):\n # loads existing pickle file that contains updated list of addresses to choose from\n # otherwise create a new list of addresses from text file specified\n try:\n with open('data.pickle', 'rb') as f:\n self.data = pickle.load(f)\n except FileNotFoundError as fnf:\n print(fnf)\n self.data = []\n with open(addresses_text, 'r') as f:\n content = f.readlines()\n self.data = [x.strip() for x in content]\n\n def add_new_emails(self, new_emails):\n with open(new_emails, 'r') as f:\n content = f.readlines()\n self.data.extend([x.strip() for x in content])\n self._dump_pickle()\n\n def _dump_pickle(self):\n with open('data.pickle', 'wb') as f:\n pickle.dump(self.data, f)\n\n def run(self):\n self._send(self._select())\n self._dump_pickle()\n\n def _select(self):\n try:\n random_number = random.randint(0, len(self.data) - 1)\n return self.data.pop(random_number)\n except (IndexError, ValueError) as err:\n print(err)\n exit()\n\n def _send(self, to_email):\n try:\n with open('.credentials', 'r') as f:\n self.from_email = f.readline()\n self.from_password = f.readline()\n except FileNotFoundError as fnf:\n print(fnf)\n exit()\n\n self.from_port = 465 # SSL\n\n # set up email packet\n self.message = MIMEMultipart('alternative')\n self.message['Subject'] = 'Daily Prayer Request'\n self.message['From'] = 'Prayer Request'\n self.message['To'] = to_email\n\n # set up message\n self.text = \"\"\"\\\n Hey you, today's your day!\n Fill the Google Form for tonight's prayer: \n https://forms.gle/4VzjZjRuxV1CTN1V9\n \"\"\"\n\n self.part = MIMEText(self.text, 'plain')\n self.message.attach(self.part)\n\n # create a secure SSL context\n self.context = ssl.create_default_context()\n try:\n with smtplib.SMTP_SSL('smtp.gmail.com', self.from_port, context=self.context) as server:\n server.login(self.from_email, self.from_password)\n server.sendmail(self.from_email, to_email, self.message.as_string())\n except:\n time.sleep(5)\n self._send(to_email)\n\n\ndef main():\n RandAndSend('emails.txt').run()\n\n\nif __name__ == '__main__':\n main()\n", "id": "10129621", "language": "Python", "matching_score": 0.12516231834888458, "max_stars_count": 0, "path": "rand_and_send.py" }, { "content": "from pathlib import Path\n\nimport sys\nclass Tree:\n def __init__(self, directory, save_location):\n assert isinstance(directory, Path), 'Not a Path object'\n assert isinstance(save_location, Path), 
'Not a Path object'\n\n self.directory = directory\n self.directory_name = self.directory.parts[-1]\n self.save_location = save_location\n\n ## Changeable ##\n self.text_name = 'tree.txt'\n self.spaces = 2\n self.file_format = '- '\n self.folder_format = '+ '\n ################\n\n self.generate_tree()\n\n def generate_tree(self):\n with open(str(Path.joinpath(self.save_location, self.text_name)), 'w', encoding='UTF-8') as f:\n f.write('Tree Diagram for \\'{}\\' path.\\n\\n'.format(self.directory))\n f.write('{}{}{}'.format(self.folder_format, self.directory_name, '\\n'))\n\n f.write(self.recursive_generate_tree(self.directory))\n\n def recursive_generate_tree(self, folder_path):\n tree = ''\n for path in Path(folder_path).glob('*'):\n tree += self.generate_line(path)\n if path.is_dir():\n tree += self.recursive_generate_tree(path)\n return tree\n \n def generate_line(self, path):\n path_relative = path.relative_to(self.directory)\n number = len(path_relative.parts)\n if path.is_dir():\n return (len(self.folder_format) + self.spaces * number) * ' ' + self.folder_format + str(path_relative.parts[-1]) + '\\n'\n else:\n return (len(self.folder_format) + self.spaces * number) * ' ' + self.file_format + str(path_relative.parts[-1]) + '\\n'\n", "id": "3700483", "language": "Python", "matching_score": 2.208242177963257, "max_stars_count": 0, "path": "tree.py" }, { "content": "#!/usr/bin/env\nfrom pathlib import Path\nimport sys\n\nfrom tree import Tree\n\ndef main():\n directory = input('Enter absolute path of directory: ')\n save_location = input('Enter absolute path of where you want to save text file: ')\n\n if Path(directory).is_absolute():\n directory_path = Path(directory)\n # else:\n # directory_path = Path.joinpath(Path.cwd(), Path(directory)).resolve()\n\n if Path(save_location).is_absolute():\n save_location_path = Path(save_location)\n # else:\n # save_location_path = Path.joinpath(Path.cwd(), Path(save_location)).resolve()\n \n # print(directory_path)\n # print(save_location_path)\n # sys.exit()\n Tree(directory_path, save_location_path)\n\nif __name__ == \"__main__\":\n main()\n", "id": "6496909", "language": "Python", "matching_score": 1.1028722524642944, "max_stars_count": 0, "path": "terminal.py" }, { "content": "#!/usr/bin/env python3\n\n##############\n# References #\n##############\n# https://pymotw.com/2/zipfile/\n\nfrom pathlib import Path\nimport hashlib\nimport zipfile\nimport zlib\nimport sys\n\n\nclass SourceFiles:\n def __init__(self, source):\n if Path(source).is_absolute():\n self.source_path = Path(source)\n else:\n current_directory = Path.cwd()\n self.source_path = Path.joinpath(current_directory, source)\n\n self.records_text_name = 'records.txt'\n self.records_text_path = Path.joinpath(self.source_path, self.records_text_name)\n self.zip = ''\n\n # self.folder_name = self.source_path.relative_to(self.source_path.parents[0])\n\n # try:\n # with open(self.records_text_path, 'r', encoding='utf-8') as f:\n # self.hash = f.readline()\n # except FileNotFoundError as fnf:\n # pass\n\n self.records_file = []\n self.records_folder = []\n self.records = {}\n\n # keeps track of each path and whether it's a file/folder\n for path in self.source_path.glob('**/*'):\n if path.is_file():\n if path != self.records_text_path:\n self.records[path] = 0\n self.records_file.append(path)\n\n elif path.is_dir():\n self.records[path] = 1\n self.records_folder.append(path)\n\n # self._generate_hash()\n self.generate_text_file()\n\n def _generate_hash(self):\n self.hash = hashlib.md5()\n 
for record in self.records.keys():\n if record != self.records_text_path:\n self.hash.update(str(record).encode('utf-8'))\n\n def get_record(self):\n return self.records\n\n def generate_text_file(self, path=None):\n if path is None:\n absolute_path = self.source_path\n elif Path(path).is_absolute():\n absolute_path = Path(path)\n else:\n absolute_path = Path.joinpath(self.source_path, path)\n\n file = Path.joinpath(absolute_path, self.records_text_name)\n\n with open(file, 'w', encoding='utf-8') as f:\n # f.write('{}{}'.format(self.hash.hexdigest(), '\\n\\n'))\n f.write('Directories\\n')\n for folder in self.records_folder:\n f.write('{}{}'.format(str(folder), '\\n'))\n f.write('\\nFiles\\n')\n for file in self.records_file:\n f.write('{}{}'.format(str(file), '\\n'))\n\n def create_zip_file(self):\n self.zip = zipfile.ZipFile('{}{}'.format(self.source_path, '.zip'), mode='w')\n\n try:\n compression = zipfile.ZIP_DEFLATED\n except:\n print('something\\'s wrong')\n\n modes = {\n zipfile.ZIP_DEFLATED: 'deflated',\n zipfile.ZIP_STORED: 'stored'\n }\n\n for path in self.records_file:\n new_path = path.relative_to(self.source_path.parents[0])\n\n try:\n self.zip.write(path, compress_type=compression, arcname=new_path)\n except:\n print('something\\'s wrong')\n\n self.zip.close()\n", "id": "206539", "language": "Python", "matching_score": 2.679824113845825, "max_stars_count": 0, "path": "src/SourceFiles.py" }, { "content": "#!/usr/bin/env python3\nfrom shutil import copy2\nfrom pathlib import Path\nimport sys\n\nfrom .SourceFiles import SourceFiles\n\n\nclass SimpleCopy:\n def __init__(self, source):\n assert isinstance(source, SourceFiles), 'Not a SourceFiles object.'\n self.source_object = source\n self.source_parent = self.source_object.source_path.parents[0]\n\n def simple_copy2(self, target):\n try:\n target_folder = Path(target)\n except TypeError as fnf:\n sys.exit()\n\n if not target_folder.exists():\n target_folder.mkdir(parents=True)\n\n for source_folder_path in self.source_object.records_folder:\n source_folder_path = Path(source_folder_path)\n source_folder_relative_path = source_folder_path.relative_to(self.source_parent)\n\n target_absolute = target_folder.joinpath(source_folder_relative_path)\n\n if not target_absolute.exists():\n target_absolute.mkdir(parents=True)\n\n for source_file_path in self.source_object.records_file:\n source_file_path = Path(source_file_path)\n source_file_relative_path = source_file_path.relative_to(self.source_parent)\n\n target_absolute = target_folder.joinpath(source_file_relative_path)\n\n copy2(source_file_path, target_absolute)\n", "id": "10372368", "language": "Python", "matching_score": 2.077544927597046, "max_stars_count": 0, "path": "src/SimpleCopy.py" }, { "content": "#!/usr/bin/env python3\n\"\"\"\nReferences:\n- https://github.com/PySimpleGUI/PySimpleGUI/blob/master/DemoPrograms/Demo_Design_Pattern_Multiple_Windows3.py\n\"\"\"\n\nimport PySimpleGUI as sg\nimport time\n\nfrom src.SourceFiles import SourceFiles\nfrom src.SimpleCopy import SimpleCopy\n\nsg.theme('SystemDefault')\nblue = '#005CAE'\ntoday_date = time.strftime('%m-%d-%Y')\n\n\ndef generate_window2(directory):\n column_copy = [\n [sg.Text('I Fcked Up; Save Me', font=('Helvetica', 25), size=(40, 1), justification='center')],\n [sg.Text('* Backup Module Toolkit, @sonbyj01, MIT License, v0.1.')],\n [sg.Text('Date: {}'.format(today_date))],\n [sg.Text(\"Source Folder:\", font=('Helvetica', 15), size=(40, 1), justification='center')],\n [sg.Text('{}'.format(directory), 
justification='center')],\n [sg.Button('Simple Copy', button_color=('white', blue))],\n [sg.Button('Create Zip File', button_color=('white', blue))],\n [sg.Text('Status', justification='left')],\n [sg.Listbox(values=[], size=(80, 20), key='_STATUS_')]\n ]\n\n return [\n [sg.Column(column_copy, element_justification='center')],\n [sg.Button('Exit', button_color=('white', blue))]\n ]\n\n\nclass GUI:\n column_source = [\n [sg.Text('I Fcked Up; Save Me', font=('Helvetica', 25), size=(20, 1), justification='center')],\n [sg.Text('* Backup Module Toolkit, @sonbyj01, MIT License, v0.1.')],\n [sg.Text('Date: {}'.format(today_date))],\n [sg.Text('Source Folder', font=('Helvetica', 15), size=(20, 1), justification='center')],\n [sg.Button('Choose', button_color=('white', blue))]\n ]\n\n layout_source = [\n [\n sg.Column(column_source, element_justification='center')\n ],\n [\n sg.Button('Exit', button_color=('white', blue))\n ]\n ]\n\n def __init__(self):\n self.event_log = list()\n self.window = sg.Window('Backup Module GUI Source Selection', self.layout_source, element_padding=(5, 5))\n self.window2_active = False\n\n while True:\n if not self.window2_active:\n event1, values1 = self.window.read()\n if event1 is None or event1 == 'Exit':\n break\n\n elif event1 == 'Choose':\n self.window2_active = True\n directory = sg.popup_get_folder('Source Folder: ',\n title='Source',\n button_color=('white', blue),\n keep_on_top=True, )\n\n if directory != '':\n if directory is not None:\n self.window.hide()\n layout_copy = generate_window2(directory)\n window2 = sg.Window('Backup Module GUI Copy',\n layout_copy,\n # resizable=True,\n finalize=True)\n source_files = SourceFiles(directory)\n self._update_status(window2.Element('_STATUS_'),\n custom_mes=True,\n message='Source File Object Initialized')\n\n if self.window2_active:\n event2, values2 = window2.read()\n\n if event2 in (sg.WIN_CLOSED, 'Exit'):\n self.window2_active = False\n window2.close()\n self.window.un_hide()\n\n elif event2 == 'Simple Copy':\n directory = sg.popup_get_folder('Target Folder: ',\n title='Target',\n button_color=('white', blue),\n keep_on_top=True, )\n self._update_status(window2.Element('_STATUS_'),\n start=True,\n command='Simple Copy')\n SimpleCopy(source_files).simple_copy2(directory)\n self._update_status(window2.Element('_STATUS_'),\n start=False,\n command='Simple Copy')\n\n elif event2 == 'Create Zip File':\n self._update_status(window2.Element('_STATUS_'),\n start=True,\n command='Create Zip File')\n source_files.create_zip_file()\n self._update_status(window2.Element('_STATUS_'),\n start=False,\n command='Create Zip File')\n\n self.window.close()\n\n def _update_status(self, status_window, start=False, command='', custom_mes=False, message=''):\n now = time.strftime('%Y-%m-%d %H:%M:%S')\n\n if custom_mes:\n self.event_log.append('{}: {}'.format(now, message))\n elif start:\n self.event_log.append('{}: {} started'.format(now, command))\n elif not start:\n self.event_log.append('{}: {} finished'.format(now, command))\n self.event_log.append('')\n\n status_window.Update(values=self.event_log)\n\n\n\ndef main():\n GUI()\n\n\nif __name__ == '__main__':\n main()\n", "id": "6202517", "language": "Python", "matching_score": 3.574878692626953, "max_stars_count": 0, "path": "GUI.py" }, { "content": "#!/bin/bash\nfrom src.SimpleCopy import SimpleCopy\nfrom src.SourceFiles import SourceFiles\n\nsrc_path = r'D:\\_Games\\Gameboy'\n# source = input(\"Enter source drive/directories with full path: \")\ntarget = 
r'D:\\_Games\\Gameboy_Backup'\n# target = input(\"Enter destination drive/directories with full path: \")'\n\nx = SourceFiles(src_path)\ny = SimpleCopy(x)\ny.simple_copy2(target)\n", "id": "12383018", "language": "Python", "matching_score": 2.3159658908843994, "max_stars_count": 0, "path": "backup.py" } ]
2.262104
clembu
[ { "content": "# Mustard Menu Creator addon\r\n# https://github.com/Mustard2/MenuCreator\r\n\r\nbl_info = {\r\n \"name\": \"Menu Creator\",\r\n \"description\": \"Create a custom menu for each Object. To add properties or collections, just right click on the properties and hit Add property to the Menu\",\r\n \"author\": \"Mustard\",\r\n \"version\": (0, 0, 3),\r\n \"blender\": (2, 91, 0),\r\n \"warning\": \"\",\r\n \"wiki_url\": \"https://github.com/Mustard2/MenuCreator\",\r\n \"category\": \"User Interface\",\r\n}\r\n\r\nimport bpy\r\nimport addon_utils\r\nimport sys\r\nimport os\r\nimport re\r\nimport time\r\nimport math\r\nfrom bpy.types import Header, Menu, Panel\r\nfrom bpy.props import *\r\nfrom bpy.app.handlers import persistent\r\nfrom mathutils import Vector, Color\r\nimport webbrowser\r\n\r\n# CLASSES\r\n\r\n# Arrays for ENUM properties\r\n# Array to store different section type\r\nmc_section_type_list = [\r\n (\"DEFAULT\",\"Standard\",\"A simple collection of properties that can be added right clicking on fields -> Add Property to the Menu\"),\r\n (\"COLLECTION\",\"Collection List\",\"Right clicking on them in the Outliner, you can add collections whose elements can be shown/hidden in the Menu. Only one collection will be shown at the same time.\\nIdeal for: Outfit lists\",\"OUTLINER_COLLECTION\",1)\r\n ]\r\n# Array to store possible icons to be used by properties and sections\r\nmc_icon_list = [\r\n (\"NONE\",\"No Icon\",\"No Icon\"),\r\n (\"USER\", \"Face\", \"Face\",\"USER\",1),\r\n (\"HAIR\", \"Hair\", \"Hair\",\"HAIR\",2),\r\n (\"MOD_CLOTH\", \"Cloth\", \"Cloth\",\"MOD_CLOTH\",3),\r\n (\"MATERIAL\", \"Material\", \"Material\",\"MATERIAL\",4),\r\n (\"ARMATURE_DATA\", \"Armature\", \"Armature\",\"ARMATURE_DATA\",5),\r\n (\"MOD_ARMATURE\", \"Armature\", \"Armature\",\"MOD_ARMATURE\",6),\r\n (\"EXPERIMENTAL\", \"Experimental\", \"Experimental\",\"EXPERIMENTAL\",7),\r\n (\"WORLD\", \"World\", \"World\",\"WORLD\",8),\r\n (\"PARTICLEMODE\", \"Comb\", \"Comb\",\"PARTICLEMODE\",9)\r\n ]\r\n\r\n# Class with all the settings variables\r\nclass MC_Settings(bpy.types.PropertyGroup):\r\n \r\n # Update functions for settings\r\n # Function to avoid edit mode and fixed object while exiting edit mode\r\n def mc_ms_editmode_update(self, context):\r\n \r\n if not self.ms_editmode:\r\n for obj in bpy.data.objects:\r\n obj.mc_edit_enable = False\r\n \r\n return\r\n \r\n # Function to save the fixed object pointer to be used until the object is released\r\n def mc_em_fixobj_update(self, context):\r\n \r\n if self.em_fixobj:\r\n self.em_fixobj_pointer = context.active_object\r\n \r\n return\r\n \r\n # Main Settings definitions\r\n ms_editmode: bpy.props.BoolProperty(name=\"Enable Edit Mode Tools\",\r\n description=\"Unlock tools to customize the menu.\\nDisable when the Menu is complete\",\r\n default=False,\r\n update = mc_ms_editmode_update)\r\n ms_advanced: bpy.props.BoolProperty(name=\"Advanced Options\",\r\n description=\"Unlock advanced options\",\r\n default=False)\r\n ms_debug: bpy.props.BoolProperty(name=\"Debug mode\",\r\n description=\"Unlock debug mode.\\nMore messaged will be generated in the console.\\nEnable it only if you encounter problems, as it might degrade general Blender performance\",\r\n default=False)\r\n \r\n # Menu Specific properties\r\n mss_name: bpy.props.StringProperty(name=\"Name\",\r\n description=\"Name of the menu.\\nChoose the name of the menu to be shown before the properties\",\r\n default=\"Object: \")\r\n mss_obj_name: bpy.props.BoolProperty(name=\"Show 
the Object Name\",\r\n description=\"Show the Object name after the Name.\\nFor instance, if the Name is \\\"Object: \\\", the shown name will be \\\"Object: name_of_object\\\"\",\r\n default=True)\r\n \r\n # Edit mode properties\r\n em_fixobj: bpy.props.BoolProperty(name=\"Pin Object\",\r\n description=\"Pin the Object you are using to edit the menu.\\nThe object you pin will be considered as the target of all properties addition, and only this Object menu will be shown\",\r\n default=False,\r\n update = mc_em_fixobj_update)\r\n em_fixobj_pointer : bpy.props.PointerProperty(type=bpy.types.Object)\r\n\r\nbpy.utils.register_class(MC_Settings)\r\nbpy.types.Scene.mc_settings = bpy.props.PointerProperty(type=MC_Settings)\r\n\r\n# Object specific properties\r\nbpy.types.Object.mc_enable = bpy.props.BoolProperty(name=\"\", default=False)\r\nbpy.types.Object.mc_edit_enable = bpy.props.BoolProperty(name=\"Edit Mode\", default=False, description=\"Enable edit mode in this menu.\\nActivating this option you will have access to various tools to modify properties and sections\")\r\n\r\n# Class to store collections for section informations\r\nclass MCCollectionItem(bpy.types.PropertyGroup):\r\n collection : bpy.props.PointerProperty(name=\"Collection\",type=bpy.types.Collection)\r\n\r\nbpy.utils.register_class(MCCollectionItem)\r\n\r\n# Class to store section informations\r\nclass MCSectionItem(bpy.types.PropertyGroup):\r\n \r\n # Properties and update functions\r\n # Function to update the collapsed status if the collapsed section property is changed\r\n def mc_sections_collapsed_update(self, context):\r\n \r\n if not self.collapsable:\r\n self.collapsed = False\r\n \r\n return\r\n \r\n # Function to create an array of tuples for enum collections\r\n def mc_collections_list(self, context):\r\n \r\n items = []\r\n \r\n for el in self.collections:\r\n if hasattr(el.collection, 'name'):\r\n items.append( (el.collection.name,el.collection.name,el.collection.name) )\r\n \r\n return sorted(items)\r\n\r\n # Function to update global collection properties\r\n def mc_collections_list_update(self, context):\r\n \r\n for collection in self.collections:\r\n if collection.collection.name == self.collections_list:\r\n collection.collection.hide_viewport = False\r\n collection.collection.hide_render = False\r\n else:\r\n collection.collection.hide_viewport = True\r\n collection.collection.hide_render = True\r\n\r\n def mc_collections_global_options_update(self, context):\r\n \r\n items = []\r\n \r\n i = 0\r\n for el in self.collections:\r\n for obj in el.collection.objects:\r\n \r\n if obj.type == \"MESH\":\r\n obj.data.use_auto_smooth = self.collections_global_normalautosmooth\r\n \r\n for modifier in obj.modifiers:\r\n if modifier.type == \"CORRECTIVE_SMOOTH\":\r\n modifier.show_viewport = self.collections_global_smoothcorrection\r\n modifier.show_render = self.collections_global_smoothcorrection\r\n elif modifier.type == \"MASK\":\r\n modifier.show_viewport = self.collections_global_mask\r\n modifier.show_render = self.collections_global_mask\r\n elif modifier.type == \"SHRINKWRAP\":\r\n modifier.show_viewport = self.collections_global_shrinkwrap\r\n modifier.show_render = self.collections_global_shrinkwrap\r\n \r\n if self.outfit_enable:\r\n for modifier in self.outfit_body.modifiers:\r\n if modifier.type == \"MASK\":\r\n if not self.collections_global_mask:\r\n modifier.show_viewport = False\r\n modifier.show_render = False\r\n else:\r\n for el in self.collections:\r\n for obj in el.collection.objects:\r\n if 
obj.name in modifier.name and not obj.hide_viewport:\r\n modifier.show_viewport = True\r\n modifier.show_render = True\r\n \r\n return\r\n \r\n # Poll function for the selection of mesh only in pointer properties\r\n def mc_poll_mesh(self, object):\r\n return object.type == 'MESH'\r\n \r\n \r\n # Global section options\r\n id : bpy.props.IntProperty(name=\"Section ID\")\r\n name : bpy.props.StringProperty(name=\"Section Name\")\r\n icon : bpy.props.StringProperty(name=\"Section Icon\", default=\"\")\r\n type : bpy.props.StringProperty(name=\"Section Type\", default=\"DEFAULT\")\r\n collapsable : bpy.props.BoolProperty(name=\"Section Collapsable\", default=False, update=mc_sections_collapsed_update)\r\n \r\n # Global section option enforcer\r\n collapsed : bpy.props.BoolProperty(name=\"\", default = False, description=\"\")\r\n \r\n # COLLECTION type options\r\n collections_enable_global_smoothcorrection: bpy.props.BoolProperty(default=False)\r\n collections_enable_global_shrinkwrap: bpy.props.BoolProperty(default=False)\r\n collections_enable_global_mask: bpy.props.BoolProperty(default=False)\r\n collections_enable_global_normalautosmooth: bpy.props.BoolProperty(default=False)\r\n # COLLECTION type data\r\n collections: bpy.props.CollectionProperty(name=\"Section Collection List\", type=MCCollectionItem)\r\n collections_list: bpy.props.EnumProperty(name=\"Section Collection List\", items = mc_collections_list, update=mc_collections_list_update)\r\n collections_global_smoothcorrection: bpy.props.BoolProperty(name=\"Smooth Correction\", default=True, update=mc_collections_global_options_update)\r\n collections_global_shrinkwrap: bpy.props.BoolProperty(name=\"Shrinkwrap\", default=True, update=mc_collections_global_options_update)\r\n collections_global_mask: bpy.props.BoolProperty(name=\"Mask\", default=True, update=mc_collections_global_options_update)\r\n collections_global_normalautosmooth: bpy.props.BoolProperty(name=\"Normals Auto Smooth\", default=True, update=mc_collections_global_options_update)\r\n # Outfit variant\r\n outfit_enable : bpy.props.BoolProperty(name=\"Outfit\", default=False)\r\n outfit_body : bpy.props.PointerProperty(name=\"Outfit Body\", description = \"The masks of this object will be switched on/off depending on which elements of the collections visibility\", type=bpy.types.Object, poll=mc_poll_mesh)\r\n\r\nbpy.utils.register_class(MCSectionItem)\r\nbpy.types.Object.mc_sections = bpy.props.CollectionProperty(type=MCSectionItem)\r\n\r\n# Class to store linked properties informations\r\nclass MCLinkedPropertyItem(bpy.types.PropertyGroup):\r\n path: bpy.props.StringProperty(name=\"Property Path\")\r\n id : bpy.props.StringProperty(name=\"Property Identifier\")\r\n\r\nbpy.utils.register_class(MCLinkedPropertyItem)\r\n\r\n# Class to store properties informations\r\nclass MCPropertyItem(bpy.types.PropertyGroup):\r\n mc_id : bpy.props.IntProperty(name=\"Section ID\")\r\n name : bpy.props.StringProperty(name=\"Property Name\")\r\n path: bpy.props.StringProperty(name=\"Property Path\")\r\n id : bpy.props.StringProperty(name=\"Property Identifier\")\r\n icon : bpy.props.EnumProperty(name=\"Property Icon\", default=\"NONE\",items=mc_icon_list)\r\n section : bpy.props.StringProperty(name=\"Section\", default=\"Unsorted\")\r\n hide : bpy.props.BoolProperty(name=\"Hide Property\", default=False)\r\n \r\n linked_props: bpy.props.CollectionProperty(name=\"Linked properties\", 
type=MCLinkedPropertyItem)\r\n\r\nbpy.utils.register_class(MCPropertyItem)\r\nbpy.types.Object.mc_properties = bpy.props.CollectionProperty(type=MCPropertyItem)\r\n\r\n\r\n\r\n# COLLECTION MANAGEMENT FUNCTIONS\r\n\r\n# ---- Properties only functions\r\n\r\n# Function to remove a specific property from the collection\r\n# Return 1 if the property was found and deleted\r\ndef mc_remove_property_item(collection, item):\r\n i=-1\r\n for el in collection:\r\n i=i+1\r\n if el.path == item[1] and el.id == item[2]:\r\n break\r\n if i>=0:\r\n collection.remove(i)\r\n \r\n return i>=0\r\n\r\n# Function to add a specific property to the collection, if not already there\r\n# Return 0 if the property has not been added because already in the properties list\r\ndef mc_add_property_item(collection, item):\r\n i=True\r\n for el in collection:\r\n if el.path == item[1] and el.id == item[2]:\r\n i=False\r\n break\r\n if i:\r\n add_item = collection.add()\r\n add_item.name = item[0]\r\n add_item.path = item[1]\r\n add_item.id = item[2]\r\n add_item.mc_id = mc_len_collection(collection)\r\n \r\n return i\r\n\r\n# Function to find the index of a property\r\ndef mc_find_index(collection, item):\r\n i=-1\r\n for el in collection:\r\n i=i+1\r\n if el.path == item[1] and el.id == item[2]:\r\n break\r\n return i\r\n\r\n# Function to clean properties of a single object\r\ndef mc_clean_single_properties(obj):\r\n obj.mc_properties.clear()\r\n\r\n# Function to clean all the properties of every object\r\ndef mc_clean_properties():\r\n for obj in bpy.data.objects:\r\n obj.mc_properties.clear()\r\n\r\n# Function to print the properties\r\ndef mc_print_properties():\r\n for obj in bpy.data.objects:\r\n for el in obj.mc_properties:\r\n print(el.id + \" : property\" + el.name + \" with path \"+el.path)\r\n\r\n# Function to iutput the ID of the element\r\ndef mc_prop_ID(elem):\r\n return elem.mc_id\r\n\r\n# ---- Sections only functions\r\n\r\n# Function to create an array of tuples for enum properties\r\ndef mc_section_list(scene, context):\r\n \r\n settings = bpy.context.scene.mc_settings\r\n if settings.em_fixobj:\r\n obj = settings.em_fixobj_pointer\r\n else:\r\n obj = context.active_object\r\n \r\n items = []\r\n \r\n i = 0\r\n for el in obj.mc_sections:\r\n if el.type == \"DEFAULT\":\r\n items.append( (el.name,el.name,el.name,el.icon,i) )\r\n i = i + 1\r\n \r\n return items\r\n\r\n# Function to clean sections of a single object\r\ndef mc_clean_single_sections(obj):\r\n obj.mc_sections.clear()\r\n \r\n# Function to clean the sections of every object\r\ndef mc_clean_sections():\r\n for obj in bpy.data.objects:\r\n obj.mc_sections.clear()\r\n\r\n# Function to find the index of a section from the name\r\ndef mc_find_index_section(collection, item):\r\n i=-1\r\n for el in collection:\r\n i=i+1\r\n if el.name == item:\r\n break\r\n return i\r\n\r\n# Function to find the index of a section from the ID\r\ndef mc_find_index_section_fromID(collection, item):\r\n i=-1\r\n for el in collection:\r\n i=i+1\r\n if el.id == item:\r\n break\r\n return i\r\n\r\n# Function to iutput the ID of the element\r\ndef mc_sec_ID(elem):\r\n return elem.id\r\n\r\n# ---- Sections and properties functions\r\n\r\n# Function to find the length of a collection\r\ndef mc_len_collection(collection):\r\n i=0\r\n for el in collection:\r\n i=i+1\r\n return i\r\n\r\n\r\n\r\n\r\n# OPERATORS\r\n\r\n# Right click functions and operators\r\ndef dump(obj, text):\r\n print('-'*40, text, '-'*40)\r\n for attr in dir(obj):\r\n if hasattr( obj, attr ):\r\n print( 
\"obj.%s = %s\" % (attr, getattr(obj, attr)))\r\n\r\n# Operator to add the right click button on properties\r\nclass MC_AddProperty(bpy.types.Operator):\r\n \"\"\"Add the property to the menu\"\"\"\r\n bl_idname = \"mc.add_property\"\r\n bl_label = \"Add property to Menu\"\r\n\r\n @classmethod\r\n def poll(cls, context):\r\n return context.active_object is not None\r\n\r\n def execute(self, context):\r\n \r\n settings = bpy.context.scene.mc_settings\r\n if settings.em_fixobj:\r\n obj = settings.em_fixobj_pointer\r\n else:\r\n obj = context.active_object\r\n \r\n #if hasattr(context, 'button_pointer'):\r\n # btn = context.button_pointer \r\n # dump(btn, 'button_pointer')\r\n\r\n if hasattr(context, 'button_prop'):\r\n prop = context.button_prop\r\n #dump(prop, 'button_prop')\r\n \r\n try:\r\n bpy.ops.ui.copy_data_path_button(full_path=True)\r\n except:\r\n self.report({'WARNING'}, 'Menu Creator - Invalid selection.')\r\n return {'FINISHED'}\r\n \r\n rna, path = context.window_manager.clipboard.rsplit('.', 1)\r\n if '][' in path:\r\n path, rem = path.rsplit('[', 1)\r\n rna = rna + '.' + path\r\n path = '[' + rem\r\n elif '[' in path:\r\n path, rem = path.rsplit('[', 1)\r\n \r\n if obj.mc_enable:\r\n \r\n if mc_add_property_item(obj.mc_properties, [prop.name,rna,path]):\r\n self.report({'INFO'}, 'Menu Creator - Property added to the \\'' + obj.name + '\\' menu.')\r\n else:\r\n self.report({'WARNING'}, 'Menu Creator - Property of \\'' + obj.name + '\\' was already added.')\r\n \r\n else:\r\n self.report({'ERROR'}, 'Menu Creator - Can not add property \\'' + obj.name + '\\'. No menu has been initialized.')\r\n\r\n #if hasattr(context, 'button_operator'):\r\n # op = context.button_operator\r\n # dump(op, 'button_operator') \r\n\r\n return {'FINISHED'}\r\n\r\n# Operator to link a property to another one\r\nclass MC_LinkProperty(bpy.types.Operator):\r\n \"\"\"Link the selected property to this one\"\"\"\r\n bl_idname = \"mc.link_property\"\r\n bl_label = \"Link Property\"\r\n \r\n prop_id: bpy.props.StringProperty()\r\n prop_path: bpy.props.StringProperty()\r\n\r\n @classmethod\r\n def poll(cls, context):\r\n return context.active_object is not None\r\n\r\n def execute(self, context):\r\n \r\n settings = bpy.context.scene.mc_settings\r\n if settings.em_fixobj:\r\n obj = settings.em_fixobj_pointer\r\n else:\r\n obj = context.active_object\r\n\r\n if hasattr(context, 'button_prop'):\r\n prop = context.button_prop\r\n #dump(prop, 'button_prop')\r\n \r\n try:\r\n bpy.ops.ui.copy_data_path_button(full_path=True)\r\n except:\r\n self.report({'WARNING'}, 'Menu Creator - Invalid selection.')\r\n return {'FINISHED'}\r\n \r\n rna, path = context.window_manager.clipboard.rsplit('.', 1)\r\n if '][' in path:\r\n path, rem = path.rsplit('[', 1)\r\n rna = rna + '.' + path\r\n path = '[' + rem\r\n elif '[' in path:\r\n path, rem = path.rsplit('[', 1)\r\n \r\n if obj.mc_enable:\r\n \r\n i = mc_find_index(obj.mc_properties, ['',self.prop_path,self.prop_id])\r\n \r\n prop_type = type(eval(obj.mc_properties[i].path + '.' + obj.mc_properties[i].id))\r\n if '].[' in rna + '.' + path:\r\n link_type = type(eval(rna + path))\r\n else:\r\n link_type = type(eval(rna + '.' 
+ path))\r\n \r\n if prop_type == link_type:\r\n \r\n already_added = False\r\n for el in obj.mc_properties[i].linked_props:\r\n if el.path == rna and el.id == path:\r\n already_added = True\r\n break \r\n if not already_added: \r\n add_item = obj.mc_properties[i].linked_props.add()\r\n add_item.id = path\r\n add_item.path = rna\r\n \r\n self.report({'INFO'}, 'Menu Creator - Property \\'' + path + '\\' linked to \\'' + obj.mc_properties[i].name + '\\'')\r\n else:\r\n self.report({'WARNING'}, 'Menu Creator - Property \\'' + path + '\\' already linked to \\'' + obj.mc_properties[i].name + '\\'')\r\n \r\n else:\r\n self.report({'ERROR'}, 'Menu Creator - Property \\'' + path + '\\' can not be linked to \\'' + obj.mc_properties[i].name + '\\'')\r\n if settings.ms_debug:\r\n print('MenuCreator - Property \\'' + path + '\\' can not be linked to \\'' + obj.mc_properties[i].name + '\\'')\r\n print(' Data types are ' + str(link_type) + ' and ' + str(prop_type) + '.')\r\n \r\n else:\r\n self.report({'ERROR'}, 'Menu Creator - Can not link property in \\'' + obj.name + '\\'. No menu has been initialized.') \r\n\r\n return {'FINISHED'}\r\n\r\n# Operator to add the collection to the selected section\r\nclass MC_AddCollection(bpy.types.Operator):\r\n \"\"\"Add the collection to the selected section\"\"\"\r\n bl_idname = \"mc.add_collection\"\r\n bl_label = \"Add collection to Menu\"\r\n \r\n section: bpy.props.StringProperty()\r\n\r\n @classmethod\r\n def poll(cls, context):\r\n return context.active_object is not None\r\n\r\n def execute(self, context):\r\n \r\n settings = bpy.context.scene.mc_settings\r\n if settings.em_fixobj:\r\n obj = settings.em_fixobj_pointer\r\n else:\r\n obj = context.active_object\r\n \r\n add_coll = bpy.context.collection\r\n \r\n sec_index = mc_find_index_section(obj.mc_sections, self.section)\r\n\r\n i=True\r\n for el in obj.mc_sections[sec_index].collections:\r\n if el.collection == add_coll:\r\n i=False\r\n break\r\n if i:\r\n add_item = obj.mc_sections[sec_index].collections.add()\r\n add_item.collection = add_coll\r\n self.report({'INFO'}, 'Menu Creator - Collection has been added to section \\''+self.section+'\\'.')\r\n else:\r\n self.report({'WARNING'}, 'Menu Creator - Collection was already added to section \\''+self.section+'\\'.')\r\n\r\n return {'FINISHED'}\r\n\r\nclass WM_MT_button_context(Menu):\r\n bl_label = \"Custom Action\"\r\n\r\n def draw(self, context):\r\n pass\r\n\r\ndef menu_func(self, context):\r\n \r\n if hasattr(context, 'button_prop'):\r\n layout = self.layout\r\n layout.separator()\r\n layout.operator(MC_AddProperty.bl_idname)\r\n \r\ndef menu_func_link(self, context):\r\n \r\n if hasattr(context, 'button_prop'):\r\n layout = self.layout\r\n #layout.label(text=\"Try\")\r\n self.layout.menu(OUTLINER_MT_link_mcmenu.bl_idname)\r\n\r\nclass OUTLINER_MT_collection(Menu):\r\n bl_label = \"Custom Action Collection\"\r\n\r\n def draw(self, context):\r\n pass\r\n\r\n# Operator to create the list of sections when right clicking on the property -> Link to property\r\nclass OUTLINER_MT_link_mcmenu(bpy.types.Menu):\r\n bl_idname = 'mc.menu_link'\r\n bl_label = 'Link to Property'\r\n\r\n def draw(self, context):\r\n \r\n settings = bpy.context.scene.mc_settings\r\n if settings.em_fixobj:\r\n obj = settings.em_fixobj_pointer\r\n else:\r\n obj = context.active_object\r\n \r\n layout = self.layout\r\n \r\n no_prop = True\r\n for prop in obj.mc_properties:\r\n op = layout.operator(MC_LinkProperty.bl_idname, text=prop.name, icon=prop.icon)\r\n op.prop_id = prop.id\r\n 
op.prop_path = prop.path\r\n no_prop = False\r\n \r\n if no_prop:\r\n layout.label(text=\"No properties found\")\r\n\r\n# Operator to create the list of sections when right clicking on the collection -> Add collection to Section\r\nclass OUTLINER_MT_collection_mcmenu(bpy.types.Menu):\r\n bl_idname = 'mc.menu_collection'\r\n bl_label = 'Add Collection to Section'\r\n\r\n def draw(self, context):\r\n \r\n settings = bpy.context.scene.mc_settings\r\n if settings.em_fixobj:\r\n obj = settings.em_fixobj_pointer\r\n else:\r\n obj = context.active_object\r\n \r\n layout = self.layout\r\n \r\n no_col_sec = True\r\n for sec in obj.mc_sections:\r\n if sec.type == \"COLLECTION\":\r\n layout.operator(MC_AddCollection.bl_idname, text=sec.name, icon=sec.icon).section = sec.name\r\n no_col_sec = False\r\n \r\n if no_col_sec:\r\n layout.label(text=\"No Collection List sections found\")\r\n\r\ndef mc_collection_menu(self, context):\r\n self.layout.separator()\r\n self.layout.menu(OUTLINER_MT_collection_mcmenu.bl_idname)\r\n\r\n# Operator to clean all properties and sections from all objects\r\nclass MC_CleanAll(bpy.types.Operator):\r\n \"\"\"Clean all the menus.\\nIf you choose reset, it will also delete all Menu options from all objects\"\"\"\r\n bl_idname = \"mc.cleanprop\"\r\n bl_label = \"Clean all the properties\"\r\n \r\n reset : BoolProperty(default=False)\r\n \r\n def execute(self, context):\r\n \r\n mc_clean_properties()\r\n mc_clean_sections()\r\n \r\n if self.reset:\r\n for obj in bpy.data.objects:\r\n obj.mc_enable = False\r\n \r\n self.report({'INFO'}, 'Menu Creator - All the objects has been reset.')\r\n \r\n return {'FINISHED'}\r\n\r\n# Operator to clean all properties and sections from an objects. If reset is on, it will also disable the menu for that object\r\nclass MC_CleanObject(bpy.types.Operator):\r\n \"\"\"Clean all the object properties.\\nIf you choose reset, it will also delete all Menu options from the object\"\"\"\r\n bl_idname = \"mc.cleanpropobj\"\r\n bl_label = \"Clean the object\"\r\n \r\n reset : BoolProperty(default=False)\r\n \r\n def execute(self, context):\r\n \r\n settings = bpy.context.scene.mc_settings\r\n if settings.em_fixobj:\r\n obj = settings.em_fixobj_pointer\r\n else:\r\n obj = context.active_object\r\n \r\n mc_clean_single_properties(obj)\r\n mc_clean_single_sections(obj)\r\n if self.reset:\r\n obj.mc_enable = False\r\n \r\n self.report({'INFO'}, 'Menu Creator - \\'' + obj.name + '\\' menu has been reset.')\r\n \r\n return {'FINISHED'}\r\n\r\n# Operator to remove a linked property (button in UI)\r\nclass MC_RemoveLinkedProperty(bpy.types.Operator):\r\n \"\"\"Remove the linked property\"\"\"\r\n bl_idname = \"mc.removelinkedproperty\"\r\n bl_label = \"\"\r\n \r\n prop_index : bpy.props.IntProperty()\r\n link_path : bpy.props.StringProperty()\r\n link_id : bpy.props.StringProperty()\r\n\r\n @classmethod\r\n def poll(cls, context):\r\n return context.active_object is not None\r\n\r\n def execute(self, context):\r\n \r\n settings = bpy.context.scene.mc_settings\r\n if settings.em_fixobj:\r\n obj = settings.em_fixobj_pointer\r\n else:\r\n obj = context.active_object\r\n props = obj.mc_properties\r\n \r\n i=-1\r\n for el in obj.mc_properties[self.prop_index].linked_props:\r\n i=i+1\r\n if el.path == self.link_path and el.id == self.link_id:\r\n break\r\n if i>=0:\r\n obj.mc_properties[self.prop_index].linked_props.remove(i)\r\n\r\n return {'FINISHED'}\r\n\r\n# Single Property settings\r\nclass MC_PropertySettings(bpy.types.Operator):\r\n \"\"\"Modify some of the 
property settings\"\"\"\r\n bl_idname = \"mc.propsettings\"\r\n bl_label = \"Property settings\"\r\n bl_icon = \"PREFERENCES\"\r\n bl_options = {'UNDO'}\r\n \r\n name : bpy.props.StringProperty(name='Name',\r\n description=\"Choose the name of the property\")\r\n path : bpy.props.StringProperty()\r\n id : bpy.props.StringProperty()\r\n icon : bpy.props.EnumProperty(name='Icon',\r\n description=\"Choose the icon.\\nNote that the icon name MUST respect Blender convention. All the icons can be found in the Icon Viewer default Blender addon.\",items=mc_icon_list)\r\n section : bpy.props.EnumProperty(name='Section',\r\n description=\"Choose the icon.\\nNote that the icon name MUST respect Blender convention. All the icons can be found in the Icon Viewer default Blender addon.\",items=mc_section_list)\r\n\r\n def execute(self, context):\r\n \r\n settings = bpy.context.scene.mc_settings\r\n if settings.em_fixobj:\r\n obj = settings.em_fixobj_pointer\r\n else:\r\n obj = context.active_object\r\n \r\n i = mc_find_index(obj.mc_properties,[self.name,self.path,self.id])\r\n \r\n if i>=0:\r\n obj.mc_properties[i].name = self.name\r\n obj.mc_properties[i].icon = self.icon\r\n obj.mc_properties[i].section = self.section\r\n \r\n return {'FINISHED'}\r\n \r\n def invoke(self, context, event):\r\n \r\n settings = bpy.context.scene.mc_settings\r\n \r\n if settings.ms_debug:\r\n return context.window_manager.invoke_props_dialog(self, width=650)\r\n else:\r\n return context.window_manager.invoke_props_dialog(self, width=550)\r\n \r\n def draw(self, context):\r\n \r\n settings = bpy.context.scene.mc_settings\r\n if settings.em_fixobj:\r\n obj = settings.em_fixobj_pointer\r\n else:\r\n obj = context.active_object\r\n \r\n i = mc_find_index(obj.mc_properties,[self.name,self.path,self.id])\r\n \r\n layout = self.layout\r\n \r\n layout.prop(self, \"name\")\r\n layout.prop(self, \"icon\")\r\n layout.prop(self, \"section\")\r\n \r\n layout.separator()\r\n layout.label(text=\"Property info\", icon=\"INFO\")\r\n box = layout.box()\r\n box.label(text=\"Identifier: \"+self.id)\r\n \r\n if settings.ms_debug:\r\n layout.label(text=\"Full path\", icon=\"RNA\")\r\n box = layout.box()\r\n box.label(text=self.path+'.'+self.id)\r\n \r\n if len(obj.mc_properties[i].linked_props)>0:\r\n layout.separator()\r\n layout.label(text=\"Linked Properties\", icon=\"LINKED\")\r\n box = layout.box()\r\n for prop in obj.mc_properties[i].linked_props:\r\n row = box.row()\r\n row.label(text=prop.path + '.' 
+ prop.id, icon=\"DOT\")\r\n link_del_op = row.operator(MC_RemoveLinkedProperty.bl_idname, icon=\"X\")\r\n link_del_op.prop_index = i\r\n link_del_op.link_id = prop.id\r\n link_del_op.link_path = prop.path\r\n \r\n\r\n# Swap Properties Operator\r\nclass MC_SwapProperty(bpy.types.Operator):\r\n \"\"\"Change the position of the property\"\"\"\r\n bl_idname = \"mc.swapprops\"\r\n bl_label = \"Change the property position\"\r\n \r\n mod : BoolProperty(default=False) # False = down, True = Up\r\n \r\n name : bpy.props.StringProperty()\r\n path : bpy.props.StringProperty()\r\n id : bpy.props.StringProperty()\r\n \r\n def execute(self, context):\r\n \r\n settings = bpy.context.scene.mc_settings\r\n if settings.em_fixobj:\r\n obj = settings.em_fixobj_pointer\r\n else:\r\n obj = context.active_object\r\n col = sorted(obj.mc_properties, key = mc_prop_ID)\r\n col_len = mc_len_collection(col)\r\n \r\n i = mc_find_index(col,[self.name,self.path,self.id])\r\n \r\n if i>=0:\r\n if self.mod:\r\n \r\n j=i\r\n while j>0:\r\n j = j - 1\r\n if col[j].section==col[i].section:\r\n break\r\n if j>-1:\r\n \r\n col[i].mc_id = j\r\n col[j].mc_id = i\r\n \r\n else:\r\n \r\n j=i\r\n while j<col_len-1:\r\n j=j+1\r\n if col[j].section==col[i].section:\r\n break\r\n if j<col_len:\r\n \r\n col[i].mc_id = j\r\n col[j].mc_id = i\r\n \r\n return {'FINISHED'}\r\n\r\n# Operator to remove a property (button in UI)\r\nclass MC_RemoveProperty(bpy.types.Operator):\r\n \"\"\"Remove the property from the current menu\"\"\"\r\n bl_idname = \"mc.removeproperty\"\r\n bl_label = \"Remove the property\"\r\n \r\n path : bpy.props.StringProperty()\r\n id : bpy.props.StringProperty()\r\n\r\n @classmethod\r\n def poll(cls, context):\r\n return context.active_object is not None\r\n\r\n def execute(self, context):\r\n \r\n settings = bpy.context.scene.mc_settings\r\n if settings.em_fixobj:\r\n obj = settings.em_fixobj_pointer\r\n else:\r\n obj = context.active_object\r\n props = obj.mc_properties\r\n \r\n mc_remove_property_item(obj.mc_properties,['',self.path,self.id])\r\n\r\n return {'FINISHED'}\r\n\r\n# Operator to add a new section\r\nclass MC_AddSection(bpy.types.Operator):\r\n \"\"\"Add a new section to the section list.\"\"\"\r\n bl_idname = \"mc.addsection\"\r\n bl_label = \"Add section\"\r\n bl_icon = \"PREFERENCES\"\r\n bl_options = {'UNDO'}\r\n \r\n name : bpy.props.StringProperty(name='Name',\r\n description=\"Choose the name of the section\", default = \"Section\")\r\n icon : bpy.props.EnumProperty(name='Icon',\r\n description=\"Choose the icon.\\nNote that the icon name MUST respect Blender convention. 
All the icons can be found in the Icon Viewer default Blender addon\",items=mc_icon_list)\r\n collapsable : bpy.props.BoolProperty(name=\"Collapsable\",\r\n description=\"Add a collapse button near the name of the section\")\r\n type : bpy.props.EnumProperty(name='Type',\r\n description=\"Choose the section type\",items=mc_section_type_list)\r\n\r\n def execute(self, context):\r\n \r\n settings = bpy.context.scene.mc_settings\r\n \r\n if settings.em_fixobj:\r\n obj = settings.em_fixobj_pointer\r\n else:\r\n obj = context.active_object\r\n sec_obj = obj.mc_sections\r\n sec_len = mc_len_collection(sec_obj)\r\n \r\n if self.name!=\"\":\r\n \r\n i=True\r\n j=-1\r\n for el in sec_obj:\r\n j=j+1\r\n if el.name == self.name:\r\n i=False\r\n break\r\n if i:\r\n add_item = sec_obj.add()\r\n add_item.name = self.name\r\n add_item.type = self.type\r\n add_item.icon = self.icon\r\n add_item.collapsable = self.collapsable\r\n add_item.id = sec_len\r\n \r\n self.report({'INFO'}, 'Menu Creator - Section \\'' + self.name +'\\' created.')\r\n else:\r\n self.report({'WARNING'}, 'Menu Creator - Cannot create sections with same name.')\r\n \r\n else:\r\n self.report({'ERROR'}, 'Menu Creator - Cannot create sections with this name.')\r\n \r\n return {'FINISHED'}\r\n \r\n def invoke(self, context, event):\r\n \r\n settings = bpy.context.scene.mc_settings\r\n \r\n if settings.ms_debug:\r\n return context.window_manager.invoke_props_dialog(self, width=550)\r\n else:\r\n return context.window_manager.invoke_props_dialog(self)\r\n \r\n def draw(self, context):\r\n \r\n settings = bpy.context.scene.mc_settings\r\n \r\n layout = self.layout\r\n \r\n scale = 3.0\r\n \r\n row=layout.row()\r\n row.label(text=\"Name:\")\r\n row.scale_x=scale\r\n row.prop(self, \"name\", text=\"\")\r\n \r\n row=layout.row()\r\n row.label(text=\"Icon:\")\r\n row.scale_x=scale\r\n row.prop(self, \"icon\", text=\"\")\r\n \r\n row=layout.row()\r\n row.label(text=\"\")\r\n row.scale_x=scale\r\n row.prop(self, \"collapsable\")\r\n \r\n layout.separator()\r\n \r\n row=layout.row()\r\n row.label(text=\"Type:\")\r\n row.scale_x=scale\r\n row.prop(self, \"type\", text=\"\")\r\n\r\n# Section Property settings\r\nclass MC_SectionSettings(bpy.types.Operator):\r\n \"\"\"Modify the section settings.\"\"\"\r\n bl_idname = \"mc.sectionsettings\"\r\n bl_label = \"Section settings\"\r\n bl_icon = \"PREFERENCES\"\r\n bl_options = {'UNDO'}\r\n \r\n name : bpy.props.StringProperty(name='Name',\r\n description=\"Choose the name of the section\")\r\n icon : bpy.props.EnumProperty(name='Icon',\r\n description=\"Choose the icon.\\nNote that the icon name MUST respect Blender convention. 
All the icons can be found in the Icon Viewer default Blender addon.\",items=mc_icon_list)\r\n collapsable : bpy.props.BoolProperty(name=\"Collapsable\",\r\n description=\"Add a collapse button near the name of the section\")\r\n type : bpy.props.EnumProperty(name='Type',\r\n description=\"The Section type can not be changed after creation\",items=mc_section_type_list)\r\n \r\n # COLLECTION type settings\r\n collections_enable_global_smoothcorrection : bpy.props.BoolProperty(name=\"Enable Global Smooth Correction\")\r\n collections_enable_global_shrinkwrap : bpy.props.BoolProperty(name=\"Enable Global Shrinkwrap\")\r\n collections_enable_global_mask : bpy.props.BoolProperty(name=\"Enable Global Mask\")\r\n collections_enable_global_normalautosmooth : bpy.props.BoolProperty(name=\"Enable Global Normal Auto Smooth\")\r\n # Outfit variant\r\n outfit_enable : bpy.props.BoolProperty(name=\"Outfit\", description=\"With this option a Body entry will be added to the Section. This Body's masks will be enabled when elements of the collections are shown, and viceversa, if the masks are called the same name as the element of the collection\")\r\n \r\n name_edit : bpy.props.StringProperty(name='Name',\r\n description=\"Choose the name of the section\")\r\n ID : bpy.props.IntProperty()\r\n\r\n def execute(self, context):\r\n \r\n settings = bpy.context.scene.mc_settings\r\n \r\n if settings.em_fixobj:\r\n obj = settings.em_fixobj_pointer\r\n else:\r\n obj = context.active_object\r\n prop_obj = obj.mc_properties\r\n sec_obj = obj.mc_sections\r\n \r\n \r\n i = mc_find_index_section(sec_obj,self.name)\r\n \r\n if i>=0:\r\n \r\n for el in prop_obj:\r\n if el.section == self.name:\r\n el.section = self.name_edit\r\n \r\n sec_obj[i].name = self.name_edit\r\n sec_obj[i].icon = self.icon\r\n sec_obj[i].collapsable = self.collapsable\r\n sec_obj[i].collections_enable_global_smoothcorrection = self.collections_enable_global_smoothcorrection\r\n sec_obj[i].collections_enable_global_shrinkwrap = self.collections_enable_global_shrinkwrap\r\n sec_obj[i].collections_enable_global_mask = self.collections_enable_global_mask\r\n sec_obj[i].collections_enable_global_normalautosmooth = self.collections_enable_global_normalautosmooth\r\n sec_obj[i].outfit_enable = self.outfit_enable\r\n if obj.type == \"MESH\":\r\n sec_obj[i].outfit_body = obj\r\n \r\n return {'FINISHED'}\r\n \r\n def invoke(self, context, event):\r\n \r\n settings = bpy.context.scene.mc_settings\r\n \r\n if settings.em_fixobj:\r\n obj = settings.em_fixobj_pointer\r\n else:\r\n obj = context.active_object\r\n sec_obj = obj.mc_sections\r\n \r\n self.name_edit = self.name\r\n self.ID = mc_find_index_section(sec_obj,self.name)\r\n self.collapsable = sec_obj[self.ID].collapsable\r\n self.collections_enable_global_smoothcorrection = sec_obj[self.ID].collections_enable_global_smoothcorrection\r\n self.collections_enable_global_shrinkwrap = sec_obj[self.ID].collections_enable_global_shrinkwrap\r\n self.collections_enable_global_mask = sec_obj[self.ID].collections_enable_global_mask\r\n self.collections_enable_global_normalautosmooth = sec_obj[self.ID].collections_enable_global_normalautosmooth\r\n self.outfit_enable = sec_obj[self.ID].outfit_enable\r\n \r\n return context.window_manager.invoke_props_dialog(self)\r\n \r\n def draw(self, context):\r\n \r\n settings = bpy.context.scene.mc_settings\r\n \r\n if settings.em_fixobj:\r\n obj = settings.em_fixobj_pointer\r\n else:\r\n obj = context.active_object\r\n sec_obj = obj.mc_sections\r\n \r\n scale = 3.0\r\n \r\n 
layout = self.layout\r\n \r\n row=layout.row()\r\n row.label(text=\"Name:\")\r\n row.scale_x=scale\r\n row.prop(self, \"name_edit\", text=\"\")\r\n \r\n row=layout.row()\r\n row.label(text=\"Icon:\")\r\n row.scale_x=scale\r\n row.prop(self, \"icon\", text=\"\")\r\n \r\n row=layout.row()\r\n row.label(text=\"\")\r\n row.scale_x=scale\r\n row.prop(self, \"collapsable\")\r\n \r\n layout.separator()\r\n col = layout.column()\r\n col.enabled = False\r\n col.prop(self, \"type\")\r\n if self.type == \"COLLECTION\":\r\n layout.separator()\r\n row = layout.row()\r\n row.label(text=\"\")\r\n row.scale_x = 3\r\n row.prop(self,\"collections_enable_global_smoothcorrection\")\r\n row = layout.row()\r\n row.label(text=\"\")\r\n row.scale_x = 3\r\n row.prop(self,\"collections_enable_global_shrinkwrap\")\r\n row = layout.row()\r\n row.label(text=\"\")\r\n row.scale_x = 3\r\n row.prop(self,\"collections_enable_global_mask\")\r\n row = layout.row()\r\n row.label(text=\"\")\r\n row.scale_x = 3\r\n row.prop(self,\"collections_enable_global_normalautosmooth\")\r\n layout.separator()\r\n row = layout.row()\r\n row.label(text=\"\")\r\n row.scale_x = 3\r\n row.prop(self,\"outfit_enable\")\r\n\r\n# Operator to change Section position\r\nclass MC_SwapSection(bpy.types.Operator):\r\n \"\"\"Change the position of the section\"\"\"\r\n bl_idname = \"mc.swapsections\"\r\n bl_label = \"Change the section position\"\r\n \r\n mod : BoolProperty(default=False) # False = down, True = Up\r\n \r\n name : bpy.props.StringProperty()\r\n icon : bpy.props.StringProperty()\r\n \r\n def execute(self, context):\r\n \r\n settings = bpy.context.scene.mc_settings\r\n if settings.em_fixobj:\r\n obj = settings.em_fixobj_pointer\r\n else:\r\n obj = context.active_object\r\n col = obj.mc_sections\r\n col_len = mc_len_collection(col)\r\n \r\n sec_index = mc_find_index_section(col,self.name)\r\n i = col[sec_index].id\r\n \r\n if self.mod and i > 1:\r\n j = mc_find_index_section_fromID(col, i-1)\r\n col[sec_index].id = i-1\r\n col[j].id = i\r\n elif not self.mod and i < col_len-1:\r\n j = mc_find_index_section_fromID(col, i+1)\r\n col[sec_index].id = i+1\r\n col[j].id = i\r\n \r\n return {'FINISHED'}\r\n\r\n# Delete Section\r\nclass MC_DeleteSection(bpy.types.Operator):\r\n \"\"\"Delete Section\"\"\"\r\n bl_idname = \"mc.deletesection\"\r\n bl_label = \"Section settings\"\r\n bl_options = {'UNDO'}\r\n \r\n name : bpy.props.StringProperty(name='Name',\r\n description=\"Choose the name of the section\")\r\n\r\n def execute(self, context):\r\n \r\n settings = bpy.context.scene.mc_settings\r\n if settings.em_fixobj:\r\n obj = settings.em_fixobj_pointer\r\n else:\r\n obj = context.active_object\r\n sec_obj = obj.mc_sections\r\n \r\n i=-1\r\n for el in sec_obj:\r\n i=i+1\r\n if el.name == self.name:\r\n break\r\n \r\n if i>=0:\r\n \r\n j = sec_obj[i].id\r\n \r\n for k in range(j+1,len(sec_obj)):\r\n sec_obj[mc_find_index_section_fromID(sec_obj, k)].id = k-1\r\n \r\n sec_obj.remove(i)\r\n \r\n self.report({'INFO'}, 'Menu Creator - Section \\'' + self.name +'\\' deleted.')\r\n \r\n return {'FINISHED'}\r\n\r\n# Operator to shiwtch visibility of an object\r\nclass MC_CollectionObjectVisibility(bpy.types.Operator):\r\n \"\"\"Chenge the visibility of the selected object\"\"\"\r\n bl_idname = \"mc.colobjvisibility\"\r\n bl_label = \"Hide/Unhide Object visibility\"\r\n bl_options = {'UNDO'}\r\n \r\n obj : bpy.props.StringProperty()\r\n sec : bpy.props.StringProperty()\r\n\r\n def execute(self, context):\r\n \r\n bpy.data.objects[self.obj].hide_viewport = 
not bpy.data.objects[self.obj].hide_viewport\r\n bpy.data.objects[self.obj].hide_render = not bpy.data.objects[self.obj].hide_render\r\n \r\n settings = bpy.context.scene.mc_settings\r\n if settings.em_fixobj:\r\n body_obj = settings.em_fixobj_pointer\r\n else:\r\n body_obj = context.active_object\r\n sec_obj = body_obj.mc_sections\r\n i = mc_find_index_section(sec_obj,self.sec)\r\n \r\n if sec_obj[i].outfit_enable:\r\n if sec_obj[i].outfit_body:\r\n for modifier in sec_obj[i].outfit_body.modifiers:\r\n if modifier.type == \"MASK\" and self.obj in modifier.name and sec_obj[i].collections_global_mask:\r\n modifier.show_viewport = not bpy.data.objects[self.obj].hide_viewport\r\n modifier.show_render = not bpy.data.objects[self.obj].hide_viewport\r\n else:\r\n self.report({'WARNING'}, 'Menu Creator - Outfit Body has not been specified.')\r\n \r\n return {'FINISHED'}\r\n\r\n# Operator to delete a collection\r\nclass MC_RemoveCollection(bpy.types.Operator):\r\n \"\"\"Remove the selected collection from the Menu.\\nThe collection will NOT be deleted\"\"\"\r\n bl_idname = \"mc.deletecollection\"\r\n bl_label = \"Remove the selected collection from the menu\"\r\n bl_options = {'UNDO'}\r\n \r\n col : bpy.props.StringProperty()\r\n sec : bpy.props.StringProperty()\r\n\r\n def execute(self, context):\r\n \r\n settings = bpy.context.scene.mc_settings\r\n if settings.em_fixobj:\r\n obj = settings.em_fixobj_pointer\r\n else:\r\n obj = context.active_object\r\n sec_obj = obj.mc_sections\r\n \r\n sec_index = mc_find_index_section(sec_obj,self.sec)\r\n \r\n i = 0\r\n for el in sec_obj[sec_index].collections:\r\n if el.collection.name == self.col:\r\n sec_obj[sec_index].collections.remove(i)\r\n break\r\n i = i + 1\r\n \r\n self.report({'INFO'}, 'Menu Creator - Collection removed from the Menu.')\r\n \r\n return {'FINISHED'}\r\n\r\n# Initial Configuration Operator\r\nclass MC_InitialConfiguration(bpy.types.Operator):\r\n \"\"\"Clean all the object properties\"\"\"\r\n bl_idname = \"mc.initialconfig\"\r\n bl_label = \"Clean all the properties\"\r\n \r\n def execute(self, context):\r\n \r\n settings = bpy.context.scene.mc_settings\r\n if settings.em_fixobj:\r\n obj = settings.em_fixobj_pointer\r\n else:\r\n obj = context.active_object\r\n \r\n mc_clean_single_sections(obj)\r\n mc_clean_single_properties(obj)\r\n \r\n add_item = obj.mc_sections.add()\r\n add_item.id = 0\r\n add_item.name = \"Unsorted\"\r\n add_item.icon = \"LIBRARY_DATA_BROKEN\"\r\n \r\n obj.mc_enable = True\r\n \r\n self.report({'INFO'}, 'Menu Creator - Menu for \\''+obj.name+'\\' successfully created.')\r\n \r\n return {'FINISHED'}\r\n\r\n\r\n\r\n# USER INTERFACE\r\n\r\n# Poll functions\r\n\r\n@classmethod\r\ndef mc_panel_poll(cls, context):\r\n \r\n settings = bpy.context.scene.mc_settings\r\n if settings.em_fixobj:\r\n obj = settings.em_fixobj_pointer\r\n else:\r\n obj = context.active_object\r\n \r\n return obj.mc_enable\r\n\r\n# User Interface Panels\r\n\r\nclass MainPanel:\r\n bl_space_type = \"VIEW_3D\"\r\n bl_region_type = \"UI\"\r\n bl_category = \"Menu\"\r\n\r\nclass PT_MenuCreator_InitialConfiguration_Panel(MainPanel, bpy.types.Panel):\r\n bl_idname = \"PT_MenuCreator_InitialConfiguration_Panel\"\r\n bl_label = \"Initial Configuration\"\r\n \r\n @classmethod\r\n def poll(cls, context):\r\n \r\n settings = bpy.context.scene.mc_settings\r\n if settings.em_fixobj:\r\n obj = settings.em_fixobj_pointer\r\n else:\r\n obj = context.active_object\r\n \r\n if obj is not None:\r\n return not obj.mc_enable\r\n else:\r\n return False\r\n \r\n 
def draw(self, context):\r\n \r\n layout = self.layout\r\n \r\n layout.label(text=\"Menu Configuration\")\r\n \r\n layout.operator('mc.initialconfig', text=\"Create Menu\")\r\n\r\nclass PT_MenuCreator_Panel(MainPanel, bpy.types.Panel):\r\n bl_idname = \"PT_MenuCreator_Panel\"\r\n bl_label = \"Menu\"\r\n \r\n @classmethod\r\n def poll(cls, context):\r\n \r\n settings = bpy.context.scene.mc_settings\r\n if settings.em_fixobj:\r\n obj = settings.em_fixobj_pointer\r\n else:\r\n obj = context.active_object\r\n \r\n if obj is not None:\r\n return obj.mc_enable\r\n else:\r\n return False\r\n\r\n def draw(self, context):\r\n \r\n settings = bpy.context.scene.mc_settings\r\n if settings.em_fixobj:\r\n obj = settings.em_fixobj_pointer\r\n else:\r\n obj = context.active_object\r\n mc_col = obj.mc_properties\r\n mcs_col = obj.mc_sections\r\n mc_col_len = mc_len_collection(mc_col)\r\n mcs_col_len = mc_len_collection(mcs_col)\r\n \r\n layout = self.layout\r\n \r\n row = layout.row(align=False)\r\n menu_name = settings.mss_name;\r\n if settings.mss_obj_name:\r\n menu_name = menu_name+obj.name\r\n row.label(text=menu_name)\r\n \r\n if settings.ms_editmode:\r\n row.prop(obj, \"mc_edit_enable\", text=\"\",icon=\"MODIFIER\")\r\n row.operator(\"mc.addsection\",text=\"\",icon=\"ADD\")\r\n if settings.em_fixobj:\r\n row.prop(settings,\"em_fixobj\",icon=\"PINNED\", text=\"\")\r\n else:\r\n row.prop(settings,\"em_fixobj\",icon=\"UNPINNED\", text= \"\")\r\n else:\r\n if settings.em_fixobj:\r\n row.prop(settings,\"em_fixobj\",icon=\"PINNED\", text=\"\")\r\n else:\r\n row.prop(settings,\"em_fixobj\",icon=\"UNPINNED\", text= \"\")\r\n \r\n if mcs_col_len>1:\r\n \r\n for sec in sorted(mcs_col, key = mc_sec_ID):\r\n \r\n if sec.type == \"DEFAULT\":\r\n \r\n sec_empty = True\r\n sec_hidden = True\r\n for el in mc_col:\r\n if el.section == sec.name:\r\n sec_empty = False\r\n if not el.hide:\r\n sec_hidden = False\r\n \r\n if (sec_empty and sec.name == \"Unsorted\") or (not obj.mc_edit_enable and not sec_empty and sec_hidden):\r\n continue\r\n else:\r\n row = layout.row(align=False)\r\n if sec.collapsable:\r\n row.prop(sec, \"collapsed\", icon=\"TRIA_DOWN\" if not sec.collapsed else \"TRIA_RIGHT\", icon_only=True, emboss=False)\r\n if sec.icon == \"NONE\":\r\n row.label(text=sec.name)\r\n else:\r\n row.label(text=sec.name,icon=sec.icon)\r\n \r\n if obj.mc_edit_enable:\r\n \r\n if sec.name != \"Unsorted\":\r\n ssett_button = row.operator(\"mc.sectionsettings\", icon=\"PREFERENCES\", text=\"\")\r\n ssett_button.name = sec.name\r\n ssett_button.icon = sec.icon\r\n ssett_button.type = sec.type\r\n \r\n row2 = row.row(align=True)\r\n sup_button = row2.operator(\"mc.swapsections\", icon=\"TRIA_UP\", text=\"\")\r\n sup_button.mod = True\r\n sup_button.name = sec.name\r\n sup_button.icon = sec.icon\r\n sdown_button = row2.operator(\"mc.swapsections\", icon=\"TRIA_DOWN\", text=\"\")\r\n sdown_button.mod = False\r\n sdown_button.name = sec.name\r\n sdown_button.icon = sec.icon\r\n \r\n if not sec.collapsed:\r\n box = layout.box()\r\n if sec_empty and sec.name != \"Unsorted\":\r\n row = box.row(align=False)\r\n row.label(text=\"Section Empty\", icon=\"ERROR\")\r\n row.operator(\"mc.deletesection\",text=\"\",icon=\"X\").name = sec.name\r\n \r\n if not sec.collapsed:\r\n \r\n for el in sorted(mc_col, key = mc_prop_ID):\r\n \r\n if el.section == sec.name:\r\n \r\n el_index = mc_find_index(mc_col,[el.name,el.path,el.id])\r\n \r\n if obj.mc_edit_enable:\r\n \r\n row = box.row(align=False)\r\n if el.icon !=\"NONE\":\r\n 
row.label(text=el.name,icon=el.icon)\r\n else:\r\n row.label(text=el.name)\r\n \r\n sett_button = row.operator(\"mc.propsettings\", icon=\"PREFERENCES\", text=\"\")\r\n sett_button.name = el.name\r\n sett_button.path = el.path\r\n sett_button.id = el.id\r\n sett_button.icon = el.icon\r\n sett_button.section = el.section\r\n \r\n row2 = row.row(align=True)\r\n up_button = row2.operator(\"mc.swapprops\", icon=\"TRIA_UP\", text=\"\")\r\n up_button.mod = True\r\n up_button.name = el.name\r\n up_button.path = el.path\r\n up_button.id = el.id\r\n down_button = row2.operator(\"mc.swapprops\", icon=\"TRIA_DOWN\", text=\"\")\r\n down_button.mod = False\r\n down_button.name = el.name\r\n down_button.path = el.path\r\n down_button.id = el.id\r\n \r\n if el.hide:\r\n row.prop(el, \"hide\", text=\"\", icon = \"HIDE_ON\")\r\n else:\r\n row.prop(el, \"hide\", text=\"\", icon = \"HIDE_OFF\")\r\n \r\n del_button = row.operator(\"mc.removeproperty\", icon=\"X\", text=\"\")\r\n del_button.path = el.path\r\n del_button.id = el.id\r\n else:\r\n \r\n if not el.hide:\r\n row = box.row(align=False)\r\n if el.icon !=\"NONE\":\r\n row.label(text=el.name,icon=el.icon)\r\n else:\r\n row.label(text=el.name)\r\n \r\n row.scale_x=1.0\r\n row.prop(eval(el.path), el.id, text=\"\")\r\n \r\n elif sec.type == \"COLLECTION\":\r\n \r\n sec_empty = True\r\n for el in sec.collections:\r\n sec_empty = False\r\n break\r\n \r\n row = layout.row(align=False)\r\n if sec.collapsable:\r\n row.prop(sec, \"collapsed\", icon=\"TRIA_DOWN\" if not sec.collapsed else \"TRIA_RIGHT\", icon_only=True, emboss=False)\r\n if sec.icon == \"NONE\":\r\n row.label(text=sec.name)\r\n else:\r\n row.label(text=sec.name,icon=sec.icon)\r\n \r\n if obj.mc_edit_enable:\r\n \r\n ssett_button = row.operator(\"mc.sectionsettings\", icon=\"PREFERENCES\", text=\"\")\r\n ssett_button.name = sec.name\r\n ssett_button.icon = sec.icon\r\n ssett_button.type = sec.type\r\n \r\n row2 = row.row(align=True)\r\n sup_button = row2.operator(\"mc.swapsections\", icon=\"TRIA_UP\", text=\"\")\r\n sup_button.mod = True\r\n sup_button.name = sec.name\r\n sup_button.icon = sec.icon\r\n sdown_button = row2.operator(\"mc.swapsections\", icon=\"TRIA_DOWN\", text=\"\")\r\n sdown_button.mod = False\r\n sdown_button.name = sec.name\r\n sdown_button.icon = sec.icon\r\n \r\n row.operator(\"mc.deletesection\",text=\"\",icon=\"X\").name = sec.name\r\n \r\n if not sec.collapsed and len(sec.collections)>0:\r\n box = layout.box()\r\n if sec.outfit_enable:\r\n box.prop(sec,\"outfit_body\", text=\"Body\", icon=\"OUTLINER_OB_MESH\")\r\n \r\n if len(sec.collections)>0:\r\n box.label(text=\"Collection List\", icon=\"OUTLINER_COLLECTION\")\r\n box = box.box()\r\n for collection in sec.collections:\r\n row = box.row()\r\n row.label(text=collection.collection.name)\r\n del_col = row.operator(\"mc.deletecollection\",text=\"\",icon=\"X\")\r\n del_col.sec = sec.name\r\n del_col.col = collection.collection.name\r\n \r\n else:\r\n if not sec.collapsed:\r\n box = layout.box()\r\n if sec_empty:\r\n row = box.row(align=False)\r\n row.label(text=\"No Collection Assigned\", icon=\"ERROR\")\r\n row.operator(\"mc.deletesection\",text=\"\",icon=\"X\").name = sec.name\r\n \r\n if len(sec.collections)>0:\r\n box.prop(sec,\"collections_list\", text=\"\")\r\n box2 = box.box()\r\n if len(bpy.data.collections[sec.collections_list].objects)>0:\r\n for obj2 in bpy.data.collections[sec.collections_list].objects:\r\n row = box2.row()\r\n if obj2.hide_viewport:\r\n vop=row.operator(\"mc.colobjvisibility\",text=obj2.name, 
icon='OUTLINER_OB_'+obj2.type)\r\n vop.obj = obj2.name\r\n vop.sec = sec.name\r\n else:\r\n vop = row.operator(\"mc.colobjvisibility\",text=obj2.name, icon='OUTLINER_OB_'+obj2.type, depress = True)\r\n vop.obj = obj2.name\r\n vop.sec = sec.name\r\n else:\r\n box2.label(text=\"This Collection seems empty\", icon=\"ERROR\")\r\n \r\n if sec.collections_enable_global_smoothcorrection or sec.collections_enable_global_shrinkwrap or sec.collections_enable_global_mask or sec.collections_enable_global_normalautosmooth:\r\n box.label(text= \"Global Properties\", icon=\"MODIFIER\")\r\n box2 = box.box()\r\n if sec.collections_enable_global_smoothcorrection:\r\n box2.prop(sec,\"collections_global_smoothcorrection\")\r\n if sec.collections_enable_global_shrinkwrap:\r\n box2.prop(sec,\"collections_global_shrinkwrap\")\r\n if sec.collections_enable_global_mask:\r\n box2.prop(sec,\"collections_global_mask\")\r\n if sec.collections_enable_global_normalautosmooth:\r\n box2.prop(sec,\"collections_global_normalautosmooth\")\r\n \r\n else:\r\n box = layout.box()\r\n box.label(text=\"No section added.\",icon=\"ERROR\")\r\n \r\n\r\nclass PT_MenuCreator_Settings_Panel(MainPanel, bpy.types.Panel):\r\n bl_idname = \"PT_MenuCreator_Settings_Panel\"\r\n bl_label = \"Settings\"\r\n \r\n def draw(self, context):\r\n \r\n settings = bpy.context.scene.mc_settings\r\n \r\n layout = self.layout\r\n \r\n # Main Settings\r\n layout.label(text=\"Main Settings\",icon=\"SETTINGS\")\r\n box = layout.box()\r\n \r\n box.prop(settings,\"ms_editmode\")\r\n box.prop(settings,\"ms_debug\")\r\n box.prop(settings,\"ms_advanced\")\r\n \r\n # Menu specific settings\r\n layout.label(text=\"Menu Settings\",icon=\"SETTINGS\")\r\n box = layout.box()\r\n \r\n box.prop(settings,\"mss_name\")\r\n box.prop(settings,\"mss_obj_name\")\r\n \r\n layout.label(text=\"Reset functions\",icon=\"SETTINGS\")\r\n box = layout.box()\r\n \r\n box.operator('mc.cleanpropobj', text=\"Reset Object\", icon=\"ERROR\").reset = True\r\n box.operator('mc.cleanprop', text=\"Reset All Objects\", icon=\"ERROR\").reset = True\r\n\r\n# Handlers\r\n\r\n@persistent\r\ndef mc_scene_modification_handler(scene):\r\n \"\"\"Called at every modification done to the scene.\"\"\"\r\n \r\n for obj in bpy.data.objects:\r\n \r\n # Handler for linked custom properties\r\n for prop in obj.mc_properties:\r\n for link_prop in prop.linked_props:\r\n if '].[' in link_prop.path + '.' + link_prop.id:\r\n exec(link_prop.path + link_prop.id + '=' + prop.path + '.' + prop.id)\r\n else:\r\n exec(link_prop.path + '.' + link_prop.id + '=' + prop.path + '.' 
+ prop.id)\r\n \r\n # Part checking for changes in the list collection\r\n # This is needed to ensure a clean list against deletion of collections from the outliner\r\n for sec in obj.mc_sections:\r\n i = 0\r\n for el in sec.collections:\r\n if not hasattr(el.collection, 'name'):\r\n sec.collections.remove(i)\r\n i = i + 1\r\n\r\n\r\n# Register\r\n\r\nclasses = (\r\n MC_AddProperty,\r\n MC_LinkProperty,\r\n WM_MT_button_context,\r\n MC_RemoveProperty,\r\n MC_CleanAll,\r\n MC_CleanObject,\r\n MC_RemoveLinkedProperty,\r\n MC_PropertySettings,\r\n MC_SwapProperty,\r\n MC_AddSection,\r\n MC_AddCollection,\r\n MC_RemoveCollection,\r\n MC_SectionSettings,\r\n MC_SwapSection,\r\n MC_DeleteSection,\r\n MC_CollectionObjectVisibility,\r\n MC_InitialConfiguration,\r\n OUTLINER_MT_link_mcmenu,\r\n OUTLINER_MT_collection_mcmenu,\r\n PT_MenuCreator_InitialConfiguration_Panel,\r\n PT_MenuCreator_Panel,\r\n PT_MenuCreator_Settings_Panel\r\n)\r\n\r\ndef register():\r\n \r\n from bpy.utils import register_class\r\n for cls in classes:\r\n register_class(cls)\r\n \r\n bpy.types.WM_MT_button_context.append(menu_func)\r\n bpy.types.WM_MT_button_context.append(menu_func_link)\r\n bpy.types.OUTLINER_MT_collection.append(mc_collection_menu)\r\n \r\n # Handlers\r\n bpy.app.handlers.depsgraph_update_post.append(mc_scene_modification_handler)\r\n bpy.app.handlers.redo_post.append(mc_scene_modification_handler)\r\n bpy.app.handlers.undo_post.append(mc_scene_modification_handler)\r\n\r\ndef unregister():\r\n \r\n from bpy.utils import unregister_class\r\n for cls in reversed(classes):\r\n unregister_class(cls)\r\n \r\n bpy.types.WM_MT_button_context.remove(menu_func)\r\n bpy.types.WM_MT_button_context.remove(menu_func_link)\r\n bpy.types.OUTLINER_MT_collection.remove(mc_collection_menu)\r\n \r\n # Handlers\r\n bpy.app.handlers.depsgraph_update_post.remove(mc_scene_modification_handler)\r\n bpy.app.handlers.redo_post.remove(mc_scene_modification_handler)\r\n bpy.app.handlers.undo_post.remove(mc_scene_modification_handler)\r\n\r\nif __name__ == \"__main__\":\r\n register()\r\n ", "id": "1611471", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "menu_creator.py" } ]
0
sgpy
[ { "content": "import logging\n\nimport click\nimport os\n\nlogging.basicConfig(filename='app.log', level=logging.DEBUG)\n\n\ndef resolve_hosted_url(port, local=False):\n if local:\n url = \"127.0.0.1\"\n else:\n url = \"0.0.0.0\"\n\n hosted_url = \"{}:{}\".format(url, port)\n return hosted_url, url\n\n\[email protected]()\[email protected]('--port', type=int, default=5000, help=\"server port to run on\")\[email protected]('--backend_host_name', type=str, default=\"http://localhost\", help=\"Backend server url\")\[email protected]('--backend_host_port', type=int, default=5001, help=\"Backend server port\")\[email protected]('--local', type=bool, is_flag=False, help=\"Backend server port\")\ndef deploy_relay_server(port, backend_host_name, backend_host_port, local):\n \"\"\"Starts a relay server to communicate with dialogflow server\n\n Default: Runs on 5000 and connects to backend http://localhost:5001\n \"\"\"\n logger = logging.getLogger(__name__)\n\n logger.info(\"Deploying relay server on port: {}...\".format(port))\n hosted_url, url = resolve_hosted_url(port, local=local)\n logger.info(\"Deployed: {}\".format(hosted_url))\n\n if not backend_host_name.startswith('http'):\n backend_host_name = 'http://' + backend_host_name\n logger.info(\"Backend Host: {} Port: {}...\".format(backend_host_name, backend_host_port))\n os.environ['backend_host_name'] = backend_host_name\n os.environ['backend_host_port'] = str(backend_host_port)\n from assets.questbot import app\n app.run(host=url, port=port)\n\n\[email protected]()\[email protected]('--port', type=int, default=5001, help=\"server port to run on\")\[email protected]('--local', type=bool, is_flag=False, help=\"Backend server port\")\ndef deploy_backend_server(port, local):\n \"\"\"Starts a mock backend server to serve relay server\n\n Default: Runs on 5001\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info(\"Deploying mock backend server on port: {}...\".format(port))\n hosted_url, url = resolve_hosted_url(port, local=local)\n from backend.server import validate\n try:\n validate()\n except ValueError as e:\n logger.error(\"Unable to launch backend server\", exc_info=e)\n return -1\n from backend.server import app\n app.run(host=url, port=port)\n logger.info(\"Deployed: {}\".format(hosted_url))\n\[email protected]()\ndef generate_db():\n from backend.client import seed\n seed()\n", "id": "6951920", "language": "Python", "matching_score": 2.4226722717285156, "max_stars_count": 1, "path": "commands/server.py" }, { "content": "from setuptools import setup\n\nsetup(\n name='questalliance',\n version='0.1b',\n py_modules=['questalliance'],\n install_requires=[\n \"Click\",\n \"flask\",\n \"six\",\n \"dialogflow\",\n \"python-dotenv\",\n ],\n entry_points='''\n [console_scripts]\n deploy_relay_server=commands.server:deploy_relay_server\n deploy_backend_server=commands.server:deploy_backend_server\n generate_db=commands.server:generate_db\n ''',\n)", "id": "492777", "language": "Python", "matching_score": 0.6405646204948425, "max_stars_count": 1, "path": "setup.py" }, { "content": "\nimport csv\n\ndef generate(file_name):\n rs = []\n with open(file_name, encoding='utf-8') as f:\n for line in f:\n reader = csv.reader(f, dialect=\"excel\")\n for index, _ in enumerate(reader):\n rs.append(_)\n return rs\n\ndef load_sudent_data():\n rs = generate(file_name=\"./data/Indian-Female-Names.csv\") + \\\n generate(file_name=\"./data/Indian-Male-Names.csv\")\n return rs\n\n\ndef load_languages():\n rs = 
generate(file_name=\"./data/INDIAN_LANGUAGE.csv\")\n return rs\n\n\ndef load_courses():\n rs = generate(file_name=\"./data/courses.csv\")\n return rs\n\nif __name__ == '__main__':\n rs = load_courses()\n\n for i, data in enumerate(rs):\n print(\"{} = {}\".format(i, data))", "id": "7881708", "language": "Python", "matching_score": 0.1622437983751297, "max_stars_count": 1, "path": "backend/data.py" }, { "content": "import requests\nimport logging\nimport json\nimport os\n\n\nclass Endpoints(object):\n def __init__(self):\n self.BASE_URL = os.getenv('backend_host_name') + ':' + os.getenv('backend_host_port')\n self.service = {\n 'uploadSurveyResult': self.BASE_URL + '/quest_app/app/api/users/uploadSurveyResult/{user_id}',\n 'find_courses': self.BASE_URL + '/quest_app/app/api/users/getTagsCourse',\n 'user_info': self.BASE_URL + '/quest_app/app/api/users/get_student_data/{user_id}',\n }\n\n\n def _fetch_user_info(self, user_id):\n URL = self.service.get('user_info', '').format(user_id=user_id)\n logging.info('_fetch_user_info({user_id}), url: {url}'.format(user_id=user_id, url=URL))\n r = requests.get(url=URL)\n return r.json()\n\n\n def getNameFromID(self, user_id):\n user_name = self._fetch_user_info(user_id)['student_data']['stud_first_name']\n logging.info('[service] getNameFromID({user_id}): {user_name}'.format(user_id=user_id, user_name=user_name))\n return user_name\n\n\n def getSurveyStatus(self, user_id):\n survey_status = self._fetch_user_info(user_id)['student_data']['survey_status']\n logging.info('[service] getSurveyStatus({user_id}): {survey_status}'.format(user_id=user_id, survey_status=survey_status))\n return survey_status\n\n\n def saveSurveyResult(self, user_id, answers):\n URL = self.service.get('uploadSurveyResult', '').format(user_id=user_id)\n logging.info('[service] saveSurveyResult({user_id}), url: {url}'.format(user_id=user_id, url=URL))\n r = requests.post(url=URL, data=json.dumps(answers), headers={'Content-Type': 'application/json'})\n return True if r.status_code == 200 else False\n\n\n def find_courses(self, query):\n user_id = 261\n URL = self.service.get('find_courses', '').format(user_id=user_id)\n logging.info('[service] find_courses({user_id}), url: {url}'.format(user_id=user_id, url=URL))\n req = requests.post(URL, json=query)\n return req.json()\n\n\nendpoint = Endpoints()\n", "id": "8744091", "language": "Python", "matching_score": 2.3304953575134277, "max_stars_count": 1, "path": "assets/endpoints.py" }, { "content": "import logging\nimport os.path\nfrom uuid import uuid4\n\nfrom flask import Flask, request, make_response, jsonify\n\nfrom backend.client import survey_complete, search_courses, users_info, find_user_info\n\napp = Flask(__name__)\nlogging.basicConfig(filename='backend.log', level=logging.DEBUG)\n\n\[email protected]('/ping', methods=['POST', 'GET'])\ndef ping():\n from uuid import uuid4\n return \"Backend: {}\".format(str(uuid4()))\n\[email protected]('/quest_app/app/api/users/uploadSurveyResult/<user>', methods=['POST'])\ndef process(user):\n try:\n req_json = request.get_json(force=True)\n with open('survey.log', 'a+') as f:\n for qna in req_json.get('Q&A'):\n f.write('Q: %s \\n' % qna.get('Question'))\n f.write(' O: %s \\n' % qna.get('Options'))\n f.write(' A: %s \\n\\n' % qna.get('Answer'))\n f.write('=================================================\\n')\n\n user = int(user)\n survey_complete(user)\n return \"Thanks\"\n except Exception as e:\n logger = logging.getLogger(__name__)\n msg = \"Unknown user: {}\".format(user)\n 
logger.error(msg, exc_info=e)\n return msg\n\n\[email protected]('/quest_app/app/api/users/getTagsCourse', methods=['POST'])\ndef find_courses():\n req_json = request.get_json(force=True)\n tags = req_json.get(\"tags\")\n tags = tags.split(\",\")\n tags = [_.strip() for _ in tags]\n courses = search_courses(tags)\n resp = {\"status\": 1, \"message\": \"success\", \"data\": courses}\n\n return make_response(jsonify(resp))\n\n\[email protected]('/quest_app/app/api/users/get_student_data/<user>', methods=['GET'])\ndef user_info(user):\n user = int(user)\n user_info = find_user_info([user])\n info = user_info[user]\n\n resp = {\"status\": \"1\", \"student_data\": info}\n return resp\n\n\ndef validate():\n from backend.client import DB\n logger = logging.getLogger(__name__)\n if not os.path.exists(DB):\n msg = \"unable to start server due to missing db at path {}\".format(DB)\n logger.critical(msg)\n raise ValueError(msg)\n\n\ndef start():\n app.run(host='0.0.0.0', port=1234)\n validate()\n\nif __name__ == '__main__':\n start()\n", "id": "4498882", "language": "Python", "matching_score": 2.4116857051849365, "max_stars_count": 1, "path": "backend/server.py" }, { "content": "from uuid import uuid4\n\nfrom flask import Flask, request, make_response, jsonify, session\nfrom dotenv import load_dotenv\nimport os\nimport logging\nimport json\nimport collections\nimport requests\nimport os\nfrom assets.endpoints import endpoint\nfrom assets.Course import Course\n\n# Read env variables from .env file\ndotenv_path = os.path.join(os.path.dirname(__file__), '..', '.env')\nload_dotenv(dotenv_path)\n\n# Import dialogflow api\nimport dialogflow\n\napp = Flask(__name__)\n\n# Dialog flow entity client\nentity_client = dialogflow.EntityTypesClient()\nproject_name = 'newagent-fc3d4'\n\n\ndef get_proficiency_level ():\n entity_id = os.getenv('PROFICIENCY_ENTITY_ID')\n name = entity_client.entity_type_path('newagent-fc3d4', entity_id)\n entity = entity_client.get_entity_type(name)\n values = []\n for ent in entity.entities:\n values.append(ent.value)\n return values\n\n\ndef get_find_job_parameter_values ():\n entity_id = os.getenv('FIND_JOB_ENTITY_ID')\n name = entity_client.entity_type_path('newagent-fc3d4', entity_id)\n entity = entity_client.get_entity_type(name)\n values = []\n for ent in entity.entities:\n values.append(ent.value)\n return values\n\n\ndef get_start_own_business_parameter_values ():\n entity_id = os.getenv('START_OWN_BUSINESS_ENTITY_ID')\n name = entity_client.entity_type_path('newagent-fc3d4', entity_id)\n entity = entity_client.get_entity_type(name)\n values = []\n for ent in entity.entities:\n values.append(ent.value)\n return values\n\n\n\n'''\nSurvey question flow:\n\n Q1: Hi! May I have your user ID please?\n A1: typing\n\n\n Q2: Hello {user_name}! To help you with your Quest App journey, I need to get some more information. Is that fine with you?\n A2: 1. Yes / 2. No\n Yes: Jump to Q3\n No : I guess, you already know what you should be learning from the platform for now. Let me know if you need my help in future.\n\n\n Q3: How did you come to know about Quest App platform?\n A3: 1. My teacher told me to use it\n 2. I found the app on playstore and downloaded it\n 3. A friend told me about it\n 4. I saw the poster about the App\n\n\n Q4: What is your favourite thing to do? If not in the list, please type.\n A4: 1. Play a sport\n 2. Go to training center/college/school\n 3. Spend time with friends\n 4. Read a book\n 5. 
Travel to new places\n\n\n Q5: Have you learnt anything using digital learning platform before?\n A5: 1. Yes / 2. No\n Yes: Jump to Q6\n No : Jump to Q7\n\n\n Q6: What websites do you use?\n A6: typing\n\n\n Q7: Do you have your own mobile phone?\n A7: 1. Yes / 2. No\n Yes: Jump to Q9\n No : Jump to Q8\n\n\n Q8: Whose phone are you using?\n A8: 1. My Mother's\n 2. My Father's\n 3. My elder Brother's\n 4. My elder Sister's\n 5. My Friend's\n 6. Another relative in the house\n\n\n Q9: Do you have a facebook_account?\n A9: 1. Yes / 2. No\n\n\nQ10: Do you have a Whatsapp_account?\nA10: 1. Yes / 2. No\n\n\nQ11: What are the languages you know? You can enter multiple options separated by, like - English, Hindi.\nA11: typing\n\n\nQ12: Thanks for completing the survey. I can help you choose the right courses on Quest App. Would you like to look at the Help topics?\nA12: 1. Yes / 2. No\n Yes: Jump to Q13\n No : Thanks. Have a good day. You can call me back just type 'Hi'\n\n\nQ13: Here are the help topics. Please select the one you would like to learn\nA13: 1. English Communication\n 2. IT Skills\n 3. Find A Job\n 4. Start My Own Business\n\n'''\n\n\ndef _telegram_payload_wrapper(question, options):\n logging.info('_telegram_payload_wrapper')\n telegram = {\n 'payload':{\n 'telegram':{\n 'text': question,\n 'reply_markup': {\n 'remove_keyboard': False if options else True,\n 'one_time_keyboard': True,\n 'resize_keyboard': True,\n 'keyboard': [\n # [\n # {\n # \"text\": \"YesAAAAAA\",\n # \"callback_data\": \"YES\"\n # }\n # ],\n # [\n # {\n # \"callback_data\": \"NO\",\n # \"text\": \"NoBBBBBBB\"\n # }\n # ],\n ]\n },\n }\n },\n 'platform': 'TELEGRAM',\n }\n\n for op in options:\n telegram['payload']['telegram']['reply_markup']['keyboard'].append([{ 'text' : op}])\n return telegram\n\ndef _suggestion_payload_wrapper(options):\n logging.info('_suggestion_payload_wrapper')\n feedback = {\n \"quickReplies\": {\n \"quickReplies\": options\n }\n }\n return feedback\n\n\ndef welcome(req_json):\n logging.info('Welcome')\n req_json = request.get_json(force=True)\n logging.info('RESET QUEST CONTEXT')\n reset_context(req_json)\n return question_and_answer(req_json)\n\n\ndef id_confirmation(req_json):\n logging.info('id_confirmation')\n question, user_id = _fetch_user_input(req_json) # further processing\n text = req_json.get('queryResult').get('fulfillmentText')\n username = endpoint.getNameFromID(user_id)\n answers = _give_me_cache_space(req_json)\n answers.update({'user_id': user_id,\n 'user_name': username})\n\n text = req_json.get('queryResult').get('fulfillmentText')\n if endpoint.getSurveyStatus(user_id) == '1':\n event_context = {\n 'name': 'trigger_help',\n 'parameters': {\n 'username': username\n }\n }\n req_json.update({'followupEventInput': event_context})\n else:\n greeting = 'Hello {0}! 
'.format(username) + text\n req_json['queryResult']['fulfillmentText'] = greeting\n\n return question_and_answer(req_json)\n\n\ndef get_payload_from_message(req_json):\n fullfilmentMessages = req_json.get('queryResult').get('fulfillmentMessages')\n # Grab the payload from the message\n if not fullfilmentMessages:\n return []\n payload = [msg for msg in fullfilmentMessages if msg.get('payload')]\n if len(payload) > 0:\n payload = payload[0].get('payload')\n logging.info('get_payload_from_message: %s' % payload)\n return payload\n return None\n\n\ndef get_quick_replies_from_messages(req_json):\n fullfilmentMessages = req_json.get('queryResult').get('fulfillmentMessages')\n # Grab the payload from the message\n if not fullfilmentMessages:\n return []\n payload = [msg for msg in fullfilmentMessages if msg.get('payload')]\n quickReplies = []\n if len(payload) > 0:\n quickReplies = payload[0].get('payload').get('quickReplies')\n return quickReplies\n\n\ndef validate_parameters (parameters):\n valid = True\n for key, value in parameters.items():\n if value == '' or value is None:\n valid = False\n return valid\n\n\ndef get_next_parameter (parameters):\n param = False\n for key, value in parameters.items():\n if value == '' or value is None:\n param = key\n break\n return param\n\n\ndef question_and_answer(req_json):\n # Construct a default response if no intent match is found\n query_result = req_json.get('queryResult')\n followupEvent = req_json.get('followupEventInput')\n\n quick_replies = get_quick_replies_from_messages(req_json)\n bot_response = {'output_contexts': req_json.get('queryResult').get('outputContexts')}\n bot_response['fulfillmentMessages'] = query_result.get('fulfillmentMessages')\n bot_response.update({'followupEventInput': followupEvent })\n action = query_result.get('action')\n parameters = query_result.get('parameters')\n logging.info('question_and_answer, Act: %s Params: %s' % (action, parameters))\n payload = get_payload_from_message(req_json)\n courses = None\n\n if followupEvent is None and action == 'ShowHelpTopics':\n logging.info('Action: ShowHelpTopics')\n event_context = {\n 'name': 'trigger_help',\n }\n bot_response.update({'followupEventInput': event_context})\n\n if action == 'ShowCourses' and validate_parameters(parameters):\n # Check if payload contains dictionary of tags\n # payload = { tags: '#Understanding self' }\n # OR\n # payload = { tags: { 'Career planning': \"#Understanding self\" }}\n query = payload\n tags = payload.get('tags')\n\n if (not isinstance(tags, str)):\n # Create tags for each parameter\n for key, value in parameters.items():\n query['tags'] = tags.get(value) if tags.get(value) is not None else ''\n\n logging.info('Finding course for ', query)\n\n courses = endpoint.find_courses(query)\n for course in courses.get('data'):\n logging.info(' course: %s' % course)\n courseobj = Course(course.get('tk_pk_id'),\n course.get('tk_tags'),\n course.get('tk_name'),\n course.get('tk_description'),\n course.get('language'),\n course.get('url'),\n course.get('tk_image'))\n response = courseobj.get_card_response('TELEGRAM')\n bot_response['fulfillmentMessages'].append(response)\n\n else:\n logging.info('NOT ShowCourses')\n parameter_to_ask = get_next_parameter(parameters)\n if (parameter_to_ask == 'ProficiencyLevel'):\n quick_replies = get_proficiency_level()\n\n if (parameter_to_ask == 'FindJob'):\n quick_replies = get_find_job_parameter_values()\n\n if (parameter_to_ask == 'StartOwnBusiness'):\n quick_replies = 
get_start_own_business_parameter_values()\n\n next_question = query_result.get('fulfillmentText')\n logging.info('question_and_answer, Next Question: %s' % (next_question))\n # we should copy fulfillmentText into fulfillmentMessages together.\n for item in bot_response['fulfillmentMessages']:\n if 'text' in item:\n item['text']['text'] = [next_question]\n logging.info(' fulfillmentMessages item updated')\n\n # For Quick replies\n bot_response['fulfillmentMessages'].append(_suggestion_payload_wrapper(quick_replies))\n\n # For Telegram\n if not courses:\n telegram_response = _telegram_payload_wrapper(next_question, quick_replies)\n bot_response['fulfillmentMessages'].append(telegram_response)\n return bot_response\n\n\nintent_map = {\n 'Default Welcome Intent': welcome,\n 'ID Confirmation': id_confirmation,\n 'Source Confirmation': question_and_answer,\n 'Source Invalid': question_and_answer,\n 'Survey Confirmation': question_and_answer,\n 'Survey Invalid': question_and_answer,\n 'Fav Confirmation': question_and_answer,\n 'Fav Invalid': question_and_answer,\n 'Digital Confirmation': question_and_answer,\n 'Digital Negation': question_and_answer,\n 'Digital Invalid': question_and_answer,\n 'Digital Details': question_and_answer,\n 'Mobile Confirmation': question_and_answer,\n 'Mobile Invalid': question_and_answer,\n 'Mobile Negation': question_and_answer,\n 'Mobile Others': question_and_answer,\n 'Facebook Confirmation': question_and_answer,\n 'Facebook Invalid': question_and_answer,\n 'Whatsapp Confirmation': question_and_answer,\n 'Whatsapp Invalid': question_and_answer,\n 'Language Confirmation': question_and_answer,\n }\n\ndef _give_me_cache_space(req_json):\n output_contexts = req_json.get('queryResult').get('outputContexts')\n\n # context_name pattern: 'projects/$bot_id/agent/sessions/$session_id/contexts/quest_context'\n prefix = output_contexts[0]['name'].split('/')[:-1]\n quest_context_name = '/'.join(prefix + ['quest_context'])\n\n quest_context = None\n for context in output_contexts:\n if context.get('name', '') == quest_context_name:\n quest_context = context\n break\n\n if not quest_context:\n logging.info('context: %s not found, build a new one ' % quest_context_name)\n quest_context = {\n 'name': quest_context_name,\n 'lifespanCount': 99,\n 'parameters': {'answers': {}, }\n }\n output_contexts.append(quest_context)\n return quest_context['parameters']['answers']\n\n\ndef saveQuestContext(req_json, user_input):\n logging.info('saveQuestContext: {0}'.format(user_input))\n answers = _give_me_cache_space(req_json)\n answers.update(user_input)\n\n\ndef reset_context(req_json):\n logging.info('reset_context')\n answers = _give_me_cache_space(req_json)\n answers.clear()\n\n\ndef _fetch_user_input(req_json):\n question = ','.join(req_json.get(u'queryResult').get(u'parameters').keys())\n answer = req_json.get(u'queryResult').get(u'queryText')\n logging.info('_fetch_user_input, Q: %s, A: %s' % (question, answer))\n return question, answer\n\n\ndef _fetch_intent(req_json):\n intent = req_json.get(\"queryResult\").get(\"intent\").get(\"displayName\")\n logging.info('_fetch_intent: %s' % intent)\n return intent\n\n\[email protected]('/ping', methods=['GET', 'POST'])\ndef ping():\n return \"Relay: {}\".format(str(uuid4()))\n\n\ndef _cache_survey_QOA(req_json, key_intent):\n # try to cache intents flow and Question,Options,Answer together\n answers = _give_me_cache_space(req_json)\n if 'intents_flow' not in answers:\n answers['intents_flow'] = []\n\n last_intent = answers['intents_flow'][-1] 
if answers['intents_flow'] else None\n answers['intents_flow'].append(key_intent)\n\n if last_intent:\n question, user_input = _fetch_user_input(req_json)\n answers[last_intent]['Answer'] = user_input\n next_question = req_json.get('queryResult').get('fulfillmentText')\n answers[key_intent] = { 'Question': next_question,\n 'Options': get_quick_replies_from_messages(req_json),\n 'Answer': None }\n\n\[email protected]('/api/endpoint', methods=['GET', 'POST'])\ndef questbot():\n \"\"\"\n Json structure:\n {'fulfillmentMessages': [{'text': {'text': ['How did you come to know about Quest App platform?']}},\n {'quickReplies': {'quickReplies': ['1. My teacher told me to use it',\n '2. I found the app on playstore and downloaded it',\n '3. A friend told me about it',\n '4. I saw the poster about the App']}}],\n 'outputContexts': [[{'lifespanCount': 1,\n 'name': 'projects/qabotlocal-voalga/agent/sessions/35938982-36c6-8225-3b09-1933c06a52a9/contexts/awaiting_survey'},\n {'lifespanCount': 98,\n 'name': 'projects/qabotlocal-voalga/agent/sessions/35938982-36c6-8225-3b09-1933c06a52a9/contexts/quest_context',\n 'parameters': {'answers': {}}}]]}\n \"\"\"\n logging.info('/////////// Questbot %s ////////////' % os.getenv('backend_host_name'))\n req_json = request.get_json(force=True)\n intent = _fetch_intent(req_json)\n\n\n if intent in intent_map:\n response_json = intent_map.get(intent)(req_json)\n output_contexts = req_json.get('queryResult').get('outputContexts')\n response_json.update({'output_contexts': output_contexts})\n _cache_survey_QOA(req_json, key_intent=intent)\n\n if intent == 'Language Confirmation':\n answers = _give_me_cache_space(req_json)\n user_id = answers.get('user_id')\n\n copy_answer = {'Q&A': []}\n for x in answers['intents_flow'][:-1]:\n copy_answer.get('Q&A').append(answers.get(x))\n endpoint.saveSurveyResult(user_id, copy_answer)\n\n return make_response(jsonify(response_json))\n\n # Construct a default response if no intent match is found\n bot_response = question_and_answer(req_json)\n return jsonify(bot_response)\n\n\nif __name__ == '__main__':\n logging.basicConfig(filename='app.log',level=logging.DEBUG)\n app.run(host='0.0.0.0', port=5000)\n\n", "id": "11136533", "language": "Python", "matching_score": 5.631105899810791, "max_stars_count": 1, "path": "assets/questbot.py" }, { "content": "from flask import Flask, request, make_response, jsonify, session\nimport dialogflow\nimport logging\nimport json\nimport collections\nimport requests\nimport os\nfrom endpoints import endpoint\nfrom Course import Course\nimport json\nfrom collections import namedtuple\n\napp = Flask(__name__)\n\ndef get_quick_replies_payload (req_json):\n fullfilmentMessages = req_json.get('queryResult').get('fulfillmentMessages')\n # Grab the payload from the message\n if not fullfilmentMessages:\n return []\n payload = [msg for msg in fullfilmentMessages if msg.get('payload')]\n quickReplies = []\n if len(payload) > 0:\n quickReplies = payload[0].get('payload').get('quickReplies')\n return quickReplies\n\n\[email protected]('/init', methods=['POST'])\ndef init():\n\n #data = request.get_json(silent=True)\n data = {\n 'tags': '#Understanding Self'\n }\n req_json = request.get_json(force=True)\n query_result = req_json.get('queryResult')\n bot_response = {'output_contexts': req_json.get('queryResult').get('outputContexts')}\n bot_response['fulfillmentMessages'] = query_result.get('fulfillmentMessages')\n payload= [msg for msg in query_result.get('fulfillmentMessages') if msg.get('payload')]\n 
print(payload)\n\n\n\n\n data = {\n 'tags': '#Understanding Self'\n }\n courses = endpoint.find_courses(data)\n for course in courses.get('data'):\n\n courseobj= Course(course.get('tk_pk_id'),\n course.get('tk_tags'),\n course.get('tk_name'),\n course.get('tk_description'),\n course.get('language'),\n course.get('url'),\n course.get('tk_image'))\n response =courseobj.get_card_response('TELEGRAM')\n bot_response['fulfillmentMessages'].append(response)\n print (bot_response)\n return jsonify(bot_response)\n\n\nif __name__ == '__main__':\n logging.basicConfig(filename='app.log',level=logging.DEBUG)\n app.run(host='0.0.0.0', port=5000)\n", "id": "9990379", "language": "Python", "matching_score": 3.1563987731933594, "max_stars_count": 1, "path": "assets/CourseFulfillment.py" }, { "content": "from assets.endpoints import endpoint\nclass Course:\n def __init__(self,tk_pk_id,tk_tags,tk_name,tk_description,language,url,tk_image):\n self.name=tk_name\n self.tags = tk_tags\n self.image=tk_image\n self.description=tk_description\n self.url =url\n\n\n def get_card_response(self, platform):\n platform_obj = self.get_supported_pltform(platform)\n return platform_obj.card_response(self)\n\n\n def get_supported_pltform(self,platform):\n platform_dct = {\n 'TELEGRAM': Telegram(),\n 'FACEBOOK': Facebook()\n }\n\n return platform_dct.get(platform, lambda: InvalidPlatform() )\n\n\n\n\nclass Telegram:\n\n\n\n def card_response(self,course):\n\n return {\n 'card': {\n 'buttons': [\n {\n 'postback': course.url,\n 'text': 'Play'\n }\n ],\n 'imageUri': course.image,\n 'title': course.name\n },\n 'platform': 'TELEGRAM'\n }\n\n\nclass Facebook:\n\n\n def card_response(self):\n print(\"in facebook\")\n\nclass InvalidPlatform:\n\n def __init__(self,course):\n self.course =course\n\n def card_response(self):\n print(\"in telegram\")\n\n\n\n\ndef main():\n\n bot_response=[]\n #bot_response['fulfillmentMessages']=''\n data = {\n 'tags': '#Understanding Self'\n }\n courses = endpoint.find_courses(data)\n for course in courses.get('data'):\n courseobj = Course(course.get('tk_pk_id'),\n course.get('tk_tags'),\n course.get('tk_name'),\n course.get('tk_description'),\n course.get('language'),\n course.get('url'),\n course.get('tk_image'))\n response = courseobj.get_card_response('TELEGRAM')\n bot_response['fulfillmentMessages'].append(response)\n\n\nif __name__ == \"__main__\": main()\n\n #Course course = Course('My course', '' ,'Enlgi')", "id": "716249", "language": "Python", "matching_score": 0.5736780762672424, "max_stars_count": 1, "path": "assets/Course.py" }, { "content": "import os\nimport random\nfrom functools import partial\nimport os\nfrom backend.admin import connection\n\nfrom backend.data import load_sudent_data, load_languages, load_courses\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\nDB = dir_path + '/questalliance.db'\nDB = 'questalliance.db'\n\nCOURSES = [\"tk_pk_id\", \"tk_tags\", \"tk_name\", \"tk_description\", \"language\", \"url\", \"tk_image\"]\n\n@connection\ndef create_user_table(conn):\n sql = \\\n \"\"\"\n CREATE TABLE IF NOT EXISTS users (stud_pk_id INTEGER PRIMARY KEY AUTOINCREMENT, stud_first_name VARCHAR(50), stud_gender char(1), nationality varchar(50), survey_status char(1) DEFAULT \"0\");\n \"\"\"\n conn.execute(sql);\n pass\n\n@connection\ndef create_course_table(conn):\n sql = \\\n \"\"\"\n CREATE TABLE IF NOT EXISTS courses (tk_pk_id INTEGER PRIMARY KEY AUTOINCREMENT, tk_tags VARCHAR(50) DEFAULT \"\", tk_name VARCHAR(50) DEFAULT \"\", tk_description VARCHAR(50) DEFAULT 
\"\", language VARCHAR(50) DEFAULT \"\", url VARCHAR(100) DEFAULT \"\", tk_image BLOB DEFAULT \"\");\n \"\"\"\n conn.execute(sql);\n pass\n\n\n@connection\ndef create_language_table(conn):\n sql = \\\n \"\"\"\n CREATE TABLE IF NOT EXISTS languages (lang_pk_id INTEGER PRIMARY KEY AUTOINCREMENT, language VARCHAR(50));\n \"\"\"\n conn.execute(sql);\n\n\n@connection\ndef create_language_known(conn):\n sql = \\\n \"\"\"\n CREATE TABLE IF NOT EXISTS languages_known (stud_pk_id INTEGER, lang_pk_id INTEGER, FOREIGN KEY (stud_pk_id) REFERENCES users(stud_pk_id));\n \"\"\"\n conn.execute(sql);\n\n\n@connection\ndef upload_student_data(conn):\n create_user_table(DB)\n rs = load_sudent_data()\n sql = \\\n \"\"\"\n INSERT INTO users (stud_first_name, stud_gender, nationality) VALUES (?, ?, ?); \n \"\"\"\n with conn:\n for _ in rs:\n conn.execute(sql, _)\n return len(rs)\n\n\n@connection\ndef upload_languages(conn):\n create_language_table(DB)\n\n rs = load_languages()\n sql = \\\n \"\"\"\n INSERT INTO languages (language) VALUES (?); \n \"\"\"\n with conn:\n for _ in rs:\n conn.execute(sql, (_[1],))\n\n return len(rs)\n\n\n@connection\ndef add_language_known(conn, elems):\n create_language_known(conn)\n sql = \\\n \"\"\"\n INSERT INTO languages_known (stud_pk_id, lang_pk_id) VALUES (?, ?);\n \"\"\"\n with conn:\n for elem in elems:\n conn.execute(sql, elem)\n\n\n@connection\ndef users_info(conn, users):\n sql = \\\n \"\"\"\n SELECT stud_pk_id, stud_first_name, survey_status FROM users WHERE stud_pk_id in ({});\n \"\"\"\n\n sql = sql.format(str(users)[1:-1])\n # print(sql)\n cols = tuple(map(lambda x: x.strip(), \"stud_pk_id, stud_first_name, survey_status\".split(\",\")))\n all_users = {}\n\n for data in conn.execute(sql):\n all_users[data[0]] = dict(zip(cols, data))\n all_users[data[0]]['languages_known'] = []\n\n sql = \\\n \"\"\"\n SELECT x.stud_pk_id, language FROM(\n (SELECT stud_pk_id, lang_pk_id FROM languages_known WHERE stud_pk_id in ({}) ) x\n INNER JOIN \n languages y\n ON x.lang_pk_id = y.lang_pk_id\n );\n \"\"\"\n\n sql = sql.format(str(users)[1:-1])\n\n for user_id, language in tuple(conn.execute(sql)):\n all_users[user_id]['languages_known'].append(language)\n\n return all_users\n\n\ndef upload_language_known(db, total_students, total_languages):\n rs = []\n max_language_known = 5\n for i in range(1, total_students + 1):\n language_known = random.randint(1, max_language_known)\n for _ in range(0, language_known):\n j = random.randint(1, total_languages + 1)\n rs.append((i, j))\n add_language_known(db, rs)\n\n\ndef validate_seed(total_students, view=10):\n users = [random.randint(1, total_students + 1) for _ in range(0, view)]\n for _, user_info in users_info(DB, users).items():\n print(user_info)\n\nfind_user_info = partial(users_info, DB)\n\ndef seed():\n if os.path.exists(DB):\n os.remove(DB)\n total_students = upload_student_data(DB)\n total_languages = upload_languages(DB)\n upload_language_known(DB, total_students, total_languages)\n\n upload_course_data(DB)\n return total_students\n\n\n@connection\ndef mark_survey_complete(conn, user_ids):\n if not user_ids:\n return\n if not isinstance(user_ids, (list, tuple)):\n user_ids = [user_ids]\n\n sql = \\\n \"\"\"\n UPDATE users SET survey_status = \"1\" WHERE stud_pk_id = ?\n \"\"\"\n with conn:\n for user_id in user_ids:\n conn.execute(sql, (user_id,))\n\nsurvey_complete = partial(mark_survey_complete, DB)\n\n@connection\ndef upload_course_data(conn):\n create_course_table(DB)\n data = load_courses()\n sql = \\\n \"\"\"\n INSERT INTO 
courses ({}) VALUES ({})\n \"\"\"\n\n\n\n\n for row in data:\n\n if len(row) > len(COURSES):\n row = row[:len(COURSES)]\n\n # row = [_.strip() for _ in row]\n data_size = len(row)\n cols = COURSES[0:data_size]\n cols_placeholders = \",\".join([\"?\" for _ in range(0, len(cols))])\n sql = sql.format(str(cols)[1:-1], cols_placeholders)\n with conn:\n conn.execute(sql, row)\n@connection\ndef courses(conn, tags):\n\n\n sql = \\\n \"\"\"\n SELECT * FROM courses WHERE tk_tags IN ({})\n \"\"\"\n sql = sql.format(str(tags)[1:-1])\n print(sql)\n with conn:\n rs = conn.execute(sql)\n\n result = []\n for _ in rs:\n result.append(dict(zip(COURSES, _)))\n return result\n\nsearch_courses = partial(courses, DB)\n\ndef main():\n total_students = seed()\n #pass\n\n\n\n\nif __name__ == '__main__':\n main()\n", "id": "5449403", "language": "Python", "matching_score": 2.1241729259490967, "max_stars_count": 1, "path": "backend/client.py" }, { "content": "import sqlite3\nimport logging\nfrom functools import partial, wraps\n\n\ndef connect(path_to_db, verbose=False):\n try:\n db = sqlite3.connect(path_to_db)\n\n # Apply pragma to enforce foreign keys\n db.execute('pragma foreign_keys = 1;')\n\n return db\n\n except Exception as e:\n logger = logging.getLogger(__name__)\n logger.exception('Unable to connect to db', exc_info=e)\n if verbose:\n msg = \"Unable to open db: {} Error: {} reason: {}. Probably missing folder or insufficient permission\"\n raise RuntimeError(msg.format(path_to_db, e.__class__.__name__, e))\n\n\ndef initialise(path_to_db, verbose=False):\n from os.path import split\n from os.path import join\n from os.path import expanduser\n from os import mkdir\n from os.path import exists\n home, _ = split(path_to_db)\n home = expanduser(home)\n if not exists(home):\n mkdir(home)\n path_to_db = join(home, _)\n connect(path_to_db, verbose=verbose)\n return path_to_db\n\n\ndef connection(func=None):\n if func is None:\n return partial(connection)\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n\n conn = args[0] if args else None\n if isinstance(conn, str):\n conn = connect(conn)\n\n return func(conn, *args[1:], **kwargs)\n\n return wrapper\n", "id": "12575487", "language": "Python", "matching_score": 1.2197133302688599, "max_stars_count": 1, "path": "backend/admin.py" } ]
2.227334
rahulmoundekar
[ { "content": "from flask import Flask, request, render_template, redirect\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy.exc import IntegrityError\n\napp = Flask(__name__)\n\napp.config.from_object('settings.Config')\ndb = SQLAlchemy(app)\n\n\nclass Person(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(80), nullable=False)\n mobile = db.Column(db.String(12), nullable=False)\n license = db.relationship('License', backref=db.backref('person', uselist=False), cascade='all, delete-orphan',\n lazy=True,\n uselist=False) # delete parent record in one to many relationship in flask cascade='all, delete-orphan'\n\n def __repr__(self):\n return f'Person: {self.id}, {self.name}, {self.mobile}'\n\n\nclass License(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n license_number = db.Column(db.String(80), nullable=False)\n issue_date = db.Column(db.DateTime, nullable=False)\n expiry_date = db.Column(db.DateTime, nullable=False)\n\n person_id = db.Column(db.Integer, db.ForeignKey('person.id'), unique=True, nullable=False)\n\n def __repr__(self):\n return f'License: {self.id}, {self.license_number}, {self.issue_date}, {self.expiry_date}'\n\n\[email protected](\"/\", methods=['GET', 'POST'])\[email protected](\"/<int:person_id>\", methods=['GET'])\ndef PersonHome(person_id=None):\n if request.method == 'POST':\n # person data\n pid = request.form.get('id')\n name = request.form.get('name')\n mobile = request.form.get('mobile')\n # license data\n license_number = request.form.get('license_number')\n issue_date = request.form.get('issue_date')\n expiry_date = request.form.get('expiry_date')\n\n if pid: # if id present update records\n person = Person.query.filter_by(id=pid).first()\n person.name = name\n person.mobile = mobile\n\n person.license.license_number = license_number\n person.license.issue_date = issue_date\n person.license.expiry_date = expiry_date\n\n db.session.commit()\n else:\n # id not None save record\n try:\n person_entry = Person(name=name, mobile=mobile)\n license_entry = License(license_number=license_number, issue_date=issue_date, expiry_date=expiry_date)\n person_entry.license = license_entry\n db.session.add(person_entry)\n db.session.commit()\n except IntegrityError as e:\n print(e, 'Something went wrong please try again later')\n person = None\n licenses = None\n if person_id: # load record form edit form data\n person = Person.query.filter_by(id=person_id).first()\n licenses = License.query.filter_by(person_id=person_id).first()\n persons = Person.query.all()\n\n return render_template('index.html', persons=persons, person=person, license=licenses)\n\n\[email protected](\"/delete/<int:person_id>\", methods=['GET'])\ndef deletePerson(person_id):\n try:\n person = Person.query.filter_by(id=person_id).first()\n print(person)\n if person:\n db.session.delete(person)\n db.session.commit()\n else:\n print('Could not find any note to delete')\n except IntegrityError as e:\n print(e, 'Something went wrong please try again later')\n return redirect(\"/\")\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n", "id": "3779902", "language": "Python", "matching_score": 4.586611270904541, "max_stars_count": 1, "path": "PersonLicenseManagement.py" }, { "content": "from flask import Flask, render_template, request, redirect\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:root@localhost/flaskapp'\ndb = SQLAlchemy(app)\n\n\nclass Employee(db.Model):\n id = 
db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(80), nullable=False)\n email = db.Column(db.String(20), nullable=False)\n contact = db.Column(db.String(12), nullable=False)\n\n\[email protected](\"/\", methods=['GET', 'POST'])\[email protected](\"/<int:employee_id>\", methods=['GET'])\ndef employeeHome(employee_id=None):\n if request.method == 'POST':\n eid = request.form.get('id')\n name = request.form.get('name')\n email = request.form.get('email')\n contact = request.form.get('contact')\n if eid: # if id present update records\n employee = Employee.query.filter_by(id=eid).first()\n employee.name = name\n employee.email = email\n employee.contact = contact\n db.session.commit()\n else:\n # id not None save record\n entry = Employee(name=name, contact=contact, email=email)\n db.session.add(entry)\n db.session.commit()\n employee = None\n if employee_id: # load record form edit form data\n employee = Employee.query.filter_by(id=employee_id).first()\n\n employees = Employee.query.all() # get list of records\n return render_template('index.html', employees=employees, employee=employee)\n\n\[email protected](\"/delete/<int:employee_id>\", methods=['GET'])\ndef deleteEmployee(employee_id):\n employee = Employee.query.filter_by(id=employee_id).first()\n db.session.delete(employee)\n db.session.commit()\n return redirect(\"/\")\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n", "id": "12445110", "language": "Python", "matching_score": 2.7608890533447266, "max_stars_count": 1, "path": "EmployeeManagementSystem.py" }, { "content": "# configuration\nclass Config:\n DEBUG = True\n # db\n SQLALCHEMY_DATABASE_URI = 'mysql://root:root@localhost/djangoapp'\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n", "id": "12594491", "language": "Python", "matching_score": 0.38773247599601746, "max_stars_count": 1, "path": "settings.py" } ]
2.760889
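The PersonLicenseManagement.py entry above builds a one-to-one Person/License pair in Flask-SQLAlchemy (uselist=False plus cascade='all, delete-orphan'). Below is a minimal sketch of that same pattern in plain SQLAlchemy with an in-memory SQLite database, so it runs without Flask or the MySQL DSN from the row; the model and column names mirror the entry, everything else is illustrative and assumes SQLAlchemy 1.4+.

from sqlalchemy import create_engine, Column, Integer, String, ForeignKey
from sqlalchemy.orm import declarative_base, relationship, sessionmaker

Base = declarative_base()

class Person(Base):
    __tablename__ = "person"
    id = Column(Integer, primary_key=True)
    name = Column(String(80), nullable=False)
    # one-to-one: uselist=False keeps the attribute scalar, and
    # "all, delete-orphan" removes the License row when its Person is deleted
    license = relationship("License", backref="person",
                           uselist=False, cascade="all, delete-orphan")

class License(Base):
    __tablename__ = "license"
    id = Column(Integer, primary_key=True)
    license_number = Column(String(80), nullable=False)
    # unique foreign key enforces at most one License per Person
    person_id = Column(Integer, ForeignKey("person.id"), unique=True, nullable=False)

engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)

with Session() as session:
    person = Person(name="Alice")
    person.license = License(license_number="ABC-123")
    session.add(person)
    session.commit()

    session.delete(person)                    # cascades to the orphaned License row
    session.commit()
    print(session.query(License).count())     # 0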
skookygoost
[ { "content": "import random\r\nfrom tkinter import *\r\nimport string\r\n\r\n\r\ndef generate_password():\r\n password = []\r\n for i in range(5):\r\n alpha = random.choice(string.ascii_letters)\r\n symbol = random.choice(string.punctuation)\r\n numbers = random.choice(string.digits)\r\n password.append(alpha)\r\n password.append(symbol)\r\n password.append(numbers)\r\n\r\n y = \"\".join(str(x) for x in password)\r\n lbl.config(text=y)\r\n\r\n\r\nroot = Tk()\r\nroot.title(\"PASSWORD GENERATOR\")\r\nroot.geometry(\"250x200\")\r\nbtn = Button(root, text=\"Generate Password\", command=generate_password)\r\nbtn.grid(row=2, column=2)\r\nlbl = Label(root, font=(\"times\", 15, \"bold\"))\r\nlbl.grid(row=4, column=2)\r\nroot.mainloop()", "id": "12725170", "language": "Python", "matching_score": 1.7470484972000122, "max_stars_count": 0, "path": "password.py" }, { "content": "import os\r\nimport random\r\nimport string\r\nimport time\r\nfrom discord_webhook import DiscordWebhook\r\n\r\ntry: # Check if the requrements have been installed\r\n from discord_webhook import DiscordWebhook # Try to import discord_webhook\r\nexcept ImportError: # If it chould not be installed\r\n input(f\"Module discord_webhook not installed, to install run '{'py -3' if os.name == 'nt' else 'python3.8'} -m pip install discord_webhook'\\nPress enter to exit\") # Tell the user it has not been installed and how to install it\r\n exit() # Exit the program\r\ntry: # Setup try statement to catch the error\r\n import requests # Try to import requests\r\nexcept ImportError: # If it has not been installed\r\n input(f\"Module requests not installed, to install run '{'py -3' if os.name == 'nt' else 'python3.8'} -m pip install requests'\\nPress enter to exit\")# Tell the user it has not been installed and how to install it\r\n exit() # Exit the program\r\n\r\n\r\nclass NitroGen: # Initialise the class\r\n def __init__(self): # The initaliseaiton function\r\n self.fileName = \"Nitro Codes.txt\" # Set the file name the codes are stored in\r\n\r\n def main(self): # The main function contains the most important code\r\n os.system('cls' if os.name == 'nt' else 'clear') # Clear the screen\r\n\r\n print(\"\"\" ,-----. ,--. ,--.,--. ,--.,--. ,--.,------.\\n' .-. '| ,'.| || | | || ,'.| || .---' \\n| | | || |' ' || | | || |' ' || `--, \\n' '-' '| | ` || '--.| || | ` || `---. \\n `-----' `--' `--'`-----'`--'`--' `--'`------'\r\n \"\"\") # Print the title card\r\n time.sleep(2) # Wait a few seconds\r\n self.slowType(\"Made by skookygoost\", .02) # Print who developed the code\r\n time.sleep(1) # Wait a little more\r\n self.slowType(\"\\nInput How Many Codes to Generate and Check: \", .02, newLine = False) # Print the first question\r\n\r\n num = int(input('')) # Ask the user for the amount of codes\r\n\r\n # Get the webhook url, if the user does not wish to use a webhook the message will be an empty string\r\n self.slowType(\"\\nDo you wish to use a discord webhook? 
\\nIf so type it here or press enter to ignore: \", .02, newLine = False)\r\n url = input('') # Get the awnser\r\n webhook = url if url != \"\" else None # If the url is empty make it be None insted\r\n\r\n print() # Print a newline for looks\r\n\r\n valid = [] # Keep track of valid codes\r\n invalid = 0 # Keep track of how many invalid codes was detected\r\n\r\n for i in range(num): # Loop over the amount of codes to check\r\n code = \"\".join(random.choices( # Generate the id for the gift\r\n string.ascii_uppercase + string.digits + string.ascii_lowercase,\r\n k = 16\r\n ))\r\n url = f\"https://discord.gift/{code}\" # Generate the url\r\n\r\n result = self.quickChecker(url, webhook) # Check the codes\r\n\r\n if result: # If the code was valid\r\n valid.append(url) # Add that code to the list of found codes\r\n else: # If the code was not valid\r\n invalid += 1 # Increase the invalid counter by one\r\n\r\n if result and webhook is None: # If the code was found and the webhook is not setup\r\n break # End the script\r\n\r\n\r\n print(f\"\"\"\r\nResults:\r\n Valid: {len(valid)}\r\n Invalid: {invalid}\r\n Valid Codes: {', '.join(valid )}\"\"\") # Give a report of the results of the check\r\n\r\n input(\"\\nThe end! Press Enter 5 times to close the program.\") # Tell the user the program finished\r\n [input(i) for i in range(4,0,-1)] # Wait for 4 enter presses\r\n\r\n\r\n def slowType(self, text, speed, newLine = True): # Function used to print text a little more fancier\r\n for i in text: # Loop over the message\r\n print(i, end = \"\", flush = True) # Print the one charecter, flush is used to force python to print the char\r\n time.sleep(speed) # Sleep a little before the next one\r\n if newLine: # Check if the newLine argument is set to True\r\n print() # Print a final newline to make it act more like a normal print statement\r\n\r\n def generator(self, amount): # Function used to generate and store nitro codes in a seperate file\r\n with open(self.fileName, \"w\", encoding=\"utf-8\") as file: # Load up the file in write mode\r\n print(\"Wait, Generating for you\") # Let the user know the code is generating the codes\r\n\r\n start = time.time() # Note the initaliseation time\r\n\r\n for i in range(amount): # Loop the amount of codes to generate\r\n code = \"\".join(random.choices(\r\n string.ascii_uppercase + string.digits + string.ascii_lowercase,\r\n k = 16\r\n )) # Generate the code id\r\n\r\n file.write(f\"https://discord.gift/{code}\\n\") # Write the code\r\n\r\n # Tell the user its done generating and how long tome it took\r\n print(f\"Genned {amount} codes | Time taken: {round(time.time() - start, 5)}s\\n\") #\r\n\r\n def fileChecker(self, notify = None): # Function used to check nitro codes from a file\r\n valid = [] # A list of the valid codes\r\n invalid = 0 # The amount of invalid codes detected\r\n with open(self.fileName, \"r\", encoding=\"utf-8\") as file: # Open the file containing the nitro codes\r\n for line in file.readlines(): # Loop over each line in the file\r\n nitro = line.strip(\"\\n\") # Remove the newline at the end of the nitro code\r\n\r\n # Create the requests url for later use\r\n url = f\"https://discordapp.com/api/v6/entitlements/gift-codes/{nitro}?with_application=false&with_subscription_plan=true\"\r\n\r\n response = requests.get(url) # Get the responce from the url\r\n\r\n if response.status_code == 200: # If the responce went through\r\n print(f\" Valid | {nitro} \") # Notify the user the code was valid\r\n valid.append(nitro) # Append the nitro code the 
the list of valid codes\r\n\r\n if notify is not None: # If a webhook has been added\r\n DiscordWebhook( # Send the message to discord letting the user know there has been a valid nitro code\r\n url = notify,\r\n content = f\"Valid Nito Code detected! @everyone \\n{nitro}\"\r\n ).execute()\r\n else: # If there has not been a discord webhook setup just stop the code\r\n break # Stop the loop since a valid code was found\r\n\r\n else: # If the responce got ignored or is invalid ( such as a 404 or 405 )\r\n print(f\" Invalid | {nitro} \") # Tell the user it tested a code and it was invalid\r\n invalid += 1 # Increase the invalid counter by one\r\n\r\n return {\"valid\" : valid, \"invalid\" : invalid} # Return a report of the results\r\n\r\n def quickChecker(self, nitro, notify = None): # Used to check a single code at a time\r\n # Generate the request url\r\n url = f\"https://discordapp.com/api/v6/entitlements/gift-codes/{nitro}?with_application=false&with_subscription_plan=true\"\r\n response = requests.get(url) # Get the response from discord\r\n\r\n if response.status_code == 200: # If the responce went through\r\n print(f\" Valid | {nitro} \") # Notify the user the code was valid\r\n\r\n if notify is not None: # If a webhook has been added\r\n DiscordWebhook( # Send the message to discord letting the user know there has been a valid nitro code\r\n url = notify,\r\n content = f\"Valid Nito Code detected! @everyone \\n{nitro}\"\r\n ).execute()\r\n\r\n return True # Tell the main function the code was found\r\n\r\n else: # If the responce got ignored or is invalid ( such as a 404 or 405 )\r\n print(f\" Invalid | {nitro} \") # Tell the user it tested a code and it was invalid\r\n return False # Tell the main function there was not a code found\r\n\r\nif __name__ == '__main__':\r\n Gen = NitroGen() # Create the nitro generator object\r\n Gen.main() # Run the main code", "id": "323726", "language": "Python", "matching_score": 2.2207560539245605, "max_stars_count": 0, "path": "main.py" }, { "content": "import os\r\nimport pyqrcode\r\nfrom PIL import Image\r\n\r\nclass QR_Gen(object):\r\n def __init__(self, text):\r\n self.qr_image = self.qr_generator(text)\r\n\r\n @staticmethod\r\n def qr_generator(text):\r\n qr_code = pyqrcode.create(text)\r\n file_name = \"QR Code Result\"\r\n save_path = os.path.join(os.path.expanduser('~'),'Desktop')\r\n\r\n name = f\"{save_path}{file_name}.png\"\r\n qr_code.png(name, scale=10)\r\n image = Image.open(name)\r\n image = image.resize((400,400),Image.ANTIALIAS)\r\n image.show()\r\n\r\nif __name__ == \"__main__\":\r\n QR_Gen(input(\"[QR] Enter text or link: \"))\r\n", "id": "3997184", "language": "Python", "matching_score": 1.2178393602371216, "max_stars_count": 0, "path": "QRCODEGENERATOR.py" }, { "content": "import cv2\r\n\r\nfilename = '' #file name goes in here\r\nimg = cv2.imread(filename)\r\n\r\ngray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\r\ninverted_gray_img = cv2.bitwise_not(gray_img)\r\n\r\n\r\nblurred_img = cv2.GaussianBlur(inverted_gray_img, (21, 21), 0)\r\n\r\ninverted_blurred_img = cv2.bitwise_not(blurred_img)\r\n\r\npencil_sketch_IMG = cv2.divide(gray_img, inverted_blurred_img, scale=256.0)\r\n\r\ncv2.imwrite('', pencil_sketch_IMG) #new file name goes in here", "id": "5104590", "language": "Python", "matching_score": 0.1094580739736557, "max_stars_count": 0, "path": "sketch.py" }, { "content": "import platform\r\n\r\nmy_system = platform.uname()\r\n\r\nprint(f\"System: {my_system.system}\")\r\nprint(f\"Node Name: 
{my_system.node}\")\r\nprint(f\"Release: {my_system.release}\")\r\nprint(f\"Version: {my_system.version}\")\r\nprint(f\"Machine: {my_system.machine}\")\r\nprint(f\"Processor: {my_system.processor}\")\r\n", "id": "2068355", "language": "Python", "matching_score": 1.353326439857483, "max_stars_count": 0, "path": "systeminformation.py" }, { "content": "import instaloader \r\n\r\nd = instaloader.Instaloader()\r\n\r\nprofile_Name = '' #name of the profile\r\n\r\nd.download_profile(profile_Name, profile_pic_only = True)", "id": "1236466", "language": "Python", "matching_score": 0.4240621030330658, "max_stars_count": 0, "path": "instagramdownload.py" } ]
1.285583
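password.py and main.py in the entry above both assemble random strings from the string module's character pools (ascii_letters, digits, punctuation) via random.choice/random.choices. A stripped-down sketch of that construction, without the Tkinter window or any network calls, is below; the function name and default length are illustrative only.

import random
import string

# the same character pools the scripts above draw from
ALPHABET = string.ascii_letters + string.digits + string.punctuation

def make_password(length=15):
    # random.choices samples with replacement, as in the generators above;
    # for real credentials the standard-library secrets module is preferable
    return "".join(random.choices(ALPHABET, k=length))

if __name__ == "__main__":
    print(make_password())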
idesign0
[ { "content": "\n\n# so lets talk about Variables\n\n# varieables are just placeholders for any data\n\n# And tou should give variable name starting with letter\n\n# variables are used when you dont know which values you have to deal with !\n\n#ex\n\n#tuna is variable and i assign it a value 5\n\n# you can run the code by RUN > Run module or (f5)\n\n#try it on shell\n\n#tuna=5 \n\n#20 + tuna\n\n#25 (lets try more )\n\n\n#bacum = 20\n\n#tuna = 3 // now tuna s new value is 3\n\n#bacum/tuna // by deviding two variable we get alternatively 20/3\n\n#6.6666\n\n\n\n# Do check my shell file for refrence\n", "id": "11443811", "language": "Python", "matching_score": 1.5388752222061157, "max_stars_count": 1, "path": "Python/3. VARIABLES/variables.py" }, { "content": "# Today we are going to learn al=bout lists\n# lists in python is pretty much as list ofwords or numbers,\n# lets take about Numbers\n\n\n#ex\n\n#numbers = [20,54,869,75,15,31]\n\n\n\n#you can access individual items in list , just like we did it in strings\n\n#numbers[2]\n\n#869\n\n#you can different values in perticular list itema\n\n#number[2]=25\n\n#numbers = [20,54,25,75,15,31]\n\n\n#you can add new list item to the available list\n\n#numbers + [90,55,25]\n\n#numbers\n\n#numbers = [20,54,25,75,15,31,90,55,25]\n\n\n### but this method do it temperory only\n\n#for permanent basis we have to use \" dot operator '.' and one function called 'append'\n\n#we will talk about functions soon\n\n\n#for permanent list addition\n\n#numbers.append[120]\n\n#numbers\n\n#[20,54,25,75,15,31,120]\n\n\n\n\n\n#### another functions are\n\n###numbers[:2]\n\n#[20.54]\n\n#we can also assign multiple values,\n\n#numbers[:2] = [0,0]\n\n#numbers\n\n#[0,0,25,75,15,31,120]\n\n\n#you can also delet items from lists\n\n\n#just follow numbers[:2] = []\n\n#numbers\n#[25,75,15,31,120]\n\n\n#do remove entire list\n\n#number[:] = []\n\n\n#numbers\n\n#[]\n\n# [:] it tells we are taking all list numbers , just like we did in strings\n\n\n## Do check my shell file for refrence\n\n\n\n\n\n\n", "id": "9756853", "language": "Python", "matching_score": 2.15679931640625, "max_stars_count": 1, "path": "Python/7.LISTS/list.py" }, { "content": "\n\n#lets talk about numbers\n# In python you have to put one hashtag on starting of the line for comments\n# Usually you open python shell ( IDLE )\n# to open this file you Should go to FILE > New , and you are in.\n# IDLE cant save your program but you can save code in \"New files\".\n\n\n# now lets talk about Numbers .\n# you are able to do simple math in python without declaring some variable.\n\n\n#EX\n# i recommend to try this operations on python shell\n#just type it and press enter\n\n# i am puting one screen shot so you can understand well\n\n\n#3+3\n#3-3\n#3/3\n#3*3\n\n#5*5*5 \n#5 ** 3\n\n# TRY LAST TWO. YOU WILL GET SAME ANSWER = 125 (5^3)\n\n#there are some special cases\n\n#if you devide 5/3\n\n#you will get 1.66666\n\n #if you try 5//3\n\n # you will get only whole number\n\n # 1\n\n# if you try 5%3\n\n# you will get reminder\n\n# 2\n\n#i've posted screenshot of output in file. \n# and if you have some douts regarding this or any problem in future\n# mail me on \" <EMAIL>\n\n\n\n# Do check my shell file for refrence\n\n", "id": "11554504", "language": "Python", "matching_score": 1.4102483987808228, "max_stars_count": 1, "path": "Python/2. 
NUMBERS/Numbers.py" }, { "content": "x = int(input(\"enter value x : \"))\ny = int(input(\"enter value y : \"))\nz = int(input(\"enter value z : \"))\n\nprint(\"max value is :\")\nprint(max(x,y,z))\n\ninput(\"press enter\")\n", "id": "4526494", "language": "Python", "matching_score": 1, "max_stars_count": 1, "path": "Python/9. SAVE AND RUN PYTHON FILE/save and run python file.py" }, { "content": "a=1\ns=0\n\nprint(\"enter the value to sum\")\nprint(\"enter 0 to quite\" )\n\nwhile a !=0 or s<0:\n\n print(\"total sum \", s)\n a = float(input(\"enter new value\"))\n s +=a\n\nprint('total sum', s)\n", "id": "6480478", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "Python/12. while loop.py" }, { "content": "\n#Topic : 1\n\n#YOU CAN HAVE ACCESS TO INDIVIDUAL CHARACTERS FROM STRINGS\n\n# LETS SEE HOW ITS WORKS\n\n#just see screenshot 1\n\n# so how it works\n#EVERY CHARACTER IN STRING HAVE ONE POSITION , STARTING FROM '0' ( COMPUTER COUNTS FROM ZERO )\n\n# > 0 , -13 (positions) WHite space also counts\n#D> 1 , -12\n#H> 2 , -11\n#R> 3 , -10\n#U> 4 , -9 \n#C> 5 , -8\n# > 6 , -7\n#P> 7 , -6\n#A> 8 , -5 \n#T> 9 , -4\n#E> 10 , -3 \n#L> 11 , -2 \n# > 12 , -1\n\n#YOU CAN SEE THAT EACH ALPHABET HAS TWO POSITION NUMBERS\n\n# TO ACCESS CHARACTERS TYPE VARIABLE NAME AND WRITE POSITION\n# myname[5]\n# Character 'v' will show up\n\n\n# Topic : 2\n\n# Slicing string\n\n# you can slice bunch of characters from string\n\n# myname[2:9]\n\n#it means youcan take character from position 2 UPTO POSITION 9 , NOTE IT WON'T INCLUDE\n# ALPHABET OF POSITION 9 > T , TRY IT\n\n#myname[2:]\n\n#MEANS THAT ALL CHARACTERS FROM POSITION 2 COUNTS\n\n#myname[:9]\n\n#MEANS ALPHABETS SHOWS UPTO POSITION 9 ONLY\n\n#myname[:]\n\n#ALL ALPHABET WILL SHOW UP\n\n\n\n#### YOU CAN SEE IN SCREENSHOT 2 SOME TIMES STRINGS DO NOT SHOW UP , IT IS BECAUSE OF WRONG ORDER.\n\n\n\n\n# Do check my shell file for refrence\n", "id": "9689962", "language": "Python", "matching_score": 3.158020257949829, "max_stars_count": 1, "path": "Python/6.SLICING UP STRINGS/6.SLICING UP STRINGS.py" }, { "content": "#strings in a computer are simply set of words of simply words\n#Example - Names , Sentences\n\n#they are written between duble quotation marks like in many programming\n# languages or you can also write between single quotations too in python generally\n\n# ex \"MY NAME IS <NAME>\" OR 'MY NAME IS <NAME>' ( try it in IDLE )\n\n\n# One thing is necessary to mention is that you have be careful while using single quotation\n\n# Normally quotation marks tells pc where strings Start/End if you use it in sentence like\n# ' I don't think that ... ' ( try it on shell(idle) ) yyou will see different results\n\n\n#you can also watch in screenshot attached \" SyntaxError: invalid syntax \" , something like that\n\n# so i prefer to use double quotation always\n\n\n#ANOTHER WAY IS WHEN YOU HAVE SENTENCE THAT HAS SINGLE QUOTE , PLACE IT IN BETWEEN DOUBLE QUOTES. \" I don't think that ... 
\"\n\n# AND\n\n#WHEN YOU HAVE SENTENCE INCLUDE DOUBLE QUOTES , PLACE IT IN BETWEEN SINGLE QUOTE , SIMPLE AS THAT ..\n\n#AND SOME CASES WHEN YOU HAVE BOTH TYPE OF QOUTES , YOU HAVE TO USE ESCAPING CHARACTER \" / \"\n\n# IT SIMPLY TELLS TREAT THE BACK CHARACTER AS PART OF THE STRING , TRY IT ON IDLE \n\n\n# Do check my shell file for refrence\n", "id": "10578332", "language": "Python", "matching_score": 1.7978618144989014, "max_stars_count": 1, "path": "Python/4.ABOUT STRINGS/ABOUT STRINGS.py" }, { "content": "Python 3.7.2rc1 (tags/v3.7.2rc1:75a402a217, Dec 11 2018, 23:05:39) [MSC v.1916 64 bit (AMD64)] on win32\nType \"help\", \"copyright\", \"credits\" or \"license()\" for more information.\n>>> \n RESTART: C:\\Users\\dhruv\\Desktop\\dhruv.github.io\\Python\\4.ABOUT STRINGS\\ABOUT STRINGS.py \n>>> \"My name is <NAME>\"\n'My name is <NAME>'\n>>> 'My name is <NAME>'\n'My name is <NAME>'\n>>> 'i don't think that'\nSyntaxError: invalid syntax\n>>> 'i don\\'t think that'\n\"i don't think that\"\n>>> \"i don't think that\"\n\"i don't think that\"\n>>> \n", "id": "6360430", "language": "Python", "matching_score": 2.605748176574707, "max_stars_count": 1, "path": "Python/4.ABOUT STRINGS/shell.py" }, { "content": "Python 3.7.2rc1 (tags/v3.7.2rc1:75a402a217, Dec 11 2018, 23:05:39) [MSC v.1916 64 bit (AMD64)] on win32\nType \"help\", \"copyright\", \"credits\" or \"license()\" for more information.\n>>> \n RESTART: C:\\Users\\dhruv\\Desktop\\dhruv.github.io\\Python\\5.PRINT FUNCTION\\print function.py \nMy name is Dhruv\nC:\\Users\\dhruv\\Desktop\\dhruv.github.io\n>>> print(\"My name is Dhruv\")\nMy name is Dhruv\n>>> print(\"C:\\Users\\dhruv\\Desktop\\dhruv.github.io\")\nSyntaxError: (unicode error) 'unicodeescape' codec can't decode bytes in position 2-3: truncated \\UXXXXXXXX escape\n>>> print(\"C:\\Users\\dhruv\\Desktop\\dhruv.github.io\")\nSyntaxError: (unicode error) 'unicodeescape' codec can't decode bytes in position 2-3: truncated \\UXXXXXXXX escape\n>>> print(r\"C:\\Users\\dhruv\\Desktop\\dhruv.github.io\")\nC:\\Users\\dhruv\\Desktop\\dhruv.github.io\n>>> myname = \"Dhruv \"\n>>> myname + \"patel\"\n'<NAME>'\n>>> myname*5\n'Dhruv Dhruv Dhruv Dhruv Dhruv '\n>>> \n", "id": "8024401", "language": "Python", "matching_score": 1.2435674667358398, "max_stars_count": 1, "path": "Python/5.PRINT FUNCTION/shell.py" }, { "content": "m = input(\"number ?\")\nnameList=[]\n\nif m=='ajay':\n print(\"entered name is \" , m )\nelif m=='karan':\n print(\"entered name is \" , m )\nelif m=='mukesh':\n print(\"entered name is \" , m )\nelif m=='dhruv':\n print(\"entered name is \" , m )\nelif m=='patel':\n print(\"entered name is \" , m )\nelse:\n print(\"no match !\")\n \n\n\nnameList.append(m)\n", "id": "2197281", "language": "Python", "matching_score": 1, "max_stars_count": 1, "path": "Python/11. 
if else.py" }, { "content": "\ndef score(name=tom,score=0):\n print(name,\" scored \", score )\n \n", "id": "12755234", "language": "Python", "matching_score": 0.09266210347414017, "max_stars_count": 1, "path": "Python/13.define function.py" }, { "content": "# FUNCTIONS is piece of code which executes some logic\n\n# for example\n\n# lets try exponent function\n \n var = pow(2,3)\n\n print(var)\n\n# here 'pow' is exponential function , pow function requires 2 values\n\n# '2' is to be powered , '3' is how much power we want to give\n\n# 2**3 will do same thing but , pow is more easy in programming\n\n# there are so many built in functions to know type \" dir(__builtins__) \" on command shellc\n\n#to know the functionality of built in function use 'help' fuction.\n\n# write help(function name) to know what is does\n\n\n# modules are extra functions , which contains extra functionality\n\n# include module by 'import math' math is function\n\n# how you will use , type 'functionname.(builtinfunctions)'\n", "id": "9233452", "language": "Python", "matching_score": 0.7049043774604797, "max_stars_count": 1, "path": "Python/8.BUILT IN FUNCTIONS AND MODULES/built in function and mudules.py" }, { "content": "#print is function when we want to print something on output\n\n\n\nprint(\"My name is Dhruv\")\n#You will notice something strange if you try to print any directory\n\n#print(\"C:\\Users\\dhruv\\Desktop\\dhruv.github.io\") \n\n\n#Yes unicodeescape error\n# Remember i told about escape character on previous tutorial\n# yes it causing problems\n\n# now place \"r\" in starting of sentence\n\nprint(r\"C:\\Users\\dhruv\\Desktop\\dhruv.github.io\") \n\n\n#yes it is printed\n\n# what what r means ? r means Rush string \n\n# it means that \" take the string as it , take no special meaning in this perticular STRING \"\n\n# One amazing thing you can do is , string can be store in variables\n#You can also Add and Multiply strings\n\nmyname = \"Dhruv \"\n\nmyname + \"Patel\"\n\nmyname * 5\n# now press run\n\n\n# Do check my shell file for refrence\n\n\n", "id": "12645861", "language": "Python", "matching_score": 1.134173035621643, "max_stars_count": 1, "path": "Python/5.PRINT FUNCTION/print function.py" }, { "content": "Python 3.7.2rc1 (tags/v3.7.2rc1:75a402a217, Dec 11 2018, 23:05:39) [MSC v.1916 64 bit (AMD64)] on win32\nType \"help\", \"copyright\", \"credits\" or \"license()\" for more information.\n>>> \n RESTART: C:/Users/dhruv/Desktop/dhruv.github.io/Python/6.SLICING UP STRINGS/6.SLICING UP STRINGS.py \n>>> myname = ' <NAME> '\n>>> myname[0]\n' '\n>>> myname[5]\n'v'\n>>> myname[2]\n'h'\n>>> myname[8]\n'a'\n>>> myname[6]\n' '\n>>> myname[10]\n'e'\n>>> myname[7]\n'p'\n>>> myname[2:9]\n'<NAME>'\n>>> myname[2:5]\n'hru'\n>>> myname[2:12]\n'<NAME>'\n>>> myname[-2:-9]\n''\n>>> myname[-1:-5]\n''\n>>> myname[-7:-2]\n' pate'\n>>> myname[2:]\n'<NAME> '\n>>> myname[:9]\n' <NAME>'\n>>> myname[-2:]\n'l '\n>>> myname[:-9]\n' Dhr'\n>>> \n>>> \n>>> myname[:]\n' <NAME> '\n>>> \n", "id": "8907139", "language": "Python", "matching_score": 2.4519553184509277, "max_stars_count": 1, "path": "Python/6.SLICING UP STRINGS/SHELL.py" }, { "content": "Python 3.7.2rc1 (tags/v3.7.2rc1:75a402a217, Dec 11 2018, 23:05:39) [MSC v.1916 64 bit (AMD64)] on win32\nType \"help\", \"copyright\", \"credits\" or \"license()\" for more information.\n>>> \n RESTART: C:\\Users\\dhruv\\Desktop\\dhruv.github.io\\Python\\3. 
VARIABLES\\variables.py \n>>> tuna = 5\n>>> 20 + tuna\n25\n>>> bacum = 20\n>>> tuna =3\n>>> bacum/tuna\n6.666666666666667\n>>> \n", "id": "7174928", "language": "Python", "matching_score": 4.156571388244629, "max_stars_count": 1, "path": "Python/3. VARIABLES/shell.py" }, { "content": "Python 3.7.2rc1 (tags/v3.7.2rc1:75a402a217, Dec 11 2018, 23:05:39) [MSC v.1916 64 bit (AMD64)] on win32\nType \"help\", \"copyright\", \"credits\" or \"license()\" for more information.\n>>> \n RESTART: C:\\Users\\dhruv\\Desktop\\dhruv.github.io\\Python\\2. NUMBERS\\Numbers.py \n>>> 3+3\n6\n>>> 3-3\n0\n>>> 3/3\n1.0\n>>> 3*3\n9\n>>> 5*5*5\n125\n>>> 5 ** 3\n125\n>>> 5/3\n1.6666666666666667\n>>> 5//3\n1\n>>> 5%3\n2\n>>> \n", "id": "10567166", "language": "Python", "matching_score": 3.669743776321411, "max_stars_count": 1, "path": "Python/2. NUMBERS/shell.py" }, { "content": "Python 3.7.2rc1 (tags/v3.7.2rc1:75a402a217, Dec 11 2018, 23:05:39) [MSC v.1916 64 bit (AMD64)] on win32\nType \"help\", \"copyright\", \"credits\" or \"license()\" for more information.\n>>> numbers = [20,54,869,75,15,31]\n>>> numbers\n[20, 54, 869, 75, 15, 31]\n>>> numbers[2]\n869\n>>> numbers[2]=25\n>>> numbers\n[20, 54, 25, 75, 15, 31]\n>>> numbers + [90,55,25]\n[20, 54, 25, 75, 15, 31, 90, 55, 25]\n>>> numbers\n[20, 54, 25, 75, 15, 31]\n>>> numbers.append(120)\n>>> numbers\n[20, 54, 25, 75, 15, 31, 120]\n>>> numbers[:2]\n[20, 54]\n>>> numbers[:2]=[0,0]\n>>> numbers\n[0, 0, 25, 75, 15, 31, 120]\n>>> numbers[:2]=[]\n>>> numbers\n[25, 75, 15, 31, 120]\n>>> numbers[:]=[]\n>>> numbers\n[]\n>>> \n", "id": "3453779", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "Python/7.LISTS/shell.py" }, { "content": "###strings\n\n# 3 ways\n\nx ='xyzeervqrv'\ny =\"xyevnvz\"\nz =\"\"\"xyzeveqrveq\"\"\"\n\nprint(len(x)*100)\nprint(len(y))\nprint(len(z))\nprint('max length of above strings')\nprint(max(len(x),len(y),len(z)))\n\n\nprint(len(x*100))\n\nd=10\n\nprint(x + str(d))\n", "id": "4427749", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "Python/10.STRINGS/strings.py" } ]
1.326908
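The tutorial files and shell transcripts in the entry above walk through string indexing, slicing and list editing one command at a time. The short sketch below consolidates those operations into a single runnable script; the sample values are illustrative (the transcripts use ' Dhruv Patel ' with surrounding spaces).

# String indexing and slicing, as in 6.SLICING UP STRINGS
myname = "Dhruv Patel"
print(myname[0])       # 'D'   -> counting starts at 0
print(myname[2:5])     # 'ruv' -> positions 2, 3 and 4; the end index is excluded
print(myname[-5:])     # 'Patel' -> negative positions count from the end

# List editing, as in 7.LISTS
numbers = [20, 54, 869, 75, 15, 31]
numbers[2] = 25        # replace a single item in place
numbers.append(120)    # permanent addition, unlike the temporary numbers + [90, 55, 25]
numbers[:2] = []       # delete the first two items
print(numbers)         # [25, 75, 15, 31, 120]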
shiranD
[ { "content": "\"\"\"Tests of the BitWeight Python wrapper.\"\"\"\n\nfrom __future__ import division\n\nimport unittest\nimport math\n\nfrom ebitweight import BitWeight, BitWeightRangeError\n\n\nclass TestBitWeight(unittest.TestCase):\n\n def setUp(self):\n self.one = BitWeight(1)\n \n self.one_num = BitWeight(50)\n self.sec_num = BitWeight(30)\n\n def testAddition(self):\n # logadd\n add = (self.one_num + self.sec_num).real()\n self.assertEqual((self.sec_num + self.one_num).loge(), -math.log(add))\n\n def testMultiplication(self):\n # addition in log domain\n mult = self.one_num * self.sec_num\n self.assertEqual(mult.loge(), 80)\n self.assertAlmostEqual(mult.real(), math.pow(math.e,-mult.loge()))\n \n def testDivision(self):\n # subtraction in log domain\n divide = self.one_num / self.sec_num\n self.assertEqual(divide.loge(), 20)\n self.assertAlmostEqual(divide.real(), math.pow(math.e,-divide.loge()))\n\n def testComparison(self):\n self.assertEqual(self.one.real(), 0.36787944117144233)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "id": "7372874", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "ebitweight_test.py" }, { "content": "from setuptools import setup, Extension\nfrom Cython.Build import cythonize\n\nsetup(name=\"eBitWeight\",\n version=\"0.1\",\n description=\"Underflow-proof floating-poing math for NLP\",\n author=\"<NAME>, <NAME>, and <NAME>\",\n author_email=\"<EMAIL>\",\n install_requires=[\"Cython >= 0.22\"],\n ext_modules=cythonize([Extension(\"ebitweight\", [\"ebitweight.pyx\"],\n language=\"c++\",\n extra_compile_args=[\"-std=c++11\"])]),\n test_suite=\"ebitweight_test\",\n)\n", "id": "5293948", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "setup.py" } ]
0
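The ebitweight tests above exercise a weight type that stores the negative natural log of a probability, so BitWeight(50) stands for e**-50, '+' is a log-add, '*' adds exponents and '/' subtracts them. The pure-Python sketch below reproduces that arithmetic as inferred from those tests; it only illustrates the log-domain identities and is not the Cython extension itself.

import math

def real(neg_log):
    # recover the underlying probability from its negative natural log
    return math.exp(-neg_log)

def log_add(a, b):
    # -log(e**-a + e**-b), computed without leaving log space:
    # min(a, b) - log1p(e**-(|a - b|))
    lo, hi = min(a, b), max(a, b)
    return lo - math.log1p(math.exp(-(hi - lo)))

a, b = 50.0, 30.0
print(log_add(a, b))                  # ~29.999999998, dominated by the smaller exponent
print(-math.log(real(a) + real(b)))   # same value, computed the naive way
print(a + b)                          # 80.0 -> multiplying the real weights
print(a - b)                          # 20.0 -> dividing the real weights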
t04glovern
[ { "content": "#!/usr/bin/env python3\nimport os\nimport sys\nimport json\nimport base64\nimport subprocess\nimport urllib.request\n\nfrom string import Template\nfrom typing import List\n\nTEMPLATE_MASTER = \"templates/cloud-init-config-template.master.yml\"\nOUTPUT_MASTER = \"output/cloud-init-config.master.yaml\"\n\nTEMPLATE_WORKER = \"templates/cloud-init-config-template.worker.yml\"\nOUTPUT_WORKER = \"output/cloud-init-config.worker.yaml\"\n\nMASTER_FILES = [\n \"provision-k8s/root/k8s-cluster/1.docker.sh\",\n \"provision-k8s/root/k8s-cluster/2.install-k8s.sh\",\n \"provision-k8s/root/k8s-cluster/3.create-k8s-master.sh\",\n \"provision-k8s/user/k8s-config/1.configure-user.sh\",\n \"provision-k8s/user/k8s-config/2.add-flannel-pod-network.sh\",\n \"provision-k8s/user/k8s-config/2.add-weavenet-pod-network.sh\",\n \"provision-k8s/user/k8s-addons/add-dashboard.sh\",\n \"provision-k8s/user/k8s-utils/get-dashboard-token.sh\",\n]\n\nWORKER_FILES = [\n \"provision-k8s/root/k8s-cluster/1.docker.sh\",\n \"provision-k8s/root/k8s-cluster/2.install-k8s.sh\",\n]\n\n\nwith urllib.request.urlopen(\"https://ipapi.co/timezone\") as response:\n TIMEZONE = response.read().decode()\n\nwith open(\"./conf.json\", \"r\") as f:\n config = json.load(f)\n\n\ndef filter_pod_network(value: str, pod_network: str) -> bool:\n if \"pod-network\" in value:\n return pod_network in value\n return True\n\n\ndef generate_base64(file_path: str, username: str) -> str:\n with open(file_path, \"rb\") as f:\n encoded_content = base64.b64encode(f.read())\n node_file_path = file_path.replace(\"provision-k8s\", \"\").replace(\n \"user\", f\"home/{username}\"\n )\n return f\"\"\"\n - encoding: b64\n content: {encoded_content.decode()}\n path: {node_file_path}\n permissions: '0755'\"\"\"\n\n\ndef get_wpa_passphrase(wifi_name: str, wifi_password: str) -> str:\n cmd = f\"wpa_passphrase {wifi_name} {wifi_password}\"\n process = subprocess.Popen(\n cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n stdout, stderr = process.communicate()\n if stderr:\n sys.exit()\n out = stdout.decode().split()[3]\n return out\n\n\ndef create_config(input_file: str, output_file: str, cluster_scripts: List[str]):\n\n ssh_location = config[\"ssh_public_key\"]\n\n with open(os.path.expanduser(ssh_location), \"r\") as f:\n ssh_public = f.read()\n username = config[\"username\"]\n pod_network = config[\"pod_network\"]\n cluster_scripts = list(filter(\n lambda value: filter_pod_network(value, pod_network), cluster_scripts\n ))\n\n write_files = \"\".join(\n [generate_base64(filepath, username) for filepath in cluster_scripts]\n )\n wifi_passphrase = get_wpa_passphrase(\n config[\"wifi_ssid_name\"], config[\"wifi_password\"]\n )\n variables = {\n \"USERNAME\": username,\n \"WIFI_SSID_NAME\": config[\"wifi_ssid_name\"],\n \"WIFI_PASSWORD\": wifi_passphrase,\n \"WIFI_COUNTRY\": config[\"wifi_country\"],\n \"SSH_PUBLIC_KEY\": ssh_public,\n \"WRITE_FILES\": write_files,\n \"TIMEZONE\": TIMEZONE,\n }\n with open(input_file, \"r\") as f:\n src = Template(f.read())\n file_path = os.path.dirname(output_file)\n if not os.path.exists(file_path):\n os.makedirs(file_path)\n with open(output_file, \"w\") as g:\n g.write(src.safe_substitute(variables))\n\n\ncreate_config(TEMPLATE_MASTER, OUTPUT_MASTER, MASTER_FILES)\ncreate_config(TEMPLATE_WORKER, OUTPUT_WORKER, WORKER_FILES)\n", "id": "11569400", "language": "Python", "matching_score": 3.813021421432495, "max_stars_count": 16, "path": "scripts/render.py" }, { "content": "#!/usr/bin/env python3\nimport 
json\nfrom pathlib import Path\nfrom getpass import getpass\nfrom typing import Dict\nfrom distutils.util import strtobool\n\n\ndef load_conf() -> Dict:\n conf_path = Path(\"conf.json\")\n if not conf_path.is_file():\n conf_path = Path(\"conf.example.json\")\n with conf_path.open() as f:\n conf = json.load(f)\n return conf\n\n\nconf = load_conf()\nusername: str = (\n input(f\"Username of the host machines [{conf['username']}]:\\n\") or conf[\"username\"]\n)\nwifi_ssid_name: str = input(\n f\"\\nWiFi network name [{conf['wifi_ssid_name']}]:\\n\"\n) or conf[\"wifi_ssid_name\"]\n\nwifi_password: str = getpass(\n f\"\\nWiFi password [{'*' * len(conf['wifi_password'])}]:\\n\"\n) or conf[\"wifi_password\"]\n\nprint(f\"\\nCountry code of your wifi [{conf['wifi_country']}]\")\nprint(\n \"More info: https://github.com/recalbox/recalbox-os/wiki/Wifi-country-code-%28EN%29\"\n)\nwifi_country: str = input() or conf[\"wifi_country\"]\n\nprint(f\"\\nSSH Public key path [{conf['ssh_public_key']}]\")\nprint(\"Used to access to the nodes through the local network\")\nprint(\n \"Create a new one: \"\n \"https://help.github.com/en/articles/generating-a-new-ssh-key-\"\n \"and-adding-it-to-the-ssh-agent\"\n)\nssh_public_key: str = input() or conf[\"ssh_public_key\"]\n\nprint(f\"\\nHostname prefix for the nodes [{conf['hostname_prefix']}]\")\nprint(\n f\"Your hostnames will look like: \"\n f\"{conf['hostname_prefix']}-1 {conf['hostname_prefix']}-2\"\n)\nhostname_prefix: str = input() or conf[\"hostname_prefix\"]\n\nprint(f\"\\nNumber of machines [{conf['number_of_nodes']}]\")\nprint(\"Or in other words, the number of raspberries you have\")\nnumber_of_nodes: int = int(input() or conf[\"number_of_nodes\"])\n\nprint(f\"\\nInclude a k8s master ready node? [Y]/n\")\ninclude_master: bool = bool(strtobool(input() or \"y\"))\n\nprint(f\"\\nNode range beginning [{conf['node_range_start']}]\")\nprint(\n \"Offset applied to the number of nodes, let's say you have 4 nodes\\n\"\n \"and you already have 3 machines, if you set the offset to 3\\n\"\n \"the newely created nodes will have this range: [4..7]\"\n)\nnode_range_start: int = int(input() or conf[\"node_range_start\"])\n\nprint(f\"Pod network [{conf['pod_network']}]\")\nprint(\"Used as the underlying layer for the pods to communicate across machines\")\nprint(\"Choices: flannel, weavenet\")\npod_network: str = input() or conf[\"pod_network\"]\n\nconf.update(\n {\n \"username\": username,\n \"wifi_ssid_name\": wifi_ssid_name,\n \"wifi_password\": <PASSWORD>,\n \"wifi_country\": wifi_country,\n \"ssh_public_key\": ssh_public_key,\n \"hostname_prefix\": hostname_prefix,\n \"number_of_nodes\": number_of_nodes,\n \"include_master\": include_master,\n \"node_range_start\": node_range_start,\n \"pod_network\": pod_network,\n }\n)\n\nwith Path(\"conf.json\").open(\"w\") as f:\n json.dump(conf, f, indent=2)\n\nprint(\"New configuration created!\")\nprint(\"This file is not being tracked by git.\")\nprint(\"Location './conf.json'\")\n", "id": "5370649", "language": "Python", "matching_score": 0.6199946999549866, "max_stars_count": 16, "path": "scripts/assistant.py" }, { "content": "from mcpi.minecraft import Minecraft\nimport time\nimport math\nmc = Minecraft.create()\n\ntry:\n timer = int(input(\"How long shall we sprint for?: \"))\nexcept:\n mc.postToChat(\"Please enter a valid int for the timer\")\n\nmc.postToChat(\"Ready\")\ntime.sleep(1)\nmc.postToChat(\"Set\")\ntime.sleep(1)\nmc.postToChat(\"Go!\")\n\npos_start = mc.player.getTilePos()\nx_start = pos_start.x\ny_start = 
pos_start.y\nz_start = pos_start.z\n\nfor t in range(0, timer, 1):\n time.sleep(1)\n mc.postToChat(str(timer - t) + \" seconds left; \" +\n \" X: \" + str(mc.player.getTilePos().x) +\n \" Y: \" + str(mc.player.getTilePos().y) +\n \" Z: \" + str(mc.player.getTilePos().z))\n\npos_end = mc.player.getTilePos()\nx_end = pos_end.x\ny_end = pos_end.y\nz_end = pos_end.z\n\nx_travel = x_end - x_start\ny_travel = y_end - y_start\nz_travel = z_end - z_start\n\nmc.postToChat(\"You travelled \" + str(x_travel) + \" from X\")\nmc.postToChat(\"You travelled \" + str(y_travel) + \" from Y\")\nmc.postToChat(\"You travelled \" + str(z_travel) + \" from Z\")\n\n# Pythagorean calculation\ndistance = math.sqrt(math.pow((x_end - x_start), 2) +\n math.pow((y_end - y_start), 2) +\n math.pow((z_end - z_start), 2))\n\nmc.postToChat(\"You travelled a calculated distance of: \" + str(distance))", "id": "5794087", "language": "Python", "matching_score": 3.332900047302246, "max_stars_count": 0, "path": "chapter04-strings/sprintPythagorean.py" }, { "content": "from mcpi.minecraft import Minecraft\nimport time\nmc = Minecraft.create()\n\npos_start = mc.player.getTilePos()\nx_start = pos_start.x\ny_start = pos_start.y\nz_start = pos_start.z\n\nmc.postToChat(\"Ready\")\ntime.sleep(1)\nmc.postToChat(\"Set\")\ntime.sleep(1)\nmc.postToChat(\"Go!\")\n\nfor t in range(0, 15, 1):\n time.sleep(1)\n mc.postToChat(str(15 - t) + \" seconds left; \" +\n \" X: \" + str(mc.player.getTilePos().x) +\n \" Y: \" + str(mc.player.getTilePos().y) +\n \" Z: \" + str(mc.player.getTilePos().z))\n\npos_end = mc.player.getTilePos()\nx_end = pos_end.x\ny_end = pos_end.y\nz_end = pos_end.z\n\nx_travel = x_end - x_start\ny_travel = y_end - y_start\nz_travel = z_end - z_start\n\nmc.postToChat(\"You travelled \" + str(x_travel) + \" from X\")\nmc.postToChat(\"You travelled \" + str(y_travel) + \" from Y\")\nmc.postToChat(\"You travelled \" + str(z_travel) + \" from Z\")", "id": "6031313", "language": "Python", "matching_score": 1.3408238887786865, "max_stars_count": 0, "path": "chapter04-strings/sprint.py" }, { "content": "from mcpi.minecraft import Minecraft\nimport time\n\n# connect to Minecraft\nmc = Minecraft.create()\n\n# y, and z variables to represent coordinates\nx = mc.player.getPos().x\ny = mc.player.getPos().y\nz = mc.player.getPos().z\n\nfor x in range(int(x), int(x + 10), 1):\n # change the player's position\n mc.player.setTilePos(x, y, z)\n\n # wait 2 seconds\n time.sleep(0.1)\n", "id": "3385732", "language": "Python", "matching_score": 3.5537219047546387, "max_stars_count": 0, "path": "chapter02-variables/tour-bonus.py" }, { "content": "from mcpi.minecraft import Minecraft\nimport time\n\n# connect to Minecraft\nmc = Minecraft.create()\n\n# set x, y, and z variables to represent coordinates\nx = 54\ny = 72\nz = 0\n\n# change the player's position\nmc.player.setTilePos(x, y, z)\n\n# wait 2 seconds\ntime.sleep(2)\n\n# set x, y, and z variables to represent coordinates\nx = 135\ny = 74\nz = -15\n\n# change the player's position\nmc.player.setTilePos(x, y, z)", "id": "5670547", "language": "Python", "matching_score": 2.468430280685425, "max_stars_count": 0, "path": "chapter02-variables/tour.py" }, { "content": "from mcpi.minecraft import Minecraft\n\n# connect to Minecraft\nmc = Minecraft.create()\n\n# set x, y, and z variables to represent coordinates\nx = 53.594\ny = 72\nz = -0.872\n\n# change the player's position\nmc.player.setPos(x, y, z)", "id": "10236246", "language": "Python", "matching_score": 2.766155481338501, "max_stars_count": 
0, "path": "chapter02-variables/teleport-floats.py" }, { "content": "from mcpi.minecraft import Minecraft\n\n# connect to Minecraft\nmc = Minecraft.create()\n\n# set x, y, and z variables to represent coordinates\nx = 55\ny = 72\nz = 0\n\n# change the player's position\nmc.player.setTilePos(x, y, z)", "id": "3138455", "language": "Python", "matching_score": 0.26908501982688904, "max_stars_count": 0, "path": "chapter02-variables/teleport.py" }, { "content": "from mcpi.minecraft import Minecraft\nimport time\nmc = Minecraft.create()\n\nmessage1 = input(\"What you would like to see?\")\ntime.sleep(1)\nmessage2 = input(\"What else?\")\nmc.postToChat(message1 + \" and \" + message2)", "id": "12140496", "language": "Python", "matching_score": 3.0299501419067383, "max_stars_count": 0, "path": "chapter04-strings/messageWithWait.py" }, { "content": "from mcpi.minecraft import Minecraft\nmc = Minecraft.create()\n\nmessage = input(\"What you would like to see?\")\nmc.postToChat(message)", "id": "491222", "language": "Python", "matching_score": 0.999409019947052, "max_stars_count": 0, "path": "chapter04-strings/messageInput.py" }, { "content": "from mcpi.minecraft import Minecraft\nmc = Minecraft.create()\n\nusername = input(\"Enter your username: \")\nmessage = input(\"Enter your message: \")\nmc.postToChat(username + \": \" + message)", "id": "12310355", "language": "Python", "matching_score": 1.876591682434082, "max_stars_count": 0, "path": "chapter04-strings/userChat.py" }, { "content": "from mcpi.minecraft import Minecraft\nmc = Minecraft.create()\n\nmc.postToChat(\"Hello, Minecraft World\")", "id": "12248467", "language": "Python", "matching_score": 1.2133864164352417, "max_stars_count": 0, "path": "chapter04-strings/message.py" }, { "content": "from mcpi.minecraft import Minecraft\nmc = Minecraft.create()\n\nans = input(\"Do you want blocks to be immutable? 
Y/N \")\n\nif ans == \"Y\":\n mc.setting(\"world_immutable\", True)\n mc.postToChat(\"World is immutable\")\nelif ans == \"N\":\n mc.setting(\"world_immutable\", False)\n mc.postToChat(\"World is mutable\")\n", "id": "1013146", "language": "Python", "matching_score": 1.9842878580093384, "max_stars_count": 0, "path": "chapter06-ifstatements/immutableChoice.py" }, { "content": "from mcpi.minecraft import Minecraft\nmc = Minecraft.create()\n\nmc.setting(\"world_immutable\", False)", "id": "2542079", "language": "Python", "matching_score": 0.06640560179948807, "max_stars_count": 0, "path": "chapter05-booleans/immutableOff.py" }, { "content": "from mcpi.minecraft import Minecraft\nmc = Minecraft.create()\n\n# get player position\npos = mc.player.getTilePos()\nx = pos.x\ny = pos.y\nz = pos.z\n\nb_width = 10\nb_height = 5\nb_length = 6\n\nblock_type = 4 # cobblestone\nblock_fill = 0 # air\n\n# create base cube\nmc.setBlocks(x, # start pos x\n y, # start pos z\n z, # start pos z\n x + b_width, # end pos x\n y + b_length, # end pos y\n z + b_length, # end pos z\n block_type)\n\n# punch out center of cube\nmc.setBlocks(x + 1, # start pos x\n y + 1, # start pos z\n z + 1, # start pos z\n x + b_width - 1, # end pos x\n y + b_length - 1, # end pos y\n z + b_length - 1, # end pos z\n block_fill)", "id": "12687452", "language": "Python", "matching_score": 3.4368579387664795, "max_stars_count": 0, "path": "chapter03-math/building.py" }, { "content": "from mcpi.minecraft import Minecraft\nimport time\n\nmc = Minecraft.create()\n\n# get player position\npos = mc.player.getTilePos()\nx = pos.x + 1\ny = pos.y\nz = pos.z\n\nb_width = 10\nb_height = 5\nb_length = 6\nb_thickness = 1\n\nblock_type = 20 # glass\nblock_fill = 9 # water\n\n# create base cube\nmc.setBlocks(x, # start pos x\n y, # start pos y\n z, # start pos z\n x + b_width, # end pos x\n y + b_length, # end pos y\n z + b_length, # end pos z\n block_type)\n\n# punch out center of cube\nmc.setBlocks(x + b_thickness, # start pos x\n y + b_thickness, # start pos y\n z + b_thickness, # start pos z\n x + b_width - b_thickness, # end pos x\n y + b_length, # end pos y\n z + b_length - b_thickness, # end pos z\n block_fill)\n", "id": "6955740", "language": "Python", "matching_score": 1.5887930393218994, "max_stars_count": 0, "path": "chapter03-math/swimmingPool.py" }, { "content": "from mcpi.minecraft import Minecraft\n\nmc = Minecraft.create()\n\n# get player position\npos = mc.player.getTilePos()\nx = pos.x + 1\ny = pos.y\nz = pos.z\n\nb_height = 2\nblockType = 1 # stone\n\n# Spire sides: should be same as height\nsideHeight = b_height\nmc.setBlocks(x + 1, # start pos x\n y, # start pos y\n z + 1, # start pos z\n x + 3, # end pos x\n y + sideHeight - 1, # end pos y\n z + 3, # end pos z\n blockType)\n\n# Spire point: should be two times the height\npointHeight = b_height * 2\nmc.setBlocks(x + 2, # start pos x\n y, # start pos y\n z + 2, # start pos z\n x + 2, # end pos x\n y + pointHeight - 1, # end pos y\n z + 2, # end pos z\n blockType)\n\n# Spire base: should be half the height\nbaseHeight = b_height / 2\nmc.setBlocks(x, # start pos x\n y, # start pos y\n z, # start pos z\n x + 4, # end pos x\n y + baseHeight - 1, # end pos y\n z + 4, # end pos z\n blockType)\n\n", "id": "7755097", "language": "Python", "matching_score": 2.8031177520751953, "max_stars_count": 0, "path": "chapter03-math/spire.py" }, { "content": "from mcpi.minecraft import Minecraft\nmc = Minecraft.create()\n\npos = mc.player.getTilePos()\nx = pos.x\ny = pos.y\nz = pos.z\n\nhighestBlockY 
= mc.getHeight(x, y)\nmc.postToChat(highestBlockY)", "id": "3121175", "language": "Python", "matching_score": 3.4593758583068848, "max_stars_count": 0, "path": "chapter05-booleans/aboveGround.py" }, { "content": "from mcpi.minecraft import Minecraft\nmc = Minecraft.create()\n\nx = mc.player.getTilePos().x\ny = mc.player.getTilePos().y\nz = mc.player.getTilePos().z\nmc.setBlock(x, y - 1, z, 20)\n\nwhile 1:\n gift = mc.getBlock(x, y - 1, z)\n\n if gift == 57:\n mc.postToChat(\"Thanks for the Diamond!\")\n elif gift == 6:\n mc.postToChat(\"I don't want your damn sapling!\")\n else:\n mc.postToChat(\"Bring a gift to \" + str(x) + \", \" + str(y) + \", \" + str(z))", "id": "9440600", "language": "Python", "matching_score": 3.246971368789673, "max_stars_count": 0, "path": "chapter06-ifstatements/gift.py" }, { "content": "from mcpi.minecraft import Minecraft\nmc = Minecraft.create()\n\nanswer = input(\"Create a crater? Y/N\")\n\nif answer == \"Y\":\n pos = mc.player.getPos()\n x = pos.x\n y = pos.y\n z = pos.z\n\n mc.setBlocks(x + 1, y + 1, z + 1, x - 1, y - 1, z - 1, 0)\n mc.postToChat(\"Boom!\")", "id": "11142633", "language": "Python", "matching_score": 2.196678638458252, "max_stars_count": 0, "path": "chapter06-ifstatements/crater.py" }, { "content": "from mcpi.minecraft import Minecraft\nmc = Minecraft.create()\n\n\nwhile 1:\n pos = mc.player.getPos()\n x = pos.x\n y = pos.y\n z = pos.z\n\n blockType = mc.getBlock(x, y, z)\n blockType2 = mc.getBlock(x, y + 1, z)\n\n if blockType == 9 and blockType2 == 9:\n mc.postToChat(\"Underwater\")\n else:\n mc.postToChat(\"Not Underwater\")", "id": "636902", "language": "Python", "matching_score": 3.6822009086608887, "max_stars_count": 0, "path": "chapter05-booleans/isSwimming.py" }, { "content": "from mcpi.minecraft import Minecraft\nmc = Minecraft.create()\n\nwhile 1:\n pos = mc.player.getPos()\n x = pos.x\n y = pos.y\n z = pos.z\n\n blockType = mc.getBlock(x, y - 1, z)\n mc.postToChat(blockType == 0)", "id": "9758524", "language": "Python", "matching_score": 3.6822009086608887, "max_stars_count": 0, "path": "chapter05-booleans/isFlying.py" }, { "content": "from mcpi.minecraft import Minecraft\nmc = Minecraft.create()\n\npos = mc.player.getPos()\nx = pos.x\ny = pos.y\nz = pos.z\n\nblockType = mc.getBlock(x, y, z)\nmc.postToChat(blockType == 0)", "id": "6917942", "language": "Python", "matching_score": 2.245189666748047, "max_stars_count": 0, "path": "chapter05-booleans/swimming.py" }, { "content": "from mcpi.minecraft import Minecraft\nmc = Minecraft.create()\n\ntry:\n blockType = int(input(\"Enter block ID: \"))\nexcept:\n mc.postToChat(\"Invalid input, please enter a number\")\n\npos = mc.player.getTilePos()\nx = pos.x\ny = pos.y\nz = pos.z\n\nmc.setBlock(x + 1, # offset position of block\n y,\n z,\n blockType)", "id": "9436275", "language": "Python", "matching_score": 3.0406951904296875, "max_stars_count": 0, "path": "chapter04-strings/blockInputFixed.py" }, { "content": "from mcpi.minecraft import Minecraft\nmc = Minecraft.create()\n\nx_cord = int(input(\"Enter X: \"))\ny_cord = int(input(\"Enter Y: \"))\nz_cord = int(input(\"Enter Z: \"))\nblockType = int(input(\"Enter Block ID: \"))\n\nmc.player.setTilePos(x_cord + 1, y_cord, z_cord)\nmc.setBlock(x_cord, y_cord, z_cord, blockType)", "id": "263149", "language": "Python", "matching_score": 2.5581345558166504, "max_stars_count": 0, "path": "chapter04-strings/teleportInput.py" }, { "content": "from mcpi.minecraft import Minecraft\nmc = Minecraft.create()\n\nposition = mc.player.getTilePos()\nx = 
position.x\ny = position.y\nz = position.z\n\n# lava block...\nblockType = 10\n\nmc.setBlock(x, (y - 1), z, blockType)\n\n", "id": "12303061", "language": "Python", "matching_score": 3.491133213043213, "max_stars_count": 0, "path": "chapter03-math/blockBelow.py" }, { "content": "from mcpi.minecraft import Minecraft\nmc = Minecraft.create()\n\nposition = mc.player.getTilePos()\nx = position.x\ny = position.y\nz = position.z\n\nblockType = 103\n\nfor _y in range(y, y + 20, 1):\n mc.setBlock((x + 2), _y, z, blockType)", "id": "12540281", "language": "Python", "matching_score": 0.3337668478488922, "max_stars_count": 0, "path": "chapter03-math/blockStack.py" }, { "content": "from mcpi.minecraft import Minecraft\nimport math\nmc = Minecraft.create()\n\nhomeX = 10\nhomeZ = 10\n\nwhile 1:\n pos = mc.player.getTilePos()\n x = pos.x\n z = pos.z\n\n distance = math.sqrt((homeX - x) ** 2 + (homeZ - z) ** 2)\n mc.postToChat(distance)", "id": "987300", "language": "Python", "matching_score": 4.047215938568115, "max_stars_count": 0, "path": "chapter05-booleans/farFromHome.py" }, { "content": "from mcpi.minecraft import Minecraft\nmc = Minecraft.create()\n\nmc.postToChat(mc.player.getTilePos())", "id": "2547477", "language": "Python", "matching_score": 2.252768039703369, "max_stars_count": 0, "path": "chapter04-strings/messageLocation.py" }, { "content": "from mcpi.minecraft import Minecraft\nimport time\n\nmc = Minecraft.create()\n\ny_change = False\n\nwhile True:\n position = mc.player.getTilePos()\n x = position.x\n y = position.y\n z = position.z\n\n if mc.player.getTilePos().y > y:\n y_change = True\n\n if y_change:\n for _y in range(mc.player.getTilePos().y, (mc.player.getTilePos().y + 10), 1):\n mc.player.setTilePos(x, _y, z)\n time.sleep(0.015)\n y_change = False\n", "id": "11306992", "language": "Python", "matching_score": 1.142778754234314, "max_stars_count": 0, "path": "chapter03-math/superJump.py" }, { "content": "# Import the Minecraft python library\nfrom mcpi.minecraft import Minecraft\n\n# create the minecraft connection instance\nmc = Minecraft.create()\n\n# this opens an connection when run and will provide the following\n# feedback in the server status window\n\"\"\"\n[14:06:27 INFO]: [RaspberryJuice] Opened connection to/127.0.0.1:57543.\n[14:06:27 INFO]: [RaspberryJuice] Starting output thread!\n[14:06:27 INFO]: [RaspberryJuice] Closed connection to/127.0.0.1:57543.\n\"\"\"\n\n# sets players x, y and z values in game (teleport)\nmc.player.setTilePos(0, 120, 0)", "id": "5440992", "language": "Python", "matching_score": 1.3279122114181519, "max_stars_count": 0, "path": "chapter01-intro/connectToMinecraft.py" }, { "content": "from mcpi.minecraft import Minecraft\nimport random\nimport time\n\nmc = Minecraft.create()\n\npos = mc.player.getPos()\nx = pos.x\ny = pos.y\nz = pos.z\n\nfor a in range(1, 10, 1):\n a += a\n _x = x + random.randint(-10, 10)\n _y = y + random.randint(-10, 10)\n _z = z + random.randint(-10, 10)\n\n mc.player.setPos(_x, _y, _z)\n mc.postToChat(\" x: \" + str(int(_x)) +\n \" y: \" + str(int(_y)) +\n \" z: \" + str(int(_z)))\n\n time.sleep(1)\n\n# set back to starting point\nmc.player.setPos(x, y, z)", "id": "2186592", "language": "Python", "matching_score": 1.4142135381698608, "max_stars_count": 0, "path": "chapter03-math/randomJump.py" }, { "content": "# pythonLovesMinecraft.py\n\nprint(2 + 2)\nprint(\"W\" + \"o\" * 20)\nprint(\"PYTHON!\")\nprint(\"<3s\")\nprint(\"Minecraft!\")", "id": "3387419", "language": "Python", "matching_score": 0, "max_stars_count": 0, 
"path": "chapter01-intro/pythonLovesMinecraft.py" }, { "content": "from setuptools import setup\n\n__version__ = '0.1'\n\nsetup(\n name='overStat',\n version=__version__,\n\n packages=['overStat'],\n\n description='Overwatch API Wrapper using SunDwarf/OWAPI',\n url='https://github.com/t04glovern/overStat',\n author='<NAME> <t04glovern>',\n license='MIT',\n install_requires=['requests']\n )", "id": "11863183", "language": "Python", "matching_score": 1.998740553855896, "max_stars_count": 0, "path": "setup.py" }, { "content": "from .overStat import *\n", "id": "783571", "language": "Python", "matching_score": 0.026616472750902176, "max_stars_count": 0, "path": "overStat/__init__.py" }, { "content": "import cv2 as cv\nimport numpy as np\nimport os\nfrom time import time, sleep\nfrom windowcapture import WindowCapture\nfrom utils import ResizeWithAspectRatio\n\nimport requests\n\n# Change the working directory to the folder this script is in.\n# Doing this because I'll be putting the files from each video in their own folder on GitHub\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\n\n\n# initialize the WindowCapture class\nwincap = WindowCapture('Dota 2')\n\nskill_off_cooldown_img = cv.imread('assets/skill-off-cooldown-1080p.png', cv.IMREAD_UNCHANGED)\nskill_on_cooldown_img = cv.imread('assets/skill-on-cooldown-1080p.png', cv.IMREAD_UNCHANGED)\n\ndef charge():\n response = requests.request(\"POST\", \"http://192.168.188.77:5678/turbo\")\n print(response.text)\n sleep(5)\n response = requests.request(\"POST\", \"http://192.168.188.77:5678/normal\")\n print(response.text)\n\ndef find_match(image, match):\n result = cv.matchTemplate(image, match, cv.TM_SQDIFF_NORMED)\n\n # I've inverted the threshold and where comparison to work with TM_SQDIFF_NORMED\n threshold = 0.25\n # The np.where() return value will look like this:\n # (array([482, 483, 483, 483, 484], dtype=int32), array([514, 513, 514, 515, 514], dtype=int32))\n locations = np.where(result <= threshold)\n # We can zip those up into a list of (x, y) position tuples\n locations = list(zip(*locations[::-1]))\n print(locations)\n\n if locations:\n print('Found object.')\n charge()\n\n object_w = match.shape[1]\n object_h = match.shape[0]\n line_color = (0, 255, 0)\n line_type = cv.LINE_4\n\n # Loop over all the locations and draw their rectangle\n for loc in locations:\n # Determine the box positions\n top_left = loc\n bottom_right = (top_left[0] + object_w, top_left[1] + object_h)\n # Draw the box\n cv.rectangle(image, top_left, bottom_right, line_color, line_type)\n return image\n\n\nloop_time = time()\nwhile(True):\n\n # get an updated image of the game\n screenshot = wincap.get_screenshot()\n\n match_result = find_match(screenshot, skill_on_cooldown_img)\n\n screenshot_resized = ResizeWithAspectRatio(match_result, width=1280)\n\n cv.imshow('Computer Vision', screenshot_resized)\n\n # debug the loop rate\n print('FPS {}'.format(1 / (time() - loop_time)))\n loop_time = time()\n\n # press 'q' with the output window focused to exit.\n # waits 1 ms every loop to process key presses\n if cv.waitKey(1) == ord('q'):\n cv.destroyAllWindows()\n break\n\nprint('Done.')\n", "id": "10529215", "language": "Python", "matching_score": 7.422122478485107, "max_stars_count": 0, "path": "dota/main.py" }, { "content": "import cv2 as cv\nimport numpy as np\nimport os\n\nfrom utils import ResizeWithAspectRatio\n\n# Change the working directory to the folder this script is in.\n# Doing this because I'll be putting the files from each video in their own 
folder on GitHub\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\n\n# Can use IMREAD flags to do different pre-processing of image files,\n# like making them grayscale or reducing the size.\n# https://docs.opencv.org/4.2.0/d4/da8/group__imgcodecs.html\nsample_img = cv.imread('assets/sample.png', cv.IMREAD_UNCHANGED)\n\n# skill_casting_img = cv.imread('assets/skill-casting.png', cv.IMREAD_UNCHANGED)\nskill_off_cooldown_img = cv.imread('assets/skill-off-cooldown.png', cv.IMREAD_UNCHANGED)\n# skill_on_cooldown_img = cv.imread('assets/skill-on-cooldown.png', cv.IMREAD_UNCHANGED)\n\n# There are 6 comparison methods to choose from:\n# TM_CCOEFF, TM_CCOEFF_NORMED, TM_CCORR, TM_CCORR_NORMED, TM_SQDIFF, TM_SQDIFF_NORMED\n# You can see the differences at a glance here:\n# https://docs.opencv.org/master/d4/dc6/tutorial_py_template_matching.html\n# Note that the values are inverted for TM_SQDIFF and TM_SQDIFF_NORMED\nresult = cv.matchTemplate(sample_img, skill_off_cooldown_img, cv.TM_SQDIFF_NORMED)\n\n# I've inverted the threshold and where comparison to work with TM_SQDIFF_NORMED\nthreshold = 0.17\n# The np.where() return value will look like this:\n# (array([482, 483, 483, 483, 484], dtype=int32), array([514, 513, 514, 515, 514], dtype=int32))\nlocations = np.where(result <= threshold)\n# We can zip those up into a list of (x, y) position tuples\nlocations = list(zip(*locations[::-1]))\nprint(locations)\n\nif locations:\n print('Found object.')\n\n object_w = skill_off_cooldown_img.shape[1]\n object_h = skill_off_cooldown_img.shape[0]\n line_color = (0, 255, 0)\n line_type = cv.LINE_4\n\n # Loop over all the locations and draw their rectangle\n for loc in locations:\n # Determine the box positions\n top_left = loc\n bottom_right = (top_left[0] + object_w, top_left[1] + object_h)\n # Draw the box\n cv.rectangle(sample_img, top_left, bottom_right, line_color, line_type)\n\n resized = ResizeWithAspectRatio(sample_img, width=1280)\n\n cv.imshow('Matches', resized)\n cv.waitKey()\n #cv.imwrite('result.jpg', resized)\n\n\nelse:\n print('object not found.')\n", "id": "11957576", "language": "Python", "matching_score": 1.3259358406066895, "max_stars_count": 0, "path": "dota/matcher.py" }, { "content": "#!/usr/bin/env python\nimport numpy as np\nimport cv2\n\ncap = cv2.VideoCapture(0)\n\n\ndef circleDetect(img, cnt, approx):\n x = approx.ravel()[0]\n y = approx.ravel()[1]\n cv2.putText(img, 'Circle', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0))\n cv2.drawContours(img, [cnt], 0, (0, 255, 255), -1)\n\n\ndef squareDetect(img, cnt, approx):\n x = approx.ravel()[0]\n y = approx.ravel()[1]\n cv2.putText(img, 'Square', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0))\n cv2.drawContours(img, [cnt], 0, (0, 0, 255), -1)\n\n\nwhile True:\n ret, frame = cap.read()\n\n # Turn image to grayscale\n gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n\n # Remove gaussian noise\n blur = cv2.GaussianBlur(gray, (5, 5), 0)\n kernel = np.ones((3, 3), np.uint8)\n mask = cv2.erode(blur, kernel)\n\n # Extract contours from frame\n contours, _ = cv2.findContours(\n mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n\n # For each large contour detected\n for cnt in contours:\n\n # Get the area of the contour\n if cv2.contourArea(cnt) < 400:\n continue\n\n # Approximate the number of sizes to the shape\n approx = cv2.approxPolyDP(\n cnt, 0.01 * cv2.arcLength(cnt, True), True)\n\n # if 4 sided shape\n if len(approx) == 4:\n squareDetect(frame, cnt, approx)\n # if greater than 15, assume circle\n elif len(approx) > 15:\n 
circleDetect(frame, cnt, approx)\n\n cv2.imshow('frame', frame)\n\n cv2.waitKey(3)\n\ncap.release()\ncv2.destroyAllWindows()\n", "id": "8356738", "language": "Python", "matching_score": 0.3189018666744232, "max_stars_count": 0, "path": "shape_detect.py" }, { "content": "from os import environ\nfrom os.path import join, dirname\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nimport re\nimport boto3\nimport urllib.parse\n\n\nclass EmailService(object):\n EMAIL_REGEX = re.compile(\n r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\")\n\n def __init__(self):\n self.ses = boto3.client('ses')\n\n def is_email(self, candidate):\n is_email = False\n if self.EMAIL_REGEX.match(candidate):\n is_email = True\n return is_email\n\n def send_email(self, email_addr, image_url):\n email = self.build_email(email_addr, image_url)\n self.ses.send_raw_email(\n RawMessage={'Data': email.as_string()},\n Source=email['From'],\n Destinations=[email['To']]\n )\n\n def build_email(self, email_addr, image_url):\n email = MIMEMultipart()\n email['Subject'] = 'Your Anime Selfie is ready!'\n email['From'] = environ.get('SENDER_EMAIL')\n email['To'] = email_addr\n\n email.preamble = 'Multipart message.\\n'\n\n email_body = self.build_email_body(image_url)\n part = MIMEText(email_body, 'html')\n email.attach(part)\n\n return email\n\n @staticmethod\n def build_email_body(image_url):\n image_url_escaped = urllib.parse.quote(image_url)\n html_file = join(dirname(__file__),\n 'templates', 'template.html')\n html_file = open(html_file, 'r')\n email = html_file.read()\n email = email.replace('{{image_url}}', image_url)\n email = email.replace('{{image_url_escaped}}', image_url_escaped)\n return email", "id": "6613651", "language": "Python", "matching_score": 1.410658836364746, "max_stars_count": 28, "path": "email_service.py" }, { "content": "import boto3\nimport urllib.request\nimport json\n\n\ndef build_response(message):\n return {\n \"dialogAction\": {\n \"type\": \"Close\",\n \"fulfillmentState\": \"Fulfilled\",\n \"message\": {\n \"contentType\": \"PlainText\",\n \"content\": message\n }\n }\n }\n\n\ndef lambda_handler(event, context):\n if 'GetSensorData' == event['currentIntent']['name']:\n sensor_id = event['currentIntent']['slots']['sensor_id']\n\n url = \"https://dt.nathanglover.com/api/v1/data\"\n res = urllib.request.urlopen(url)\n res_body = res.read()\n json_data = json.loads(res_body.decode(\"utf-8\"))\n\n msg = \"\"\n for item in json_data['data']:\n if item['sensor_id'] == (\"NODE-\" + sensor_id):\n msg += \"*Node ID*: \" + item['sensor_id'] + \"\\n*Node MAC*: \" + item[\n 'sensor_mac'] + \"\\n*Latitude*: \" + str(item['location_lat']) + \"\\n*Longitude*: \" + str(\n item['location_lon']) + \"\\n*Last Updated*: \" + item['timestamp'] + \"\\n*GPS Datestamp*: \" + item[\n 'datestamp'] + \"\\n*Altitude*: \" + str(item['altitude']) + \"\\n*Velocity*: \" + str(item[\n 'velocity']) + \"\\n*GPS Error*: \" + str(\n item['GPSerror']) + \"\\n*IMU Error*: \" + str(item[\n 'IMUerror']) + \"\\n*Valid Orientation*: \" + str(\n item['rightdirection']) + \"\\n*Course*: \" + str(item[\n 'course']) + \"\\n*Satellites*: \" + str(\n item['nsats']) + \"\\n*SNR1*: \" + str(item['snr1']) + \"\\n*SNR2*: \" + \\\n str(item['snr2']) + \"\\n*SNR3*: \" + str(item['snr3']) + \"\\n*SNR4*: \" + str(item['snr4'])\n\n return build_response(msg)\n", "id": "12455555", "language": "Python", "matching_score": 1.965041160583496, "max_stars_count": 0, "path": "lambda/get_sensor_data.py" }, { "content": 
"import boto3\nimport os\nimport json\nimport uuid\nimport decimal\nfrom boto3.dynamodb.conditions import Key, Attr\nfrom botocore.exceptions import ClientError\n\ndynamo_table = os.environ['DYNAMO_TABLE']\ndrone_id = 'drone_01'\n\n\n# Helper class to convert a DynamoDB item to JSON.\nclass DecimalEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, decimal.Decimal):\n if o % 1 > 0:\n return float(o)\n else:\n return int(o)\n return super(DecimalEncoder, self).default(o)\n\n\ndef altitude(event, context):\n body = json.loads(event['body'])\n altitude_change = body['altitude_change']\n\n # Construct message\n state = {\n \"idempotency\": str(uuid.uuid1()),\n \"commands\": [\n {\n \"command\": \"absolute_altitude\",\n \"argument\": altitude_change\n }\n ]\n }\n\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table(dynamo_table)\n response = table.put_item(\n Item={\n 'droneId': drone_id,\n 'state': state\n }\n )\n\n response = {\n \"headers\": {\n \"Access-Control-Allow-Origin\": \"*\",\n },\n \"statusCode\": 200\n }\n\n return response\n\n\ndef waypoint(event, context):\n body = json.loads(event['body'])\n waypoint = body['go_waypoint']\n\n # Construct message\n state = {\n \"idempotency\": str(uuid.uuid1()),\n \"commands\": [\n {\n \"command\": \"go_waypoint\",\n \"argument\": waypoint\n }\n ]\n }\n\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table(dynamo_table)\n response = table.put_item(\n Item={\n 'droneId': drone_id,\n 'state': state\n }\n )\n\n response = {\n \"headers\": {\n \"Access-Control-Allow-Origin\": \"*\",\n },\n \"statusCode\": 200\n }\n\n return response\n\n\ndef poll(event, context):\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table(dynamo_table)\n try:\n response = table.query(\n KeyConditionExpression=Key('droneId').eq(drone_id)\n )\n except (ClientError, KeyError) as e:\n print(e)\n\n else:\n item = response['Items'][0]['state']\n response = {\n \"headers\": {\n \"Access-Control-Allow-Origin\": \"*\",\n },\n \"statusCode\": 200,\n \"body\": json.dumps(item, cls=DecimalEncoder)\n }\n\n return response\n", "id": "10984302", "language": "Python", "matching_score": 0.16193531453609467, "max_stars_count": 0, "path": "backend/handler.py" }, { "content": "\"\"\"\nModule that contains the command line app.\n\nWhy does this file exist, and why not put this in __main__?\n\n You might be tempted to import things from __main__ later, but that will cause\n problems: the code will get executed twice:\n\n - When you run `python -m nathanglover` python will execute\n ``__main__.py`` as a script. 
That means there won't be any\n ``nathanglover.__main__`` in ``sys.modules``.\n - When you import __main__ it will get executed again (as a module) because\n there's no ``nathanglover.__main__`` in ``sys.modules``.\n\n Also see (1) from http://click.pocoo.org/5/setuptools/#setuptools-integration\n\"\"\"\nimport click\n\n\ndef echo_name():\n click.secho(\"<NAME>\")\n\n\ndef echo_website():\n click.secho(\"https://nathanglover.com\")\n\n\[email protected]()\ndef main():\n pass\n\n\[email protected]()\ndef website():\n echo_website()\n\n\[email protected]()\ndef name():\n echo_name()\n", "id": "11254247", "language": "Python", "matching_score": 0.9375752210617065, "max_stars_count": 0, "path": "src/nathanglover/cli.py" }, { "content": "from overStat import *\nimport pprint\n\nbattle_tag = 'GloverDude#1471'\n\now = OverStatAPI('key')\n\n\"\"\" --- Request Stats --- \"\"\"\nstats = ow.get_stats(battle_tag)\nprint(pprint.pformat(stats))\n\"\"\"\n{'_request': {'api_ver': 1, 'route': '/api/v2/u/GloverDude-1471/stats/general'},\n 'battletag': 'GloverDude-1471',\n 'game_stats': {'cards': 134.0,\n 'damage_done': 1846927.0,\n 'damage_done_most_in_game': 22312.0,\n 'deaths': 2460.0,\n 'defensive_assists': 467.0,\n 'defensive_assists_most_in_game': 28.0,\n 'eliminations': 4317.0,\n 'eliminations_most_in_game': 46.0,\n 'environmental_deaths': 55.0,\n 'environmental_kills': 33.0,\n 'final_blows': 2483.0,\n 'final_blows_most_in_game': 25.0,\n 'games_played': 354.0,\n 'games_won': 176.0,\n 'healing_done': 446029.0,\n 'healing_done_most_in_game': 9753.0,\n 'kpd': 1.75,\n 'medals': 869.0,\n 'medals_bronze': 235.0,\n 'medals_gold': 357.0,\n 'medals_silver': 277.0,\n 'melee_final_blows': 1.0,\n 'melee_final_blows_most_in_game': 1.0,\n 'multikill_best': 5.0,\n 'multikills': 25.0,\n 'objective_kills': 1525.0,\n 'objective_kills_most_in_game': 23.0,\n 'objective_time': '02:57:59',\n 'objective_time_most_in_game': '04:39',\n 'offensive_assists': 104.0,\n 'offensive_assists_most_in_game': 16.0,\n 'recon_assists': 14.0,\n 'solo_kills': 1018.0,\n 'solo_kills_most_in_game': 25.0,\n 'teleporter_pads_destroyed': 2.0,\n 'time_played': '47 hours',\n 'time_spent_on_fire': '05:47:25',\n 'time_spent_on_fire_most_in_game': '09:49'},\n 'overall_stats': {'comprank': 45,\n 'games': 354,\n 'level': 61,\n 'losses': 178,\n 'rank': None,\n 'win_rate': 49,\n 'wins': 176},\n 'region': 'us'}\n\"\"\"\n\n\"\"\" --- Request Heroes Stat --- \"\"\"\nstats = ow.get_hero(battle_tag, '1')\nprint(pprint.pformat(stats))\n\"\"\"\n{'_request': {'api_ver': 1, 'route': '/api/v1/u/GloverDude-1471/heroes/1'},\n 'battletag': 'GloverDude-1471',\n 'region': 'us',\n 'stats': [{'name': 'hero-specific stats',\n 'stats': [{'name': 'average enemies hooked', 'value': 15.0},\n {'name': 'average healing done', 'value': 3199.0},\n {'name': 'average self healing', 'value': 0.0},\n {'name': 'average whole hog kills', 'value': 3.0},\n {'name': 'best enemies hooked', 'value': 30.0},\n {'name': 'best healing done', 'value': 6378.0},\n {'name': 'best hook accuracy in game', 'value': '100%'},\n {'name': 'best self healing', 'value': 6378.0},\n {'name': 'best whole hog kills', 'value': 11.0},\n {'name': 'one life healing done', 'value': 3174.0},\n {'name': 'total enemies hooked', 'value': 889.0},\n {'name': 'total healing done', 'value': 185180.0},\n {'name': 'total hook accuracy', 'value': '51%'},\n {'name': 'total hooks attempted', 'value': 1738.0},\n {'name': 'total self healing', 'value': 185180.0},\n {'name': 'total whole hog kills', 'value': 177.0}]},\n {'name': 
'overall',\n 'stats': [{'name': 'final blows', 'value': 562.0},\n {'name': 'eliminations', 'value': 942.0},\n {'name': 'deaths', 'value': 393.0},\n {'name': 'damage done', 'value': 378775.0},\n {'name': 'score', 'value': 0.0},\n {'name': 'melee kills', 'value': 0.0},\n {'name': 'time played', 'value': '8 hours'},\n {'name': 'time on fire', 'value': '3 hours'},\n {'name': 'objective kills', 'value': 335.0},\n {'name': 'objective time', 'value': '47 minutes'},\n {'name': 'accuracy', 'value': '32%'},\n {'name': 'medals earned', 'value': 211.0},\n {'name': 'cards earned', 'value': 43.0}]},\n {'name': 'averages per game',\n 'stats': [{'name': 'final blows', 'value': 9.7},\n {'name': 'eliminations', 'value': 16.27},\n {'name': 'deaths', 'value': 6.78},\n {'name': 'damage done', 'value': 6543.0},\n {'name': 'objective kills', 'value': 5.78},\n {'name': 'objective time', 'value': '49.09 seconds'}]},\n {'name': 'best in one game',\n 'stats': [{'name': 'final blows', 'value': 23.0},\n {'name': 'eliminations', 'value': 33.0},\n {'name': 'killstreak', 'value': 0.0},\n {'name': 'damage done', 'value': 13139.0},\n {'name': 'objective kills', 'value': 19.0},\n {'name': 'objective time', 'value': 200.0},\n {'name': 'accuracy', 'value': '-'}]},\n {'name': 'best in one life',\n 'stats': [{'name': 'eliminations', 'value': 24.0},\n {'name': 'damage done', 'value': 7465.0},\n {'name': 'score', 'value': 0.0}]}]}\n\"\"\"\n", "id": "7915623", "language": "Python", "matching_score": 2.4406535625457764, "max_stars_count": 0, "path": "example.py" }, { "content": "import requests\n\nclass OverStatAPI:\n def __init__(self, key):\n self.key = key # key to future proof API\n\n def get_stats(self, battle_tag):\n return self._make_request(\n 'v2',\n battle_tag,\n 'stats'\n )\n\n def get_hero(self, battle_tag, hero_id):\n return self._make_request(\n 'v1',\n battle_tag,\n 'heroes/' + hero_id,\n )\n\n def validate_response(self, response):\n if response.status_code != 200:\n raise Exception\n\n def sanitize_battletag(self, battle_tag):\n if '#' in battle_tag:\n battle_tag = battle_tag.replace('#','-')\n return battle_tag\n\n def _make_request(self, api_version,battle_tag, url):\n battle_tag = self.sanitize_battletag(battle_tag)\n r = requests.get(\n 'https://owapi.net/api/{api_version}/u/{battle_tag}/{url}'.format(\n api_version = api_version,\n battle_tag = battle_tag,\n url = url\n )\n )\n self.validate_response(r)\n return r.json()", "id": "9289896", "language": "Python", "matching_score": 1.6353408098220825, "max_stars_count": 0, "path": "overStat/overStat.py" }, { "content": "from overStat import *\nfrom HD44780 import *\nfrom time import strftime, localtime\n\nlcd = HD44780()\n\n\nclass OverLCD:\n def __init__(self, battle_tag):\n self.battle_tag = battle_tag\n\n def queryStats(self):\n ow = OverStatAPI('key')\n return ow.get_stats(self.battle_tag)\n\n def printIntro(self):\n lcd.clear()\n lcd.writeMsg('Overwatch Stats', 0.1)\n lcd.writeMsg('\\nv0.1: t04glovern', 0.1)\n sleep(1)\n lcd.clear()\n\n def printWinLoss(self):\n lcd.clear()\n lcd.writeMsg('..Querying API..')\n stats = self.queryStats()\n lcd.clear()\n lcd.writeMsg(stats['battletag'], 0.1)\n lcd.writeMsg(('\\nW/L:' +\n str(stats['overall_stats']['wins']) +\n '/' +\n str(stats['overall_stats']['losses']) +\n ' Lv' +\n str(stats['overall_stats']['level'])), 0.1)\n sleep(5)\n lcd.clear()\n\n def highNoon(self):\n display.printIntro()\n while(1):\n lcd.writeMsg(\" \" + strftime(\"%H:%M:%S\", localtime()))\n sleep(5)\n display.printWinLoss()\n\n\nif __name__ == 
'__main__':\n battle_tag = 'GloverDude#1471'\n display = OverLCD(battle_tag)\n display.highNoon()\n", "id": "2678104", "language": "Python", "matching_score": 1.3424125909805298, "max_stars_count": 0, "path": "overLCD.py" }, { "content": "import smbus\nfrom time import sleep\n\nEn = 0b00000100 # Enable bit\nRw = 0b00000010 # Read/Write bit\nRs = 0b00000001 # Register select bit\nBLon = 0x08 # Backlight on\nBLoff = 0x00 # Backlight off\n\n\nclass HD44780:\n #################################################################\n # LOW LEVEL FUNCTIONS, CAN'T TOUCH THIS! #\n #################################################################\n\n def __init__(self, addr=0x27, port=1, backlight=True):\n self.currentline = 0\n self.bus = smbus.SMBus(port)\n self.addr = addr\n self.setBacklight(backlight)\n\n # Prepare LCD\n self._cmdWrite(0x03)\n self._cmdWrite(0x03)\n self._cmdWrite(0x03)\n self._cmdWrite(0x02)\n\n self._cmdWrite(0x33)\n self._cmdWrite(0x32)\n self._cmdWrite(0x28) # Send data in 4 bit, 2 lines, 5x7 pixels\n self._cmdWrite(0x0C) # Hide cursor\n self._cmdWrite(0x06) # Writing mode (From left to right)\n self._cmdWrite(0x01) # Clear screen\n\n # Custom chars\n self.setCustomChar(0, [0x00, 0x00, 0x04, 0x0E, 0x1F, 0x00, 0x00, 0x00]) # ARROW UP\n self.setCustomChar(1, [0x00, 0x00, 0x00, 0x1F, 0x0E, 0x04, 0x00, 0x00]) # ARROW DOWN\n self.setCustomChar(2, [0x1F, 0x1F, 0x1B, 0x15, 0x0E, 0x1F, 0x1F, 0x1F]) # ARROW UP NEG\n self.setCustomChar(3, [0x1F, 0x1F, 0x1F, 0x0E, 0x15, 0x1B, 0x1F, 0x1F]) # ARROW DOWN NEG\n\n sleep(0.2)\n\n def _busWrite(self, data):\n # Set data pins\n self.bus.write_byte(self.addr, data | self.backlight)\n # Send data to lcd\n self.bus.write_byte(self.addr, data | En | self.backlight)\n sleep(.0005)\n self.bus.write_byte(self.addr, ((data & ~En) | self.backlight))\n sleep(.0001)\n\n def _cmdWrite(self, cmd, mode=0):\n # Send first 4 bytes\n self._busWrite(mode | (cmd & 0xF0))\n # Send last 4 bytes\n self._busWrite(mode | ((cmd << 4) & 0xF0))\n\n #################################################################\n # MEDIUM LEVEL FUNCTIONS #\n #################################################################\n\n def setCustomChar(self, pos, char):\n positions = [0x40, 0x48, 0x50, 0x58, 0x60, 0x68, 0x70, 0x78]\n self._cmdWrite(positions[pos])\n for i in char:\n self._cmdWrite(i, Rs)\n\n def customChar(self, charId):\n self._cmdWrite(charId, Rs)\n\n #################################################################\n # HIGH LEVEL FUNCTIONS FOR USER #\n #################################################################\n\n def writeMsg(self, text, time=0, homing=True):\n if homing:\n self.currentline = 0\n self.home()\n for char in text:\n if char == '\\n': # NEW LINE\n self.nextLine()\n elif char == '\\a': # ARROW UP\n sleep(time)\n self._cmdWrite(0, Rs)\n elif char == '\\b': # ARROW DOWN\n sleep(time)\n self._cmdWrite(1, Rs)\n elif char == '\\f': # ARROW UP NEG\n sleep(time)\n self._cmdWrite(2, Rs)\n elif char == '\\t': # ARROW DOWN NEG\n sleep(time)\n self._cmdWrite(3, Rs)\n else:\n sleep(time)\n self._cmdWrite(ord(char), Rs)\n\n def clear(self): # Clear display\n self._cmdWrite(0x01)\n\n def scrollRight(self): # Move all characters right\n self._cmdWrite(0x1E)\n\n def scrollLeft(self): # Move all characters left\n self._cmdWrite(0x18)\n\n def blank(self): # Hide all characters\n self._cmdWrite(0x08)\n\n def restore(self): # Show all characters\n self._cmdWrite(0x0C)\n\n def setBacklight(self, value): # Power on/off backlight\n self.backlight = BLon if value else BLoff\n\n 
def home(self): # Move cursor to left, top corner\n self._cmdWrite(0x02)\n self.currentline = 0\n\n def cursorRight(self): # Move cursor one place right\n self._cmdWrite(0x02)\n\n def cursorLeft(self): # Move cursor one place left\n self._cmdWrite(0x02)\n\n def nextLine(self): # Move cursor to next line\n self.setLine(self.currentline + 1)\n\n def setLine(self, line): # Move cursor to line\n if line == 0:\n self._cmdWrite(0x80)\n elif line == 1:\n self._cmdWrite(0xC0)\n elif line == 2:\n self._cmdWrite(0x94)\n elif line == 3:\n self._cmdWrite(0xD4)\n self.currentline = line\n", "id": "9367390", "language": "Python", "matching_score": 1.4142135381698608, "max_stars_count": 0, "path": "HD44780/HD44780.py" }, { "content": "from .HD44780 import *", "id": "9823886", "language": "Python", "matching_score": 0.04250742867588997, "max_stars_count": 0, "path": "HD44780/__init__.py" } ]
1.965041
kxxoling
[ { "content": "from functools import wraps\nfrom flask import request, current_app, make_response, Response\n\n\ndef json_or_jsonp(func):\n \"\"\"Wrap response in JSON or JSONP style\"\"\"\n @wraps(func)\n def _(*args, **kwargs):\n mimetype = 'application/javascript'\n callback = request.args.get('callback', None)\n if callback is None:\n content = func(*args, **kwargs)\n\n else:\n content = \"%s(%s)\" % (callback, func(*args, **kwargs))\n return current_app.response_class(content, mimetype=mimetype)\n return _\n\n\ndef add_response_headers(headers):\n \"\"\"Add headers passed in to the response\n\n Usage:\n\n .. code::py\n\n @app.route('/')\n @add_response_headers({'X-Robots-Tag': 'noindex'})\n def not_indexed():\n # This will set ``X-Robots-Tag: noindex`` in the response headers\n return \"Check my headers!\"\n \"\"\"\n\n def decorator(func):\n @wraps(func)\n def _(*args, **kwargs):\n rsp = make_response(func(*args, **kwargs))\n rsp_headers = rsp.headers\n for header, value in headers.items():\n rsp_headers[header] = value\n return rsp\n return _\n return decorator\n\n\ndef gen(mimetype):\n \"\"\"``gen`` is a decorator factory function, you just need to set\n a mimetype before using::\n\n @app.route('/')\n @gen('')\n def index():\n pass\n\n A full demo for creating a image stream is available on\n `GitHub <https://github.com/kxxoling/flask-video-streaming>`__ .\n \"\"\"\n def streaming(func, *args, **kwargs):\n @wraps(func)\n def _():\n return Response(func(*args, **kwargs),\n mimetype=mimetype)\n return _\n return streaming\n", "id": "3918259", "language": "Python", "matching_score": 1.3825181722640991, "max_stars_count": 1, "path": "flask_decorators/__init__.py" }, { "content": "\"\"\"\nFlask-Decorators\n----------------\nA list of Flask decorator utilities not include in the origin flask project.\n\nLinks\n`````\n* `GitHub <https://github.com/kxxoling/flask-decorators>`_\n\"\"\"\nfrom setuptools import setup\n\n\nsetup(\n name='Flask-Decorators',\n version='0.0.1',\n url='https://github.com/kxxoling/flask-decorators',\n license='MIT',\n author='<NAME>',\n author_email='<EMAIL>',\n description='A list of Flask decorator utilities'\n 'not include in the origin flask project.',\n long_description=__doc__,\n packages=['flask_decorators'],\n zip_safe=False,\n platforms='any',\n install_requires=[\n 'Flask>=0.10',\n ],\n test_suite='test_decorators.suite',\n classifiers=[\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n ],\n)\n", "id": "11161523", "language": "Python", "matching_score": 1.7476574182510376, "max_stars_count": 1, "path": "setup.py" }, { "content": "import unittest\nimport flask\n\n\nclass MainTestCase(unittest.TestCase):\n\n def setUp(self):\n app = flask.Flask(__name__)\n\n self.app = app\n\n def tearDown(self):\n pass\n\n\ndef suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(MainTestCase))\n\n return suite\n\n\nif __name__ == '__main__':\n unittest.main(defaultTest='suite')\n", "id": "1592603", "language": "Python", "matching_score": 0.1699717938899994, "max_stars_count": 1, 
"path": "test_decorators.py" }, { "content": "from flask import url_for\nfrom flask.ext.admin.contrib.sqla import ModelView\nfrom flask.ext.admin import Admin\n\nfrom .models import User, Work, Company, Resume\n\n\nclass AdminRequiredView(ModelView):\n def is_accessible(self):\n return True\n\n\ndef register_admin(app, db):\n admin = Admin(app, endpoint='admin', template_mode='bootstrap3')\n admin.add_view(AdminRequiredView(User, db.session))\n admin.add_view(AdminRequiredView(Work, db.session))\n admin.add_view(AdminRequiredView(Company, db.session))\n admin.add_view(AdminRequiredView(Resume, db.session))\n", "id": "3617761", "language": "Python", "matching_score": 1.7230026721954346, "max_stars_count": 0, "path": "horus/admin.py" }, { "content": "from flask.ext.sqlalchemy import SQLAlchemy\n\n\ndb = SQLAlchemy()\n\n\nclass BasicModel(object):\n\n id_ = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(100))\n\n def to_dict(self):\n raise NotImplementedError # Exception('NotImplemented')\n\n def to_json(self):\n pass\n\n def __unicode__(self):\n return \"<Model %s>%d: %s\" % (self.__name__, self.id_, self.name)\n\n\nclass User(BasicModel, db.Model):\n __tablename__ = 'users'\n\n email = db.Column(db.String(50))\n family_name = db.Column(db.String(100))\n register_time = db.Column(db.DateTime)\n role = db.Column(db.Integer)\n\n\nclass Company(BasicModel, db.Model):\n __tablename__ = 'companies'\n\n register_time = db.Column(db.DateTime)\n scale = db.Column(db.String(100))\n bonus = db.Column(db.String(500))\n\n\nclass Work(BasicModel, db.Model):\n __tablename__ = 'works'\n\n publish_time = db.Column(db.DateTime)\n description = db.Column(db.String(1000))\n is_effective = db.Column(db.Boolean)\n\n\nclass Resume(BasicModel, db.Model):\n __tablename__ = 'resumes'\n\n create_time = db.Column(db.DateTime)\n update_time = db.Column(db.DateTime)\n content = db.Column(db.Text)\n\n", "id": "11642003", "language": "Python", "matching_score": 0.4541301131248474, "max_stars_count": 0, "path": "horus/models.py" }, { "content": "from flask import Blueprint\nfrom flask import render_template\nfrom flask import url_for, session, request, redirect, flash\nfrom flask.ext.oauthlib.client import OAuth\n\nfrom .models import Resume\n\n\nfrontend_views = Blueprint('frontend', __name__, url_prefix='/')\n\noauth_views = Blueprint('oauth', __name__, url_prefix='/oauth/')\n\noauth = OAuth()\ngithub = oauth.remote_app(\n 'github',\n consumer_key='08db72ce47a207704fb4',\n consumer_secret='f5e5eff75760ea886e033a6ec87b23d33d4903a0',\n request_token_params={'scope': 'user:email'},\n base_url='https://api.github.com/',\n request_token_url=None,\n access_token_method='POST',\n access_token_url='https://github.com/login/oauth/access_token',\n authorize_url='https://github.com/login/oauth/authorize'\n)\n\n\n@frontend_views.route('/')\ndef index():\n return render_template('index.jade', page_title='Welcome to Horus')\n\n\n@frontend_views.route('resume/')\ndef list_resumes():\n resume_list = Resume.query.all()\n return render_template('resume_list.jade', resume_list=resume_list)\n\n\n@frontend_views.route('resume/<int:resume_id>/')\ndef show_resume(resume_id=None):\n resume = Resume.query.get_or_404(resume_id)\n\n return render_template('resume.jade', resume=resume)\n\n\n@oauth_views.route('login/')\ndef github_login():\n return github.authorize(callback=url_for('oauth.github_authorized', _external=True))\n\n\n@oauth_views.route('logout')\ndef github_logout():\n session.pop('github_token', None)\n return 
redirect(url_for('frontend.index'))\n\n\n@oauth_views.route('login/authorized')\ndef github_authorized():\n resp = github.authorized_response()\n if resp is None:\n return 'Access denied: reason=%s error=%s' % (\n request.args['error'],\n request.args['error_description'],\n )\n session['github_token'] = (resp['access_token'], '')\n user = github.get('user')\n flash('%s, Welcome!' % user.data['name'])\n return redirect('/')\n\n\[email protected]\ndef get_github_oauth_token():\n return session.get('github_token')\n", "id": "5479549", "language": "Python", "matching_score": 1.936702013015747, "max_stars_count": 0, "path": "horus/views.py" }, { "content": "from flask import Flask\nfrom flask import send_file\n\nfrom .views import frontend_views\nfrom .views import oauth_views\nfrom .views import oauth\nfrom .models import db as main_db\nfrom .admin import register_admin\n\n\ndef create_app(config=None):\n app = Flask(\n __name__,\n template_folder='templates'\n )\n\n if isinstance(config, dict):\n app.config.update(config)\n elif config:\n app.config.from_pyfile(config)\n\n #: prepare for database\n main_db.init_app(app)\n main_db.app = app\n main_db.create_all()\n\n register_jinja(app)\n register_static(app)\n register_oauth(app, oauth)\n register_routes(app)\n register_admin(app, main_db)\n\n return app\n\n\ndef register_routes(app):\n app.register_blueprint(frontend_views)\n app.register_blueprint(oauth_views)\n return app\n\n\ndef register_oauth(app, oauth):\n oauth.init_app(app)\n\n\ndef register_static(app):\n @app.route('/<file_name>.txt')\n def plain_file(file_name):\n return send_file(file_name)\n return app\n\n\ndef register_jinja(app):\n app.jinja_env.add_extension('pyjade.ext.jinja.PyJadeExtension')\n return app\n", "id": "6707741", "language": "Python", "matching_score": 1.9613524675369263, "max_stars_count": 0, "path": "horus/apps.py" }, { "content": "from __future__ import print_function\nimport os\nfrom horus.apps import create_app\n\n\nconfig_file = os.path.join(os.path.dirname(os.path.realpath(__file__))\n , 'config.py')\napp = create_app(config_file)\n\n\nif __name__ == '__main__':\n print('URL map:')\n print(app.url_map)\n app.run(debug=True, host='0.0.0.0')\n", "id": "9190875", "language": "Python", "matching_score": 1.0980799198150635, "max_stars_count": 0, "path": "run.py" }, { "content": "import os\n\n\nBASE_DIR = os.path.abspath(os.path.dirname(__file__))\n\nCSRF_ENABLED = True\nSECRET_KEY = 'you-will-never-guess'\n\nSQLITE = 'db.sqlite3'\nSQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(BASE_DIR, SQLITE) + '?check_same_thread=False'\n", "id": "20126", "language": "Python", "matching_score": 0.8416875004768372, "max_stars_count": 0, "path": "config.py" } ]
1.382518
TheMeier
[ { "content": "\n\nimport logging\nimport sys\n\nimport ldap\nfrom flask import current_app as app\n\nfrom dim import db\nfrom dim.models import Group, User, GroupMembership, Department\nfrom dim.transaction import time_function, transaction\n\n\nclass LDAP(object):\n def __init__(self):\n ldap_server = app.config['LDAP_SERVER']\n try:\n self.conn = ldap.initialize(ldap_server, bytes_mode=False)\n self.conn.set_option(ldap.OPT_TIMEOUT, app.config['LDAP_OPT_TIMEOUT'])\n self.conn.set_option(ldap.OPT_TIMELIMIT, app.config['LDAP_OPT_TIMELIMIT'])\n self.conn.set_option(ldap.OPT_NETWORK_TIMEOUT, app.config['LDAP_OPT_NETWORK_TIMEOUT'])\n self.conn.simple_bind_s()\n except:\n logging.exception('Error connecting to ldap server %s', ldap_server)\n raise\n\n def query(self, base, filter):\n try:\n if filter:\n return self.conn.search_s(base, filterstr=filter, scope=ldap.SCOPE_ONELEVEL)\n else:\n return self.conn.search_s(base, scope=ldap.SCOPE_ONELEVEL)\n except:\n logging.exception('Error in LDAP query %s %s', base, filter)\n raise\n\n def users(self, filter):\n '''Return the set of usernames matching the ldap query.'''\n def fix_int(s): return int(s) if s is not None else s\n return [User(username=u[1]['o'][0].decode('utf-8'),\n ldap_cn=u[1]['cn'][0].decode('utf-8'),\n ldap_uid=fix_int(u[1]['uid'][0]),\n department_number=fix_int(u[1]['departmentNumber'][0]),\n register=False)\n for u in self.query(app.config['LDAP_USER_BASE'], filter)]\n\n def departments(self, filter):\n '''Return the list of departments'''\n res = self.query(app.config['LDAP_DEPARTMENT_BASE'], filter)\n if not res:\n return []\n else:\n return [Department(department_number=int(dept[1]['ou'][0]),\n name=dept[1]['cn'][0].decode('utf-8'))\n for dept in res]\n\n\ndef sync_departments(ldap, dry_run=False):\n '''Update the department table'''\n db_departments = Department.query.all()\n ldap_departments = dict((dep.department_number, dep) for dep in ldap.departments(None))\n # handle renamed or deleted departments\n for ddep in db_departments:\n ldep = ldap_departments.get(ddep.department_number)\n if ldep:\n if ddep.name != ldep.name:\n logging.info('Renaming department %s to %s' % (ddep.name, ldep.name))\n if not dry_run:\n ddep.name = ldep.name\n del ldap_departments[ddep.department_number]\n else:\n logging.info('Deleting department %s' % ddep.name)\n if not dry_run:\n db.session.delete(ddep)\n # handle new departments\n for ldep in list(ldap_departments.values()):\n logging.info('Creating department %s' % ldep.name)\n if not dry_run:\n db.session.add(ldep)\n\n\ndef log_stdout(message):\n logging.info(message)\n print(message)\n\n\ndef sync_users(ldap, dry_run=False):\n '''Update the user table ldap_cn, ldap_uid and department_number fields'''\n db_users = User.query.all()\n ldap_users = dict((u.username, u)\n for u in ldap.users('(|%s)' % ''.join('(o=%s)' % u.username for u in db_users)))\n for db_user in db_users:\n ldap_user = ldap_users.get(db_user.username)\n if ldap_user:\n if db_user.ldap_cn != ldap_user.ldap_cn:\n logging.info('User %s changed cn from %s to %s' %\n (db_user.username,\n db_user.ldap_cn,\n ldap_user.ldap_cn))\n if not dry_run:\n db_user.ldap_cn = ldap_user.ldap_cn\n if db_user.department_number != ldap_user.department_number:\n logging.info('User %s moved from department_number %s to %s' %\n (db_user.username,\n db_user.department_number,\n ldap_user.department_number))\n if not dry_run:\n db_user.department_number = ldap_user.department_number\n if db_user.ldap_uid != ldap_user.ldap_uid:\n 
logging.info('User %s changed uid from %s to %s' %\n (db_user.username,\n db_user.ldap_uid,\n ldap_user.ldap_uid))\n if not dry_run:\n db_user.ldap_uid = ldap_user.ldap_uid\n elif db_user.ldap_uid:\n log_stdout('Deleting user %s' % db_user.username)\n if not dry_run:\n db.session.delete(db_user)\n\n\n@time_function\n@transaction\ndef ldap_sync(dry_run=False):\n ldap = LDAP()\n\n if sys.stdout.isatty():\n logging.getLogger().addHandler(logging.StreamHandler(sys.stderr))\n\n sync_departments(ldap, dry_run)\n sync_users(ldap, dry_run)\n\n # Synchronize group members\n ldap_users = {} # map department_number to list of usernames\n for group in Group.query.filter(Group.department_number != None).all(): # noqa\n search_results = ldap.departments('ou=%s' % group.department_number)\n if len(search_results) == 0:\n group.department_number = None\n log_stdout('Department %s %s was deleted and had the following members from LDAP: %s' % (\n group.department_number,\n group.name,\n ' '.join(gm.user.username for gm in GroupMembership.query\n .filter(GroupMembership.from_ldap)\n .filter(GroupMembership.group == group).all())))\n else:\n dept = search_results[0]\n if dept.name != group.name:\n new_name = dept.name\n if Group.query.filter(Group.name == new_name).count():\n # DIM-209 append id to department name to generate an unique user group name\n new_name += '_%s' % dept.department_number\n logging.info('Renaming group %s to %s' % (group.name, new_name))\n if not dry_run:\n group.name = new_name\n ldap_users[group.department_number] = \\\n [u.username for u in ldap.users('departmentNumber=%s' % dept.department_number)]\n # Remove all users added by a ldap query that are no longer present in the group\n for membership in GroupMembership.query.filter(GroupMembership.from_ldap).all(): # noqa\n if membership.group.department_number is None or \\\n membership.user.username not in ldap_users[membership.group.department_number]:\n logging.info('User %s was removed from group %s' %\n (membership.user.username, membership.group.name))\n if not dry_run:\n membership.group.remove_user(membership.user)\n # Add new users to groups\n for group in Group.query.filter(Group.department_number != None).all(): # noqa\n group_users = set([u.username for u in group.users])\n for username in [u for u in ldap_users[group.department_number] if u not in group_users]:\n user = User.query.filter_by(username=username).first()\n if user is None:\n ldap_search = ldap.users('o=%s' % username)\n if ldap_search:\n lu = ldap_search[0]\n user = User(username=username,\n ldap_uid=lu.ldap_uid,\n ldap_cn=lu.ldap_cn,\n department_number=lu.department_number)\n if not dry_run:\n db.session.add(user)\n db.session.add(GroupMembership(user=user, group=group, from_ldap=True))\n group_users.add(username)\n logging.info('User %s was created and added to group %s', username, group.name)\n else:\n logging.info('User %s was added to group %s', username, group.name)\n if not dry_run:\n db.session.add(GroupMembership(user=user, group=group, from_ldap=True))\n group_users.add(username)\n", "id": "3290110", "language": "Python", "matching_score": 1.3301546573638916, "max_stars_count": 0, "path": "dim/dim/ldap_sync.py" }, { "content": "import base64\nimport math\n\nimport Crypto.PublicKey.RSA\nimport Crypto.Util.number\nfrom Crypto.Util.number import inverse\n\n\n_file_privkey_rsa = \"\"\"Private-key-format: v1.2\nAlgorithm: %(alg)d (%(algtxt)s)\nModulus: %(n)s\nPublicExponent: %(e)s\nPrivateExponent: %(d)s\nPrime1: %(p)s\nPrime2: %(q)s\nExponent1: 
%(dmp1)s\nExponent2: %(dmq1)s\nCoefficient: %(u)s\n\"\"\"\n\n\ndef _rsa2dnskey(key):\n \"\"\"Get RSA public key in DNSKEY resource record format (RFC-3110)\"\"\"\n octets = b''\n explen = int(math.ceil(math.log(key.e, 2)/8))\n if explen > 255:\n octets = \"\\x00\"\n octets += (Crypto.Util.number.long_to_bytes(explen) +\n Crypto.Util.number.long_to_bytes(key.e) +\n Crypto.Util.number.long_to_bytes(key.n))\n return octets\n\n\ndef generate_RSASHA256_key_pair(bits):\n key = Crypto.PublicKey.RSA.generate(bits)\n pubkey = base64.b64encode(_rsa2dnskey(key))\n RSASHA256 = 8\n keydata = dict(alg=RSASHA256,\n algtxt='RSASHA256')\n for field in ['n', 'e', 'd', 'p', 'q', 'u']:\n f = getattr(key, field)\n f = Crypto.Util.number.long_to_bytes(f)\n keydata[field] = base64.b64encode(f).decode('utf-8')\n dmp1 = Crypto.Util.number.long_to_bytes(key.d % (key.p - 1))\n keydata['dmp1'] = base64.b64encode(dmp1).decode('utf-8')\n dmq1 = Crypto.Util.number.long_to_bytes(key.d % (key.q - 1))\n keydata['dmq1'] = base64.b64encode(dmq1).decode('utf-8')\n # key.u == inverse(p, q), but rfc3447 needs inverse(q, p)\n u = Crypto.Util.number.long_to_bytes(inverse(key.q, key.p))\n keydata['u'] = base64.b64encode(u).decode('utf-8')\n privkey = _file_privkey_rsa % keydata\n return (pubkey, privkey)\n", "id": "4441592", "language": "Python", "matching_score": 0.6911045908927917, "max_stars_count": 37, "path": "dim/dim/crypto.py" }, { "content": "import unittest\nimport hashlib\nfrom dim.models.dns import dnskey_tag, ds_hash, dnskey_rdata\n\npubkey = '<KEY> # noqa\nalgorithm = 8\nflags = 257\nprotocol = 3\n\n\nclass DNSKEYTest(unittest.TestCase):\n def test_dnskey_tag(self):\n assert dnskey_tag(dnskey_rdata(flags, protocol, algorithm, pubkey)) == 20842\n\n def test_ds_hash(self):\n assert ds_hash('a.com', dnskey_rdata(flags, protocol, algorithm, pubkey), hashlib.sha1) == \\\n '91053B9A59B05FB08D5469472A5F1B588C5CA092'\n assert ds_hash('a.com', dnskey_rdata(flags, protocol, algorithm, pubkey), hashlib.sha256) == \\\n 'FFB15B5EF961E0AE3474E7B868FBD3C8F7C861D3BEA4527382CBDA791D4B9FF4'\n", "id": "828656", "language": "Python", "matching_score": 0.34627026319503784, "max_stars_count": 37, "path": "dim-testsuite/tests/dnssec_test.py" }, { "content": "import unittest\nimport logging\nfrom contextlib import contextmanager\nfrom pprint import saferepr as safe_repr\nfrom dim import create_app, db, rpc\nfrom dim.ipaddr import IP\nfrom dim.models import clean_database, User, Group, AccessRight, Layer3Domain, Ipblock\n\n\n@contextmanager\ndef raises(error):\n try:\n yield\n except error:\n pass\n except Exception as e:\n logging.exception(e)\n raise AssertionError(\"Expected exception %s but got %s\" % (error.__name__, e.__class__.__name__))\n else:\n raise AssertionError(\"No exception raised\")\n\n\nclass DatabaseTest(unittest.TestCase):\n def setUp(self):\n self.app = create_app('TEST', testing=True)\n self.ctx = self.app.test_request_context()\n self.ctx.push()\n clean_database()\n\n def tearDown(self):\n db.session.remove()\n db.get_engine(self.app).dispose()\n self.ctx.pop()\n\n # taken from unittest/python 2.7\n def assertDictSubset(self, actual, expected, msg=None):\n \"\"\"Checks whether actual is a superset of expected.\"\"\"\n missing = []\n mismatched = []\n for key, value in expected.items():\n if key not in actual:\n missing.append(key)\n elif value != actual[key]:\n mismatched.append('%s, expected: %s, actual: %s' %\n (safe_repr(key), safe_repr(value),\n safe_repr(actual[key])))\n\n if not (missing or mismatched):\n 
return\n\n if missing:\n msg = 'Missing: %s' % ','.join(safe_repr(m) for m in missing)\n if mismatched:\n if msg:\n msg += '; '\n msg += 'Mismatched values: %s' % ','.join(mismatched)\n self.fail(msg)\n\n\nclass RPCTest(DatabaseTest):\n def setUp(self):\n DatabaseTest.setUp(self)\n group = Group(name='group')\n group.users.add(User('test_user'))\n group.rights.add(AccessRight(access='network_admin', object_class='all', object_id=0))\n group.rights.add(AccessRight(access='dns_admin', object_class='all', object_id=0))\n db.session.add(group)\n db.session.commit()\n self.r = rpc.TRPC('test_user')\n\n\ndef query_ip(ip_str):\n layer3domain = Layer3Domain.query.first()\n return Ipblock.query_ip(IP(ip_str), layer3domain)\n", "id": "7857249", "language": "Python", "matching_score": 2.62369966506958, "max_stars_count": 37, "path": "dim-testsuite/tests/util.py" }, { "content": "from contextlib import wraps\n\nfrom sqlalchemy import Column, Integer, BigInteger, Boolean, String, Text, ForeignKey, UniqueConstraint, or_\nfrom sqlalchemy.ext.associationproxy import association_proxy\nfrom sqlalchemy.orm import relationship, backref\n\nfrom dim import db\nfrom dim.errors import PermissionDeniedError, InvalidAccessRightError\nfrom dim.models import TrackChanges, get_session_tool\nfrom dim.util import is_reverse_zone\n\n\ndef _find_or_create(klass):\n def find_or_create(**kwargs):\n o = klass.query.filter_by(**kwargs).first()\n if not o:\n o = klass(**kwargs)\n db.session.add(o)\n return o\n return find_or_create\n\n\nclass GroupMembership(db.Model):\n __tablename__ = 'usergroupuser'\n\n usergroup_id = Column(BigInteger, ForeignKey('usergroup.id'), primary_key=True)\n user_id = Column(BigInteger, ForeignKey('user.id'), primary_key=True)\n from_ldap = Column(Boolean, nullable=False)\n\n user = relationship('User', uselist=False, backref=backref('group_membership', cascade='all, delete-orphan'))\n group = relationship('Group', uselist=False, backref=backref('membership', cascade='all, delete-orphan', collection_class=set))\n\n def __init__(self, user=None, group=None, from_ldap=False):\n self.user = user\n self.group = group\n self.from_ldap = from_ldap\n\n\nclass UserType(db.Model):\n id = Column(BigInteger, primary_key=True, nullable=False)\n name = Column(String(128), nullable=False, unique=True)\n\n\nclass Group(db.Model, TrackChanges):\n __tablename__ = 'usergroup'\n\n id = Column(BigInteger, primary_key=True, nullable=False)\n # when department_number is set, ldap_sync will update the group name to match the linked\n # department name\n name = Column(String(128), nullable=False, unique=True)\n # when not null, this group is linked to an LDAP department\n department_number = Column(Integer, nullable=True, unique=True)\n\n users = association_proxy('membership', 'user')\n rights = association_proxy('group_rights', 'accessright')\n\n def __str__(self):\n return self.name\n\n @property\n def is_network_admin(self):\n return GroupRight.query.filter_by(group=self)\\\n .join(AccessRight).filter_by(access='network_admin').count() != 0\n\n @property\n def is_dns_admin(self):\n return GroupRight.query.filter_by(group=self)\\\n .join(AccessRight).filter_by(access='dns_admin').count() != 0\n\n @property\n def network_rights(self):\n return GroupRight.query.filter_by(group=self).join(AccessRight)\\\n .filter(AccessRight.access.in_(AccessRight.grantable_by_network_admin)).count()\n\n @property\n def dns_rights(self):\n return GroupRight.query.filter_by(group=self).join(AccessRight)\\\n 
.filter(AccessRight.access.in_(AccessRight.grantable_by_dns_admin)).count()\n\n def remove_user(self, user):\n self.users.remove(user)\n\n\nclass Department(db.Model):\n '''This table only serves as a cache of the departments available in LDAP'''\n department_number = Column(Integer, primary_key=True)\n name = Column(String(128), nullable=False)\n\n\nclass GroupRight(db.Model):\n __table_constraints__ = (UniqueConstraint('usergroup_id', 'accessright_id'), )\n\n id = Column(BigInteger, primary_key=True, nullable=False)\n accessright_id = Column(BigInteger, ForeignKey('accessright.id'))\n usergroup_id = Column(BigInteger, ForeignKey('usergroup.id'))\n\n group = relationship(Group, backref=backref('group_rights', collection_class=set, cascade='all, delete-orphan'))\n accessright = relationship('AccessRight', uselist=False, backref=backref('grouprights', cascade='all, delete-orphan'))\n\n def __init__(self, accessright):\n self.accessright = accessright\n\n\nclass AccessRight(db.Model):\n __table_constraints__ = (UniqueConstraint('access', 'object_class', 'object_id'), )\n\n id = Column(BigInteger, primary_key=True, nullable=False)\n access = Column(String(128), nullable=False)\n object_class = Column(String(128), nullable=False)\n object_id = Column(BigInteger, nullable=False)\n\n groups = association_proxy('grouprights', 'group')\n\n grantable_by_network_admin = ['allocate']\n grantable_by_dns_admin = [\n 'dns_update_agent',\n 'zone_create',\n 'zone_admin',\n 'create_rr',\n 'delete_rr']\n\n\nAccessRight.find_or_create = staticmethod(_find_or_create(AccessRight))\n\n\ndef permission(func):\n @wraps(func)\n def wrapper(self, *args):\n if self.is_super_admin:\n return\n if not func(self, *args):\n import inspect\n argspec = inspect.getargspec(func).args\n if 'type' in argspec:\n args = args[0:argspec.index('type') - 1] + args[argspec.index('type'):]\n reason = ' '.join(str(a) for a in (func.__name__, ) + args)\n raise PermissionDeniedError('Permission denied (%s)' % reason)\n else:\n return True\n return wrapper\n\n\nUserRights = dict(\n can_dns_admin={'tool_access': False, 'access': [('dns_admin', None)]},\n can_create_forward_zones={'tool_access': False, 'access': [('dns_admin', None), ('zone_create', None)]},\n can_create_reverse_zones={'tool_access': True, 'access': [('dns_admin', None), ('network_admin', None),\n ('zone_create', None)]},\n can_delete_reverse_zones={'tool_access': True, 'access': [('dns_admin', None), ('network_admin', None)]},\n can_dns_update_agent={'tool_access': False, 'access': [('dns_update_agent', None)]},\n\n can_network_admin={'tool_access': True, 'access': [('network_admin', None)]},\n can_modify_pool_attributes={'tool_access': False, 'access': [('dns_admin', None), ('network_admin', None)]},\n can_modify_container_attributes={'tool_access': False, 'access': [('dns_admin', None), ('network_admin', None)]},\n\n can_create_groups={'tool_access': False, 'access': [('dns_admin', None), ('network_admin', None)]})\n\n\nclass User(db.Model):\n __tablename__ = 'user'\n\n id = Column(BigInteger, primary_key=True, nullable=False)\n user_type_id = Column(BigInteger, ForeignKey('usertype.id'))\n username = Column(String(128), unique=True) # same as the LDAP o attribute\n preferences = Column(Text)\n\n # LDAP fields\n ldap_uid = Column(Integer, unique=True, nullable=True)\n ldap_cn = Column(String(128), nullable=True)\n department_number = Column(Integer, nullable=True)\n\n groups = association_proxy('group_membership', 'group')\n\n user_type = relationship(UserType)\n\n def 
__init__(self, username, user_type='User', ldap_uid=None, ldap_cn=None, department_number=None,\n register=True):\n self.username = username\n self.user_type = UserType.query.filter_by(name=user_type).one() # TODO enum\n self.ldap_uid = ldap_uid\n self.ldap_cn = ldap_cn\n self.department_number = department_number\n if register:\n self.register()\n\n def register(self):\n all_users = Group.query.filter_by(name='all_users').first()\n if all_users is None:\n all_users = Group(name='all_users')\n db.session.add(all_users)\n all_users.users.add(self)\n\n def __hash__(self):\n return hash(self.username)\n\n def __eq__(self, o):\n return self.username == o.username\n\n def has_any_access(self, access_list):\n anylist = []\n for access, obj in access_list:\n if obj is None:\n anylist.append(Group.rights.any(access=access))\n else:\n class_name = dict(Pool='Ippool').get(obj.__class__.__name__, obj.__class__.__name__)\n anylist.append(Group.rights.any(access=access, object_class=class_name, object_id=obj.id))\n return Group.query.filter(Group.users.any(id=self.id)).filter(or_(*anylist)).count() != 0\n\n @property\n def is_super_admin(self):\n return self.user_type.name == 'Admin'\n\n @permission\n def can_grant_access(self, group, access):\n if get_session_tool():\n return False\n if access in ('network_admin', 'dns_admin'):\n return self.user_type.name == 'Admin'\n else:\n if access in AccessRight.grantable_by_network_admin:\n self.can_network_admin()\n elif access in AccessRight.grantable_by_dns_admin:\n self.can_dns_admin()\n else:\n raise InvalidAccessRightError('Invalid access right: %r' % access)\n return self.can_edit_group(group)\n\n @permission\n def can_edit_group(self, group):\n if get_session_tool():\n return False\n if group.is_network_admin or group.is_dns_admin:\n return False\n is_network_admin = self.has_any_access([('network_admin', None)])\n if group.network_rights and not is_network_admin:\n return False\n is_dns_admin = self.has_any_access([('dns_admin', None)])\n if group.dns_rights and not is_dns_admin:\n return False\n return is_network_admin or is_dns_admin\n\n @permission\n def can_allocate(self, pool):\n return self.has_any_access([('network_admin', None),\n ('dns_admin', None),\n ('allocate', pool)])\n\n @permission\n def can_manage_zone(self, zone):\n return self.has_any_access([('dns_admin', None),\n ('zone_admin', zone)])\n\n @permission\n def can_create_rr(self, view, type):\n if is_reverse_zone(view.zone.name) and type == 'PTR':\n return True\n else:\n return self.has_any_access([('create_rr', view),\n ('zone_admin', view.zone),\n ('dns_admin', None)])\n\n @permission\n def can_delete_rr(self, view, type):\n if is_reverse_zone(view.zone.name) and type == 'PTR':\n return True\n else:\n return self.has_any_access([('delete_rr', view),\n ('zone_admin', view.zone),\n ('dns_admin', None),\n ('network_admin', None)])\n\n def get_rights(self):\n available = list(UserRights.keys())\n if self.is_super_admin:\n return available\n perms = []\n for perm in available:\n if getattr(self, perm, None):\n try:\n getattr(self, perm)()\n perms.append(perm)\n except PermissionDeniedError:\n pass\n return perms\n\n\nfor can, conf in list(UserRights.items()):\n def can_func(self, tool_access=conf['tool_access'], access=conf['access']):\n if not tool_access and get_session_tool():\n return False\n return self.has_any_access(access)\n can_func.__name__ = can\n setattr(User, can, permission(can_func))\n", "id": "12715240", "language": "Python", "matching_score": 3.578397035598755, 
"max_stars_count": 37, "path": "dim/dim/models/rights.py" }, { "content": "import dim.rpc as rpc\nfrom .dns_test import rrs\nfrom dim import db\nfrom dim.errors import PermissionDeniedError, InvalidGroupError, DimError\nfrom dim.models import User\nfrom tests.util import DatabaseTest, raises\n\n\nclass RightsTest(DatabaseTest):\n def setUp(self):\n DatabaseTest.setUp(self)\n db.session.add_all([User('net'),\n User('user')])\n db.session.commit()\n self.admin = rpc.TRPC('admin')\n self.net = rpc.TRPC('net')\n self.user = rpc.TRPC('user')\n\n self.admin.group_create('networkgroup')\n self.admin.group_grant_access('networkgroup', 'network_admin')\n self.admin.group_add_user('networkgroup', 'net')\n\n def test_list_users(self):\n assert self.user.user_list(include_groups=False) == [\n {'name': 'admin'},\n {'name': 'net'},\n {'name': 'user'},\n ]\n assert self.user.user_list(include_groups=True) == [\n {'name': 'admin', 'groups': ['all_users']},\n {'name': 'net', 'groups': ['all_users', 'networkgroup']},\n {'name': 'user', 'groups': ['all_users']},\n ]\n\n def test_create_add(self):\n with raises(PermissionDeniedError):\n self.user.group_create('usergroup')\n self.net.group_create('usergroup')\n with raises(PermissionDeniedError):\n self.user.group_add_user('usergroup', 'user')\n self.net.group_add_user('usergroup', 'user')\n with raises(PermissionDeniedError):\n self.user.group_delete('usergroup')\n\n def test_delete_group(self):\n assert set(self.user.group_list()) == set(['networkgroup', 'all_users'])\n assert set(self.user.user_get_groups('net')) == set(['networkgroup', 'all_users'])\n assert self.user.group_get_users('networkgroup') == ['net']\n self.admin.group_delete('networkgroup')\n assert self.user.group_list() == ['all_users']\n assert self.user.user_get_groups('net') == ['all_users']\n with raises(InvalidGroupError):\n assert self.user.group_get_users('networkgroup')\n with raises(InvalidGroupError):\n self.admin.group_delete('networkgroup')\n\n def test_delete_group2(self):\n self.net.group_create('usergroup')\n self.net.group_add_user('usergroup', 'user')\n assert set(self.user.user_get_groups('user')) == set(['usergroup', 'all_users'])\n self.net.group_delete('usergroup')\n assert self.user.user_get_groups('user') == ['all_users']\n self.net.group_create('usergroup')\n assert self.user.user_get_groups('user') == ['all_users']\n self.net.group_add_user('usergroup', 'user')\n assert set(self.user.user_get_groups('user')) == set(['usergroup', 'all_users'])\n\n def test_remove_user(self):\n self.admin.group_remove_user('networkgroup', 'net')\n assert self.user.group_get_users('networkgroup') == []\n assert self.user.user_get_groups('net') == ['all_users']\n self.admin.group_remove_user('networkgroup', 'net')\n\n def test_grant(self):\n assert self.user.group_get_access('networkgroup') == [['network_admin', 'all']]\n self.net.group_create('usergroup')\n assert self.user.group_get_access('usergroup') == []\n\n self.net.ippool_create('pool')\n self.net.group_add_user('usergroup', 'user')\n self.net.group_grant_access('usergroup', 'allocate', 'pool')\n self.net.group_grant_access('usergroup', 'allocate', 'pool')\n assert self.user.group_get_access('usergroup') == [['allocate', 'pool']]\n\n with raises(PermissionDeniedError):\n self.net.group_grant_access('usergroup', 'network_admin')\n with raises(PermissionDeniedError):\n self.net.group_add_user('networkgroup', 'user')\n\n self.net.group_revoke_access('usergroup', 'allocate', 'pool')\n assert self.user.group_get_access('usergroup') == 
[]\n self.net.group_revoke_access('usergroup', 'allocate', 'pool')\n\n def test_group_rename(self):\n self.net.group_create('usergroup1')\n self.net.group_rename('usergroup1', 'usergroup')\n assert 'usergroup' in set(self.user.group_list())\n assert 'usergroup1' not in set(self.user.group_list())\n with raises(PermissionDeniedError):\n self.net.group_rename('networkgroup', 'test')\n with raises(PermissionDeniedError):\n self.user.group_rename('networkgroup', 'test')\n\n def test_allocate(self):\n with raises(PermissionDeniedError):\n self.user.ippool_create('pool')\n self.net.ippool_create('pool')\n self.net.ipblock_create('172.16.17.32/8', status='Container')\n self.net.ippool_add_subnet('pool', '172.16.17.32/24')\n self.net.ippool_get_ip('pool')\n self.user.ippool_list(pool='pool')\n\n self.net.group_create('usergroup')\n self.net.group_add_user('usergroup', 'user')\n self.net.group_grant_access('usergroup', 'allocate', 'pool')\n self.user.ippool_get_ip('pool')\n self.user.ipblock_set_attrs('172.16.17.32', {'key': 'value'})\n assert self.user.ipblock_get_attrs('172.16.17.32')['key'] == 'value'\n self.user.ipblock_delete_attrs('172.16.17.32', ['key'])\n self.user.ip_free('172.16.17.32')\n delegation = self.user.ippool_get_delegation('pool', 27)\n self.user.ipblock_remove(delegation[0]['ip'])\n\n self.net.group_revoke_access('usergroup', 'allocate', 'pool')\n with raises(PermissionDeniedError):\n self.user.ippool_get_ip('pool')\n self.net.group_grant_access('usergroup', 'allocate', 'pool')\n self.net.group_remove_user('usergroup', 'user')\n with raises(PermissionDeniedError):\n self.user.ippool_get_ip('pool')\n\n def test_delete_pool(self):\n self.net.group_create('usergroup')\n self.net.ippool_create('pool')\n self.net.group_grant_access('usergroup', 'allocate', 'pool')\n self.net.ippool_delete('pool')\n\n self.net.ippool_create('pool2')\n self.net.group_grant_access('usergroup', 'allocate', 'pool2')\n\n def test_delete_subnet2(self):\n self.net.group_create('usergroup')\n self.net.ippool_create('pool')\n self.net.ipblock_create('172.16.17.32/8', status='Container')\n self.net.ippool_add_subnet('pool', '172.16.17.32/24')\n self.net.group_add_user('usergroup', 'user')\n self.net.group_grant_access('usergroup', 'allocate', 'pool')\n self.admin.zone_delete('0.0.12.in-addr.arpa')\n with raises(PermissionDeniedError):\n self.user.ipblock_remove('172.16.17.32/24', force=True, recursive=True)\n\n def test_rename_pool(self):\n self.net.group_create('usergroup')\n self.net.ippool_create('pool')\n self.net.group_grant_access('usergroup', 'allocate', 'pool')\n self.net.ippool_rename('pool', 'newname')\n assert self.net.group_get_access('usergroup') == [['allocate', 'newname']]\n\n def test_list_pools_writable(self):\n self.net.group_create('usergroup')\n self.net.group_add_user('usergroup', 'user')\n self.net.group_create('usergroup2')\n self.net.group_add_user('usergroup2', 'user')\n self.net.ippool_create('pool1')\n self.net.ippool_create('pool2')\n self.net.ippool_create('pool3')\n self.net.group_grant_access('usergroup2', 'allocate', 'pool1')\n self.net.group_grant_access('usergroup', 'allocate', 'pool1')\n self.net.group_grant_access('usergroup', 'allocate', 'pool2')\n assert set(p['name'] for p in self.admin.ippool_list(can_allocate=True)) == set(['pool1', 'pool2', 'pool3'])\n assert set(p['name'] for p in self.net.ippool_list(can_allocate=True)) == set(['pool1', 'pool2', 'pool3'])\n assert set(p['name'] for p in self.user.ippool_list(can_allocate=True)) == set(['pool1', 'pool2'])\n assert 
set(p['name'] for p in self.user.ippool_list()) == set(['pool1', 'pool2', 'pool3'])\n\n def test_add_twice(self):\n self.net.group_create('testgroup')\n self.net.group_add_user('testgroup', 'user')\n self.net.group_add_user('testgroup', 'user')\n\n\nclass ErrorChecker(object):\n '''\n Proxy for a TRPC object.\n\n If *must_fail* the proxy will verify the call raises and retry it as\n 'admin'. This allows future calls to depend on the side effects of\n the previous calls.\n '''\n def __init__(self, user, must_fail):\n self.user = user\n self.must_fail = must_fail\n\n def __getattr__(self, attr):\n def wrapper(*args, **kwargs):\n if self.must_fail:\n with raises(DimError):\n getattr(rpc.TRPC(self.user), attr)(*args, **kwargs)\n ret = getattr(rpc.TRPC('admin'), attr)(*args, **kwargs)\n else:\n ret = getattr(rpc.TRPC(self.user), attr)(*args, **kwargs)\n return ret\n return wrapper\n\n\nclass RightsMatrixTest(DatabaseTest):\n def setUp(self):\n DatabaseTest.setUp(self)\n db.session.add_all([User('net'),\n User('dns'),\n User('user'),\n User('granted'),\n User('netdns')])\n db.session.commit()\n self.admin = rpc.TRPC('admin')\n\n self.admin.group_create('network_admin_group')\n self.admin.group_grant_access('network_admin_group', 'network_admin')\n self.admin.group_add_user('network_admin_group', 'net')\n\n self.admin.group_create('dns_admin_group')\n self.admin.group_grant_access('dns_admin_group', 'dns_admin')\n self.admin.group_add_user('dns_admin_group', 'dns')\n\n self.admin.group_add_user('network_admin_group', 'netdns')\n self.admin.group_add_user('dns_admin_group', 'netdns')\n\n self.admin.group_create('granted')\n self.admin.group_add_user('granted', 'granted')\n\n def user_proxies(self, allowed):\n for user in ('granted', 'user', 'dns', 'net'):\n yield ErrorChecker(user, user not in allowed)\n yield ErrorChecker('netdns', 'dns' not in allowed and 'net' not in allowed)\n\n def who(self, u):\n '''Helper for RightsMatrixTest returning self.admin instead of u if u is a network_admin.'''\n return u if u.user != 'net' else self.admin\n\n def grant_revoke(self, allowed, group, right, obj=None):\n for u in self.user_proxies(allowed):\n u.group_grant_access(group, right, obj)\n u.group_revoke_access(group, right, obj)\n\n def test_grant_revoke(self):\n self.admin.group_create('group')\n self.grant_revoke((), 'group', 'network_admin')\n self.grant_revoke((), 'group', 'dns_admin')\n\n self.admin.ippool_create('pool')\n self.grant_revoke(('net', ), 'group', 'allocate', 'pool')\n\n self.admin.zone_create('test.com')\n self.grant_revoke(('dns', ), 'group', 'create_rr', ['test.com', []])\n self.grant_revoke(('dns', ), 'group', 'delete_rr', ['test.com', []])\n\n def test_dual_admin(self):\n '''user with both dns_admin and network_admin can grant ip rights'''\n self.admin.ippool_create('pool')\n self.admin.group_create('group')\n netdns = rpc.TRPC('netdns')\n netdns.group_grant_access('group', 'allocate', 'pool')\n\n def test_create_modify_delete_group(self):\n '''create/modify/delete group'''\n for u in self.user_proxies(('dns', 'net')):\n u.group_create('testgroup')\n u.group_add_user('testgroup', 'user')\n u.group_remove_user('testgroup', 'user')\n u.group_rename('testgroup', 'testgroup1')\n u.group_delete('testgroup1')\n\n def test_create_delete_container(self):\n '''create/delete container'''\n for u in self.user_proxies(('net', )):\n u.ipblock_create('192.168.3.11/24')\n u.ipblock_remove('192.168.3.11/24')\n\n def test_modify_container_attributes(self):\n '''modify container attributes'''\n 
self.admin.ipblock_create('192.168.3.11/24')\n for u in self.user_proxies(('dns', 'net')):\n u.ipblock_set_attrs('192.168.3.11/24', attributes={'a': 'b'})\n u.ipblock_delete_attrs('192.168.3.11/24', ['a'])\n\n def test_create_modify_delete_pool(self):\n '''create/modify/delete pool'''\n self.admin.ipblock_create('172.16.17.32/8', status='Container')\n for u in self.user_proxies(('net', )):\n u.ippool_create('pool')\n u.ippool_add_subnet('pool', '172.16.17.32/24')\n u.ippool_set_vlan('pool', 1)\n u.ippool_remove_vlan('pool')\n self.admin.zone_delete('0.0.12.in-addr.arpa')\n u.ipblock_remove('172.16.17.32/24', force=True, recursive=True)\n u.ippool_rename('pool', 'pool1')\n u.ippool_delete('pool1')\n\n def test_modify_pool_attributes(self):\n '''modify pool attributes'''\n self.admin.ippool_create('pool')\n for u in self.user_proxies(('dns', 'net')):\n u.ippool_set_attrs('pool', attributes={'a': 'b'})\n u.ippool_delete_attrs('pool', ['a'])\n\n def test_modify_rr(self):\n '''set ttl and comment'''\n self.admin.zone_create('test.com')\n rr = dict(name='test.com.', type='TXT', strings=['a'])\n self.admin.rr_create(**rr)\n self.admin.group_grant_access('granted', 'delete_rr', ['test.com', []])\n for u in self.user_proxies(('dns', 'granted', 'net')):\n u.rr_set_attrs(ttl=10, **rr)\n u.rr_set_comment(comment='a', **rr)\n u.rr_set_ttl(ttl=11, **rr)\n\n def test_create_delete_forward_zones(self):\n '''create/delete fwd-zones'''\n self.admin.group_grant_access('granted', 'zone_create')\n for u in self.user_proxies(('dns', 'granted', 'net')):\n self.who(u).zone_create('test.com')\n self.who(u).rr_create(name='a.test.com.', type='NS', nsdname='x.test.com.')\n u.rr_delete(name='a.test.com.', type='NS', nsdname='x.test.com.')\n self.who(u).zone_delete('test.com')\n\n def test_create_reverse_zones(self):\n '''create rev-zones'''\n for u in self.user_proxies(('dns', 'net')):\n u.zone_create('1.1.1.in-addr.arpa')\n self.admin.zone_delete('1.1.1.in-addr.arpa')\n\n def test_delete_reverse_zones(self):\n '''delete rev-zones'''\n for u in self.user_proxies(('dns', 'net')):\n self.admin.zone_create('1.1.1.in-addr.arpa')\n u.zone_delete('1.1.1.in-addr.arpa')\n\n def test_create_modify_delete_zone_profiles(self):\n '''create/modify/delete zone-profiles'''\n for u in self.user_proxies(('dns', 'net')):\n self.who(u).zone_create('profile', profile=True)\n self.who(u).rr_create(name='a.profile.', type='NS', nsdname='a.test.com.')\n u.rr_delete(name='a.profile.', type='NS', nsdname='a.test.com.')\n self.who(u).zone_delete('profile', profile=True)\n\n def test_create_delete_zone_groups(self):\n '''create/delete zone-groups'''\n self.admin.zone_create('test.com')\n for u in self.user_proxies(('dns', )):\n u.zone_group_create('zg')\n u.zone_group_rename('zg', 'zg1')\n u.zone_group_delete('zg1')\n\n def test_create_modify_delete_output(self):\n '''create/modify/delete output'''\n self.admin.zone_group_create('zg')\n for u in self.user_proxies(('dns', )):\n u.output_create('out', plugin='bind')\n u.output_set_comment('out', 'test')\n u.output_add_group('out', 'zg')\n u.output_remove_group('out', 'zg')\n u.output_get_attrs('out')\n u.output_delete('out')\n\n def test_create_rr_everywhere(self):\n '''create/delete rr in every zone (fwd and rev)'''\n self.admin.ipblock_create('1.0.0.0/8', status='Container')\n self.admin.zone_create('test.com')\n self.admin.ippool_create('pool')\n self.admin.ippool_add_subnet('pool', '192.168.3.11/24')\n rpc.TRPC('dns').rr_create(name='a.test.com.', type='A', ip='1.0.0.1')\n for u in 
self.user_proxies(('dns', 'net')):\n self.who(u).rr_create(name='a.test.com.', type='A', ip='1.0.0.1')\n u.rr_delete(name='a.test.com.', type='A', ip='1.0.0.1', free_ips=True)\n self.who(u).rr_create(name='b.test.com.', type='NS', nsdname='c.test.com.')\n u.rr_delete(name='b.test.com.', type='NS', nsdname='c.test.com.')\n\n def test_create_rr_ptr(self):\n '''create/delete PTR rr in every rev-zone'''\n self.admin.zone_create('test.com')\n self.admin.ipblock_create('1.0.0.0/8', status='Container')\n self.admin.ippool_create('pool')\n self.admin.ippool_add_subnet('pool', '192.168.3.11/24')\n self.admin.group_create('group')\n self.admin.group_grant_access('group', 'create_rr', ['test.com', []])\n self.admin.group_grant_access('group', 'delete_rr', ['test.com', []])\n self.admin.group_grant_access('group', 'allocate', 'pool')\n self.admin.group_add_user('group', 'user')\n self.admin.group_add_user('group', 'net')\n for u in self.user_proxies(('user', 'dns', 'net')):\n u.rr_create(ptrdname='a.test.com.', type='PTR', ip='1.0.0.1')\n u.rr_delete(ptrdname='a.test.com.', type='PTR', ip='1.0.0.1')\n\n def test_granted_allocate(self):\n self.admin.ippool_create('pool')\n self.admin.group_grant_access('granted', 'allocate', 'pool')\n self.admin.ipblock_create('172.16.17.32/8', status='Container')\n for u in self.user_proxies(('dns', 'net', 'granted')):\n self.admin.ippool_add_subnet('pool', '172.16.17.32/24')\n ip = u.ippool_get_ip('pool')['ip']\n u.ip_free(ip)\n delegation = u.ippool_get_delegation('pool', 30)[0]['ip']\n u.ipblock_remove(delegation)\n self.admin.ipblock_remove('172.16.17.32/24', force=True, recursive=True)\n\n def test_create_rr(self):\n self.admin.zone_create('test.com')\n self.admin.group_grant_access('granted', 'create_rr', ['test.com', []])\n for u in self.user_proxies(('dns', 'granted')):\n u.rr_create(name='test.com.', type='TXT', strings=['test'])\n self.admin.rr_delete(name='test.com.', type='TXT')\n\n def test_rr_reverse_zone(self):\n self.admin.zone_create('1.1.1.in-addr.arpa')\n self.admin.group_grant_access('granted', 'create_rr', ['1.1.1.in-addr.arpa', []])\n self.admin.group_grant_access('granted', 'delete_rr', ['1.1.1.in-addr.arpa', []])\n for u in self.user_proxies(('dns', 'granted', 'net')):\n self.who(u).rr_create(name='1.1.1.in-addr.arpa.', type='TXT', strings=['test'])\n u.rr_delete(name='1.1.1.in-addr.arpa.', type='TXT')\n\n def test_granted_create_rr_ptr(self):\n def test(users):\n for u in self.user_proxies(users):\n u.rr_create(ptrdname='a.test.com.', type='PTR', ip='1.0.0.1')\n self.admin.rr_delete(ptrdname='a.test.com.', type='PTR', ip='1.0.0.1')\n self.admin.ip_free('1.0.0.1')\n self.admin.ipblock_create('1.0.0.0/8', status='Container')\n self.admin.ippool_create('pool')\n self.admin.ippool_add_subnet('pool', '192.168.3.11/24')\n self.admin.group_grant_access('granted', 'allocate', 'pool')\n test(('granted', 'net', 'dns'))\n self.admin.zone_create('test.com')\n test(('dns', ))\n self.admin.group_grant_access('granted', 'create_rr', ['test.com', []])\n test(('dns', 'granted'))\n\n def test_create_rr_overwrite(self):\n def test_overwrite_a(users):\n for u in self.user_proxies(users):\n self.admin.rr_create(name='a.test.com.', type='A', ip='1.0.0.1')\n u.rr_create(name='a.test.com.', type='A', ip='1.0.0.2', overwrite_ptr=True, overwrite_a=True)\n self.admin.rr_delete(name='a.test.com.', type='A', ip='1.0.0.2')\n self.admin.zone_create('test.com')\n self.admin.ipblock_create('1.0.0.0/8', status='Container')\n self.admin.ippool_create('pool')\n 
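# overwrite_ptr needs create_rr (plus allocate); overwrite_a is only permitted once delete_rr is also granted, as exercised via test_overwrite_a below.\n 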
self.admin.ippool_add_subnet('pool', '192.168.3.11/24')\n self.admin.group_grant_access('granted', 'allocate', 'pool')\n self.admin.group_grant_access('granted', 'create_rr', ['test.com', []])\n for u in self.user_proxies(('granted', 'dns')):\n u.rr_create(name='a.test.com.', type='A', ip='1.0.0.1')\n u.rr_create(name='a.test.com.', type='A', ip='172.16.58.3', overwrite_ptr=True)\n self.admin.rr_delete(name='a.test.com.', type='A', ip='172.16.58.3')\n test_overwrite_a(('dns', ))\n self.admin.group_grant_access('granted', 'delete_rr', ['test.com', []])\n test_overwrite_a(('dns', 'granted'))\n\n def test_delete_rr(self):\n self.admin.zone_create('test.com')\n self.admin.group_grant_access('granted', 'delete_rr', ['test.com', []])\n for u in self.user_proxies(('dns', 'granted', 'net')):\n self.admin.rr_create(name='test.com.', type='TXT', strings=['test'])\n u.rr_delete(name='test.com.', type='TXT')\n\n def test_delete_rr_ptr(self):\n def test_delete_ptr(users):\n for u in self.user_proxies(users):\n self.admin.rr_create(ptrdname='a.test.com.', type='PTR', ip='1.0.0.1')\n u.rr_delete(ptrdname='a.test.com.', type='PTR', ip='1.0.0.1')\n self.admin.ipblock_create('1.0.0.0/8', status='Container')\n self.admin.ippool_create('pool')\n self.admin.ippool_add_subnet('pool', '192.168.3.11/24')\n test_delete_ptr(('user', 'dns', 'net', 'granted'))\n self.admin.zone_create('test.com')\n self.admin.group_grant_access('granted', 'delete_rr', ['test.com', []])\n test_delete_ptr(('dns', 'granted', 'net'))\n\n def test_zone_list(self):\n self.admin.zone_create('oneview.com')\n self.admin.zone_create('delete.com')\n self.admin.zone_create('twoviews.com')\n self.admin.zone_create_view('twoviews.com', 'second')\n self.admin.group_grant_access('granted', 'create_rr', ['oneview.com', []])\n self.admin.group_grant_access('granted', 'delete_rr', ['delete.com', []])\n self.admin.group_grant_access('granted', 'delete_rr', ['twoviews.com', ['second']])\n self.admin.group_grant_access('granted', 'create_rr', ['twoviews.com', ['default']])\n a = ErrorChecker('admin', False)\n u = ErrorChecker('user', False)\n g = ErrorChecker('granted', False)\n assert g.zone_count(can_create_rr=True) == 2\n assert g.zone_count(can_delete_rr=True) == 2\n assert g.zone_count(can_delete_rr=True, can_create_rr=True) == 3\n assert u.zone_count(can_delete_rr=True, can_create_rr=True) == 0\n\n def names(zones):\n return [z['name'] for z in zones]\n assert set(names(u.zone_list(can_create_rr=True, can_delete_rr=True))) == set()\n assert set(names(g.zone_list(can_create_rr=True))) == set(['oneview.com', 'twoviews.com'])\n assert set(names(g.zone_list(can_delete_rr=True))) == set(['delete.com', 'twoviews.com'])\n assert rrs(u.zone_list(fields=True),\n fields=('name', 'can_create_rr', 'can_delete_rr')) == rrs(\n [('oneview.com', False, False),\n ('twoviews.com', False, False),\n ('delete.com', False, False)\n ])\n assert rrs(g.zone_list(fields=True),\n fields=('name', 'can_create_rr', 'can_delete_rr')) == rrs(\n [('oneview.com', True, False),\n ('twoviews.com', True, True),\n ('delete.com', False, True)\n ])\n assert rrs(a.zone_list(fields=True),\n fields=('name', 'can_create_rr', 'can_delete_rr')) == rrs(\n [('oneview.com', True, True),\n ('twoviews.com', True, True),\n ('delete.com', True, True)\n ])\n\n def _zone_view_rights_setup(self):\n self.admin.zone_create('test.com')\n self.admin.zone_create_view('test.com', 'create')\n self.admin.zone_create_view('test.com', 'delete')\n self.admin.zone_create_view('test.com', 'createdelete')\n 
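# Each view is named after the rights granted to 'granted' on it in the two grants below; 'default' gets none.\n 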
self.admin.group_grant_access('granted', 'create_rr', ['test.com', ['create', 'createdelete']])\n self.admin.group_grant_access('granted', 'delete_rr', ['test.com', ['delete', 'createdelete']])\n self.u = ErrorChecker('user', False)\n self.a = ErrorChecker('admin', False)\n self.g = ErrorChecker('granted', False)\n\n def test_zone_list_views(self):\n self._zone_view_rights_setup()\n\n def names(zones):\n return [z['name'] for z in zones]\n assert set(names(self.g.zone_list_views('test.com'))) == set(['default', 'create', 'delete', 'createdelete'])\n assert set(names(self.g.zone_list_views('test.com', can_create_rr=True))) == set(['create', 'createdelete'])\n assert set(names(self.g.zone_list_views('test.com', can_delete_rr=True))) == set(['delete', 'createdelete'])\n assert set(names(self.g.zone_list_views('test.com', can_create_rr=True, can_delete_rr=True))) \\\n == set(['create', 'delete', 'createdelete'])\n assert rrs(self.g.zone_list_views('test.com', fields=True),\n fields=('name', 'can_create_rr', 'can_delete_rr')) == rrs(\n [('default', False, False),\n ('create', True, False),\n ('delete', False, True),\n ('createdelete', True, True)\n ])\n assert rrs(self.a.zone_list_views('test.com', fields=True),\n fields=('name', 'can_create_rr', 'can_delete_rr')) == rrs(\n [('default', True, True),\n ('create', True, True),\n ('delete', True, True),\n ('createdelete', True, True)\n ])\n assert rrs(self.u.zone_list_views('test.com', fields=True),\n fields=('name', 'can_create_rr', 'can_delete_rr')) == rrs(\n [('default', False, False),\n ('create', False, False),\n ('delete', False, False),\n ('createdelete', False, False)\n ])\n\n def test_rr_list(self):\n self._zone_view_rights_setup()\n views = {'default': [False, False],\n 'create': [True, False],\n 'delete': [False, True],\n 'createdelete': [True, True]}\n self.admin.rr_create(name='a.test.com.', type='txt', strings='', views=list(views.keys()))\n for rr in self.g.rr_list(type='txt', fields=True):\n assert rr['can_create_rr'] == views[rr['view']][0]\n assert rr['can_delete_rr'] == views[rr['view']][1]\n\n def test_ippool_list(self):\n self.admin.ippool_create('p')\n self.admin.ippool_create('p2')\n self.admin.group_grant_access('granted', 'allocate', 'p')\n\n def pools(p, p2):\n return [{'name': 'p', 'can_allocate': p, 'vlan': None},\n {'name': 'p2', 'can_allocate': p2, 'vlan': None}]\n assert ErrorChecker('granted', False).ippool_list(fields=True, include_subnets=False) == pools(True, False)\n assert ErrorChecker('user', False).ippool_list(fields=True, include_subnets=False) == pools(False, False)\n assert self.admin.ippool_list(fields=True, include_subnets=False) == pools(True, True)\n\n def test_rr_list_ptr_rights(self):\n self.admin.ipblock_create('1.0.0.0/8', status='Container')\n self.admin.ippool_create('p')\n self.admin.ippool_add_subnet('p', '172.16.17.32/24')\n self.admin.rr_create(ip='1.1.1.1', type='PTR', ptrdname='a.de.')\n self.admin.rr_create(name='gigi.1.1.1.in-addr.arpa.', type='TXT', strings=['s'])\n self.user = rpc.TRPC('user')\n rr = self.user.rr_list(zone='1.1.1.in-addr.arpa', type='PTR', fields=True)[0]\n assert rr['can_create_rr']\n assert rr['can_delete_rr']\n rr = self.user.rr_list(zone='1.1.1.in-addr.arpa', type='TXT', fields=True)[0]\n assert not rr['can_create_rr']\n assert not rr['can_delete_rr']\n\n def test_create_a_rr_unmanaged_ip(self):\n # ND-58\n self.admin.zone_create('test.com')\n self.admin.group_grant_access('granted', 'create_rr', ['test.com', []])\n for u in self.user_proxies(('dns', 'granted')):\n 
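# The IP is not part of any pool, so create_rr on the zone is sufficient; no allocate right is needed.\n 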
u.rr_create(name='test.com.', type='A', ip='172.16.31.10')\n self.admin.rr_delete(name='test.com.', type='A', ip='1.2.3.4')\n\n def test_zombie_rights(self):\n # ND-73\n self.admin.ippool_create('p')\n self.admin.zone_create('test.com')\n self.admin.group_grant_access('granted', 'create_rr', ['test.com', []])\n self.admin.group_grant_access('granted', 'delete_rr', ['test.com', []])\n self.admin.zone_delete('test.com')\n for u in self.user_proxies(('net', )):\n u.group_grant_access('granted', 'allocate', 'p')\n self.admin.ippool_delete('p')\n\n def test_proxied_user(self):\n self.net = rpc.TRPC('net', 'smth')\n with raises(PermissionDeniedError):\n self.net.zone_create('a.de')\n\n def test_manage_zone(self):\n self.admin.zone_create('a.de')\n self.admin.zone_group_create('zg')\n self.admin.registrar_account_create('ra', 'autodns3', 'url', 'u', 'p', 's')\n self.admin.group_grant_access('granted', 'zone_admin', 'a.de')\n for u in self.user_proxies(('dns', 'granted')):\n u.zone_set_attrs('a.de', {'a': 'b'})\n u.zone_delete_attrs('a.de', ['a'])\n u.zone_create_view('a.de', 'a')\n u.zone_rename_view('a.de', 'a', 'b')\n u.zone_delete_view('a.de', 'b')\n u.zone_set_owner('a.de', 'granted')\n u.zone_set_soa_attrs('a.de', {'ttl': 60})\n u.zone_dnssec_enable('a.de')\n u.zone_create_key('a.de', 'ksk')\n u.zone_dnssec_disable('a.de')\n u.zone_group_add_zone('zg', 'a.de')\n u.zone_group_remove_zone('zg', 'a.de')\n u.registrar_account_add_zone('ra', 'a.de')\n u.registrar_account_delete_zone('ra', 'a.de')\n\n def test_delete_rr_by_id(self):\n self.admin.zone_create('a.de')\n self.admin.rr_create(name='a.de.', type='NS', nsdname='b.de.')\n self.admin.zone_create('b.de')\n self.admin.group_grant_access('granted', 'delete_rr', ['b.de', []])\n # rr_delete by id with references\n for u in self.user_proxies(('dns', 'net')):\n self.admin.rr_create(name='b.de.', type='A', ip='172.16.31.10')\n rr_id = self.admin.rr_get_references(name='b.de.', type='A', ip='172.16.31.10')['root']\n u.rr_delete(ids=[rr_id], references='delete')\n # rr_delete by id without references\n for u in self.user_proxies(('dns', 'net', 'granted')):\n self.admin.rr_create(name='b.de.', type='A', ip='172.16.31.10')\n rr_id = self.admin.rr_get_references(name='b.de.', type='A', ip='172.16.31.10')['root']\n u.rr_delete(ids=[rr_id], references='ignore')\n # rr_delete by id with no pool rights\n for u in self.user_proxies(('dns', 'net', 'granted')):\n self.admin.rr_create(name='b.de.', type='A', ip='172.16.31.10')\n rr_id = self.admin.rr_get_references(name='b.de.', type='A', ip='172.16.31.10')['root']\n u.rr_delete(ids=[rr_id], references='ignore', free_ips=True)\n", "id": "10279157", "language": "Python", "matching_score": 4.059502601623535, "max_stars_count": 37, "path": "dim-testsuite/tests/rights_test.py" }, { "content": "from dim import db\nfrom dim.dns import get_ip_from_ptr_name\nfrom dim.rrtype import validate_strings\nfrom dim.errors import InvalidParameterError, AlreadyExistsError, InvalidZoneError, DimError\nfrom tests.util import RPCTest, raises\n\n\ndef test_validate_strings():\n validate_strings(None, 'strings', [r'''\\\"\\\\\\223'''])\n validate_strings(None, 'strings', [r'''\\\"\\\\\\223'''])\n\n\ndef rrs(coll, fields=('record', 'zone', 'type', 'value')):\n if not coll:\n return set()\n if isinstance(coll[0], dict):\n return set(tuple(rr[field] for field in fields) for rr in coll\n if 'type' not in fields or rr['type'] != 'SOA')\n else:\n return set(coll)\n\n\ndef print_messages(result):\n print('\\n'.join(m[1] for m in 
result['messages']))\n\n\ndef test_get_ip_from_ptr_name():\n assert get_ip_from_ptr_name('1.2.3.4.in-addr.arpa.') == '4.3.2.1'\n assert get_ip_from_ptr_name('1.2/32.2.3.4.in-addr.arpa.') == '4.3.2.1'\n assert get_ip_from_ptr_name('1.2/32.2.3.4.in-addr.arpa.') == '4.3.2.1'\n assert get_ip_from_ptr_name('2.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.0.0.0.ip6.arpa.') == \\\n 'fc00:e968:6179::de52:7100'\n with raises(ValueError):\n get_ip_from_ptr_name('abc')\n with raises(ValueError):\n get_ip_from_ptr_name('1.3.4.in-addr.arpa.')\n\n\nclass ZoneTest(RPCTest):\n def test_create_zone(self):\n with raises(InvalidParameterError):\n self.r.zone_create('a 0.com')\n with raises(InvalidParameterError):\n self.r.zone_create('test.com', soa_attributes={'a': 1})\n self.r.zone_create('test.com')\n with raises(AlreadyExistsError):\n self.r.zone_create('test.com')\n with raises(InvalidParameterError):\n self.r.zone_create('test.com.')\n with raises(InvalidParameterError):\n self.r.zone_create('test-')\n\n def test_zone_rename(self):\n self.r.zone_create('internal', profile=True)\n self.r.rr_create(name='internal.', type='NS', nsdname='external.')\n self.r.rr_create(name='a.internal.', type='CNAME', cname='c')\n self.r.zone_rename('internal', 'public', profile=True)\n assert self.r.zone_list(profile=True) == [{'name': 'public'}]\n assert rrs(self.r.rr_list(zone='public', profile=True)) == rrs([\n ('@', 'public', 'NS', 'external.'),\n ('a', 'public', 'CNAME', 'c')])\n with raises(InvalidParameterError):\n self.r.zone_rename('public', 'private', profile=False)\n\n def test_add_view_1(self):\n self.r.zone_create('test.com')\n self.r.zone_create_view('test.com', 'view')\n assert self.r.zone_list_views('test.com') == [{'name': 'default'}, {'name': 'view'}]\n\n def test_rename_view(self):\n self.r.zone_create('test.com')\n self.r.zone_create_view('test.com', 'view')\n self.r.zone_rename_view('test.com', 'view', 'test')\n assert self.r.zone_list_views('test.com') == [{'name': 'default'}, {'name': 'test'}]\n\n def test_add_view_2(self):\n self.r.zone_create('profile', profile=True)\n with raises(DimError):\n self.r.zone_create_view('profile', 'test')\n\n def test_attrs(self):\n self.r.zone_create('test.com', attributes={'a': 'b'}, soa_attributes={'primary': 'c.'})\n assert self.r.zone_get_attrs('test.com')['a'] == 'b'\n\n self.r.zone_set_attrs('test.com', {'a': '1'})\n assert self.r.zone_get_attrs('test.com')['a'] == '1'\n self.r.zone_delete_attrs('test.com', ['a'])\n assert 'a' not in self.r.zone_get_attrs('test.com')\n\n assert self.r.zone_get_soa_attrs('test.com')['primary'] == 'c.'\n self.r.zone_set_soa_attrs('test.com', {'primary': 'd.'})\n assert self.r.zone_get_soa_attrs('test.com')['primary'] == 'd.'\n\n def test_profiles(self):\n self.r.zone_create('internal', profile=True, soa_attributes=dict(mail='a.b.com.', refresh='1337', expire=1))\n self.r.zone_create('test.com', from_profile='internal', soa_attributes=dict(refresh='47'))\n assert self.r.zone_get_soa_attrs('test.com')['refresh'] == 47\n assert self.r.zone_get_soa_attrs('test.com')['mail'] == 'a.b.com.'\n with raises(InvalidZoneError):\n self.r.zone_delete('internal', profile=False)\n with raises(InvalidZoneError):\n self.r.zone_delete('test.com', profile=True)\n self.r.zone_delete('internal', profile=True)\n self.r.zone_delete('test.com')\n\n def test_profile_rrs(self):\n self.r.zone_create('profile', profile=True)\n self.r.rr_create(name='@', zone='profile', type='NS', nsdname='whatever.com.', profile=True)\n self.r.rr_create(name='a', 
zone='profile', type='TXT', strings='\"something\"', profile=True)\n self.r.zone_create('test.com', from_profile='profile')\n assert rrs(self.r.rr_list('*test.com.')) == rrs(\n [('a', 'test.com', 'TXT', '\"something\"'),\n ('@', 'test.com', 'NS', 'whatever.com.')])\n\n def test_list_zone(self):\n self.r.zone_create('some.domain', soa_attributes=dict(primary='ns01.company.com.', mail='dnsadmin.company.com.'))\n self.r.rr_create(name='some.domain.', type='MX', preference=10, exchange='mail.other.domain.', ttl=1200)\n self.r.rr_create(name='www.some.domain.', type='A', ip='192.168.78.2')\n records = self.r.rr_list(zone='some.domain')\n assert records[0]['type'] == 'SOA' and records[0]['value'].startswith('ns01.company.com. dnsadmin.company.com')\n assert rrs([('@', 'some.domain', 1200, 'MX', '10 mail.other.domain.'),\n ('www', 'some.domain', None, 'A', '192.168.78.2')])\\\n <= rrs(records, fields=('record', 'zone', 'ttl', 'type', 'value'))\n\n def test_zone_list_underscore(self):\n self.r.zone_create('nounderscore.com')\n self.r.zone_create('with_underscore.com')\n assert self.r.zone_list() == [\n {'name': 'nounderscore.com'},\n {'name': 'with_underscore.com'}]\n assert self.r.zone_list('*_*') == [{'name': 'with_underscore.com'}]\n\n def test_zone_list(self):\n self.r.zone_create('profile.domain', profile=True)\n self.r.zone_create('master.domain')\n self.r.zone_create('no-no.domain')\n self.r.zone_create('multipleviews.domain')\n self.r.zone_create_view('multipleviews.domain', 'secondview')\n self.r.zone_create('second.domain')\n self.r.zone_group_create('zg')\n self.r.zone_group_create('zg2')\n self.r.zone_group_create('zg3')\n self.r.zone_group_add_zone('zg', 'master.domain')\n self.r.zone_group_add_zone('zg2', 'master.domain')\n self.r.zone_group_add_zone('zg', 'second.domain')\n self.r.zone_group_add_zone('zg', 'multipleviews.domain', 'default')\n self.r.zone_group_add_zone('zg2', 'multipleviews.domain', 'secondview')\n self.r.zone_group_add_zone('zg3', 'multipleviews.domain', 'default')\n assert rrs(self.r.zone_list('*domain', profile=False, fields=True),\n fields=('name', 'views', 'zone_groups')) == rrs(\n [('second.domain', 1, 1),\n ('master.domain', 1, 2),\n ('multipleviews.domain', 2, 3),\n ('no-no.domain', 1, 0)\n ])\n assert rrs(self.r.zone_list('*domain', profile=True, fields=True),\n fields=('name',)) == rrs([('profile.domain',)])\n assert rrs(self.r.zone_list('*domain', profile=False, fields=True),\n fields=('name', 'views')) == rrs(\n [('second.domain', 1),\n ('master.domain', 1),\n ('no-no.domain', 1),\n ('multipleviews.domain', 2)\n ])\n assert self.r.zone_list(profile=True) == [{'name': 'profile.domain'}]\n assert set([x['name'] for x in self.r.zone_list(profile=False)]) == set(\n ['master.domain',\n 'no-no.domain',\n 'multipleviews.domain',\n 'second.domain'\n ])\n assert set([x['name'] for x in self.r.zone_list(profile=False, limit=2, offset=1)]) == set(\n ['multipleviews.domain',\n 'no-no.domain'\n ])\n assert self.r.zone_count(profile=False) == 4\n\n def test_zone_list_alias(self):\n assert len(self.r.zone_list(alias=1)) == 0\n assert self.r.zone_count(alias='a') == 0\n self.r.zone_create('a.de')\n assert [x['name'] for x in self.r.zone_list(profile=False, alias=True)] == ['a.de']\n\n def test_revzone_profiles(self):\n self.r.zone_create('revzone-profile', profile=True, soa_attributes={'primary': 'revzone.'})\n self.r.ipblock_create('172.16.31.10/8', status='Container', attributes={'reverse_dns_profile': 'revzone-profile'})\n self.r.ippool_create('pool')\n 
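# The reverse zone created for the new subnet should pick up its SOA from the container's reverse_dns_profile.\n 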
self.r.ippool_add_subnet('pool', '172.16.31.10/23')\n assert self.r.zone_get_soa_attrs('1.0.12.in-addr.arpa')['primary'] == 'revzone.'\n\n def test_revzone_ipv6(self):\n self.r.ipblock_create('2001:db8::/32', status='Container')\n self.r.ippool_create('pool')\n self.r.ippool_add_subnet('pool', '20fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/126')\n assert len(self.r.zone_list('a.0.0.0.0.0.1.0.8.b.d.0.1.0.0.2.ip6.arpa')) == 1\n\n def test_subzone(self):\n self.r.zone_create('server.lan')\n self.r.rr_create(name='srv-monitoring.company.com.', type='TXT', strings=['test'])\n self.r.rr_create(name='monitoring.company.com.', type='TXT', strings=['test2'])\n self.r.zone_create('monitoring.company.com')\n assert rrs(self.r.rr_list(zone='company.com', type='TXT')) == rrs([\n ('srv-monitoring', 'company.com', 'TXT', '\"test\"')])\n assert rrs(self.r.rr_list(zone='monitoring.company.com', type='TXT')) == rrs([\n ('@', 'monitoring.company.com', 'TXT', '\"test2\"')])\n\n def test_dnssec_attrs(self):\n self.r.zone_create('test.com')\n self.r.zone_set_attrs('test.com', {'default_algorithm': '8'})\n self.r.zone_set_attrs('test.com', {'default_ksk_bits': 2048})\n self.r.zone_set_attrs('test.com', {'default_zsk_bits': 1024})\n with raises(InvalidParameterError):\n self.r.zone_set_attrs('test.com', {'default_algorithm': 'rsasha1'})\n with raises(InvalidParameterError):\n self.r.zone_set_attrs('test.com', {'default_ksk_bits': 'a'})\n with raises(InvalidParameterError):\n self.r.zone_set_attrs('test.com', {'default_zsk_bits': 'a'})\n\n def test_favorites(self):\n # Test for a zone with a single view\n self.r.zone_create('a.de')\n assert self.r.zone_list2(favorite_only=True)['count'] == 0\n\n assert not self.r.zone_favorite('a.de')\n self.r.zone_favorite_add('a.de')\n assert self.r.zone_favorite('a.de')\n print(self.r.zone_list2(favorite_only=True))\n assert self.r.zone_list2(favorite_only=True)['data'][0]['name'] == 'a.de'\n\n self.r.zone_favorite_remove('a.de')\n assert not self.r.zone_favorite('a.de')\n\n\nclass RR(RPCTest):\n def test_create_twice(self):\n self.r.ipblock_create('172.16.31.10/8', status='Container')\n self.r.ippool_create('test')\n self.r.ippool_add_subnet('test', '172.16.31.10/24')\n self.r.ip_mark('172.16.58.3')\n self.r.zone_create('test.com')\n self.r.rr_create(name='a.test.com.', type='a', ip='172.16.58.3')\n self.r.rr_create(name='a.test.com.', type='a', ip='172.16.58.3', overwrite_ptr=True)\n self.r.zone_delete('test.com', cleanup=True)\n assert rrs(self.r.rr_list(pattern='*0.0.12.in-addr.arpa.')) == rrs([])\n\n def test_rr_create_invalid_profile(self):\n with raises(InvalidZoneError):\n self.r.rr_create(profile=True, type='NS', nsdname='a.', zone='inexistent', name='@')\n\n def test_create_invalid_record_name(self):\n self.r.zone_create('a.de')\n self.r.rr_create(name='a.de.', type='TXT', strings=['text'], zone='a.de')\n with raises(InvalidParameterError):\n self.r.rr_create(name='suba.de.', type='TXT', strings=['text'], zone='a.de')\n\n def test_rr_delete_1(self):\n self.r.zone_create('test.com')\n self.r.rr_create(name='a.test.com.', type='cname', cname='b.test.com.')\n assert len(rrs(self.r.rr_list())) == 1\n self.r.rr_delete(name='a.test.com.', type='cname', cname='b.test.com.')\n assert len(rrs(self.r.rr_list())) == 0\n\n def test_rr_delete_2(self):\n self.r.ipblock_create('172.16.31.10/8', status='Container')\n self.r.ippool_create('test')\n self.r.ippool_add_subnet('test', '172.16.31.10/24')\n self.r.zone_create('test.com')\n self.r.rr_create(name='a.test.com.', type='a', 
ip='172.16.58.3')\n assert len(rrs(self.r.rr_list())) == 2\n self.r.rr_delete(name='a.test.com.', type='a', ip='172.16.58.3', free_ips=True)\n assert len(rrs(self.r.rr_list())) == 0\n assert self.r.ipblock_get_attrs('172.16.58.3')['status'] == 'Available'\n\n def test_rr_delete_3(self):\n self.r.ipblock_create('12::/32', status='Container')\n self.r.zone_create('test.com')\n self.r.ippool_create('test')\n self.r.ippool_add_subnet('test', '12::/64')\n self.r.rr_create(name='a.test.com.', type='a', ip='172.16.58.3')\n self.r.rr_create(name='a.test.com.', type='aaaa', ip='fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')\n self.r.rr_delete(name='a.test.com.', type='a', ip='172.16.58.3')\n assert rrs(self.r.rr_list('a.test.com.')) == rrs([\n ('a', 'test.com', 'AAAA', 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b'),\n ('1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0', '0.0.0.0.0.0.0.0.0.0.0.0.2.1.0.0.ip6.arpa', 'PTR', 'a.test.com.')])\n\n def test_rr_delete_4(self):\n self.r.ipblock_create('172.16.31.10/8', status='Container')\n self.r.ippool_create('test')\n self.r.ippool_add_subnet('test', '172.16.31.10/24')\n self.r.zone_create('test.com')\n self.r.rr_create(name='a.test.com.', type='a', ip='172.16.58.3')\n self.r.rr_create(name='b.test.com.', type='a', ip='172.16.58.3', overwrite_ptr=True)\n self.r.rr_delete(name='a.test.com.', type='a', ip='172.16.58.3')\n assert not self.r.rr_list('a.test.com.')\n assert rrs(self.r.rr_list('b.test.com.')) == rrs([\n ('b', 'test.com', 'A', '172.16.58.3'),\n ('1', '0.0.12.in-addr.arpa', 'PTR', 'b.test.com.')])\n\n def test_rr_delete_5(self):\n # trigger recursive delete via rr_delete(ptr)\n self.r.ipblock_create('172.16.31.10/8', status='Container')\n self.r.ippool_create('test')\n self.r.ippool_add_subnet('test', '172.16.31.10/24')\n self.r.zone_create('test.com')\n self.r.rr_create(name='a.test.com.', type='a', ip='172.16.58.3')\n self.r.rr_create(name='b.test.com.', type='cname', cname='a')\n self.r.rr_delete(ip='172.16.58.3', type='ptr', ptrdname='a.test.com.', references='delete')\n assert rrs(self.r.rr_list()) == set()\n\n def test_rr_delete_6(self):\n # delete only one forward reference; expect ptr unchanged\n self.r.ipblock_create('172.16.31.10/8', status='Container')\n self.r.ippool_create('test')\n self.r.ippool_add_subnet('test', '172.16.31.10/24')\n self.r.zone_create('test.com')\n self.r.zone_create_view('test.com', 'other')\n self.r.rr_create(name='a.test.com.', type='a', ip='172.16.58.3', views=['default', 'other'])\n self.r.rr_delete(name='a.test.com.', type='a', ip='172.16.58.3', views=['default'])\n assert rrs(self.r.rr_list()) == rrs([\n ('a', 'test.com', 'A', '172.16.58.3'),\n ('1', '0.0.12.in-addr.arpa', 'PTR', 'a.test.com.')])\n\n def test_rr_delete_by_id(self):\n self.r.zone_create('test.com')\n self.r.rr_create(name='a.test.com.', type='a', ip='172.16.58.3')\n rr_id = self.r.rr_get_references(name='a.test.com.', type='A')['root']\n with raises(InvalidParameterError):\n self.r.rr_delete(ids=rr_id)\n self.r.rr_delete(ids=[rr_id], zone='a.de')\n self.r.rr_delete(ids=[rr_id], unknown='a')\n self.r.rr_delete(ids=[rr_id])\n\n def test_ptr_overwrite(self):\n self.r.ipblock_create('172.16.31.10/8', status='Container')\n self.r.ippool_create('test')\n self.r.ippool_add_subnet('test', '172.16.31.10/24')\n self.r.ip_mark('172.16.58.3')\n self.r.ip_mark('192.168.3.11')\n self.r.zone_create('test.com')\n self.r.rr_create(name='a.test.com.', type='A', ip='172.16.58.3')\n self.r.rr_create(ip='172.16.58.3', type='PTR', ptrdname='b.test.com.')\n assert 
rrs(self.r.rr_list(pattern='*')) == rrs(\n [('a', 'test.com', 'A', '172.16.58.3'),\n ('b', 'test.com', 'A', '172.16.58.3'),\n ('1', '0.0.12.in-addr.arpa', 'PTR', 'a.test.com.')])\n self.r.rr_create(ip='172.16.58.3', type='PTR', ptrdname='b.test.com.', overwrite_ptr=True)\n assert rrs(self.r.rr_list(pattern='*')) == rrs(\n [('a', 'test.com', 'A', '172.16.58.3'),\n ('b', 'test.com', 'A', '172.16.58.3'),\n ('1', '0.0.12.in-addr.arpa', 'PTR', 'b.test.com.')])\n self.r.rr_create(name='b.test.com.', type='A', ip='192.168.3.11')\n self.r.rr_create(ip='172.16.58.3', type='PTR', ptrdname='b.test.com.', overwrite_ptr=True, overwrite_a=True)\n assert rrs(self.r.rr_list(pattern='*')) == rrs(\n [('a', 'test.com', 'A', '172.16.58.3'),\n ('b', 'test.com', 'A', '172.16.58.3'),\n ('1', '0.0.12.in-addr.arpa', 'PTR', 'b.test.com.'),\n ('2', '0.0.12.in-addr.arpa', 'PTR', 'b.test.com.')])\n\n def test_create_a(self):\n self.r.ip_mark('172.16.58.3')\n self.r.ip_mark('192.168.3.11')\n self.r.ip_mark('fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')\n self.r.ip_mark('fdf8:f53e:61e4::18')\n self.r.zone_create('test.com')\n self.r.rr_create(name='a.test.com.', type='A', ip='172.16.58.3', ttl=1)\n self.r.rr_create(name='b.test.com.', type='A', ip='192.168.3.11')\n self.r.rr_create(name='c.test.com.', type='AAAA', ip='fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')\n self.r.rr_create(name='d.test.com.', type='AAAA', ip='fdf8:f53e:61e4::18')\n assert rrs(self.r.rr_list('*test.com.')) == rrs(\n [('a', 'test.com', 'A', '172.16.58.3'),\n ('b', 'test.com', 'A', '192.168.3.11'),\n ('c', 'test.com', 'AAAA', 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b'),\n ('d', 'test.com', 'AAAA', 'fdf8:f53e:61e4::18')])\n\n def test_create_a2(self):\n # ND-57\n self.r.zone_create('test.com')\n with raises(InvalidParameterError):\n self.r.rr_create(name='test.com.', type='A', ip='::1')\n with raises(InvalidParameterError):\n self.r.rr_create(name='test.com.', type='AAAA', ip='127.0.0.1')\n with raises(InvalidParameterError):\n self.r.rr_get_attrs(name='test.com', type='A', ip='::1')\n with raises(InvalidParameterError):\n self.r.rr_get_attrs(name='test.com', type='AAAA', ip='0.0.0.1')\n self.r.rr_create(name='test.com.', type='AAAA', ip='::1')\n assert rrs(self.r.rr_list('*test.com.')) == rrs(\n [('@', 'test.com', 'AAAA', '::1')])\n self.r.rr_get_attrs(name='test.com.', type='AAAA', ip='::1')\n\n def test_create_cname(self):\n self.r.zone_create('test.com')\n with raises(InvalidParameterError):\n self.r.rr_create(name='a.test.com', type='CNAME', cname='c.test.com')\n self.r.rr_create(name='a.test.com.', type='CNAME', cname='c.test.com.')\n self.r.rr_create(name='b.test.com.', type='MX', preference=10, exchange='test.com.')\n with raises(InvalidParameterError):\n self.r.rr_create(name='b.test.com', type='CNAME', cname='c.test.com')\n with raises(InvalidParameterError):\n self.r.rr_create(name='d.test.com.', type='MX', preference=10, exchange='a.test.com.')\n\n def test_create_cname_2(self):\n # ND-100\n self.r.zone_create('test.com')\n self.r.rr_create(name='cname.test.com.', type='CNAME', cname='test.com.')\n self.r.ipblock_create('172.16.31.10/8', status='Container')\n self.r.ippool_create('test')\n self.r.ippool_add_subnet('test', '172.16.31.10/24')\n self.r.rr_create(ip='172.16.58.3', type='PTR', ptrdname='cname.test.com.', create_linked=False)\n with raises(InvalidParameterError):\n self.r.rr_create(ip='172.16.58.3', type='PTR', ptrdname='cname.test.com.', create_linked=True)\n\n def test_create_srv(self):\n self.r.zone_create('test.com')\n 
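# An SRV target must not point at a CNAME; the CNAME below is created to provoke that error.\n 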
self.r.rr_create(name='a.test.com.', type='CNAME', cname='c.test.com.')\n with raises(InvalidParameterError):\n self.r.rr_create(name='_a._b.test.com.', type='SRV', priority=10, weight=1, port=1, target='a.test.com.')\n self.r.rr_create(name='_a._b.test.com.', type='SRV', priority=10, weight=1, port=1, target='c.test.com.')\n with raises(InvalidParameterError):\n self.r.rr_create(name='c.test.com.', type='CNAME', cname='a.test.com.')\n\n def test_email(self):\n self.r.zone_create('test.com')\n self.r.zone_set_soa_attrs('test.com', {'mail': 'first\\.last.test.com.'})\n assert \" first\\.last.test.com. \" in self.r.zone_dump('test.com')\n\n def test_create_revzone(self):\n self.r.rr_create(ip='172.16.58.3', type='PTR', ptrdname='test.com.', create_linked=False, create_revzone=True)\n\n def test_create_rr_rp(self):\n self.r.zone_create('test.com')\n self.r.rr_create(name='a.test.com.', type='RP', mbox='john\\.doe.example.com.', txtdname='test.com.')\n\n def test_create_rr_cert(self):\n self.r.zone_create('test.com')\n self.r.rr_create(name='a.test.com.', type='CERT', certificate_type=1, key_tag=2, algorithm=3, certificate='abc')\n with raises(DimError):\n self.r.rr_create(name='a.test.com.', type='CERT', certificate_type=1, key_tag=2, algorithm=3, certificate='a c')\n\n def test_create_rr_tlsa(self):\n default = dict(name='a.test.com.',\n type='TLSA',\n certificate_usage=1, selector=2, matching_type=1, certificate='abcd')\n\n def rr_create(**kwargs):\n d = default.copy()\n d.update(kwargs)\n return self.r.rr_create(**d)\n\n self.r.zone_create('test.com')\n assert set(rr_create(certificate_usage=4, selector=2, matching_type=3)['messages']) == set([\n (20, 'Creating RR a TLSA 4 2 3 abcd in zone test.com'),\n (30, 'certificate_usage value 4 is unassigned'),\n (30, 'selector value 2 is unassigned'),\n (30, 'matching_type value 3 is unassigned'),\n ])\n rr_create(certificate_usage='PKIX-TA', selector='PRIVSEL', matching_type='SHA2-512')\n for k, v in (('certificate', '1 2'),\n ('certificate', 'afcs'),\n ('selector', -1),\n ('matching_type', 256),\n ('certificate_usage', 'bad')):\n with raises(DimError):\n rr_create(k=v)\n\n def test_rr_list_value_as_object(self):\n self.r.zone_create('test.com')\n rrs = [dict(type='TXT', strings='\"a\" \"b\"'),\n dict(type='mx', preference=5, exchange='test.com.'),\n dict(type='HINFO', os='os', cpu='cpu'),\n dict(type='a', ip='1.2.3.4'),\n dict(type='srv', priority=10, weight=1, port=1, target='a.test.com.'),\n dict(type='naptr', order=1, preference=2, flags='f', service=r'223', regexp=r'r', replacement='a.de.'),\n dict(type='cert', certificate_type=1, algorithm=2, key_tag=3, certificate='cert'),\n dict(type='rp', mbox='gigi.a.de.', txtdname='test.com.')\n ]\n for param in rrs:\n name = '_a._b.test.com.'\n self.r.rr_create(name=name, **param)\n del param['type']\n assert self.r.rr_list(name, value_as_object=True)[0]['value'] == param\n self.r.rr_delete(name=name)\n\n def test_root_zone_list(self):\n self.r.zone_create('.')\n self.r.rr_create(name='a.', type='TXT', strings=[''])\n assert self.r.rr_list('a.')[0]['record'] == 'a'\n\n def test_rr_attrs(self):\n self.r.zone_create('a.de')\n rrs = [dict(name='hinfo.a.de.', type='HINFO', os='os\\\\\"', cpu='\\\\\\\\'),\n dict(name='mx.a.de.', type='MX', preference=10, exchange='a.de.')]\n for rr in rrs:\n self.r.rr_create(**rr)\n self.r.rr_set_ttl(ttl=300, **rr)\n self.r.rr_set_comment(comment='com', **rr)\n attrs = self.r.rr_get_attrs(**rr)\n assert attrs['comment'] == 'com'\n assert attrs['ttl'] == 300\n with 
raises(InvalidParameterError):\n self.r.rr_set_attrs(**rrs[0])\n for dryrun in [False, True]:\n comment = '%s' % dryrun\n ttl = int(dryrun)\n attrs = self.r.rr_set_attrs(ttl=ttl, comment=comment, dryrun=dryrun, **rr)\n assert attrs['comment'] == comment\n assert attrs['ttl'] == ttl\n\n def test_rr_sorting(self):\n self.r.zone_create('a.de')\n rrs = [dict(name='a.de.', type='NS', nsdname='ns.a.de.', ttl=600),\n dict(name='a.de.', type='A', ip='1.2.3.4', ttl=3600),\n dict(name='*.b.a.de.', type='CNAME', cname='b.a.de.'),\n dict(name='mx.a.de.', type='MX', preference=10, exchange='a.de.')]\n for rr in rrs:\n self.r.rr_create(**rr)\n assert(self.r.rr_list(zone='a.de', limit=2)[1]['record'] == '@')\n\n # TODO: test rr_list(created_by, modified_by)\n\n\nclass PTR(RPCTest):\n def setUp(self):\n RPCTest.setUp(self)\n # Initial setup:\n # Forward Zone:\n # w1.zone. IN A \t172.16.58.3\n # Reverse Zone:\n # 172.16.31.10.in-addr.arpa IN PTR w1.zone.\n # 192.168.127.12.in-addr.arpa IN PTR w2.zone.\n self.r.ipblock_create('172.16.31.10/8', status='Container')\n self.r.ippool_create(\"pool\")\n self.r.ippool_add_subnet(\"pool\", \"172.16.31.10/24\")\n self.r.ip_mark('172.16.58.3')\n self.r.ip_mark('192.168.3.11')\n self.r.ip_mark('192.168.3.11')\n self.r.rr_create(ip='192.168.3.11', type='PTR', ptrdname='w2.zone.')\n self.r.zone_create(\"zone\")\n self.r.rr_create(name=\"w1.zone.\", type='A', ip=\"172.16.58.3\")\n assert rrs(self.r.rr_list('*zone.')) == rrs(\n [('w1', 'zone', 'A', '172.16.58.3'),\n ('14', '0.0.12.in-addr.arpa', 'PTR', 'w2.zone.'),\n ('13', '0.0.12.in-addr.arpa', 'PTR', 'w1.zone.')])\n\n def test_new(self):\n self.r.rr_create(ip='192.168.3.11', type='PTR', ptrdname='w2.zone.')\n assert rrs(self.r.rr_list('192.168.3.11')) == rrs(\n [('15', '0.0.12.in-addr.arpa', 'PTR', 'w2.zone.'),\n ('w2', 'zone', 'A', '192.168.3.11')])\n\n def test_no_overwrite(self):\n assert self.r.rr_create(type='PTR', ip='172.16.58.3', ptrdname='w3.zone.')['messages'] == [\n (30, 'Not overwriting: 172.16.31.10.in-addr.arpa. PTR w1.zone.'),\n (20, 'Creating RR w3 A 172.16.58.3 in zone zone')]\n assert rrs(self.r.rr_list('172.16.58.3')) == rrs([\n ('w1', 'zone', 'A', '172.16.58.3'),\n ('w3', 'zone', 'A', '172.16.58.3'),\n ('13', '0.0.12.in-addr.arpa', 'PTR', 'w1.zone.')])\n\n def test_overwrite(self):\n assert set(self.r.rr_create(type='PTR', ip='172.16.58.3', ptrdname='w3.zone.', overwrite_ptr=True)['messages']) == set([\n (30, 'Deleting RR 13 PTR w1.zone. from zone 0.0.12.in-addr.arpa'),\n (20, 'Creating RR 13 PTR w3.zone. 
in zone 0.0.12.in-addr.arpa'),\n (20, 'Creating RR w3 A 172.16.58.3 in zone zone')])\n assert rrs(self.r.rr_list('172.16.58.3')) == rrs(\n [('w1', 'zone', 'A', '172.16.58.3'),\n ('13', '0.0.12.in-addr.arpa', 'PTR', 'w3.zone.'),\n ('w3', 'zone', 'A', '172.16.58.3')])\n\n\nclass TXT(RPCTest):\n def setUp(self):\n RPCTest.setUp(self)\n self.r.zone_create('test.com')\n\n def test_parse(self):\n for txt in ('unquoted', '\"', '\\\\\"', '\\\\', '\"\\\\\"', '\"\\\\', '\"\\\\0\"', '\"\\\\999\"', 'a\"b\"', '\"a\"b', '\"\"\"', '\"\\\\\\\\\\\\\"'):\n with raises(InvalidParameterError):\n self.r.rr_create(name='a.test.com.', type='TXT', txt=txt)\n canonical = {'\"simple\"': '\"simple\"',\n '\"ignore\" \\t\\n\"whitespace\"': '\"ignore\" \"whitespace\"',\n '\"regular escape\\\\100\"': '\"regular escaped\"',\n '\"preserved escape\\\\\\\\\\\\\"\\\\244\"': '\"preserved escape\\\\\\\\\\\\\"\\\\244\"',\n '\"\"': '',\n '\"\" \"a\"': '\"a\"',\n '\"a\" \"\"': '\"a\"',\n r'\"\\\\\" \"\\\"\" \"\\223\"': r'\"\\\\\" \"\\\"\" \"\\223\"'}\n for i, original in enumerate(canonical.keys()):\n rr_name = '%d.test.com.' % i\n self.r.rr_create(name=rr_name, type='TXT', strings=original)\n assert self.r.rr_list(rr_name)[0]['value'] == canonical[original]\n\n\nclass IpblockRRs(RPCTest):\n def setUp(self):\n RPCTest.setUp(self)\n self.r.ipblock_create('172.16.31.10/8', status='Container')\n self.r.ippool_create('test')\n self.r.ippool_add_subnet('test', '172.16.31.10/24')\n self.r.zone_create('test.com')\n self.r.rr_create(type='PTR', ptrdname='test.com.', ip='172.16.58.3')\n assert len(rrs(self.r.rr_list())) == 2\n\n def test_free_ip_simple(self):\n self.r.ip_free('172.16.58.3')\n assert len(rrs(self.r.rr_list('*'))) == 0\n\n def test_free_ip_cname(self):\n self.r.rr_create(name='a.test.com.', type='CNAME', cname='test.com.')\n self.r.rr_create(name='b.test.com.', type='CNAME', cname='a.test.com.')\n self.r.rr_create(name='c.test.com.', type='CNAME', cname='b.test.com.')\n self.r.ip_free('172.16.58.3')\n assert len(rrs(self.r.rr_list())) == 0\n\n def test_delete_pool(self):\n self.r.ippool_delete('test', force=True, delete_subnets=True)\n assert len(rrs(self.r.rr_list())) == 0\n\n\nclass ZoneViewTest(RPCTest):\n def setUp(self):\n RPCTest.setUp(self)\n self.r.zone_create('example.com')\n self.r.zone_rename_view('example.com', 'default', 'us')\n self.r.zone_create_view('example.com', 'de')\n self.r.zone_create_view('example.com', 'sg')\n self.r.rr_create(type='A', name='example.com.', ip='192.168.127.12', views=['us', 'de', 'sg'])\n self.r.rr_create(type='A', name='example.com.', ip='192.168.127.12', views=['us'])\n self.r.rr_create(type='A', name='example.com.', ip='192.168.3.11', views=['sg'])\n self.r.rr_create(type='MX', name='example.com.', preference=10, exchange='mx-ha1.company.de.', views=['us', 'de', 'sg'])\n self.r.rr_create(type='MX', name='example.com.', preference=10, exchange='mx-ha2.company.de.', views=['us', 'de', 'sg'])\n self.r.rr_create(type='MX', name='example.com.', preference=10, exchange='mx01.company.com.', views=['de'])\n self.r.rr_create(type='MX', name='example.com.', preference=10, exchange='mx02.company.com.', views=['de'])\n self.r.rr_create(type='MX', name='example.com.', preference=10, exchange='mx1.example.com.', views=['us'])\n\n def test_export_views(self):\n assert rrs(self.r.rr_list(zone='example.com', view='de')[1:]) == rrs(\n [('@', 'example.com', 'A', '192.168.127.12'),\n ('@', 'example.com', 'MX', '10 mx01.company.com.'),\n ('@', 'example.com', 'MX', '10 mx02.company.com.'),\n ('@', 
'example.com', 'MX', '10 mx-ha1.company.de.'),\n ('@', 'example.com', 'MX', '10 mx-ha2.company.de.')])\n assert rrs(self.r.rr_list(zone='example.com', view='us')[1:]) == rrs(\n [('@', 'example.com', 'A', '192.168.127.12'),\n ('@', 'example.com', 'A', '192.168.127.12'),\n ('@', 'example.com', 'MX', '10 mx1.example.com.'),\n ('@', 'example.com', 'MX', '10 mx-ha1.company.de.'),\n ('@', 'example.com', 'MX', '10 mx-ha2.company.de.')])\n assert rrs(self.r.rr_list(zone='example.com', view='sg')[1:]) == rrs(\n [('@', 'example.com', 'A', '192.168.127.12'),\n ('@', 'example.com', 'A', '192.168.3.11'),\n ('@', 'example.com', 'MX', '10 mx-ha1.company.de.'),\n ('@', 'example.com', 'MX', '10 mx-ha2.company.de.')])\n\n def test_favorites(self):\n assert not self.r.zone_favorite('example.com')\n self.r.zone_favorite_add('example.com', view='us')\n assert self.r.zone_favorite('example.com', 'us')\n assert not self.r.zone_favorite('example.com', 'de')\n self.r.zone_favorite_add('example.com', view='us')\n assert self.r.zone_favorite('example.com', 'us')\n\n self.r.zone_favorite_add('example.com', view='de')\n assert self.r.zone_favorite('example.com', 'de')\n assert self.r.zone_favorite('example.com', 'us')\n\n fav = self.r.zone_list2(favorite_only=True)['data']\n assert len(fav) == 1\n assert len(fav[0]['views']) == 2\n\n self.r.zone_favorite_remove('example.com', view='us')\n assert not self.r.zone_favorite('example.com', 'us')\n assert self.r.zone_favorite('example.com', 'de')\n\n self.r.zone_favorite_remove('example.com', view='de')\n assert not self.r.zone_favorite('example.com', 'de')\n assert not self.r.zone_favorite('example.com', 'us')\n\n\ndef no_warn(result):\n assert [x for x in result['messages'] if x[0] == 30] == []\n\n\nclass RRReferencesTest(RPCTest):\n def setUp(self):\n RPCTest.setUp(self)\n db.session.execute('ALTER TABLE rr AUTO_INCREMENT = 1')\n self.r.ipblock_create('1.0.0.0/8', status='Container')\n self.r.ippool_create('p')\n self.r.ippool_add_subnet('p', '1.1.1.0/24')\n self.r.zone_create('a.de')\n self.r.zone_create_view('a.de', 'second')\n self.r.rr_create(type='A', name='a.de.', ip='1.1.1.1', views=['default', 'second'])\n self.r.zone_create('b.de')\n self.r.rr_create(type='MX', name='mx.b.de.', preference=10, exchange='a.de.')\n self.r.zone_create('c.de')\n self.r.rr_create(type='CNAME', name='cname.c.de.', cname='mx.b.de.')\n self.r.zone_create('subzone.a.de')\n self.r.zone_delete_view('subzone.a.de', 'second')\n self.r.zone_create_view('subzone.a.de', 'third')\n nodes = [{'id': 1,\n 'name': 'a.de.',\n 'type': 'A',\n 'value': '1.1.1.1',\n 'view': 'default',\n 'zone': 'a.de'},\n {'id': 2,\n 'name': '1.1.1.1.in-addr.arpa.',\n 'type': 'PTR',\n 'value': 'a.de.',\n 'view': 'default',\n 'zone': '1.1.1.in-addr.arpa'},\n {'id': 3,\n 'name': 'a.de.',\n 'type': 'A',\n 'value': '1.1.1.1',\n 'view': 'second',\n 'zone': 'a.de'},\n {'id': 4,\n 'name': 'mx.b.de.',\n 'type': 'MX',\n 'value': '10 a.de.',\n 'view': 'default',\n 'zone': 'b.de'},\n {'id': 5,\n 'name': 'cname.c.de.',\n 'type': 'CNAME',\n 'value': 'mx.b.de.',\n 'view': 'default',\n 'zone': 'c.de'}]\n self.nodes = {}\n for node in nodes:\n self.nodes[node['id']] = node\n self.mx_ref_result = {'graph': {4: [5], 5: []},\n 'records': [self.nodes[i] for i in [4, 5]],\n 'root': 4}\n\n def test_get_references(self):\n a_rr = dict(name='a.de.', type='A', view='second', ip='1.1.1.1')\n assert self.r.rr_get_references(delete=True, **a_rr) == \\\n {'graph': {3: [4], 4: [5], 5: []},\n 'records': [self.nodes[i] for i in [3, 4, 5]],\n 'root': 
3}\n assert self.r.rr_get_references(delete=False, **a_rr) == \\\n {'graph': {3: [4], 4: []},\n 'records': [self.nodes[i] for i in [3, 4]],\n 'root': 3}\n ptr_rr = dict(name='1.1.1.1.in-addr.arpa.', type='PTR', ptrdname='a.de.')\n assert self.r.rr_get_references(delete=True, **ptr_rr) == \\\n {'graph': {1: [4], 2: [1, 3], 3: [4], 4: [5], 5: []},\n 'records': [self.nodes[i] for i in [1, 2, 3, 4, 5]],\n 'root': 2}\n assert self.r.rr_get_references(delete=False, **ptr_rr) == \\\n {'graph': {1: [4], 2: [1, 3], 3: [4], 4: []},\n 'records': [self.nodes[i] for i in [1, 2, 3, 4]],\n 'root': 2}\n mx_rr = dict(name='mx.b.de.', type='MX', exchange='a.de.', preference=10)\n assert self.r.rr_get_references(delete=True, **mx_rr) == self.mx_ref_result\n assert self.r.rr_get_references(delete=False, **mx_rr) == self.mx_ref_result\n\n self.r.zone_delete_view('a.de', 'default', cleanup=True)\n assert self.r.rr_get_references(delete=True, **a_rr) == \\\n {'graph': {2: [], 3: [4, 2], 4: [5], 5: []},\n 'records': [self.nodes[i] for i in [2, 3, 4, 5]],\n 'root': 3}\n assert self.r.rr_get_references(delete=False, **a_rr) == \\\n {'graph': {2: [], 3: [4, 2], 4: []},\n 'records': [self.nodes[i] for i in [2, 3, 4]],\n 'root': 3}\n\n def test_edit_comment_ttl(self):\n no_warn(self.r.rr_edit(2))\n no_warn(self.r.rr_edit(2, comment='comment'))\n no_warn(self.r.rr_edit(2, comment=None))\n no_warn(self.r.rr_edit(2, ttl=77))\n no_warn(self.r.rr_edit(2, ttl=None))\n no_warn(self.r.rr_edit(2, references=[1, 3, 4], comment='comment', ttl=77))\n ptr_rr = dict(name='1.1.1.1.in-addr.arpa.', type='PTR', ptrdname='a.de.')\n assert self.r.rr_get_references(delete=False, **ptr_rr) == \\\n {'graph': {1: [4], 2: [1, 3], 3: [4], 4: []},\n 'records': [self.nodes[i] for i in [1, 2, 3, 4]],\n 'root': 2}\n attrs = self.r.rr_get_attrs(**ptr_rr)\n assert attrs['comment'] == 'comment'\n assert attrs['ttl'] == 77\n\n def test_edit_no_diff(self):\n props = dict(name='mx.b.de.', comment=None, ttl=None, preference=10, exchange='a.de.', views=['default'])\n import itertools\n for i in range(len(props)):\n no_warn(self.r.rr_edit(4, dict([x[0] for x in itertools.combinations(iter(props.items()), i + 1)])))\n no_warn(self.r.rr_edit(4, name='mx.b.de.'))\n assert self.r.rr_get_references(delete=False, type='MX', name='mx.b.de.') == \\\n {'graph': {4: [5], 5: []},\n 'records': [self.nodes[i] for i in [4, 5]],\n 'root': 4}\n\n def test_edit_no_references(self):\n no_warn(self.r.rr_edit(4, references=[5], preference=20))\n assert rrs(self.r.rr_list(zone='b.de')) == rrs([('mx', 'b.de', 'MX', '20 a.de.')])\n assert self.r.rr_get_references(name='mx.b.de.')['graph'][6] == [5]\n\n def test_edit_references(self):\n no_warn(self.r.rr_edit(4, references=[5], name='mx2.b.de.'))\n assert rrs(self.r.rr_list(zone='b.de')) == rrs([\n ('mx2', 'b.de', 'MX', '10 a.de.')])\n assert rrs(self.r.rr_list(zone='c.de')) == rrs([\n ('cname', 'c.de', 'CNAME', 'mx2.b.de.')])\n\n # TODO fix this; it's probably bad\n def test_edit_fail(self):\n self.r.rr_create(name='mx2.b.de.', type='CNAME', cname='smth')\n with raises(InvalidParameterError):\n self.r.rr_edit(77)\n self.r.rr_edit(4, references=[77])\n self.r.rr_edit(4, cname='smth.')\n self.r.rr_edit(4, exchange='smth.')\n self.r.rr_edit(4, references=[5], name='mx2.b.de.')\n\n def test_edit_subzone(self):\n self.r.rr_edit(2, references=[1, 3, 4], ptrdname='subzone.a.de.', views=['default', 'third'])\n assert rrs(self.r.rr_list(zone='1.1.1.in-addr.arpa')) ==\\\n rrs([('1', '1.1.1.in-addr.arpa', 'PTR', 'subzone.a.de.')])\n a_rrs 
= rrs([('@', 'subzone.a.de', 'A', '1.1.1.1')])\n for view in ['default', 'third']:\n assert rrs(self.r.rr_list(zone='subzone.a.de', type='A', view=view)) == a_rrs\n\n def test_edit_a_ip_with_ref(self):\n self.r.rr_edit(1, references=[2], ip='1.1.1.2')\n assert rrs(self.r.rr_list(zone='1.1.1.in-addr.arpa')) ==\\\n rrs([('2', '1.1.1.in-addr.arpa', 'PTR', 'a.de.')])\n assert rrs(self.r.rr_list(zone='a.de', view='default')) == rrs([('@', 'a.de', 'A', '1.1.1.2')])\n assert rrs(self.r.rr_list(zone='a.de', view='second')) == rrs([('@', 'a.de', 'A', '1.1.1.1')])\n\n def test_edit_a_ip_no_ref(self):\n self.r.rr_edit(1, references=None, ip='1.1.1.2')\n assert rrs(self.r.rr_list(zone='1.1.1.in-addr.arpa')) ==\\\n rrs([('1', '1.1.1.in-addr.arpa', 'PTR', 'a.de.'),\n ('2', '1.1.1.in-addr.arpa', 'PTR', 'a.de.')])\n assert rrs(self.r.rr_list(zone='a.de', view='default')) == rrs([('@', 'a.de', 'A', '1.1.1.2')])\n assert rrs(self.r.rr_list(zone='a.de', view='second')) == rrs([('@', 'a.de', 'A', '1.1.1.1')])\n\n def test_edit_ptr_ip_no_ref(self):\n self.r.rr_edit(2, references=None, ip='1.1.1.2')\n assert rrs(self.r.rr_list(zone='1.1.1.in-addr.arpa')) ==\\\n rrs([('2', '1.1.1.in-addr.arpa', 'PTR', 'a.de.')])\n assert rrs(self.r.rr_list(zone='a.de', view='second')) == rrs([('@', 'a.de', 'A', '1.1.1.1')])\n assert rrs(self.r.rr_list(zone='a.de', view='default')) == rrs([('@', 'a.de', 'A', '1.1.1.1')])\n\n def test_edit_ptr_ip_with_ref(self):\n self.r.rr_edit(2, references=[1], ip='1.1.1.2')\n assert rrs(self.r.rr_list(zone='1.1.1.in-addr.arpa')) ==\\\n rrs([('2', '1.1.1.in-addr.arpa', 'PTR', 'a.de.')])\n assert rrs(self.r.rr_list(zone='a.de', view='default')) == rrs([('@', 'a.de', 'A', '1.1.1.2')])\n assert rrs(self.r.rr_list(zone='a.de', view='second')) == rrs([('@', 'a.de', 'A', '1.1.1.1')])\n\n def test_edit_a_name_no_ref(self):\n self.r.rr_edit(1, references=None, name='new.a.de.')\n assert rrs(self.r.rr_list(zone='1.1.1.in-addr.arpa')) ==\\\n rrs([('1', '1.1.1.in-addr.arpa', 'PTR', 'a.de.')])\n assert rrs(self.r.rr_list(zone='a.de', view='default')) == rrs([('new', 'a.de', 'A', '1.1.1.1')])\n assert rrs(self.r.rr_list(zone='a.de', view='second')) == rrs([('@', 'a.de', 'A', '1.1.1.1')])\n\n def test_edit_a_name_no_ref2(self):\n self.r.zone_delete_view('a.de', 'second', True)\n self.r.rr_edit(1, references=None, name='new.a.de.')\n assert rrs(self.r.rr_list(zone='1.1.1.in-addr.arpa')) ==\\\n rrs([('1', '1.1.1.in-addr.arpa', 'PTR', 'new.a.de.')])\n assert rrs(self.r.rr_list(zone='a.de', view='default')) == rrs([('new', 'a.de', 'A', '1.1.1.1')])\n\n def test_edit_a_name_with_ref(self):\n self.r.rr_edit(1, references=[2], name='new.a.de.')\n assert rrs(self.r.rr_list(zone='1.1.1.in-addr.arpa')) ==\\\n rrs([('1', '1.1.1.in-addr.arpa', 'PTR', 'new.a.de.')])\n assert rrs(self.r.rr_list(zone='a.de', view='default')) == rrs([('new', 'a.de', 'A', '1.1.1.1')])\n assert rrs(self.r.rr_list(zone='a.de', view='second')) == rrs([('@', 'a.de', 'A', '1.1.1.1')])\n\n def test_edit_ptr_name_no_ref(self):\n self.r.rr_edit(2, references=[], ptrdname='new.a.de.')\n assert rrs(self.r.rr_list(zone='1.1.1.in-addr.arpa')) ==\\\n rrs([('1', '1.1.1.in-addr.arpa', 'PTR', 'new.a.de.')])\n assert rrs(self.r.rr_list(zone='a.de', view='default')) == rrs([('@', 'a.de', 'A', '1.1.1.1')])\n assert rrs(self.r.rr_list(zone='a.de', view='second')) == rrs([('@', 'a.de', 'A', '1.1.1.1')])\n\n def test_edit_ptr_name_with_ref(self):\n self.r.rr_edit(2, references=[1], ptrdname='new.a.de.')\n assert rrs(self.r.rr_list(zone='1.1.1.in-addr.arpa')) 
==\\\n rrs([('1', '1.1.1.in-addr.arpa', 'PTR', 'new.a.de.')])\n assert rrs(self.r.rr_list(zone='a.de', view='default')) == rrs([('new', 'a.de', 'A', '1.1.1.1')])\n assert rrs(self.r.rr_list(zone='a.de', view='second')) == rrs([('@', 'a.de', 'A', '1.1.1.1')])\n\n def test_edit_cname_with_ref(self):\n self.r.zone_create_view('c.de', 'second')\n self.r.rr_create(type='CNAME', name='cname.c.de.', cname='mx.b.de.', views=['second'])\n self.r.rr_edit(id=4, name='mx2.b.de.')\n for view in ['default', 'second']:\n assert rrs(self.r.rr_list(zone='c.de', view=view, type='cname')) == rrs([('cname', 'c.de', 'CNAME', 'mx.b.de.')])\n self.r.rr_edit(id=7, name='mx3.b.de.', references=[5])\n for view, cname in [('default', 'mx3.b.de.'), ('second', 'mx.b.de.')]:\n assert rrs(self.r.rr_list(zone='c.de', view=view, type='cname')) == rrs([('cname', 'c.de', 'CNAME', cname)])\n", "id": "3424374", "language": "Python", "matching_score": 3.9392244815826416, "max_stars_count": 37, "path": "dim-testsuite/tests/dns_test.py" }, { "content": "from tests.util import RPCTest\nfrom dim.models import Zone\n\n\ndef zone(name):\n return Zone.query.filter_by(name=name).first()\n\n\nclass RevzoneTest(RPCTest):\n def assert_mark_revzone(self, ip, expected_revzone):\n self.r.ip_mark(ip)\n revzone = self.r.ipblock_get_attrs(ip)['reverse_zone']\n if revzone != expected_revzone:\n raise AssertionError(\"Reverse zone for %s is %s, expected %s\" % (ip, revzone, expected_revzone))\n\n def test_attrs_v4(self):\n self.r.ipblock_create('192.168.127.12/8', status='Container')\n self.r.ippool_create('pool')\n self.r.ippool_add_subnet('pool', '192.168.127.12/24', gateway='172.16.31.10')\n self.assertDictSubset(\n self.r.ipblock_get_attrs('192.168.127.12/24'),\n {'gateway': '172.16.31.10',\n 'status': 'Subnet',\n 'ip': '192.168.127.12/24',\n 'mask': '255.255.255.0',\n 'pool': 'pool',\n 'reverse_zone': '0.0.12.in-addr.arpa',\n })\n self.r.ip_mark('172.16.31.10', pool='pool', attributes={'key': 'value'})\n self.assertDictSubset(\n self.r.ipblock_get_attrs('172.16.31.10'),\n {'ip': '172.16.31.10',\n 'gateway': '172.16.31.10',\n 'status': 'Static',\n 'subnet': '192.168.127.12/24',\n 'mask': '255.255.255.0',\n 'key': 'value',\n 'pool': 'pool',\n 'reverse_zone': '0.0.12.in-addr.arpa',\n })\n self.r.ippool_get_ip('pool', attributes={'key': 'value'})\n self.assertDictSubset(\n self.r.ipblock_get_attrs('192.168.127.12'),\n {'ip': '192.168.127.12',\n 'gateway': '172.16.31.10',\n 'status': 'Static',\n 'subnet': '192.168.127.12/24',\n 'mask': '255.255.255.0',\n 'key': 'value',\n 'pool': 'pool',\n 'reverse_zone': '0.0.12.in-addr.arpa',\n })\n\n def test_simple_v6(self):\n self.r.ipblock_create('2001::/16', status='Container')\n self.r.ippool_create('pool6')\n self.r.ippool_add_subnet('pool6', '2001:08d8:0640:0000:0000:0000:0000:0000/55')\n self.assertDictSubset(\n self.r.ipblock_get_attrs('2fc00:e968:6179::de52:7100'),\n {'status': 'Available',\n 'reverse_zone': '1.0.0.4.6.0.8.d.8.0.1.0.0.2.ip6.arpa',\n })\n\n def test_mark_v4(self):\n self.r.ipblock_create('192.168.127.12/8', status='Container')\n self.r.ippool_create(\"pool\")\n self.r.ippool_add_subnet(\"pool\", '192.168.127.12/22')\n self.assert_mark_revzone('172.16.31.10', '0.0.12.in-addr.arpa')\n self.assert_mark_revzone('172.16.58.3', '1.0.12.in-addr.arpa')\n self.assert_mark_revzone('172.16.17.32', '2.0.12.in-addr.arpa')\n self.assert_mark_revzone('172.16.58.3', '3.0.12.in-addr.arpa')\n\n def test_small_subnets_v4(self):\n self.r.ipblock_create('172.16.17.32/8', status='Container')\n 
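# Two /25 subnets carved from the same /24 should share one /24 reverse zone.\n 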
self.r.ippool_create(\"pool\")\n self.r.ippool_add_subnet(\"pool\", '172.16.17.32/25')\n self.r.ippool_add_subnet(\"pool\", '172.16.31.10/25')\n self.assert_mark_revzone('172.16.31.10', '0.0.13.in-addr.arpa')\n self.assert_mark_revzone('172.16.31.10', '0.0.13.in-addr.arpa')\n\n def test_delete_subnet_v4(self):\n self.r.ipblock_create('172.16.17.32/8', status='Container')\n self.r.ippool_create(\"pool\")\n self.r.ippool_add_subnet(\"pool\", '172.16.17.32/25')\n self.r.ippool_add_subnet(\"pool\", '172.16.31.10/25')\n assert self.r.ippool_delete(\"pool\", force=True, delete_subnets=True)\n # The reverse zone is deleted when the last subnet for it is deleted.\n # ND-91\n assert not zone('0.0.13.in-addr.arpa')\n assert not zone('0.0.12.in-addr.arpa')\n assert not zone('1.0.12.in-addr.arpa')\n assert not zone('2.0.12.in-addr.arpa')\n assert not zone('3.0.12.in-addr.arpa')\n\n def test_nosplit_v6(self):\n self.r.ipblock_create('3001::/16', status='Container')\n self.r.ippool_create(\"pool6\")\n self.r.ippool_add_subnet(\"pool6\", '3001:08d8:a640:0000:0000:0000:0000:0000/56')\n self.assert_mark_revzone('fdf8:f53e:61e4::18',\n '0.0.0.4.6.a.8.d.8.0.1.0.0.3.ip6.arpa')\n assert not zone('1.0.0.4.6.a.8.d.8.0.1.0.0.3.ip6.arpa')\n assert self.r.ippool_delete(\"pool6\", force=True, delete_subnets=True)\n assert not zone('0.0.0.4.6.a.8.d.8.0.1.0.0.3.ip6.arpa')\n\n def test_split_v6(self):\n self.r.ipblock_create('2001::/16', status='Container')\n self.r.ippool_create(\"pool6\")\n self.r.ippool_add_subnet(\"pool6\", '2001:08d8:0640:0200:0000:0000:0000:0000/55')\n self.assert_mark_revzone('2fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b',\n '2.0.0.4.6.0.8.d.8.0.1.0.0.2.ip6.arpa')\n self.assert_mark_revzone('2fc00:db20:35b:7399::5',\n '3.0.0.4.6.0.8.d.8.0.1.0.0.2.ip6.arpa')\n assert not zone('4.0.0.4.6.0.8.d.8.0.1.0.0.2.ip6.arpa')\n assert self.r.ippool_delete(\"pool6\", force=True, delete_subnets=True)\n assert not zone('2.0.0.4.6.0.8.d.8.0.1.0.0.2.ip6.arpa')\n assert not zone('3.0.0.4.6.0.8.d.8.0.1.0.0.2.ip6.arpa')\n", "id": "11941225", "language": "Python", "matching_score": 2.139982223510742, "max_stars_count": 37, "path": "dim-testsuite/tests/revzone_test.py" }, { "content": "from tests.util import RPCTest\n\n\nclass IpblockTest(RPCTest):\n def test_function_classification(self):\n unmarked = []\n for name in [n for n in dir(self.r.obj) if not n.startswith('_')]:\n f = getattr(self.r.obj, name)\n if callable(f) and not hasattr(f, 'readonly'):\n unmarked.append(name)\n assert not unmarked\n", "id": "4449778", "language": "Python", "matching_score": 0.04645658656954765, "max_stars_count": 37, "path": "dim-testsuite/tests/rpc_test.py" }, { "content": "import os.path\nimport re\nimport sys\nfrom glob import glob\nfrom subprocess import Popen, PIPE, call, check_call\n\n\ndef this_dir(path):\n return os.path.join(os.path.dirname(os.path.realpath(__file__)), path)\n\n\ndef exists(prog):\n p = Popen(['which', prog], stdout=PIPE)\n p.communicate()\n return p.returncode == 0\n\n\ndef diff_files(a, b):\n prog = ['diff', '-uw']\n if sys.stdout.isatty() and exists('colordiff'):\n prog = ['colordiff', '-uw']\n if os.getenv('DIFF_TOOL') is not None:\n prog = [os.getenv('DIFF_TOOL')]\n call(prog + [a, b])\n\n\ndef onetab(s): return re.sub('\\t+', '\\t', s)\n\n\ndef compact(zone):\n return sorted(set(onetab(l) for l in zone.splitlines() if not re.match('^\\s*(;.*)?\\s*?$', l)))\n\n\ndef zones_equal(a, b):\n '''Ignores differences in whitespace even inside strings'''\n ac = compact(a)\n bc = compact(b)\n if len(ac) != len(bc) or 
any(re.sub('\\s+', '', l1) != re.sub('\\s+', '', l2)\n for l1, l2 in zip(ac, bc)):\n pdns_file = '/tmp/pdns.zone'\n dim_file = '/tmp/dim.zone'\n with open(pdns_file, 'w') as f:\n f.write('\\n'.join(compact(a)) + '\\n')\n with open(dim_file, 'w') as f:\n f.write('\\n'.join(compact(b)) + '\\n')\n diff_files(pdns_file, dim_file)\n return False\n return True\n\n\nPDNS_OUTPUT_JAR = None\n\n\ndef pdns_output_jar():\n def some(l, p):\n for e in l:\n if p(e):\n return e\n return None\n\n global PDNS_OUTPUT_JAR\n if PDNS_OUTPUT_JAR is None:\n PDNS_OUTPUT_JAR = some(glob(this_dir('../pdns-output/build/libs/pdns-output-*.jar')) +\n ['/opt/dim/pdns-output.jar'],\n os.path.exists)\n if PDNS_OUTPUT_JAR is None:\n raise Exception('pdns-output.jar not found')\n return PDNS_OUTPUT_JAR\n\n\ndef test_pdns_output_process(log):\n jvm_options = ['-Dlog4j.configurationFile=' + this_dir('log4j2.properties')] if not log else []\n cmd = ['java'] + jvm_options + ['-jar', pdns_output_jar(),\n '--config', this_dir('pdns-output-test.properties')]\n return Popen(cmd, stdout=PIPE, close_fds=True)\n\n\ndef verify_zone(zone):\n '''Verify if zone is correctly signed'''\n pdns_file = '/tmp/signed.zone'\n with open(pdns_file, 'w') as f:\n f.write('\\n'.join(compact(zone)) + '\\n')\n check_call(['java', '-cp', pdns_output_jar(), 'com.verisignlabs.dnssec.cl.VerifyZone', pdns_file])\n\n\ndef is_generated(l):\n for r in ['RRSIG', 'NSEC', 'NSEC3', 'NSEC3PARAM', 'DNSKEY']:\n if r in l:\n return True\n return False\n\n\ndef setup_pdns_output(dim):\n '''Add all zones to zone group pdns_group and return a zone->view mapping'''\n zone_list = dim.zone_list(fields=True)\n zones = [z['name'] for z in zone_list]\n zone_views = dict((z['name'], z['views']) for z in zone_list)\n pdns_group = dict((v['zone'], v['view']) for v in dim.zone_group_get_views('pdns_group'))\n for zone in set(zones) - set(pdns_group.keys()):\n if zone_views[zone] > 1:\n view = dim.zone_list_views(zone)[0]['name']\n else:\n view = None\n dim.zone_group_add_zone('pdns_group', zone, view=view)\n pdns_group[zone] = view\n return pdns_group\n\n\ndef compare_dim_pdns_zones(dim, pdns_ip, zone_view):\n '''\n Compare dim dump zone with pdns axfr\n\n zone_view is a map zone_name -> view_name\n '''\n for zone, view in zone_view.items():\n pdns = Popen(['dig', 'axfr', zone, '@' + pdns_ip], stdout=PIPE).communicate()[0]\n dump = dim.zone_dump(zone, view=view)\n if 'RRSIG' in pdns:\n verify_zone(pdns)\n # Strip DNSSEC records\n pdns = '\\n'.join([l for l in pdns.splitlines() if not is_generated(l)])\n if not zones_equal(pdns, dump):\n return False\n return True\n", "id": "9922256", "language": "Python", "matching_score": 3.2374331951141357, "max_stars_count": 37, "path": "dim-testsuite/tests/pdns_util.py" }, { "content": "import logging\nimport os\nimport random\nimport sys\nfrom functools import wraps\nfrom pprint import pformat\nfrom subprocess import Popen, PIPE\nfrom threading import Thread\n\nfrom dim import db\nfrom dim.models.dns import OutputUpdate\nfrom dim.rpc import TRPC\nfrom tests.pdns_test import PDNSTest\nfrom tests.pdns_util import compare_dim_pdns_zones, this_dir, test_pdns_output_process\n\n\ndef delete_record(rpc, r):\n rpc.rr_delete(zone=r['zone'], name=r['record'], type=r['type'], **r['value'])\n\n\ndef add_record(rpc, r):\n rpc.rr_create(zone=r['zone'], name=r['record'], type=r['type'], ttl=r['ttl'], **r['value'])\n\n\ndef extract(l, selected_idx):\n '''split l into two lists: elements with indices in selected and the rest'''\n selected = []\n rejected = 
[]\n selected_idx = set(selected_idx)\n for i, e in enumerate(l):\n if i in selected_idx:\n selected.append(e)\n else:\n rejected.append(e)\n return selected, rejected\n\n\nclass TestRequestProxy(object):\n ''''\n Simulate the flask lifecycle of a request by creating a new TRPC instance and request context\n (which in turns creates a new db session)\n '''\n def __init__(self, username, app):\n self.app = app\n self.username = username\n\n def __getattr__(self, name):\n if not name.startswith('_'):\n obj = TRPC(username=self.username)\n func = getattr(obj, name)\n if callable(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n with self.app.test_request_context():\n return func(*args, **kwargs)\n return wrapper\n raise AttributeError\n\n\ndone = False\n\n\ndef run_test(app, zone, pdns_output, db_uri, pdns_ip):\n global done\n try:\n rpc = TestRequestProxy('test_user', app)\n\n def check_zone():\n global done\n pdns_output.wait_updates(zone)\n if not compare_dim_pdns_zones(rpc, pdns_ip, {zone: None}):\n done = True\n if done:\n sys.exit()\n\n check_zone()\n rpc.zone_dnssec_enable(zone, nsec3_algorithm=1, nsec3_iterations=1, nsec3_salt='<PASSWORD>')\n check_zone()\n records = rpc.rr_list(zone=zone, value_as_object=True)\n created = [r for r in records if r['type'] not in ('SOA', 'DNSKEY')]\n deleted = []\n total = len(created)\n for _ in range(30):\n selected = random.sample(range(total), random.randint(1, 5))\n midpoint = len(created)\n to_del, created = extract(created, [i for i in selected if i < midpoint])\n to_add, deleted = extract(deleted, [i - midpoint for i in selected if i >= midpoint])\n created.extend(to_add)\n deleted.extend(to_del)\n print('Adding', pformat(to_add))\n print('Deleting', pformat(to_del))\n for r in to_del:\n delete_record(rpc, r)\n for r in to_add:\n add_record(rpc, r)\n check_zone()\n rpc.zone_dnssec_disable(zone)\n check_zone()\n except:\n logging.exception('Exception in run_test')\n done = True\n\n\ndef import_zone(zone):\n proc = Popen(['ndcli', 'import', 'zone', zone], stdin=PIPE, stdout=PIPE)\n zone_contents = open(this_dir(zone)).read()\n stdout, stderr = proc.communicate(zone_contents)\n if proc.returncode != 0:\n raise Exception('zone import failed')\n\n\nclass PDNSOutputProcess(object):\n def __enter__(self):\n self.proc = test_pdns_output_process(True)\n return self\n\n def __exit__(self, *args):\n self.proc.kill()\n self.proc = None\n\n def wait_updates(self, zone):\n '''Wait for all updates to be processed'''\n with test.app.test_request_context():\n while True:\n db.session.rollback()\n if OutputUpdate.query.filter(OutputUpdate.zone_name == zone).count() == 0:\n break\n else:\n os.read(self.proc.stdout.fileno(), 1024)\n\n\nif __name__ == '__main__':\n zones = {'web.de': {'db_uri': 'mysql://pdns:[email protected]:3307/pdns1',\n 'pdns_ip': '127.1.1.1'},\n 'web2.de': {'db_uri': 'mysql://pdns:[email protected]:3307/pdns2',\n 'pdns_ip': '127.2.2.2'}}\n\n global test\n test = PDNSTest('__init__')\n test.setUp()\n\n for zone in list(zones.keys()):\n test.cleanup_pdns_db(zones[zone]['db_uri'])\n import_zone(zone)\n test.create_output_for_zone(zone, zone, zone, db_uri=zones[zone]['db_uri'])\n with PDNSOutputProcess() as pdns_output:\n threads = []\n for zone, attr in zones.items():\n t = Thread(target=run_test, args=(test.app, zone, pdns_output), kwargs=attr)\n t.start()\n threads.append(t)\n for t in threads:\n while t.isAlive():\n t.join(0.1)\n", "id": "6437786", "language": "Python", "matching_score": 2.2873542308807373, "max_stars_count": 37, "path": 
"dim-testsuite/tests/pdns_changes.py" }, { "content": "from sqlalchemy import create_engine\n\nfrom tests.util import RPCTest\n\n\nclass PDNSTest(RPCTest):\n def cleanup_pdns_db(self, db_uri):\n with create_engine(db_uri).begin() as conn:\n conn.execute('delete from domains')\n conn.execute('delete from domainmetadata')\n conn.execute('delete from records')\n\n def create_output_for_zone(self, zone, output, zone_group, db_uri):\n self.r.output_create(output, plugin='pdns-db', db_uri=db_uri)\n self.r.zone_group_create(zone_group)\n self.r.zone_group_add_zone(zone_group, zone)\n self.r.output_add_group(output, zone_group)\n", "id": "11205089", "language": "Python", "matching_score": 2.8527920246124268, "max_stars_count": 37, "path": "dim-testsuite/tests/pdns_test.py" }, { "content": "#\n# Test the correctness of the pdns daemon in case of random failures in a\n# load-balanced scenario.\n#\n# $ python -m tests.pdns_locking\n#\nimport logging\nimport os\nimport random\nimport subprocess\nimport threading\nimport time\n\nfrom sqlalchemy import create_engine\n\nfrom tests.pdns_test import PDNSTest\n\n\ndef run_checks(pdns_uri):\n with create_engine(pdns_uri).begin() as conn:\n records = conn.execute('select * from records').fetchall()\n if len(records) >= 3:\n print(records)\n os.killpg(os.getpgid(0), 9)\n assert conn.execute('select count(*) from records').scalar() < 3\n\n\ndef event_generator():\n test = PDNSTest('__init__')\n test.setUp()\n logging.getLogger().setLevel(logging.WARNING)\n\n test.r.zone_create('test.com')\n test.create_output_for_zone('test.com')\n test.r.rr_create(name='a.test.com.', type='txt', strings=['0'])\n for i in range(1, 10000):\n test.r.rr_delete(name='a.test.com.', type='txt', strings=[str(i - 1)])\n test.r.rr_create(name='a.test.com.', type='txt', strings=[str(i)])\n run_checks(test.pdns_uri)\n time.sleep(0.05)\n\n\nclass PDNSDaemon(object):\n def __init__(self):\n self.start()\n\n def start(self):\n self.p = subprocess.Popen(['python', 'manage_dim', 'pdns', '-t'])\n\n def stop(self):\n self.p.kill()\n self.p.wait()\n\n def restart(self):\n self.stop()\n self.start()\n\n\ndef chaos_monkey():\n services = [PDNSDaemon() for _ in range(2)]\n while True:\n time.sleep(random.uniform(0, 0.3))\n random.choice(services).restart()\n\n\nif __name__ == '__main__':\n threading.Thread(target=event_generator).start()\n chaos_monkey()\n", "id": "12468356", "language": "Python", "matching_score": 0.8087897300720215, "max_stars_count": 37, "path": "dim-testsuite/tests/pdns_locking.py" }, { "content": "\n\nimport re\nimport subprocess\nfrom collections import deque\n\nimport pkg_resources\n\nfrom dim.models import SchemaInfo, SCHEMA_VERSION, db\n\n\ndef migrate():\n start_version = SchemaInfo.current_version()\n if start_version == SCHEMA_VERSION:\n print('Nothing to do')\n return\n print(('From', start_version, 'to', SCHEMA_VERSION))\n graph = gather_graph()\n q = deque([start_version])\n prev = {start_version: None}\n while q:\n ver = q.pop()\n if ver == SCHEMA_VERSION:\n plan = []\n while ver != start_version:\n prev_ver, script = prev[ver]\n plan.append((ver, script))\n ver = prev_ver\n for new_version, script in reversed(plan):\n run_script(new_version, script)\n return\n for nv, script in graph[ver]:\n if nv not in prev:\n prev[nv] = (ver, script)\n q.append(nv)\n raise Exception(\"Migration path not found from %s to %s\" % (start_version, SCHEMA_VERSION))\n\n\ndef gather_graph():\n graph = {}\n for script in pkg_resources.resource_listdir('dim', 'sql'):\n m = 
re.match('(migrate|rollback)_(.*)_to_(.*).sql', script)\n if m:\n x, y = m.group(2), m.group(3)\n graph.setdefault(x, []).append((y, script))\n return graph\n\n\ndef run_script(new_version, script):\n print((\"Changing schema version %s to %s: %s\"\n % (SchemaInfo.current_version(), new_version, script)))\n url = db.engine.url\n cmd = ['mysql',\n '-h%s' % url.host,\n url.database]\n if url.port:\n cmd.append('-P%s' % url.port)\n if url.username:\n cmd.append('-u%s' % url.username)\n if url.password:\n cmd.append('-p%s' % url.password)\n stdin = open(pkg_resources.resource_filename('dim', 'sql/' + script))\n subprocess.check_call(cmd, stdin=stdin)\n", "id": "7400911", "language": "Python", "matching_score": 1.0872143507003784, "max_stars_count": 37, "path": "dim/dim/models/migrate.py" }, { "content": "from distutils.core import setup\nfrom dim import version\n\nsetup(name='dim',\n version=version.VERSION,\n packages=['dim', 'dim.models'],\n package_data={'dim': ['sql/*.sql']},\n scripts=['report', 'manage_dim', 'manage_db'])\n", "id": "4514755", "language": "Python", "matching_score": 0.4671931862831116, "max_stars_count": 37, "path": "dim/setup.py" }, { "content": "from setuptools import setup\n\nsetup(name='dim-bind-file-agent',\n version='0.1',\n scripts=['dim-bind-file-agent'],\n install_requires=['dimclient==0.2',\n 'argparse']\n )\n", "id": "11832610", "language": "Python", "matching_score": 1.3549376726150513, "max_stars_count": 37, "path": "dim-bind/setup.py" }, { "content": "from setuptools import setup\nimport codecs\nimport os\nfrom dimcli import version\n\nsetup(name='ndcli',\n version=version.VERSION,\n scripts=['ndcli'],\n install_requires=['dimclient>=0.4.1',\n 'python-dateutil',\n 'dnspython'],\n packages=['dimcli'])\n", "id": "6828351", "language": "Python", "matching_score": 2.4087517261505127, "max_stars_count": 37, "path": "ndcli/setup.py" }, { "content": "from setuptools import setup\nfrom dimclient import version\n\nsetup(name='dimclient',\n version=version.VERSION,\n install_requires=['simplejson'],\n packages=['dimclient'])\n", "id": "1180070", "language": "Python", "matching_score": 0.08304865658283234, "max_stars_count": 37, "path": "dimclient/setup.py" }, { "content": "import copy\nimport os\nimport shlex\nimport sys\nimport textwrap\nfrom functools import wraps\n\n\ndef bash_quote(w, quote):\n '''\n Quote word *w* with quote character *quote* which may be empty, single quote or double quote.\n '''\n assert quote in ('', '\"', \"'\")\n if quote == \"'\":\n w = w.replace(\"'\", quote + '\"\\'\"' + quote)\n else:\n # some characters are special and cannot be escaped unless we use a single quote:\n # ! 
- get rid of history expansion\n # \\x01 - breaks escaping in bash: echo \"\\\u0001\\$\" -> \\\\$\n # \\n - when only using escapes\n special_characters = '!\\x01'\n if quote == '':\n special_characters += '\\n'\n for special in special_characters:\n if special in w:\n return (\"'\" + special + \"'\").join(bash_quote(s, quote) for s in w.split(special))\n\n # escape characters\n escaped_chars = set()\n if quote == '':\n escaped_chars |= set(os.environ.get(\"COMP_WORDBREAKS\", \" \\t\\\"'@><=;|&(:.\"))\n escaped_chars |= set(\"`$\\\"'\\t ~&;?|#()*{><[\")\n elif quote == '\"':\n escaped_chars |= set(\"`$\\\"\")\n escaped = ''\n last = ''\n for i, c in enumerate(w):\n if last == '\\\\' and (c in escaped_chars | set('\\n\\\\') or quote == ''):\n escaped += '\\\\'\n if (c == '\\\\' and i == len(w) - 1) or (c in escaped_chars):\n escaped += '\\\\'\n escaped += c\n last = c\n w = escaped\n return quote + w + quote\n\n\nclass Namespace(dict):\n def __getattr__(self, key):\n return self[key]\n\n def __setattr__(self, key, value):\n self[key] = value\n\n def __deepcopy__(self, memo):\n return copy.deepcopy(dict(self), memo)\n\n\nclass Parser(object):\n def __init__(self, tokens, complete_token=None):\n '''\n :param complete_token: the token to be completed; `None` disables completion\n '''\n self.tokens = tokens\n self.complete_token = complete_token\n # internal state\n self.long = {}\n self.short = {}\n self.pos = 0\n # results\n self._completions = []\n self.values = Namespace()\n self.errors = []\n self.subcommands = []\n\n def get_state(self):\n return dict([(attr, copy.copy(getattr(self, attr)))\n for attr in ('long', 'short', 'pos', '_completions', 'errors', 'subcommands')] +\n [('values', Namespace(copy.deepcopy(self.values)))])\n\n def set_state(self, state):\n for attr, val in state.items():\n setattr(self, attr, val)\n\n def add_options(self, options):\n for opt in options:\n if opt.short:\n self.short[opt.short] = opt\n if opt.long:\n self.long[opt.long] = opt\n\n def error(self, error):\n self.errors.append(error)\n\n def __repr__(self):\n return \"<Parser values=%r, errors=%r, subcommands=%r>\" % (self.values, self.errors, self.subcommands)\n\n @property\n def token(self):\n return self.tokens[self.pos] if self.pos < len(self.tokens) else None\n\n @property\n def last_token(self):\n return self.tokens[self.pos - 1] if self.pos - 1 >= 0 else None\n\n def token_is_option(self):\n return self.token.startswith('-')\n\n def eat_token(self):\n token = self.token\n self.pos += 1\n return token\n\n def barf_token(self):\n self.pos -= 1\n\n def parse_options(self):\n while self.token and self.token_is_option():\n option = None\n token = self.eat_token()\n if token.startswith('--'):\n if token[2:] in self.long:\n option = self.long[token[2:]]\n elif token[1:] in self.short:\n option = self.short[token[1:]]\n if option is None:\n self.error('Unknown option %s' % token)\n return\n else:\n option.parse(self)\n if self._completing_option:\n self._add_completions('-' + k for k in list(self.short.keys()))\n self._add_completions('--' + k for k in list(self.long.keys()))\n\n def parse_arguments(self, arguments):\n for arg in arguments:\n if arg.nargs not in (None, '?', '*', '+'):\n raise Exception('Invalid nargs %s' % arg.nargs)\n self._add_arg_completions(arg)\n self.parse_options()\n if arg.nargs in (None, '+'):\n arg.parse(self)\n self.parse_options()\n if arg.nargs in ('?', '*', '+'):\n rewind_state = None\n while self.token and (not arg.choices or self.token in arg.choices):\n if type(arg.stop_at) 
!= list and self.token == arg.stop_at:\n rewind_state = self.get_state()\n elif type(arg.stop_at) == list and self.token in arg.stop_at:\n rewind_state = self.get_state()\n arg.parse(self)\n self.parse_options()\n if arg.nargs == '?':\n break\n if rewind_state:\n self.set_state(rewind_state)\n if arg.nargs in ('*', '+'):\n # Even if the token doesn't match the set of choices, it\n # might still yield valid completions for the current arg\n self._add_arg_completions(arg)\n if self.errors:\n return\n self.parse_options()\n\n @property\n def completing(self):\n return not self.errors and self.token is None and self.complete_token is not None\n\n @property\n def _completing_option(self):\n return self.completing and len(self.complete_token) > 0 and self.complete_token[0] == '-'\n\n @property\n def _completing_argument(self):\n return self.completing and (len(self.complete_token) == 0 or self.complete_token[0] != '-')\n\n def _add_completions(self, completions):\n self._completions.extend(c for c in completions if c.startswith(self.complete_token))\n\n def _add_arg_completions(self, arg):\n if self._completing_argument:\n self._add_completions(arg.completions(self.complete_token, self))\n\n\nclass Option(object):\n def __init__(self, short, long, action='store_true', dest=None, help=None, default=None):\n '''\n The number of additional tokens needed for an Option is determined by\n *action*:\n\n - ``store_true`` requires 0 tokens and stores True in *dest*\n - ``store`` requires 1 token and stores it in *dest**\n '''\n self.short = short\n self.long = long\n self.dest = dest if dest else long\n self.help = help\n self.action = action\n self.default = default\n\n def __repr__(self):\n return '-%s/--%s' % (self.short, self.long)\n\n def set_default(self, parser):\n parser.values[self.dest] = self.default\n\n def parse(self, parser):\n if self.action == 'store_true':\n parser.values[self.dest] = True\n elif self.action == 'store':\n if parser.token is None or parser.token_is_option():\n parser.error(\"%s expects an argument\" % parser.last_token)\n else:\n value = parser.eat_token()\n parser.values[self.dest] = value\n\n\nclass ArgMixin(object):\n def usage(self):\n if self.nargs is None:\n return self.metavar\n elif self.nargs == '?':\n return '[%s]' % self.metavar\n elif self.nargs == '*':\n return '[%s]...' % self.metavar\n elif self.nargs == '+':\n return '%s...' % self.metavar\n else:\n raise Exception('Invalid nargs %s' % self.nargs)\n\n def __repr__(self):\n return self.metavar\n\n def set_default(self, parser):\n '''\n Sets the default value for the curent argument. Called as soon as the argument's command is seen.\n '''\n pass\n\n def completions(self, complete_token, parser):\n '''\n Returns the completions matching `complete_token` for the current state from `parser`.\n '''\n pass\n\n def parse(self, parser):\n '''\n Uses the state from `parser` to consume the tokens for the current arg\n (only one instance, even if nargs says otherwise). 
Called only if at\n least a token is required for the current argument.\n '''\n pass\n\n\nclass Argument(ArgMixin):\n def __init__(self, name, dest=None, metavar=None, nargs=None, action='store', choices=None,\n default=None, completions=None, stop_at=None):\n self.name = name\n self.dest = dest if dest else name\n if metavar:\n self.metavar = metavar\n elif choices:\n self.metavar = '|'.join(choices)\n else:\n self.metavar = name.upper()\n self.nargs = nargs\n self.action = action\n self.choices = choices\n self.completion_fn = completions\n self.default = default\n # stop_at is an ugly hack to resolve grammar ambiguity\n # The parser will revert to the state for the last instance of this token\n self.stop_at = stop_at\n\n def set_default(self, parser):\n if self.action in ('append', 'append_unique') or self.nargs in ('*', '+'):\n parser.values.setdefault(self.dest, [])\n elif self.action == 'store':\n parser.values.setdefault(self.dest, self.default)\n else:\n pass\n\n def completions(self, complete_token, parser):\n if self.choices:\n if self.action == 'append_unique':\n return set(self.choices) - set(parser.values[self.dest])\n else:\n return self.choices\n elif hasattr(self, 'completion_fn') and callable(self.completion_fn):\n comps = self.completion_fn(complete_token, parser)\n if self.action == 'append_unique':\n return set(comps) - set(parser.values[self.dest])\n return comps\n else:\n return []\n\n def parse(self, parser):\n token = parser.eat_token()\n if token is None:\n parser.error(\"A value is required for %s\" % self.metavar)\n return\n if self.choices and token not in self.choices:\n parser.error(\"%s must be one of: %s\" % (self.metavar, ' '.join(self.choices)))\n return\n\n if self.action == 'append' or self.nargs in ('*', '+'):\n parser.values[self.dest].append(token)\n elif self.action == 'store':\n parser.values[self.dest] = token\n elif self.action == 'append_unique':\n pv = parser.values[self.dest]\n if token in pv:\n parser.error('%s cannot be specified twice' % token)\n else:\n pv.append(token)\n elif self.action is None:\n pass\n else:\n raise Exception('Invalid action %s' % self.action)\n\n\nclass Token(Argument):\n def __init__(self, name, dest=None, nargs=None, action=None):\n super(Token, self).__init__(name, metavar=name, choices=(name, ), action=action, nargs=nargs)\n if dest is None:\n self.dest = None\n\n\nclass Group(ArgMixin):\n '''\n If the group has nargs='?' 
or nargs='*' and it's not followed by eof it must\n start with a static set of choices (otherwise the grammar would be\n ambiguous).\n '''\n def __init__(self, *args, **kwargs):\n self.nargs = kwargs.pop('nargs', None)\n self.stop_at = kwargs.pop('stop_at', None)\n self.arguments = args\n\n @property\n def metavar(self):\n return ' '.join(a.usage() for a in self.arguments)\n\n @property\n def choices(self):\n return self.arguments[0].choices\n\n def completions(self, complete_token, parser):\n return self.arguments[0].completions(complete_token, parser)\n\n def parse(self, parser):\n parser.parse_arguments(self.arguments)\n\n def set_default(self, parser):\n for arg in self.arguments:\n arg.set_default(parser)\n\n\nclass Command(object):\n def __init__(self, name, *args, **kwargs):\n self.name = name\n self.options = []\n self.subcommands = []\n self.arguments = []\n for o in args:\n if isinstance(o, Option):\n self.options.append(o)\n elif isinstance(o, Command):\n self.subcommands.append(o)\n else:\n self.arguments.append(o)\n self.help = kwargs.pop('help', None)\n self.description = kwargs.pop('description', None)\n self.defaults = kwargs.pop('defaults', {})\n self.default_subcommand = kwargs.pop('default_subcommand', None)\n assert not kwargs\n\n def register(self, *args, **kwargs):\n def decorator(func):\n cmd, path = self._get_scmd_path(args[0])\n if 'description' not in kwargs and func.__doc__:\n kwargs['description'] = textwrap.dedent(func.__doc__).strip()\n kwargs.setdefault('defaults', {}).setdefault('run', func)\n cmd.subcommands.append(Command(path[-1], *(args[1:]), **kwargs))\n\n @wraps(func)\n def wrapper(*wargs, **wkwargs):\n func(*wargs, **wkwargs)\n return wrapper\n return decorator\n\n def alias(self, source_path, dest_path):\n scmd, spath = self._get_scmd_path(source_path)\n dcmd, dpath = self._get_scmd_path(dest_path)\n dest_cmd = copy.copy(scmd._get_subcommand(spath[-1]))\n dest_cmd.name = dpath[-1]\n dcmd.subcommands.append(dest_cmd)\n\n def set_default(self, parser):\n parser.values.update(self.defaults)\n for arg in self.arguments:\n arg.set_default(parser)\n for opt in self.options:\n opt.set_default(parser)\n\n def parse(self, tokens):\n parser = Parser(tokens)\n self._parse_command(parser)\n if parser.token:\n parser.error('Unparsed tokens: %s' % ' '.join(parser.tokens[parser.pos:]))\n return parser\n\n def complete(self, line, point):\n # ignore everything after point\n line = line[:point]\n # if the line ends in an incomplete escape sequence skip it\n if line[-1] == '\\\\' and line[-2] != '\\\\':\n line = line[:-1]\n quote_char = ''\n for attempt in range(2):\n try:\n lex = shlex.shlex(line, posix=True)\n lex.whitespace_split = True\n tokens = list(lex)\n except ValueError:\n if attempt == 0:\n # close the quotes and try again\n quote_char = lex.state\n line += quote_char\n else:\n raise\n tokens = tokens[1:] # skip the program name\n if tokens and (line[-1] != ' ' or line[-2:] == '\\ '):\n complete_token = tokens.pop()\n else:\n complete_token = ''\n parser = Parser(tokens, complete_token)\n self._parse_command(parser)\n return set(bash_quote(c, quote_char) for c in parser._completions)\n\n def handle_shell_completion(self):\n if 'COMP_LINE' in os.environ:\n for c in self.complete(os.environ['COMP_LINE'], int(os.environ['COMP_POINT'])):\n print(c)\n sys.exit()\n\n def usage(self):\n return ' '.join([self.name] + [a.usage() for a in self.arguments])\n\n def chain_usage(self, chain):\n return ' '.join(c.usage() for c in chain)\n\n def print_help(self, 
subcommands):\n '''Only works for the top-level command'''\n last = self\n chain = [self]\n for cmd_name in subcommands:\n last = last._get_subcommand(cmd_name)\n if last is None:\n print(\"Unknown subcommand: %s\" % cmd_name)\n return\n chain.append(last)\n\n usage = self.chain_usage(chain)\n if last.subcommands:\n if last.default_subcommand:\n usage += ' [<subcommand>]'\n else:\n usage += ' <subcommand>'\n print(\"Usage: {}\".format(usage))\n if last.description or last.help:\n print(\"\\n\", last.description or last.help)\n\n def _cmd_chains(cmd, stop_on_args=False):\n '''Follows subcommand chains until an argument can be specified'''\n if not cmd.subcommands or (cmd.arguments and stop_on_args):\n return {'': cmd}\n else:\n return dict(((s.name + ' ' + name).strip(), cmd)\n for s in cmd.subcommands\n for name, cmd in _cmd_chains(s, True).items())\n if last.subcommands:\n print(\"\\nSubcommands:\")\n if last.default_subcommand:\n cmd = last._get_subcommand(last.default_subcommand)\n print(\" %-20s %s\" % ('[%s]' % cmd.name, cmd.help or cmd.name))\n for name, cmd in sorted(_cmd_chains(last).items()):\n if not last.default_subcommand or last.default_subcommand != name:\n print(\" %-20s %s\" % (name, cmd.help or name))\n\n for i, cmd in enumerate(reversed(chain)):\n if cmd.options:\n print(\"\\nOptions for %s:\" % ' '.join(c.name for c in chain[:len(chain) - i]))\n wrapper = textwrap.TextWrapper(width=80,\n initial_indent=' ' * 26,\n subsequent_indent=' ' * 26)\n for opt in sorted(cmd.options, key=lambda x: x.long or x.short):\n print(\" %-2s %-20s %s\" % ('-' + opt.short if opt.short else '',\n '--' + opt.long if opt.long else '',\n wrapper.fill(opt.help or '').lstrip()))\n\n def _get_subcommand(self, subcommand):\n for cmd in self.subcommands:\n if cmd.name == subcommand:\n return cmd\n else:\n return None\n\n def _get_scmd_path(self, path_string):\n path = path_string.split()\n cmd = self\n for cname in path[:-1]:\n cmd = cmd._get_subcommand(cname)\n if cmd is None:\n raise Exception('Invalid command path: %s (%s not found)' % (path_string, cname))\n return cmd, path\n\n def _parse_command(self, parser):\n self.set_default(parser)\n parser.add_options(self.options)\n parser.parse_arguments(self.arguments)\n if self.subcommands:\n if parser._completing_argument:\n parser._add_completions(s.name for s in self.subcommands)\n token = parser.eat_token()\n if token is None:\n if self.default_subcommand:\n self._get_subcommand(self.default_subcommand).set_default(parser)\n else:\n parser.error(\"Subcommand expected\")\n else:\n cmd = self._get_subcommand(token.lower())\n if cmd:\n parser.subcommands.append(cmd.name)\n cmd._parse_command(parser)\n elif self.default_subcommand:\n parser.barf_token()\n cmd = self._get_subcommand(self.default_subcommand)\n cmd._parse_command(parser)\n else:\n parser.error(\"Invalid subcommand %s\" % token)\n\n\n\n", "id": "2573445", "language": "Python", "matching_score": 2.643223524093628, "max_stars_count": 37, "path": "ndcli/dimcli/cliparse.py" }, { "content": "import sys\nsys.path.insert(0, \"..\")\nfrom dimcli import cmd\n\n\ndef options(opt_list):\n for opt in opt_list:\n opt_str = []\n if opt.short:\n opt_str.append('-' + opt.short)\n if opt.long:\n opt_str.append('--' + opt.long)\n yield ', '.join(opt_str), opt.help\n\n\ndef print_options(opt_list):\n for name, help in options(opt_list):\n print('%-23s' % name, help)\n\n\ndef command_leaves(cmd):\n if cmd.subcommands:\n for sub in sorted(cmd.subcommands, key=lambda s: s.name):\n for leaf, chain in 
command_leaves(sub):\n yield leaf, [cmd] + chain\n else:\n yield cmd, [cmd]\n\n\ndef gendoc():\n print(\"Global Options\\n==============\\n\")\n print_options(cmd.options)\n\n print(\"\\nCommands\\n========\\n\")\n for leaf, chain in command_leaves(cmd):\n usage = cmd.chain_usage(chain)\n print(usage)\n print('-' * len(usage))\n if leaf.description or leaf.help:\n print(\"\\n\", leaf.description or leaf.help)\n options = sum([c.options for c in chain[1:]], [])\n if options:\n print(\"\\nOptions:\\n\")\n print_options(options)\n print\n\ngendoc()\n", "id": "5283497", "language": "Python", "matching_score": 0.5287874341011047, "max_stars_count": 37, "path": "ndcli/doc/gendoc.py" }, { "content": "from dim.ipaddr import IP\nfrom dim.iptrie import IPTrie\n\n\ndef test_create():\n t = IPTrie(4)\n\n ip2 = IP('192.168.127.12/24')\n t.insert(ip2, 2)\n assert t.parent(ip2) is None\n assert t.find(ip2).data == 2\n t.insert(ip2, 3)\n assert t.find(ip2).data == 3\n\n ip1 = IP('192.168.127.12/8')\n t.insert(ip1, data=1)\n assert t.parent(ip1) is None\n assert t.parent(ip2) is t.find(ip1)\n\n ip3 = IP('192.168.127.12/28')\n t.insert(ip3, 4)\n assert t.parent(ip3) is t.find(ip2)\n t.delete(ip2)\n assert t.parent(ip3) is t.find(ip1)\n\n t.delete_subtree(ip2)\n assert t.find(ip3) is None\n\n t.delete_subtree(ip1)\n assert t.find(ip1) is None\n", "id": "10515301", "language": "Python", "matching_score": 1.1153674125671387, "max_stars_count": 37, "path": "dim-testsuite/tests/iptrie_test.py" }, { "content": "from dim.ipaddr import IP\n\n\ndef test_mask():\n ip1 = IP('192.168.127.12/24')\n assert ip1.hostmask == 0x000000ff\n assert ip1.netmask == 0xffffff00\n\n\ndef test_contains():\n ip1 = IP('192.168.127.12/25')\n ip2 = IP('192.168.127.12/24')\n assert ip1 in ip2\n assert ip2 not in ip1\n assert ip1 in ip1\n\n assert IP('192.168.3.11') in ip1\n", "id": "11169020", "language": "Python", "matching_score": 0.00728070642799139, "max_stars_count": 37, "path": "dim-testsuite/tests/ip_test.py" }, { "content": "import configparser\nimport datetime\nimport logging\nimport io\nimport time\nimport xml.etree.ElementTree as et\nfrom contextlib import contextmanager\n\nimport requests\n\nfrom dim import db\nfrom dim.models import RegistrarAccount, RegistrarAction, Zone, ZoneKey\nfrom dim.models.history import record_history\n\n\nproxies = {}\n\n\n@contextmanager\ndef transaction_scope():\n \"\"\"Provide a transactional scope around a series of operations.\"\"\"\n try:\n yield\n except:\n db.session.rollback()\n raise\n finally:\n db.session.close()\n\n\ndef make_request(request, account, ctid=''):\n request = '''<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <request>\n <auth>\n <user>%s</user>\n <password>%s</password>\n <context>%s</context>\n </auth>\n <language>en</language>\n <task><ctid>%s</ctid>''' % (account['username'], account['password'], account['subaccount'],\n ctid) + request + '</task></request>'\n\n r = requests.post(account['url'],\n data=request,\n proxies=proxies,\n headers={'Content-Type': 'application/xml',\n 'charset': 'utf-8'})\n if r.status_code != 200:\n raise Exception('status code: %d' % r.status_code)\n return r.text\n\n\ndef replace_keys(domain, keys):\n key_template = '''\n <dnssec>\n <flags>257</flags>\n <protocol>3</protocol>\n <algorithm>8</algorithm>\n <publickey>%s</publickey>\n </dnssec>'''\n key_info = '\\n'.join([key_template % key for key in keys])\n return '''<code>0102</code>\n <domain>\n <name>%s</name>\n %s\n <extension>\n <mode>1</mode>\n </extension>\n </domain>''' % (domain, 
key_info)\n\n\ndef pollinfo():\n return '''<code>0905</code>'''\n\n\ndef ack(message_id):\n return '''<code>0906</code><message><id>%s</id></message>''' % message_id\n\n\ndef got_notification(e):\n try:\n return e.find('./result/status/code').text == 'N0102'\n except:\n return False\n\n\ndef get_pretty_error_message(reply):\n try:\n e = et.fromstring(reply)\n except:\n return reply\n try:\n return get_error_from_message(e)\n except:\n try:\n return get_request_error(e)\n except:\n return reply\n\n\ndef get_request_error(e):\n if e.find('./result/status/type').text != 'error':\n return ''\n errors = [e.find('./result/status/text').text]\n for msg in e.findall('./result/msg'):\n errors.append(msg.find('text').text)\n return '\\n'.join(errors)\n\n\ndef get_error_from_message(e):\n if e.find('./result/data/message/job/status/type').text != 'error':\n return ''\n errors = [e.find('./result/data/message/job/status/text').text]\n for msg in e.findall('./result/data/message/job/nic_response'):\n errors.append(msg.text)\n return '\\n'.join(errors)\n\n\ndef is_success_message(e):\n return e.find('./result/data/message/job/status/type').text == 'success'\n\n\ndef queued(e):\n return int(e.find('./result/data/summary').text)\n\n\ndef get_action_keys(action):\n return ZoneKey.query.filter(ZoneKey.registrar_action == action).all()\n\n\ndef get_account_info(account):\n '''Get info from a RegistrarAccount and save it in a dict'''\n return dict(id=account.id,\n name=account.name,\n url=account.url,\n username=account.username,\n password=<PASSWORD>,\n subaccount=account.subaccount)\n\n\ndef unlink_action_keys(action_keys):\n for key in action_keys:\n key.registrar_action = None\n\n\ndef handle_pending():\n while True:\n with transaction_scope():\n action = RegistrarAction.query.filter_by(status='pending').first()\n if action is None:\n break\n try:\n action.started = datetime.datetime.utcnow()\n reply = make_request(replace_keys(action.zone.name,\n [key.pubkey for key in action.zone.keys if\n key.registrar_action == action]),\n get_account_info(action.zone.registrar_account), action.id)\n if got_notification(et.fromstring(reply)):\n logging.info('Request domain update for %s with registrar-account %s accepted' %\n (action.zone.display_name, action.zone.registrar_account.name))\n action.status = 'running'\n else:\n logging.info('Request domain update for %s with registrar-account %s failed' %\n (action.zone.display_name, action.zone.registrar_account.name))\n action.status = 'failed'\n action.error = reply\n action.completed = datetime.datetime.utcnow()\n unlink_action_keys(get_action_keys(action))\n db.session.commit()\n except Exception:\n logging.exception('Error requesting domain update for %s with registrar-account %s' %\n (action.zone.display_name, action.zone.registrar_account.name))\n\n\ndef poll_queue(account):\n with transaction_scope():\n running = RegistrarAction.query.filter_by(status='running').join(Zone).join(RegistrarAccount)\\\n .filter(RegistrarAccount.id == account['id']).count()\n if running == 0:\n return\n while True:\n poll_info = make_request(pollinfo(), account)\n pi = et.fromstring(poll_info)\n queued_messages = queued(pi)\n if not queued_messages:\n break\n logging.debug('registrar-account %s has %d messages queued' %\n (account['name'], queued_messages))\n msg_id = pi.find('./result/data/message/id')\n ctid = pi.find('./result/data/message/job/ctid')\n if ctid is not None:\n with transaction_scope():\n action = RegistrarAction.query.filter_by(id=int(ctid.text)).first()\n if 
action is None:\n logging.warn('Ignoring registrar-account %s reply for action with id %s' %\n (account['name'], ctid.text))\n else:\n action.completed = datetime.datetime.utcnow()\n action_keys = list(get_action_keys(action))\n if is_success_message(pi):\n action.status = 'done'\n action.zone.update_registrar_keys(action_keys)\n if action_keys:\n record_history(action.zone.registrar_account, action='published',\n zone=action.zone.display_name,\n action_info=(', '.join([act.label for act in action_keys]) +\n ' published ' +\n ', '.join([act.ds(2) for act in action_keys])))\n else:\n record_history(action.zone.registrar_account, action='published',\n zone=action.zone.display_name,\n action_info='unpublished all DS records')\n logging.info('Domain update for %s successful with registrar-account %s' %\n (action.zone.display_name, action.zone.registrar_account.name))\n else:\n action.error = poll_info\n action.status = 'failed'\n logging.info('Domain update for %s failed with registrar-account %s' %\n (action.zone.display_name, action.zone.registrar_account.name))\n unlink_action_keys(action_keys)\n db.session.commit()\n if msg_id is not None:\n r = make_request(ack(msg_id.text), account)\n if not queued(et.fromstring(r)):\n break\n\n\ndef discard_old_operations():\n ''' Discard operations running for more than 1 day by setting their status to 'unknown'.\n From the autodns3 API docs: \"Polling allows you to connect directly to our system and poll messages in XML format.\n If the messages are not polled within 24 hours, they are sent in XML format by email.\"\n '''\n with transaction_scope():\n actions = RegistrarAction.query.filter_by(status='running') \\\n .filter(RegistrarAction.started < datetime.datetime.utcnow() - datetime.timedelta(days=1)).all()\n for action in actions:\n action_keys = list(get_action_keys(action))\n unlink_action_keys(action_keys)\n action.status = 'unknown'\n logging.warn('Marked action on zone %s started on %s as unknown' %\n (action.zone.display_name, action.started))\n db.session.commit()\n\n\ndef read_config():\n global proxies\n try:\n CONFIG_FILE = '/etc/dim/dim-autodns3-plugin.cfg'\n parser = configparser.ConfigParser()\n with open(CONFIG_FILE) as stream:\n stream = io.StringIO(\"[root]\\n\" + stream.read())\n parser.readfp(stream)\n proxy = parser.get('root', 'proxy')\n if proxy:\n proxies = {'http': proxy, 'https': proxy}\n logging.debug('Running autodns3 with proxy %s' % proxy)\n except Exception:\n logging.exception('Error reading configuration file %s' % CONFIG_FILE)\n raise\n\n\ndef run():\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n read_config()\n while True:\n handle_pending()\n discard_old_operations()\n accounts = []\n with transaction_scope():\n for account in RegistrarAccount.query.all():\n accounts.append(get_account_info(account))\n for account in accounts:\n try:\n poll_queue(account)\n except Exception:\n logging.exception('Error polling registrar-account %s' % account.name)\n time.sleep(5)\n", "id": "622833", "language": "Python", "matching_score": 2.755906105041504, "max_stars_count": 37, "path": "dim/dim/autodns3.py" }, { "content": "from dim import db\nfrom dim.models import Zone, RegistrarAction\nfrom dim.autodns3 import get_action_keys\nfrom dim.errors import DimError\nfrom tests.util import RPCTest, raises\n\n\nclass Autodns3Test(RPCTest):\n def setUp(self):\n RPCTest.setUp(self)\n self.r.zone_create('a.de')\n self.r.registrar_account_create('ra', 'autodns3', '', '', '', '')\n self.r.registrar_account_add_zone('ra', 
'a.de')\n\n def get_action(self, zone):\n zone = Zone.query.filter_by(name=zone, profile=False).first()\n assert zone\n action = RegistrarAction.query.filter_by(zone=zone).filter(\n RegistrarAction.status.in_(['running', 'pending'])).first()\n assert action\n return action\n\n def start(self, zone):\n self.r.registrar_account_update_zone(zone)\n action = self.get_action(zone)\n action.status = 'running'\n db.session.commit()\n\n def finish_action(self, zone, error):\n action = self.get_action(zone)\n action.error = error\n action.status = 'failed' if error else 'done'\n action_keys = get_action_keys(action)\n for key in action_keys:\n key.registrar_action = None\n if not error:\n action.zone.update_registrar_keys(action_keys)\n db.session.commit()\n\n def fail(self, zone):\n self.finish_action(zone, 'error')\n\n def success(self, zone):\n self.finish_action(zone, None)\n\n def check_pending(self, zone, expected):\n assert len(self.r.zone_registrar_actions(zone)) == expected\n\n def first_key(self, zone):\n return [k['label'] for k in self.r.zone_list_keys(zone) if k['type'] == 'KSK'][0]\n\n def test_list_actions(self):\n self.r.zone_dnssec_enable('a.de')\n self.check_pending('a.de', 1)\n self.start('a.de')\n self.check_pending('a.de', 1)\n self.fail('a.de')\n self.check_pending('a.de', 1)\n self.start('a.de')\n self.check_pending('a.de', 1)\n self.success('a.de')\n self.check_pending('a.de', 0)\n\n def test_list_actions2(self):\n self.r.zone_dnssec_enable('a.de')\n self.check_pending('a.de', 1)\n self.start('a.de')\n self.check_pending('a.de', 1)\n self.r.zone_create_key('a.de', 'ksk')\n self.check_pending('a.de', 2)\n\n def test_no_op(self):\n self.check_pending('a.de', 0)\n\n def test_rollover(self):\n self.r.zone_dnssec_enable('a.de')\n old_key = self.first_key('a.de')\n self.start('a.de')\n self.success('a.de')\n self.r.zone_create_key('a.de', 'ksk')\n self.start('a.de')\n self.success('a.de')\n self.r.zone_delete_key('a.de', old_key)\n self.check_pending('a.de', 1)\n self.start('a.de')\n self.check_pending('a.de', 1)\n self.success('a.de')\n self.check_pending('a.de', 0)\n self.r.zone_delete_key('a.de', self.first_key('a.de'))\n self.check_pending('a.de', 1)\n self.start('a.de')\n self.success('a.de')\n self.check_pending('a.de', 0)\n with raises(DimError):\n self.start('a.de')\n\n def test_ra_list_zones(self):\n self.r.zone_dnssec_enable('a.de')\n self.start('a.de')\n self.fail('a.de')\n r = self.r.registrar_account_list_zones('ra', include_status=True)\n assert len(r) == 1\n assert r[0]['zone'] == 'a.de'\n assert r[0]['status'] == 'failed'\n assert r[0]['error'] == 'error'\n\n def test_delete(self):\n self.r.zone_dnssec_enable('a.de')\n self.start('a.de')\n with raises(DimError):\n self.r.zone_delete_key('a.de', self.first_key('a.de'))\n with raises(DimError):\n self.r.zone_delete('a.de')\n with raises(DimError):\n self.r.registrar_account_delete_zone('ra', 'a.de')\n self.r.zone_group_create('zg')\n self.r.zone_group_add_zone('zg', 'a.de')\n with raises(DimError):\n self.r.zone_group_remove_zone('zg', 'a.de')\n self.r.output_create('o', plugin='pdns-db', db_uri='')\n self.r.output_add_group('o', 'zg')\n with raises(DimError):\n self.r.output_remove_group('o', 'zg')\n", "id": "11708670", "language": "Python", "matching_score": 0.9743911623954773, "max_stars_count": 37, "path": "dim-testsuite/tests/autodns3_test.py" }, { "content": "import re\nfrom dim import db\nfrom dim.commands import pool_report, update_history\nfrom dim.models import AllocationHistory, Pool\nfrom tests.util 
import RPCTest\nfrom datetime import datetime, timedelta\n\n\nclass CommandsTest(RPCTest):\n def test_pool_report(self):\n assert 'does not exist' in pool_report('pool')\n\n self.r.ipblock_create('172.16.17.32/8', status='Container')\n self.r.ipblock_create('192.168.127.12/8', status='Container')\n self.r.ippool_create('pool')\n self.r.ippool_add_subnet('pool', '172.16.17.32/24')\n self.r.ippool_add_subnet('pool', '192.168.127.12/24')\n assert re.search('usage *n/a *n/a *n/a', pool_report('pool'))\n\n pool = Pool.query.filter_by(name='pool').one()\n now = datetime.now()\n db.session.add_all(\n [AllocationHistory(pool=pool, date=now - timedelta(days=30), total_ips=512, used_ips=4),\n AllocationHistory(pool=pool, date=now - timedelta(days=7), total_ips=512, used_ips=40),\n AllocationHistory(pool=pool, date=now - timedelta(days=1), total_ips=512, used_ips=44)])\n db.session.commit()\n assert self.r.ippool_get_delegation('pool', 26)\n report = pool_report('pool')\n assert re.search('usage *24 *28 *64', report)\n assert '444 IPs are still free' in report\n assert 'Based on data from the last 30 days, the pool will be full in 208.1 days.' in report\n\n def test_pool_reportv6(self):\n self.r.ipblock_create('12::/32', status='Container')\n self.r.ipblock_create('13::/32', status='Container')\n self.r.ippool_create('pool')\n self.r.ippool_add_subnet('pool', '12::/56', dont_reserve_network_broadcast=True)\n self.r.ippool_add_subnet('pool', '13::/56', dont_reserve_network_broadcast=True)\n pool = Pool.query.filter_by(name='pool').one()\n now = datetime.now()\n db.session.add_all(\n [AllocationHistory(pool=pool, date=now - timedelta(days=30), total_ips=2 ** 57, used_ips=4 * 2 ** 64),\n AllocationHistory(pool=pool, date=now - timedelta(days=7), total_ips=2 ** 57, used_ips=40 * 2 ** 64),\n AllocationHistory(pool=pool, date=now - timedelta(days=1), total_ips=2 ** 57, used_ips=44 * 2 ** 64)])\n db.session.commit()\n assert self.r.ippool_get_delegation('pool', 58)\n report = pool_report('pool', prefix=64)\n assert re.search('usage *20 *24 *60', report)\n assert '448 /64 blocks are still free' in report\n assert 'Based on data from the last 30 days, the pool will be full in 224.0 days.' 
in report\n\n def test_update_history(self):\n self.r.ipblock_create('12::/32', status='Container')\n self.r.ipblock_create('13::/32', status='Container')\n self.r.ippool_create('pool')\n self.r.ippool_add_subnet('pool', '12::/56')\n self.r.ippool_add_subnet('pool', '13::/56')\n assert self.r.ippool_get_delegation('pool', 58)\n update_history()\n\n pool = Pool.query.filter_by(name='pool').one()\n ah = pool.allocation_history(0)\n assert ah.total_ips == 512 * 2 ** 64\n assert ah.used_ips == 64 * 2 ** 64 + 2\n", "id": "11773189", "language": "Python", "matching_score": 3.187255620956421, "max_stars_count": 37, "path": "dim-testsuite/tests/commands_test.py" }, { "content": "from tests.util import RPCTest\n\n\ndef ips(d):\n return set(o['ip'] for o in d)\n\n\nclass AllocatorTest(RPCTest):\n def test_delegation1(self):\n self.r.ippool_create('pool')\n self.r.ipblock_create('192.168.3.11/8', status='Container')\n self.r.ippool_add_subnet('pool', '192.168.3.11/29')\n assert ips(self.r.ippool_get_delegation('pool', 30, maxsplit=1)) == \\\n set(['172.16.17.32/31', '192.168.127.12/31'])\n\n def test_delegation2(self):\n '''Don't allow hosts to be returned from ippool_get_delegation'''\n # .8 __*_\n # .16 _*_*\n self.r.ippool_create('pool')\n self.r.ipblock_create('192.168.3.11/8', status='Container')\n self.r.ippool_add_subnet('pool', '192.168.127.12/29', dont_reserve_network_broadcast=True)\n self.r.ip_mark('172.16.17.32')\n self.r.ip_mark('192.168.3.11')\n self.r.ip_mark('172.16.58.3')\n assert not self.r.ippool_get_delegation('pool', 30, maxsplit=2)\n\n def test_delegation3(self):\n # .16 ____\n # .20 *___\n # .24 ___*\n # .28 *__*\n self.r.ippool_create('pool')\n self.r.ipblock_create('192.168.3.11/8', status='Container')\n self.r.ippool_add_subnet('pool', '172.16.31.10/28', dont_reserve_network_broadcast=True)\n self.r.ip_mark('172.16.31.10')\n self.r.ip_mark('172.16.58.3')\n self.r.ip_mark('172.16.17.32')\n self.r.ip_mark('172.16.17.32')\n assert ips(self.r.ippool_get_delegation('pool', 29, maxsplit=2)) == \\\n set(['172.16.31.10/30', '172.16.31.10/31', '192.168.3.11/31'])\n\n def test_delegation4(self):\n # R*__\n # _**_\n # __*_\n # ____\n self.r.ippool_create('pool')\n self.r.ipblock_create('192.168.3.11/8', status='Container')\n self.r.ippool_add_subnet('pool', '192.168.3.11/28', dont_reserve_network_broadcast=True)\n self.r.ip_mark('192.168.3.11', pool='pool')\n self.r.ip_mark('172.16.58.3', pool='pool')\n self.r.ip_mark('192.168.3.11', pool='pool')\n self.r.ip_mark('172.16.17.32', pool='pool')\n d = self.r.ippool_get_delegation('pool', 29, maxsplit=2, attributes={'country': 'de'})\n assert len(d) == 3\n self.assertDictSubset(\n d[0],\n {'country': 'de',\n 'pool': 'pool',\n 'status': 'Delegation',\n 'ip': '172.16.31.10/30',\n 'subnet': '192.168.3.11/28',\n 'mask': '255.255.255.240',\n })\n self.assertDictSubset(\n d[1],\n {'country': 'de',\n 'pool': 'pool',\n 'status': 'Delegation',\n 'ip': '172.16.17.32/31',\n 'subnet': '192.168.3.11/28',\n 'mask': '255.255.255.240',\n })\n self.assertDictSubset(\n d[2],\n {'country': 'de',\n 'pool': 'pool',\n 'status': 'Delegation',\n 'ip': '192.168.127.12/31',\n 'subnet': '192.168.3.11/28',\n 'mask': '255.255.255.240',\n })\n assert self.r.ippool_get_ip('pool')['ip'] == '192.168.127.12'\n assert self.r.ippool_get_ip('pool')['ip'] == '172.16.31.10'\n assert self.r.ippool_get_ip('pool')['ip'] == '172.16.31.10'\n assert not self.r.ippool_get_ip('pool')\n\n def test_delegation_no_subnets(self):\n self.r.ippool_create('pool')\n assert not 
self.r.ippool_get_delegation('pool', 30)\n\n def test_delegation_full(self):\n self.r.ippool_create('pool')\n self.r.ipblock_create('192.168.3.11/8', status='Container')\n self.r.ippool_add_subnet('pool', '192.168.127.12/29', dont_reserve_network_broadcast=True)\n self.r.ipblock_create('192.168.127.12/30', status='Delegation')\n self.r.ipblock_create('172.16.31.10/30', status='Delegation')\n assert not self.r.ippool_get_delegation('pool', 31)\n\n def test_delegation_ipv6(self):\n self.r.ipblock_create('2001::/32', status='Container')\n self.r.ippool_create('pool')\n self.r.ippool_add_subnet('pool', '2001::/64')\n assert ips(self.r.ippool_get_delegation('pool', 96)) == set(['2001::1:0:0/96'])\n assert ips(self.r.ippool_get_delegation('pool', 96)) == set(['2001::2:0:0/96'])\n assert self.r.ipblock_get_ip('2001::1:0:0/96')['ip'] == '2001::1:0:0'\n\n def test_random(self):\n self.r.ipblock_create('192.168.3.11/8', status='Container')\n self.r.ippool_create('pool')\n self.r.ippool_set_attrs('pool', {'allocation_strategy': 'random'})\n self.r.ippool_add_subnet('pool', '192.168.3.11/28', dont_reserve_network_broadcast=True)\n delegations = []\n for _ in range(4):\n d = self.r.ippool_get_delegation('pool', 30)\n d_ip = d[0]['ip']\n delegations.extend(d)\n self.r.ipblock_set_attrs(d_ip, {'allocation_strategy': 'first'})\n ip1 = self.r.ipblock_get_ip(d_ip)['ip']\n ip2 = self.r.ipblock_get_ip(d_ip)['ip']\n assert ip1 < ip2\n assert len(delegations) == 4\n assert ips(delegations) == set(['192.168.3.11/30', '192.168.3.11/30', '192.168.127.12/30', '192.168.127.12/30'])\n assert not self.r.ippool_get_delegation('pool', 30)\n\n def test_random_big(self):\n self.r.ipblock_create('12::/32', status='Container')\n self.r.ippool_create('pool')\n self.r.ippool_set_attrs('pool', {'allocation_strategy': 'random'})\n self.r.ippool_add_subnet('pool', '12::/64')\n assert self.r.ippool_get_ip('pool')['ip']\n\n def test_fill_pool(self):\n self.r.ipblock_create('192.168.3.11/8', status='Container')\n self.r.ippool_create('pool')\n self.r.ippool_add_subnet('pool', '192.168.3.11/31', dont_reserve_network_broadcast=True)\n assert self.r.ippool_get_ip('pool')['ip'] == '192.168.3.11'\n assert self.r.ippool_get_ip('pool') is None\n\n def test_priorities(self):\n self.r.ipblock_create('192.0.0.0/8', status='Container')\n self.r.ippool_create('testpool')\n self.r.ippool_add_subnet('testpool', '192.168.0.0/24')\n self.r.ippool_add_subnet('testpool', '192.168.1.0/24')\n assert self.r.ippool_get_ip('testpool')['ip'] == '192.168.0.1'\n self.r.subnet_set_priority('192.168.1.0/24', 1)\n assert self.r.ippool_get_ip('testpool')['ip'] == '192.168.1.1'\n\n def test_delegation_not_subnet(self):\n self.r.ipblock_create('10::/32', status='Container')\n self.r.ippool_create('pool')\n self.r.ippool_add_subnet('pool', '10::/64', dont_reserve_network_broadcast=True)\n assert not self.r.ippool_get_delegation('pool', 64)\n assert ips(self.r.ippool_get_delegation('pool', 64, maxsplit=1)) == \\\n set(['10::/65', 'fdf8:f53e:61e4::18:0:0/65'])\n\n def test_pool_noversion(self):\n self.r.ippool_create('pool')\n assert self.r.ippool_get_ip('pool') is None\n assert self.r.ippool_get_delegation('pool', 30) == []\n", "id": "1131159", "language": "Python", "matching_score": 3.1202540397644043, "max_stars_count": 37, "path": "dim-testsuite/tests/allocator_test.py" }, { "content": "# -*- coding: utf-8 -*-\nfrom tests.util import RPCTest, raises, query_ip\nfrom dim.ipaddr import IP\nfrom dim.models import db, Pool, Ipblock, PoolAttr, Vlan\nfrom dim.errors 
import InvalidVLANError, AlreadyExistsError, InvalidIPError, InvalidStatusError, \\\n NotInPoolError, InvalidPriorityError, InvalidParameterError\n\n\ndef get_pool(name):\n return Pool.query.filter_by(name=name).first()\n\n\nclass PoolTest(RPCTest):\n def test_create_remove(self):\n with raises(InvalidVLANError):\n self.r.ippool_create('invalid', vlan=0)\n with raises(InvalidVLANError):\n self.r.ippool_create('invalid', vlan=4095)\n with raises(InvalidVLANError):\n self.r.ippool_create('invalid', vlan='test')\n self.r.ippool_create('valid', vlan='22')\n self.r.ippool_create('test')\n with raises(AlreadyExistsError):\n self.r.ippool_create('test')\n assert get_pool('test')\n assert self.r.ippool_delete('test')\n assert not get_pool('test')\n\n def test_create_remove_reserved(self):\n self.r.ipblock_create('192.168.127.12/8', status='Container')\n self.r.ippool_create('test')\n self.r.ippool_add_subnet('test', '192.168.127.12/25')\n assert self.r.ipblock_get_attrs('192.168.127.12')['status'] == 'Reserved'\n assert self.r.ipblock_get_attrs('172.16.31.10')['status'] == 'Reserved'\n self.r.ipblock_remove('192.168.127.12/25', status='Subnet', pool='test')\n self.r.ipblock_remove('192.168.127.12/8')\n assert self.r.ipblock_get_attrs('192.168.127.12')['status'] == 'Unmanaged'\n assert self.r.ipblock_get_attrs('172.16.31.10')['status'] == 'Unmanaged'\n assert self.r.ippool_delete('test')\n\n def test_force_remove(self):\n self.r.ipblock_create('192.168.127.12/8', status='Container')\n self.r.ippool_create('test', vlan=4)\n assert get_pool('test')\n self.r.ippool_add_subnet('test', '192.168.127.12/24')\n assert not self.r.ippool_delete('test')\n assert self.r.ippool_delete('test', force=True, delete_subnets=True)\n assert not get_pool('test')\n\n def test_create_invalid(self):\n with raises(ValueError):\n self.r.ippool_create('ü')\n with raises(InvalidParameterError):\n self.r.ippool_create('a' * 300)\n\n def test_rename(self):\n self.r.ippool_create('old')\n self.r.ippool_rename('old', 'new')\n assert not get_pool('old')\n assert get_pool('new')\n with raises(InvalidParameterError):\n self.r.ippool_rename('new', 'a' * 300)\n\n def test_reserves(self):\n self.r.ipblock_create('12::/32', status='Container')\n self.r.ippool_create('v6')\n self.r.ippool_add_subnet('v6', '12::/64')\n assert Ipblock.query.count() == 3\n assert self.r.ippool_delete('v6', force=True, delete_subnets=True)\n\n self.r.ipblock_create('192.168.127.12/8', status='Container')\n self.r.ippool_create('v4')\n self.r.ippool_add_subnet('v4', '192.168.127.12/23')\n assert query_ip('192.168.127.12').first().status.name == 'Reserved'\n assert query_ip('172.16.17.32').first().status.name == 'Reserved'\n assert query_ip('192.168.127.12').first().status.name == 'Reserved'\n assert query_ip('172.16.17.32').first().status.name == 'Reserved'\n assert self.r.ippool_delete('v4', force=True, delete_subnets=True)\n\n # .0 and .255 addresses should always be Reserved\n self.r.ippool_create('v4')\n self.r.ippool_add_subnet('v4', '192.168.127.12/23', dont_reserve_network_broadcast=True)\n assert query_ip('192.168.127.12').first().status.name == 'Reserved'\n assert query_ip('172.16.17.32').first().status.name == 'Reserved'\n assert query_ip('192.168.127.12').first().status.name == 'Reserved'\n assert query_ip('172.16.17.32').first().status.name == 'Reserved'\n assert self.r.ippool_delete('v4', force=True, delete_subnets=True)\n\n self.r.ippool_create('v4')\n self.r.ippool_add_subnet('v4', '172.16.58.3/26')\n assert 
query_ip('172.16.58.3').first().status.name == 'Reserved'\n assert query_ip('172.16.31.10').first().status.name == 'Reserved'\n assert self.r.ippool_delete('v4', force=True, delete_subnets=True)\n\n self.r.ippool_create('v4')\n self.r.ippool_add_subnet('v4', '172.16.58.3/26', dont_reserve_network_broadcast=True)\n assert query_ip('172.16.58.3').first() is None\n assert query_ip('172.16.31.10').first() is None\n assert self.r.ippool_delete('v4', force=True, delete_subnets=True)\n\n def test_attrs(self):\n self.r.ippool_create(\"control\", attributes={'team': '1'})\n\n create_attrs = {'country': 'ro', 'team': 'IT Operations'}\n self.r.ippool_create(\"pool_attrs\", attributes=create_attrs)\n\n self.assertDictSubset(self.r.ippool_get_attrs('pool_attrs'), create_attrs)\n self.r.ippool_set_attrs('pool_attrs', {'country': 'de'})\n self.assertDictSubset(self.r.ippool_get_attrs('pool_attrs'), {'country': 'de'})\n self.r.ippool_delete_attrs('pool_attrs', ['team'])\n assert 'team' not in self.r.ippool_get_attrs('pool_attrs')\n\n assert self.r.ippool_get_attrs('control')['team'] == '1'\n assert 'country' not in self.r.ippool_get_attrs('control')\n\n assert self.r.ippool_delete('control')\n assert self.r.ippool_delete('pool_attrs')\n assert PoolAttr.query.count() == 0\n\n def check_ippool_add_subnet(self, pool, subnet, **options):\n self.r.ippool_add_subnet(pool, subnet, **options)\n pool = Pool.query.filter_by(name=pool).one()\n subnet = query_ip(subnet).one()\n gateway = None\n if 'gateway' in options:\n gateway = IP(options.get('gateway')).address\n assert subnet.pool == pool\n assert subnet.version == pool.version\n assert subnet.vlan == pool.vlan\n assert subnet.gateway == gateway\n self.assertEqual(dict((a.name.name, a.value) for a in subnet.attributes), options.get('attributes', {}))\n\n def test_create_complex(self):\n for prefix in range(12, 17):\n self.r.ipblock_create('%d.0.0.0/8' % prefix, status='Container')\n self.r.ipblock_create('2001::/16', status='Container')\n self.r.ippool_create(\"pool1\", vlan=4)\n self.check_ippool_add_subnet('pool1', '192.168.127.12/24', gateway='192.168.127.12')\n self.check_ippool_add_subnet('pool1', '172.16.17.32/24', gateway='192.168.127.12')\n self.check_ippool_add_subnet('pool1', '172.16.31.10/23')\n self.check_ippool_add_subnet('pool1', '192.168.3.11/24', attributes={'country': 'ro', 'team': 'IP Operations'})\n with raises(AlreadyExistsError):\n self.r.ippool_add_subnet('pool1', '192.168.127.12/24')\n with raises(InvalidIPError):\n self.r.ippool_add_subnet('pool1', '2001:db8::/32')\n self.r.ipblock_create('172.16.31.10/24', status='Container')\n with raises(InvalidStatusError):\n self.r.ippool_add_subnet('pool1', '172.16.31.10/24')\n self.r.ippool_create(\"pool2\", vlan=4)\n self.check_ippool_add_subnet('pool2', '172.16.31.10/24')\n with raises(AlreadyExistsError):\n self.r.ippool_add_subnet('pool1', '172.16.31.10/24')\n assert self.r.ipblock_remove('172.16.31.10/24', pool='pool2') == 1\n self.r.ippool_add_subnet('pool1', '172.16.31.10/24')\n with raises(NotInPoolError):\n self.r.ipblock_remove('172.16.31.10/24', pool='pool2', status='Subnet')\n assert self.r.ipblock_remove('172.16.31.10/24', pool='pool1') == 1\n self.r.ippool_set_vlan('pool2', 5)\n self.check_ippool_add_subnet('pool2', '172.16.31.10/24')\n assert self.r.ippool_delete('pool1', force=True, delete_subnets=True)\n assert self.r.ippool_delete('pool2', force=True, delete_subnets=True)\n\n def test_subnet_vlan(self):\n self.r.ipblock_create('172.16.31.10/8', status='Container')\n 
self.r.ippool_create(\"pool2\", vlan=5)\n self.r.ippool_add_subnet('pool2', '172.16.31.10/24')\n self.r.ippool_create(\"pool\", vlan=4)\n assert Ipblock.query_ip(IP('172.16.31.10/24'), None).one().vlan.vid == 5\n self.r.ippool_add_subnet('pool', '172.16.31.10/24', allow_move=True)\n assert Ipblock.query_ip(IP('172.16.31.10/24'), None).one().vlan.vid == 4\n\n def test_list_ippools(self):\n self.r.ipblock_create('172.16.17.32/8', status='Container')\n self.r.ippool_create(\"pool1\", vlan=5)\n assert self.r.ippool_list(pool='pool1')[0]['subnets'] == []\n pool1_nets = ['172.16.17.32/24', '172.16.31.10/24']\n for net in pool1_nets:\n self.r.ippool_add_subnet(\"pool1\", net)\n self.r.ippool_create(\"pool2\")\n pool2_nets = ['172.16.31.10/24']\n for net in pool2_nets:\n self.r.ippool_add_subnet(\"pool2\", net)\n l1 = self.r.ippool_list(pool='pool1')\n assert l1[0]['name'] == 'pool1'\n assert l1[0]['vlan'] == 5\n self.assertEqual(set(l1[0]['subnets']), set(pool1_nets))\n l2 = self.r.ippool_list(pool='pool2')\n assert l2[0]['name'] == 'pool2'\n assert l2[0]['vlan'] is None\n self.assertEqual(set(l2[0]['subnets']), set(pool2_nets))\n\n data_set = [(dict(pool='*'), ['pool1', 'pool2']),\n (dict(pool='*1'), ['pool1']),\n (dict(vlan=5), ['pool1']),\n (dict(cidr='172.16.17.32/8'), ['pool1', 'pool2']),\n (dict(cidr='172.16.31.10/24'), ['pool2']),\n (dict(cidr='172.16.31.10/16'), ['pool1', 'pool2']),\n ]\n for params, pools in data_set:\n self.assertEqual(set(p['name'] for p in self.r.ippool_list(**params)),\n set(pools))\n self.assertEqual(set(p['name'] for p in self.r.ippool_list(include_subnets=False, **params)),\n set(pools))\n\n def test_list_ippools_pagination(self):\n def query(limit=None, offset=0):\n pools = self.r.ippool_list(pool='pool*', include_subnets=True, limit=limit, offset=offset)\n return set([p['name'] for p in pools])\n pools = []\n for i in range(5):\n pools.append('pool%d' % (i,))\n self.r.ippool_create(pools[-1])\n self.r.ipblock_create('172.16.17.32/8', status='Container')\n self.r.ippool_add_subnet(\"pool0\", '172.16.31.10/24')\n self.r.ippool_add_subnet(\"pool0\", '172.16.58.3/24')\n self.r.ippool_add_subnet(\"pool0\", '172.16.58.3/24')\n assert self.r.ippool_count(pool='pool*') == 5\n assert self.r.ippool_count(cidr='172.16.17.32/16') == 1\n assert query() == set(pools)\n assert query(limit=2) == set(pools[:2])\n assert query(limit=2, offset=2) == set(pools[2:4])\n assert query(limit=2, offset=4) == set(pools[4:])\n\n def test_priority(self):\n self.r.ipblock_create('172.16.17.32/8', status='Container')\n self.r.ipblock_create('172.16.17.32/8', status='Container')\n self.r.ippool_create(\"pool\")\n self.r.ippool_add_subnet(\"pool\", '172.16.17.32/24', gateway='172.16.17.32')\n self.r.ippool_add_subnet(\"pool\", '172.16.31.10/24')\n self.r.ippool_add_subnet(\"pool\", '172.16.58.3/24')\n l = self.r.ippool_get_subnets('pool')\n self.assertDictSubset(l[0], {'subnet': '172.16.17.32/24', 'priority': 1, 'gateway': '172.16.17.32'})\n self.assertDictSubset(l[1], {'subnet': '172.16.31.10/24', 'priority': 2})\n self.assertDictSubset(l[2], {'subnet': '172.16.58.3/24', 'priority': 3})\n\n self.r.ipblock_remove('172.16.31.10/24', pool='pool', status='Subnet')\n self.r.ippool_add_subnet('pool', '172.16.58.3/24')\n l = self.r.ippool_get_subnets('pool')\n self.assertDictSubset(l[0], {'subnet': '172.16.17.32/24', 'priority': 1})\n self.assertDictSubset(l[1], {'subnet': '172.16.58.3/24', 'priority': 3})\n self.assertDictSubset(l[2], {'subnet': '172.16.58.3/24', 'priority': 4})\n\n 
self.r.ipblock_create('192.168.127.12/24', status='Container')\n with raises(InvalidStatusError):\n self.r.subnet_set_priority('192.168.127.12/24', 1)\n with raises(InvalidPriorityError):\n self.r.subnet_set_priority('172.16.17.32/24', 'g')\n with raises(InvalidPriorityError):\n self.r.subnet_set_priority('172.16.17.32/24', 0)\n self.r.subnet_set_priority('172.16.17.32/24', '1')\n\n self.r.subnet_set_priority('172.16.58.3/24', 1, pool='pool')\n l = self.r.ippool_get_subnets('pool')\n self.assertDictSubset(l[0], {'subnet': '172.16.58.3/24', 'priority': 1})\n self.assertDictSubset(l[1], {'subnet': '172.16.17.32/24', 'priority': 2})\n self.assertDictSubset(l[2], {'subnet': '172.16.58.3/24', 'priority': 4})\n\n self.r.ippool_add_subnet('pool', '172.16.31.10/24')\n self.r.ippool_add_subnet('pool', '192.168.127.12/24')\n self.r.subnet_set_priority('192.168.127.12/24', 3, pool='pool')\n l = self.r.ippool_get_subnets('pool')\n self.assertDictSubset(l[0], {'subnet': '172.16.58.3/24', 'priority': 1})\n self.assertDictSubset(l[1], {'subnet': '172.16.17.32/24', 'priority': 2})\n self.assertDictSubset(l[2], {'subnet': '192.168.127.12/24', 'priority': 3})\n self.assertDictSubset(l[3], {'subnet': '172.16.58.3/24', 'priority': 4})\n self.assertDictSubset(l[4], {'subnet': '172.16.31.10/24', 'priority': 5})\n\n self.r.subnet_set_priority('192.168.127.12/24', 1, pool='pool')\n l = self.r.ippool_get_subnets('pool')\n self.assertDictSubset(l[0], {'subnet': '192.168.127.12/24', 'priority': 1})\n self.assertDictSubset(l[1], {'subnet': '172.16.58.3/24', 'priority': 2})\n self.assertDictSubset(l[2], {'subnet': '172.16.17.32/24', 'priority': 3})\n self.assertDictSubset(l[3], {'subnet': '172.16.58.3/24', 'priority': 4})\n self.assertDictSubset(l[4], {'subnet': '172.16.31.10/24', 'priority': 5})\n\n def test_free_total(self):\n self.r.ipblock_create('0::/8', status='Container')\n self.r.ippool_create(\"pool\")\n self.r.ippool_add_subnet(\"pool\", '12::/16')\n total = 2 ** (128 - 16)\n free = total - 1\n l = self.r.ippool_get_subnets('pool')\n assert l[0]['total'] == total\n assert l[0]['free'] == free\n\n self.r.ipblock_create('12:13::/32', status='Delegation')\n free -= 2 ** (128 - 32)\n assert self.r.ippool_get_subnets('pool')[0]['free'] == free\n\n for i in range(3):\n self.r.ipblock_create('12:2:%d::/48' % i, status='Delegation')\n free -= 3 * 2 ** (128 - 48)\n assert self.r.ippool_get_subnets('pool')[0]['free'] == free\n\n self.r.ip_mark('fc00:db20:35b:7399::5')\n free -= 1\n assert self.r.ippool_get_subnets('pool')[0]['free'] == free\n\n def test_get_subnets_without_usage(self):\n self.r.ipblock_create('0::/8', status='Container')\n self.r.ippool_create(\"pool\")\n self.r.ippool_add_subnet(\"pool\", '12::/16')\n self.r.ippool_add_subnet(\"pool\", '16::/16')\n assert [s['subnet'] for s in self.r.ippool_get_subnets('pool')] == ['12::/16', '16::/16']\n\n def test_get_delegations(self):\n self.r.ipblock_create('0::/8', status='Container')\n self.r.ippool_create(\"pool\")\n self.r.ippool_add_subnet(\"pool\", '12::/16', dont_reserve_network_broadcast=True)\n self.assertEqual(self.r.ippool_get_delegations(\"pool\"), [])\n\n self.r.ippool_get_delegation(\"pool\", 120)\n self.r.ippool_get_delegation(\"pool\", 120)\n self.assertEqual(self.r.ippool_get_delegations(\"pool\"),\n [{'delegation': '12::/120', 'total': 256, 'free': 256},\n {'delegation': 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/120', 'total': 256, 'free': 256}])\n\n def test_favorite(self):\n self.r.ippool_create('p')\n self.r.ippool_create('p2')\n assert not 
self.r.ippool_favorite('p')\n assert len(self.r.ippool_list(favorite_only=True)) == 0\n\n self.r.ippool_favorite_add('p')\n assert self.r.ippool_favorite('p')\n assert not self.r.ippool_favorite('p2')\n favorites = self.r.ippool_list(favorite_only=True)\n assert favorites[0]['name'] == 'p'\n assert len(favorites) == 1\n assert len(self.r.ippool_list(favorite_only=False)) == 2\n\n self.r.ippool_favorite_remove('p')\n assert not self.r.ippool_favorite('p')\n assert len(self.r.ippool_list(favorite_only=True)) == 0\n", "id": "10685987", "language": "Python", "matching_score": 4.96539306640625, "max_stars_count": 37, "path": "dim-testsuite/tests/pool_test.py" }, { "content": "from datetime import datetime, timedelta\nfrom contextlib import contextmanager\nfrom dim.models import db, Ipblock, IpblockAttr\nfrom dim.ipaddr import IP\nfrom dim.errors import InvalidIPError, AlreadyExistsError, InvalidPoolError, InvalidStatusError, DimError\nfrom tests.util import RPCTest, raises, query_ip\n\n\ndef get_ipblock(ip):\n return query_ip(ip).one()\n\n\ndef ip_status(l):\n return [dict(ip=d['ip'], status=d['status']) for d in l]\n\n\ndef ips(l):\n return [d['ip'] for d in l]\n\n\n@contextmanager\ndef modifies(block, should=True):\n ipb = get_ipblock(block)\n ipb.modified = datetime.utcnow() - timedelta(days=1)\n ipb.modified_by = 'dummy_modifies'\n db.session.commit()\n yield\n ipb = get_ipblock(block)\n was_modified = datetime.utcnow() - ipb.modified < timedelta(minutes=1)\n assert should == was_modified\n if was_modified:\n assert ipb.modified_by != 'dummy_modifies'\n else:\n assert ipb.modified_by == 'dummy_modifies'\n\n\nclass IpblockTest(RPCTest):\n def test_create(self):\n self.r.ip_mark('192.168.127.12')\n self.r.ipblock_create('172.16.17.32/16', status='Container')\n self.r.ip_mark('192.168.3.11')\n self.r.ipblock_create('172.16.17.32/24', status='Container')\n self.r.ip_mark('172.16.31.10')\n assert get_ipblock('172.16.17.32/16').parent is None\n assert get_ipblock('172.16.17.32/24').parent.ip == IP('172.16.17.32/16')\n assert get_ipblock('192.168.127.12').parent.ip == IP('172.16.17.32/24')\n assert get_ipblock('192.168.3.11').parent.ip == IP('172.16.17.32/24')\n assert get_ipblock('172.16.31.10').parent.ip == IP('172.16.17.32/24')\n with raises(AlreadyExistsError):\n self.r.ipblock_create('172.16.17.32/16')\n\n def test_create_errors(self):\n with raises(InvalidIPError):\n self.r.ipblock_create('192.168.127.12/24')\n\n def test_delete1(self):\n self.r.ipblock_create('172.16.17.32/16', status='Container')\n self.r.ipblock_create('172.16.17.32/24', status='Container')\n self.r.ip_mark('192.168.127.12')\n\n with raises(DimError):\n self.r.ipblock_remove('172.16.17.32/24')\n self.r.ip_free('192.168.127.12')\n assert self.r.ipblock_remove('172.16.17.32/24')\n self.r.ip_mark('192.168.127.12')\n assert Ipblock.query.count() == 2\n assert query_ip('172.16.17.32/24').count() == 0\n\n self.r.ipblock_remove('172.16.17.32/16', force=True)\n assert Ipblock.query.count() == 1\n assert query_ip('172.16.17.32/16').count() == 0\n\n def test_recursive_delete1(self):\n self.r.ipblock_create('172.16.17.32/16', status='Container')\n self.r.ipblock_create('172.16.17.32/24', status='Container')\n self.r.ip_mark('192.168.127.12')\n self.r.ipblock_remove('172.16.17.32/16', force=True, recursive=True)\n assert Ipblock.query.count() == 0\n\n def test_recursive_delete2(self):\n self.r.ipblock_create('12::/16', status='Container')\n self.r.ipblock_create('12::/24', status='Container')\n 
self.r.ip_mark('fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b')\n self.r.ipblock_remove('12::/24', force=True, recursive=True)\n assert Ipblock.query.count() == 1\n\n def test_parents(self):\n self.r.ipblock_create('192.168.0.0/16', status='Container')\n self.r.ip_mark('192.168.0.1')\n assert get_ipblock('192.168.0.1').parent.ip == IP('192.168.0.0/16')\n\n self.r.ipblock_create('192.168.0.0/24', status='Container')\n assert get_ipblock('192.168.0.1').parent.ip == IP('192.168.0.0/24')\n\n self.r.ipblock_remove('192.168.0.0/24', force=True)\n assert get_ipblock('192.168.0.1').parent.ip == IP('192.168.0.0/16')\n\n self.r.ipblock_create('192.168.0.0/24', status='Container')\n assert get_ipblock('192.168.0.1').parent.ip == IP('192.168.0.0/24')\n\n def test_ip_mark(self):\n self.r.ipblock_create('172.16.17.32/8', status='Container')\n self.r.ippool_create('pool')\n self.r.ippool_add_subnet('pool', '172.16.17.32/24')\n with raises(InvalidPoolError):\n self.r.ip_mark('192.168.3.11', pool='test')\n self.r.ip_mark('192.168.3.11')\n with raises(AlreadyExistsError):\n self.r.ip_mark('192.168.3.11')\n assert self.r.ip_mark('172.16.31.10')['status'] == 'Static'\n with raises(AlreadyExistsError):\n self.r.ip_mark('172.16.31.10')\n assert self.r.ip_free('172.16.31.10') == 1\n assert self.r.ip_free('172.16.31.10') == 0\n assert self.r.ip_free('5.0.0.5') == 0\n assert self.r.ipblock_get_attrs('172.16.31.10')['status'] == 'Available'\n assert self.r.ip_mark('172.16.31.10')['status'] == 'Static'\n assert self.r.ipblock_get_attrs('172.16.17.32')['status'] == 'Reserved'\n assert self.r.ip_free('172.16.17.32') == -1\n assert self.r.ip_free('172.16.17.32', reserved=True) == 1\n assert self.r.ipblock_get_attrs('172.16.17.32')['status'] == 'Available'\n\n def test_attrs(self):\n self.r.ipblock_create('172.16.17.32/24', attributes={'team': '1'})\n\n create_attrs = {'country': 'ro', 'team': 'IT Operations'}\n self.r.ipblock_create('172.16.58.3/24', attributes=create_attrs)\n\n self.assertDictSubset(self.r.ipblock_get_attrs('172.16.58.3/24'), create_attrs)\n self.r.ipblock_set_attrs('172.16.58.3/24', {'country': 'de'})\n self.assertDictSubset(self.r.ipblock_get_attrs('172.16.58.3/24'), {'country': 'de'})\n self.r.ipblock_delete_attrs('172.16.58.3/24', ['team'])\n assert 'team' not in self.r.ipblock_get_attrs('172.16.58.3/24')\n\n assert self.r.ipblock_get_attrs('172.16.17.32/24')['team'] == '1'\n assert 'country' not in self.r.ipblock_get_attrs('172.16.17.32/24')\n\n assert self.r.ipblock_remove('172.16.17.32/24')\n assert self.r.ipblock_remove('172.16.58.3/24')\n assert IpblockAttr.query.count() == 0\n with raises(InvalidIPError):\n self.r.ipblock_get_attrs('172.16.17.32/24')\n\n def test_invalid_attrs(self):\n self.r.ipblock_create('172.16.17.32/24')\n with raises(Exception):\n self.r.ipblock_set_attrs('172.16.17.32/24', {'ip': '192.168.127.12'})\n with raises(Exception):\n self.r.ipblock_set_attrs('172.16.17.32/24', {'-a': 'b'})\n\n def test_system_attrs(self):\n self.r.ipblock_create('172.16.17.32/16', status='Container')\n self.r.ippool_create('test')\n self.r.ippool_add_subnet('test', '172.16.17.32/24', gateway='192.168.127.12')\n self.r.ipblock_create('172.16.31.10/26', status='Delegation')\n self.r.ip_mark('172.16.31.10')\n self.assertDictSubset(self.r.ipblock_get_attrs('172.16.17.32'),\n {'ip': '172.16.17.32',\n 'status': 'Reserved',\n 'subnet': '172.16.17.32/24',\n 'gateway': '192.168.127.12',\n 'mask': '255.255.255.0',\n 'pool': 'test'})\n self.assertDictSubset(self.r.ipblock_get_attrs('172.16.31.10'),\n {'ip': 
'172.16.31.10',\n 'status': 'Available',\n 'delegation': '172.16.31.10/26',\n 'subnet': '172.16.17.32/24',\n 'gateway': '192.168.127.12',\n 'mask': '255.255.255.0',\n 'pool': 'test'})\n self.r.subnet_remove_gateway('172.16.17.32/24')\n assert 'gateway' not in self.r.ipblock_get_attrs('172.16.31.10')\n self.r.subnet_set_gateway('172.16.17.32/24', '1.0.0.0')\n assert self.r.ipblock_get_attrs('172.16.31.10')['gateway'] == '1.0.0.0'\n self.assertEqual(self.r.ipblock_get_attrs('1.0.0.0')['status'], 'Unmanaged')\n\n def test_system_attrs6(self):\n self.r.ipblock_create('12::/32', status='Container')\n self.r.ippool_create('test')\n self.r.ippool_add_subnet('test', '12::/64', gateway='fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b')\n self.r.ipblock_create('fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b/122', status='Delegation')\n self.r.ip_mark('fdf8:f53e:61e4::18')\n self.assertDictSubset(self.r.ipblock_get_attrs('fdf8:f53e:61e4::18'),\n {'ip': 'fdf8:f53e:61e4::18',\n 'status': 'Static',\n 'subnet': '12::/64',\n 'delegation': 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b/122',\n 'gateway': 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b',\n 'prefixlength': 64,\n 'pool': 'test'})\n\n def test_modified(self):\n self.r.ipblock_create('172.16.17.32/8', status='Container')\n self.r.ip_mark('192.168.127.12')\n self.r.ippool_create('test')\n with modifies('192.168.127.12'):\n self.r.ipblock_set_attrs('192.168.127.12', {'test': 1})\n assert self.r.ipblock_get_attrs('192.168.127.12')['test'] == '1'\n with modifies('192.168.127.12'):\n self.r.ipblock_delete_attrs('192.168.127.12', ['test'])\n with modifies('192.168.127.12', False):\n self.r.ippool_add_subnet('test', '172.16.17.32/24', dont_reserve_network_broadcast=True)\n with modifies('192.168.127.12', False):\n self.r.ipblock_create('172.16.17.32/26', status='Delegation')\n with modifies('192.168.127.12', False):\n with modifies('172.16.17.32/24'):\n self.r.subnet_set_gateway('172.16.17.32/24', '192.168.127.12')\n with modifies('172.16.17.32/24'):\n self.r.subnet_remove_gateway('172.16.17.32/24')\n with modifies('172.16.17.32/24'):\n self.r.subnet_set_priority('172.16.17.32/24', 2)\n with modifies('172.16.17.32/24'):\n self.r.ippool_set_vlan('test', 1)\n with modifies('172.16.17.32/24'):\n self.r.ippool_remove_vlan('test')\n with modifies('192.168.127.12', False):\n assert self.r.ipblock_remove('172.16.17.32/24', status='Subnet', force=True)\n\n def test_list_ipsv4(self):\n self.r.ipblock_create('172.16.17.32/8', status='Container')\n self.r.ippool_create('test')\n self.r.ippool_add_subnet('test', '172.16.17.32/24')\n self.r.ip_mark('172.16.31.10')\n assert ip_status(self.r.ip_list(pool='*', type='used')) == \\\n [{'ip': '172.16.31.10', 'status': 'Static'}]\n self.r.ip_mark('192.168.3.11', pool='test')\n assert ip_status(self.r.ip_list(pool='*', type='used')) == \\\n [{'ip': '172.16.31.10', 'status': 'Static'},\n {'ip': '192.168.3.11', 'status': 'Static'}]\n assert ip_status(self.r.ip_list(pool='*', type='used', limit=1)) == \\\n [{'ip': '172.16.31.10', 'status': 'Static'}]\n assert ip_status(self.r.ip_list(pool='*', type='free', limit=5)) == \\\n [{'ip': '192.168.127.12', 'status': 'Available'},\n {'ip': '192.168.3.11', 'status': 'Available'},\n {'ip': '172.16.31.10', 'status': 'Available'},\n {'ip': '192.168.3.11', 'status': 'Available'},\n {'ip': '192.168.127.12', 'status': 'Available'}]\n assert ip_status(self.r.ip_list(pool='*', type='all', limit=7)) == \\\n [{'ip': '172.16.17.32', 'status': 'Reserved'},\n {'ip': '192.168.127.12', 'status': 'Available'},\n {'ip': 
'192.168.3.11', 'status': 'Available'},\n {'ip': '172.16.31.10', 'status': 'Available'},\n {'ip': '192.168.3.11', 'status': 'Available'},\n {'ip': '172.16.31.10', 'status': 'Static'},\n {'ip': '192.168.127.12', 'status': 'Available'}]\n\n def test_list_ipsv6(self):\n self.r.ipblock_create('11::/16', status='Container')\n self.r.ippool_create('poolv6')\n self.r.ippool_add_subnet('poolv6', '11::/126')\n self.r.ip_mark('fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b', pool='poolv6')\n assert ip_status(self.r.ip_list(pool='poolv6', type='all')) == \\\n [{'ip': '11::', 'status': 'Reserved'},\n {'ip': 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b', 'status': 'Static'},\n {'ip': 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b', 'status': 'Available'},\n {'ip': 'fdf8:f53e:61e4::18', 'status': 'Available'}]\n assert ip_status(self.r.ip_list(pool='poolv6', type='all', full=1)) == \\\n [{'ip': 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b', 'status': 'Reserved'},\n {'ip': 'fc00:db20:35b:7399::5', 'status': 'Static'},\n {'ip': 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b', 'status': 'Available'},\n {'ip': 'fc00:db20:35b:7399::5', 'status': 'Available'}]\n\n def test_list_ips_after(self):\n self.r.ipblock_create('172.16.58.3/8', status='Container')\n self.r.ippool_create('pool1', vlan=5)\n self.r.ippool_add_subnet('pool1', '172.16.17.32/22')\n self.r.ippool_add_subnet('pool1', '172.16.58.3/22')\n subnet = IP('172.16.58.3')\n\n def nth(n):\n return str(IP(subnet.address + n, prefix=32, version=4))\n\n def iprange(start, end):\n return [nth(i) for i in range(start, end + 1)]\n assert ips(self.r.ip_list(pool='pool1', type='all', limit=5)) == iprange(0, 4)\n assert ips(self.r.ip_list(pool='pool1', type='all', after='192.168.127.12', limit=5)) == iprange(5, 9)\n assert ips(self.r.ip_list(pool='*', type='all', after='192.168.127.12', limit=49)) == iprange(3 * 256 + 207 + 1, 3 * 256 + 207 + 49)\n assert ips(self.r.ip_list(pool='*', type='all', offset=3 * 256 + 207 + 1, limit=49))\\\n == iprange(3 * 256 + 207 + 1, 3 * 256 + 207 + 49)\n\n def test_list_ips_offset(self):\n def iprange(start, end):\n return [str(IP(IP('172.16.58.3').address + i, prefix=32, version=4)) for i in range(start, end + 1)]\n self.r.ipblock_create('172.16.58.3/8', status='Container')\n self.r.ippool_create('pool1', vlan=5)\n self.r.ippool_add_subnet('pool1', '172.16.58.3/30', dont_reserve_network_broadcast=True)\n self.r.ippool_add_subnet('pool1', '192.168.127.12/30', dont_reserve_network_broadcast=True)\n self.r.ip_mark('172.16.31.10')\n self.r.ip_mark('172.16.58.3')\n assert ips(self.r.ip_list(pool='pool1', type='all', limit=3, offset=1)) == iprange(1, 3)\n assert ips(self.r.ip_list(pool='pool1', type='all', limit=3, offset=4)) == iprange(4, 6)\n assert ips(self.r.ip_list(pool='pool1', type='all', limit=3, offset=3)) == iprange(3, 5)\n free = ['172.16.31.10', '172.16.17.32', '192.168.127.12', '192.168.3.11', '192.168.127.12']\n assert ips(self.r.ip_list(pool='pool1', type='free', limit=5)) == free\n for i in range(5):\n assert ips(self.r.ip_list(pool='pool1', type='free', limit=1, offset=i)) == [free[i]]\n assert ips(self.r.ip_list(pool='pool1', type='used', limit=3)) == ['172.16.31.10', '172.16.58.3']\n assert ips(self.r.ip_list(pool='pool1', type='used', limit=3, offset=1)) == ['172.16.58.3']\n\n def test_list_ip_attrs(self):\n self.r.ipblock_create('172.16.58.3/8', status='Container')\n self.r.ipblock_create('172.16.31.10/8', status='Container')\n self.r.ippool_create('pool')\n self.r.ippool_add_subnet('pool', '172.16.17.32/22')\n 
self.r.ippool_add_subnet('pool', '172.16.31.10/22')\n self.r.ippool_get_ip('pool', attributes={'k1': 'v1', 'k2': 'v2'})\n self.r.ippool_get_ip('pool', attributes={'k3': 'v3'})\n self.r.ippool_get_ip('pool')\n\n ips = self.r.ip_list(pool='pool', type='all', limit=5)\n self.assertDictSubset(ips[0], {'ip': '172.16.17.32', 'modified_by': 'test_user', 'status': 'Reserved'})\n self.assertDictSubset(ips[1], {'ip': '192.168.127.12', 'modified_by': 'test_user', 'status': 'Static', 'k1': 'v1', 'k2': 'v2'})\n self.assertDictSubset(ips[2], {'ip': '192.168.127.12', 'modified_by': 'test_user', 'status': 'Static', 'k3': 'v3'})\n self.assertDictSubset(ips[3], {'ip': '192.168.127.12', 'modified_by': 'test_user', 'status': 'Static'})\n self.assertDictSubset(ips[4], {'ip': '172.16.31.10', 'status': 'Available'})\n\n ips = self.r.ip_list(pool='pool', type='all', limit=5, attributes=['k1', 'k3', 'modified_by'])\n assert set(ips[1].keys()) == set(['ip', 'status', 'modified_by', 'k1', 'layer3domain'])\n assert 'k1' in ips[1]\n assert 'k2' not in ips[1]\n assert 'k3' in ips[2]\n\n def test_list_ip_attrs_ptr(self):\n self.r.ipblock_create('172.16.58.3/8', status='Container')\n self.r.ipblock_create('14.0.0.0/8', status='Container')\n self.r.zone_create('test')\n self.r.ippool_create('pool')\n self.r.ippool_add_subnet('pool', '172.16.17.32/22')\n self.r.ippool_add_subnet('pool', '172.16.31.10/22')\n self.r.rr_create_from_pool('a.test.', 'pool', attributes={'k1': 'v1', 'k2': 'v2'})\n self.r.rr_create_from_pool('b.test.', 'pool', attributes={'k3': 'v3'})\n self.r.rr_create_from_pool('c.test.', 'pool')\n import pprint\n pprint.pprint(self.r.ip_list(pool='pool', type='all', limit=5, attributes=None))\n\n def test_list_ip_attrs_pool(self):\n def has_pool(ips, pool):\n for ip in ips:\n if ip.get('pool', None) != pool:\n return False\n return True\n self.r.ippool_create('pool')\n self.r.ippool_create('pool2')\n self.r.ipblock_create('172.16.58.3/8', status='Container')\n self.r.ippool_add_subnet('pool', '172.16.17.32/30')\n self.r.ippool_add_subnet('pool2', '172.16.31.10/30')\n assert has_pool(self.r.ip_list(cidr='172.16.17.32/30', attributes=['pool']), 'pool')\n assert has_pool(self.r.ip_list(cidr='172.16.17.32/31', attributes=['pool']), 'pool')\n assert has_pool(self.r.ip_list(cidr='172.16.17.32/32', attributes=['pool']), 'pool')\n assert has_pool(self.r.ip_list(cidr='172.16.31.10/30', attributes=['pool']), 'pool2')\n self.r.ip_list(cidr='172.16.17.32/28', attributes=['pool'])\n # TODO fix pool attr code for ip_list()\n # assert has_pool(both[:4], 'pool')\n # assert has_pool(both[4:], 'pool2')\n\n def test_list_containers_edge(self):\n assert self.r.container_list() == []\n with raises(InvalidIPError):\n self.r.container_list('172.16.17.32/24')\n self.r.ipblock_create('172.16.17.32/8', status='Container')\n self.r.ippool_create('pool')\n self.r.ippool_add_subnet('pool', '172.16.17.32/24')\n with raises(InvalidStatusError):\n self.r.container_list('172.16.17.32/24')\n\n def test_list_containers_simple(self):\n self.r.ipblock_create('0.0.0.0/16', status='Container')\n self.r.ipblock_create('0.0.160.0/19', status='Container')\n self.r.ipblock_create('0.0.192.0/19', status='Container')\n assert ips(self.r.container_list()[0]['children']) == [\n '0.0.0.0/17',\n '0.0.128.0/19',\n '0.0.160.0/19',\n '0.0.192.0/19',\n '0.0.224.0/19']\n\n def test_list_containers_sort(self):\n self.r.ipblock_create('2001:8d8::/54', status='Container')\n self.r.ippool_create('pool6')\n self.r.ippool_add_subnet('pool6', '2001:8d8::/56')\n assert 
ips(self.r.container_list('2001:8d8::/54')[0]['children']) == [\n '2001:8d8::/56',\n '2001:8d8:0:100::/56',\n '2001:8d8:0:200::/55']\n\n def test_list_containers_v6(self):\n self.r.ipblock_create('2001:db8:0:f00::/64', status='Container')\n self.r.ipblock_create('2001:db8:0:f00:22::/80', status='Container')\n assert ips(self.r.container_list('2001:db8:0:f00::/64')[0]['children']) == [\n '2001:db8:0:f00::/75',\n '2001:db8:0:f00:20::/79',\n '2001:db8:0:f00:22::/80',\n '2001:db8:0:f00:23::/80',\n '2001:db8:0:f00:24::/78',\n '2001:db8:0:f00:28::/77',\n '2001:db8:0:f00:30::/76',\n '2001:db8:0:f00:40::/74',\n '2001:db8:0:f00:80::/73',\n '2001:db8:0:f00:100::/72',\n '2001:db8:0:f00:200::/71',\n '2001:db8:0:f00:400::/70',\n '2001:db8:0:f00:800::/69',\n '2001:db8:0:f00:1000::/68',\n '2001:db8:0:f00:2000::/67',\n '2001:db8:0:f00:4000::/66',\n '2001:db8:0:f00:8000::/65']\n\n def test_ip_list_attributes(self):\n self.r.ipblock_create('172.16.17.32/8', status='Container')\n self.r.ippool_create('test')\n self.r.ippool_add_subnet('test', '172.16.17.32/24')\n self.r.rr_create(ip='192.168.127.12', type='A', name='t.0.0.12.in-addr.arpa.', create_linked=False)\n assert 'ptr_target' not in self.r.ip_list(pool='test', attributes=['ptr_target'], type='used')[0]\n self.r.rr_create(ip='192.168.127.12', type='PTR', ptrdname='test.com.')\n assert self.r.ip_list(pool='test', attributes=['ptr_target'], type='used')[0]['ptr_target'] == 'test.com.'\n\n # test for ND-94\n def test_add_subnet(self):\n self.r.ippool_create('test')\n # allocate ip without a subnet\n self.r.rr_create(name='some.domain.', type='A', ip='1.1.1.0')\n self.r.ipblock_create('1.0.0.0/8', status='Container')\n self.r.ippool_add_subnet('test', '1.1.1.0/24')\n\n # test for GPHDIM-432\n def test_delegation_no_parent(self):\n with raises(Exception):\n self.r.ipblock_create('1.0.0.0/30', status='Delegation')\n\n # test for GPHDIM-432\n def test_subnet_no_pool(self):\n self.r.ipblock_create('1.0.0.0/24', status='Container')\n with raises(Exception):\n self.r.ipblock_create('1.0.0.0/30', status='Subnet')\n", "id": "3698425", "language": "Python", "matching_score": 1.1469271183013916, "max_stars_count": 37, "path": "dim-testsuite/tests/ipblock_test.py" }, { "content": "\n\nimport six\nfrom ipaddress import ip_address, IPv4Address, IPv6Address, ip_network\n\n\ndef valid_block(addr):\n try:\n ip_network(six.text_type(addr))\n return True\n except ValueError:\n return False\n\n\nclass IP(object):\n slots = ('version', 'address', 'prefix')\n\n def __init__(self, address, prefix=None, version=None, auto_correct=False):\n '''\n :param auto_correct: if the address has bits set outside its netmask they will be cleared\n '''\n if isinstance(address, six.string_types):\n address = six.text_type(address)\n s = address.split('/')\n if len(s) > 2:\n raise ValueError('Bad prefix')\n if len(s) == 2:\n ip = ip_address(s[0])\n self.prefix = int(s[1])\n self.version = ip.version\n else:\n ip = ip_address(address)\n self.prefix = ip.max_prefixlen\n self.address = ip._ip\n self.version = ip.version\n else:\n self.address = address\n self.version = version\n self.prefix = prefix if prefix is not None else self.bits\n if self.version not in (4, 6):\n raise ValueError('Invalid IP version %s' % repr(version))\n if not (self.prefix >= 0 and self.prefix <= self.bits):\n raise ValueError('Invalid IP prefix %s' % repr(prefix))\n if self.address & self.hostmask != 0:\n if auto_correct:\n self.address &= self.netmask\n else:\n raise ValueError('Invalid IP %s (not base address of the 
block)' % str(self))\n\n def __str__(self):\n return self.label(expanded=False)\n\n def __eq__(self, other):\n return (self.version, self.address, self.prefix) == (other.version, other.address, other.prefix)\n\n def __ne__(self, other):\n return not self == other\n\n def label(self, expanded=False):\n tmp = IPv4Address(self.address) if self.version == 4 else IPv6Address(self.address)\n if expanded:\n ret = tmp.exploded\n else:\n ret = tmp.compressed\n if self.prefix == self.bits:\n return ret\n else:\n return ret + \"/\" + str(self.prefix)\n\n @property\n def bits(self):\n return 32 if self.version == 4 else 128\n\n @property\n def is_host(self):\n return self.prefix == self.bits\n\n @property\n def hostmask(self):\n return 2 ** (self.bits - self.prefix) - 1\n\n @property\n def netmask(self):\n return (2 ** self.bits - 1) ^ self.hostmask\n\n @property\n def network(self):\n return IP(self.address, self.bits, self.version)\n\n @property\n def broadcast(self):\n return IP(self.address | self.hostmask, self.bits, self.version)\n\n @property\n def numhosts(self):\n return self.broadcast.address - self.network.address + 1\n\n def __contains__(self, item):\n if self.version != item.version:\n return False\n if self.prefix > item.prefix:\n return False\n if self.address & self.netmask == item.address & self.netmask:\n return True\n else:\n return False\n", "id": "9300832", "language": "Python", "matching_score": 0, "max_stars_count": 37, "path": "dim/dim/ipaddr.py" }, { "content": "from setuptools import setup\nimport glob\nimport os\nfrom cas import version\n\n# python can't handle the fact, that data_files might contain directories.\n# So we have to split off every directory into its own data_files statement\n# with all its files just to make this work.\ndata_files = []\ndata_files.append(('share/dim-web', ['cas.wsgi', 'cas/config.py.example']))\ndata_files.append(('share/dim-web/www', []))\nfor dir in list(filter(lambda x: os.path.isdir(x), sorted(set(\n ['www'] + \\\n glob.glob('www/**', recursive=True))))):\n data_files.append((os.path.join('share/dim-web', dir), list(filter(\n lambda x: not os.path.isdir(x),\n sorted(glob.glob(os.path.join(dir, '*')))))))\n\nsetup(name='dim-web',\n packages=['cas'],\n data_files = data_files,\n version=version.VERSION,\n install_requires=['xmltodict', 'flask', 'requests'])\n", "id": "6052761", "language": "Python", "matching_score": 1.4142135381698608, "max_stars_count": 37, "path": "dim-web/setup.py" }, { "content": "VERSION = 0.1\n", "id": "2567765", "language": "Python", "matching_score": 0.02607281133532524, "max_stars_count": 41, "path": "dim-web/cas/version.py" } ]
1.354938
venturehacks
[ { "content": "import unittest\nfrom pathlib import Path\nfrom unittest.mock import patch\n\nfrom dbt_invoke import properties\nfrom dbt_invoke.internal import _utils\nfrom test import TestDbtInvoke\n\nPARENT_DIR = Path(__file__).parent\nDESCRIPTION = 'A fake test description.'\nCOL_TESTS = ['not_null']\n\n\nclass TestProperties(TestDbtInvoke):\n def test_create_update_delete_property_files(self):\n \"\"\"\n Test the create -> update -> delete cycle of property files\n\n :return: None\n \"\"\"\n # Create property files\n with patch('builtins.input', return_value='y'):\n properties.update(\n self.ctx,\n project_dir=self.project_dir,\n profiles_dir=self.profiles_dir,\n log_level='DEBUG',\n )\n # Check that the property files contain the expected contents\n all_files_actual_properties = dict()\n for file_location, exp_props in self.expected_properties.items():\n full_file_path = Path(self.project_dir, file_location)\n actual_props = _utils.parse_yaml(full_file_path)\n self.assertEqual(exp_props, actual_props)\n # Simulate a manual update of the property files\n for section in actual_props:\n if section.lower() != 'version':\n actual_props[section][0]['description'] = DESCRIPTION\n actual_props[section][0]['columns'][0]['tests'] = COL_TESTS\n all_files_actual_properties[full_file_path] = actual_props\n _utils.write_yaml(full_file_path, actual_props)\n # Automatically update property files, using threads\n properties.update(\n self.ctx,\n project_dir=self.project_dir,\n profiles_dir=self.profiles_dir,\n threads=2,\n log_level='DEBUG',\n )\n # Check that the automatic update did not overwrite the\n # previous manual update\n for full_file_path, exp_props in all_files_actual_properties.items():\n actual_props = _utils.parse_yaml(full_file_path)\n self.assertEqual(exp_props, actual_props)\n # Initiate then abort deletion of property files\n with patch('builtins.input', return_value='n'):\n properties.delete(\n self.ctx,\n project_dir=self.project_dir,\n profiles_dir=self.profiles_dir,\n log_level='DEBUG',\n )\n # Check that the property files still exist\n for full_file_path in all_files_actual_properties:\n self.assertTrue(full_file_path.exists())\n # Delete property files\n with patch('builtins.input', return_value='y'):\n properties.delete(\n self.ctx,\n project_dir=self.project_dir,\n profiles_dir=self.profiles_dir,\n log_level='DEBUG',\n )\n # Check that the property files no longer exist\n for full_file_path in all_files_actual_properties:\n self.assertFalse(full_file_path.exists())\n\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "11538088", "language": "Python", "matching_score": 4.176948070526123, "max_stars_count": 36, "path": "tests/test_properties.py" }, { "content": "import unittest\nfrom pathlib import Path\nfrom unittest.mock import patch\n\nfrom dbt_invoke import properties\nfrom dbt_invoke.internal import _utils\nfrom test import TestDbtInvoke\n\nPARENT_DIR = Path(__file__).parent\nSUPPORTED_RESOURCE_TYPES = properties._SUPPORTED_RESOURCE_TYPES\n\n\nclass TestUtils(TestDbtInvoke):\n def test_add_macro(self):\n \"\"\"\n Test the automatic addition of a macro to a dbt project\n\n :return: None\n \"\"\"\n with patch('builtins.input', return_value='n'):\n try:\n _utils.add_macro(self.ctx, self.macro_name, logger=self.logger)\n except SystemExit:\n pass\n with patch('builtins.input', return_value='y'):\n _utils.add_macro(self.ctx, self.macro_name, logger=self.logger)\n with open(self.macro_path, 'r') as f:\n lines = f.read()\n self.assertEqual(lines, self.macro_value)\n\n 
def test_dbt_ls(self):\n \"\"\"\n Test the \"dbt ls\" command with different arguments\n\n :return: None\n \"\"\"\n for db_ls_kwarg, values in self.expected_dbt_ls_results.items():\n for value, expected_result_lines in values.items():\n dbt_ls_kwargs = {db_ls_kwarg: value}\n result_lines = _utils.dbt_ls(\n self.ctx,\n project_dir=self.project_dir,\n profiles_dir=self.profiles_dir,\n supported_resource_types=SUPPORTED_RESOURCE_TYPES,\n logger=self.logger,\n **dbt_ls_kwargs,\n )\n self.assertCountEqual(result_lines, expected_result_lines)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "8516489", "language": "Python", "matching_score": 0.6200135350227356, "max_stars_count": 0, "path": "tests/test_utils.py" }, { "content": "import json\nimport logging\nfrom typing import Any, Sequence, Optional, Tuple, Iterable, MutableMapping, Union\n\nimport requests\nimport time\n\nfrom .models.metabase import MetabaseModel, MetabaseColumn\n\n\nclass MetabaseClient:\n \"\"\"Metabase API client.\"\"\"\n\n _SYNC_PERIOD_SECS = 5\n\n def __init__(\n self,\n host: str,\n user: str,\n password: str,\n use_http: bool = False,\n verify: Union[str, bool] = None,\n ):\n \"\"\"Constructor.\n\n Arguments:\n host {str} -- Metabase hostname.\n user {str} -- Metabase username.\n password {str} -- Metabase password.\n\n Keyword Arguments:\n use_http {bool} -- Use HTTP instead of HTTPS. (default: {False})\n verify {Union[str, bool]} -- Path to certificate or disable verification. (default: {None})\n \"\"\"\n\n self.host = host\n self.protocol = \"http\" if use_http else \"https\"\n self.verify = verify\n self.session_id = self.get_session_id(user, password)\n logging.info(\"Session established successfully\")\n\n def get_session_id(self, user: str, password: str) -> str:\n \"\"\"Obtains new session ID from API.\n\n Arguments:\n user {str} -- Metabase username.\n password {str} -- Metabase password.\n\n Returns:\n str -- Session ID.\n \"\"\"\n\n return self.api(\n \"post\",\n \"/api/session\",\n authenticated=False,\n json={\"username\": user, \"password\": password},\n )[\"id\"]\n\n def sync_and_wait(\n self, database: str, schema: str, models: Sequence, timeout: Optional[int]\n ) -> bool:\n \"\"\"Synchronize with the database and wait for schema compatibility.\n\n Arguments:\n database {str} -- Metabase database name.\n schema {str} -- Metabase schema name.\n models {list} -- List of dbt models read from project.\n\n Keyword Arguments:\n timeout {int} -- Timeout before giving up in seconds. 
(default: {30})\n\n Returns:\n bool -- True if schema compatible with models, false if still incompatible.\n \"\"\"\n if timeout is None:\n timeout = 30\n\n if timeout < self._SYNC_PERIOD_SECS:\n logging.critical(\n \"Timeout provided %d secs, must be at least %d\",\n timeout,\n self._SYNC_PERIOD_SECS,\n )\n return False\n\n database_id = self.find_database_id(database)\n if not database_id:\n logging.critical(\"Cannot find database by name %s\", database)\n return False\n\n self.api(\"post\", f\"/api/database/{database_id}/sync_schema\")\n\n deadline = int(time.time()) + timeout\n sync_successful = False\n while True:\n sync_successful = self.models_compatible(database_id, schema, models)\n time_after_wait = int(time.time()) + self._SYNC_PERIOD_SECS\n if not sync_successful and time_after_wait <= deadline:\n time.sleep(self._SYNC_PERIOD_SECS)\n else:\n break\n return sync_successful\n\n def models_compatible(\n self, database_id: str, schema: str, models: Sequence\n ) -> bool:\n \"\"\"Checks if models compatible with the Metabase database schema.\n\n Arguments:\n database_id {str} -- Metabase database ID.\n schema {str} -- Metabase schema name.\n models {list} -- List of dbt models read from project.\n\n Returns:\n bool -- True if schema compatible with models, false otherwise.\n \"\"\"\n\n _, field_lookup = self.build_metadata_lookups(database_id, schema)\n\n are_models_compatible = True\n for model in models:\n\n schema_name = model.schema.upper()\n model_name = model.name.upper()\n\n lookup_key = f\"{schema_name}.{model_name}\"\n\n if lookup_key not in field_lookup:\n logging.warning(\n \"Model %s not found in %s schema\", lookup_key, schema_name\n )\n are_models_compatible = False\n else:\n table_lookup = field_lookup[lookup_key]\n for column in model.columns:\n column_name = column.name.upper()\n if column_name not in table_lookup:\n logging.warning(\n \"Column %s not found in %s model\", column_name, lookup_key\n )\n are_models_compatible = False\n\n return are_models_compatible\n\n def export_models(\n self, database: str, schema: str, models: Sequence[MetabaseModel], aliases\n ):\n \"\"\"Exports dbt models to Metabase database schema.\n\n Arguments:\n database {str} -- Metabase database name.\n schema {str} -- Metabase schema name.\n models {list} -- List of dbt models read from project.\n aliases {dict} -- Provided by reader class. Shuttled down to column exports to resolve FK refs against relations to aliased source tables\n \"\"\"\n\n database_id = self.find_database_id(database)\n if not database_id:\n logging.critical(\"Cannot find database by name %s\", database)\n return\n\n table_lookup, field_lookup = self.build_metadata_lookups(database_id, schema)\n\n for model in models:\n self.export_model(model, table_lookup, field_lookup, aliases)\n\n def export_model(\n self,\n model: MetabaseModel,\n table_lookup: dict,\n field_lookup: dict,\n aliases: dict,\n ):\n \"\"\"Exports one dbt model to Metabase database schema.\n\n Arguments:\n model {dict} -- One dbt model read from project.\n table_lookup {dict} -- Dictionary of Metabase tables indexed by name.\n field_lookup {dict} -- Dictionary of Metabase fields indexed by name, indexed by table name.\n aliases {dict} -- Provided by reader class. 
Shuttled down to column exports to resolve FK refs against relations to aliased source tables\n \"\"\"\n\n schema_name = model.schema.upper()\n model_name = model.name.upper()\n\n lookup_key = f\"{schema_name}.{aliases.get(model_name, model_name)}\"\n\n api_table = table_lookup.get(lookup_key)\n if not api_table:\n logging.error(\"Table %s does not exist in Metabase\", lookup_key)\n return\n\n # Empty strings not accepted by Metabase\n if not model.description:\n model_description = None\n else:\n model_description = model.description\n\n table_id = api_table[\"id\"]\n if api_table[\"description\"] != model_description and model_description:\n # Update with new values\n self.api(\n \"put\",\n f\"/api/table/{table_id}\",\n json={\"description\": model_description},\n )\n logging.info(\"Updated table %s successfully\", lookup_key)\n elif not model_description:\n logging.info(\"No model description provided for table %s\", lookup_key)\n else:\n logging.info(\"Table %s is up-to-date\", lookup_key)\n\n for column in model.columns:\n self.export_column(schema_name, model_name, column, field_lookup, aliases)\n\n def export_column(\n self,\n schema_name: str,\n model_name: str,\n column: MetabaseColumn,\n field_lookup: dict,\n aliases: dict,\n ):\n \"\"\"Exports one dbt column to Metabase database schema.\n\n Arguments:\n model_name {str} -- One dbt model name read from project.\n column {dict} -- One dbt column read from project.\n field_lookup {dict} -- Dictionary of Metabase fields indexed by name, indexed by table name.\n aliases {dict} -- Provided by reader class. Used to resolve FK refs against relations to aliased source tables\n \"\"\"\n\n table_lookup_key = f\"{schema_name}.{model_name}\"\n column_name = column.name.upper()\n\n field = field_lookup.get(table_lookup_key, {}).get(column_name)\n if not field:\n logging.error(\n \"Field %s.%s does not exist in Metabase\", table_lookup_key, column_name\n )\n return\n\n field_id = field[\"id\"]\n\n api_field = self.api(\"get\", f\"/api/field/{field_id}\")\n\n if \"special_type\" in api_field:\n semantic_type = \"special_type\"\n else:\n semantic_type = \"semantic_type\"\n\n fk_target_field_id = None\n if column.semantic_type == \"type/FK\":\n # Target table could be aliased if we parse_ref() on a source, so we caught aliases during model parsing\n # This way we can unpack any alias mapped to fk_target_table when using yml folder parser\n target_table = (\n column.fk_target_table.upper()\n if column.fk_target_table is not None\n else None\n )\n target_field = (\n column.fk_target_field.upper()\n if column.fk_target_field is not None\n else None\n )\n\n if not target_table or not target_field:\n logging.info(\n \"Passing on fk resolution for %s. 
Target field %s was not resolved during dbt model parsing.\",\n table_lookup_key,\n target_field,\n )\n\n else:\n # Now we can trust our parse_ref even if it is pointing to something like source(\"salesforce\", \"my_cool_table_alias\")\n # just as easily as a simple ref(\"stg_salesforce_cool_table\") -> the dict is empty if parsing from manifest.json\n was_aliased = (\n aliases.get(target_table.split(\".\", 1)[-1])\n if target_table\n else None\n )\n if was_aliased:\n target_table = \".\".join(\n [target_table.split(\".\", 1)[0], was_aliased]\n )\n\n logging.info(\n \"Looking for field %s in table %s\", target_field, target_table\n )\n fk_target_field_id = (\n field_lookup.get(target_table, {}).get(target_field, {}).get(\"id\")\n )\n\n if fk_target_field_id:\n logging.info(\n \"Setting target field %s to PK in order to facilitate FK ref for %s column\",\n fk_target_field_id,\n column_name,\n )\n self.api(\n \"put\",\n f\"/api/field/{fk_target_field_id}\",\n json={semantic_type: \"type/PK\"},\n )\n else:\n logging.error(\n \"Unable to find foreign key target %s.%s\",\n target_table,\n target_field,\n )\n\n # Nones are not accepted, default to normal\n if not column.visibility_type:\n column.visibility_type = \"normal\"\n\n # Empty strings not accepted by Metabase\n if not column.description:\n column_description = None\n else:\n column_description = column.description\n\n if (\n api_field[\"description\"] != column_description\n or api_field[semantic_type] != column.semantic_type\n or api_field[\"visibility_type\"] != column.visibility_type\n or api_field[\"fk_target_field_id\"] != fk_target_field_id\n ):\n # Update with new values\n self.api(\n \"put\",\n f\"/api/field/{field_id}\",\n json={\n \"description\": column_description,\n semantic_type: column.semantic_type,\n \"visibility_type\": column.visibility_type,\n \"fk_target_field_id\": fk_target_field_id,\n },\n )\n logging.info(\"Updated field %s.%s successfully\", model_name, column_name)\n else:\n logging.info(\"Field %s.%s is up-to-date\", model_name, column_name)\n\n def find_database_id(self, name: str) -> Optional[str]:\n \"\"\"Finds Metabase database ID by name.\n\n Arguments:\n name {str} -- Metabase database name.\n\n Returns:\n str -- Metabase database ID.\n \"\"\"\n\n for database in self.api(\"get\", \"/api/database\"):\n if database[\"name\"].upper() == name.upper():\n return database[\"id\"]\n return None\n\n def build_metadata_lookups(\n self, database_id: str, schema: str, schemas_to_exclude: Iterable = None\n ) -> Tuple[dict, dict]:\n \"\"\"Builds table and field lookups.\n\n Arguments:\n database_id {str} -- Metabase database ID.\n schema {str} -- Metabase schema name.\n\n Returns:\n dict -- Dictionary of tables indexed by name.\n dict -- Dictionary of fields indexed by name, indexed by table name.\n \"\"\"\n\n if schemas_to_exclude is None:\n schemas_to_exclude = []\n\n table_lookup = {}\n field_lookup = {}\n\n metadata = self.api(\n \"get\",\n f\"/api/database/{database_id}/metadata\",\n params=dict(include_hidden=True),\n )\n for table in metadata.get(\"tables\", []):\n table_schema = table.get(\"schema\", \"public\").upper()\n table_name = table[\"name\"].upper()\n\n if schema:\n if table_schema != schema.upper():\n logging.debug(\n \"Ignoring Metabase table %s in schema %s. 
It does not belong to selected schema %s\",\n table_name,\n table_schema,\n schema,\n )\n continue\n\n if schemas_to_exclude:\n schemas_to_exclude = {\n exclusion.upper() for exclusion in schemas_to_exclude\n }\n\n if table_schema in schemas_to_exclude:\n logging.debug(\n \"Ignoring Metabase table %s in schema %s. It belongs to excluded schemas %s\",\n table_name,\n table_schema,\n schemas_to_exclude,\n )\n continue\n\n lookup_key = f\"{table_schema}.{table_name}\"\n table_lookup[lookup_key] = table\n table_field_lookup = {}\n\n for field in table.get(\"fields\", []):\n field_name = field[\"name\"].upper()\n table_field_lookup[field_name] = field\n\n field_lookup[lookup_key] = table_field_lookup\n\n return table_lookup, field_lookup\n\n def api(\n self,\n method: str,\n path: str,\n authenticated: bool = True,\n critical: bool = True,\n **kwargs,\n ) -> Any:\n \"\"\"Unified way of calling Metabase API.\n\n Arguments:\n method {str} -- HTTP verb, e.g. get, post, put.\n path {str} -- Relative path of endpoint, e.g. /api/database.\n\n Keyword Arguments:\n authenticated {bool} -- Includes session ID when true. (default: {True})\n critical {bool} -- Raise on any HTTP errors. (default: {True})\n\n Returns:\n Any -- JSON payload of the endpoint.\n \"\"\"\n\n headers: MutableMapping = {}\n if \"headers\" not in kwargs:\n kwargs[\"headers\"] = headers\n else:\n headers = kwargs[\"headers\"].copy()\n\n if authenticated:\n headers[\"X-Metabase-Session\"] = self.session_id\n\n response = requests.request(\n method, f\"{self.protocol}://{self.host}{path}\", verify=self.verify, **kwargs\n )\n if critical:\n try:\n response.raise_for_status()\n except requests.exceptions.HTTPError:\n if \"password\" in kwargs[\"json\"]:\n logging.error(\"HTTP request failed. Response: %s\", response.text)\n else:\n logging.error(\n \"HTTP request failed. Payload: %s. Response: %s\",\n kwargs[\"json\"],\n response.text,\n )\n raise\n elif not response.ok:\n return False\n\n response_json = json.loads(response.text)\n\n # Since X.40.0 responses are encapsulated in \"data\" with pagination parameters\n if \"data\" in response_json:\n return response_json[\"data\"]\n return response_json\n", "id": "9142924", "language": "Python", "matching_score": 4.236568927764893, "max_stars_count": 0, "path": "dbtmetabase/metabase.py" }, { "content": "import logging\nimport sys\nimport os\nimport argparse\n\nfrom .metabase import MetabaseClient\nfrom .parsers.dbt_folder import DbtFolderReader\nfrom .parsers.dbt_manifest import DbtManifestReader\n\nfrom typing import Iterable, List, Union\n\n__version__ = \"0.8.0\"\n\n\ndef export(\n dbt_database: str,\n metabase_database: str,\n metabase_host: str,\n metabase_user: str,\n metabase_password: str,\n dbt_manifest_path: str = \"\",\n dbt_path: str = \"\",\n dbt_docs_url: str = None,\n metabase_use_http: bool = False,\n metabase_verify: Union[str, bool] = True,\n metabase_sync_skip: bool = False,\n metabase_sync_timeout: int = None,\n schema: str = \"public\",\n schema_excludes: Iterable = None,\n includes: Iterable = None,\n excludes: Iterable = None,\n include_tags: bool = True,\n):\n \"\"\"Exports models from dbt to Metabase.\n\n Args:\n dbt_database (str): Source database name.\n metabase_database (str): Target Metabase database name. Database in Metabase is aliased.\n metabase_host (str): Metabase hostname.\n metabase_user (str): Metabase username.\n metabase_password (str): Metabase password.\n dbt_manifest_path (str, optional): Path to dbt project manifest.json [Primary]. 
Defaults to \"\".\n dbt_path (str, optional): Path to dbt project. [Alternative]. Defaults to \"\".\n dbt_docs_url (str, optional): URL to your dbt docs hosted catalog, a link will be appended to the model description (only works for manifest parsing). Defaults to None.\n metabase_use_http (bool, optional): Use HTTP to connect to Metabase instead of the default HTTPS. Defaults to False.\n metabase_verify (Union[str, bool], optional): Supply path to certificate or disable verification. Defaults to True.\n metabase_sync_skip (bool, optional): Skip synchronizing Metabase database before export. Defaults to False.\n metabase_sync_timeout (int, optional): Metabase synchronization timeout in seconds. Defaults to None.\n schema (str, optional): Target schema name. Defaults to \"public\".\n schema_excludes (Iterable, optional): Alternative to target schema, specify schema exclusions (only works for manifest parsing). Defaults to None.\n includes (Iterable, optional): Model names to limit processing to. Defaults to None.\n excludes (Iterable, optional): Model names to exclude. Defaults to None.\n include_tags (bool, optional): Append the dbt tags to the end of the table description. Defaults to True.\n \"\"\"\n\n if schema_excludes is None:\n schema_excludes = []\n if includes is None:\n includes = []\n if excludes is None:\n excludes = []\n\n # Assertions\n assert bool(dbt_path) != bool(\n dbt_manifest_path\n ), \"Bad arguments. dbt_path and dbt_manifest_path cannot be provide at the same time. One option must be specified.\"\n if dbt_path:\n assert (\n schema and not schema_excludes\n ), \"Must target a single schema if using yaml parser, multiple schemas not supported.\"\n assert bool(schema) != bool(\n schema_excludes\n ), \"Bad arguments. schema and schema_excludes cannot be provide at the same time. 
One option must be specified.\"\n\n # Instantiate Metabase client\n mbc = MetabaseClient(\n host=metabase_host,\n user=metabase_user,\n password=<PASSWORD>,\n use_http=metabase_use_http,\n verify=metabase_verify,\n )\n reader: Union[DbtFolderReader, DbtManifestReader]\n\n # Resolve dbt reader being either YAML or manifest.json based\n if dbt_path:\n reader = DbtFolderReader(os.path.expandvars(dbt_path))\n else:\n reader = DbtManifestReader(os.path.expandvars(dbt_manifest_path))\n\n if schema_excludes:\n schema_excludes = {schema.upper() for schema in schema_excludes}\n\n # Process dbt stuff\n models = reader.read_models(\n database=dbt_database,\n schema=schema,\n schema_excludes=schema_excludes,\n includes=includes,\n excludes=excludes,\n include_tags=include_tags,\n docs_url=dbt_docs_url,\n )\n\n # Sync and attempt schema alignment prior to execution; if timeout is not explicitly set, proceed regardless of success\n if not metabase_sync_skip:\n if metabase_sync_timeout is not None and not mbc.sync_and_wait(\n metabase_database, schema, models, metabase_sync_timeout\n ):\n logging.critical(\"Sync timeout reached, models still not compatible\")\n return\n\n # Process Metabase stuff\n mbc.export_models(metabase_database, schema, models, reader.catch_aliases)\n\n\ndef main(args: List = None):\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(message)s\", level=logging.INFO\n )\n\n parser = argparse.ArgumentParser(\n description=\"Model synchronization from dbt to Metabase.\"\n )\n parser.add_argument(\"command\", choices=[\"export\"], help=\"command to execute\")\n\n # dbt arguments\n parser.add_argument(\n \"--dbt_database\",\n metavar=\"DB\",\n required=True,\n help=\"Target database name as specified in dbt\",\n )\n parser.add_argument(\n \"--dbt_path\",\n help=\"Path to dbt project. Cannot be specified with --dbt_manifest_path\",\n )\n parser.add_argument(\n \"--dbt_manifest_path\",\n help=\"Path to dbt manifest.json (typically located in the /target/ directory of the dbt project directory). Cannot be specified with --dbt_path\",\n )\n parser.add_argument(\n \"--dbt_docs\",\n metavar=\"URL\",\n help=\"Pass in URL to dbt docs site. Appends dbt docs URL for each model to Metabase table description\",\n )\n\n # Metabase arguments\n parser.add_argument(\n \"--metabase_database\",\n metavar=\"DB\",\n required=True,\n help=\"Target database name as set in Metabase (typically aliased)\",\n )\n parser.add_argument(\n \"--metabase_host\", metavar=\"HOST\", required=True, help=\"Metabase hostname\"\n )\n parser.add_argument(\n \"--metabase_user\", metavar=\"USER\", required=True, help=\"Metabase username\"\n )\n parser.add_argument(\n \"--metabase_password\", metavar=\"PASS\", required=True, help=\"Metabase password\"\n )\n parser.add_argument(\n \"--metabase_use_http\",\n action=\"store_true\",\n help=\"use HTTP to connect to Metabase instead of HTTPS\",\n )\n parser.add_argument(\n \"--metabase_verify\",\n metavar=\"CERT\",\n help=\"Path to certificate bundle used by Metabase client\",\n )\n parser.add_argument(\n \"--metabase_sync_skip\",\n action=\"store_true\",\n help=\"Skip synchronizing Metabase database before export\",\n )\n parser.add_argument(\n \"--metabase_sync_timeout\",\n metavar=\"SECS\",\n type=int,\n help=\"Synchronization timeout (in secs). 
If set, we will fail hard on synchronization failure; if not set, we will proceed after attempting sync regardless of success\",\n )\n\n # Common/misc arguments\n parser.add_argument(\n \"--schema\",\n metavar=\"SCHEMA\",\n help=\"Target schema name. Cannot be specified with --schema_excludes\",\n )\n parser.add_argument(\n \"--schema_excludes\",\n help=\"Target schemas to exclude. Cannot be specified with --schema. Will sync all schemas not excluded\",\n )\n parser.add_argument(\n \"--includes\",\n metavar=\"MODELS\",\n nargs=\"*\",\n default=[],\n help=\"Model names to limit processing to\",\n )\n parser.add_argument(\n \"--excludes\",\n metavar=\"MODELS\",\n nargs=\"*\",\n default=[],\n help=\"Model names to exclude\",\n )\n parser.add_argument(\n \"--include_tags\",\n action=\"store_true\",\n default=False,\n help=\"Append tags to Table descriptions in Metabase\",\n )\n parser.add_argument(\n \"--verbose\",\n action=\"store_true\",\n default=False,\n help=\"Verbose output\",\n )\n\n parsed = parser.parse_args(args=args)\n\n if parsed.verbose:\n logger = logging.getLogger()\n logger.addHandler(logging.StreamHandler(sys.stdout))\n logger.setLevel(logging.DEBUG)\n\n if parsed.command == \"export\":\n export(\n dbt_database=parsed.dbt_database,\n dbt_manifest_path=parsed.dbt_manifest_path,\n dbt_path=parsed.dbt_path,\n dbt_docs_url=parsed.dbt_docs,\n metabase_database=parsed.metabase_database,\n metabase_host=parsed.metabase_host,\n metabase_user=parsed.metabase_user,\n metabase_password=parsed.metabase_password,\n metabase_use_http=parsed.metabase_use_http,\n metabase_verify=parsed.metabase_verify,\n metabase_sync_skip=parsed.metabase_sync_skip,\n metabase_sync_timeout=parsed.metabase_sync_timeout,\n schema=parsed.schema,\n schema_excludes=parsed.schema_excludes,\n includes=parsed.includes,\n excludes=parsed.excludes,\n include_tags=parsed.include_tags,\n )\n", "id": "1865570", "language": "Python", "matching_score": 2.762495279312134, "max_stars_count": 0, "path": "dbtmetabase/__init__.py" }, { "content": "import unittest\n\nfrom dbtmetabase.metabase import MetabaseClient\n\n\nclass MockMetabaseClient(MetabaseClient):\n def get_session_id(self, user: str, password: str) -> str:\n return \"dummy\"\n\n\nclass TestMetabaseClient(unittest.TestCase):\n def setUp(self):\n self.client = MockMetabaseClient(\n host=\"localhost\",\n user=\"dummy\",\n password=\"<PASSWORD>\",\n use_http=True,\n )\n\n def test_dummy(self):\n self.assertTrue(True)\n", "id": "2937021", "language": "Python", "matching_score": 0.6926072835922241, "max_stars_count": 0, "path": "tests/test_metabase.py" }, { "content": "from .test_dbt_folder_reader import *\nfrom .test_dbt_manifest_reader import *\nfrom .test_metabase import *\n", "id": "10936288", "language": "Python", "matching_score": 0.08631733804941177, "max_stars_count": 0, "path": "tests/__init__.py" }, { "content": "from invoke import Collection, Program\n\nfrom dbt_invoke import properties\nfrom dbt_invoke.internal import _version\n\nns = Collection()\nns.add_collection(properties)\nprogram = Program(namespace=ns, version=_version.__version__)\n", "id": "10886950", "language": "Python", "matching_score": 0.08679318428039551, "max_stars_count": 36, "path": "dbt_invoke/main.py" } ]
0.692607
phenopolis
[ { "content": "from annotation import *\nfrom monarch import *\n\n", "id": "11894359", "language": "Python", "matching_score": 0, "max_stars_count": 24, "path": "rest/__init__.py" }, { "content": "from views import *\nfrom lookups import *\n\[email protected]('/irdc/summary')\ndef irdc_summary():\n if session['user'] == 'demo':\n return 'Error, you do not have access to this page'\n hpo_freq = get_hpo_size_freq('hpo_freq.tsv')\n hpo_dot_file = os.path.join('dot','irdc_hpo.json')\n hpo_dot_inf = open(hpo_dot_file,'r')\n hpo_dot = json.load(hpo_dot_inf)\n return render_template('irdc_summary.html', \n hpo_dot = json.dumps(hpo_dot),\n hpo_freq = json.dumps(hpo_freq))\n\n", "id": "11347075", "language": "Python", "matching_score": 0.7634276747703552, "max_stars_count": 24, "path": "views/uclex_irdc.py" }, { "content": "\nimport unittest\nimport runserver\nimport sys\nimport load_data\nimport helper\n\nclass GenePageTestCase(unittest.TestCase):\n\n def setUp(self):\n load_data.load_data()\n runserver.app.config['TESTING'] = True\n runserver.app.config['DB_NAME'] = 'test_uclex'\n runserver.app.config['DB_NAME_HPO'] = 'test_hpo'\n runserver.app.config['DB_NAME_PATIENTS'] = 'test_patients'\n runserver.app.config['DB_NAME_USERS'] = 'test_users'\n self.app = runserver.app.test_client()\n helper.create_neo4j_demo_user()\n helper.login(self.app)\n\n def tearDown(self):\n self.app.get('/logout', follow_redirects=True)\n \n def gene_page(self, geneName):\n return self.app.get('/gene/'+geneName, follow_redirects=True)\n\n def test_gene_page(self):\n page = self.gene_page('TTLL5')\n assert page.status_code == 200\n assert 'TTLL5' in page.data \n assert 'ENSG00000119685' in page.data\n assert 'Macular dystrophy' in page.data \n assert 'Abnormality of the macula' in page.data\n assert 'Autosomal recessive inheritance' in page.data\n assert 'Mode of inheritance' in page.data\n assert 'Visual impairment' in page.data\n assert 'Abnormality of vision' in page.data\n assert 'Abnormal eye physiology' in page.data\n assert 'Retinal dystrophy' in page.data\n assert 'Abnormality of the retina' in page.data\n assert 'Abnormality of the fundus' in page.data\n assert 'Abnormality of the posterior segment of the globe' in page.data\n assert 'Abnormality of the globe' in page.data\n assert 'Abnormal eye morphology' in page.data\n assert 'Abnormality of the eye' in page.data\n assert 'Phenotypic abnormality' in page.data\n assert 'All' in page.data\n\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "10124924", "language": "Python", "matching_score": 2.7652628421783447, "max_stars_count": 24, "path": "tests/test_gene.py" }, { "content": "\nimport unittest\nfrom config import config\n\n\nclass ConfigTestCase(unittest.TestCase):\n\n def setUp(self):\n pass\n\n\n def tearDown(self):\n pass\n\n\n def test_import_flag(self):\n assert config.IMPORT_PYSAM_PRIMER3 == True # This needs to be True before committing to the repo. 
\n\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "2090960", "language": "Python", "matching_score": 1.4502536058425903, "max_stars_count": 24, "path": "tests/test_config.py" }, { "content": "import sys \n\nif len(sys.argv)>1 and sys.argv[1]=='SERVER':\n LOCAL=False\nelse:\n LOCAL=True\n\nIMPORT_PYSAM_PRIMER3 = True # Set this to False to develop on Windows, where pysam and primer3 fail to install.\n\n", "id": "2425557", "language": "Python", "matching_score": 0.294958233833313, "max_stars_count": 24, "path": "config/config.py" }, { "content": "import pathlib\nfrom setuptools import setup, find_packages\n\n# the directory containing this file\nHERE = pathlib.Path(__file__).parent\nREADME = (HERE / \"README.md\").read_text()\n\nsetup(\n name='oct_converter',\n version='0.2',\n description='Extract OCT and fundus data from proprietary file formats.',\n long_description=README,\n long_description_content_type=\"text/markdown\",\n url='https://github.com/marksgraham/OCT-Converter',\n author='<NAME>',\n author_email='<EMAIL>',\n license=\"MIT\",\n python_requires='>=3.7',\n install_requires=['construct','imageio','natsort','numpy','opencv-python','pydicom','six','matplotlib','imageio-ffmpeg', 'pylibjpeg', 'h5py'],\n packages=find_packages(),\n include_package_data=True\n)\n", "id": "3351587", "language": "Python", "matching_score": 0.5094863772392273, "max_stars_count": 0, "path": "setup.py" }, { "content": "# code modified from https://realpython.com/handling-email-confirmation-in-flask/\nfrom itsdangerous import URLSafeTimedSerializer\nfrom views import application\n\n\ndef generate_confirmation_token(email):\n serializer = URLSafeTimedSerializer(application.config[\"SECRET_KEY\"])\n return serializer.dumps(email, salt=application.config[\"SECURITY_PASSWORD_SALT\"])\n\n\ndef confirm_token(token, expiration=3600):\n serializer = URLSafeTimedSerializer(application.config[\"SECRET_KEY\"])\n try:\n email = serializer.loads(token, salt=application.config[\"SECURITY_PASSWORD_SALT\"], max_age=expiration)\n except Exception:\n email = None\n return email\n", "id": "12736597", "language": "Python", "matching_score": 0.46856340765953064, "max_stars_count": 24, "path": "views/token.py" }, { "content": "\"\"\"\nDB schema\n\"\"\"\n# \"postgres://admin:<PASSWORD>@aws-us-east-1-portal.19.dblayer.com:15813/compose\"\n\nimport enum\nfrom sqlalchemy import Column, String, Integer, ForeignKey, JSON, Boolean, DateTime, Enum, func\nfrom sqlalchemy import BigInteger, SmallInteger\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy.ext.declarative import declarative_base, DeclarativeMeta\nfrom sqlalchemy.sql.schema import MetaData\n\n\nPublic: DeclarativeMeta = declarative_base()\nPhenopolis: DeclarativeMeta = declarative_base(metadata=MetaData(schema=\"phenopolis\"))\nEnsembl: DeclarativeMeta = declarative_base(metadata=MetaData(schema=\"ensembl\"))\nHpo: DeclarativeMeta = declarative_base(metadata=MetaData(schema=\"hpo\"))\n\n# meta=MetaData(engine)\n\n\nclass AsDictable(object):\n def as_dict(self):\n dictionary = self.__dict__.copy()\n if \"_sa_instance_state\" in dictionary:\n del dictionary[\"_sa_instance_state\"] # removes SQLAlchemy internal field\n for k, v in dictionary.items(): # ensures that Enum fields are represented as strings\n if isinstance(v, enum.Enum):\n dictionary[k] = v.name\n return dictionary\n\n\nclass NewGene(Ensembl, AsDictable):\n __tablename__ = \"gene\"\n identifier = Column(Integer, nullable=False, primary_key=True)\n ensembl_gene_id = Column(String(255), 
nullable=False)\n start = Column(Integer, nullable=False)\n end = Column(Integer, nullable=False)\n chromosome = Column(String(255), nullable=False)\n strand = Column(SmallInteger, nullable=False)\n hgnc_id = Column(String(255))\n hgnc_symbol = Column(String(255))\n assembly = Column(String(255))\n\n\nclass IndividualGene(Phenopolis, AsDictable):\n __tablename__ = \"individual_gene\"\n individual_id = Column(Integer, ForeignKey(\"phenopolis.individual.id\"), nullable=False, primary_key=True)\n gene_id = Column(BigInteger, nullable=False, primary_key=True)\n # SQLAlchemy seems really annoying when dealing with ForeignKey between schemas\n # We're not needing this below, though it's working fine in postgres\n # ForeignKey below will cause Individual to fail\n # gene_id = Column(BigInteger, ForeignKey(\"ensembl.gene.identifier\"), nullable=False, primary_key=True)\n\n\nclass HpoTerm(Hpo, AsDictable):\n __tablename__ = \"term\"\n id = Column(Integer, primary_key=True, nullable=False)\n hpo_id = Column(String(255), primary_key=True, nullable=False)\n name = Column(String(255), nullable=False)\n\n\nclass IndividualFeature(Phenopolis, AsDictable):\n __tablename__ = \"individual_feature\"\n individual_id = Column(Integer, ForeignKey(\"phenopolis.individual.id\"), primary_key=True, nullable=False)\n feature_id = Column(Integer, ForeignKey(\"hpo.term.id\"), primary_key=True, nullable=False)\n type = Column(String(255), primary_key=True, nullable=False)\n\n\nclass User(Public, AsDictable):\n __tablename__ = \"users\"\n user = Column(\"user\", String(255), primary_key=True, unique=True)\n argon_password = Column(\"argon_password\", String(255))\n individuals = relationship(\"UserIndividual\", backref=\"users\")\n enabled = Column(\"enabled\", Boolean(), default=False)\n registered_on = Column(\"registered_on\", DateTime(timezone=True), default=func.now())\n confirmed = Column(\"confirmed\", Boolean(), default=False)\n confirmed_on = Column(\"confirmed_on\", DateTime(timezone=True))\n email = Column(\"email\", String(255), unique=True)\n full_name = Column(\"full_name\", String(255))\n\n\nclass UserConfig(Public, AsDictable):\n __tablename__ = \"user_config\"\n user_name = Column(\"user_name\", String(255), primary_key=True)\n language = Column(\"language\", String(255), primary_key=True)\n page = Column(\"page\", String(255), primary_key=True)\n config = Column(\"config\", JSON)\n\n\nclass Sex(enum.Enum):\n # male\n M = 1\n # female\n F = 2\n # unknown\n U = 3\n\n\nclass Individual(Phenopolis, AsDictable):\n __tablename__ = \"individual\"\n id = Column(Integer, nullable=False, primary_key=True)\n phenopolis_id = Column(String(255), nullable=False)\n external_id = Column(String(255))\n sex = Column(Enum(Sex), nullable=False)\n consanguinity = Column(\"consanguinity\", String(255))\n # These relationships are not used, but if used they're breaking delete_individual()\n # sqlalchemy.exc.NoReferencedTableError: Foreign key associated with column 'individual_gene.gene_id'\n # could not find table 'ensembl.gene' with which to generate a foreign key to target column 'identifier'\n # to_feat = relationship(\"IndividualFeature\", backref=\"individual_feature\", lazy=True, cascade=\"all, delete-orphan\")\n # to_gene = relationship(\"IndividualGene\", backref=\"individual_gene\", lazy=True, cascade=\"all, delete-orphan\")\n\n\nclass UserIndividual(Public, AsDictable):\n __tablename__ = \"users_individuals\"\n # remove pytest warning DELETE statement on table 'users_individuals' expected to delete 1 row(s); 4 
were matched\n __mapper_args__ = {\"confirm_deleted_rows\": False}\n user = Column(\"user\", String(255), ForeignKey(\"users.user\"))\n internal_id = Column(\"internal_id\", String(255), primary_key=True,)\n\n\nclass NewVariant(Phenopolis, AsDictable):\n __tablename__ = \"variant\"\n id = Column(BigInteger, primary_key=True)\n chrom = Column(String(255), nullable=False)\n pos = Column(Integer, nullable=False)\n ref = Column(String(255), nullable=False)\n alt = Column(String(255), nullable=False)\n\n\nclass TranscriptConsequence(Phenopolis, AsDictable):\n __tablename__ = \"transcript_consequence\"\n id = Column(BigInteger, primary_key=True)\n chrom = Column(String(255), nullable=False)\n pos = Column(Integer, nullable=False)\n ref = Column(String(255), nullable=False)\n alt = Column(String(255), nullable=False)\n hgvs_c = Column(String(255))\n hgvs_p = Column(String(255))\n consequence = Column(String(255))\n gene_id = Column(String(255))\n\n\nclass IndividualVariant(Phenopolis, AsDictable):\n __tablename__ = \"individual_variant\"\n individual_id = Column(Integer, nullable=False, primary_key=True)\n variant_id = Column(BigInteger, nullable=False, primary_key=True)\n chrom = Column(String(255), nullable=False)\n pos = Column(Integer, nullable=False)\n ref = Column(String(255), nullable=False)\n alt = Column(String(255), nullable=False)\n zygosity = Column(String(255), nullable=False)\n\n\nclass IndividualVariantClassification(Phenopolis, AsDictable):\n __tablename__ = \"individual_variant_classification\"\n id = Column(BigInteger, primary_key=True)\n individual_id = Column(Integer, ForeignKey(\"individual_variant.individual_id\"), nullable=False)\n variant_id = Column(BigInteger, ForeignKey(\"individual_variant.variant_id\"), nullable=False)\n user_id = Column(String(255), nullable=False)\n classified_on = Column(DateTime(timezone=True), default=func.now())\n classification = Column(String(255), nullable=False)\n pubmed_id = Column(String(255))\n notes = Column(String(255))\n", "id": "11076437", "language": "Python", "matching_score": 3.422468662261963, "max_stars_count": 24, "path": "db/model.py" }, { "content": "\"\"\"\nAutocomplete view\n\"\"\"\nimport re\nfrom typing import List\n\nfrom flask import jsonify, request, session\nfrom psycopg2 import sql\nfrom sqlalchemy import Text, and_, cast\nfrom sqlalchemy.orm import Session\n\nfrom db.helpers import cursor2dict\nfrom db.model import Individual, NewGene, NewVariant, TranscriptConsequence, UserIndividual\nfrom views import HG_ASSEMBLY, application\nfrom views.auth import USER, requires_auth\nfrom views.postgres import get_db, session_scope\n\nCHROMOSOME_POS_REGEX = re.compile(r\"^(\\w+)[-:](\\d+)$\")\nCHROMOSOME_POS_REF_REGEX = re.compile(r\"^(\\w+)[-:](\\d+)[-:]([ACGT\\*]+)$\", re.IGNORECASE)\nCHROMOSOME_POS_REF_ALT_REGEX = re.compile(r\"^(\\w+)[-:](\\d+)[-:]([ACGT\\*]+)[-:>]([ACGT\\*]+)$\", re.IGNORECASE)\nGENOMIC_REGION_REGEX = re.compile(r\"^(\\w+)[-:](\\d+)[-:](\\d+)$\", re.IGNORECASE)\nENSEMBL_TRANSCRIPT_REGEX = re.compile(r\"^ENST(\\d{0,12})(\\.\\d{1,2})?\", re.IGNORECASE)\nENSEMBL_PROTEIN_REGEX = re.compile(r\"^ENSP(\\d{0,12})(\\.\\d{1,2})?\", re.IGNORECASE)\nENSEMBL_GENE_REGEX = re.compile(r\"^^ENSG(\\d{0,12})(\\.\\d{1,2})?\", re.IGNORECASE)\nHPO_REGEX = re.compile(r\"^HP:(\\d{0,7})\", re.IGNORECASE)\nPATIENT_REGEX = re.compile(r\"^PH(\\d{0,8})\", re.IGNORECASE)\nNUMERIC_REGEX = re.compile(r\"^\\d+$\", re.IGNORECASE)\nHGVS_C_REGEX = re.compile(r\"(.+):(c.*)\")\nHGVS_P_REGEX = re.compile(r\"(.+):(p.*)\")\nHGVSP = 
\"hgvsp\"\nHGVSC = \"hgvsc\"\n\nDEFAULT_SEARCH_RESULTS_LIMIT = 20\nMAXIMUM_SEARCH_RESULTS_LIMIT = 1000\n\n\[email protected](\"/autocomplete/<query>\")\n@requires_auth\ndef autocomplete(query):\n arguments = request.args.to_dict()\n query_type = arguments.get(\"query_type\")\n try:\n limit = int(arguments.get(\"limit\", DEFAULT_SEARCH_RESULTS_LIMIT))\n except ValueError:\n return (\n jsonify(success=False, message=f\"Please, specify a numeric limit value, {arguments.get('limit')}\"),\n 400,\n )\n\n if limit > MAXIMUM_SEARCH_RESULTS_LIMIT:\n return (\n jsonify(success=False, message=f\"Please, specify a limit lower than {MAXIMUM_SEARCH_RESULTS_LIMIT}\"),\n 400,\n )\n application.logger.debug(\"Autocomplete query '%s' and query type '%s'\", query, query_type)\n\n with session_scope() as db_session:\n if query_type == \"gene\":\n suggestions = _search_genes(db_session, query, limit)\n\n elif query_type == \"phenotype\":\n suggestions = _search_phenotypes(db_session, query, limit)\n\n elif query_type == \"patient\":\n suggestions = _search_patients(db_session, query, limit)\n\n elif query_type == \"variant\":\n suggestions = _search_variants(db_session, query, limit)\n\n elif query_type is None or query_type == \"\":\n suggestions = (\n _search_genes(db_session, query, limit)\n + _search_phenotypes(db_session, query, limit)\n + _search_patients(db_session, query, limit)\n + _search_variants(db_session, query, limit)\n )\n else:\n message = f\"Autocomplete request with unsupported query type '{query_type}'\"\n application.logger.error(message)\n # raise PhenopolisException(message)\n return (\n jsonify(success=False, message=message),\n 400,\n )\n\n return jsonify(suggestions), 200\n\n\ndef _search_patients(db_session: Session, query, limit):\n r\"\"\"'\n Patient (phenopolis_id) format: PH (PH\\d{8}) e.g. 
'PH00005862' and are restricted to a particular user\n 'demo', for example, can only access ['PH00008256', 'PH00008258', 'PH00008267', 'PH00008268']\n so, a search for 'PH000082', for user 'demo', should return only the 4 cases above\n \"\"\"\n individuals = (\n db_session.query(Individual, UserIndividual)\n .filter(\n and_(\n UserIndividual.internal_id == Individual.phenopolis_id,\n UserIndividual.user == session[USER],\n Individual.phenopolis_id.ilike(f\"%{query}%\"),\n )\n )\n .with_entities(Individual)\n .order_by(Individual.phenopolis_id.asc())\n .limit(limit)\n .all()\n )\n return [f\"individual::{x.phenopolis_id}::{x.phenopolis_id}\" for x in individuals]\n\n\ndef _search_phenotypes(db_session: Session, query, limit):\n r\"\"\"\n A user may search for things like 'Abnormality of body height' or for an HPO id as HP:1234567 (ie: HP:\\d{7})\n or just a seq of numbers like '1234'\n \"\"\"\n if HPO_REGEX.match(query) or NUMERIC_REGEX.match(query):\n sqlq = sql.SQL(\n \"\"\"\n select t.hpo_id, t.\"name\" from hpo.term t where t.hpo_id ~ %(query)s order by t.id limit %(limit)s\n \"\"\"\n )\n else:\n # TODO: search also over synonyms\n # TODO: return the distance so the frontend have greater flexibility\n # NOTE: order results by similarity and then by hpo_name (case insensitive)\n sqlq = sql.SQL(\n \"\"\"\n select\n t.hpo_id,\n t.\"name\" ,\n t.\"name\" <-> %(query)s as distance\n from\n hpo.term t\n where\n t.\"name\" %% %(query)s\n order by\n distance,\n lower(t.\"name\")\n limit %(limit)s\n \"\"\"\n )\n with get_db() as conn:\n with conn.cursor() as cur:\n cur.execute(sqlq, {\"query\": query, \"limit\": limit})\n phenotypes = cursor2dict(cur)\n return [f\"hpo::{x.get('name')}::{x.get('hpo_id')}\" for x in phenotypes]\n\n\ndef _search_genes(db_session: Session, query, limit):\n \"\"\"\n Either search for:\n - a gene id like 'ENSG000...'\n - a transcript id like 'ENST000...' not only canonical\n - a protein id like 'ENSP000...' 
not only canonical\n - a numeric id without any qualifier like '12345'\n - a gene name like 'TTLL...'\n - a gene synonym like 'asd...'\n\n The order of results is sorted by gene identifier for the 3 searches by identifier; and it is sorted by similarity\n for gene name and gene synonym searches\n \"\"\"\n is_identifier_query = (\n ENSEMBL_GENE_REGEX.match(query)\n or ENSEMBL_TRANSCRIPT_REGEX.match(query)\n or NUMERIC_REGEX.match(query)\n or ENSEMBL_PROTEIN_REGEX.match(query)\n )\n if is_identifier_query:\n query = remove_version_from_id(query)\n sqlq = sql.SQL(\n \"\"\"\n select distinct g.hgnc_symbol, g.ensembl_gene_id\n from ensembl.gene g\n left outer join ensembl.transcript t on g.ensembl_gene_id = t.ensembl_gene_id\n where g.assembly = %(hga)s\n and g.chromosome ~ '^X|^Y|^[0-9]{1,2}'\n and (\n g.ensembl_gene_id ~* %(query)s or\n t.ensembl_transcript_id ~* %(query)s or\n t.ensembl_peptide_id ~* %(query)s\n )\n order by g.ensembl_gene_id\n limit %(limit)s\n \"\"\"\n )\n else:\n sqlq = sql.SQL(\n \"\"\"\n select\n g.hgnc_symbol,\n g.ensembl_gene_id ,\n g.hgnc_symbol <-> %(query)s as distance\n from\n ensembl.gene g\n where g.assembly = %(hga)s\n and g.chromosome ~ '^X|^Y|^[0-9]{1,2}'\n and g.hgnc_symbol %% %(query)s\n order by\n distance,\n lower(g.hgnc_symbol)\n limit %(limit)s\n \"\"\"\n )\n with get_db() as conn:\n with conn.cursor() as cur:\n cur.execute(sqlq, {\"query\": query, \"limit\": limit, \"hga\": HG_ASSEMBLY})\n genes = cursor2dict(cur)\n if not genes:\n sqlq = sql.SQL(\n \"\"\"\n select\n g.hgnc_symbol,\n g.ensembl_gene_id ,\n gs.external_synonym <-> %(query)s as distance\n from\n ensembl.gene g\n join ensembl.gene_synonym gs on gs.gene = g.identifier\n where g.assembly = %(hga)s\n and g.chromosome ~ '^X|^Y|^[0-9]{1,2}'\n and gs.external_synonym %% %(query)s\n order by\n distance,\n lower(gs.external_synonym)\n limit %(limit)s\n \"\"\"\n )\n cur.execute(sqlq, {\"query\": query, \"limit\": limit, \"hga\": HG_ASSEMBLY})\n genes = cursor2dict(cur)\n\n return [f\"gene::{x.get('hgnc_symbol')}::{x.get('ensembl_gene_id')}\" for x in genes]\n\n\ndef _search_variants(db_session: Session, query, limit):\n chromosome_from_region, start, end = _parse_genomic_region_from_query(query)\n chromosome_from_variant, pos, ref, alt = _parse_variant_from_query(query.upper())\n hgvs_type, entity, hgvs = _parse_hgvs_from_query(query)\n variants = []\n if chromosome_from_region is not None:\n variants = _search_variants_by_region(db_session, chromosome_from_region, start, end, limit)\n elif chromosome_from_variant is not None:\n variants = _search_variants_by_coordinates(db_session, chromosome_from_variant, pos, ref, alt, limit)\n elif hgvs_type is not None:\n variants = _search_variants_by_hgvs(db_session, hgvs_type, entity, hgvs, limit)\n\n return [f\"variant::{v.chrom}-{v.pos}-{v.ref}-{v.alt}::{v.chrom}-{v.pos}-{v.ref}-{v.alt}\" for v in variants]\n\n\ndef _search_variants_by_coordinates(db_session: Session, chrom, pos, ref, alt, limit) -> List[NewVariant]:\n \"\"\"\n Assuming a user is searching for 22-38212762-A-G or 22-16269829-T-*\n 22-382\n 22-382-A\n 22-16269-T-*\n 22:162\n 22-38212:a>g\n \"\"\"\n if chrom is not None and ref is not None and alt is not None:\n variants = (\n db_session.query(NewVariant)\n .filter(\n and_(\n NewVariant.chrom == chrom,\n cast(NewVariant.pos, Text).like(f\"{pos}%\"),\n NewVariant.ref == ref,\n NewVariant.alt == alt,\n )\n )\n .order_by(NewVariant.chrom.asc(), NewVariant.pos.asc())\n .limit(limit)\n .all()\n )\n elif chrom is not None and ref is not None and 
alt is None:\n variants = (\n db_session.query(NewVariant)\n .filter(and_(NewVariant.chrom == chrom, cast(NewVariant.pos, Text).like(f\"{pos}%\"), NewVariant.ref == ref))\n .order_by(NewVariant.chrom.asc(), NewVariant.pos.asc())\n .limit(limit)\n .all()\n )\n elif chrom is not None and ref is None:\n variants = (\n db_session.query(NewVariant)\n .filter(and_(NewVariant.chrom == chrom, cast(NewVariant.pos, Text).like(f\"{pos}%\")))\n .order_by(NewVariant.chrom.asc(), NewVariant.pos.asc())\n .limit(limit)\n .all()\n )\n return variants\n\n\ndef _search_variants_by_region(db_session: Session, chrom, start, end, limit) -> List[NewVariant]:\n \"\"\"\n Assuming a user is searching for 22:10000-20000 it will return all variants within that region\n \"\"\"\n variants = (\n db_session.query(NewVariant)\n .filter(and_(NewVariant.chrom == chrom, NewVariant.pos >= start, NewVariant.pos <= end,))\n .order_by(NewVariant.chrom.asc(), NewVariant.pos.asc())\n .limit(limit)\n .all()\n )\n return variants\n\n\ndef _search_variants_by_hgvs(db_session: Session, hgvs_type, entity, hgvs, limit) -> List[NewVariant]:\n \"\"\"\n Assuming a user is searching for ENSP00000451572.1:p.His383Tyr, ENST00000355467.4:c.30C>T,\n ENSG00000119685.1:c.412A>G, ENSG00000119685.1:p.Ile138Val or ENST00000505973.1:n.97C>T\n The queries need to do something like HGVSC like %query%, because the HGVS codes are a comma separated list in the\n corresponding text column. The query must start with either ENST or ENSP to be performed\n \"\"\"\n if hgvs_type == HGVSC:\n if ENSEMBL_TRANSCRIPT_REGEX.match(entity):\n # search for HGVS including the transcript id over all variants table\n # TODO: when we have a transcript in the variants table, improve this query to avoid whole table scan\n # NOTE: the % after transcript deals with missing transcript version, as a positive side effect this allow\n # for partial ids\n variants = (\n db_session.query(NewVariant, TranscriptConsequence)\n .filter(\n and_(\n NewVariant.chrom == TranscriptConsequence.chrom,\n NewVariant.pos == TranscriptConsequence.pos,\n NewVariant.ref == TranscriptConsequence.ref,\n NewVariant.alt == TranscriptConsequence.alt,\n TranscriptConsequence.hgvs_c.ilike(f\"%{entity}%:{hgvs}%\"),\n )\n )\n .with_entities(NewVariant)\n .order_by(NewVariant.chrom.asc(), NewVariant.pos.asc())\n .limit(limit)\n .all()\n )\n elif ENSEMBL_GENE_REGEX.match(entity):\n # search for HGVS on the variants for the given gene id\n ensembl_gene_id_without_version = remove_version_from_id(entity)\n variants = (\n db_session.query(NewVariant, TranscriptConsequence)\n .filter(\n and_(\n NewVariant.chrom == TranscriptConsequence.chrom,\n NewVariant.pos == TranscriptConsequence.pos,\n NewVariant.ref == TranscriptConsequence.ref,\n NewVariant.alt == TranscriptConsequence.alt,\n TranscriptConsequence.gene_id == ensembl_gene_id_without_version,\n TranscriptConsequence.hgvs_c.ilike(f\"%{hgvs}%\"),\n )\n )\n .with_entities(NewVariant)\n .order_by(NewVariant.chrom.asc(), NewVariant.pos.asc())\n .limit(limit)\n .all()\n )\n else:\n # search for HGVS on the variants for the given gene symbol\n variants = (\n db_session.query(NewVariant, TranscriptConsequence, NewGene)\n .filter(\n and_(\n NewVariant.chrom == TranscriptConsequence.chrom,\n NewVariant.pos == TranscriptConsequence.pos,\n NewVariant.ref == TranscriptConsequence.ref,\n NewVariant.alt == TranscriptConsequence.alt,\n TranscriptConsequence.gene_id == NewGene.ensembl_gene_id,\n NewGene.assembly == HG_ASSEMBLY,\n NewGene.hgnc_symbol == entity,\n 
TranscriptConsequence.hgvs_c.ilike(f\"%{hgvs}%\"),\n )\n )\n .with_entities(NewVariant)\n .order_by(NewVariant.chrom.asc(), NewVariant.pos.asc())\n .limit(limit)\n .all()\n )\n elif hgvs_type == HGVSP:\n if ENSEMBL_PROTEIN_REGEX.match(entity):\n # search for HGVS including the transcript id over all variants table\n # TODO: when we have a transcript in the variants table, improve this query to avoid whole table scan\n # NOTE: the % after transcript deals with missing transcript version, as a positive side effect this allow\n # for partial ids\n variants = (\n db_session.query(NewVariant, TranscriptConsequence)\n .filter(\n and_(\n NewVariant.chrom == TranscriptConsequence.chrom,\n NewVariant.pos == TranscriptConsequence.pos,\n NewVariant.ref == TranscriptConsequence.ref,\n NewVariant.alt == TranscriptConsequence.alt,\n TranscriptConsequence.hgvs_p.ilike(f\"%{entity}%:{hgvs}%\"),\n )\n )\n .with_entities(NewVariant)\n .order_by(NewVariant.chrom.asc(), NewVariant.pos.asc())\n .limit(limit)\n .all()\n )\n elif ENSEMBL_GENE_REGEX.match(entity):\n # search for HGVS on the variants for the given gene id\n ensembl_protein_id_without_version = remove_version_from_id(entity)\n variants = (\n db_session.query(NewVariant, TranscriptConsequence)\n .filter(\n and_(\n NewVariant.chrom == TranscriptConsequence.chrom,\n NewVariant.pos == TranscriptConsequence.pos,\n NewVariant.ref == TranscriptConsequence.ref,\n NewVariant.alt == TranscriptConsequence.alt,\n TranscriptConsequence.gene_id == ensembl_protein_id_without_version,\n TranscriptConsequence.hgvs_p.ilike(f\"%{hgvs}%\"),\n )\n )\n .with_entities(NewVariant)\n .order_by(NewVariant.chrom.asc(), NewVariant.pos.asc())\n .limit(limit)\n .all()\n )\n else:\n # search for HGVS on the variants for the given gene symbol\n variants = (\n db_session.query(NewVariant, TranscriptConsequence, NewGene)\n .filter(\n and_(\n NewVariant.chrom == TranscriptConsequence.chrom,\n NewVariant.pos == TranscriptConsequence.pos,\n NewVariant.ref == TranscriptConsequence.ref,\n NewVariant.alt == TranscriptConsequence.alt,\n TranscriptConsequence.gene_id == NewGene.ensembl_gene_id,\n NewGene.assembly == HG_ASSEMBLY,\n NewGene.hgnc_symbol == entity,\n TranscriptConsequence.hgvs_p.ilike(f\"%{hgvs}%\"),\n )\n )\n .with_entities(NewVariant)\n .order_by(NewVariant.chrom.asc(), NewVariant.pos.asc())\n .limit(limit)\n .all()\n )\n return variants\n\n\ndef remove_version_from_id(entity):\n ensembl_gene_id_without_version = re.sub(r\"\\..*\", \"\", entity)\n return ensembl_gene_id_without_version\n\n\ndef _parse_hgvs_from_query(query):\n match = HGVS_C_REGEX.match(query)\n hgvs_type, entity, hgvs = None, None, None\n if match:\n hgvs_type = HGVSC\n entity = match.group(1)\n hgvs = match.group(2)\n match = HGVS_P_REGEX.match(query)\n if match:\n hgvs_type = HGVSP\n entity = match.group(1)\n hgvs = match.group(2)\n return hgvs_type, entity, hgvs\n\n\ndef _parse_variant_from_query(query):\n \"\"\"\n Extract chromosome, position, reference and alternate from something looking like a variant\n It can extract only chromosome and position, chromosome, position and reference or chromosome, position, reference\n and alternate.\n It expects fields in the variant to be separated by -, : or >. 
This last one only for separation between reference\n and alternate.\n \"\"\"\n # TODO: remove the * from the accepted queries if we normalize indels to VCF-like format\n match = CHROMOSOME_POS_REF_ALT_REGEX.match(query)\n if match:\n return match.group(1), match.group(2), match.group(3), match.group(4)\n match = CHROMOSOME_POS_REF_REGEX.match(query)\n if match:\n return match.group(1), match.group(2), match.group(3), None\n match = CHROMOSOME_POS_REGEX.match(query)\n if match:\n return match.group(1), match.group(2), None, None\n return None, None, None, None\n\n\ndef _parse_genomic_region_from_query(query):\n \"\"\"\n Extract chromosome, start position and end position\n It expects fields to be separated by - or :\n \"\"\"\n match = GENOMIC_REGION_REGEX.match(query)\n chromosome = None\n start = None\n end = None\n if match:\n chromosome = match.group(1)\n start = int(match.group(2))\n end = int(match.group(3))\n return chromosome, start, end\n", "id": "719596", "language": "Python", "matching_score": 4.443031311035156, "max_stars_count": 3, "path": "views/autocomplete.py" }, { "content": "\"\"\"\nvariant view\n\"\"\"\nimport requests\nfrom flask import Response, jsonify\nfrom flask.globals import session\nfrom psycopg2 import sql\n\nfrom db.helpers import cursor2dict, query_user_config\nfrom views import HG_ASSEMBLY, MAX_PAGE_SIZE, application, phenoid_mapping, variant_file\nfrom views.auth import DEMO_USER, USER, requires_auth\nfrom views.autocomplete import CHROMOSOME_POS_REF_ALT_REGEX, ENSEMBL_GENE_REGEX, PATIENT_REGEX\nfrom views.exceptions import PhenopolisException\nfrom views.general import _get_pagination_parameters, cache_on_browser, process_for_display\nfrom views.postgres import get_db, session_scope\n\nmsg_var = \"Wrong variant id. Format must be chrom-pos-ref-alt\"\n\n# NOTE: simplified version for statistics and 'my_variants'\nsqlq_all_variants = sql.SQL(\n \"\"\"select distinct\nv.chrom as \"CHROM\", v.pos as \"POS\", v.\"ref\" as \"REF\", v.alt as \"ALT\",\nv.dbsnp, v.variant_class, v.dann, v.cadd_phred, v.revel, v.fathmm_score\n--,iv.status, iv.clinvar_id, iv.pubmed_id, iv.\"comment\", iv.dp, iv.\"fs\", iv.mq, iv.qd, iv.\"filter\"\nfrom phenopolis.individual_variant iv\njoin phenopolis.individual i on i.id = iv.individual_id\njoin public.users_individuals ui on ui.internal_id = i.phenopolis_id\njoin phenopolis.variant v on v.id = iv.variant_id\nwhere ui.\"user\" = %s\n\"\"\"\n)\n\n\[email protected](\"/<language>/variant/<variant_id>\")\[email protected](\"/variant/<variant_id>\")\n@requires_auth\n@cache_on_browser()\ndef variant(variant_id, language=\"en\") -> Response:\n\n # parse variant id\n chrom, pos, ref, alt = _parse_variant_id(variant_id)\n variant_id = f\"{chrom}-{pos}-{ref}-{alt}\"\n if chrom is None:\n response = jsonify(message=msg_var)\n response.status_code = 400\n return response\n variants = _get_variants(variant_id)\n if not variants:\n response = jsonify(message=\"Variant not found\")\n response.status_code = 404\n return response\n\n resp_variant = _config_variant(variants, language)\n return resp_variant\n\n\[email protected](\"/variant/preview/<variant_id>\")\n@requires_auth\n@cache_on_browser()\ndef variant_preview(variant_id) -> Response:\n\n # parse variant id\n chrom, pos, ref, alt = _parse_variant_id(variant_id)\n if chrom is None:\n response = jsonify(message=msg_var)\n response.status_code = 400\n return response\n\n return _get_preview(chrom, pos, ref, alt)\n\n\ndef _get_variants(target: str):\n \"\"\"Returns a list of dict variants\n 
Args:\n target (str):\n * variant_id (e.g '14-76156575-A-G') - and it will return the variant(s) dict for it\n '12-7241974-C-T', e.g., returns 2 dicts because of 'phenopolis.individual_variant'\n * gene_id (e.g. 'ENSG00000144285') - and it will return all variants linked to that gene\n * phenopolis_id (e.g. 'PH00008256') - and it will return all variants linked to that patient\n input 'target' must obey its respective string format.\n Returns:\n List[dict variant]: empty ([]), one or more variants depending on input target\n \"\"\"\n if CHROMOSOME_POS_REF_ALT_REGEX.match(target):\n c, p, r, a = target.split(\"-\")\n filter = sql.SQL(f\"\"\"where v.chrom = '{c}' and v.pos = {p} and v.\"ref\" = '{r}' and v.alt = '{a}'\"\"\")\n elif ENSEMBL_GENE_REGEX.match(target):\n filter = sql.SQL(f\"where vg.gene_id = '{target}'\")\n elif PATIENT_REGEX.match(target):\n filter = sql.SQL(f\"where i2.phenopolis_id = '{target}'\")\n else:\n return []\n\n sqlq_main = sql.SQL(\n \"\"\"\n select\n array_agg(distinct iv.zygosity order by iv.zygosity) as zigosity,\n array_agg(distinct concat(g.hgnc_symbol,'@',g.ensembl_gene_id)) as genes, -- to split\n v.chrom as \"CHROM\", v.pos as \"POS\", v.\"ref\" as \"REF\", v.alt as \"ALT\", v.cadd_phred, v.dann,\n v.fathmm_score, v.revel, -- new added\n -- removed: v.id\n vg.most_severe_consequence, string_agg(distinct vg.hgvs_c,',' order by vg.hgvs_c) as hgvsc,\n string_agg(distinct vg.hgvs_p,',' order by vg.hgvs_p) as hgvsp, -- via variant_gene\n iv.dp as \"DP\", iv.\"fs\" as \"FS\", iv.mq as \"MQ\", iv.\"filter\" as \"FILTER\", -- via individual_variant\n (\n select array_agg(i.phenopolis_id order by i.id)\n from phenopolis.individual i\n join phenopolis.individual_variant iv2 on iv2.individual_id = i.id and iv2.zygosity = 'HOM'\n where v.id = iv2.variant_id\n ) as \"HOM\",\n (\n select array_agg(i.phenopolis_id order by i.id)\n from phenopolis.individual i\n join phenopolis.individual_variant iv2 on iv2.individual_id = i.id and iv2.zygosity = 'HET'\n where v.id = iv2.variant_id\n ) as \"HET\",\n (\n select distinct on (ah.chrom,ah.pos,ah.\"ref\",ah.alt) ah.af from kaviar.annotation_hg19 ah\n where ah.chrom = v.chrom and ah.pos = v.pos and ah.\"ref\" = v.\"ref\" and ah.alt = v.alt\n order by ah.chrom,ah.pos,ah.\"ref\",ah.alt,ah.ac desc\n ) as af_kaviar,\n av.af as af_gnomad_genomes -- gnomad # NOTE: missing strand?\n -- deprecated: MLEAF, MLEAC\n -- need to be added (by Daniele): af_converge, af_hgvd, af_jirdc, af_krgdb, af_tommo,\n from phenopolis.variant v\n join phenopolis.individual_variant iv on iv.variant_id = v.id\n join phenopolis.individual i2 on i2.id = iv.individual_id\n left outer join phenopolis.variant_gene vg on vg.variant_id = v.id -- variant_gene not complete?\n left outer join ensembl.gene g on vg.gene_id = g.ensembl_gene_id\n and g.assembly = %(hga)s and g.chromosome ~ '^X|^Y|^[0-9]{1,2}'\n left outer join gnomad.annotation_v3 av\n on av.chrom = v.chrom and av.pos = v.pos and av.\"ref\" = v.\"ref\" and av.alt = v.alt\n --where v.chrom = '12' and v.pos = 7241974 and v.\"ref\" = 'C' and v.alt = 'T' -- 2 rows\n --where v.chrom = '7' and v.pos = 2303057 and v.\"ref\" = 'G' and v.alt = 'A' -- 1 row\n --where i2.phenopolis_id = 'PH00008256'\n --where vg.gene_id = 'ENSG00000144285'\n \"\"\"\n )\n\n sqlq_end = sql.SQL(\n \"\"\"\n group by \"CHROM\",\"POS\",\"REF\",\"ALT\",cadd_phred,dann,fathmm_score,revel,most_severe_consequence,\n \"DP\",\"FS\",\"MQ\",\"FILTER\", -- need for array_agg but disambiguates depending on individual_variant\n 
\"HOM\",\"HET\",af_kaviar,af_gnomad_genomes\n order by\n substring(v.chrom FROM '([0-9]+)')::int,\n v.pos, v.\"ref\", v.alt, iv.dp desc\n \"\"\"\n )\n\n sqlq = sqlq_main + filter + sqlq_end\n with get_db() as conn:\n with conn.cursor() as cur:\n cur.execute(sqlq, {\"hga\": HG_ASSEMBLY})\n variants = cursor2dict(cur)\n for v in variants:\n gs, gi = zip(*[x.split(\"@\") for x in sorted(v[\"genes\"])])\n v[\"gene_symbol\"] = \",\".join([x for x in gs if x])\n v[\"gene_id\"] = \",\".join([x for x in gi if x])\n if not v[\"HET\"]:\n v[\"HET\"] = []\n if not v[\"HOM\"]:\n v[\"HOM\"] = []\n v[\"HET_COUNT\"] = len(v[\"HET\"])\n v[\"HOM_COUNT\"] = len(v[\"HOM\"])\n v[\"AC\"] = v[\"HET_COUNT\"] + 2 * v[\"HOM_COUNT\"]\n v[\"AN\"] = (v[\"HET_COUNT\"] + v[\"HOM_COUNT\"]) * 2\n v[\"AF\"] = v[\"AC\"] / v[\"AN\"]\n v[\"af_hgvd\"] = \"TBA\" # to be added\n v[\"af_converge\"] = \"TBA\" # to be added\n v[\"af_jirdc\"] = \"TBA\" # to be added\n v[\"af_krgdb\"] = \"TBA\" # to be added\n v[\"af_tommo\"] = \"TBA\" # to be added\n # -------\n v[\"ID\"] = \"TBR\" # to be removed\n v[\"MLEAC\"] = \"TBR\" # to be removed\n v[\"MLEAF\"] = \"TBR\" # to be removed\n for x, y in v.items():\n if y is None:\n v[x] = \"NA\"\n\n return variants\n\n\ndef _config_variant(variants, language):\n with session_scope() as db_session:\n # get the genotype information for this variant from the VCF file\n if session[USER] == DEMO_USER:\n genotypes = []\n else:\n genotypes = _get_genotypes(variants[0][\"CHROM\"], variants[0][\"POS\"])\n application.logger.debug(f\"genotypes: {len(genotypes)} {genotypes[:1]}...\")\n process_for_display(db_session, variants)\n config = query_user_config(db_session=db_session, language=language, entity=\"variant\")\n config[0][\"metadata\"][\"data\"] = variants\n config[0][\"individuals\"][\"data\"] = variants\n config[0][\"frequency\"][\"data\"] = variants\n config[0][\"consequence\"][\"data\"] = variants\n config[0][\"genotypes\"][\"data\"] = genotypes\n return jsonify(config)\n\n\ndef _get_preview(chrom, pos, ref, alt):\n # queries for Clinvar clinical significance\n clinical_significance = _fetch_clinvar_clinical_significance(chrom, pos, ref, alt)\n preview = {\"Clinvar\": clinical_significance}\n # TODO: add more things here eg: GnomAD frequency, SO effect\n return jsonify(preview)\n\n\ndef _get_genotypes(chrom, pos):\n genotypes = []\n # reads the variant data from the VCF file (either local or on S3)\n try:\n v = next(variant_file(f\"{chrom}:{pos}-{pos}\"))\n lookup = {s: i for i, s in enumerate(variant_file.samples)}\n gts = [tuple(item if item >= 0 else None for item in alist[:2]) for alist in v.genotypes]\n rds = [x.item() if x >= 0 else None for x in v.gt_ref_depths]\n ads = [x.item() if x >= 0 else None for x in v.gt_alt_depths]\n dps = [x.item() if x >= 0 else None for x in v.gt_depths]\n genotypes = [\n {\n # NOTE: samples didn't use to care about which samples were authorized to view, now variants\n # belonging to non authorized are shown but the sample id is not\n \"sample\": [{\"display\": phenoid_mapping.get(s)}],\n \"GT\": gts[lookup[s]][:2],\n \"AD\": (rds[lookup[s]], ads[lookup[s]]),\n \"DP\": dps[lookup[s]],\n }\n for s in variant_file.samples\n if phenoid_mapping.get(s) is not None\n ]\n except StopIteration:\n application.logger.debug(f\"{_get_genotypes.__name__} for variant {chrom}:{pos} not FOUND\")\n except Exception as e:\n application.logger.error(f\"{_get_genotypes.__name__} FAILED: {e}\")\n return genotypes\n\n\ndef _fetch_clinvar_clinical_significance(chrom, pos, ref, 
alt):\n # TODO: replace this by a query to our database once we have this dataset loaded\n clinical_significance = None\n url = \"https://myvariant.info/v1/variant/chr%s:g.%d%s>%s?fields=clinvar.rcv.clinical_significance&dotfield=true\" % (\n chrom,\n pos,\n ref,\n alt,\n )\n x = requests.get(url).json()\n if x:\n clinical_significance = str(x.get(\"clinvar.rcv.clinical_significance\", \"\"))\n return clinical_significance\n\n\ndef _parse_variant_id(variant_id):\n chrom, pos, ref, alt = None, None, None, None\n match = CHROMOSOME_POS_REF_ALT_REGEX.match(variant_id)\n if match:\n chrom = match.group(1)\n pos = int(match.group(2))\n ref = match.group(3)\n alt = match.group(4)\n return chrom, pos, ref, alt\n\n\[email protected](\"/my_variants\")\n@requires_auth\ndef get_all_variants():\n with session_scope() as db_session:\n try:\n limit, offset = _get_pagination_parameters()\n if limit > MAX_PAGE_SIZE:\n return (\n jsonify(message=f\"The maximum page size for variants is {MAX_PAGE_SIZE}\"),\n 400,\n )\n sqlq = sqlq_all_variants + sql.SQL(f\"limit {limit} offset {offset}\")\n with get_db() as conn:\n with conn.cursor() as cur:\n cur.execute(sqlq, [session[USER]])\n variants = cursor2dict(cur)\n process_for_display(db_session, variants)\n except PhenopolisException as e:\n return jsonify(success=False, message=str(e)), e.http_status\n return jsonify(variants), 200\n", "id": "9029198", "language": "Python", "matching_score": 6.491871356964111, "max_stars_count": 3, "path": "views/variant.py" }, { "content": "\"\"\"\nGene view\n\"\"\"\nfrom views.exceptions import PhenopolisException\nfrom views.variant import _get_variants\nfrom psycopg2 import sql\nfrom db.helpers import query_user_config, cursor2dict\nfrom sqlalchemy.orm import Session\nfrom flask import jsonify, session\nfrom views import HG_ASSEMBLY, MAX_PAGE_SIZE, application\nfrom views.auth import USER, requires_auth, is_demo_user\nfrom views.postgres import get_db, session_scope\nfrom views.general import _get_pagination_parameters, process_for_display, cache_on_browser\n\n# NOTE: using tables: ensembl.gene, ensembl.gene_synonym, ensembl.transcript, ensembl.transcript_uniprot\nsqlq_main = sql.SQL(\n \"\"\"select distinct\n(\n select array_agg(distinct tu.uniprotswissprot order by tu.uniprotswissprot)\n from ensembl.transcript_uniprot tu\n join ensembl.transcript t on tu.transcript = t.identifier\n where t.ensembl_gene_id = g.ensembl_gene_id\n) AS uniprot,\n(\n select array_agg(distinct concat(t.ensembl_transcript_id,'@',t.ensembl_peptide_id,'@',t.canonical))\n from ensembl.transcript t where t.ensembl_gene_id = g.ensembl_gene_id\n and t.assembly = g.assembly\n) AS transcripts,\n(\n select array_agg(distinct gs.external_synonym order by gs.external_synonym)\n from ensembl.gene_synonym gs\n where gs.gene = g.identifier\n) AS other_names,\ng.ensembl_gene_id as gene_id, g.\"version\", g.description as full_gene_name, g.chromosome as chrom, g.\"start\",\ng.\"end\" as \"stop\", g.strand, g.band, g.biotype, g.hgnc_id, g.hgnc_symbol as gene_symbol,\ng.percentage_gene_gc_content, g.assembly\nfrom ensembl.gene g\nleft outer join ensembl.gene_synonym gs on gs.gene = g.identifier\nwhere g.assembly = %(hga)s\nand g.chromosome ~ '^X|^Y|^[0-9]{1,2}'\n\"\"\"\n)\n\n\[email protected](\"/<language>/gene/<gene_id>\")\[email protected](\"/<language>/gene/<gene_id>/<subset>\")\[email protected](\"/gene/<gene_id>\")\[email protected](\"/gene/<gene_id>/<subset>\")\n@requires_auth\n@cache_on_browser()\ndef gene(gene_id, subset=\"all\", language=\"en\"):\n with 
session_scope() as db_session:\n config = query_user_config(db_session=db_session, language=language, entity=\"gene\")\n gene = _get_gene(db_session, gene_id)\n if not gene:\n response = jsonify(message=\"Gene not found\")\n response.status_code = 404\n return response\n gene[0][\"gene_name\"] = gene[0][\"gene_symbol\"]\n config[0][\"metadata\"][\"data\"] = gene\n chrom = config[0][\"metadata\"][\"data\"][0][\"chrom\"]\n start = config[0][\"metadata\"][\"data\"][0][\"start\"]\n stop = config[0][\"metadata\"][\"data\"][0][\"stop\"]\n gene_id = config[0][\"metadata\"][\"data\"][0][\"gene_id\"]\n gene_name = config[0][\"metadata\"][\"data\"][0][\"gene_symbol\"]\n for d in config[0][\"metadata\"][\"data\"]:\n ets, eps, cf = [], [], []\n if d[\"transcripts\"]:\n ets, eps, cf = zip(*[x.split(\"@\") for x in sorted(d[\"transcripts\"])])\n d[\"transcript_ids\"] = \",\".join(ets)\n d[\"peptide_id\"] = \",\".join(eps)\n d[\"canonical_transcript\"] = \"\"\n d[\"canonical_peptide\"] = \"\"\n if \"t\" in cf:\n idx = cf.index(\"t\")\n d[\"canonical_transcript\"] = ets[idx]\n d[\"canonical_peptide\"] = eps[idx]\n d[\"external_services\"] = [\n {\"display\": \"GnomAD Browser\", \"href\": f\"http://gnomad.broadinstitute.org/gene/{gene_id}\"},\n {\"display\": \"GeneCards\", \"href\": f\"http://www.genecards.org/cgi-bin/carddisp.pl?gene={gene_name}\"},\n ]\n d[\"genome_browser\"] = [\n {\n \"display\": \"Ensembl Browser\",\n \"href\": f\"http://{HG_ASSEMBLY.lower()}.ensembl.org/Homo_sapiens/Gene/Summary?g={gene_id}\",\n },\n {\n \"display\": \"UCSC Browser\",\n \"href\": f\"http://genome.ucsc.edu/cgi-bin/hgTracks?db=hg19&position=chr{chrom}:{start}-{stop}\",\n },\n ]\n d[\"other\"] = [\n {\"display\": \"Wikipedia\", \"href\": f\"http://en.wikipedia.org/{gene_name}\"},\n {\"display\": \"Pubmed Search\", \"href\": f\"http://www.ncbi.nlm.nih.gov/pubmed?term={gene_name}\"},\n {\"display\": \"Wikigenes\", \"href\": f\"http://www.wikigenes.org/?search={gene_name}\"},\n {\"display\": \"GTEx (expression)\", \"href\": f\"http://www.gtexportal.org/home/gene/{gene_name}\"},\n ]\n # d[\"related_hpo\"] = [\n # {\n # \"display\": c.execute(\n # \"select hpo_name from hpo where hpo_id='%s' limit 1\" % hpo_id\n # ).fetchone()[0],\n # \"end_href\": hpo_id,\n # }\n # for hpo_id, in c.execute(\n # \"select hpo_id from gene_hpo where gene_symbol='%s'\" % gene_name\n # ).fetchall()\n # ]\n d[\"related_hpo\"] = []\n config[0][\"variants\"][\"data\"] = _get_variants(gene_id)\n config[0][\"metadata\"][\"data\"][0][\"number_of_variants\"] = len(config[0][\"variants\"][\"data\"])\n cadd_gt_20 = 0\n for v in config[0][\"variants\"][\"data\"]:\n if v[\"cadd_phred\"] and v[\"cadd_phred\"] != \"NA\" and float(v[\"cadd_phred\"]) >= 20:\n cadd_gt_20 += 1\n config[0][\"preview\"] = [\n [\"Number of variants\", config[0][\"metadata\"][\"data\"][0][\"number_of_variants\"]],\n [\"CADD > 20\", cadd_gt_20],\n ]\n if subset == \"preview\":\n return jsonify([{subset: y[\"preview\"]} for y in config])\n process_for_display(db_session, config[0][\"variants\"][\"data\"])\n # print x[0]['preview']\n # print x[0]['variants']['data'][0]\n if is_demo_user() and gene_name not in [\"TTLL5\", \"DRAM2\"]:\n config[0][\"variants\"][\"data\"] = []\n if subset == \"all\":\n return jsonify(config)\n return jsonify([{subset: y[subset]} for y in config])\n\n\ndef _get_gene(db_session: Session, gene_id):\n g_id = gene_id.upper()\n sqlq_end = sql.SQL(\"and (g.ensembl_gene_id = %(g_id)s or upper(g.hgnc_symbol) = %(g_id)s)\")\n sqlq = sqlq_main + sqlq_end\n with 
get_db() as conn:\n with conn.cursor() as cur:\n cur.execute(sqlq, {\"g_id\": g_id, \"hga\": HG_ASSEMBLY})\n gene = cursor2dict(cur)\n if not gene:\n application.logger.info(\"Using gene_synonym\")\n sqlq_end = sql.SQL(\"and upper(gs.external_synonym) = %(g_id)s\")\n sqlq = sqlq_main + sqlq_end\n cur.execute(sqlq, {\"g_id\": g_id, \"hga\": HG_ASSEMBLY})\n gene = cursor2dict(cur)\n return gene\n\n\[email protected](\"/my_genes\")\n@requires_auth\ndef get_all_genes():\n with session_scope() as db_session:\n try:\n limit, offset = _get_pagination_parameters()\n if limit > MAX_PAGE_SIZE:\n return (\n jsonify(message=\"The maximum page size for genes is {}\".format(MAX_PAGE_SIZE)),\n 400,\n )\n sqlq_end = sql.SQL(\n \"\"\"\n and exists (\n select 1 from public.users_individuals ui\n join phenopolis.individual i on i.phenopolis_id = ui.internal_id\n join phenopolis.individual_gene ig on i.id = ig.individual_id and ig.gene_id = g.identifier\n where ui.\"user\" = %(user)s)\n \"\"\"\n )\n sqlq = sqlq_main + sqlq_end + sql.SQL(\"limit {} offset {}\".format(limit, offset))\n with get_db() as conn:\n with conn.cursor() as cur:\n cur.execute(sqlq, {\"user\": session[USER], \"hga\": HG_ASSEMBLY})\n genes = cursor2dict(cur)\n process_for_display(db_session, genes)\n except PhenopolisException as e:\n return jsonify(success=False, message=str(e)), e.http_status\n return jsonify(genes), 200\n", "id": "10236347", "language": "Python", "matching_score": 5.661782741546631, "max_stars_count": 24, "path": "views/gene.py" }, { "content": "\"\"\"\nHPO view - Human Phenotype Ontology\n\"\"\"\nfrom flask import jsonify, session\nfrom psycopg2 import sql\n\nfrom db.helpers import cursor2dict, query_user_config\nfrom views import MAX_PAGE_SIZE, application\nfrom views.auth import USER, requires_auth\nfrom views.exceptions import PhenopolisException\nfrom views.general import _get_pagination_parameters, cache_on_browser, process_for_display\nfrom views.individual import _get_authorized_individuals\nfrom views.postgres import get_db, session_scope\n\n\[email protected](\"/<language>/hpo/<hpo_id>\")\[email protected](\"/<language>/hpo/<hpo_id>/<subset>\")\[email protected](\"/hpo/<hpo_id>\")\[email protected](\"/hpo/<hpo_id>/<subset>\")\n@requires_auth\n@cache_on_browser()\ndef hpo(hpo_id=\"HP:0000001\", subset=\"all\", language=\"en\"):\n\n with session_scope() as db_session:\n config = query_user_config(db_session=db_session, language=language, entity=\"hpo\")\n field = \"hpo_id\"\n if not hpo_id.startswith(\"HP:\"):\n field = \"name\"\n sql_query = sql.SQL(\n rf\"\"\"\n select\n t.id, t.hpo_id, t.name\n from\n hpo.term t\n where\n t.id in (\n select\n regexp_split_to_table(path::text, '\\.')::int as ancestor\n from\n hpo.is_a_path tpath\n join hpo.term ht on\n tpath.term_id = ht.id\n where\n ht.{field} = %s )\n order by\n t.id\"\"\"\n )\n sqlq = sql_query\n with get_db() as conn:\n with conn.cursor() as cur:\n if subset == \"preview\":\n ni = _preview(cur, session[USER], hpo_id)\n config[0][\"preview\"] = [[\"Number of Individuals\", ni]]\n return jsonify([{subset: y[\"preview\"]} for y in config])\n\n cur.execute(sqlq, [hpo_id])\n res = cursor2dict(cur)\n application.logger.debug(res)\n data = [x for x in res if x[field] == hpo_id]\n if not data:\n response = jsonify(message=\"HPO not found\")\n response.status_code = 404\n return response\n d_hpo = data[0]\n h_id = d_hpo[\"id\"]\n hpo_id = d_hpo[\"hpo_id\"]\n hpo_name = d_hpo[\"name\"]\n parent_phenotypes = [\n {\"display\": i, \"end_href\": j} for j, i in [(h, n) 
for _i, h, n in [ii.values() for ii in res]]\n ]\n # query to give the ancestors for a given hpo for a given user for all patients this user has access\n sqlq = sql.SQL(\n \"\"\"\n select distinct i.id, i.external_id, i.phenopolis_id, i.sex, i.consanguinity,\n (select array_agg(distinct g.hgnc_symbol order by g.hgnc_symbol)\n from phenopolis.individual_gene ig\n join ensembl.gene g on g.identifier = ig.gene_id\n where ig.individual_id = i.id\n ) AS genes,\n (\n select array_agg(distinct concat(t.hpo_id,'@', t.\"name\"))\n from hpo.term t\n join phenopolis.individual_feature if2 on t.id = if2.feature_id\n where i.id = if2.individual_id\n and if2.\"type\" ='observed'\n ) AS simplified_observed_features_names\n from phenopolis.individual i\n join public.users_individuals ui on ui.internal_id = i.phenopolis_id\n join phenopolis.individual_feature if3 on i.id = if3.individual_id and if3.\"type\" = 'observed'\n join hpo.term t2 on t2.id = if3.feature_id\n join hpo.is_a_path p on p.term_id = t2.id\n where ui.\"user\" = %s\n and p.path ~ %s\n order by i.id\n \"\"\"\n )\n with get_db() as conn:\n with conn.cursor() as cur:\n cur.execute(sqlq, (session[USER], f\"*.{h_id}.*\"))\n\n individuals = cursor2dict(cur)\n\n if hpo_id != \"HP:0000001\":\n cur.execute(\"select * from phenogenon where hpo_id=%s\", [hpo_id])\n config[0][\"phenogenon_recessive\"][\"data\"] = [\n {\n \"gene_id\": [{\"display\": gene_id, \"end_href\": gene_id}],\n \"hpo_id\": hpo_id,\n \"hgf_score\": hgf,\n \"moi_score\": moi_score,\n }\n for gene_id, hpo_id, hgf, moi_score, in cur.fetchall()\n ]\n # NOTE: redundant line below commented\n # cur.execute(\"select * from phenogenon where hpo_id=%s\", [hpo_id])\n config[0][\"phenogenon_dominant\"][\"data\"] = config[0][\"phenogenon_recessive\"][\"data\"]\n # Chr,Start,End,HPO,Symbol,ENSEMBL,FisherPvalue,SKATO,variants,CompoundHetPvalue,HWEp,min_depth,nb_alleles_cases,case_maf,nb_ctrl_homs,nb_case_homs,MaxMissRate,nb_alleles_ctrls,nb_snps,nb_cases,minCadd,MeanCallRateCtrls,MeanCallRateCases,OddsRatio,MinSNPs,nb_ctrl_hets,total_maf,MaxCtrlMAF,ctrl_maf,nb_ctrls,nb_case_hets,maxExac\n cur.execute(\"select Symbol,FisherPvalue,SKATO,OddsRatio,variants from skat where HPO= %s\", [hpo_id])\n config[0][\"skat\"][\"data\"] = [\n {\n \"gene_id\": [{\"display\": gene_id, \"end_href\": gene_id}],\n \"fisher_p_value\": fisher_p_value,\n \"skato\": skato,\n \"odds_ratio\": odds_ratio,\n \"variants\": [],\n }\n for gene_id, fisher_p_value, skato, odds_ratio, _variants in cur.fetchall()[:100]\n ]\n application.logger.debug(len(individuals))\n config[0][\"preview\"] = [[\"Number of Individuals\", len(individuals)]]\n for ind in individuals[:]:\n ind[\"internal_id\"] = [{\"display\": ind[\"phenopolis_id\"]}]\n if ind[\"genes\"]:\n ind[\"genes\"] = [{\"display\": i} for i in ind[\"genes\"]]\n else:\n ind[\"genes\"] = []\n ind[\"simplified_observed_features_names\"] = [\n {\"display\": j, \"end_href\": i}\n for i, j, in [\n x.split(\"@\")\n for x in sorted(ind[\"simplified_observed_features_names\"], key=lambda x: x.split(\"@\")[1])\n ]\n ]\n config[0][\"individuals\"][\"data\"] = individuals\n config[0][\"metadata\"][\"data\"] = [\n {\"name\": hpo_name, \"id\": hpo_id, \"count\": len(individuals), \"parent_phenotypes\": parent_phenotypes}\n ]\n process_for_display(db_session, config[0][\"metadata\"][\"data\"])\n if subset == \"all\":\n return jsonify(config)\n else:\n return jsonify([{subset: y[subset]} for y in config])\n\n\ndef _preview(cur, user, hpo_id):\n q1 = sql.SQL(\n \"\"\"select * from 
phenopolis.individual i\n join public.users_individuals ui on ui.internal_id = i.phenopolis_id\n and ui.\"user\" = %s\n \"\"\"\n )\n q2 = sql.SQL(\n f\"\"\"where exists (\n select 1 from hpo.is_a_path p, hpo.term t, phenopolis.individual_feature if2\n where p.term_id = t.id\n and if2.feature_id = t.id and i.id = if2.individual_id and if2.\"type\" = 'observed'\n and p.path ~ (\n select ('*.' || id || '.*')::lquery\n from hpo.term t2\n where t2.hpo_id = '{hpo_id}'\n )\n )\n \"\"\"\n )\n q = q1\n if hpo_id != \"HP:0000001\":\n q = q1 + q2\n cur.execute(q, [user])\n return cur.rowcount\n\n\[email protected](\"/my_hpos\")\n@requires_auth\ndef get_all_hpos():\n with session_scope() as db_session:\n individuals = _get_authorized_individuals(db_session)\n sqlq_all_hpos = sql.SQL(\n \"\"\"\n select distinct t.hpo_id, t.\"name\" from phenopolis.individual_feature ife\n join hpo.term t on t.id = ife.feature_id\n where ife.individual_id = any(%s) and ife.type in ('observed')\n \"\"\"\n )\n try:\n limit, offset = _get_pagination_parameters()\n if limit > MAX_PAGE_SIZE:\n return (\n jsonify(message=f\"The maximum page size for HPO terms is {MAX_PAGE_SIZE}\"),\n 400,\n )\n sqlq = sqlq_all_hpos + sql.SQL(f\"limit {limit} offset {offset}\")\n with get_db() as conn:\n with conn.cursor() as cur:\n cur.execute(sqlq, [[x.id for x in individuals]])\n hpos = cursor2dict(cur)\n process_for_display(db_session, hpos)\n except PhenopolisException as e:\n return jsonify(success=False, message=str(e)), e.http_status\n return jsonify(hpos), 200\n", "id": "6671846", "language": "Python", "matching_score": 6.083082675933838, "max_stars_count": 3, "path": "views/hpo.py" }, { "content": "\"\"\"\nIndividual view\n\"\"\"\nimport functools\nimport operator\nfrom collections import Counter\nfrom typing import Dict, List, Optional, Tuple, Union\n\nfrom bidict import bidict\nfrom db.helpers import cursor2dict, query_user_config\nfrom db.model import Individual, Sex, UserIndividual\nfrom flask import jsonify, request, session\nfrom psycopg2 import sql\nfrom sqlalchemy import and_, or_\nfrom sqlalchemy.orm import Session\n\nfrom views import HG_ASSEMBLY, MAX_PAGE_SIZE, application\nfrom views.auth import ADMIN_USER, DEMO_USER, USER, is_demo_user, requires_auth\nfrom views.exceptions import PhenopolisException\nfrom views.general import _get_pagination_parameters, cache_on_browser, process_for_display\nfrom views.helpers import _get_json_payload\nfrom views.postgres import get_db, session_scope\nfrom views.variant import _get_variants\n\nMAPPING_SEX_REPRESENTATIONS = bidict({\"male\": Sex.M, \"female\": Sex.F, \"unknown\": Sex.U})\n\n\[email protected](\"/individual\")\n@requires_auth\ndef get_all_individuals():\n with session_scope() as db_session:\n try:\n limit, offset = _get_pagination_parameters()\n if limit > MAX_PAGE_SIZE:\n return (\n jsonify(message=\"The maximum page size for individuals is {}\".format(MAX_PAGE_SIZE)),\n 400,\n )\n individuals = _fetch_all_individuals(db_session=db_session, offset=offset, limit=limit)\n for ind in individuals:\n a1, a2 = zip(*[x.split(\"@\") for x in sorted(ind[\"ancestor_observed_features\"])])\n o1, o2 = zip(*[x.split(\"@\") for x in sorted(ind[\"observed_features\"])])\n # NOTE: casting list in strings just for frontend, but list is better, I guess (Alan)\n ind[\"ancestor_observed_features\"] = \",\".join(a1)\n ind[\"ancestor_observed_features_names\"] = \",\".join(a2)\n ind[\"observed_features\"] = \",\".join(o1)\n ind[\"observed_features_names\"] = \",\".join(o2)\n 
ind[\"simplified_observed_features\"] = \"\"\n ind[\"simplified_observed_features_names\"] = \"\"\n ind[\"phenopolis_id\"] = ind[\"internal_id\"]\n if ind[\"unobserved_features\"]:\n ind[\"unobserved_features\"] = \",\".join(ind[\"unobserved_features\"])\n else:\n ind[\"unobserved_features\"] = \"\"\n if ind[\"genes\"]:\n ind[\"genes\"] = \",\".join(ind[\"genes\"])\n else:\n ind[\"genes\"] = \"\"\n\n except PhenopolisException as e:\n return jsonify(success=False, message=str(e)), e.http_status\n return jsonify(individuals), 200\n\n\[email protected](\"/<language>/individual/<phenopolis_id>\")\[email protected](\"/<language>/individual/<phenopolis_id>/<subset>\")\[email protected](\"/individual/<phenopolis_id>\")\[email protected](\"/individual/<phenopolis_id>/<subset>\")\n@requires_auth\n@cache_on_browser()\ndef get_individual_by_id(phenopolis_id, subset=\"all\", language=\"en\"):\n with session_scope() as db_session:\n config = query_user_config(db_session=db_session, language=language, entity=\"individual\")\n individual = _fetch_authorized_individual(db_session, phenopolis_id)\n # unauthorized access to individual\n if not individual:\n response = jsonify(message=\"Patient not found\")\n response.status_code = 404\n return response\n\n if subset == \"preview\":\n individual_view = _individual_preview(config, individual)\n else:\n individual_view = _individual_complete_view(db_session, config, individual, subset)\n return jsonify(individual_view)\n\n\[email protected](\"/<language>/update_patient_data/<phenopolis_id>\", methods=[\"POST\"])\[email protected](\"/update_patient_data/<phenopolis_id>\", methods=[\"POST\"])\n@requires_auth\ndef update_patient_data(phenopolis_id):\n if is_demo_user():\n return jsonify(error=\"Demo user not authorised\"), 405\n\n with session_scope() as db_session:\n individual = _fetch_authorized_individual(db_session, phenopolis_id)\n # unauthorized access to individual\n if not individual:\n response = jsonify(\n message=\"Sorry, either the patient does not exist or you are not permitted to see this patient\"\n )\n response.status_code = 404\n return response\n application.logger.debug(request.form)\n consanguinity = request.form.get(\"consanguinity_edit[]\", \"unknown\")\n gender = request.form.get(\"gender_edit[]\", \"unknown\")\n genes = request.form.getlist(\"genes[]\")\n features = request.form.getlist(\"feature[]\")\n if not len(features):\n features = [\"All\"]\n\n # TODO: simplify this gender translation\n unk_obj = MAPPING_SEX_REPRESENTATIONS.get(\"unknown\")\n gender = MAPPING_SEX_REPRESENTATIONS.get(gender, unk_obj)\n hpos = _get_hpos(features)\n _update_individual(consanguinity, gender, genes, hpos, individual)\n return jsonify({\"success\": True}), 200\n\n\[email protected](\"/individual\", methods=[\"POST\"])\n@requires_auth\ndef create_individual():\n if is_demo_user():\n return jsonify(error=\"Demo user not authorised\"), 405\n # checks individuals validity\n with session_scope() as db_session:\n try:\n dlist = _get_json_payload()\n new_individuals = []\n for d in dlist:\n genes = []\n if d.get(\"observed_features\"):\n feats = d.pop(\"observed_features\").split(\",\")\n else:\n feats = []\n if d.get(\"genes\") or d.get(\"genes\") == \"\":\n genes = d.pop(\"genes\").split(\",\")\n i = Individual(**d)\n _check_individual_valid(db_session, i)\n new_individuals.append((i, genes, feats))\n except PhenopolisException as e:\n application.logger.error(str(e))\n return jsonify(success=False, error=str(e)), e.http_status\n\n request_ok = True\n 
http_status = 200\n message = \"Individuals were created\"\n ids_new_individuals = []\n try:\n # generate a new unique id for the individual\n for trio in new_individuals:\n i, g, f = trio\n # insert individual\n db_session.add(i)\n # to refresh i and with new id and phenopolis_id, both lines below needed (black magic)\n db_session.query(Individual).count()\n db_session.refresh(i)\n # add entry to user_individual\n # TODO: enable access to more users than the creator\n db_session.add(UserIndividual(user=session[USER], internal_id=i.phenopolis_id))\n if session[USER] != ADMIN_USER:\n db_session.add(UserIndividual(user=ADMIN_USER, internal_id=i.phenopolis_id))\n db_session.commit()\n _insert_genes(i, g)\n _insert_feats(i, f)\n ids_new_individuals.append(i.phenopolis_id)\n except PhenopolisException as e:\n application.logger.exception(e)\n request_ok = False\n message = str(e)\n http_status = e.http_status\n return jsonify(success=request_ok, message=message, id=\",\".join(ids_new_individuals)), http_status\n\n\ndef _insert_genes(individual, genes):\n with get_db() as conn:\n with conn.cursor() as cur:\n cur.execute(\n \"\"\"\n insert into phenopolis.individual_gene (individual_id, gene_id) select %(id)s as individual_id,\n identifier from ensembl.gene where hgnc_symbol = any(%(genes)s::text[]) and assembly = %(hga)s;\n \"\"\",\n {\"id\": individual.id, \"genes\": genes, \"hga\": HG_ASSEMBLY},\n )\n\n\ndef _insert_feats(individual, hpo_ids):\n with get_db() as conn:\n with conn.cursor() as cur:\n cur.execute(\n \"\"\"\n insert into phenopolis.individual_feature (individual_id, feature_id, type)\n select %(id)s as individual_id, t.id as feature_id, unnest('{observed,simplified}'::text[]) as type\n from hpo.term t where t.hpo_id = any(%(hpo_ids)s::text[]) order by type, feature_id;\n \"\"\",\n {\"id\": individual.id, \"hpo_ids\": hpo_ids},\n )\n\n\[email protected](\"/individual/<phenopolis_id>\", methods=[\"DELETE\"])\n@requires_auth\ndef delete_individual(phenopolis_id):\n with session_scope() as db_session:\n individual = _fetch_authorized_individual(db_session, phenopolis_id)\n request_ok = True\n http_status = 200\n message = f\"Patient {phenopolis_id} has been deleted.\"\n if individual:\n try:\n user_individuals = (\n db_session.query(UserIndividual).filter(UserIndividual.internal_id == phenopolis_id).all()\n )\n for ui in user_individuals:\n db_session.delete(ui)\n db_session.delete(individual)\n except Exception as e:\n application.logger.exception(e)\n request_ok = False\n message = str(e)\n http_status = e.http_status\n else:\n request_ok = False\n message = f\"Patient {phenopolis_id} does not exist.\"\n http_status = 404\n return jsonify(success=request_ok, message=message), http_status\n\n\ndef _check_individual_valid(db_session: Session, new_individual: Individual):\n if not new_individual.as_dict() or not new_individual.sex:\n raise PhenopolisException(\"Null individual\", 400)\n\n exist_internal_id = (\n db_session.query(Individual.phenopolis_id)\n .filter(\n or_(\n Individual.phenopolis_id == new_individual.phenopolis_id,\n Individual.external_id == new_individual.external_id,\n )\n )\n .all()\n )\n\n if len(exist_internal_id) > 0:\n raise PhenopolisException(\"Individual already exists.\", 400)\n # TODO: add more validations here\n\n\ndef _individual_complete_view(db_session: Session, config, individual: Individual, subset):\n variants = _get_variants(individual.phenopolis_id)\n hom_vars = [x for x in variants if \"HOM\" in x[\"zigosity\"]]\n het_vars = [x for x in variants 
if \"HET\" in x[\"zigosity\"]]\n # hom variants\n config[0][\"rare_homs\"][\"data\"] = hom_vars\n # rare variants\n config[0][\"rare_variants\"][\"data\"] = het_vars\n # rare_comp_hets\n genes: List[str] = functools.reduce(operator.iconcat, [v[\"gene_symbol\"].split(\",\") for v in het_vars], [])\n gene_counter = Counter(genes)\n genes = [x for x, y in gene_counter.items() if y > 1 and x]\n rare_comp_hets_variants = []\n for v in het_vars:\n if v[\"gene_symbol\"]:\n for g in v[\"gene_symbol\"].split(\",\"):\n if g in genes:\n rare_comp_hets_variants.append(v)\n config[0][\"rare_comp_hets\"][\"data\"] = rare_comp_hets_variants\n\n if not config[0][\"metadata\"][\"data\"]:\n config[0][\"metadata\"][\"data\"] = [dict()]\n config = _map_individual2output(config, individual)\n process_for_display(db_session, config[0][\"rare_homs\"][\"data\"])\n process_for_display(db_session, config[0][\"rare_variants\"][\"data\"])\n if subset == \"all\":\n return config\n else:\n return [{subset: y[subset]} for y in config]\n\n\ndef _individual_preview(config, individual: Individual):\n sql_zig = \"\"\"select iv.zygosity,count(*) from phenopolis.variant v\n join phenopolis.individual_variant iv on iv.variant_id = v.id\n where iv.individual_id = %s\n group by iv.zygosity\"\"\"\n sql_comp = \"\"\"select sum(c)::int as total from(\n select count(v.*) as c\n from phenopolis.variant_gene vg\n join phenopolis.individual_variant iv on iv.variant_id = vg.variant_id\n join phenopolis.variant v on v.id = iv.variant_id\n where iv.individual_id = %s and iv.zygosity = 'HET'\n group by vg.gene_id having count(v.*) > 1\n ) as com\"\"\"\n with get_db() as conn:\n with conn.cursor() as cur:\n cur.execute(sql_zig, [individual.id])\n hh = dict(cur.fetchall())\n cur.execute(sql_comp, [individual.id])\n hc = cur.fetchone()[0] or 0\n\n hom_count = hh.get(\"HOM\", 0)\n het_count = hh.get(\"HET\", 0)\n comp_het_count = hc\n external_id = individual.external_id\n if session[USER] == DEMO_USER:\n external_id = \"_demo_\"\n # TODO: make a dict of this and not a list of lists\n config[0][\"preview\"] = [\n [\"External_id\", external_id],\n [\"Sex\", individual.sex.name],\n [\"Genes\", [g[0] for g in _get_genes_for_individual(individual)]],\n [\"Features\", [f[1] for f in _get_feature_for_individual(individual)]],\n [\"Number of hom variants\", hom_count],\n [\"Number of compound hets\", comp_het_count],\n [\"Number of het variants\", het_count],\n ]\n return config\n\n\ndef _map_individual2output(config, individual: Individual):\n config[0][\"metadata\"][\"data\"][0].update(individual.as_dict())\n if session[USER] == DEMO_USER:\n config[0][\"metadata\"][\"data\"][0][\"external_id\"] = \"_demo_\"\n config[0][\"metadata\"][\"data\"][0][\"internal_id\"] = [{\"display\": individual.phenopolis_id}]\n config[0][\"metadata\"][\"data\"][0][\"simplified_observed_features\"] = [\n {\"display\": x[1], \"end_href\": x[0]} for x in _get_feature_for_individual(individual)\n ]\n genes = _get_genes_for_individual(individual)\n config[0][\"metadata\"][\"data\"][0][\"genes\"] = [{\"display\": i[0]} for i in genes]\n return config\n\n\ndef _get_feature_for_individual(\n individual: Union[Individual, dict], atype: str = \"simplified\"\n) -> List[Tuple[str, str]]:\n \"\"\"\n returns observed_features for a given individual\n options are: simplified (default), observed, unobserved\n e.g. 
[('HP:0000007', 'Autosomal recessive inheritance'), ('HP:0000505', 'Visual impairment')]\n \"\"\"\n if isinstance(individual, Individual):\n ind_id = individual.id\n elif isinstance(individual, dict):\n ind_id = individual.get(\"id\")\n with get_db() as conn:\n with conn.cursor() as cur:\n cur.execute(\n \"\"\"\n select distinct t.hpo_id, t.\"name\" from phenopolis.individual i\n join phenopolis.individual_feature if2 on (i.id = if2.individual_id)\n join hpo.term t on (t.id = if2.feature_id) and if2.\"type\" = %s\n and i.id = %s\"\"\",\n (atype, ind_id),\n )\n res = cur.fetchall()\n return res\n\n\ndef _get_genes_for_individual(individual: Union[Individual, dict]) -> List[Tuple[str]]:\n \"\"\"returns e.g. [('TTLL5',)]\"\"\"\n if isinstance(individual, Individual):\n ind_id = individual.id\n elif isinstance(individual, dict):\n ind_id = individual.get(\"id\")\n with get_db() as conn:\n with conn.cursor() as cur:\n cur.execute(\n \"\"\"\n select distinct g.hgnc_symbol from phenopolis.individual i\n join phenopolis.individual_gene ig on i.id = ig.individual_id\n join ensembl.gene g on g.identifier = ig.gene_id\n and i.id = %s\"\"\",\n [ind_id],\n )\n genes = cur.fetchall()\n return genes\n\n\ndef _fetch_all_individuals(db_session: Session, offset, limit) -> List[Dict]:\n \"\"\"\n For admin user it returns all individuals and all users having access to them.\n But for others than admin it returns only individuals which this user has access,\n other users having access are not returned\n \"\"\"\n query = _query_all_individuals() + sql.SQL(\"limit {} offset {}\".format(limit, offset))\n with get_db() as conn:\n with conn.cursor() as cur:\n cur.execute(query, [session[USER]])\n individuals = sorted(cursor2dict(cur), key=lambda i: i[\"id\"])\n if session[USER] != ADMIN_USER:\n for dd in individuals:\n dd[\"users\"] = [session[USER]]\n return individuals\n\n\ndef _count_all_individuals() -> int:\n \"\"\"\n For admin users it counts all individuals and all users having access to them.\n But for other than admin it counts only individuals which this user has access, other users having access are\n not counted\n \"\"\"\n query = _query_all_individuals()\n with get_db() as conn:\n with conn.cursor() as cur:\n cur.execute(query, [session[USER]])\n return cur.rowcount\n\n\ndef _count_all_individuals_by_sex(sex: Sex) -> int:\n \"\"\"\n For admin users it counts all individuals and all users having access to them.\n But for other than admin it counts only individuals which this user has access, other users having access are\n not counted\n \"\"\"\n query = _query_all_individuals(sex)\n with get_db() as conn:\n with conn.cursor() as cur:\n cur.execute(query, (session[USER], sex.name))\n return cur.rowcount\n\n\ndef _query_all_individuals(additional_filter: Optional[Sex] = None) -> sql.SQL:\n\n # e.g. 
additional_filter = 'Sex'\n q1 = sql.SQL(\n \"\"\"where exists (\n select 1 from public.users_individuals ui\n where ui.internal_id = i.phenopolis_id\n and ui.\"user\" = %s)\"\"\"\n )\n\n conds = [q1]\n if additional_filter is not None:\n conds.append(sql.SQL(\"i.sex = %s\"))\n query = sql.SQL(\n r\"\"\"\n select i.id, i.external_id, i.phenopolis_id as internal_id, i.sex, i.consanguinity,\n (\n select array_agg(ui.\"user\")\n from public.users_individuals ui\n where ui.internal_id = i.phenopolis_id\n ) AS users,\n (\n select array_agg(g.hgnc_symbol)\n from phenopolis.individual_gene ig\n join ensembl.gene g on g.identifier = ig.gene_id\n where ig.individual_id = i.id\n ) AS genes,\n (\n select array_agg(concat(t.hpo_id,'@', t.\"name\"))\n from hpo.term t\n join phenopolis.individual_feature if2 on t.id = if2.feature_id\n where i.id = if2.individual_id\n and if2.\"type\" = 'observed'\n ) as observed_features,\n (\n select array_agg(t.hpo_id)\n from hpo.term t\n join phenopolis.individual_feature if2 on t.id = if2.feature_id\n where i.id = if2.individual_id\n and if2.\"type\" = 'unobserved'\n ) as unobserved_features,\n (\n select array_agg(concat(t.hpo_id,'@', t.\"name\"))\n from hpo.term t where t.id in (\n select (regexp_split_to_table(p.\"path\"::text, '\\.'))::int as ancestor\n from phenopolis.individual_feature if2\n join hpo.is_a_path p on if2.feature_id = p.term_id\n where i.id = if2.individual_id\n and if2.\"type\" = 'observed'\n )\n ) as ancestor_observed_features\n from phenopolis.individual i\n {filter}\n \"\"\"\n ).format(filter=sql.SQL(\" and \").join(conds))\n\n return query\n\n\ndef _fetch_authorized_individual(db_session: Session, phenopolis_id) -> Individual:\n return (\n db_session.query(Individual)\n .join(UserIndividual, UserIndividual.internal_id == Individual.phenopolis_id)\n .filter(UserIndividual.user == session[USER])\n .filter(Individual.phenopolis_id == phenopolis_id)\n .first()\n )\n\n\ndef _update_individual(consanguinity, gender: Sex, genes, hpos: List[tuple], individual: Individual):\n \"\"\"\n Updates tables:\n phenopolis.individual: col(gender)\n phenopolis.individual_feature: col(feature_id) # hpo_ids\n phenopolis.individual_gene: col(gene_id) # maps hgnc_symbol -> gene.identifier\n given hgnc_symbol MUST exactly match hgnc_symbols in ensembl.gene table otherwise returns []\n \"\"\"\n individual.sex = gender\n individual.consanguinity = consanguinity\n hpo_ids = [h[0] for h in hpos]\n with get_db() as conn:\n with conn.cursor() as cur:\n cur.execute(\n \"\"\"\n delete from phenopolis.individual_feature where individual_id = %(id)s\n and \"type\" = any('{observed,simplified}');\n insert into phenopolis.individual_feature (individual_id, feature_id, type) select %(id)s as individual_id,\n unnest(%(hpo_ids)s::int[]) as feature_id, 'observed' as type;\n insert into phenopolis.individual_feature (individual_id, feature_id, type) select %(id)s as individual_id,\n unnest(%(hpo_ids)s::int[]) as feature_id, 'simplified' as type;\n delete from phenopolis.individual_gene where individual_id = %(id)s;\n insert into phenopolis.individual_gene (individual_id, gene_id) select %(id)s as individual_id,\n identifier from ensembl.gene where hgnc_symbol = any(%(genes)s::text[]) and assembly = %(hga)s;\n \"\"\",\n {\"id\": individual.id, \"hpo_ids\": hpo_ids, \"genes\": genes, \"hga\": HG_ASSEMBLY},\n )\n\n\ndef _get_hpos(features: List[str]):\n with get_db() as conn:\n with conn.cursor() as cur:\n cur.execute(\"select * from hpo.term t where t.name = any(%s);\", [features])\n 
res = cur.fetchall()\n return res\n\n\ndef _get_authorized_individuals(db_session: Session) -> List[Individual]:\n user_id = session[USER]\n query = db_session.query(Individual, UserIndividual)\n if user_id != ADMIN_USER:\n query = query.filter(\n and_(Individual.phenopolis_id == UserIndividual.internal_id, UserIndividual.user == user_id)\n )\n return query.with_entities(Individual).all()\n", "id": "11927982", "language": "Python", "matching_score": 5.153641700744629, "max_stars_count": 24, "path": "views/individual.py" }, { "content": "\"\"\"\nStatistics view\n\"\"\"\nfrom typing import List\n\nfrom flask import jsonify, session\n\nfrom db.model import Individual, Sex\nfrom views import HG_ASSEMBLY, VERSION, application\nfrom views.auth import USER, requires_auth\nfrom views.individual import _count_all_individuals, _count_all_individuals_by_sex, _get_authorized_individuals\nfrom views.postgres import get_db, session_scope\nfrom views.variant import sqlq_all_variants\n\nCOMMON_VARIANTS_THRESHOLD = 0.05\nRARE_VARIANTS_THRESHOLD = 0.01\n\n\[email protected](\"/statistics\")\n@requires_auth\ndef phenopolis_statistics():\n with session_scope() as db_session:\n\n # counts individuals\n total_patients = _count_all_individuals()\n male_patients = _count_all_individuals_by_sex(Sex.M)\n female_patients = _count_all_individuals_by_sex(Sex.F)\n unknown_patients = _count_all_individuals_by_sex(Sex.U)\n\n # counts variants\n total_variants = count_variants()\n\n # counts HPOs\n individuals = _get_authorized_individuals(db_session)\n count_observed_features, count_unobserved_features = count_hpos(individuals)\n\n # counts genes\n total_genes = count_genes(individuals)\n\n return jsonify(\n exomes=total_patients,\n males=male_patients,\n females=female_patients,\n unknowns=unknown_patients,\n total_variants=total_variants,\n observed_features=count_observed_features,\n unobserved_features=count_unobserved_features,\n total_genes=total_genes,\n version_number=VERSION,\n )\n\n\ndef count_hpos(individuals: List[Individual]):\n with get_db() as conn:\n with conn.cursor() as cur:\n cur.execute(\n \"\"\"select ife.type, count(distinct ife.feature_id) from phenopolis.individual_feature ife\n where ife.individual_id = any(%s) and ife.type in ('observed','unobserved') group by ife.type\"\"\",\n [[x.id for x in individuals]],\n )\n res = dict(cur.fetchall())\n return res.get(\"observed\", 0), res.get(\"unobserved\", 0)\n\n\ndef count_genes(individuals: List[Individual]):\n with get_db() as conn:\n with conn.cursor() as cur:\n cur.execute(\n \"\"\"\n select distinct gene_id from phenopolis.individual_gene ig\n join ensembl.gene g on g.identifier = ig.gene_id\n where g.assembly = %s and g.chromosome ~ '^X|^Y|^[0-9]{1,2}'\n and ig.individual_id = any(%s)\n \"\"\",\n [HG_ASSEMBLY, [x.id for x in individuals]],\n )\n return cur.rowcount\n\n\ndef count_variants():\n with get_db() as conn:\n with conn.cursor() as cur:\n cur.execute(sqlq_all_variants, [session[USER]])\n return cur.rowcount\n", "id": "3169014", "language": "Python", "matching_score": 2.2492759227752686, "max_stars_count": 3, "path": "views/statistics.py" }, { "content": "\"\"\"\nTest DB access\nFull and Demo DB slightly differ for the entries tested bellow\nUsing some reliable common ground\nDemo DB need to be updated?\n\"\"\"\n\nfrom db.model import NewGene\nfrom views import HG_ASSEMBLY\nfrom views.postgres import close_db, postgres_cursor, session_scope\n\n\ndef test_db_sql_query_old_schema(_demo):\n \"\"\"res -> tuple\"\"\"\n cursor = 
postgres_cursor()\n cursor.execute(\n \"\"\"select gene_id, gene_name, gene_name_upper, full_gene_name, other_names from genes g\n where gene_id = 'ENSG00000156171'\"\"\"\n )\n res = cursor.fetchone()\n assert \"DNA-damage regulated autophagy modulator 2\" in res\n\n\ndef test_db_sql_query(_demo):\n \"\"\"res -> tuple\"\"\"\n cursor = postgres_cursor()\n cursor.execute(\n \"select * from ensembl.gene where ensembl_gene_id = 'ENSG00000156171' and assembly = %s\", [HG_ASSEMBLY]\n )\n res = cursor.fetchone()\n assert \"DNA-damage regulated autophagy modulator 2\" in res\n\n\ndef test_sqlalchemy_query(_demo):\n \"\"\"res -> db.NewGene\"\"\"\n with session_scope() as db_session:\n res = db_session.query(NewGene).filter(NewGene.ensembl_gene_id == \"ENSG00000119685\").first()\n assert res.hgnc_symbol == \"TTLL5\"\n\n\n# Never used so far\ndef test_close_db(_demo):\n close_db()\n", "id": "3567691", "language": "Python", "matching_score": 1.1644517183303833, "max_stars_count": 3, "path": "tests/test_db.py" }, { "content": "\nimport MySQLdb\n\n# Open database connection\n#mysql -u anonymous -h ensembldb.ensembl.org\ndb = MySQLdb.connect(\"ensembldb.ensembl.org\",\"anonymous\",\"\",\"homo_sapiens_core_75_37\" )\n\n# prepare a cursor object using cursor() method\ncursor = db.cursor()\n\n# execute SQL query using execute() method.\ncursor.execute(\"SELECT VERSION()\")\n# Fetch a single row using fetchone() method.\ndata = cursor.fetchone()\nprint \"Database version : %s \" % data\n\ncursor.execute( \"\"\" SELECT transcript.stable_id, xref.display_label FROM transcript, object_xref, xref,external_db WHERE transcript.transcript_id = object_xref.ensembl_id AND object_xref.ensembl_object_type = 'Transcript' AND object_xref.xref_id = xref.xref_id AND xref.external_db_id = external_db.external_db_id AND external_db.db_name = 'RefSeq_mRNA'; \"\"\" )\ncursor.fetchone()\n\n# disconnect from server\ndb.close()\n\n\n", "id": "4372973", "language": "Python", "matching_score": 1.054522156715393, "max_stars_count": 24, "path": "rest/ensembl_transcript_table.py" }, { "content": "\nimport json\nimport urllib \nimport pymongo\nfrom itertools import chain\n\nconn=pymongo.MongoClient(host='localhost',port=27017)\ndb=conn['hpo']\n\nfor r in db.tiger_eye_genes.find():\n refseq=r['RefSeq']\n url = 'http://biodbnet-abcc.ncifcrf.gov/webServices/rest.php/biodbnetRestApi.json?method=db2db&format=row&input=refseqmrnaaccession&inputValues={}&outputs=ensembltranscriptid,ensemblgeneid&,ensemblgeneidtaxonId=9606'.format( refseq )\n u = urllib.urlopen(url)\n s=json.loads(u.read())\n d=dict()\n d['ensembl_transcript_id'] = list(chain(*[x['Ensembl Transcript ID'].split('//') for x in s]))\n d['ensembl_gene_id'] = list(chain(*[x['Ensembl Gene ID'].split('//') for x in s]))\n print(refseq,d)\n print(db.tiger_eye_genes.update({'RefSeq':refseq},{'$set':d},w=0))\n\n", "id": "10670734", "language": "Python", "matching_score": 1.0683773756027222, "max_stars_count": 24, "path": "rest/db2db.py" }, { "content": "import sys\nfrom views import *\nfrom config import config\n\n# Load default config and override config from an environment variable\nif config.LOCAL:\n app.config.from_pyfile('../local.cfg')\nelse:\n app.config.from_pyfile('../phenopolis.cfg')\n\n\nif __name__ == \"__main__\":\n # use ssl\n # add some common url. 
Would be good if can generate the url in real time\n home = ''\n #from OpenSSL import SSL\n # altnerative\n #context = SSL.Context(SSL.SSLv23_METHOD)\n #context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)\n #context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)\n # this is now handled by Apache\n #app.run(host='0.0.0.0',port=8000,threaded=True,debug=True)\n app.run(host='0.0.0.0',port=8000,threaded=True)\n # threaded\n #app.run(threaded=True)\n #app.run(host='127.0.0.1',port=8000, debug = True, ssl_context=context)\n #app.run(host='0.0.0.0', port=8000, ssl_context=context)\n #app.run(host='0.0.0.0', port=8000, debug=True, ssl_context='adhoc')\n #app.run(host='0.0.0.0', port=8000, debug=True)\n #app.run(host='127.0.0.1', port=8000, debug=True)\n #toolbar=DebugToolbarExtension(app)\n #runner = Runner(app) # adds Flask command line options for setting host, port, etc.\n #runner.run()\n\n\n\n\n\n", "id": "6418529", "language": "Python", "matching_score": 0.5777967572212219, "max_stars_count": 24, "path": "runserver.py" }, { "content": "# @PydevCodeAnalysisIgnore\n# pylint: disable=undefined-variable\n'''Jupyter server config'''\nc.NotebookApp.open_browser = False\nc.NotebookApp.ip = '0.0.0.0' # '*'\nc.NotebookApp.port = 8192 # If you change the port here, make sure you update it in the jupyter_installer.sh file as well\nc.NotebookApp.password = u'<PASSWORD>'\nc.Authenticator.admin_users = {'jupyter'}\nc.LocalAuthenticator.create_system_users = True\n", "id": "7464964", "language": "Python", "matching_score": 0.4460774064064026, "max_stars_count": 0, "path": "src/jupyter_notebook_config.py" }, { "content": "import os\n\nimport pytest\nfrom dotenv import load_dotenv\n\nfrom views import APP_ENV, VERSION, application\nfrom views.auth import ADMIN_USER, DEMO_USER, USER\n\nNONDEMO_USER = \"nondemo\"\nload_dotenv(dotenv_path=\"./private.env\")\n\n\ndef pytest_report_header(config):\n return f\">>>\\tVersion: {VERSION}\\n\\tAPP_ENV: {APP_ENV}\\n\\tVCF_FILE: {os.getenv('VCF_FILE')}\"\n\n\[email protected]\ndef _admin():\n with application.test_request_context(path=\"/login\", method=\"POST\", data={\"user\": \"Admin\", \"password\": \"<PASSWORD>\"}):\n yield\n\n\[email protected](scope=\"module\")\ndef _demo():\n with application.test_request_context(path=\"/login\", method=\"POST\", data={\"user\": \"demo\", \"password\": \"<PASSWORD>\"}):\n yield\n\n\[email protected]\ndef _admin_client():\n with application.test_client() as client:\n with client.session_transaction() as session:\n session[USER] = ADMIN_USER\n yield client\n\n\[email protected]\ndef _demo_client():\n with application.test_client() as client:\n with client.session_transaction() as session:\n session[USER] = DEMO_USER\n yield client\n\n\[email protected]\ndef _nondemo_client():\n with application.test_client() as client:\n with client.session_transaction() as session:\n session[USER] = NONDEMO_USER\n yield client\n\n\[email protected]\ndef _not_logged_in_client():\n with application.test_client() as client:\n yield client\n", "id": "5428646", "language": "Python", "matching_score": 1.887643575668335, "max_stars_count": 3, "path": "tests/conftest.py" }, { "content": "\"\"\"\nAuthentication modules\n\"\"\"\n\nfrom functools import wraps\nfrom flask import session, request, jsonify\nfrom passlib.handlers.argon2 import argon2\nfrom sqlalchemy import and_\n\nfrom db.model import User\nfrom views import application\nfrom views.postgres import session_scope\n\nADMIN_USER = \"Admin\"\nDEMO_USER = \"demo\"\nPASSWORD = \"password\"\nUSER = \"user\"\n\n\ndef 
is_demo_user():\n return session[USER] == DEMO_USER\n\n\ndef check_auth(username, password):\n \"\"\"\n This function is called to check if a username / password combination is valid.\n \"\"\"\n with session_scope() as db_session:\n # only enabled and confirmed users can login\n user = db_session.query(User).filter(and_(User.user == username, User.enabled, User.confirmed)).first()\n if not user:\n return False\n hashed_password = user.argon_password\n return argon2.verify(password, hashed_password)\n\n\ndef requires_auth(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n if session.get(USER):\n return f(*args, **kwargs)\n # TODO: eventually we will want to remove this bit for POST endpoints\n if request.method == \"POST\":\n username = request.form.get(USER, request.headers.get(USER))\n password = request.form.get(PASSWORD, request.headers.get(PASSWORD))\n if check_auth(username, password):\n session[USER] = username\n # session.permanent = True\n return f(*args, **kwargs)\n return jsonify(error=\"Unauthenticated\"), 401\n\n return decorated\n\n\ndef requires_admin(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n if session.get(USER) == ADMIN_USER:\n return f(*args, **kwargs)\n # TODO: eventually we will want to remove this bit for POST endpoints\n username = request.form.get(USER, request.headers.get(USER))\n if request.method in [\"POST\", \"DELETE\"] and username == ADMIN_USER:\n password = request.form.get(PASSWORD, request.headers.get(PASSWORD))\n if check_auth(username, password):\n session[USER] = username\n # session.permanent = True\n return f(*args, **kwargs)\n return jsonify(error=\"Admin permissions required to perform this operation\"), 403\n\n return decorated\n\n\ndef requires_admin_or_user(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n user_id = kwargs.get(\"user_id\")\n if session.get(USER) in [ADMIN_USER, user_id]:\n return f(*args, **kwargs)\n return jsonify(error=\"Only Admin or the own User can perform this operation\"), 403\n\n return decorated\n\n\[email protected](\"/<language>/login\", methods=[\"POST\"])\[email protected](\"/login\", methods=[\"POST\"])\ndef login():\n application.logger.info(f\"request.json: {request.json}\")\n username = request.json.get(USER)\n password = <PASSWORD>(PASSWORD)\n if not check_auth(username, password):\n return jsonify(error=\"Invalid Credentials. 
Please try again.\"), 401\n session[USER] = username\n session.update()\n return jsonify(success=\"Authenticated\", username=username), 200\n\n\[email protected](\"/<language>/logout\", methods=[\"POST\"])\[email protected](\"/logout\", methods=[\"POST\"])\n@requires_auth\ndef logout():\n application.logger.info(\"Delete session\")\n session.pop(USER, None)\n return jsonify(success=\"logged out\"), 200\n\n\[email protected](\"/is_logged_in\")\n@requires_auth\ndef is_logged_in():\n return jsonify(username=session.get(USER, \"\")), 200\n", "id": "2584551", "language": "Python", "matching_score": 2.5451619625091553, "max_stars_count": 24, "path": "views/auth.py" }, { "content": "from passlib.handlers.argon2 import argon2\nfrom sqlalchemy.orm import Session\n\nfrom db.model import User, UserConfig\nfrom tests.conftest import NONDEMO_USER\nfrom tests.test_views import _check_only_available_to_admin\nfrom views.postgres import session_scope\nfrom views.token import generate_confirmation_token\nfrom views.user_individuals import delete_user_individual, create_user_individual\nfrom views.users import enable_user, get_users, get_user, create_user\n\n\ndef test_create_user_individual_without_permissions(_demo):\n \"\"\"res -> tuple(flask.wrappers.Response)\"\"\"\n res = create_user_individual()\n _check_only_available_to_admin(res)\n\n\ndef test_get_user_without_permissions(_demo):\n \"\"\"res -> tuple(flask.wrappers.Response)\"\"\"\n res = get_user(\"whatever_user\")\n _check_only_available_to_admin(res)\n\n\ndef test_get_users_without_permissions(_demo):\n \"\"\"res -> tuple(flask.wrappers.Response)\"\"\"\n res = get_users()\n _check_only_available_to_admin(res)\n\n\ndef test_delete_user_individual_without_permissions(_demo):\n \"\"\"res -> tuple(flask.wrappers.Response)\"\"\"\n res = delete_user_individual()\n _check_only_available_to_admin(res)\n\n\ndef test_enable_user_special_permissions(_demo_client):\n # try another user: not allowed\n res = _demo_client.put(\"/user/nondemo/enabled/true\")\n assert res.status_code == 403\n assert res.json == {\"error\": \"Only Admin or the own User can perform this operation\"}\n # try itself: allowed\n res = _demo_client.put(\"/user/demo/enabled/true\")\n assert res.status_code == 200\n assert res.json == {\"message\": \"User enabled flag set to 1\", \"success\": True}\n\n\ndef test_attempt_create_user_with_wrong_mimetype(_admin):\n \"\"\"res -> tuple(flask.wrappers.Response)\"\"\"\n response = create_user()\n assert response.data == b'{\"error\":\"Only mimetype application/json is accepted\",\"success\":false}\\n'\n assert response.status_code == 400\n\n\ndef test_get_user(_admin):\n \"\"\"res -> tuple(flask.wrappers.Response)\"\"\"\n response, status = get_user(\"Admin\")\n assert status == 200\n user_dict = response.json\n assert isinstance(user_dict, dict)\n assert user_dict.get(\"user\") == \"Admin\", f\"user_dict={user_dict}\"\n assert user_dict.get(\"argon_password\") is None, f\"user_dict={user_dict}\"\n individual_ids = user_dict.get(\"individuals\")\n assert isinstance(individual_ids, list), f\"user_dict={user_dict}\"\n assert len(individual_ids) > 0, f\"user_dict={user_dict}\"\n\n\ndef test_get_non_existing_user(_admin):\n \"\"\"res -> tuple(flask.wrappers.Response)\"\"\"\n _, status = get_user(\"JuanSinMiedo\")\n assert status == 404\n\n\ndef test_get_users(_admin):\n \"\"\"res -> tuple(flask.wrappers.Response)\"\"\"\n response, status = get_users()\n assert status == 200\n users = response.json\n assert isinstance(users, list), 
f\"users={users}\"\n assert len(users) >= 2, f\"users={users}\"\n assert \"Admin\" in users\n assert \"demo\" in users\n\n\ndef test_enable_user(_admin):\n response, _ = get_user(\"demo\")\n user = response.json\n assert user.get(\"enabled\"), \"Demo user is not enabled from the beginning\"\n response, status = enable_user(\"demo\", \"False\")\n assert response.json.get(\"success\")\n assert status == 200\n response, _ = get_user(\"demo\")\n user = response.json\n assert not user.get(\"enabled\"), \"Demo user should be disabled\"\n response, status = enable_user(\"demo\", \"True\")\n assert response.json.get(\"success\")\n assert status == 200\n response, _ = get_user(\"demo\")\n user = response.json\n assert user.get(\"enabled\"), \"Demo user should be enabled\"\n response, status = enable_user(\"Admin\", \"False\")\n assert status == 400\n assert not response.json.get(\"success\")\n assert response.json.get(\"message\") == \"Cannot change the status of Admin user!\"\n response, status = enable_user(\"abcdefxyz\", \"True\")\n assert status == 404\n assert response.json.get(\"message\") == \"User not found\"\n\n\ndef test_bad_attempt_to_disable_user(_admin):\n response, _ = get_user(\"demo\")\n user = response.json\n assert user.get(\"enabled\"), \"Demo user is not enabled from the beginning\"\n _, status = enable_user(\"demo\", \"Falsch\")\n assert status == 400\n\n\ndef test_create_user(_not_logged_in_client):\n payload = {\"confirmation_url\": \"http://phenopolis.org/confirm/\"}\n response = _not_logged_in_client.post(\"/user\", json=payload, content_type=\"application/json\")\n assert response.status_code == 400\n assert response.json.get(\"error\") == \"Missing user name\"\n payload[\"user\"] = \"a_tester\"\n response = _not_logged_in_client.post(\"/user\", json=payload, content_type=\"application/json\")\n assert response.status_code == 400\n assert response.json.get(\"error\") == \"Missing password\"\n payload[\"argon_password\"] = \"<PASSWORD>\"\n response = _not_logged_in_client.post(\"/user\", json=payload, content_type=\"application/json\")\n assert response.status_code == 400\n assert response.json.get(\"error\") == \"Missing email\"\n\n user_name = \"test_register1\"\n with session_scope() as db_session:\n try:\n user = User()\n user.user = user_name\n user.argon_password = \"<PASSWORD>\"\n user.email = \"<EMAIL>\"\n _assert_create_user(db_session, _not_logged_in_client, user)\n finally:\n # cleans the database\n _clean_test_users(db_session, user_name)\n\n\ndef test_create_and_confirm_user(_not_logged_in_client):\n user_name = \"test_register2\"\n email = \"<EMAIL>\"\n with session_scope() as db_session:\n try:\n # creates a user\n user = User()\n user.user = user_name\n user.argon_password = \"<PASSWORD>\"\n user.email = email\n _assert_create_user(db_session, _not_logged_in_client, user)\n # confirms the user\n confirmation_token = generate_confirmation_token(user.email)\n response = _not_logged_in_client.get(f\"/user/confirm/{confirmation_token}\")\n assert response.status_code == 200\n observed_user = db_session.query(User).filter(User.user == user.user).first()\n assert observed_user.user == user.user\n assert observed_user.enabled, \"Enabled field is not true\"\n assert observed_user.confirmed, \"Confirmed field is not true\"\n assert observed_user.confirmed_on is not None\n finally:\n # cleans the database\n _clean_test_users(db_session, user_name)\n\n\ndef test_confirm_user_with_token_with_unexisting_email(_not_logged_in_client):\n # tries to confirm an email not in 
the database\n confirmation_token = generate_confirmation_token(\"<EMAIL>\")\n response = _not_logged_in_client.get(f\"/user/confirm/{confirmation_token}\")\n assert response.status_code == 404\n\n\ndef test_confirm_user_with_bad_token(_not_logged_in_client):\n response = _not_logged_in_client.get(\"/user/confirm/a-bad-token\")\n assert response.status_code == 404\n\n\ndef test_confirm_user_already_confirmed(_not_logged_in_client):\n # tries to confirm an email not in the database\n confirmation_token = generate_confirmation_token(\"<EMAIL>\")\n response = _not_logged_in_client.get(f\"/user/confirm/{confirmation_token}\")\n assert response.status_code == 200\n\n\ndef test_create_user_with_explicit_enabled_and_confirmed_flags(_not_logged_in_client):\n user_name = \"test_register3\"\n with session_scope() as db_session:\n try:\n user = User()\n user.user = user_name\n user.argon_password = \"<PASSWORD>\"\n user.email = \"<EMAIL>\"\n user.enabled = True\n user.confirmed = True\n _assert_create_user(db_session, _not_logged_in_client, user)\n finally:\n # cleans the database\n _clean_test_users(db_session, user_name)\n\n\ndef test_create_user_without_email(_not_logged_in_client):\n user_name = \"test_register4\"\n with session_scope() as db_session:\n try:\n user = User()\n user.user = user_name\n user.argon_password = \"<PASSWORD>\"\n response = _not_logged_in_client.post(\"/user\", json=user.as_dict(), content_type=\"application/json\")\n assert response.status_code == 400\n finally:\n # cleans the database\n _clean_test_users(db_session, user_name)\n\n\ndef test_create_user_with_used_email(_not_logged_in_client):\n user_name = \"test_register5\"\n with session_scope() as db_session:\n try:\n user = User()\n user.user = user_name\n user.argon_password = \"<PASSWORD>\"\n user.email = \"<EMAIL>\"\n payload = user.as_dict()\n payload[\"confirmation_url\"] = \"http://phenopolis.org/confirm/\"\n response = _not_logged_in_client.post(\"/user\", json=payload, content_type=\"application/json\")\n assert response.status_code == 500\n finally:\n # cleans the database\n _clean_test_users(db_session, user_name)\n\n\ndef test_create_user_with_used_username(_not_logged_in_client):\n user_name = \"demo\"\n user = User()\n user.user = user_name\n user.argon_password = \"<PASSWORD>\"\n user.email = \"<EMAIL>\"\n payload = user.as_dict()\n payload[\"confirmation_url\"] = \"http://phenopolis.org/confirm/\"\n response = _not_logged_in_client.post(\"/user\", json=payload, content_type=\"application/json\")\n assert response.status_code == 500\n\n\ndef test_create_user_without_callbackurl(_not_logged_in_client):\n user_name = \"demo\"\n user = User()\n user.user = user_name\n user.argon_password = \"<PASSWORD>\"\n user.email = \"<EMAIL>\"\n payload = user.as_dict()\n response = _not_logged_in_client.post(\"/user\", json=payload, content_type=\"application/json\")\n assert response.status_code == 400\n\n\ndef test_change_password_demo(_demo_client):\n response = _demo_client.post(\n \"/user/change-password\",\n json={\"current_password\": \"<PASSWORD>\", \"new_password\": \"<PASSWORD>\"},\n content_type=\"application/json\",\n )\n assert response.status_code == 403\n assert response.json.get(\"error\") == \"You do not have permission to change the password for username 'demo'.\"\n\n\ndef test_change_password(_nondemo_client):\n new_password = \"<PASSWORD>\"\n old_password = \"password\"\n\n response = _nondemo_client.post(\n \"/user/change-password\",\n json={\"current_password\": \"<PASSWORD>\", \"new_password\": 
new_password},\n content_type=\"application/json\",\n )\n assert response.status_code == 401\n assert response.json.get(\"error\") == \"Username and current password incorrect. Please try again.\"\n\n # verifies old password is what it should\n with session_scope() as db_session:\n observed_user = db_session.query(User).filter(User.user == NONDEMO_USER).first()\n assert argon2.verify(old_password, observed_user.argon_password)\n\n # changes the password\n response = _nondemo_client.post(\n \"/user/change-password\",\n json={\"current_password\": <PASSWORD>_password, \"new_password\": <PASSWORD>},\n content_type=\"application/json\",\n )\n assert response.status_code == 200\n\n with session_scope() as db_session:\n # checks that the password is changed\n observed_user = db_session.query(User).filter(User.user == NONDEMO_USER).first()\n assert argon2.verify(new_password, observed_user.argon_password)\n\n # revert passward for future tests\n response = _nondemo_client.post(\n \"/user/change-password\",\n json={\"current_password\": <PASSWORD>, \"new_password\": <PASSWORD>},\n content_type=\"application/json\",\n )\n assert response.status_code == 200\n with session_scope() as db_session:\n # checks that the old_password is back\n observed_user = db_session.query(User).filter(User.user == NONDEMO_USER).first()\n assert argon2.verify(old_password, observed_user.argon_password)\n\n\ndef test_delete_user(_admin_client):\n user_name = \"test_register6\"\n with session_scope() as db_session:\n user = User()\n user.user = user_name\n user.argon_password = \"<PASSWORD>\"\n user.email = \"<EMAIL>\"\n _assert_create_user(db_session, _admin_client, user)\n\n # deletes user\n response = _admin_client.delete(f\"/user/{user_name}\", content_type=\"application/json\")\n assert response.status_code == 200\n\n # confirms it does not exist\n o_user = db_session.query(User).filter(User.user == user_name).first()\n assert o_user is None, \"Deletion was not successful\"\n\n # try to delete non-existent user\n response = _admin_client.delete(\"/user/not_me\", content_type=\"application/json\")\n assert response.status_code == 404\n\n\ndef test_delete_user_itself(_not_logged_in_client):\n user_name = \"temp_user\"\n with session_scope() as db_session:\n user = User()\n user.user = user_name\n user.argon_password = \"<PASSWORD>\"\n user.email = \"<EMAIL>\"\n _assert_create_user(db_session, _not_logged_in_client, user)\n confirmation_token = generate_confirmation_token(user.email)\n response = _not_logged_in_client.get(f\"/user/confirm/{confirmation_token}\")\n assert response.status_code == 200\n\n # login with new user\n resp = _not_logged_in_client.post(\"/login\", json={\"user\": f\"{user.user}\", \"password\": f\"{<PASSWORD>}\"})\n assert resp.status_code == 200\n assert resp.json == {\"success\": \"Authenticated\", \"username\": f\"{user.user}\"}\n\n # # try to delete another user\n response = _not_logged_in_client.delete(\"/user/demo\", content_type=\"application/json\")\n assert response.status_code == 403\n\n # user deletes itself\n response = _not_logged_in_client.delete(f\"/user/{user_name}\", content_type=\"application/json\")\n assert response.status_code == 200\n\n\ndef _assert_create_user(db_session: Session, _client, user):\n payload = user.as_dict()\n payload[\"confirmation_url\"] = \"http://phenopolis.org/confirm/\"\n response = _client.post(\"/user\", json=payload, content_type=\"application/json\")\n assert response.status_code == 200\n observed_user = db_session.query(User).filter(User.user == 
user.user).first()\n assert observed_user is not None, \"Empty newly created user\"\n assert observed_user.user is not None and observed_user.user != \"\", \"Field user is empty\"\n assert observed_user.argon_password is not None and observed_user.argon_password != \"\", \"Field password is empty\"\n assert not observed_user.enabled, \"Enabled field is not false\"\n assert not observed_user.confirmed, \"Confirmed field is not false\"\n\n\ndef _clean_test_users(db_session, user_name):\n try:\n # deletion in public.users should cascade to public.user_config\n db_session.query(User).filter(User.user == user_name).delete()\n db_session.query(UserConfig).filter(UserConfig.user_name == user_name).delete()\n except Exception:\n # could not remove users\n pass\n", "id": "5048553", "language": "Python", "matching_score": 4.51577091217041, "max_stars_count": 24, "path": "tests/test_users.py" }, { "content": "\"\"\"\nUsers view\n\"\"\"\nfrom flask import session, jsonify\nfrom flask_mail import Message\nfrom passlib.handlers.argon2 import argon2\nfrom sqlalchemy import func\nfrom db.model import User, UserIndividual, UserConfig\nfrom views import MAIL_USERNAME, application, mail\nfrom views.auth import requires_admin_or_user, requires_auth, check_auth, requires_admin, is_demo_user, USER, ADMIN_USER\nfrom views.exceptions import PhenopolisException\nfrom views.general import _parse_boolean_parameter\nfrom views.helpers import _get_json_payload\nfrom views.postgres import session_scope\nfrom views.token import generate_confirmation_token, confirm_token\n\nCONFIRMATION_URL = \"confirmation_url\"\n\n\[email protected](\"/user/change-password\", methods=[\"POST\"])\n@requires_auth\ndef change_password():\n try:\n username = session[USER]\n data = _get_json_payload()\n password = data.get(\"<PASSWORD>\")\n new_password = data.get(\"new_password\")\n if is_demo_user():\n return (\n jsonify(error=\"You do not have permission to change the password for username 'demo'.\"),\n 403,\n )\n if not check_auth(username, password):\n application.logger.info(\"Change password:- Login Failed\")\n return (\n jsonify(error=\"Username and current password incorrect. Please try again.\"),\n 401,\n )\n application.logger.info(\"Login success, changing password\")\n\n with session_scope() as db_session:\n user = db_session.query(User).filter(User.user == username).first()\n user.argon_password = <PASSWORD>(<PASSWORD>)\n msg = \"Password for username '\" + username + \"' changed. 
You are logged in as '\" + username + \"'.\"\n except PhenopolisException as e:\n application.logger.error(str(e))\n return jsonify(success=False, error=str(e)), e.http_status\n\n return jsonify(success=msg), 200\n\n\[email protected](\"/user/<user_id>/enabled/<status>\", methods=[\"PUT\"])\n@requires_admin_or_user\ndef enable_user(user_id, status):\n with session_scope() as db_session:\n try:\n if user_id == ADMIN_USER:\n raise PhenopolisException(\"Cannot change the status of Admin user!\", 400)\n user = _get_user_by_id(db_session, user_id)\n if not user:\n return jsonify(message=\"User not found\"), 404\n user.enabled = _parse_boolean_parameter(status)\n enabled_flag = user.enabled\n except PhenopolisException as e:\n return jsonify(success=False, message=str(e)), e.http_status\n return jsonify(success=True, message=f\"User enabled flag set to {enabled_flag}\"), 200\n\n\[email protected](\"/user\", methods=[\"POST\"])\ndef create_user():\n try:\n payload = _get_json_payload()\n if CONFIRMATION_URL not in payload:\n raise PhenopolisException(\"Please, provide a confirmation URL\", 400)\n confirmation_url = payload.pop(CONFIRMATION_URL)\n new_user = User(**payload)\n _check_user_valid(new_user)\n # encode password\n new_user.argon_password = <PASSWORD>)\n # this is the default, but to avoid a misuse of the API that circumvents user registration it forces these\n # two flags to False\n new_user.confirmed = False\n new_user.enabled = False\n\n try:\n # persist users\n user_id = new_user.user\n with session_scope() as db_session:\n db_session.add(new_user)\n _add_config_from_admin(db_session, new_user)\n # sends confirmation email\n _send_confirmation_email(new_user, confirmation_url=confirmation_url)\n response = jsonify(success=True, message=\"User was created\", id=user_id)\n except Exception as e:\n application.logger.exception(e)\n response = jsonify(success=False, message=str(e))\n response.status_code = 500\n except PhenopolisException as e:\n application.logger.error(str(e))\n response = jsonify(success=False, error=str(e))\n response.status_code = e.http_status\n return response\n\n\[email protected](\"/user/<user_id>\")\n@requires_admin\ndef get_user(user_id):\n try:\n with session_scope() as db_session:\n user = _get_user_by_id(db_session, user_id)\n if not user:\n return jsonify(message=\"User not found\"), 404\n user_individuals = db_session.query(UserIndividual).filter(UserIndividual.user == user.user).all()\n user_dict = user.as_dict()\n # removes the password hash from the endpoint we don't want/need this around\n del user_dict[\"argon_password\"]\n user_dict[\"individuals\"] = [ui.internal_id for ui in user_individuals]\n except PhenopolisException as e:\n return jsonify(success=False, message=str(e)), e.http_status\n return jsonify(user_dict), 200\n\n\[email protected](\"/user\")\n@requires_admin\ndef get_users():\n with session_scope() as db_session:\n users = db_session.query(User).all()\n user_names = [u.user for u in users]\n return jsonify(user_names), 200\n\n\[email protected](\"/user/confirm/<token>\")\ndef confirm_user(token):\n email = confirm_token(token, application.config[\"TOKEN_EXPIRY_SECONDS\"])\n with session_scope() as db_session:\n try:\n if email is None:\n raise PhenopolisException(\"Invalid token or non existing user\", 404)\n user = db_session.query(User).filter(User.email == email).first()\n if user is None:\n raise PhenopolisException(\"Invalid token or non existing user\", 404)\n if user.confirmed:\n raise PhenopolisException(\"User has already been 
confirmed. Please, go to login\", 200)\n user.confirmed = True\n user.confirmed_on = func.now()\n user.enabled = True\n response = jsonify(success=True, message=\"User confirmation successful\")\n except PhenopolisException as e:\n response = jsonify(success=False, message=str(e))\n response.status_code = e.http_status\n return response\n\n\[email protected](\"/user/<user_id>\", methods=[\"DELETE\"])\n@requires_admin_or_user\ndef delete_user(user_id):\n with session_scope() as db_session:\n user = _get_user_by_id(db_session, user_id)\n request_ok = True\n http_status = 200\n message = f\"User {user_id} has been deleted.\"\n if user:\n try:\n db_session.query(UserIndividual).filter(UserIndividual.user == user_id).delete()\n db_session.query(UserConfig).filter(UserConfig.user_name == user_id).delete()\n db_session.delete(user)\n except Exception as e:\n application.logger.exception(e)\n request_ok = False\n message = str(e)\n http_status = e.http_status\n else:\n request_ok = False\n message = f\"User {user_id} does not exist.\"\n http_status = 404\n return jsonify(success=request_ok, message=message), http_status\n\n\ndef _check_user_valid(new_user: User):\n if new_user.user is None or new_user.user == \"\":\n raise PhenopolisException(\"Missing user name\", 400)\n if new_user.argon_password is None or new_user.argon_password == \"\":\n raise PhenopolisException(\"Missing password\", 400)\n if new_user.email is None or new_user.email == \"\":\n raise PhenopolisException(\"Missing email\", 400)\n\n\ndef _add_config_from_admin(db_session, new_user):\n configs = db_session.query(UserConfig).filter(UserConfig.user_name.match(\"Admin\")).all()\n new_configs = []\n for c in configs:\n new_user_config = UserConfig(**c.as_dict())\n new_user_config.user_name = new_user.user\n new_configs.append(new_user_config)\n db_session.add_all(new_configs)\n\n\ndef _get_user_by_id(db_session, user_id: str) -> User:\n return db_session.query(User).filter(User.user == user_id).first()\n\n\ndef _send_confirmation_email(user: User, confirmation_url: str):\n confirmation_token = generate_confirmation_token(user.email)\n m = Message(\"Confirm your registration into Phenopolis\", sender=MAIL_USERNAME, recipients=[user.email],)\n m.body = f\"\"\"Welcome to Phenopolis {user.user}, confirm your registration in the following link:\\n\n {confirmation_url}/{confirmation_token}\"\"\"\n mail.send(m)\n", "id": "10273203", "language": "Python", "matching_score": 4.411825656890869, "max_stars_count": 24, "path": "views/users.py" }, { "content": "\"\"\"\nUsers Individuals view\nNOTE: by @alanwilter - I don't see this module being used anywhere anymore, if ever used before.\n\"\"\"\nfrom flask import jsonify\nfrom sqlalchemy.orm import Session\n\nfrom db.model import UserIndividual, User, Individual\nfrom views import application\nfrom views.auth import requires_admin\nfrom views.exceptions import PhenopolisException\nfrom views.helpers import _get_json_payload\nfrom views.postgres import session_scope\n\n\[email protected](\"/user-individual\", methods=[\"POST\"])\n@requires_admin\ndef create_user_individual():\n try:\n new_user_individuals = _get_json_payload(UserIndividual)\n for u in new_user_individuals:\n _check_user_individual_valid(u)\n except PhenopolisException as e:\n application.logger.error(str(e))\n return jsonify(success=False, error=str(e)), e.http_status\n\n with session_scope() as db_session:\n request_ok = True\n message = \"User individuals were created\"\n try:\n # insert user individuals\n for u in 
new_user_individuals:\n # TODO: should not all these checks happen at the DB?\n _check_db_integrity_user_individual(db_session, u)\n db_session.add(u)\n except Exception as e:\n application.logger.exception(e)\n request_ok = False\n message = str(e)\n\n if not request_ok:\n return jsonify(success=False, message=message), 500\n else:\n return jsonify(success=True, message=message), 200\n\n\[email protected](\"/user-individual\", methods=[\"DELETE\"])\n@requires_admin\ndef delete_user_individual():\n try:\n user_individuals_to_be_deleted = _get_json_payload(UserIndividual)\n except PhenopolisException as e:\n return jsonify(success=False, error=str(e)), e.http_status\n\n with session_scope() as db_session:\n request_ok = True\n message = \"User individuals were deleted\"\n try:\n # insert user individuals\n for u in user_individuals_to_be_deleted:\n db_session.query(UserIndividual).filter(UserIndividual.user == u.user).filter(\n UserIndividual.internal_id == u.internal_id\n ).delete()\n except Exception as e:\n application.logger.exception(e)\n request_ok = False\n message = str(e)\n\n if not request_ok:\n return jsonify(success=False, message=message), 500\n else:\n return jsonify(success=True, message=message), 200\n\n\ndef _check_db_integrity_user_individual(db_session: Session, user: UserIndividual):\n # TODO: all these checks could happen in the DB\n if db_session.query(User.user).filter(User.user.match(user.user)).count() != 1:\n raise PhenopolisException(\"Trying to add an entry in user_individual to a non existing user\", 500)\n if db_session.query(Individual.phenopolis_id).filter(Individual.phenopolis_id.match(user.internal_id)).count() != 1:\n raise PhenopolisException(\"Trying to add an entry in user_individual to a non existing individual\", 500)\n if (\n db_session.query(UserIndividual)\n .filter(UserIndividual.user.match(user.user))\n .filter(UserIndividual.internal_id.match(user.internal_id))\n .count()\n > 0\n ):\n raise PhenopolisException(\"Trying to add an entry in user_individual that already exists\", 500)\n\n\ndef _check_user_individual_valid(new_user_individual: UserIndividual):\n if new_user_individual is None:\n raise PhenopolisException(\"Null user individual\", 400)\n if new_user_individual.user is None or new_user_individual.user == \"\":\n raise PhenopolisException(\"Missing user\", 400)\n if new_user_individual.internal_id is None or new_user_individual.internal_id == \"\":\n raise PhenopolisException(\"Missing individual id\", 400)\n", "id": "11985384", "language": "Python", "matching_score": 3.3802413940429688, "max_stars_count": 24, "path": "views/user_individuals.py" }, { "content": "from flask import jsonify, session\nfrom sqlalchemy.orm import Session\n\nfrom db.model import Individual, IndividualVariantClassification\nfrom views import application\nfrom views.auth import USER, requires_auth\nfrom views.exceptions import PhenopolisException\nfrom views.helpers import _get_json_payload\nfrom views.individual import _fetch_authorized_individual\nfrom views.postgres import session_scope\n\n\[email protected](\"/variant-classification\", methods=[\"POST\"])\n@requires_auth\ndef create_classification():\n\n with session_scope() as db_session:\n try:\n classifications = _get_json_payload(IndividualVariantClassification)\n for c in classifications:\n _check_classification_valid(db_session, c)\n except PhenopolisException as e:\n application.logger.error(str(e))\n response = jsonify(success=False, error=str(e))\n response.status_code = e.http_status\n return 
response\n\n request_ok = True\n http_status = 200\n message = \"Variant classifications were created\"\n try:\n # generate a new unique id for the individual\n for c in classifications:\n # insert individual\n c.user_id = session[USER] # whatever value comes here we ensure the actual user is stored\n c.id = None # this one should be set by the database\n c.classified_on = None # this one should be set by the database\n db_session.add(c)\n except PhenopolisException as e:\n application.logger.exception(e)\n request_ok = False\n message = str(e)\n http_status = e.http_status\n\n return jsonify(success=request_ok, message=message), http_status\n\n\[email protected](\"/variant-classifications-by-individual/<phenopolis_id>\")\n@requires_auth\ndef get_classifications_by_individual(phenopolis_id):\n\n with session_scope() as db_session:\n individual = _fetch_authorized_individual(db_session, phenopolis_id)\n # unauthorized access to individual\n if not individual:\n response = jsonify(\n message=\"Sorry, either the patient does not exist or you are not permitted to see this patient\"\n )\n response.status_code = 401\n else:\n classifications = (\n db_session.query(IndividualVariantClassification)\n .join(Individual, Individual.id == IndividualVariantClassification.individual_id)\n .filter(Individual.phenopolis_id == phenopolis_id)\n .order_by(IndividualVariantClassification.classified_on.desc())\n .all()\n )\n response = jsonify([c.as_dict() for c in classifications])\n return response\n\n\ndef _check_classification_valid(db_session: Session, classification: IndividualVariantClassification):\n try:\n phenopolis_id = (\n db_session.query(Individual).filter(Individual.id == classification.individual_id).first().phenopolis_id\n )\n except Exception:\n phenopolis_id = None\n individual = _fetch_authorized_individual(db_session, phenopolis_id)\n if individual is None:\n raise PhenopolisException(\n f\"User not authorized to classify variants for individual {classification.individual_id}\", 401\n )\n", "id": "1840010", "language": "Python", "matching_score": 2.958186388015747, "max_stars_count": 3, "path": "views/variant_classification.py" }, { "content": "from db.model import IndividualVariantClassification\nfrom views.postgres import session_scope\nimport random\n\n\ndef test_create_classification_with_admin_user(_admin_client):\n classification = IndividualVariantClassification()\n classification.variant_id = 8090\n classification.individual_id = 8258\n classification.classification = \"pathogenic\"\n classification.notes = \"\".join([\"bla\" for _ in range(random.randint(10, 100))])\n classification.pubmed_id = str(random.randint(10, 1000000))\n _assert_variant_classification(_admin_client, classification, \"Admin\")\n\n\ndef test_create_classification_with_demo_user(_demo_client):\n classification = IndividualVariantClassification()\n classification.variant_id = 8090\n classification.individual_id = 8258\n classification.classification = \"pathogenic\"\n classification.notes = \"\".join([\"bla\" for _ in range(random.randint(10, 100))])\n classification.pubmed_id = str(random.randint(10, 1000000))\n _assert_variant_classification(_demo_client, classification, \"demo\")\n\n\ndef test_create_classification_with_mismatching_variant_and_individual(_demo_client):\n classification = IndividualVariantClassification()\n classification.variant_id = 2099\n classification.individual_id = 8258\n classification.classification = \"pathogenic\"\n classification.notes = \"\".join([\"bla\" for _ in 
range(random.randint(10, 100))])\n classification.pubmed_id = str(random.randint(10, 1000000))\n response = _demo_client.post(\n \"/variant-classification\", json=classification.as_dict(), content_type=\"application/json\"\n )\n assert response.status_code == 500\n\n\ndef test_create_classification_with_non_existing_variant(_demo_client):\n classification = IndividualVariantClassification()\n classification.variant_id = 210500000000000\n classification.individual_id = 8258\n classification.classification = \"pathogenic\"\n classification.notes = \"\".join([\"bla\" for _ in range(random.randint(10, 100))])\n classification.pubmed_id = str(random.randint(10, 1000000))\n response = _demo_client.post(\n \"/variant-classification\", json=classification.as_dict(), content_type=\"application/json\"\n )\n assert response.status_code == 500\n\n\ndef test_create_classification_with_non_existing_individual(_demo_client):\n classification = IndividualVariantClassification()\n classification.variant_id = 2105\n classification.individual_id = 123456789\n classification.classification = \"pathogenic\"\n classification.notes = \"\".join([\"bla\" for _ in range(random.randint(10, 100))])\n classification.pubmed_id = str(random.randint(10, 1000000))\n response = _demo_client.post(\n \"/variant-classification\", json=classification.as_dict(), content_type=\"application/json\"\n )\n assert response.status_code == 401\n\n\ndef test_create_classification_with_bad_value(_demo_client):\n classification = IndividualVariantClassification()\n classification.variant_id = 2105\n classification.individual_id = 8258\n classification.classification = \"iknownothingofthis\"\n classification.notes = \"\".join([\"bla\" for _ in range(random.randint(10, 100))])\n classification.pubmed_id = str(random.randint(10, 1000000))\n response = _demo_client.post(\n \"/variant-classification\", json=classification.as_dict(), content_type=\"application/json\"\n )\n assert response.status_code == 500\n\n\ndef test_create_classification_with_empty_variant(_demo_client):\n classification = IndividualVariantClassification()\n classification.individual_id = 8258\n classification.classification = \"pathogenic\"\n classification.notes = \"\".join([\"bla\" for _ in range(random.randint(10, 100))])\n classification.pubmed_id = str(random.randint(10, 1000000))\n response = _demo_client.post(\n \"/variant-classification\", json=classification.as_dict(), content_type=\"application/json\"\n )\n assert response.status_code == 500\n\n\ndef test_create_classification_with_empty_individual(_demo_client):\n classification = IndividualVariantClassification()\n classification.variant_id = 2105\n classification.classification = \"pathogenic\"\n classification.notes = \"\".join([\"bla\" for _ in range(random.randint(10, 100))])\n classification.pubmed_id = str(random.randint(10, 1000000))\n response = _demo_client.post(\n \"/variant-classification\", json=classification.as_dict(), content_type=\"application/json\"\n )\n assert response.status_code == 401\n\n\ndef test_create_classification_with_empty_classification(_demo_client):\n classification = IndividualVariantClassification()\n classification.variant_id = 2105\n classification.individual_id = 8258\n classification.notes = \"\".join([\"bla\" for _ in range(random.randint(10, 100))])\n classification.pubmed_id = str(random.randint(10, 1000000))\n response = _demo_client.post(\n \"/variant-classification\", json=classification.as_dict(), content_type=\"application/json\"\n )\n assert response.status_code 
== 500\n\n\ndef test_create_classification_unauthorised_variant(_nondemo_client):\n classification = IndividualVariantClassification()\n classification.variant_id = 2105\n classification.individual_id = 8258\n classification.classification = \"pathogenic\"\n classification.notes = \"\".join([\"bla\" for _ in range(random.randint(10, 100))])\n classification.pubmed_id = str(random.randint(10, 1000000))\n response = _nondemo_client.post(\n \"/variant-classification\", json=classification.as_dict(), content_type=\"application/json\"\n )\n assert response.status_code == 401\n\n\ndef test_get_classifications_by_individual(_admin_client):\n\n # sets 3 variant classifications for a given individual\n phenopolis_id = \"PH00008258\"\n individual_id = int(phenopolis_id.replace(\"PH\", \"\"))\n classification1 = IndividualVariantClassification()\n classification1.variant_id = 8090\n classification1.individual_id = individual_id\n classification1.classification = \"pathogenic\"\n _assert_variant_classification(_admin_client, classification1, \"Admin\")\n classification2 = IndividualVariantClassification()\n classification2.variant_id = 13309\n classification2.individual_id = individual_id\n classification2.classification = \"pathogenic\"\n _assert_variant_classification(_admin_client, classification2, \"Admin\")\n classification3 = IndividualVariantClassification()\n classification3.variant_id = 22678\n classification3.individual_id = individual_id\n classification3.classification = \"pathogenic\"\n _assert_variant_classification(_admin_client, classification3, \"Admin\")\n # creates this one just to check it does not come in the output\n classification4 = IndividualVariantClassification()\n classification4.variant_id = 3491\n classification4.individual_id = 8256\n classification4.classification = \"pathogenic\"\n _assert_variant_classification(_admin_client, classification4, \"Admin\")\n\n response = _admin_client.get(f\"/variant-classifications-by-individual/{phenopolis_id}\")\n assert response.status_code == 200\n classifications = response.json\n assert len(classifications) >= 3\n observed_variant_ids = [c.get(\"variant_id\") for c in classifications]\n assert 8090 in observed_variant_ids\n assert 13309 in observed_variant_ids\n assert 22678 in observed_variant_ids\n assert 3491 not in observed_variant_ids\n observed_individual_ids = set([c.get(\"individual_id\") for c in classifications])\n assert individual_id in observed_individual_ids\n assert len(observed_individual_ids) == 1\n\n\ndef test_get_classification_unauthorised_individual(_nondemo_client):\n response = _nondemo_client.get(\"/variant-classifications-by-individual/PH00008258\")\n assert response.status_code == 401\n\n\ndef test_get_classification_non_existing_individual(_admin_client):\n response = _admin_client.get(\"/variant-classifications-by-individual/PH123456789789665541222\")\n assert response.status_code == 401\n\n\ndef _assert_variant_classification(client, classification, user_id):\n response = client.post(\"/variant-classification\", json=classification.as_dict(), content_type=\"application/json\")\n assert response.status_code == 200\n with session_scope() as db_session:\n observed_classification = (\n db_session.query(IndividualVariantClassification)\n .filter(IndividualVariantClassification.individual_id == classification.individual_id)\n .order_by(IndividualVariantClassification.classified_on.desc())\n .first()\n )\n assert observed_classification is not None\n assert observed_classification.id is not None\n assert 
observed_classification.classified_on is not None\n assert observed_classification.classification == classification.classification\n assert observed_classification.variant_id == classification.variant_id\n assert observed_classification.user_id == user_id\n assert observed_classification.notes == classification.notes\n assert observed_classification.pubmed_id == classification.pubmed_id\n", "id": "4991891", "language": "Python", "matching_score": 3.3389716148376465, "max_stars_count": 24, "path": "tests/test_variant_classifications.py" }, { "content": "import pytest\nfrom sqlalchemy.orm import Session\n\nimport views.individual as vi # to allow MAX_PAGE_SIZE redefinition\nfrom db.model import Individual, UserIndividual\nfrom views.auth import USER\nfrom views.individual import MAPPING_SEX_REPRESENTATIONS, get_all_individuals, get_individual_by_id\nfrom views.postgres import session_scope\n\n\[email protected](\n (\"query\", \"subset\", \"msg\"),\n (\n (\"PH00008267\", \"all\", \"'Number of individuals that are wildtype in our dataset'\"),\n (\"PH00008258\", \"preview\", \"'Visual impairment', 'Macular dystrophy'\"),\n (\"PH00008258\", \"metadata\", \"_demo_\"),\n ),\n)\ndef test_get_authorised_individual_by_id(_demo, query, subset, msg):\n \"\"\"\n res -> str\n \"\"\"\n response = get_individual_by_id(query, subset=subset)\n assert response.status_code == 200\n assert msg in str(response.json)\n assert response.cache_control.max_age == 300\n assert response.cache_control.public\n assert response.expires is not None\n\n\ndef test_get_unauthorised_individual_by_id(_demo):\n \"\"\"\n \"demo\" user has no right to access PH00000001\n res -> tuple(flask.wrappers.Response)\n \"\"\"\n response = get_individual_by_id(\"PH00000001\")\n assert response.status_code == 404\n assert response.json.get(\"message\") == \"Patient not found\"\n\n\ndef test_get_individual_complete_view_by_id(_admin):\n\n # test individual with homozygous variants\n individual_view = _get_view_individual_by_id(identifier=\"PH00008256\")\n assert len(individual_view.get(\"rare_homs\", {}).get(\"data\")) == 82, \"Unexpected number of homozygous variants\"\n assert (\n len(individual_view.get(\"rare_variants\", {}).get(\"data\")) == 1324\n ), \"Unexpected number of heterozygous variants\"\n assert (\n len(individual_view.get(\"rare_comp_hets\", {}).get(\"data\")) == 0\n ), \"Unexpected number of compound heterozygous variants\"\n\n # test individual with heterozygous variants\n individual_view = _get_view_individual_by_id(identifier=\"PH00008267\")\n assert len(individual_view.get(\"rare_homs\", {}).get(\"data\")) == 59, \"Unexpected number of homozygous variants\"\n assert (\n len(individual_view.get(\"rare_variants\", {}).get(\"data\")) == 905\n ), \"Unexpected number of heterozygous variants\"\n assert (\n len(individual_view.get(\"rare_comp_hets\", {}).get(\"data\")) == 2\n ), \"Unexpected number of compound heterozygous variants\"\n\n\ndef test_get_individual_preview_by_id(_admin):\n\n # test individual with homozygous variants\n individual_view = _get_view_individual_by_id(identifier=\"PH00008256\", subset=\"preview\")\n assert individual_view.get(\"preview\")[0][1] == \"WebsterURMD_Sample_GV4344\"\n assert individual_view.get(\"preview\")[4][0] == \"Number of hom variants\"\n assert individual_view.get(\"preview\")[4][1] == 82, \"Unexpected number of homozygous variants\"\n assert individual_view.get(\"preview\")[5][0] == \"Number of compound hets\"\n assert individual_view.get(\"preview\")[5][1] == 0, \"Unexpected number 
of compound heterozygous variants\"\n assert individual_view.get(\"preview\")[6][0] == \"Number of het variants\"\n assert individual_view.get(\"preview\")[6][1] == 1324, \"Unexpected number of heterozygous variants\"\n\n # test individual with heterozygous variants\n individual_view = _get_view_individual_by_id(identifier=\"PH00008267\", subset=\"preview\")\n assert individual_view.get(\"preview\")[4][0] == \"Number of hom variants\"\n assert individual_view.get(\"preview\")[4][1] == 59, \"Unexpected number of homozygous variants\"\n assert individual_view.get(\"preview\")[5][0] == \"Number of compound hets\"\n assert individual_view.get(\"preview\")[5][1] == 2, \"Unexpected number of compound heterozygous variants\"\n assert individual_view.get(\"preview\")[6][0] == \"Number of het variants\"\n assert individual_view.get(\"preview\")[6][1] == 905, \"Unexpected number of heterozygous variants\"\n\n\ndef _get_view_individual_by_id(identifier, subset=\"all\"):\n response = get_individual_by_id(identifier, subset=subset)\n assert response.status_code == 200\n data = response.json\n assert len(data) == 1, \"Missing expected data\"\n individual_complete_view = data[0]\n return individual_complete_view\n\n\ndef test_update_individual_with_demo_user_fails(_demo_client):\n # fetch current sex\n individual_id = \"PH00008267\"\n with session_scope() as db_session:\n individual = db_session.query(Individual).filter(Individual.phenopolis_id == individual_id).first()\n sex = individual.sex\n\n # update sex\n new_sex_for_api = MAPPING_SEX_REPRESENTATIONS.inverse.get(sex)\n response = _demo_client.post(\n f\"/update_patient_data/{individual_id}\",\n data=f\"gender_edit[]={new_sex_for_api}\",\n content_type=\"application/x-www-form-urlencoded\",\n )\n assert response.status_code == 405\n\n # fetch new sex\n db_session.refresh(individual)\n observed_sex = individual.sex\n assert observed_sex == sex, \"Update did work and it should not!\"\n\n\ndef test_update_individual_with_admin_user(_admin_client):\n\n # fetch current sex\n individual_id = \"PH00008267\"\n with session_scope() as db_session:\n individual = db_session.query(Individual).filter(Individual.phenopolis_id == individual_id).first()\n sex1 = individual.sex\n\n # update sex\n new_sex_for_api = \"unknown\"\n response = _admin_client.post(\n f\"/update_patient_data/{individual_id}\",\n data=f\"gender_edit[]={new_sex_for_api}&feature[]=Abnormality of body height\"\n \"&feature[]=Multicystic kidney dysplasia\"\n \"&feature[]=Mode of inheritance&genes[]=TTLL5&genes[]=GAST\",\n content_type=\"application/x-www-form-urlencoded\",\n )\n assert response.status_code == 200\n\n # confirm observed data\n db_session.refresh(individual)\n observed_sex = individual.sex\n assert observed_sex == MAPPING_SEX_REPRESENTATIONS.get(new_sex_for_api), \"Update sex did not work\"\n observed_hpo_names = [x[1] for x in vi._get_feature_for_individual(individual, atype=\"observed\")]\n assert len(observed_hpo_names) == 3, \"Update HPOs did not work\"\n unobserved_hpo_names = [x[1] for x in vi._get_feature_for_individual(individual, atype=\"unobserved\")]\n assert len(unobserved_hpo_names) == 17, \"Do not touch unobserved\"\n assert \"Abnormality of body height\" in observed_hpo_names, \"Update HPOs did not work\"\n assert \"Multicystic kidney dysplasia\" in observed_hpo_names, \"Update HPOs did not work\"\n assert \"Mode of inheritance\" in observed_hpo_names, \"Update HPOs did not work\"\n observed_hpos = [x[0] for x in vi._get_feature_for_individual(individual, 
atype=\"observed\")]\n assert len(observed_hpos) == 3, \"Update HPOs did not work\"\n assert \"HP:0000002\" in observed_hpos, \"Update HPOs did not work\"\n assert \"HP:0000003\" in observed_hpos, \"Update HPOs did not work\"\n assert \"HP:0000005\" in observed_hpos, \"Update HPOs did not work\"\n\n sex_org = MAPPING_SEX_REPRESENTATIONS.inverse.get(sex1)\n response = _admin_client.post(\n f\"/update_patient_data/{individual_id}\",\n data=f\"gender_edit[]={sex_org}&feature[]=Abnormality of body height\"\n \"&feature[]=Multicystic kidney dysplasia\"\n \"&feature[]=Mode of inheritance&genes[]=TTLL5&genes[]=GAST\",\n content_type=\"application/x-www-form-urlencoded\",\n )\n assert response.status_code == 200\n db_session.refresh(individual)\n observed_sex = individual.sex\n assert observed_sex == sex1, \"Update sex did not work\"\n\n individual_id = \"PH00009999\"\n response = _admin_client.post(\n f\"/update_patient_data/{individual_id}\",\n data=\"genes[]=DRAM2\",\n content_type=\"application/x-www-form-urlencoded\",\n )\n assert response.status_code == 404, \"Patient does not exist\"\n\n\ndef test_create_individual_with_demo_user_fails(_demo_client):\n individual = Individual()\n individual.phenopolis_id = \"PH00000000\"\n response = _demo_client.post(\"/individual\", json=individual.as_dict(), content_type=\"text/json\")\n assert response.status_code == 405\n\n\[email protected]((\"sample\", \"sex\"), ((\"for_test_Sample1\", \"U\"), (\"for_test_Sample2\", \"F\")))\ndef test_create_individual_with_admin_user(_admin_client, sample, sex):\n individual = Individual()\n test_external_id = sample\n individual.external_id = test_external_id\n response = _admin_client.post(\"/individual\", json=[individual.as_dict()], content_type=\"application/json\")\n assert response.status_code == 400\n assert response.json == {\"error\": \"Null individual\", \"success\": False}, \"Sex cannot be null\"\n individual.sex = sex\n individual.consanguinity = \"unknown\"\n individual.genes = \"DRAM2\"\n individual.observed_features = \"HP:0000001\"\n response = _admin_client.post(\"/individual\", json=[Individual().as_dict()], content_type=\"application/json\")\n assert response.status_code == 400\n assert response.json == {\"error\": \"Null individual\", \"success\": False}, \"Empty individual\"\n response = _admin_client.post(\"/individual\", json={}, content_type=\"application/json\")\n assert response.status_code == 400\n assert response.json == {\"error\": \"Empty payload or wrong formatting\", \"success\": False}\n response = _admin_client.post(\"/individual\", json=\"not_dict_nor_list\", content_type=\"application/json\")\n assert response.status_code == 400\n assert response.json == {\"error\": \"Payload of unexpected type: <class 'str'>\", \"success\": False}\n response = _admin_client.post(\"/individual\", json=[individual.as_dict()], content_type=\"application/json\")\n assert response.status_code == 200\n\n with session_scope() as db_session:\n observed_individual = db_session.query(Individual).filter(Individual.external_id == test_external_id).first()\n phenopolis_id = observed_individual.phenopolis_id\n assert observed_individual is not None, \"Empty newly created individual\"\n assert observed_individual.external_id == test_external_id\n assert observed_individual.sex.name == individual.sex\n assert observed_individual.consanguinity == individual.consanguinity\n\n response = _admin_client.post(\n f\"/update_patient_data/{phenopolis_id}\",\n data=\"genes[]=TTLL5\",\n 
content_type=\"application/x-www-form-urlencoded\",\n )\n\n assert response.status_code == 200, \"Test empty features\"\n\n # cleans the database\n _clean_test_individuals(_admin_client, db_session, test_external_id)\n\n\ndef test_create_individual_existing_individual_fails(_admin_client):\n individual = Individual()\n test_external_id = \"for_test_Sample\"\n individual.external_id = test_external_id\n individual.sex = \"M\"\n individual.genes = \"DRAM2,TTLL5\"\n individual.observed_features = \"HP:0000001,HP:0000618\"\n response = _admin_client.post(\"/individual\", json=[individual.as_dict()], content_type=\"application/json\")\n assert response.status_code == 200\n\n with session_scope() as db_session:\n observed_individual = db_session.query(Individual).filter(Individual.external_id == test_external_id).first()\n assert observed_individual is not None, \"Empty newly created individual\"\n\n # try to create the same individual again\n response = _admin_client.post(\"/individual\", json=[individual.as_dict()], content_type=\"application/json\")\n assert response.status_code == 400\n\n # cleans the database\n _clean_test_individuals(_admin_client, db_session, test_external_id)\n\n\ndef test_create_multiple_individuals(_admin_client):\n individual = Individual()\n test_external_id = \"for_test_Sample\"\n individual.external_id = test_external_id\n individual.sex = \"M\"\n individual.genes = \"DRAM2\"\n individual.observed_features = \"HP:0000001\"\n individual2 = Individual()\n test_external_id2 = \"for_test_Sample2\"\n individual2.external_id = test_external_id2\n individual2.sex = \"F\"\n response = _admin_client.post(\n \"/individual\", json=[individual.as_dict(), individual2.as_dict()], content_type=\"application/json\"\n )\n assert response.status_code == 200\n\n with session_scope() as db_session:\n observed_individual = db_session.query(Individual).filter(Individual.external_id == test_external_id).first()\n assert observed_individual is not None, \"Empty newly created individual\"\n assert observed_individual.sex.name == individual.sex\n observed_individual2 = db_session.query(Individual).filter(Individual.external_id == test_external_id2).first()\n assert observed_individual2 is not None, \"Empty newly created individual\"\n assert observed_individual2.sex.name == individual2.sex\n\n # cleans the database\n _clean_test_individuals(_admin_client, db_session, test_external_id)\n _clean_test_individuals(_admin_client, db_session, test_external_id2)\n\n\ndef test_delete_individual(_admin_client):\n # creates an individual\n individual = Individual()\n test_external_id = \"for_test_Sample\"\n individual.external_id = test_external_id\n individual.sex = \"M\"\n response = _admin_client.post(\"/individual\", json=[individual.as_dict()], content_type=\"application/json\")\n assert response.status_code == 200\n\n # confirms existence of new individual\n with session_scope() as db_session:\n observed_individual = db_session.query(Individual).filter(Individual.external_id == test_external_id).first()\n assert observed_individual is not None, \"Empty newly created individual\"\n\n # deletes individual\n response = _admin_client.delete(\n f\"/individual/{observed_individual.phenopolis_id}\", content_type=\"application/json\"\n )\n assert response.status_code == 200\n\n # confirms it does not exist\n observed_individual = db_session.query(Individual).filter(Individual.external_id == test_external_id).first()\n assert observed_individual is None, \"Deletion was not successful\"\n\n # try to 
delete non-existent individual\n response = _admin_client.delete(\"/individual/PH00000000\", content_type=\"application/json\")\n assert response.status_code == 404\n\n\ndef test_delete_individual_for_user(_nondemo_client):\n # creates an individual\n individual = Individual()\n test_external_id = \"for_test_Sample\"\n individual.external_id = test_external_id\n individual.sex = \"M\"\n response = _nondemo_client.post(\"/individual\", json=[individual.as_dict()], content_type=\"application/json\")\n assert response.status_code == 200\n\n # confirms existence of new individual\n with session_scope() as db_session:\n observed_individual = db_session.query(Individual).filter(Individual.external_id == test_external_id).first()\n assert observed_individual is not None, \"Empty newly created individual\"\n\n # deletes individual\n response = _nondemo_client.delete(\n f\"/individual/{observed_individual.phenopolis_id}\", content_type=\"application/json\"\n )\n assert response.status_code == 200\n\n # confirms it does not exist\n observed_individual = db_session.query(Individual).filter(Individual.external_id == test_external_id).first()\n assert observed_individual is None, \"Deletion was not successful\"\n\n # try to delete non-existent individual\n response = _nondemo_client.delete(\"/individual/PH00000000\", content_type=\"application/json\")\n assert response.status_code == 404\n\n # try to delete a non-authorised patient for a given user\n response = _nondemo_client.delete(\"/individual/PH00008258\", content_type=\"application/json\")\n assert response.status_code == 404, \"PH00008258 exists but access not authorised\"\n\n\ndef test_get_all_individuals_default_page(_demo):\n response, status = get_all_individuals()\n assert status == 200\n individuals = response.json\n assert len(individuals) <= 100, \"Page is greater than the maximum size of 100\"\n assert len(individuals) > 0, \"There are no results\"\n for i in individuals:\n assert \"demo\" in i.get(\"users\"), \"User demo not in the list of users\"\n assert len(i.get(\"users\")) == 1, \"Other users than demo are in the list\"\n\n\ndef test_get_all_individuals_with_admin_default_page(_admin):\n vi.MAX_PAGE_SIZE = 5\n response, status = get_all_individuals()\n assert status == 400\n assert response.json == {\"message\": \"The maximum page size for individuals is 5\"}\n vi.MAX_PAGE_SIZE = 100000\n response, status = get_all_individuals()\n individuals = response.json\n assert len(individuals) <= 100, \"Page is greater than the maximum size of 100\"\n assert len(individuals) > 0, \"There are no results\"\n found_individual_multiple_users = False\n for i in individuals:\n assert \"Admin\" in i.get(\"users\"), \"User Admin not in the list of users\"\n assert len(i.get(\"users\")) >= 1, \"Other users than demo are in the list\"\n found_individual_multiple_users = found_individual_multiple_users or len(i.get(\"users\")) > 1\n assert found_individual_multiple_users, \"Only Admin user reported as users with access to individuals\"\n\n\ndef test_get_all_individuals_with_pagination(_admin_client):\n\n response = _admin_client.get(\"/individual?limit=2&offset=0\")\n assert response.status_code == 200\n first_page = response.json\n assert len(first_page) == 2\n\n response = _admin_client.get(\"/individual?limit=2&offset=2\")\n assert response.status_code == 200\n second_page = response.json\n assert len(second_page) == 2\n\n # the third page\n response = _admin_client.get(\"/individual?limit=2&offset=4\")\n assert response.status_code == 200\n 
third_page = response.json\n assert len(third_page) == 0\n\n # check elements between the pages are different\n internal_ids = [i.get(\"id\") for i in first_page + second_page + third_page]\n assert len(set(internal_ids)) == 4\n\n\ndef test_get_individual_not_having_duplicated_keys(_admin):\n\n # test individual with homozygous variants\n individual_view = _get_view_individual_by_id(identifier=\"PH00008256\")\n column_names = [c[\"key\"] for c in individual_view.get(\"rare_homs\").get(\"colNames\")]\n assert len(column_names) == len(set(column_names)), \"There are duplicated column names in the rare_homs\"\n assert \"#CHROM\" not in column_names\n\n # test individuals with heterozygous and compound heterozygous\n individual_view = _get_view_individual_by_id(identifier=\"PH00008267\")\n column_names = [c[\"key\"] for c in individual_view.get(\"rare_comp_hets\").get(\"colNames\")]\n assert len(column_names) == len(set(column_names)), \"There are duplicated column names in the rare_comp_hets\"\n assert \"#CHROM\" not in column_names\n column_names = [c[\"key\"] for c in individual_view.get(\"rare_variants\").get(\"colNames\")]\n assert len(column_names) == len(set(column_names)), \"There are duplicated column names in the rare_variants\"\n assert \"#CHROM\" not in column_names\n assert \"'key': 'variant_id', 'name': 'Variant Id',\" in str(individual_view), \"Critical, must be present\"\n\n\ndef _clean_test_individuals(client, db_session: Session, test_external_id):\n i = db_session.query(Individual).filter(Individual.external_id == test_external_id).first()\n db_session.query(Individual).filter(Individual.external_id == test_external_id).delete()\n with client.session_transaction() as session:\n db_session.query(UserIndividual).filter(UserIndividual.user == session[USER]).filter(\n UserIndividual.internal_id == i.phenopolis_id\n ).delete()\n", "id": "10191840", "language": "Python", "matching_score": 3.9643373489379883, "max_stars_count": 3, "path": "tests/test_individuals.py" }, { "content": "import pytest\n\nfrom views.gene import gene\n\n\[email protected](\n (\"query\", \"subset\", \"full_gene_name\"),\n (\n (\"ENSG00000119685\", \"all\", \"tubulin tyrosine ligase-like family, member 5\"),\n (\"ENSG00000119685\", \"preview\", '{\"preview\":'),\n (\"TTLL5\", \"all\", \"tubulin tyrosine ligase-like family, member 5\"),\n (\"ENSG00000119685\", \"variants\", \"variant_id\"),\n (\"KIAA0998\", \"all\", \"tubulin tyrosine ligase-like family, member 5\"),\n (\"STAMP\", \"all\", \"tubulin tyrosine ligase-like family, member 5\"),\n ),\n)\ndef test_gene(_demo, query, subset, full_gene_name):\n response = gene(query, subset=subset)\n assert full_gene_name in str(response.data)\n assert response.cache_control.max_age == 300\n assert response.cache_control.public\n assert response.expires is not None\n\n\ndef test_gene_not_found(_demo):\n response = gene(\"fake_gene\")\n assert response.status_code == 404\n assert response.data == b'{\"message\":\"Gene not found\"}\\n'\n\n\ndef test_gene_not_having_duplicated_keys(_demo):\n response = gene(\"TTLL5\")\n gene_results = response.json\n column_names = [c[\"key\"] for c in gene_results[0][\"variants\"][\"colNames\"]]\n assert len(column_names) == len(set(column_names)), \"There are duplicated column names in the variants\"\n assert \"#CHROM\" not in column_names\n\n\[email protected](\n (\"query\", \"subset\", \"msg\"),\n (\n (\"ENSG00000119685\", \"all\", \"'canonical_peptide': 'ENSP00000450713',\"),\n (\"TTLL5\", \"all\", \"'canonical_transcript': 
'ENST00000557636',\"),\n (\"STAMP\", \"all\", \"'uniprot': ['Q6EMB2'],\"),\n (\"GAST\", \"all\", \"'stop': 39872221,\"),\n (\"DRAM2\", \"preview\", \"[{'preview': [['Number of variants', 75], ['CADD > 20', 2]]}]\"),\n ),\n)\ndef test_gene_web(_demo_client, query, subset, msg):\n resp = _demo_client.get(f\"/gene/{query}/{subset}\")\n assert resp.status_code == 200\n assert msg in str(resp.json)\n", "id": "394381", "language": "Python", "matching_score": 2.8174819946289062, "max_stars_count": 3, "path": "tests/test_genes.py" }, { "content": "import pytest\n\nfrom views.autocomplete import HPO_REGEX, NUMERIC_REGEX\n\n\[email protected](\n (\"query\", \"qt\", \"msg\"),\n (\n # gene search\n (\"ttll\", \"\", \"gene::TTLL5::ENSG00000119685\"),\n (\"ttll\", \"gene\", \"gene::TTLL5::ENSG00000119685\"),\n # TODO: populate the genes.csv for testing with a larger dataset\n (\"BRC\", \"gene\", None),\n (\"kiaa099\", \"gene\", \"gene::TTLL5::ENSG00000119685\"),\n (\"ENSG0000015617\", \"gene\", \"gene::DRAM2::ENSG00000156171\"),\n (\"ENSG0000015617.3\", \"gene\", \"gene::DRAM2::ENSG00000156171\"), # version is ignored\n (\"15617\", \"gene\", \"gene::DRAM2::ENSG00000156171\"),\n (\"ENST00000557636\", \"gene\", \"gene::TTLL5::ENSG00000119685\"),\n (\"557636\", \"gene\", \"gene::TTLL5::ENSG00000119685\"),\n (\"something_confusing\", \"gene\", None),\n # phenotype search\n (\"retinal\", \"phenotype\", \"hpo::Retinal dystrophy::HP:0000556\"),\n (\"HP:0000007\", \"phenotype\", \"hpo::Autosomal recessive inheritance::HP:0000007\"),\n (\"118\", \"phenotype\", \"hpo::Phenotypic abnormality::HP:0000118\"),\n (\"HP:000010\", \"phenotype\", \"hpo::Renal cyst::HP:0000107\"),\n (\"intelligence\", \"phenotype\", None),\n # TODO: when we search over HPO synonyms this search should return dyschromatopsia, red-gree dyschromatopsia,\n # TODO: monochromacy, tritanomaly and protanomaly\n # (\"color blindness\", \"phenotype\", \"hpo::Blindness::HP:0000618\"),\n # (\"achromatopsia\", \"phenotype\", \"hpo::Achromatopsia::HP:0011516\"),\n (\"хороший\", \"phenotype\", None),\n # patient search\n (\"PH000082\", \"patient\", \"individual::PH00008267::PH00008267\"),\n (\"82\", \"patient\", \"individual::PH00008267::PH00008267\"),\n (\"0082\", \"patient\", \"individual::PH00008267::PH00008267\"),\n (\"PH0082\", \"patient\", None),\n (\"PH000083\", \"patient\", None),\n # variant search\n (\"14-76156\", \"variant\", \"variant::14-76156407-T-C::14-76156407-T-C\"),\n (\"14-76156-A-G\", \"variant\", \"variant::14-76156575-A-G::14-76156575-A-G\"),\n (\"14-7615-A\", \"variant\", \"variant::14-76156575-A-G::14-76156575-A-G\"),\n (\"ENST00000286692.4:c.*242A>G\", \"variant\", \"variant::1-111660540-T-C::1-111660540-T-C\"),\n (\"ENST00000286692:c.*242A>G\", \"variant\", \"variant::1-111660540-T-C::1-111660540-T-C\"),\n (\"DRAM2:c.*242A>G\", \"variant\", \"variant::1-111660540-T-C::1-111660540-T-C\"),\n (\"ENSG00000156171:c.*242A>G\", \"variant\", \"variant::1-111660540-T-C::1-111660540-T-C\"),\n (\"ENSP00000286692.4:p.His\", \"variant\", \"variant::1-111663293-T-A::1-111663293-T-A\"),\n (\"ENSP00000286692:p.His\", \"variant\", \"variant::1-111663293-T-A::1-111663293-T-A\"),\n (\"DRAM2:p.His\", \"variant\", \"variant::1-111663293-T-A::1-111663293-T-A\"),\n (\"ENSG00000156171:p.His\", \"variant\", \"variant::1-111663293-T-A::1-111663293-T-A\"),\n (\"1-11166\", \"variant\", \"variant::1-111660181-G-GA::1-111660181-G-GA\"),\n (\"25-11166\", \"variant\", None),\n (\"something_confusing\", \"variant\", None),\n (\"14-76156300-76156500\", 
\"variant\", \"variant::14-76156407-T-C::14-76156407-T-C\"),\n (\"14:76156300-76156500\", \"variant\", \"variant::14-76156407-T-C::14-76156407-T-C\"),\n (\"14:76156300:76156500\", \"variant\", \"variant::14-76156407-T-C::14-76156407-T-C\"),\n (\"not_a_chromosome:76156300:76156500\", \"variant\", None),\n (\"14:-100:-200\", \"variant\", None),\n (\"14:76156500:76156300\", \"variant\", None),\n ),\n)\ndef test_autocomplete(_demo_client, query, qt, msg):\n resp = _demo_client.get(\"/autocomplete/{query}?query_type={qt}\".format(query=query, qt=qt))\n assert resp.status_code == 200\n if msg:\n assert msg in resp.json\n if qt == \"patient\":\n # the results must be sorted by individual.phenopolis_id\n assert resp.json == sorted(resp.json)\n elif qt == \"phenotype\":\n if HPO_REGEX.match(query) or NUMERIC_REGEX.match(query):\n # HPO query by query id, results sorted by hpo.hpo_id\n phenotypes_ids = [x.split(\"::\")[2] for x in resp.json]\n assert phenotypes_ids == sorted(phenotypes_ids)\n else:\n # HPO query by name, results sorted by query similarity to hpo.hpo_name\n phenotypes_names = [x.split(\"::\")[1] for x in resp.json]\n # NOTE: semantic search simplification for \"easy\" search, results having an exact match of the query are\n # sorted by length og HPO name, inexact searches are more tricky\n assert phenotypes_names == sorted(\n phenotypes_names, key=lambda x: len(x) if query.lower() in x.lower() else 100 + len(x)\n )\n elif qt == \"variant\":\n assert msg == resp.json[0]\n else:\n assert len(resp.json) == 0\n\n\[email protected](\n (\"limit\", \"msg\"),\n (\n (\"acme\", {\"message\": \"Please, specify a numeric limit value, acme\", \"success\": False}),\n (\"2000\", {\"message\": \"Please, specify a limit lower than 1000\", \"success\": False}),\n ),\n)\ndef test_autocomplete_limit(_demo_client, limit, msg):\n resp = _demo_client.get(\"/autocomplete/ttll?limit={limit}\".format(limit=limit))\n assert resp.status_code == 400\n assert resp.json == msg\n\n\n# TODO: add tests for limit\n\n\ndef test_autocomplete_wrong_query_type(_demo_client):\n resp = _demo_client.get(\"/autocomplete/ttll?query_type=acme\")\n assert resp.status_code == 400\n assert resp.json == {\"message\": \"Autocomplete request with unsupported query type 'acme'\", \"success\": False}\n", "id": "9219826", "language": "Python", "matching_score": 2.309051036834717, "max_stars_count": 24, "path": "tests/test_autocomplete.py" }, { "content": "import pytest\nfrom views.hpo import hpo\n\n\[email protected](\n (\"query\", \"subset\", \"msg\"),\n (\n (\"HP:0000001\", \"all\", '{\"display\":\"GAST\"}'),\n (\"HP:0000478\", \"all\", \"Phenotypic abnormality\"),\n (\"Conductive hearing impairment\", \"all\", \"HP:0000405\"),\n (\"HP:0000478\", \"preview\", '[{\"preview\":[[\"Number of Individuals\"'),\n (\"HP:0000478\", \"metadata\", '\"name\":\"Abnormality of the eye\"'),\n ),\n)\ndef test_hpo(_demo, query, subset, msg):\n \"\"\"res -> str\"\"\"\n response = hpo(query, subset=subset)\n assert msg in str(response.data)\n\n\ndef test_duplicated_hpo(_demo_client):\n resp = _demo_client.get(\"/hpo/HP:0000001\")\n assert resp.status_code == 200\n a1 = [x[\"display\"] for x in resp.json[0][\"individuals\"][\"data\"][0][\"simplified_observed_features_names\"]]\n assert len(a1) == len(set(a1)), \"Duplicated hpo_ids\"\n\n\[email protected](\n (\"query\", \"subset\", \"msg\"),\n (\n (\"HP:0001\", \"preview\", [{\"preview\": [[\"Number of Individuals\", 0]]}]), # HP:0001 does not exist in DB\n (\"xyw2zkh\", \"preview\", [{\"preview\": 
[[\"Number of Individuals\", 0]]}]), # xyw2zkh does not exist in DB\n (\"HP:0000478\", \"preview\", [{\"preview\": [[\"Number of Individuals\", 1]]}]),\n ),\n)\ndef test_hpo_web(_nondemo_client, query, subset, msg):\n resp = _nondemo_client.get(f\"/hpo/{query}/{subset}\")\n assert resp.status_code == 200\n assert resp.json == msg\n\n\[email protected](\n (\"query\", \"subset\", \"msg\"),\n (\n (\"HP:0001\", \"all\", \"HPO not found\"), # HP:0001 does not exist in DB\n (\"xyw2zkh\", \"all\", \"HPO not found\"), # xyw2zkh does not exist in DB\n ),\n)\ndef test_hpo_preview(_nondemo_client, query, subset, msg):\n resp = _nondemo_client.get(f\"/hpo/{query}/{subset}\")\n assert resp.status_code == 404\n assert resp.json.get(\"message\") == msg\n", "id": "1768188", "language": "Python", "matching_score": 2.5700881481170654, "max_stars_count": 24, "path": "tests/test_hpo.py" }, { "content": "from views.statistics import phenopolis_statistics\n\n\ndef test_statistics_api(_demo):\n \"\"\"res -> dict\"\"\"\n res = phenopolis_statistics().json\n assert \"total_variants\" in res.keys()\n\n\ndef test_statistics(_admin_client):\n resp = _admin_client.get(\"/statistics\")\n assert resp.status_code == 200\n data = resp.json\n assert data.get(\"exomes\") == 4\n assert data.get(\"females\") == 1\n assert data.get(\"males\") == 2\n assert data.get(\"unknowns\") == 1\n assert data.get(\"total_variants\") == 4099\n assert data.get(\"observed_features\") == 7\n assert data.get(\"unobserved_features\") == 17\n assert data.get(\"total_genes\") == 3\n\n\ndef test_statistics_with_demo_user(_demo_client):\n resp = _demo_client.get(\"/statistics\")\n assert resp.status_code == 200\n data = resp.json\n assert data.get(\"exomes\") == 4\n assert data.get(\"females\") == 1\n assert data.get(\"males\") == 2\n assert data.get(\"unknowns\") == 1\n assert data.get(\"total_variants\") == 4099\n assert data.get(\"observed_features\") == 7\n assert data.get(\"unobserved_features\") == 17\n assert data.get(\"total_genes\") == 3\n\n\ndef test_statistics_with_nondemo_user(_nondemo_client):\n resp = _nondemo_client.get(\"/statistics\")\n assert resp.status_code == 200\n data = resp.json\n assert data.get(\"exomes\") == 1\n assert data.get(\"females\") == 0\n assert data.get(\"males\") == 1\n assert data.get(\"unknowns\") == 0\n assert data.get(\"total_variants\") == 1406\n assert data.get(\"observed_features\") == 3\n assert data.get(\"unobserved_features\") == 0\n assert data.get(\"total_genes\") == 1\n\n\ndef test_my_variants(_demo_client):\n resp = _demo_client.get(\"/my_variants?limit=10000\")\n assert len(resp.json) == 4099\n assert \"'variant_id': [{'display': '14-95236097-C-A'\" in str(resp.json)\n resp = _demo_client.get(\"/my_variants?limit=100001\")\n assert resp.status_code == 400\n assert resp.json == {\"message\": \"The maximum page size for variants is 100000\"}\n\n\ndef test_my_genes(_demo_client):\n resp = _demo_client.get(\"/my_genes\")\n assert len(resp.json) == 3\n assert \"'percentage_gene_gc_content': 49.73\" in str(resp.json)\n\n\ndef test_my_hpos(_demo_client):\n resp = _demo_client.get(\"/my_hpos\")\n assert len(resp.json) == 7\n assert \"'Abnormal retinal morphology'\" in str(resp.json)\n resp = _demo_client.get(\"/my_hpos?limit=100001\")\n assert resp.status_code == 400\n assert resp.json == {\"message\": \"The maximum page size for variants is 100000\"}\n", "id": "1544437", "language": "Python", "matching_score": 2.1733644008636475, "max_stars_count": 24, "path": "tests/test_statistics.py" }, { "content": 
"from views.variant import _get_genotypes # noqa: F401\nfrom views.variant import _get_variants, variant, variant_preview\n\n\ndef test_get_genotypes_exception(capsys):\n # if this happens, something is out of sync between VCF file and variant table in DB\n # or cyvcf2 is broken again\n _get_genotypes(\"443\", \"10000\")\n captured = capsys.readouterr()\n assert \"no intervals found for\" in captured.err + captured.out\n\n\ndef test_variant(_demo):\n \"\"\"\n This tests VCF access via cyvcf2\n tests both for subset and entry not in DB, the real one is 14-76127655-C-T\n res -> str\n \"\"\"\n response = variant(\"1-111660351-G-T\")\n assert '\"gene_id\":\"ENSG00000156171\",\"gene_symbol\":[{\"display\":\"DRAM2\"}]' in str(response.data), \"Critical\"\n assert '\"hgvsc\":\"ENST00000286692.4:c.*431C>A\"' in str(response.data), \"Critical\"\n # assert '{\"display\":\"my:PH00008258\"' in str(response.data), \"Check for 'my:...\"\n assert len(str(response.json)) == 7439\n assert \"'cadd_phred': 2.531, 'dann': 0.625\" in str(response.json), \"Check col frequency data\"\n assert \"end_href': '1-111660351-G-T'\" in str(response.json), \"Critical, must be present\"\n assert \"'gene_id': 'ENSG00000156171'\" in str(response.json), \"Critical, must be present\"\n assert \"Variant Id\" not in str(response.json), \"Critical, must be present\"\n\n\ndef test_variant_web(_admin_client):\n resp = _admin_client.get(\"/variant/14-76156575-A-G\")\n assert resp.status_code == 200\n assert \"[{'display': 'my:PH00008258',\" in str(resp.json), \"Check for 'my:...\"\n\n\ndef test_variant_genotype_vcf(_admin_client):\n resp = _admin_client.get(\"/variant/14-76156575-A-G\")\n assert resp.status_code == 200\n assert len(resp.json[0][\"genotypes\"][\"data\"]) == 4, \"Critical, VCF access not working\"\n\n\ndef test_cyvcf2_S3(_admin_client):\n from cyvcf2 import VCF\n\n vcf_S3 = VCF(\"s3://3kricegenome/test/test.vcf.gz\") # public VCF file\n assert len(vcf_S3.raw_header) == 559362, \"Critical, S3 access not working\"\n\n\ndef test_missing_variant(_demo):\n response = variant(\"chr45-1234567890112233-C-G\")\n assert response.status_code == 404\n\n\ndef test_wrong_variant(_demo):\n response = variant(\"something-else\")\n assert response.status_code == 400\n\n\ndef test_variant_preview(_demo):\n response = variant_preview(\"14-76127655-C-G\")\n assert response.status_code == 200\n assert \"Clinvar\" in response.json\n\n\ndef test_wrong_variant_preview(_demo):\n response = variant_preview(\"something-else\")\n assert response.status_code == 400\n\n\ndef test_get_variants(_demo):\n response = _get_variants(\"wrong\")\n assert not response\n", "id": "1638336", "language": "Python", "matching_score": 1.7093065977096558, "max_stars_count": 24, "path": "tests/test_variants.py" }, { "content": "\"\"\"\nGeneral modules\n\"\"\"\nimport traceback\nfrom views.postgres import get_db\nimport ujson as json\nfrom time import strftime\nfrom flask import jsonify, request, Response, session\nfrom flask_mail import Message\nfrom sqlalchemy.orm import Session\nfrom werkzeug.exceptions import HTTPException\nfrom views import MAIL_USERNAME, VERSION, application, mail, APP_ENV\nfrom views.auth import DEMO_USER, USER\nfrom views.exceptions import PhenopolisException\nfrom datetime import datetime, timedelta\nfrom functools import wraps\n\n\[email protected](\"/check_health\")\ndef check_health():\n return jsonify(health=\"ok\"), 200\n\n\[email protected](\"/version\")\ndef get_version():\n return jsonify(version=VERSION), 200\n\n\[email 
protected]_request\ndef after_request(response):\n application.logger.info(\n f\"{request.remote_addr} {request.method} {request.scheme} {request.full_path} {response.status}\"\n )\n # avoids rewriting the cache config if it has been set previously\n if \"Cache-control\" not in response.headers:\n response.headers[\"Cache-Control\"] = \"no-cache\"\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n # prevent click-jacking vulnerability identified by BITs\n # response.headers[\"X-Frame-Options\"] = \"SAMEORIGIN\"\n response.headers[\"Access-Control-Allow-Origin\"] = \"*\"\n return response\n\n\[email protected](Exception)\ndef exceptions(e):\n application.logger.error(\n f\"\"\"{VERSION} {request.remote_addr} {request.method} {request.scheme} {request.full_path}\n 5xx INTERNAL SERVER ERROR\"\"\"\n )\n application.logger.exception(e)\n response = Response()\n response.status_code = 500 # this is the default\n if isinstance(e, HTTPException):\n # start with the correct headers and status code from the error\n response = e.get_response()\n if response.status_code != 404:\n _send_error_mail(response.status_code)\n return _build_response_from_exception(response, e)\n\n\ndef _build_response_from_exception(response, exception):\n message = [str(x) for x in exception.args]\n success = False\n response.data = json.dumps(\n {\n \"success\": success,\n \"error\": {\"type\": exception.__class__.__name__, \"message\": message},\n \"remote_addr\": application.config[\"SERVED_URL\"],\n \"full_path\": request.full_path,\n \"method\": request.method,\n \"scheme\": request.scheme,\n \"timestamp\": strftime(\"[%Y-%b-%d %H:%M]\"),\n \"version\": VERSION,\n }\n )\n response.content_type = \"application/json\"\n return response\n\n\ndef _send_error_mail(code):\n msg = Message(\n f\"{code}: {request.method} {application.config['SERVED_URL']}{request.full_path}\",\n sender=MAIL_USERNAME,\n recipients=[MAIL_USERNAME],\n )\n msg.body = f\"Version: {VERSION}\\n{traceback.format_exc()}\"\n mail.send(msg)\n\n\n# TODO: who will review this?\n# this should not be done live but offline\n# need to figure out how to encode json data type in postgres import\n# rather do the conversion on the fly\ndef process_for_display(db_session: Session, data):\n with get_db() as conn:\n with conn.cursor() as cur:\n cur.execute('select ui.internal_id from public.users_individuals ui where ui.\"user\" = %s', [session[USER]])\n my_patients = [x[0] for x in cur.fetchall()]\n # TODO: avoid this transformation to dict and use the objects themselves\n for x2 in data:\n if \"CHROM\" in x2 and \"POS\" in x2 and \"REF\" in x2 and \"ALT\" in x2:\n variant_id = f'{x2[\"CHROM\"]}-{x2[\"POS\"]}-{x2[\"REF\"]}-{x2[\"ALT\"]}'\n x2[\"variant_id\"] = [{\"end_href\": variant_id, \"display\": variant_id[:60]}]\n if \"gene_symbol\" in x2:\n x2[\"gene_symbol\"] = [{\"display\": x3} for x3 in x2[\"gene_symbol\"].split(\",\") if x3]\n if x2.get(\"HET\"):\n x2[\"HET\"] = [\n {\"display\": \"my:\" + x3, \"end_href\": x3}\n if x3 in my_patients\n else {}\n if session[USER] == DEMO_USER\n else {\"display\": x3, \"end_href\": x3}\n for x3 in x2[\"HET\"]\n ]\n if x2.get(\"HOM\"):\n x2[\"HOM\"] = [\n {\"display\": \"my:\" + x3, \"end_href\": x3}\n if x3 in my_patients\n else {}\n if session[USER] == DEMO_USER\n else {\"display\": x3, \"end_href\": x3}\n for x3 in x2[\"HOM\"]\n ]\n # NOTE: nowhere in the project is using the lines below, I'm commenting them out @alan\n # NOTE: gene.py has commented lines about 'related_hpo' @alan\n # if 
\"hpo_ancestors\" in x2:\n # x2[\"hpo_ancestors\"] = [{\"display\": x3} for x3 in x2[\"hpo_ancestors\"].split(\";\") if x3]\n # if \"genes\" in x2 and x2[\"genes\"] == \"\":\n # x2[\"genes\"] = []\n\n\ndef _parse_boolean_parameter(val):\n \"\"\"Convert a string representation of truth to true (1) or false (0).\n True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values\n are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if\n 'val' is anything else.\n \"\"\"\n # NOTE: this code was adapted from https://github.com/python/cpython/blob/master/Lib/distutils/util.py#L307\n val = val.lower()\n if val in (\"y\", \"yes\", \"t\", \"true\", \"on\", \"1\"):\n return 1\n elif val in (\"n\", \"no\", \"f\", \"false\", \"off\", \"0\"):\n return 0\n else:\n raise PhenopolisException(\"invalid truth value %r\" % (val,), 400)\n\n\ndef cache_on_browser(minutes=5):\n \"\"\" Flask decorator that allow to set Expire and Cache headers. \"\"\"\n if APP_ENV == \"debug\":\n minutes = 0\n\n def fwrap(f):\n @wraps(f)\n def wrapped_f(*args, **kwargs):\n response = f(*args, **kwargs)\n then = datetime.now() + timedelta(minutes=minutes)\n response.headers.add(\"Expires\", then.strftime(\"%a, %d %b %Y %H:%M:%S GMT\"))\n response.headers.add(\"Cache-Control\", \"public,max-age=%d\" % int(60 * minutes))\n return response\n\n return wrapped_f\n\n return fwrap\n\n\ndef _get_pagination_parameters():\n try:\n offset = int(request.args.get(\"offset\", 0))\n limit = int(request.args.get(\"limit\", 10))\n except ValueError as e:\n raise PhenopolisException(str(e), 500)\n return limit, offset\n", "id": "12716117", "language": "Python", "matching_score": 3.5123062133789062, "max_stars_count": 24, "path": "views/general.py" }, { "content": "\"\"\"\nTest web views api\n\nTODO:\n - How to test for session timeout??\n I think it's a frontend feature\n\"\"\"\n\nfrom werkzeug.exceptions import BadHost\n\nfrom views.gene import gene\nfrom views.general import after_request, check_health, exceptions\nfrom views.individual import get_all_individuals\n\n\ndef test_check_health(_demo):\n \"\"\"res -> tuple(flask.wrappers.Response)\"\"\"\n kv = dict([(check_health, b'{\"health\":\"ok\"}\\n')])\n for func, msg in kv.items():\n res = func()[0]\n assert res.status_code == 200\n assert res.data == msg\n\n\ndef test_after_request(_demo):\n \"\"\"\n Also tests gene_not_found\n resp -> tuple(flask.wrappers.Response)\n res -> flask.wrappers.Response\n \"\"\"\n # tries an endpoint that allows caching\n response = gene(\"fake_gene\")\n assert response.status_code == 404\n assert response.data == b'{\"message\":\"Gene not found\"}\\n'\n res = after_request(response)\n assert res.status_code == 404\n assert res.data == b'{\"message\":\"Gene not found\"}\\n'\n assert res.headers[\"Cache-Control\"] == \"public,max-age=300\"\n\n # tries an endpoint that does not allow caching\n response, _status = get_all_individuals()\n res = after_request(response)\n assert res.headers[\"Cache-Control\"] == \"no-cache, no-store, must-revalidate\"\n\n\ndef test_exceptions(_demo):\n \"\"\"\n ee = werkzeug.exceptions.BadHost\n res -> werkzeug.wrappers.response.Response\n \"\"\"\n ee = BadHost()\n res = exceptions(ee)\n assert res.status_code == 400\n\n\ndef test_version(_not_logged_in_client):\n res = _not_logged_in_client.get(\"/version\")\n assert res.status_code == 200\n assert res.json.get(\"version\")\n\n\ndef _check_only_available_to_admin(res):\n assert res[0].status_code == 200\n assert res[0].data == b'{\"error\":\"Admin permissions required to 
perform this operation\"}\\n'\n assert res[1] == 403\n", "id": "8718944", "language": "Python", "matching_score": 1.9905482530593872, "max_stars_count": 3, "path": "tests/test_views.py" }, { "content": "def test_login_logout(_not_logged_in_client):\n resp = _not_logged_in_client.get(\"is_logged_in\")\n assert resp.status_code == 401\n assert resp.json == {\"error\": \"Unauthenticated\"}\n\n resp = _not_logged_in_client.post(\"/login\", json={\"user\": \"demo\", \"password\": \"<PASSWORD>\"})\n assert resp.status_code == 401\n assert resp.json == {\"error\": \"Invalid Credentials. Please try again.\"}\n\n resp = _not_logged_in_client.post(\"/login\", json={\"user\": \"acme\", \"password\": \"<PASSWORD>\"})\n assert resp.status_code == 401\n assert resp.json == {\"error\": \"Invalid Credentials. Please try again.\"}\n\n resp = _not_logged_in_client.post(\"/login\", json={\"user\": \"demo\", \"password\": \"<PASSWORD>\"})\n assert resp.status_code == 200\n assert resp.json == {\"success\": \"Authenticated\", \"username\": \"demo\"}\n\n resp = _not_logged_in_client.get(\"/is_logged_in\")\n assert resp.status_code == 200\n assert resp.json == {\"username\": \"demo\"}\n\n resp = _not_logged_in_client.post(\"/logout\")\n assert resp.status_code == 200\n assert resp.json == {\"success\": \"logged out\"}\n\n resp = _not_logged_in_client.get(\"/is_logged_in\")\n assert resp.status_code == 401\n", "id": "9069477", "language": "Python", "matching_score": 1.863701343536377, "max_stars_count": 24, "path": "tests/test_auth.py" }, { "content": "from __future__ import print_function\n# Uncomment to run this module directly. TODO comment out.\n#import sys, os\n#sys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n# End of uncomment.\n\nimport unittest\nimport subprocess\nimport runserver\nfrom views import neo4j_driver\nimport helper\n\nfrom passlib.hash import argon2\n\nclass Neo4jTestCase(unittest.TestCase):\n\n def setUp(self):\n helper.load_neo4j_test_data()\n runserver.app.config['TESTING'] = True\n self.app = runserver.app.test_client()\n\n\n def tearDown(self):\n helper.delete_neo4j_test_data()\n\n\n def test_users_data(self):\n with neo4j_driver.session() as neo4j_session:\n # Test unknown user\n results = neo4j_session.run(\"MATCH (u:User {user : 'xxx'}) RETURN u.user AS user, u.argon_password AS argon_password\")\n result = results.single()\n assert(not result)\n\n # Test known user\n results = neo4j_session.run(\"MATCH (u:User {user : 'testSuite'}) RETURN u.user AS user, u.argon_password AS argon_password\")\n result = results.single()\n assert(result)\n assert result['user'] == 'testSuite'\n assert(argon2.verify('demo123', result['argon_password']))\n\n\n def test_login_logout(self):\n rv = self.login('Testx', 'demo123')\n assert rv.status_code == 401\n assert 'Invalid Credentials. Please try again.' in rv.data\n rv = self.login('testSuite', 'demo123x')\n assert rv.status_code == 401\n assert 'Invalid Credentials. 
Please try again' in rv.data\n rv = self.login('testSuite', 'demo123')\n assert rv.status_code == 200\n assert 'Authenticated' in rv.data\n rv = self.logout()\n assert rv.status_code == 200\n assert 'Please login' and 'username' and 'password' in rv.data\n\n\n def test_change_password(self):\n rv = self.login('testSuite', 'demo123')\n assert rv.status_code == 200\n assert 'Authenticated' in rv.data\n\n rv = self.change_password('testSuite', '<PASSWORD>', '<PASSWORD>')\n assert rv.status_code == 200\n print(rv.data)\n assert 'Password for username \\'testSuite\\' changed' in rv.data\n\n rv = self.login('testSuite', 'demo456')\n assert rv.status_code == 200\n\n rv = self.login('testSuite', 'demo123')\n assert rv.status_code == 401\n\n rv = self.change_password('testSuite', '<PASSWORD>', '<PASSWORD>')\n assert rv.status_code == 200\n\n rv = self.change_password('x', '<PASSWORD>', '<PASSWORD>')\n assert rv.status_code == 401\n\n rv = self.change_password('testSuite', 'x', '<PASSWORD>')\n assert rv.status_code == 401\n\n\n def login(self, username, password):\n return self.app.post('/login', data=dict(\n name=username,\n password=password\n ), follow_redirects=True)\n\n\n def logout(self):\n return self.app.get('/logout', follow_redirects=True)\n\n\n def change_password(self, username, password, new_pass_1):\n return self.app.post('/change_password', data=dict(\n change_pwd_name=username,\n current_password=password,\n new_password_1=<PASSWORD>,\n ), follow_redirects=True)\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "2034262", "language": "Python", "matching_score": 2.106621503829956, "max_stars_count": 24, "path": "tests/test_login.py" }, { "content": "import sys\nimport ConfigParser\nimport os\nfrom io import StringIO\nfrom neo4j.v1 import GraphDatabase, basic_auth, CypherError\nfrom passlib.hash import argon2\n\n\ndef setup_neo4j_driver(host, port, password):\n default_password = '<PASSWORD>' # Travis will use a fresh Neo4j, with the default password.\n local_password = password\n uri = \"bolt://\"+host+\":\"+str(port)\n\n # Try local_password.\n try:\n driver = GraphDatabase.driver(uri, auth=basic_auth(\"neo4j\", local_password))\n return driver\n except:\n pass\n\n # Try default_password.\n # Password handling from https://github.com/robinedwards/django-neomodel\n driver = GraphDatabase.driver(uri, auth=basic_auth(\"neo4j\", default_password))\n with driver.session() as neo4j_session:\n try:\n result = neo4j_session.run(\"MATCH (a:Person) WHERE a.name = {name} RETURN a\", {\"name\": \"Crick\"})\n\n except CypherError as ce:\n if 'The credentials you provided were valid, but must be changed before you can use this instance' in str(ce):\n neo4j_session.run(\"CALL dbms.changePassword({password})\", {'password': local_password})\n print(\"New database with no password set, setting password to '\", local_password, \"'.\")\n neo4j_session.close()\n else:\n raise ce\n return driver\n\ndef create_demo_user(neo4j_session):\n results = neo4j_session.run(\"MATCH (u:User {user : 'demo'}) RETURN u\")\n result = results.single()\n if not result:\n print(\"Adding user 'demo' to the neo4j database.\")\n neo4j_session.run(\"CREATE (a:User {user: {username}, argon_password: {hash}})\",\n {\"username\": \"demo\", \"hash\": argon2.hash(\"demo123\")}) \n\n\n# For use in easy_install.sh to set up a demo user.\nif __name__ == '__main__':\n uri = sys.argv[1] \n password = sys.argv[2] \n driver = GraphDatabase.driver(uri, auth=basic_auth(\"neo4j\", password))\n with driver.session() as neo4j_session: 
\n create_demo_user(neo4j_session)\n", "id": "1948216", "language": "Python", "matching_score": 2.0759265422821045, "max_stars_count": 24, "path": "views/neo4j_setup.py" }, { "content": "from views import neo4j_driver\nfrom views.neo4j_setup import create_demo_user\nfrom passlib.hash import argon2\n\n\ndef login(app):\n return app.post('/login', data=dict(\n name='demo',\n password='<PASSWORD>'\n ), follow_redirects=True)\n\n\n# We won't load from csv file because Neo4j is set up by default to load only from \n# folder <neo4j-home>\\import and we don't have access to change this on Travis-CI.\ndef load_neo4j_test_data(): \n with neo4j_driver.session() as neo4j_session:\n neo4j_session.run(\"CREATE (a:User {user: {username}, argon_password: {hash}})\",\n {\"username\": \"testSuite\", \"hash\": argon2.hash(\"demo123\")})\n\n\ndef delete_neo4j_test_data():\n with neo4j_driver.session() as neo4j_session:\n neo4j_session.run(\"MATCH (a:User) WHERE a.user = {username} \"\n \"DETACH DELETE a\",\n {\"username\": \"testSuite\"})\n\n\ndef create_neo4j_demo_user(): \n with neo4j_driver.session() as neo4j_session:\n create_demo_user(neo4j_session)\n\ndef my_patients_neo4j_data():\n user='demo'\n with neo4j_driver.session() as neo4j_session:\n # person1\n s=\"\"\"\n MATCH (u:User {user:'%s'})\n MERGE (u)-[r:WRITES]->(p:Person {personId:\"person1\", gender:\"M\", score:0.69})\n MERGE (t:Term {termId:\"HP:0000505\", name:\"Visual impairment\", observed:\"yes\"})\n MERGE (p)-[:PersonToObservedTerm]->(t)\n MERGE (p)-[:CandidateGene]->(g:Gene {gene_name:\"TTLL5\"})\n MERGE (gv1:GeneticVariant {variantId:\"22-38212762-A-G\", allele_freq:0.0002, kaviar_AF:0.000006})\n MERGE (gv2:GeneticVariant {variantId:\"14-76201609-C-G\", allele_freq:0.0002, kaviar_AF:0.000006})\n MERGE (p)<-[:HomVariantToPerson]-(gv1)\n MERGE (p)<-[:HetVariantToPerson]-(gv2);\n \"\"\" % (user)\n result = neo4j_session.run(s)\n\n # person2\n s=\"\"\"\n MATCH (u:User {user:'%s'}), (g:Gene {gene_name:\"TTLL5\"}), \n (gv1:GeneticVariant {variantId:\"22-38212762-A-G\"}), \n (gv2:GeneticVariant {variantId:\"14-76201609-C-G\"})\n MERGE (u)-[r:WRITES]->(p:Person {personId:\"person2\", gender:\"F\", score:0.69})\n MERGE (t505:Term {termId:\"HP:0000505\", name:\"Visual impairment\", observed:\"yes\"})\n MERGE (t479:Term {termId:\"HP:0000479\", name:\"Abnormality of the retina\", observed:\"yes\"})\n MERGE (t7754:Term {termId:\"HP:0007754\", name:\"Macular dystrophy\", observed:\"yes\"})\n MERGE (p)-[:PersonToObservedTerm]->(t505)\n MERGE (p)-[:PersonToObservedTerm]->(t479)\n MERGE (p)-[:PersonToObservedTerm]->(t7754)\n MERGE (p)-[:CandidateGene]->(g)\n MERGE (p)-[:CandidateGene]->(g1:Gene {gene_name:\"DRAM2\"})\n MERGE (p)-[:CandidateGene]->(g2:Gene {gene_name:\"RPGR\"})\n MERGE (p)-[:CandidateGene]->(g3:Gene {gene_name:\"TRIM32\"})\n MERGE (gv3:GeneticVariant {variantId:\"14-12312312-C-G\", allele_freq:0.0002, kaviar_AF:0.000006})\n MERGE (p)<-[:HomVariantToPerson]-(gv1)\n MERGE (p)<-[:HetVariantToPerson]-(gv2)\n MERGE (p)<-[:HetVariantToPerson]-(gv3);\n \"\"\" % (user)\n result = neo4j_session.run(s)\n\n #Gene to HPO term\n s=\"\"\"\n MATCH (g1:Gene {gene_name:\"TTLL5\"}), (g2:Gene {gene_name:\"DRAM2\"}),\n (g3:Gene {gene_name:\"TRIM32\"}), \n (t505:Term {termId:\"HP:0000505\"}), (t479:Term {termId:\"HP:0000479\"}),\n (t7754:Term {termId:\"HP:0007754\", name:\"Macular dystrophy\"})\n MERGE (g1)-[:GeneToTerm]->(t505)\n MERGE (g1)-[:GeneToTerm]->(t479)\n MERGE (g2)-[:GeneToTerm]->(t505)\n MERGE (g2)-[:GeneToTerm]->(t479)\n MERGE 
(g3)-[:GeneToTerm]->(t505)\n MERGE (g3)-[:GeneToTerm]->(t7754);\n \"\"\" \n result = neo4j_session.run(s)\n\n #Genetic variant to transcript variant\n s=\"\"\"\n MATCH (gv1:GeneticVariant {variantId:\"22-38212762-A-G\"})\n MERGE (gv1)-[:GeneticVariantToTranscriptVariant]->(:TranscriptVariant {variantId:\"Variant 01\"})\n \"\"\" \n result = neo4j_session.run(s)\n \n\n\n \n\n", "id": "234886", "language": "Python", "matching_score": 2.5093302726745605, "max_stars_count": 24, "path": "tests/helper.py" }, { "content": "import json\nfrom views import *\nfrom lookups import *\nfrom orm import *\nimport rest as annotation\nimport requests\nfrom config import config\nif config.IMPORT_PYSAM_PRIMER3:\n import primer3\nimport myvariant\nfrom vcf import vcf_query\nimport hashlib\nfrom bson.json_util import dumps\nfrom neo4j.v1 import GraphDatabase, basic_auth\n\n\[email protected]('/pheno4j/',methods=['GET'])\n@requires_auth\ndef pheno4j():\n neo=get_neo4j()\n result = neo.run(\"MATCH (a:Person) return a.personId as personId \")\n s='\\n'.join([ \"%s\" % (record[\"personId\"]) for record in result ])\n return s\n\n\[email protected]('/rv_sharing/<individual_id>/<thresh>/<allele_freq>/<limit>')\n@requires_auth\ndef rv_sharing(individual_id,thresh,allele_freq,limit):\n #thresh=0.05\n #allele_freq=0.001\n print individual_id\n print float(thresh)\n print float(allele_freq)\n neo=get_db('neo4j')\n q= \"\"\" MATCH (k:Person)\n WITH count(k) as numberOfPeople\n MATCH (p:Person {{personId:\"{personId}\"}})<-[:PRESENT_IN]-(gv:GeneticVariant)\n WHERE (gv.allele_freq < {allele_freq} or gv.hasExac = false)\n WITH size(()<-[:PRESENT_IN]-(gv)) as count , gv, p, numberOfPeople\n WHERE count > 1 \n AND ((count / toFloat(numberOfPeople)) <= {thresh})\n MATCH (gv)-[:PRESENT_IN]->(q:Person)\n WHERE p <> q\n WITH p,q,count(gv) as intersection, numberOfPeople\n ORDER BY intersection DESC limit {limit}\n MATCH (x:Person)<-[:PRESENT_IN]-(v:GeneticVariant)\n WHERE (x.personId = p.personId or x.personId = q.personId)\n AND (v.allele_freq < {allele_freq} or v.hasExac = false)\n AND ((size(()<-[:PRESENT_IN]-(v)) / toFloat(numberOfPeople)) <= {thresh})\n WITH p, q, v, intersection\n RETURN p.personId, q.personId, intersection, size(collect(distinct v)) as unionSum, (round((intersection/toFloat(size(collect(distinct v))))*100.0*10)/10) as PercentShared\n ORDER BY PercentShared DESC;\n \"\"\".format(thresh=float(thresh), personId=individual_id,allele_freq=float(allele_freq),limit=int(limit))\n result = neo.run(q)\n get_db('neo4j').close()\n return json.dumps([r.__dict__ for r in result], indent=4)\n\n\n", "id": "6776402", "language": "Python", "matching_score": 1.2612568140029907, "max_stars_count": 24, "path": "views/pheno4j.py" }, { "content": "import rest\nimport json\nimport orm\n\n\nclass Gene(object):\n def __init__(self, gene_id, db=None):\n self.gene_id=gene_id\n Gene.db=db\n self.data=Gene.db.genes.find_one({'gene_id':gene_id},{'_id':False})\n self.__dict__.update(self.data)\n @property\n def variants(self):\n #if 'variant_ids' not in self.__dict__:\n variants=[v for v in Gene.db.variants.find({'genes': self.gene_id}, projection={'_id': False})]\n print('number of variants', len(variants))\n self.__dict__['variant_ids']=[v['variant_id'] for v in variants]\n #self.save()\n orm.Variant.db=Gene.db\n for variant in variants:\n try:\n v=orm.Variant(variant_id=variant['variant_id'],data=variant)\n except Exception, e:\n print e\n continue\n yield v\n @property\n def variant_ids(self):\n if 'variant_ids' in self.__dict__: 
return self.__dict__['variant_ids']\n variants=[v for v in Gene.db.variants.find({'genes': self.gene_id}, projection={'_id': False})]\n self.__dict__['variant_ids']=[v['variant_id'] for v in variants]\n self.save()\n return self.__dict__['variant_ids']\n @property\n def transcripts(self):\n if 'transcripts' in self.__dict__: return self.__dict__['transcripts']\n self.__dict__['transcripts']=rest.mg.getgene(self.gene_id,'ensembl.transcripts')\n print(self.save())\n return self.__dict__['transcripts']\n @property\n def canonical_transcript(self):\n if 'transcripts' in self.__dict__: return self.__dict__['transcripts'][0]\n self.__dict__['transcripts']=rest.mg.getgene(self.gene_id,'ensembl.transcripts')\n print(self.save())\n return self.__dict__['transcripts'][0]\n @property\n def summary(self):\n if 'summary' in self.__dict__: return self.__dict__['summary']\n self.__dict__['summary']=rest.mg.getgene(self.gene_id,'reporter.summary')\n #self.save()\n return self.__dict__['summary']\n @property\n def exons(self):\n if 'exons' in self.__dict__: return self.__dict__['exons']\n self.__dict__['exons']=rest.mg.getgene(self.gene_id,'exons')\n #self.save()\n return self.__dict__['exons']\n def save(self):\n print('writing', self.gene_id, 'to database')\n return Gene.db.genes.update({'gene_id':self.gene_id},self.__dict__,upsert=True)\n\n\n\n\n\n\n\n", "id": "3551564", "language": "Python", "matching_score": 0.8314435482025146, "max_stars_count": 24, "path": "orm/gene.py" }, { "content": "\nfrom variant import Variant, csq_order, get_variants_by_rsid\nfrom gene import Gene\nfrom transcript import Transcript\nfrom individual import Individual\nfrom user import User\nfrom patient import Patient\n\n\n\n\n", "id": "8991885", "language": "Python", "matching_score": 0.8269808292388916, "max_stars_count": 24, "path": "orm/__init__.py" }, { "content": "\nfrom flask import jsonify\nimport lookups\nfrom os import listdir, chdir\nfrom os.path import isfile, join\nimport pymongo\nfrom collections import defaultdict, Counter\n\nclass User(object):\n def __init__(self, user, user_db, groups=[], email='', affiliation=''):\n User.db=user_db\n data=User.db.users.find_one({'user':user},{'_id':False})\n if data:\n self.__dict__.update(data)\n self.status={ 'message':'User account exists already.'%user, 'http_code':401}\n return\n data=User.db.new_users.find_one({'user':user},{'_id':False})\n if data:\n self.__dict__.update(data)\n self.status={ 'message':'User account %s request already created, still unapproved.'%user, 'http_code':401}\n return\n self.user=user\n self.email=email\n self.affiliation=affiliation\n self.groups=groups\n self.status={ 'message':'User account request created for %s.'%user, 'http_code':200}\n # add to unapproved table\n User.db.new_users.ensure_index('user',unique=True)\n User.db.new_users.insert_one( self.__dict__ )\n def __getattribute__(self, key):\n \"Emulate type_getattro() in Objects/typeobject.c\"\n v = object.__getattribute__(self, key)\n if hasattr(v, '__get__'): return v.__get__(None, self)\n return v\n def json(self):\n if '_id' in self.__dict__: del self.__dict__['_id']\n return jsonify(result=self.__dict__)\n def save(self):\n print('writing', self.external_id, 'to database')\n return Patient.db.user.update({'user':self.user},self.__dict__,upsert=True)\n @property\n def password(self):\n pass\n @property\n def external_ids(self):\n pass\n @property\n def individuals(self):\n pass\n @property\n def approved(self):\n pass\n", "id": "12716376", "language": "Python", 
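# orm/gene.py above repeats one pattern for variant_ids, transcripts, summary
# and exons: check self.__dict__ for a cached value, fetch it lazily on a miss
# (rest.mg.getgene or a Mongo query), then upsert the whole object via save().
# A minimal sketch of that memoisation idea; LazyRecord and its stubbed fetch
# and save methods are illustrative stand-ins, not classes from the codebase.
class LazyRecord(object):
    def __init__(self, key):
        self.key = key
    def _fetch_exons(self):
        # stand-in for rest.mg.getgene(self.gene_id, 'exons')
        return [{'start': 1, 'stop': 100}]
    def save(self):
        # stand-in for Gene.db.genes.update({'gene_id': ...}, self.__dict__, upsert=True)
        print('would upsert ' + self.key)
    @property
    def exons(self):
        if 'exons' in self.__dict__:                  # already cached on the instance
            return self.__dict__['exons']
        self.__dict__['exons'] = self._fetch_exons()  # lazy fetch on first access
        self.save()                                   # write-through so the next load is cheap
        return self.__dict__['exons']

record = LazyRecord('ENSG00000155657')
record.exons   # first access fetches and persists
record.exons   # second access comes straight from self.__dict__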
"matching_score": 1.387927770614624, "max_stars_count": 24, "path": "orm/user.py" }, { "content": "class PhenopolisException(Exception):\n\n http_status = None\n\n def __init__(self, message, http_status):\n super().__init__(message)\n self.http_status = http_status\n", "id": "3725679", "language": "Python", "matching_score": 0.0967472568154335, "max_stars_count": 24, "path": "views/exceptions.py" }, { "content": "from views import *\nfrom lookups import *\nimport requests\nimport re\nfrom utils import *\nimport itertools\nfrom config import config\nif config.IMPORT_PYSAM_PRIMER3:\n import pysam\nimport csv\n#hpo lookup\nimport random\nfrom flask import Response, request\nimport os\nfrom werkzeug.datastructures import Headers\nimport re\n\[email protected]('/bam_viewer/')\ndef bam_viewer():\n return render_template('igv_viewer.html')\n\[email protected]('/read_viz/bam/<sample>')\ndef read_viz(sample):\n BAM_FILES=app.config['BAM_FILES']\n print(request.method)\n headers=Headers()\n #headers.add('Content-Type','application/octet-stream')\n headers.add('Content-Transfer-Encoding','binary')\n #Date:Wed, 06 Jul 2016 17:19:52 GMT\n #ETag:\"flask-1446310274.0-12661331-649139018\"\n #Expires:Thu, 07 Jul 2016 05:19:52 GMT\n #Keep-Alive:timeout=5, max=93\n #Last-Modified:Sat, 31 Oct 2015 16:51:14 GMT\n headers.add('Accept-Ranges', 'bytes')\n #Server:Apache/2.4.12 (Red Hat) mod_wsgi/3.4 Python/2.7.8\n headers.add('X-Frame-Options','SAMEORIGIN')\n if sample=='gencode.v19.sorted.bed':\n bamfile=BAM_FILES+'/gencode.v19.sorted.bed'\n elif sample=='gencode.v19.sorted.bed.idx':\n bamfile=BAM_FILES+'/gencode.v19.sorted.bed.idx'\n elif sample.endswith('.bai'):\n bamfile=BAM_FILES+'/%s.bam.bai' % sample\n else:\n bamfile=BAM_FILES+'/%s.bam' % sample\n size = os.path.getsize(bamfile)\n print(size)\n status = 200\n begin = 0\n end = size-1\n if request.headers.has_key(\"Range\") and request.method=='GET':\n print(request.headers['Range'])\n headers.add('Accept-Ranges','bytes')\n ranges = re.findall(r\"\\d+\", request.headers[\"Range\"])\n begin = int( ranges[0] )\n if len(ranges)>1: end = int( ranges[1] )\n headers.add('Content-Range','bytes %s-%s/%s' % (str(begin),str(end),size) )\n headers.add('Content-Length',str((end-begin)+1))\n with file(bamfile,'rb') as f:\n f.seek(begin)\n data=f.read(end-begin)\n print(len(data))\n response = Response( data, status=206, mimetype=\"application/octet-stream\", headers=headers, direct_passthrough=True)\n else:\n if request.method=='HEAD':\n headers.add('Content-Length',size)\n response = Response( '', status=200, mimetype=\"application/octet-stream\", headers=headers, direct_passthrough=True)\n elif request.method=='GET':\n response = Response( file(bamfile), status=200, mimetype=\"application/octet-stream\", headers=headers, direct_passthrough=True)\n #Add mimetype \n response.cache_control.public = True\n response.make_conditional(request)\n return response\n\n\n\n\ndef read_viz2():\n print(sample)\n print(region)\n from subprocess import call\n tmpfile=subprocess.Popen('mktemp', shell=True, stdout=subprocess.PIPE).stdout.read().strip()+'.bam'\n print(tmpfile)\n print(subprocess.Popen(\"samtools view -b %s/%s_sorted_unique.bam %s > %s\" % (BAM_FILES,sample,region, tmpfile), shell=True, stdout=subprocess.PIPE).stdout.read())\n subprocess.Popen('samtools index %s'%tmpfile).stdout.read()\n \n\n", "id": "4149396", "language": "Python", "matching_score": 0.8951901197433472, "max_stars_count": 24, "path": "views/igv.py" }, { "content": "import jinja2\nfrom jinja2.utils 
import soft_unicode\nfrom jinja2.utils import Markup\n\ndef get_attributes(value, *args):\n \"\"\"\n \"\"\"\n return (value[a] for a in args)\n\n\ndef map_format(value, pattern):\n \"\"\"\n Apply python string formatting on an object:\n .. sourcecode:: jinja\n {{ \"%s - %s\"|format(\"Hello?\", \"Foo!\") }}\n -> Hello? - Foo!\n \"\"\"\n value=tuple((value))\n s=soft_unicode(pattern).format(*value)\n return s\n\n\ndef unique(value):\n \"\"\"\n \"\"\"\n return list(set((value)))\n\n\ndef href(value,link):\n return Markup(soft_unicode(\"<br>\".join([\"<a href=/\"+link+\"/\"+x+\" target=_blank>\"+x+\"</a>\" for x in value])))\n\n\nclass FilterModule(object):\n ''' jinja2 filters '''\n def filters(self): return { 'map_format': map_format, 'get_attributes': get_attributes, 'unique':unique, 'href':href }\n\njinja2.filters.FILTERS['map_format'] = map_format\njinja2.filters.FILTERS['get_attributes'] = get_attributes\njinja2.filters.FILTERS['unique'] = unique\njinja2.filters.FILTERS['href'] = href\n\n", "id": "997467", "language": "Python", "matching_score": 0.2569904625415802, "max_stars_count": 24, "path": "jinja2_extensions.py" }, { "content": "from views import *\nfrom lookups import *\nimport requests\nimport re\nfrom utils import *\nimport itertools\nfrom flask import request\nimport orm\n\n\[email protected]('/exomiser_prioritise/',methods=['GET'])\ndef exomiser_prioritise():\n #phenotypes=HP:0001156,HP:0001363,HP:0011304,HP:0010055\n #prioritiser=hiphive\n #genes=341640,2263,4920,3909,10743\n #prioritiser-params=human,mouse,fish\n print(request.args)\n r=requests.get('http://localhost:8085/exomiser/api/prioritise/',params=request.args)\n #?phenotypes=HP:0001156,HP:0001363,HP:0011304,HP:0010055&prioritiser=hiphive&genes=341640,2263,4920,3909,10743&prioritiser-params=human,mouse,fish')\n return jsonify(result=r.json())\n\n", "id": "7254867", "language": "Python", "matching_score": 1, "max_stars_count": 24, "path": "views/exomiser.py" }, { "content": "\nfrom lookups import *\n\n\n", "id": "8332932", "language": "Python", "matching_score": 0.015050976537168026, "max_stars_count": 24, "path": "lookups/__init__.py" }, { "content": "from views import *\nfrom lookups import *\nfrom orm import *\n\n\n\[email protected]('/transcript_json/<transcript_id>')\n@requires_auth\ndef transcript_json(transcript_id):\n db = get_db()\n def f(v):\n del v['_id']\n if session['user']=='demo':\n del v['het_samples']\n del v['hom_samples']\n del v['wt_samples']\n return v\n variants=[f(v) for v in db.variants.find({'canonical_transcript':transcript_id})]\n #cache_key = 't-transcript-{}'.format(transcript_id)\n #t = cache.get(cache_key)\n #print 'Rendering %stranscript: %s' % ('' if t is None else 'cached ', transcript_id)\n #if t: return t\n #cache.set(cache_key, t)\n return jsonify(result={'variants':variants,'count':len(variants)})\n\n\[email protected]('/transcript/<transcript_id>')\n@requires_auth\ndef transcript(transcript_id):\n db = get_db()\n transcript=db.transcripts.find_one({'transcript_id':transcript_id})\n transcript['variants']=[Variant(variant_id=v['variant_id'],db=db) for v in db.variants.find({'canonical_transcript':transcript_id})]\n #cache_key = 't-transcript-{}'.format(transcript_id)\n #t = cache.get(cache_key)\n #print 'Rendering %stranscript: %s' % ('' if t is None else 'cached ', transcript_id)\n #if t: return t\n #cache.set(cache_key, t)\n individuals=dict()\n for v in transcript['variants']:\n if v.canonical_hgvsc[0]:\n v.cdna_pos=v.canonical_hgvsc[0].split(':')[1].split('.')[1]\n else:\n 
v.cdna_pos=''\n v.canonical_hgvs=dict(zip( v.canonical_hgvsp, v.canonical_hgvsc))\n v.__dict__['protein_mutations']=dict([(p,p.split(':')[1],) for p in v.canonical_hgvsp if ':' in p])\n for csq in v.transcript_consequences:\n if csq['transcript_id']!=transcript_id: continue\n v.distance=csq.get('distance','')\n for s in v.het_samples:\n if v.HET_COUNT < 10: individuals[s]=individuals.get(s,[])+[v]\n table_headers=re.findall(\"<td class='?\\\"?(.*)-cell'?\\\"?>\",file('templates/transcript.html','r').read())\n return render_template('transcript.html',transcript=transcript,individuals=individuals,table_headers=table_headers)\n\n\n\n", "id": "3269866", "language": "Python", "matching_score": 1.94509756565094, "max_stars_count": 24, "path": "views/transcript.py" }, { "content": "import flask\nfrom views import *\nfrom lookups import *\nimport rest as annotation\nimport requests\nfrom config import config\nif config.IMPORT_PYSAM_PRIMER3:\n import pysam\n import primer3\nimport myvariant\nimport re\nfrom utils import *\nimport itertools\nimport csv\n#hpo lookup\nimport phizz\nimport random\nimport orm\nimport vcf\nimport subprocess\nimport os\n\n\[email protected]('/variant/<variant_str>')\n@requires_auth\ndef variant_page(variant_str):\n try:\n variant=orm.Variant(variant_id=variant_str,db=get_db())\n except:\n return 'Variant does not exist'\n if not variant: return 'Variant does not exist'\n variant=variant.__dict__\n if session['user'] == 'demo':\n del variant['wt_samples']\n del variant['het_samples']\n del variant['hom_samples']\n return render_template(\n 'variant.html',\n title=variant_str,\n variant=variant\n )\n\n#<EMAIL>('/variant_json/<variant_str>')\n#def variant_json(variant_str): return jsonify(result=vcf.vcf_query(variant_str=variant_str))\n\[email protected]('/variant_json/<variant_str>')\ndef variant_json(variant_str):\n variant=orm.Variant(variant_id=variant_str,db=get_db())\n if session['user'] == 'demo':\n variant.__dict__['wt_samples']=[]\n variant.__dict__['het_samples']=[]\n variant.__dict__['hom_samples']=[]\n return jsonify(result=variant.__dict__)\n\[email protected]('/variant_json_db_new/<variant_str>')\ndef variant_json_db_new(variant_str):\n if session['user'] == 'demo': return ''\n variant=orm.Variant(variant_id=variant_str,db=get_db())\n return jsonify(result=variant.__dict__)\n\[email protected]('/set_variant_causal/<individual>/<variant_str>')\ndef set_variant_causal(individual, variant_str):\n print individual, variant_str\n db=get_db()\n #get_db().patients.update({'patient_id':individual},{'$addToSet':{'causal_variants':variant_str}})\n var=db.variants.find_one({'variant_id':variant_str})\n gene_id=var['genes'][0]\n gene_name=db.genes.find_one({'gene_id':gene_id})['gene_name_upper']\n print 'GENE_NAME', gene_name\n p=get_db('DB_NAME_PATIENTS').patients.find_one({'external_id':individual})\n get_db('DB_NAME_PATIENTS').patients.update_one({'external_id':individual},{'$set':{'genes': p.get('genes',[])+[{'gene':gene_name}]}})\n print get_db(app.config['DB_NAME_PATIENTS']).patients.update({'external_id':individual},{'$set':p},w=0)\n p=db.patients.find_one({'external_id':individual})\n p['causal_variants']=list(frozenset(p.get('causal_variants',[])+[variant_str]))\n db.patients.update({'external_id':individual},{'$set':{'causal_variants':p['causal_variants']}},w=0)\n if request.referrer:\n referrer=request.referrer\n u = urlparse(referrer)\n referrer='%s://%s' % (u.scheme,u.hostname,)\n if u.port: referrer='%s:%s' % (referrer,u.port,)\n return 
redirect(referrer+'/individual/'+individual)\n\[email protected]('/unset_variant_causal/<individual>/<variant_str>')\ndef unset_variant_causal(individual, variant_str):\n print individual, variant_str\n db=get_db()\n p=db.patients.find_one({'external_id':individual})\n if 'causal_variants' in p and not p['causal_variants']: p['causal_variants']=[]\n if variant_str in p.get('causal_variants',[]):\n p['causal_variants']=p['causal_variants'].remove(variant_str)\n db.patients.update({'external_id':individual},{'$set':{'causal_variants':p['causal_variants']}},w=0)\n p2=get_db('DB_NAME_PATIENTS').patients.find_one({'external_id':individual})\n p2['genes']=[]\n for var in p['causal_variants']:\n var=db.variants.find_one({'variant_id':var})\n gene_id=var['genes'][0]\n gene_name=db.genes.find_one({'gene_id':gene_id})['gene_name_upper']\n print 'GENE_NAME', gene_name\n p2['genes']=list(frozenset(p2.get('genes',[])+[{'gene':gene_name}]))\n print get_db(app.config['DB_NAME_PATIENTS']).patients.update({'external_id':individual},{'$set':p2},w=0)\n if request.referrer:\n referrer=request.referrer\n u = urlparse(referrer)\n referrer='%s://%s' % (u.scheme,u.hostname,)\n if u.port: referrer='%s:%s' % (referrer,u.port,)\n return redirect(referrer+'/individual/'+individual)\n\[email protected]('/set_variant_status/<individual>/<variant_str>/<status>')\ndef set_variant_status(individual, variant_str, status):\n print individual, variant_str, status\n db=get_db()\n #print get_db().patients.update({'patient_id':individual},{'$addToSet':{'variant_status':{variant_str:status}}})\n rare_variants=db.patients.find_one({'external_id':individual},{'rare_variants':1})['rare_variants']\n for rv in rare_variants:\n if rv['variant_id']==variant_str:\n rv['status']=status\n print db.patients.update({'external_id':individual},{'$set':{'rare_variants':rare_variants}})\n return status\n\n\[email protected]('/private_variants/<individual>')\ndef private_variants(individual):\n pv=[]\n cmd=\"bgt view -s,\"+individual+\" -s 'name!=\\\"\"+individual+\"\\\"' -f 'AC1>0&&AC2==0' -G \"+ \"/slms/gee/research/vyplab/UCLex/mainset_July2016/bgt/mainset_July2016.bgt\"\n print(cmd)\n s=subprocess.check_output([cmd],shell=True)\n for l in s.split('\\n'):\n if len(l)<5: continue\n if l.startswith('##'): continue\n if l.startswith('#'):\n headers=l.split('\\t')\n continue\n d=dict(zip(headers,l.split('\\t')))\n d.update(dict([x.split('=') for x in d['INFO'].split(';')]))\n del d['INFO']\n d['variant_id']='-'.join([d['#CHROM'],d['POS'],d['REF'],d['ALT']])\n pv.append(d)\n return jsonify(result=pv)\n\[email protected]('/rare_variants/<individual>/<AC>')\ndef rare_variants(individual,AC=10):\n pv=[]\n cmd=\"bgt view -s,\"+individual+\" -s 'name!=\\\"\"+individual+\"\\\"' -f 'AC1>0&&AC2<%s' \"%str(AC)+ \"-G /slms/gee/research/vyplab/UCLex/mainset_July2016/bgt/mainset_July2016.bgt\" \n print(cmd)\n proc=subprocess.Popen(cmd,stdout=subprocess.PIPE,shell=True)\n def generate():\n for l in iter(proc.stdout.readline,''):\n l=l.strip()\n print(l)\n if len(l)<5: continue\n if l.startswith('##'): continue\n if l.startswith('#'):\n headers=l.split('\\t')\n continue\n d=dict(zip(headers,l.split('\\t')))\n d.update(dict([x.split('=') for x in d['INFO'].split(';')]))\n del d['INFO']\n if ',' in d['ALT']: d['ALT']=d['ALT'].split(',')[0]\n d['variant_id']='-'.join([d['#CHROM'],d['POS'],d['REF'],d['ALT']])\n try:\n var=orm.Variant(variant_id=d['variant_id'],db=get_db())\n except Exception, e:\n print(e)\n print(d)\n continue\n yield 
flask.json.dumps(var.__dict__)+'\\n'\n #yield l+'\\n'\n #return Response(stream_with_context(generate()),mimetype='application/json')\n return Response(stream_with_context(generate()),mimetype='text/plain')\n\[email protected]('/common_private_variants/<individual>/<individual2>')\ndef common_private_variants(individual,individual2):\n pv=[]\n s=subprocess.check_output([\"bgt view -s,\"+individual+\" -s,\"+individual2+\" -s 'name!=\\\"\"+individual+\"\\\"&&name!=\\\"\"+individual2+\"\\\"' -f 'AC1>0&&AC2>0&&AC3==0' -G /slms/gee/research/vyplab/UCLex/mainset_July2016/bgt/mainset_July2016.bgt\" ],shell=True)\n #bgt view -s,IRDC_batch6_LON_2055 -s,WebsterURMD_Sample_06G02870 -s 'name!=\"IRDC_batch6_LON_2055\"&&name!=\"WebsterURMD_Sample_06G02870\"' -f 'AC1>0&&AC2>0&&AC3==0' -G mainset_July2016_chr1.bgt\n for l in s.split('\\n'):\n if len(l)<5: continue\n if l.startswith('##'): continue\n if l.startswith('#'):\n headers=l.split('\\t')\n continue\n d=dict(zip(headers,l.split('\\t')))\n d.update(dict([x.split('=') for x in d['INFO'].split(';')]))\n del d['INFO']\n d['variant_id']='-'.join([d['#CHROM'],d['POS'],d['REF'],d['ALT']])\n pv.append(d)\n return jsonify(result=pv)\n\[email protected]('/common_rare_variants/<individual>/<individual2>/<AC>')\ndef common_rare_variants(individual,individual2,AC=1):\n pv=[]\n s=subprocess.check_output([\"bgt view -s,\"+individual+\" -s,\"+individual2+\" -s 'name!=\\\"\"+individual+\"\\\"&&name!=\\\"\"+individual2+\"\\\"' -f 'AC1>0&&AC2>0&&AC3<%s' \"%AC+ \"-G /slms/gee/research/vyplab/UCLex/mainset_July2016/bgt/mainset_July2016.bgt\" ],shell=True)\n #bgt view -s,IRDC_batch6_LON_2055 -s,WebsterURMD_Sample_06G02870 -s 'name!=\"IRDC_batch6_LON_2055\"&&name!=\"WebsterURMD_Sample_06G02870\"' -f 'AC1>0&&AC2>0&&AC3==0' -G mainset_July2016_chr1.bgt\n for l in s.split('\\n'):\n if len(l)<5: continue\n if l.startswith('##'): continue\n if l.startswith('#'):\n headers=l.split('\\t')\n continue\n d=dict(zip(headers,l.split('\\t')))\n d.update(dict([x.split('=') for x in d['INFO'].split(';')]))\n del d['INFO']\n #d['variant_id']='-'.join([d['#CHROM'],d['POS'],d['REF'],d['ALT']])\n #pv.append(d)\n d['variant_id']='-'.join([d['#CHROM'],d['POS'],d['REF'],d['ALT']])\n try:\n var=orm.Variant(variant_id=d['variant_id'],db=get_db())\n except Exception, e:\n print(e)\n print(d)\n continue\n pv.append(var.__dict__)\n return jsonify(result=pv)\n\n\n\n", "id": "7573785", "language": "Python", "matching_score": 2.712028980255127, "max_stars_count": 24, "path": "views/variant.py" }, { "content": "from views import *\nfrom lookups import *\nfrom orm import *\nimport rest as annotation\nimport requests\nfrom config import config\nif config.IMPORT_PYSAM_PRIMER3:\n import primer3\nimport myvariant\nfrom vcf import vcf_query\nimport hashlib\nfrom bson.json_util import dumps\n\n'''\ndefs\n'''\ndef hide_id_for_demo(data):\n if not data: return\n for k,v in data['patients'].items():\n # hide hpo\n v['hpo'] = ['hidden']\n # hide variants\n v['variants'] = ['hidden_'+hashlib.sha224(i).hexdigest()[:6] for i in v['variants']]\n # hide p_id\n new_p = 'hidden_'+hashlib.sha224(k).hexdigest()[:6]\n data['patients'][new_p] = data['patients'].pop(k)\n\n for k1,v1 in data['data'].items():\n for k2,v2 in v1['p'].items():\n v1['p'][k2] = ['hidden_'+hashlib.sha224(i).hexdigest()[:6] for i in v2]\n\n for k,v in data['variants'].items():\n new_v = 'hidden_'+hashlib.sha224(k).hexdigest()[:6]\n data['variants'][new_v] = data['variants'].pop(k)\n\n'''\nroutes\n'''\[email 
protected]('/gene/<gene_id>',methods=['GET'])\n<EMAIL>(timeout=24*3600)\n@requires_auth\ndef gene_page(gene_id):\n # if gene not ensembl id then translate to\n db=get_db()\n hpo_db=get_db(app.config['DB_NAME_HPO'])\n patient_db=get_db(app.config['DB_NAME_PATIENTS'])\n hpo=request.args.get('hpo')\n if not gene_id.startswith('ENSG'):\n gene=db.genes.find_one({'gene_name': gene_id}, projection={'_id': False})\n #if not gene: gene=db.genes.find_one({'other_names': gene_id}, projection={'_id': False})\n if not gene: return gene_id+' does not exist'\n gene_id=gene['gene_id']\n else:\n gene=db.genes.find_one({'gene_id':gene_id})\n if not gene: return gene_id+' does not exist'\n if session['user'] == 'demo' and gene_id not in ['ENSG00000156171','ENSG00000119685']: return 'Sorry you are not permitted to see these genes in demo account, please contact us to setup an account!'\n variants=db.variants.find({'genes':gene_id},projection={'_id':False})\n gene['variants']=[Variant(variant_id=v['variant_id'],db=db) for v in variants]\n individuals=dict()\n for v in gene['variants']:\n v.canonical_hgvs=dict(zip( v.canonical_hgvsp, v.canonical_hgvsc))\n v.__dict__['protein_mutations']=dict([(p,p.split(':')[1],) for p in v.canonical_hgvsp if ':' in p])\n for s in v.het_samples:\n if v.HET_COUNT < 10:\n individuals[s]=individuals.get(s,[])+[v]\n print(gene['gene_id'])\n hpo_terms=hpo_db.gene_hpo.find_one({'gene_id':gene['gene_id']})\n if hpo_terms:\n hpo_terms=hpo_terms['hpo_terms']\n else:\n hpo_terms=hpo_db.genes_pheno.find_one({'gene':gene['gene_name']})\n if hpo_terms:\n hpo_terms=hpo_terms['hpo']\n else:\n hpo_terms=[]\n hpo_terms_dict=dict()\n for hpo_id in hpo_terms:\n hpo_terms_dict[hpo_id]=hpo_db.hpo.find_one({'id':hpo_id})\n gene_hpo = db.gene_hpo.find_one({'gene_id':gene_id},{'_id':0})\n patients_status = {}\n if session['user'] == 'demo': hide_id_for_demo(gene_hpo) \n else:\n # get patients status, solved? candidate genes? Only work when user is not demo for the time-being. 
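# hide_id_for_demo above masks patient and variant identifiers for the demo
# account by replacing them with 'hidden_' plus the first six hex characters of
# a SHA-224 digest, so repeated identifiers still map to the same masked value.
# The same mapping in isolation; pseudonymise is an illustrative name, and the
# explicit .encode() is only needed because the dumped code is Python 2 while
# this sketch is written to run on Python 3 as well.
import hashlib

def pseudonymise(identifier):
    return 'hidden_' + hashlib.sha224(identifier.encode('utf-8')).hexdigest()[:6]

print(pseudonymise('person1'))            # deterministic short pseudonym
print(pseudonymise('22-38212762-A-G'))    # variant ids are masked the same way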
Will probably change data struture later on to make it work for demo too\n all_patients = gene_hpo['patients'].keys()\n patients_status = dict([(i['external_id'],i) for i in patient_db.patients.find({'external_id':{'$in':list(all_patients)}},{'external_id':1,'solved':1,'genes':1})])\n table_headers=re.findall(\"<td class='?\\\"?(.*)-cell'?\\\"?>\",file('templates/gene-page-tabs/gene_variant_row.tmpl','r').read())\n # get simreg\n simreg_data = list(db.simreg.find({'gene':gene_id}))\n simreg = {'rec':{'data':[],'p':None},'dom':{'data':[],'p':None}}\n for mode in ['rec','dom']:\n temp = [i for i in simreg_data if i['mode'] == mode]\n if not temp: continue\n simreg[mode]['p'] = temp[0]['p']\n # convert it to array\n simreg[mode]['data'] = temp[0]['phi'].values()\n # sort desc\n simreg[mode]['data'] = sorted(simreg[mode]['data'], key=lambda x: x['prob'], reverse=True)\n pli=get_db('exac').pli.find_one({'gene':gene['gene_name']})\n if pli:\n pli=pli['pLI']\n else:\n pli=-1\n return render_template('gene.html', \n title=gene['gene_name'],\n gene=gene,\n pli=pli,\n table_headers=table_headers,\n phenogenon = json.dumps(gene_hpo) if gene_hpo else {},\n simreg = simreg,\n individuals=individuals,\n hpo_terms_json = json.dumps(hpo_terms),\n patients_status = dumps(patients_status),\n hpo_terms=hpo_terms_dict)\n\n\[email protected]('/gene_json/<gene_id>',methods=['GET','POST'])\n@requires_auth\ndef gene_json(gene_id):\n # if gene not ensembl id then translate to\n db=get_db()\n hpo_db=get_db(app.config['DB_NAME_HPO'])\n patient_db=get_db(app.config['DB_NAME_PATIENTS'])\n hpo=request.args.get('hpo')\n if not gene_id.startswith('ENSG'): gene_id = lookups.get_gene_by_name(get_db(), gene_id)['gene_id']\n gene=db.genes.find_one({'gene_id':gene_id})\n del gene['_id']\n variants=db.variants.find({'genes':gene_id})\n return json.dumps(gene)\n\n\n# get sequence given region, and highlight the region. 
useful for design primers\[email protected]('/sequence')\n@requires_auth\ndef sequence():\n var_id = request.args.get('variant_id')\n symbol = request.args.get('symbol')\n paddings = int(request.args.get('paddings') or 100)\n build = request.args.get('build') or 'grch37'\n (chrom,start,ref) = var_id.split('-',3)[:3]\n start = int(start)\n end = max(start + len(ref) - 1, start)\n strand = request.args.get('strand') or '1'\n margins = int(request.args.get('margins') or 500)\n gc_opt = float(request.args.get('gc_opt') or 50)\n gc_min = float(request.args.get('gc_min') or 35)\n gc_max = float(request.args.get('gc_max') or 65)\n primer_size_opt = int(request.args.get('primer_size_opt') or 20)\n primer_size_min = int(request.args.get('primer_size_min') or 18)\n primer_size_max = int(request.args.get('primer_size_max') or 25)\n primer_tm_opt = float(request.args.get('primer_tm_opt') or 59)\n primer_tm_min = float(request.args.get('primer_tm_min') or 55)\n primer_tm_max = float(request.args.get('primer_tm_max') or 67)\n PCR_size_range = request.args.get('PCR_size_range') or '100-400'\n SEQUENCE_EXCLUDED_REGION = []\n # get sequence\n server = \"http://%s.rest.ensembl.org\" % build\n ext = '''/sequence/region/human/%(chrom)s:%(start)s..%(end)s:%(strand)s?expand_5prime=%(margins)s;expand_3prime=%(margins)s;''' % locals()\n r = requests.get(server+ext, headers={'Content-Type':'application/json' })\n if not r.ok: return r.raise_for_status()\n decoded = r.json()\n # run primer3 to get primers suggestion\n # useful keys:\n # PRIMER_LEFT_0: (start(0 based), length)\n # PRIMER_LEFT_0_SEQUENCE\n # PRIMER_LEFT_0_TM\n # PRIMER_LEFT_0_GC_PERCENT\n # PRIMER_PAIR_0_PRODUCT_SIZE\n # region sets the paddings to include in the sequencing.Default with 100 bp on each side.\n region = [margins - paddings, end - start + 2*paddings ]\n seq = str(decoded['seq'])\n primer = primer3.bindings.designPrimers(\n {\n 'SEQUENCE_ID': 'phenopolis',\n 'SEQUENCE_TEMPLATE': seq,\n 'SEQUENCE_TARGET': region\n },\n {\n 'PRIMER_OPT_SIZE': primer_size_opt,\n 'PRIMER_INTERNAL_MAX_SELF_END': 8,\n 'PRIMER_MIN_SIZE': primer_size_min,\n 'PRIMER_MAX_SIZE': primer_size_max,\n 'PRIMER_OPT_TM': primer_tm_opt,\n 'PRIMER_MIN_TM': primer_tm_min,\n 'PRIMER_MAX_TM': primer_tm_max,\n 'PRIMER_MIN_GC': gc_min,\n 'PRIMER_OPT_GC': gc_opt,\n 'PRIMER_MAX_GC': gc_max,\n 'PRIMER_MAX_POLY_X': 100,\n 'PRIMER_INTERNAL_MAX_POLY_X': 100,\n 'PRIMER_SALT_MONOVALENT': 50.0,\n 'PRIMER_DNA_CONC': 50.0,\n 'PRIMER_MAX_NS_ACCEPTED': 0,\n 'PRIMER_MAX_SELF_ANY': 12,\n 'PRIMER_MAX_SELF_END': 8,\n 'PRIMER_PAIR_MAX_COMPL_ANY': 12,\n 'PRIMER_PAIR_MAX_COMPL_END': 8,\n 'PRIMER_PRODUCT_SIZE_RANGE':[int(PCR_size_range.split('-')[0]),\n int(PCR_size_range.split('-')[1]) ]\n })\n if 'PRIMER_RIGHT_0' not in primer:\n # return 'Cannot pick any primers for the given sequence'\n left=left_tm=left_gc=right=right_tm=right_gc=product_length='NA'\n seq = seq[:margins-1] + '<span class=\"highlight\">' + seq[margins-1:margins+end-start] + '</span>' + seq[margins+end-start:]\n else:\n # formulate sequence\n seq = seq[:primer['PRIMER_RIGHT_0'][0]-primer['PRIMER_RIGHT_0'][1]+1] + '<span class=\"primer\">' + seq[primer['PRIMER_RIGHT_0'][0]-primer['PRIMER_RIGHT_0'][1]+1:primer['PRIMER_RIGHT_0'][0]+1] + '</span>' + seq[primer['PRIMER_RIGHT_0'][0]+1:]\n seq = seq[:margins] + '<span class=\"highlight\">' + seq[margins:margins+end-start+1] + '</span>' + seq[margins+end-start+1:]\n seq = seq[:primer['PRIMER_LEFT_0'][0]] + '<span class=\"primer\">' + 
seq[primer['PRIMER_LEFT_0'][0]:primer['PRIMER_LEFT_0'][0]+primer['PRIMER_LEFT_0'][1]] + '</span>' + seq[primer['PRIMER_LEFT_0'][0]+primer['PRIMER_LEFT_0'][1]:]\n left=primer['PRIMER_LEFT_0_SEQUENCE']\n left_tm=primer['PRIMER_LEFT_0_TM']\n left_gc=primer['PRIMER_LEFT_0_GC_PERCENT']\n right=primer['PRIMER_RIGHT_0_SEQUENCE']\n right_tm=primer['PRIMER_RIGHT_0_TM']\n right_gc=primer['PRIMER_RIGHT_0_GC_PERCENT']\n product_length=primer['PRIMER_PAIR_0_PRODUCT_SIZE']\n # construct object\n result = {'seq': seq,\n 'var_id': var_id,\n 'symbol': symbol,\n 'left': left,\n 'left_tm': left_tm,\n 'left_gc': left_gc, \n 'right': right,\n 'right_tm': right_tm,\n 'right_gc': right_gc,\n 'product_length': product_length,\n 'paddings': paddings,\n 'margins': margins,\n 'gc_opt': gc_opt,\n 'gc_max': gc_max,\n 'gc_min': gc_min,\n 'primer_tm_opt': primer_tm_opt,\n 'primer_tm_max': primer_tm_max,\n 'primer_tm_min': primer_tm_min,\n 'primer_size_opt': primer_size_opt,\n 'primer_size_min': primer_size_min,\n 'primer_size_max': primer_size_max,\n 'PCR_size_range': PCR_size_range,\n }\n return render_template('primer3_sequence.html', result = result,)\n \[email protected]('/gene_phenogenon_json/<gene_id>',methods=['GET','POST'])\n@requires_auth\ndef gene_phenogenon_json(gene_id):\n # if gene not ensembl id then translate to\n db=get_db()\n if not gene_id.startswith('ENSG'): gene_id = lookups.get_gene_by_name(get_db(), gene_id)['gene_id']\n gene=db.gene_hpo_new.find_one({'gene_id':gene_id})\n del gene['_id']\n return json.dumps(gene)\n\n\n# test\[email protected]('/test')\ndef test():\n '''\n random test\n '''\n retnet_f=app.config['RETNET_JSON']\n RETNET = json.load(open(retnet_f, 'r'))\n relations = []\n genes = []\n omims = []\n for g, v in RETNET.iteritems():\n genes.append(g)\n for o in v['omim']:\n omims.append(o)\n relations.extend([(g,o)])\n return render_template('test.html', relations = json.dumps(relations), genes = json.dumps(list(set(genes))), omims = json.dumps(list(set(omims))))\n\n\n\n", "id": "931202", "language": "Python", "matching_score": 3.2229928970336914, "max_stars_count": 24, "path": "views/gene.py" }, { "content": "from views import *\nfrom lookups import *\nimport rest as annotation\nimport requests\nfrom flask import request\nfrom config import config\nif config.IMPORT_PYSAM_PRIMER3:\n import pysam\n import primer3\nimport myvariant\nimport re\nfrom utils import *\nimport itertools\nimport csv\n#hpo lookup\nimport phizz\nimport random\nimport orm\nimport vcf\n\n\ndef phenogenon(hpo_id,lit_genes,omim_genes,recessive_genes,dominant_genes,cache=True):\n cache_db=get_db('cache')\n temp=cache_db.phenogenon_cache.find_one({'hpo_id':hpo_id})\n if temp and cache:\n lit_genes.extend(temp['lit_genes'])\n omim_genes.extend(temp['omim_genes'])\n recessive_genes.extend(temp['recessive_genes'])\n dominant_genes.extend(temp['dominant_genes'])\n return\n hpo_db=get_db(app.config['DB_NAME_HPO'])\n db=get_db()\n def f(r):\n g=db.genes.find_one({'gene_name_upper':r['Gene-Name'].upper()},{'_id':0})\n if not g: return\n phenogenon=db.gene_hpo.find_one({'gene_id':g['gene_id']})\n if not phenogenon: return\n het=phenogenon.get('het',{}).get(hpo_id,{})\n hom_comp=phenogenon.get('hom_comp',{}).get(hpo_id,{})\n if 'data' in het: del het['data']\n if 'data' in hom_comp: del hom_comp['data']\n g['phenogenon']={ 'het':het, 'hom_comp': hom_comp}\n return g\n lit_genes=[f(r) for r in hpo_db.hpo_gene.find({'HPO-ID':hpo_id})]\n lit_genes=[lg for lg in lit_genes if lg]\n omim_genes.extend(map(lambda x: x['gene_id'], 
lit_genes))\n phenogenon=db.hpo_gene.find_one({'hpo_id':hpo_id})\n if phenogenon: phenogenon=phenogenon['data']['unrelated']\n else: phenogenon={'recessive':[],'dominant':[]}\n recessive_genes.extend([{'gene_id':x['gene_id'],'gene_name':db.genes.find_one({'gene_id':x['gene_id']})['gene_name'],'p_val':x['p_val'],'known':x['gene_id'] in omim_genes} for x in phenogenon['recessive']])\n dominant_genes.extend([{'gene_id':x['gene_id'],'gene_name':db.genes.find_one({'gene_id':x['gene_id']})['gene_name'],'p_val':x['p_val'], 'known':x['gene_id'] in omim_genes} for x in phenogenon['dominant']])\n #print({'hpo_id':hpo_id,'dominant_genes':dominant_genes,'recessive_genes':recessive_genes,'omim_genes':omim_genes,'lit_genes':lit_genes})\n cache_db.phenogenon_cache.insert_one({'hpo_id':hpo_id,'dominant_genes':dominant_genes,'recessive_genes':recessive_genes,'omim_genes':omim_genes,'lit_genes':lit_genes})\n\ndef skat(hpo_id):\n db=get_db()\n skat_genes=db.skat.find({'HPO':hpo_id},{'_id':False})\n skat_genes=[g for g in skat_genes if g['FisherPvalue']<0.05 and g['SKATO']<0.005]\n for g in skat_genes:\n pli=get_db('exac').pli.find_one({'gene':g['Symbol']})\n if str(g['OddsRatio'])=='inf': g['OddsRatio']=9999999\n if pli:\n g['pli']=pli['pLI'] \n else:\n g['pli']=-1\n return skat_genes\n\[email protected]('/hpo_skat_json/<hpo_id>')\n@requires_auth\ndef hpo_skat_json(hpo_id):\n skat_genes=skat(hpo_id)\n return jsonify( result={ 'individuals':skat_genes }, allow_nan=False )\n\ndef get_hpo_individuals(hpo_id):\n db=get_db()\n hpo_db=get_db(app.config['DB_NAME_HPO'])\n patients_db=get_db(app.config['DB_NAME_PATIENTS'])\n patients=lookups.get_hpo_patients(hpo_db,patients_db,hpo_id,cached=True)\n print('num patients', len(patients))\n # candidate genes\n candidate_genes = [p.get('genes',[]) for p in patients]\n # solved genes\n solved_genes = [p.get('solved',[]) for p in patients]\n hpo_db=get_db(app.config['DB_NAME_HPO'])\n def f(p):\n print p['external_id']\n if session['user']=='demo': p['external_id']='hidden'\n del p['_id']\n p['features']=[f for f in p.get('features',[]) if f['observed']=='yes']\n if 'solved' in p:\n if 'gene' in p['solved']:\n p['solved']=[p['solved']['gene']]\n else:\n p['solved']=[]\n else: p['solved']=[]\n if 'genes' in p: p['genes']=[x['gene'] for x in p['genes'] if 'gene' in x]\n else: p['genes']=[]\n p['genes']=list(frozenset(p['genes']+p['solved']))\n p2=db.patients.find_one({'external_id':p['external_id']},{'rare_homozygous_variants_count':1,'rare_compound_hets_count':1, 'rare_variants_count':1,'total_variant_count':1})\n if not p2: return p\n p['rare_homozygous_variants_count']=p2.get('rare_homozygous_variants_count','')\n p['rare_compound_hets_count']=p2.get('rare_compound_hets_count','')\n p['rare_variants_count']=p2.get('rare_variants_count','')\n p['total_variant_count']=p2.get('total_variant_count','')\n solved_patient=db.solved_patients.find_one({'external_id':p['external_id']})\n if solved_patient and session['user']!='demo': p['solved_variants']=solved_patient.get('genes',{})\n return p\n patients=[f(p) for p in patients if 'external_id' in p]\n return patients\n\n\n\[email protected]('/hpo_individuals_json/<hpo_id>')\n@requires_auth\ndef hpo_individuals_json(hpo_id):\n patients=get_hpo_individuals(hpo_id)\n return jsonify( result={ 'individuals':patients } )\n\n\[email protected]('/hpo_individuals_csv/<hpo_id>')\n@requires_auth\ndef hpo_individuals_csv(hpo_id):\n patients=get_hpo_individuals(hpo_id)\n return '\\n'.join([','.join([p['external_id'],';'.join([str(g) for g in 
p['genes']])]) for p in patients if 'external_id' in p])\n\n\[email protected]('/phenogenon_json/<hpo_id>')\n@requires_auth\ndef phenogenon_json(hpo_id):\n cache = bool(request.args.get('cache',True))\n threshold = float(request.args.get('threshold',0.05))\n print 'PHENOGENON_JSON'\n print cache\n #print(intersect(obs_genes.keys(),lit_genes))\n #print(Counter([rv['HUGO'] for rv in db.patients.find_one({'external_id':p['external_id']},{'rare_variants':1})]['rare_variants']))\n ## only return common variants if there are many individuals\n ##rsession.voidEval('common_variants <- common.variants')\n lit_genes=[]\n omim_genes=[]\n recessive_genes=[]\n dominant_genes=[]\n phenogenon(hpo_id, lit_genes, omim_genes, recessive_genes, dominant_genes,cache)\n print(len(lit_genes))\n print(len(omim_genes))\n print(len(recessive_genes))\n print(len(dominant_genes))\n true_positives=len([g for g in lit_genes if 'unrelated_recessive_p_val' in g['phenogenon']['hom_comp'] and g['phenogenon']['hom_comp']['unrelated_recessive_p_val']<=threshold])\n false_negatives=len([g for g in lit_genes if 'unrelated_recessive_p_val' in g['phenogenon']['hom_comp'] and g['phenogenon']['hom_comp']['unrelated_recessive_p_val']>threshold])\n false_positives=len([g for g in recessive_genes if not g['known'] and g['p_val']<=threshold])\n true_negatives=len([g for g in recessive_genes if not g['known'] and g['p_val']>threshold])\n # can have zero denominator sometimes\n #{'TPR':float(true_positives)/float(true_positives+false_negatives),'FPR':float(false_positives)/float(false_positives+true_negatives)}\n return jsonify( result={\n 'performance':{'TP':true_positives,'FN':false_negatives,'FP':false_positives,'TN':true_negatives},\n 'lit_genes':lit_genes,\n 'omim_genes':omim_genes,\n 'recessive_genes':recessive_genes,\n 'dominant_genes':dominant_genes,\n } )\n\n\[email protected]('/phenogenon_recessive_csv/<hpo_id>')\n@requires_auth\ndef phenogenon_recessive_csv(hpo_id):\n lit_genes=[]\n omim_genes=[]\n recessive_genes=[]\n dominant_genes=[]\n phenogenon(hpo_id, lit_genes, omim_genes, recessive_genes, dominant_genes)\n print(len(lit_genes))\n print(len(omim_genes))\n print(len(recessive_genes))\n print(len(dominant_genes))\n text=','.join([k for k in recessive_genes[0].keys()])+'\\n'\n for g in recessive_genes:\n text+=','.join([str(g[k]) for k in recessive_genes[0].keys()])+'\\n'\n return text\n\n\[email protected]('/phenogenon_dominant_csv/<hpo_id>')\n@requires_auth\ndef phenogenon_dominant_csv(hpo_id):\n lit_genes=[]\n omim_genes=[]\n recessive_genes=[]\n dominant_genes=[]\n phenogenon(hpo_id, lit_genes, omim_genes, recessive_genes, dominant_genes)\n print(len(lit_genes))\n print(len(omim_genes))\n print(len(recessive_genes))\n print(len(dominant_genes))\n text=','.join([k for k in dominant_genes[0].keys()])+'\\n'\n for g in dominant_genes:\n text+=','.join([str(g[k]) for k in dominant_genes[0].keys()])+'\\n'\n return text\n\[email protected]('/phenogenon_literature_csv/<hpo_id>')\n@requires_auth\ndef phenogenon_literature_csv(hpo_id):\n lit_genes=[]\n omim_genes=[]\n recessive_genes=[]\n dominant_genes=[]\n phenogenon(hpo_id, lit_genes, omim_genes, recessive_genes, dominant_genes)\n print(len(lit_genes))\n print(len(omim_genes))\n print(len(recessive_genes))\n print(len(dominant_genes))\n names=['gene_name','phenogenon.dominant_pvalue','phenogenon.recessive_pvalue']\n text=','.join([k for k in names])+'\\n'\n for g in lit_genes:\n gene_name=g['gene_name']\n 
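# phenogenon_json above returns the raw TP/FN/FP/TN counts and leaves the
# TPR/FPR line commented out because, as its comment notes, the denominators
# can be zero. A guarded version of that commented-out formula; safe_rates is
# an illustrative name, not a function from the codebase.
def safe_rates(tp, fn, fp, tn):
    tpr = float(tp) / (tp + fn) if (tp + fn) else None   # sensitivity
    fpr = float(fp) / (fp + tn) if (fp + tn) else None   # false positive rate
    return {'TPR': tpr, 'FPR': fpr}

print(safe_rates(8, 2, 5, 85))   # {'TPR': 0.8, 'FPR': 0.055...}
print(safe_rates(0, 0, 0, 0))    # both None instead of a ZeroDivisionError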
dominant_pvalue=str(g['phenogenon']['het'].get('unrelated_dominant_all_p_val',None))\n recessive_pvalue=str(g['phenogenon']['hom_comp'].get('unrelated_recessive_p_val',None))\n print(gene_name)\n print(dominant_pvalue)\n print(recessive_pvalue)\n text+=','.join([gene_name,dominant_pvalue,recessive_pvalue])+'\\n'\n return text\n\n\[email protected]('/hpo/<hpo_id>')\n@requires_auth\[email protected](timeout=24*3600)\ndef hpo_page(hpo_id):\n db=get_db()\n hpo_db=get_db(app.config['DB_NAME_HPO'])\n print hpo_id \n if not hpo_id.startswith('HP:'):\n hpo_term=hpo_db.hpo.find_one({'name':re.compile('^'+hpo_id+'$',re.IGNORECASE)})\n if not hpo_term: return hpo_id+' does not exist'\n hpo_id=hpo_term['id'][0]\n print hpo_id \n hpo_name=hpo_db.hpo.find_one({'id':hpo_id})['name'][0]\n print hpo_name\n #print('HPO ANCESTORS')\n #hpo_ancestors=lookups.get_hpo_ancestors(hpo_db,hpo_id)\n #print(len(hpo_ancestors))\n #print([h['name'] for h in hpo_ancestors])\n #print(len([v['VARIANT_ID'] for v in db.variants.find({'HET' : { '$in': patient_ids }})]))\n #print(len([v['VARIANT_ID'] for v in db.variants.find({'HOM' : { '$in': patient_ids }})]))\n #if r: external_ids=r['external_ids']\n #else: external_ids=[]\n #for r in hpo_db.hpo_pubmed.find({'hpoid':hpo_id}): print(r)\n #print recessive_genes\n #print dominant_genes\n return render_template('hpo.html',\n title=hpo_id,\n hpo_id=hpo_id,\n hpo_name=hpo_name)\n\[email protected]('/hpo_json/<hpo_id>')\n<EMAIL>\n@requires_auth\ndef hpo_json(hpo_id):\n db=get_db()\n hpo_db=get_db(app.config['DB_NAME_HPO'])\n patients_db=get_db(app.config['DB_NAME_PATIENTS'])\n #patients=[p for p in patients_db.patients.find( { 'features': {'$elemMatch':{'id':str(hpo_id)}} } )]\n print(hpo_id)\n if not hpo_id.startswith('HP:'):\n hpo_term=hpo_db.hpo.find_one({'name':hpo_id})\n hpo_id=hpo_term['id'][0]\n print(hpo_id)\n hpo_term=hpo_db.hpo.find_one({'id':hpo_id})\n hpo_name=hpo_term['name'][0]\n if 'is_a' in hpo_term:\n parents=[ pid for pid in hpo_term['is_a'] ]\n else:\n parents=[]\n print('HPO ANCESTORS')\n hpo_ancestors=lookups.get_hpo_ancestors(hpo_db,hpo_id)\n #print(lookups.get_hpo_ancestors_array(hpo_db,hpo_id))\n print(hpo_ancestors)\n print(len(hpo_ancestors))\n print([h['name'] for h in hpo_ancestors])\n #hpo_ancestors=dict((h['id'][0],h['name'][0]) for h in hpo_ancestors)\n hpo_ancestors=[{'hpo_id':h['id'][0],'hpo_name':h['name'][0]} for h in hpo_ancestors]\n #print(len([v['VARIANT_ID'] for v in db.variants.find({'HET' : { '$in': patient_ids }})]))\n #print(len([v['VARIANT_ID'] for v in db.variants.find({'HOM' : { '$in': patient_ids }})]))\n #r=patients_db.hpo.find_one({'hp_id':hpo_id})\n #if r: external_ids=r['external_ids']\n #else: external_ids=[]\n genes=[lookups.get_gene_by_name(db, r['Gene-Name']) for r in hpo_db.hpo_gene.find({'HPO-ID':hpo_id})]\n print('num genes', len(genes))\n #for r in hpo_db.hpo_pubmed.find({'hpoid':hpo_id}): print(r)\n #pmids=[r['pmid'] for r in hpo_db.hpo_pubmed.find({'hpoid':hpo_id})]\n patients=lookups.get_hpo_patients(hpo_db,patients_db,hpo_id)\n print('num patients', len(patients))\n #return jsonify(result={'hpo_id':hpo_id,'hpo_name':hpo_name,'individuals':[str(p['external_id']) for p in patients],'genes':genes})\n return jsonify(result={'hpo_id':hpo_id,'hpo_name':hpo_name,'individuals':[str(p['external_id']) for p in patients],'hpo_ancestors':hpo_ancestors, 'parents':parents})\n\n\n", "id": "6010209", "language": "Python", "matching_score": 3.746595859527588, "max_stars_count": 24, "path": "views/hpo.py" }, { "content": "import 
re\n#from utils import *\nimport itertools\nfrom config import config\nif config.IMPORT_PYSAM_PRIMER3:\n import pysam\nimport csv\n#hpo lookup\nimport phizz\nimport random\nimport pickle\nimport hashlib\nimport pprint\nimport orm\n\nSEARCH_LIMIT = 10000\n# massive genes?\n#UNSUPPORTED_QUERIES = ['TTN', 'ENSG00000155657', 'CMD1G', 'CMH9', 'CMPD4', 'FLJ32040', 'LGMD2J', 'MYLK5', 'TMD', u'ENST00000342175', u'ENST00000359218', u'ENST00000342992', u'ENST00000460472', u'ENST00000589042', u'ENST00000591111']\n\ndef lookup_patient(db,user,external_id):\n external_ids=db.users.find_one({'user':user},{'external_ids':1})['external_ids']\n return external_id in external_ids\n\ndef xpos_to_pos(xpos): return int(xpos % 1e9)\n\ndef get_gene(db, gene_id):\n print(gene_id)\n for g in db.genes.find({'gene_id': gene_id}): print(g)\n #return g\n return db.genes.find_one({'gene_id': gene_id}, projection={'_id': False})\n\n\ndef get_gene_by_name(db, gene_name):\n # try gene_name field first\n gene = db.genes.find_one({'gene_name': gene_name}, projection={'_id': False})\n if gene: return gene\n # if not, try gene['other_names']\n return db.genes.find_one({'other_names': gene_name}, projection={'_id': False})\n\n\ndef get_transcript(db, transcript_id):\n transcript = db.transcripts.find_one({'transcript_id': transcript_id}, projection={'_id': False})\n if not transcript:\n return None\n transcript['exons'] = get_exons_in_transcript(db, transcript_id)\n return transcript\n\n\ndef get_raw_variant(db, xpos, ref, alt, get_id=False):\n return db.variants.find_one({'xpos': xpos, 'ref': ref, 'alt': alt}, projection={'_id': get_id})\n\ndef get_variant(db, variant_id):\n return db.variants.find_one({'variant_id':variant_id})\n\n\ndef get_variant(db, xpos, ref, alt):\n variant = get_raw_variant(db, xpos, ref, alt, False)\n print(variant)\n if variant is None or 'rsid' not in variant: return variant\n if variant['rsid'] == '.' 
or variant['rsid'] is None:\n rsid = db.dbsnp.find_one({'xpos': xpos})\n if rsid:\n variant['rsid'] = 'rs%s' % rsid['rsid']\n return variant\n\ndef get_variants_from_dbsnp(db, rsid):\n if not rsid.startswith('rs'):\n return None\n try:\n rsid = int(rsid.lstrip('rs'))\n except Exception, e:\n return None\n position = db.dbsnp.find_one({'rsid': rsid})\n if position:\n variants = list(db.variants.find({'xpos': {'$lte': position['xpos'], '$gte': position['xpos']}}, projection={'_id': False}))\n if variants:\n #add_consequence_to_variants(variants)\n return variants\n return []\n\n\ndef get_coverage_for_bases(db, xstart, xstop=None):\n \"\"\"\n Get the coverage for the list of bases given by xstart->xstop, inclusive\n Returns list of coverage dicts\n xstop can be None if just one base, but you'll still get back a list\n \"\"\"\n if xstop is None:\n xstop = xstart\n coverages = {\n doc['xpos']: doc for doc in db.base_coverage.find(\n {'xpos': {'$gte': xstart, '$lte': xstop}},\n projection={'_id': False}\n )\n }\n ret = []\n for i in range(xstart, xstop+1):\n if i in coverages:\n ret.append(coverages[i])\n else:\n ret.append({'xpos': i, 'pos': xpos_to_pos(i)})\n for item in ret:\n item['has_coverage'] = 'mean' in item\n del item['xpos']\n print '+++++++++++++++++++++++++++'\n temp = db.base_coverage.find({'xpos': {'$gte': xstart, '$lte': xstop}})\n from bson.json_util import dumps\n dumps(temp)\n print xstart\n print xstop\n print '+++++++++++++++++++++++++++++'\n return ret\n\n\ndef get_coverage_for_transcript(db, xstart, xstop=None):\n \"\"\"\n :param db:\n :param genomic_coord_to_exon:\n :param xstart:\n :param xstop:\n :return:\n \"\"\"\n coverage_array = get_coverage_for_bases(db, xstart, xstop)\n # only return coverages that have coverage (if that makes any sense?)\n # return coverage_array\n #print '+++++++++++++++++++++++++'\n #print coverage_array\n #print '+++++++++++++++++++++++++'\n covered = [c for c in coverage_array if c['has_coverage']]\n for c in covered: del c['has_coverage']\n return covered\n\n\ndef get_constraint_for_transcript(db, transcript):\n return db.constraint.find_one({'transcript': transcript}, projection={'_id': False})\n\n\ndef get_awesomebar_suggestions(g, query):\n \"\"\"\n This generates autocomplete suggestions when user\n query is the string that user types\n If it is the prefix for a gene, return list of gene names\n \"\"\"\n regex = re.compile('^' + re.escape(query), re.IGNORECASE)\n results = (r for r in g.autocomplete_strings if regex.match(r))\n results = itertools.islice(results, 0, 20)\n #db.hpo.find({'name':/.*retinal dystrophy.*/})\n return list(results)\n\n\n# 1:1-1000\nR1 = re.compile(r'^(\\d+|X|Y|M|MT)\\s*:\\s*(\\d+)-(\\d+)$')\nR2 = re.compile(r'^(\\d+|X|Y|M|MT)\\s*:\\s*(\\d+)$')\nR3 = re.compile(r'^(\\d+|X|Y|M|MT)$')\nR4 = re.compile(r'^(\\d+|X|Y|M|MT)\\s*[-:]\\s*(\\d+)-([ATCG]+)-([ATCG]+)$')\n\n\n\ndef get_genes_in_region(db, chrom, start, stop):\n \"\"\"\n Genes that overlap a region\n \"\"\"\n xstart = get_xpos(chrom, start)\n xstop = get_xpos(chrom, stop)\n genes = db.genes.find({ 'xstart': {'$lte': xstop}, 'xstop': {'$gte': xstart}, }, projection={'_id': False})\n return list(genes)\n\n\ndef get_variants_in_region(db, chrom, start, stop):\n \"\"\"\n Variants that overlap a region\n Unclear if this will include CNVs\n \"\"\"\n xstart = get_xpos(chrom, start)\n xstop = get_xpos(chrom, stop)\n variants = list(db.variants.find({ 'xpos': {'$lte': xstop, '$gte': xstart}\n }, projection={'_id': False}, limit=SEARCH_LIMIT))\n 
#add_consequence_to_variants(variants)\n return list(variants)\n\n\n\ndef remove_extraneous_information(variant):\n return\n del variant['genotype_depths']\n del variant['genotype_qualities']\n del variant['transcripts']\n del variant['genes']\n del variant['orig_alt_alleles']\n del variant['xpos']\n del variant['xstart']\n del variant['xstop']\n del variant['site_quality']\n del variant['vep_annotations']\n\n\n\ndef get_transcripts_in_gene(db, gene_id):\n \"\"\"\n \"\"\"\n return list(db.transcripts.find({'gene_id': gene_id}, projection={'_id': False}))\n\n\ndef get_exons_in_transcript(db, transcript_id):\n # return sorted(\n # [x for x in\n # db.exons.find({'transcript_id': transcript_id}, fields={'_id': False})\n # if x['feature_type'] != 'exon'],\n # key=lambda k: k['start'])\n return sorted(list(db.exons.find({'transcript_id': transcript_id, 'feature_type': { \"$in\": ['CDS', 'UTR', 'exon'] }}, fields={'_id': False})), key=lambda k: k['start'])\n\n\ndef get_hpo_patients(hpo_db, patients_db, hpo_id, cached=True,verbose=False):\n \"\"\"\n Get patients with HPO term.\n \"\"\"\n if cached:\n return [p for p in patients_db.patients.find({'external_id':{'$in':patients_db.hpo_cache.find_one({'hpo_id':hpo_id})['external_id']}}) if 'external_id' in p]\n if 'HP:0000001' == hpo_id: return [p for p in patients_db.patients.find() if 'external_id' in p]\n patients = [p for p in patients_db.patients.find({'features.id':hpo_id}) for f in p['features'] if f['id']== hpo_id and f['observed']=='yes']\n if verbose: print(hpo_id,len(patients))\n for r in hpo_db.hpo.find({'is_a':hpo_id}):\n for i in r['id']: patients+=list(itertools.chain(get_hpo_patients(hpo_db,patients_db,i,cached=cached,verbose=verbose))) \n #remove duplicates\n patients={v['external_id']:v for v in patients}.values()\n return patients\n\n# return hpo terms found in people in which variant is found\ndef get_hpo(variant_str):\n samples=get_samples(variant_str)\n #chrom,pos,ref,alt,=str(variant_str.strip()).split('-')\n d=csv.DictReader(file('/data/uclex_data/UCLexInfo/uclex-samples.csv','r'),delimiter=',')\n hpo=[]\n for r in d:\n if r['sample'] not in samples: continue\n pheno=r['phenotype']\n print((r['sample'],pheno,))\n if pheno.startswith('HP'):\n hpo+=[phizz.query_hpo([pheno])]\n elif pheno.startswith('MIM'):\n hpo+=[phizz.query_disease([pheno])]\n return(hpo)\n\ndef get_hpo_children(hpo_db, hpo_id):\n hpo=[hpo_db.hpo.find_one({'id':hpo_id})]\n for r in hpo_db.hpo.find({'is_a':hpo_id}):\n for i in r['id']:\n hpo+=list(itertools.chain(get_hpo_children(hpo_db,i))) \n #remove duplicates\n hpo={h['id'][0]:h for h in hpo}.values()\n return hpo\n\ndef replace_hpo(hpo_db, hpo):\n # some hpo_ids are obsolete.\n record = hpo_db.hpo.find_one({'id':hpo[0]})\n if not record:\n print 'no record in replace_hpo'\n print hpo\n if 'replaced_by' in record:\n new = hpo_db.hpo.find_one({'id':record['replaced_by'][0]})\n return [new['id'][0], new['name'][0]]\n else:\n return hpo\n\ndef get_hpo_ancestors(hpo_db, hpo_id):\n \"\"\"\n Get HPO terms higher up in the hierarchy.\n \"\"\"\n h=hpo_db.hpo.find_one({'id':hpo_id})\n #print(hpo_id,h)\n if 'replaced_by' in h:\n # not primary id, replace with primary id and try again\n h = hpo_db.hpo.find_one({'id':h['replaced_by'][0]})\n hpo=[h]\n if 'is_a' not in h: return hpo\n for hpo_parent_id in h['is_a']:\n #p=hpo_db.hpo.find({'id':hpo_parent_id}):\n hpo+=list(itertools.chain(get_hpo_ancestors(hpo_db,hpo_parent_id))) \n #remove duplicates\n hpo={h['id'][0]:h for h in hpo}.values()\n return hpo\n\ndef 
get_hpo_ancestors_array(hpo_db, hpo_id):\n # return an array of ids, instead of array of dicts\n anc = get_hpo_ancestors(hpo_db, hpo_id)\n result = []\n for a in anc:\n result.extend(a['id'])\n return result\n\ndef get_hpo_size_freq(freq_file):\n # read freq file\n # result = {'HP:0000345':{size: 456, freq: 0.1, raw: 456/4500}}\n hpo_freq = {}\n inf = open(freq_file, 'r')\n for l in inf:\n l = l.rstrip().split('\\t')\n nums = l[1].split('/')\n size = int(nums[0])\n tot = float(nums[1])\n hpo_freq[l[0]] = {'size': size, 'freq': size/tot, 'raw': l[1]}\n return hpo_freq\n\ndef get_hpo_common_ancestors(hpo_db, h1, h2):\n # return a list of hpo ids for h1 and h2's common ancestors\n a1 = get_hpo_ancestors(hpo_db, h1)\n a2 = get_hpo_ancestors(hpo_db,h2)\n an1 = []\n an2 = []\n for a in a1:\n an1.extend(a['id'])\n for a in a2:\n an2.extend(a['id'])\n return list(set(an1) & set(an2))\n\ndef get_hpo_nearest_common_ancestors(hpo_db, h1, h2, hpo_freq):\n # given hpo_freq, find out a list of nearest common ancestors\n common_ans = get_hpo_common_ancestors(hpo_db, h1, h2)\n freqs = [hpo_freq[h] for h in common_ans]\n min_freq = min(freqs)\n inds = [i for i, v in enumerate(freqs) if v == min_freq]\n return [common_ans[i] for i in inds]\n\ndef hpo_minimum_set(hpo_db, hpo_ids=[]):\n '''\n minimize the hpo sets\n results = {'HP:0000505': [ancestors]}\n '''\n hpo_ids = list(set(hpo_ids))\n results = dict([(hpo_id, [ h['id'][0] for h in get_hpo_ancestors(hpo_db, hpo_id)],) for hpo_id in hpo_ids])\n # minimise\n bad_ids = []\n for i in range(len(hpo_ids)):\n for j in range(i+1,len(hpo_ids)):\n if hpo_ids[i] in results[hpo_ids[j]]:\n # i is j's ancestor, remove\n bad_ids.append(hpo_ids[i])\n break\n if hpo_ids[j] in results[hpo_ids[i]]:\n # j is i's ancestor, remove\n bad_ids.append(hpo_ids[j])\n return list(set(hpo_ids) - set(bad_ids))\n\n\ndef get_patient_hpo(hpo_db,patients_db, patient_id,ancestors=True):\n \"\"\"\n Get complete hierarchy of HPO terms for patient.\n \"\"\"\n p=patients_db.patients.find_one({'external_id':patient_id})\n if 'features' not in p: return []\n if ancestors:\n hpo_ancestors=[]\n for hpo_ids in [f['id'] for f in p['features'] if f['observed']=='yes']:\n hpo_ancestors+=get_hpo_ancestors(hpo_db,hpo_ids)\n # remove duplicates\n hpo_ancestors={h['id'][0]:h for h in hpo_ancestors}.values()\n return hpo_ancestors\n else:\n return [ hpo_db.hpo.find_one({'id':f['id']}) for f in p['features'] if f['observed']=='yes']\n\ndef get_gene_hpo(hpo_db,gene_name,dot=True):\n \"\"\"\n Get all HPO terms linked to gene name, including ancestors.\n and return as dot string for plotting if dot is True.\n \"\"\"\n hpo_ids=[hpo['HPO-Term-ID'] for hpo in hpo_db.OMIM_ALL_FREQUENCIES_genes_to_phenotype.find({'entrez-gene-symbol':gene_name})]\n if not hpo_ids:\n hpo_ids=hpo_db.genes_pheno.find_one({'gene':gene_name})\n # no hpo linked to gene\n if hpo_ids is None: hpo_ids=[]\n else: hpo_ids=hpo_ids['hpo']\n hpo_ancestors=[get_hpo_ancestors(hpo_db,hid) for hid in hpo_ids]\n hpo_ancestors=list(itertools.chain(*hpo_ancestors)) \n # remove duplicates\n hpo_ancestors={h['id'][0]:h for h in hpo_ancestors}.values()\n hpo_string=\"digraph {\"\n for h in hpo_ancestors:\n hpo_id=h['id'][0]\n hpo_label=h['name'][0]\n #hpo_count=0\n hpo_string+= '\"{}\" [style=\"filled\", fixedsize=\"true\", fontsize=\"15\", shape=\"circle\", width=\"0.75\", fillcolor=\"powderblue\", label=\"{}\\n{}\", color=\"transparent\"];\\n'.format(hpo_id,hpo_label,hpo_id)\n for h in hpo_ancestors:\n hpo_id=h['id'][0]\n if 'is_a' not in h: continue\n 
for anc in h['is_a']:\n hpo_string+='\"{}\" -> \"{}\" [color=\"#000000\", lty=\"solid\"];\\n'.format(anc,hpo_id)\n hpo_string+= '}'\n if dot:\n return hpo_string\n else:\n return hpo_ancestors\n\n\n# get hpo terms shared between patients\ndef common_hpo(hpo_db,patients_db,patient_ids):\n terms_by_patient=[get_patient_hpo(hpo_db,patients_db,pid) for pid in patient_ids]\n # intersection of lists\n common_hpo_term_ids=frozenset.intersection(*[frozenset([y['id'][0] for y in x]) for x in terms_by_patient])\n # remove ancestors\n #get_hpo_ancestors(hpo_db, hpo_id):\n # lookup hpo terms\n common_hpo_terms=[hpo_db.hpo.find_one({'id':hpo_id}) for hpo_id in common_hpo_term_ids]\n return common_hpo_terms\n\n# get union of hpo terms seen in patients\ndef union_hpo(hpo_db,patients_db,patient_ids):\n terms_by_patient=[get_patient_hpo(hpo_db,patients_db,pid) for pid in patient_ids]\n #flatten lists\n terms_by_patient=list(itertools.chain(*terms_by_patient)) \n # intersection of lists\n terms_by_patient={h['id'][0]:h for h in terms_by_patient}.values()\n return terms_by_patient\n\n\n# VCF gene query\ndef variants_in_gene_vcf(gene_symbol):\n import mygene\n mg = mygene.MyGeneInfo()\n g=mg.query('symbol:%s' % gene_symbol, fields='exons', species='human')\n print g\n exons=g['hits'][0]['exons']\n for transcript in exons:\n yield (transcript, exons[transcript],)\n\ndef get_patient_observed_hpo(patient, patient_db):\n # returns [('HP:0000001', 'hell yeah')]\n this_patient = patient_db.patients.find_one({'external_id':patient}) \n result = [(None, None)]\n if not this_patient:\n #print 'ERROR: %s not in patients db' % patient\n pass\n else:\n if 'features' not in this_patient:\n print 'WARNING: features not in ' + patient\n p_features = this_patient.get('features', [{'id':'HP:0000001', 'label':'All', 'observed': 'yes' }])\n result = [(f['id'], f['label']) for f in p_features if f['observed']=='yes']\n return result\n\n\n\n", "id": "1496918", "language": "Python", "matching_score": 4.012266159057617, "max_stars_count": 24, "path": "lookups/lookups.py" }, { "content": "\nfrom flask import jsonify\nimport lookups\nfrom os import listdir, chdir\nfrom os.path import isfile, join\nimport pymongo\nfrom collections import defaultdict, Counter\n\nclass Patient(object):\n def __init__(self, patient_id, patient_db=None,variant_db=None,hpo_db=None):\n Patient.db=patient_db\n Patient.patient_db=patient_db\n Patient.variant_db=variant_db\n Patient.hpo_db=hpo_db\n data=Patient.db.patients.find_one({'external_id':patient_id},{'_id':False})\n self.__dict__.update(data)\n if 'genes' not in self.__dict__: self.__dict__['genes']=[]\n def __getattribute__(self, key):\n \"Emulate type_getattro() in Objects/typeobject.c\"\n v = object.__getattribute__(self, key)\n if hasattr(v, '__get__'): return v.__get__(None, self)\n return v\n def json(self):\n return jsonify(result=self.__dict__)\n def save(self):\n print('writing', self.external_id, 'to database')\n return Patient.db.patients.update({'external_id':self.external_id},self.__dict__,upsert=True)\n @property\n def gender(self):\n return self.__dict__['sex']\n @property\n def consanguinity(self):\n if 'family_history' in self.__dict__:\n return self.__dict__['family_history'].get('consanguinity',None)\n else:\n return None\n @property\n def observed_features(self):\n # if 'observed_features' in self.__dict__: return self.__dict__['observed_features']\n self.__dict__.update({'observed_features':[feature for feature in self.__dict__['features'] if feature['observed']=='yes']})\n 
self.save()\n return self.__dict__['observed_features']\n def update_features(self, observed_features):\n #for of in observed_features:\n return ''\n @property\n def hpo_ids(self):\n if 'hpo_ids' in self.__dict__: return self.__dict__['hpo_ids']\n hpo_ids=[feature['id'] for feature in self.features if feature['observed']=='yes']\n self.__dict__.update({'hpo_ids':hpo_ids})\n self.save()\n return self.__dict__['hpo_ids']\n @property\n def hpo_terms(self):\n if 'hpo_terms' in self.__dict__: return self.__dict__['hpo_terms']\n hpo_terms=lookups.get_patient_hpo(Patient.hpo_db, Patient.patient_db, self.external_id, ancestors=False)\n hpo_terms = dict([(hpo['id'][0],{'id':hpo['id'][0],'name':hpo['name'][0], 'is_a':hpo.get('is_a',[])}) for hpo in hpo_terms])\n self.__dict__.update({'hpo_terms':hpo_terms})\n self.save()\n return self.__dict__['hpo_terms']\n @property\n def pubmed_key(self):\n if 'pubmed_key' in self.__dict__: return self.__dict__['pubmed_key']\n return None\n @property\n def family_history(self):\n return ''\n def process(self,x):\n if type(x['canonical_gene_name_upper']) is list: x['canonical_gene_name_upper']=x['canonical_gene_name_upper'][0]\n gene_hpo_terms=lookups.get_gene_hpo(Patient.hpo_db,x['canonical_gene_name_upper'],False)\n gene_hpo_terms = dict([(hpo['id'][0],{'id':hpo['id'][0],'name':hpo['name'][0], 'is_a':hpo.get('is_a',[])}) for hpo in gene_hpo_terms])\n gene_hpo_ids=gene_hpo_terms.keys()\n common_hpo_ids=list(set(gene_hpo_ids) & set(self.hpo_ids))\n # simplify hpo terms\n common_hpo_ids=lookups.hpo_minimum_set(Patient.hpo_db, common_hpo_ids)\n common_hpo_ids=[{'hpo_id':k,'hpo_term':self.hpo_terms[k]['name']} for k in common_hpo_ids]\n x['HPO']=common_hpo_ids\n print x['canonical_gene_name_upper'],common_hpo_ids\n g=x['canonical_gene_name_upper']\n # gene_id is used to get gene-hpo analysis result\n temp = lookups.get_gene_by_name(Patient.variant_db, g)\n x['gene_id'] = temp['gene_id'] if temp else None\n x['canonical_hgvs']=dict(zip( [x2.replace('.','_') for x2 in x.get('canonical_hgvsp','')], x.get('canonical_hgvsc','')))\n x['protein_mutations']=dict([(p.replace('.','_'),p.split(':')[1],) for p in x.get('canonical_hgvsp','') if ':' in p])\n if 'FILTER' not in x: x['FILTER']=x['filter']\n if 'ID' not in x: x['ID']=''\n return x\n def conditions(self, x, AC=10,kaviar=.05,consequence_exclude=['intron_variant','non_coding_transcript','5_prime_UTR_variant','3_prime_UTR_variant','upstream_gene_variant','downstream_gene_variant','synonymous_variant','non_coding_transcript_exon_variant'],consequence_include=[ 'transcript_ablation', 'splice_acceptor_variant', 'splice_donor_variant', 'stop_gained', 'frameshift_variant', 'stop_lost', 'start_lost', 'transcript_amplification', 'inframe_insertion', 'inframe_deletion', 'missense_variant', 'protein_altering_variant', 'splice_region_variant', 'regulatory_region_ablation']):\n if 'AC' not in x or x['AC'] > AC: return False\n if x['EXAC'] and x['EXAC']['AC_POPMAX']>=AC: return False\n if 'kaviar' in x and x['kaviar']>kaviar: return False\n if 'canonical_gene_name_upper' not in x: return False\n if x['most_severe_consequence'] in consequence_exclude: return False\n if x['most_severe_consequence'] not in consequence_include: return False\n return True\n @property\n def rare_variants(self):\n if 'rare_variants' in self.__dict__: return self.__dict__['rare_variants']\n rare_variants=[ self.process(x) for x in Patient.variant_db.variants.find({'het_samples':self.external_id},{'_id':False}) if self.conditions(x) ]\n 
gene_counter=Counter([var['canonical_gene_name_upper'] for var in rare_variants])\n        for var in rare_variants: var['gene_count']=gene_counter[var['canonical_gene_name_upper']]\n        self.__dict__.update({'rare_variants':rare_variants})\n        self.save()\n        return self.__dict__['rare_variants']\n    @property\n    def homozygous_variants(self):\n        if 'homozygous_variants' in self.__dict__: return self.__dict__['homozygous_variants']\n        homozygous_variants=[ self.process(x) for x in Patient.variant_db.variants.find({'hom_samples':self.external_id},{'_id':False}) if self.conditions(x) ]\n        self.__dict__.update({'homozygous_variants':homozygous_variants})\n        self.save()\n        return self.__dict__['homozygous_variants']\n    @property\n    def compound_het_variants(self):\n        #if 'compound_hets' in self.__dict__: return self.__dict__['compound_hets']\n        compound_hets=[self.process(x) for x in self.rare_variants if x['gene_count']>1]\n        self.__dict__.update({'compound_hets':compound_hets})\n        self.save()\n        return self.__dict__['compound_hets']\n    @property\n    def variants(self):\n        patient_db=get_db(app.config['DB_NAME_PATIENTS'])\n        if not patient: return jsonify(result=None)\n        patient = patient_db.patients.find_one({'external_id':individual},{'_id':False})\n        return { 'het_variants':[ process(x) for x in db.variants.find({'het_samples':individual}) if conditions(x) ], 'hom_variants':[ process(x) for x in db.variants.find({'hom_samples':individual}) if conditions(x) ] }\n    def load_patient_from_file(self, filename, hpo='HP:0000001'):\n        # Some constant\n        #HEADER = ['HUGO', 'HPO', 'consequence', 'ref(pubmedID)', 'description', 'OMIM', 'allele_freq', 'ExAC_freq', 'variant_id', 'p_change']\n        # get db\n        client = pymongo.MongoClient()\n        hpo_db = client['hpo']\n        db = client['uclex']\n        patient_db = client['patients']\n        patient_id=os.path.basename(filename.replace('.csv','')) \n        parent_dir=os.path.basename(os.path.abspath(os.path.join(filename, os.pardir)))\n        # Add patient to phenotips if it does not already exist\n        pheno=PhenotipsClient()\n        patient={u'features':[], 'clinicalStatus': {u'clinicalStatus': u'affected'}, u'ethnicity': {u'maternal_ethnicity': [], u'paternal_ethnicity': []}, u'family_history': {}, u'disorders': [], u'life_status': u'alive', u'reporter': u'', u'genes': [], u'prenatal_perinatal_phenotype': {u'prenatal_phenotype': [], u'negative_prenatal_phenotype': []}, u'prenatal_perinatal_history': {u'twinNumber': u''}, u'sex': u'U', u'solved': {u'status': u'unsolved'}}\n        eid=patient_id\n        p=pheno.get_patient(session=session,eid=eid)\n        print p\n        if p is None:\n            print 'MISSING', eid\n            patient['features']=[ {'id':h,'type':'phenotype','observed':'yes'} for h in hpo.strip().split(',')]\n            patient['external_id']=eid\n            print 'CREATING', eid\n            print pheno.create_patient(auth,patient)\n        if not patient_db.patients.find_one({'external_id':eid}):\n            # update database\n            p=pheno.get_patient(eid=eid,session=session)\n            print 'UPDATE'\n            print patient_db.patients.update({'external_id':eid},{'$set':p},w=0,upsert=True)\n        patient_hpo_terms=lookups.get_patient_hpo(hpo_db, patient_db, patient_id, ancestors=False)\n        patient_hpo_terms = dict([(hpo['id'][0],{'id':hpo['id'][0],'name':hpo['name'][0], 'is_a':hpo.get('is_a',[])}) for hpo in patient_hpo_terms])\n        patient_hpo_ids=patient_hpo_terms.keys()\n        # get hpo terms from patient\n        print 'processing rare variants of %s' % patient_id\n        print 'patient hpo terms', patient_hpo_terms \n        variants_reader=csv.DictReader(open(filename))\n        #for var in ['homozygous_variants', 'compound_hets', 'rare_variants']:\n        VARIANTS=[]\n        
for var in variants_reader:\n # look up variant on myvariant\n variant_id=var['signature']\n chrom, pos, ref, alt, = variant_id.split('_')\n #for k in var.keys(): print k, ':', var[k]\n #break\n variant=orm.Variant(variant_id=variant_id,db=db)\n #variant=vcf.vcf_query(chrom, pos, ref, alt, individual=patient_id, limit=100)\n if variant is None:\n sys.stderr.write( '\\033[01;31m' + var['signature'] + ' not found!' + '\\033[m' + '\\n' )\n with open(\"notfound.txt\", \"a\") as myfile: myfile.write(var['signature'])\n continue\n print var['signature'], '==>', variant.POS, variant.REF, variant.ALT\n #variant['site_quality'] = variant['QUAL']\n #variant['filter'] = variant['FILTER']\n #pprint(variant)\n #variant['vep']=vep_anno(str(chrom), str(pos), ref, alt,)\n #variant['my_variant']=mv.getvariant(variant['hgvs'],fields='all')\n #variant['rvs']=rvs_anno(chrom,pos,ref,alt)\n #print(variant['exac'])\n variant.__dict__update(var)\n #print vep_anno(chrom, pos, ref, alt)\n if patient_id in variant.hom_samples: var.variant_type='rare_homozygous'\n elif patient_id in variant.het_samples: var.variant_type='rare_het'\n else:\n print variant.het_samples\n print variant.hom_samples\n print patient_id, 'not in hom or het samples'\n VAR['variant_type']='rare_het'\n #raise 'hell'\n VAR['variant_id']=variant.variant_id\n VAR['allele_freq']=[ variant.allele_freq, str(variant.allele_count)+'/'+str(variant.allele_num), variant.MISS_COUNT]\n print(VAR['allele_freq'])\n #rvs=[impact for impact in variant['rvs']['impact'] if impact['alt']==alt]\n #if len(rvs)==1:\n VAR['HUGO']=re.sub('\\(.*\\)','',variant.HUGO)\n VAR['HUGO']=re.sub(',.*','',VAR['HUGO'])\n VAR['ExAC_freq']=variant['exac']\n VAR['Gene']=re.sub('\\(.*\\)','',variant.Gene)\n if VAR['HUGO']=='NA':\n gene_id=VAR['Gene'].split(',')[0]\n g=db.genes.find_one({'gene_id':gene_id})\n if not g and 'vep_annotations' in variant.exac:\n VAR['HUGO']=variant['exac']['vep_annotations'][0]['SYMBOL']\n else:\n #g=mg.query(gene_id, scopes='symbol', fields='ensembl.gene', species='human')\n g=annotation.ensembl_xrefs(gene_id)\n if 'error' in g:\n # unnamed gene\n VAR['HUGO']=''\n else:\n print gene_id, g\n VAR['HUGO']=find_item(g,'display_id')\n # get annotation from CSV file\n if variant['splicing']=='FALSE':\n if not variant['AAChange']: variant['AAChange']=re.compile('.*\\((.*)\\)').search(variant['Gene']).group(1)\n VAR['p_change']=dict(zip(['gene_id','transcript_id','exon','hgvs_c','hgvs_p'],variant['AAChange'].split(':')))\n if 'hgvs_p' in VAR['p_change']: VAR['p_change']['hgvs_p']=re.sub(',.*','',VAR['p_change']['hgvs_p'])\n else:\n VAR['p_change']={}\n VAR['consequence']=variant['ExonicFunc']\n VAR['filter']=variant['FILTER']\n VAR['OMIM']=variant.get('Omim','').split(';')[0]\n VAR['lof']=bool(variant['lof'])\n VAR['description']=variant['Description']\n if VAR['lof']:\n print 'lof'\n print VAR['HUGO']\n g=db.genes.find_one({'gene_name_upper':VAR['HUGO'].upper()})\n if g:\n gene_id=g['gene_id']\n print gene_id\n else:\n mg=mygene.MyGeneInfo()\n g=mg.query(VAR['HUGO'], scopes='symbol', fields='ensembl.gene', species='human')\n if g and 'hits' in g and 'ensembl' in g['hits'][0]:\n print g\n # {u'hits': [{u'_id': u'643669', u'ensembl': [{u'gene': u'ENSG00000262484'}, {u'gene': u'ENSG00000283099'}]}], u'total': 1, u'max_score': 443.8707, u'took': 2}\n gene_id=find_item(g,'gene')\n #gene_id=[x for _, x, in g['hits'][0]['ensembl'][0].iteritems()]\n print gene_id\n #raise 'hell'\n else:\n e=annotation.ensembl_region('{}:{}-{}'.format(chrom,pos,pos))\n 
gene_id=e[0]['gene_id']\n print gene_id\n lof=db.lof.find_one({'gene_id':gene_id})\n if lof:\n lof['patient_ids'][patient_id]=list(set(lof['patient_ids'].get(patient_id,[])+[VAR['variant_id']]))\n print db.lof.update({'gene_id':gene_id}, {'$set':{'patient_ids':lof['patient_ids']}})\n else:\n print db.lof.insert({'gene_id':gene_id,'patient_ids':{patient_id:[VAR['variant_id']]}})\n #hpo_terms=hpo_db.gene_hpo.find_one({'gene_name':VAR['HUGO']},{'hpo_terms':1,'_id':1})\n #gene_hpo_ids=hpo_db.gene_hpo.find_one({'gene_name':'ABCA4'},{'hpo_terms':1,'_id':0}).get('hpo_terms',[])\n #VAR['HUGO']='ABCA4'\n gene_hpo_terms=lookups.get_gene_hpo(hpo_db,VAR['HUGO'],False)\n gene_hpo_terms = dict([(hpo['id'][0],{'id':hpo['id'][0],'name':hpo['name'][0], 'is_a':hpo.get('is_a',[])}) for hpo in gene_hpo_terms])\n gene_hpo_ids=gene_hpo_terms.keys()\n #lookups.get_gene_hpo(hpo_db,gene_name,dot=False)\n #print 'gene', gene_hpo_ids\n #print 'patient', patient_hpo_ids\n common_hpo_ids=list(set(gene_hpo_ids) & set(patient_hpo_ids))\n # simplify hpo terms\n common_hpo_ids=lookups.hpo_minimum_set(hpo_db, common_hpo_ids)\n common_hpo_ids=[{'hpo_id':k,'hpo_term':patient_hpo_terms[k]['name']} for k in common_hpo_ids]\n print VAR['HUGO'],common_hpo_ids\n VAR['HPO']=common_hpo_ids\n VARIANTS.append(VAR)\n # determine count per gene\n gene_counter=Counter([var['HUGO'] for var in VARIANTS])\n for var in VARIANTS: var['gene_count']=gene_counter[var['HUGO']]\n print('gene_counter', gene_counter)\n print('rare_variants',len(VARIANTS))\n print(db.patients.update({'external_id':patient_id}, {'$set':{'rare_variants':VARIANTS}}, upsert=True))\n print(db.patients.update({'external_id':patient_id}, {'$set':{'rare_variants_count':len(VARIANTS)}}, upsert=True))\n COMPOUND_HETS=[var for var in VARIANTS if var['gene_count']>1]\n print('compound_hets',len(COMPOUND_HETS))\n print(db.patients.update({'external_id':patient_id}, {'$set':{'compound_hets':COMPOUND_HETS}}, upsert=True)) \n print(db.patients.update({'external_id':patient_id}, {'$set':{'compound_hets_count':len(COMPOUND_HETS)}}, upsert=True)) \n HOMOZYGOUS_VARIANTS=[var for var in VARIANTS if var['variant_type']=='rare_homozygous']\n print('rare_homozygous',len(HOMOZYGOUS_VARIANTS))\n print(db.patients.update({'external_id':patient_id}, {'$set':{'homozygous_variants':HOMOZYGOUS_VARIANTS}}, upsert=True))\n print(db.patients.update({'external_id':patient_id}, {'$set':{'homozygous_variants_count':len(HOMOZYGOUS_VARIANTS)}}, upsert=True))\n def get_patient_observed_hpo(self):\n # returns [('HP:0000001', 'hell yeah')]\n this_patient = patient_db.patients.find_one({'external_id':patient}) \n result = [(None, None)]\n if not this_patient:\n #print 'ERROR: %s not in patients db' % patient\n pass\n else:\n if 'features' not in this_patient:\n print 'WARNING: features not in ' + patient\n p_features = this_patient.get('features', [{'id':'HP:0000001', 'label':'All', 'observed': 'yes' }])\n result = [(f['id'], f['label']) for f in p_features if f['observed']=='yes']\n return result\n\n\n\n", "id": "1911418", "language": "Python", "matching_score": 5.203237056732178, "max_stars_count": 24, "path": "orm/patient.py" }, { "content": "\n\"\"\"\nVariant and gene annotation.\nWill use pymongo db as cache to avoid unecessary networks access.\n\nSources:\n VEP\n RVS\n ExAC\n myvariant.info\n\"\"\"\n\nimport requests\nimport re\nimport json\nimport pymongo\n# myvariant.info\nimport myvariant\n# mygene.info\nimport mygene\n# RVS\nfrom varnorm.varcharkey import VarCharKey\n\n\nmv = 
myvariant.MyVariantInfo()\n\nmg = mygene.MyGeneInfo()\n\ndef ensembl_region(region,features='gene'):\n #'feature=gene;feature=transcript;feature=cds;feature=exon'\n r=requests.get('http://grch37.rest.ensembl.org/overlap/region/human/{}?feature={}'.format(region,features),headers={'Content-Type':'application/json'})\n return r.json()\n\ndef ensembl_xrefs(ID):\n r=requests.get('http://grch37.rest.ensembl.org/xrefs/id/{}'.format(ID),headers={'Content-Type':'application/json'})\n return r.json()\n\ndef mygene_anno(gene_symbol=None,gene_id=None, values='name,symbol,refseq.rna'):\n if gene_symbol:\n return mg.query('symbol:%s' % gene_symbol, species='human')\n elif gene_id:\n return mg.getgene(gene_id, values)\n\ndef vep_anno(chrom, pos, ref, alt):\n # doing vep annotation\n # costructing vep keys\n # and a v_id => vep_key dict\n server = \"http://grch37.rest.ensembl.org\"\n ext = \"/vep/homo_sapiens/region\"\n headers={ \"Content-Type\" : \"application/json\", \"Accept\" : \"application/json\"}\n print 'vep annotation'\n vep_keys_dict = {}\n vep_keys_list = []\n vep_key = '{} {} . {} {} . . .'.format(chrom, pos, ref, alt)\n #vep_keys_dict[variant] = vep_key\n #vep_keys_list.append(vep_key)\n # construct query\n query = '{ \"variants\" : [\"' + '\", \"'.join(vep_key) + '\" ] }'\n # do the query\n r = requests.post(server+ext, headers=headers, data=query)\n # check the query\n if not r.ok:\n r.raise_for_status()\n return\n # parse VEP\n # has potential more than one transcripts.\n # pick the first and most damaging one\n #first_transcript = g['VEP']['transcript_consequences'][0]\n #this['consequence'] = [g['VEP']['most_severe_consequence'], {'impact': first_transcript.get('impact', 'NA'), 'polyphen': first_transcript.get('polyphen_prediction','NA'), 'sift': first_transcript.get('sift_prediction','NA')}]\n #aa = first_transcript.get('amino_acids','')\n #if aa:\n #this['p_change'] = {'pvar': 'p.%s %s' % (first_transcript['protein_start'], aa), 'cvar': 'c.%s %s' % (first_transcript['cds_start'], first_transcript['codons']), 'gene_id': gene_id, 'transcript_id': first_transcript['transcript_id']}\n #else:\n # not a aa change\n #this['p_change'] = {'pvar': 'NA', 'gene_id': gene_id, 'transcript_id':first_transcript['transcript_id']}\n # has default, weak vep_annotations\n #vep = g['vep_annotations'][0]\n #this['consequence'] = [vep['Consequence'], {}]\n #aa = vep['Amino_acids']\n #this['p_change'] = {'pvar': 'p.%s %s' % (vep['Protein_position'], aa), 'cvar': 'c.%s %s' % (vep['CDS_position'], vep['Codons']), 'gene_id': gene_id, 'transcript_id': vep['Feature']}\n #if debug: print(r.json())\n return r.json()\n #else: return json.loads(r.json())\n\n\ndef rvs_anno(chrom, pos, ref, alt):\n # annotate variants with rvs, and add the emtpy records to bulk_vep\n ##################\n # construct vkeys for rvs\n all_vkeys_list = []\n # dict for v_id => rvs_id\n rvs_dict = {}\n # get the functional arguments of v2k function\n rvs_id=VarCharKey.v2k(chrom, int(pos), int(pos), alt)\n all_vkeys_list.append(rvs_id)\n print all_vkeys_list\n all_vkeys = ','.join(all_vkeys_list)\n ##################\n # request rvs\n rvs_vars = {}\n for mode in ['impact','prediction']:\n url='https://rvs.u.hpc.mssm.edu/rest/{}/vkey/{}'.format(mode,all_vkeys)\n print url\n r = requests.get(url, headers={ \"Content-Type\" : \"application/json\"})\n rvs_vars[mode]=r.json()\n # parse RVS record\n #rvs=rvs[0]\n #VAR['p_change']={'gene_id':rvs['gene_id'],'cvar':rvs['hgvs_c'],'pvar':rvs['hgvs_p'],'transcript_id':rvs['enst']}\n 
#VAR['consequence']=rvs['effect']\n #print 'NO RVS, USING CSV INFO'\n #ENSG00000164256:ENST00000296682:exon11:c.2497_2580del:p.833_860del\n #variant=dict()\n #variant['p_change'] = {'pvar': g['RVS']['impact']['hgvs_p'], 'cvar': g['RVS']['impact']['hgvs_c'], 'gene_id': gene_id, 'transcript_id': g['RVS']['impact']['enst']}\n #variant['consequence'] = [g['RVS']['impact']['effect'], {'impact': g['RVS']['impact']['impact']} ]\n #if g['RVS'].get('prediction',{}): temp = g['RVS']['prediction'] \n #for pred in ['Polyphen2_HDIV', 'SIFT', 'CADD', 'MutationTaster', 'ensemble_prediction', 'FATHMM', 'MutationAssessor', 'phastCons', 'GWAVA_region', 'Polyphen2_HVAR']: this['consequence'][1][pred] = temp[pred]\n return rvs_vars\n\n\ndef exac_anno(var,update=True):\n #if v and 'EXAC' in v and 'allele_freq' in v['EXAC']: return v['EXAC']\n attempt = 5\n while attempt:\n try:\n r=requests.get('http://exac.broadinstitute.org/variant/%s'%var)\n break\n except requests.ConnectionError:\n print 'query exac connectionError, retry'\n attempt -= 1\n time.sleep(1)\n if not r: raise 'Too many attempts to query exac, fail at exac_anno'\n #http://exac.hms.harvard.edu/rest/variant/1-1271580-C-T\n p=r.text\n VAR={}\n m=re.compile('This variant is not found in ExAC.').search(p)\n if m:\n print var, 'not in exac'\n return VAR\n else:\n print var, 'in exac'\n m=re.compile(\"window.variant\\s*=\\s*({.*})\\s*;\").search(p)\n if m: variant=json.loads(m.group(1))\n #VAR['variant']=variant\n VAR['genes']=variant['genes']\n VAR['vep_annotations']=variant['vep_annotations']\n VAR['pop_homs']=variant['pop_homs']\n VAR['pop_acs']=variant['pop_acs']\n VAR['pop_ans']=variant['pop_ans']\n VAR['pop_af']={}\n for k in VAR['pop_homs']:\n try:\n VAR['pop_af'][k]=float(VAR['pop_acs'][k])/float(VAR['pop_ans'][k])\n except:\n VAR['pop_af'][k]=None\n VAR['total_homs']=sum([int(VAR['pop_homs'][k]) for k in VAR['pop_homs']])\n VAR['total_acs']=sum([int(VAR['pop_acs'][k]) for k in VAR['pop_acs']])\n VAR['total_ans']=sum([int(VAR['pop_ans'][k]) for k in VAR['pop_ans']])\n if float(VAR['total_ans'])==0: \n VAR['allele_freq']=None\n else:\n VAR['allele_freq']=float(VAR['total_acs'])/float(VAR['total_ans'])\n m=re.compile(\"window.consequence\\s*=\\s*({.*})\\s*;\").search(p)\n if m: csq=json.loads(m.group(1))\n #VAR['consequence']=csq\n m=re.compile(\"window.metrics\\s*=\\s*({.*})\\s*;\").search(p)\n if m: metrics=json.loads(m.group(1))\n #VAR['metrics']=metrics\n return VAR\n\n\n", "id": "12164771", "language": "Python", "matching_score": 3.141563892364502, "max_stars_count": 24, "path": "rest/annotation.py" }, { "content": "import rest\nimport vcf\nimport json\nfrom operator import itemgetter\nimport pprint\nimport requests\n\n\n# Note that this is the current as of v77 with 2 included for backwards compatibility (VEP <= 75)\ncsq_order = [\"transcript_ablation\",\n\"splice_donor_variant\",\n\"splice_acceptor_variant\",\n\"stop_gained\",\n\"frameshift_variant\",\n\"stop_lost\",\n\"initiator_codon_variant\",\n\"transcript_amplification\",\n\"inframe_insertion\",\n\"inframe_deletion\",\n\"missense_variant\",\n\"splice_region_variant\",\n\"incomplete_terminal_codon_variant\",\n\"stop_retained_variant\",\n\"synonymous_variant\",\n\"coding_sequence_variant\",\n\"mature_miRNA_variant\",\n\"5_prime_UTR_variant\",\n\"3_prime_UTR_variant\",\n\"non_coding_transcript_exon_variant\",\n\"non_coding_exon_variant\", # deprecated\n\"intron_variant\",\n\"NMD_transcript_variant\",\n\"non_coding_transcript_variant\",\n\"nc_transcript_variant\", # 
deprecated\n\"upstream_gene_variant\",\n\"downstream_gene_variant\",\n\"TFBS_ablation\",\n\"TFBS_amplification\",\n\"TF_binding_site_variant\",\n\"regulatory_region_ablation\",\n\"regulatory_region_amplification\",\n\"regulatory_region_variant\",\n\"feature_elongation\",\n\"feature_truncation\",\n\"intergenic_variant\",\n\"start_lost\",\n'protein_altering_variant',\n\"\"]\ncsq_order_dict = dict(zip(csq_order, range(len(csq_order))))\nrev_csq_order_dict = dict(zip(range(len(csq_order)), csq_order))\n\ndef compare_two_consequences(csq1, csq2):\n if csq_order_dict[worst_csq_from_csq(csq1)] < csq_order_dict[worst_csq_from_csq(csq2)]:\n return -1\n elif csq_order_dict[worst_csq_from_csq(csq1)] == csq_order_dict[worst_csq_from_csq(csq2)]:\n return 0\n return 1\n\ndef get_protein_hgvs(csq):\n \"\"\"\n Takes consequence dictionary, returns proper variant formatting for synonymous variants\n \"\"\"\n if '%3D' in csq['HGVSp']:\n try:\n amino_acids = ''.join([protein_letters_1to3[x] for x in csq['Amino_acids']])\n return \"p.\" + amino_acids + csq['Protein_position'] + amino_acids\n except Exception, e:\n print 'Could not create HGVS for: %s' % csq\n return csq['HGVSp'].split(':')[-1]\n\n\ndef worst_csq_index(csq_list):\n \"\"\"\n Input list of consequences (e.g. ['frameshift_variant', 'missense_variant'])\n Return index of the worst annotation (In this case, index of 'frameshift_variant', so 4)\n Works well with csqs = 'non_coding_exon_variant&nc_transcript_variant' by worst_csq_index(csqs.split('&'))\n :param annnotation:\n :return most_severe_consequence_index:\n \"\"\"\n return min([csq_order_dict[ann] for ann in csq_list])\n\n\ndef worst_csq_from_list(csq_list):\n \"\"\"\n Input list of consequences (e.g. ['frameshift_variant', 'missense_variant'])\n Return the worst annotation (In this case, 'frameshift_variant')\n Works well with csqs = 'non_coding_exon_variant&nc_transcript_variant' by worst_csq_from_list(csqs.split('&'))\n :param annnotation:\n :return most_severe_consequence:\n \"\"\"\n return rev_csq_order_dict[worst_csq_index(csq_list)]\n\ndef worst_csq_from_csq(csq):\n \"\"\"\n Input possibly &-filled csq string (e.g. 
'non_coding_exon_variant&nc_transcript_variant')\n Return the worst annotation (In this case, 'non_coding_exon_variant')\n :param consequence:\n :return most_severe_consequence:\n \"\"\"\n return rev_csq_order_dict[worst_csq_index(csq.split('&'))]\n\n\ndef order_vep_by_csq(annotation_list):\n print('ANNOTATION LIST',annotation_list)\n output = sorted(annotation_list, cmp=lambda x, y: compare_two_consequences(x, y), key=itemgetter('consequence_terms'))\n for ann in output:\n ann['major_consequence'] = worst_csq_from_csq(ann['consequence_terms'])\n return output\n\n\n\ndef compare_two_consequences(csq1, csq2):\n if csq_order_dict[worst_csq_from_csq(csq1)] < csq_order_dict[worst_csq_from_csq(csq2)]:\n return -1\n elif csq_order_dict[worst_csq_from_csq(csq1)] == csq_order_dict[worst_csq_from_csq(csq2)]:\n return 0\n return 1\n\ndef get_variants_by_rsid(db, rsid):\n if not rsid.startswith('rs'):\n return None\n try:\n int(rsid.lstrip('rs'))\n except Exception, e:\n return None\n variants = list([Variant(data=v) for v in db.variants.find({'rsid': rsid}, projection={'_id': False})])\n #add_consequence_to_variants(variants)\n return variants\n\n\nclass Variant(object):\n def __init__(self, variant_id=None, db=None,data=None):\n if variant_id is None: variant_id=data['variant_id']\n self.variant_id=str(variant_id).strip().replace('_','-')\n self.chrom, self.pos, self.ref, self.alt = variant_id.split('-')\n #q=vcf.vcf_query(variant_str=self.variant_id,)\n #if q is None: raise Exception('NOT IN VCF',self.variant_id)\n #self.__dict__.update(q)\n Variant.db=db\n data=Variant.db.variants.find_one({'variant_id':self.variant_id},projection={'_id':False})\n self.__dict__.update(data)\n def __getattribute__(self, key):\n \"Emulate type_getattro() in Objects/typeobject.c\"\n v = object.__getattribute__(self, key)\n if hasattr(v, '__get__'): return v.__get__(None, self)\n return v\n def save(self):\n #print('writing', self.variant_id, 'to database')\n #return Variant.db.variants.update({'variant_id':self.variant_id},self.__dict__,upsert=True)\n pass\n @property\n def kaviar(self):\n if 'kaviar' in self.__dict__: return self.__dict__['kaviar']\n @property\n def status(self):\n return 'M'\n @property\n def HPO(self):\n return []\n @property\n def FILTER(self):\n return self.filter\n @property\n def filter(self):\n self.__dict__['filter']=self.__dict__['FILTER']\n return self.__dict__['filter']\n @property\n def hom_samples(self):\n if 'hom_samples' in self.__dict__: return self.__dict__['hom_samples']\n q=vcf.vcf_query(variant_str=self.variant_id)\n self.__dict__.update(q)\n print(self.save())\n return self.__dict__['hom_samples']\n @property\n def het_samples(self):\n if 'het_samples' in self.__dict__: return self.__dict__['het_samples']\n q=vcf.vcf_query(variant_str=self.variant_id)\n self.__dict__.update(q)\n print(self.save())\n return self.__dict__['het_samples']\n def to_JSON(self):\n return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)\n def get_minimal_representation(self): \n \"\"\"\n Get the minimal representation of a variant, based on the ref + alt alleles in a VCF\n This is used to make sure that multiallelic variants in different datasets, \n with different combinations of alternate alleles, can always be matched directly. 
\n        Note that chromosome is ignored here - in xbrowse, we'll probably be dealing with 1D coordinates \n        Args: \n            pos (int): genomic position in a chromosome (1-based)\n            ref (str): ref allele string\n            alt (str): alt allele string\n        Returns: \n            tuple: (pos, ref, alt) of remapped coordinate\n        \"\"\"\n        self.pos = int(self.pos)\n        # If it's a simple SNV, don't remap anything\n        if len(self.ref) == 1 and len(self.alt) == 1: return self.pos, self.ref, self.alt\n        # strip off identical suffixes\n        while(self.alt[-1] == self.ref[-1] and min(len(self.alt),len(self.ref)) > 1):\n            self.alt = self.alt[:-1]\n            self.ref = self.ref[:-1]\n        # strip off identical prefixes and increment position\n        while(self.alt[0] == self.ref[0] and min(len(self.alt),len(self.ref)) > 1):\n            self.alt = self.alt[1:]\n            self.ref = self.ref[1:]\n            self.pos += 1\n        return self.pos, self.ref, self.alt \n    def add_consequence_to_variant(self):\n        variant = self.__dict__\n        worst_csq = self.worst_csq_with_vep(variant['vep_annotations'])\n        if worst_csq is None: return\n        variant['major_consequence'] = worst_csq['major_consequence']\n        variant['HGVSp'] = get_protein_hgvs(worst_csq)\n        variant['HGVSc'] = get_transcript_hgvs(worst_csq)\n        variant['HGVS'] = get_proper_hgvs(worst_csq)\n        variant['CANONICAL'] = worst_csq['CANONICAL']\n        variant['flags'] = self.get_flags_from_variant()\n        if csq_order_dict[variant['major_consequence']] <= csq_order_dict[\"frameshift_variant\"]:\n            variant['category'] = 'lof_variant'\n        elif csq_order_dict[variant['major_consequence']] <= csq_order_dict[\"missense_variant\"]:\n            # Should be noted that this grabs inframe deletion, etc.\n            variant['category'] = 'missense_variant'\n        elif csq_order_dict[variant['major_consequence']] <= csq_order_dict[\"synonymous_variant\"]:\n            variant['category'] = 'synonymous_variant'\n        else:\n            variant['category'] = 'other_variant'\n    @property\n    def data(self): return self.__dict__\n    @property\n    def consequence(self):\n        \"\"\"\n        Return the most severe consequence\n        \"\"\"\n        if 'consequence' in self.__dict__: return self.__dict__['consequence']\n        if 'major_consequence' in self.__dict__: return self.__dict__['major_consequence']\n        if 'most_severe_consequence' in self.__dict__: return self.__dict__['most_severe_consequence']\n        url='http://grch37.rest.ensembl.org/vep/human/hgvs/%s?content-type=application/json' % self.hgvs.replace('chr','')\n        r=requests.get(url)\n        print(url)\n        d=r.json()\n        #if not isinstance(d,list) and len(d) < 1: return None\n        if 'error' in d: return None\n        d=d[0]\n        print(d['most_severe_consequence'])\n        self.__dict__['consequence']=d['most_severe_consequence']\n        print(self.save())\n        return self.__dict__['consequence']\n    @property\n    def transcripts(self):\n        if 'transcript_consequences' in self.__dict__: return [x for x in self.transcript_consequences]\n        if 'transcripts' in self.__dict__: return self.__dict__['transcripts']\n        url='http://grch37.rest.ensembl.org/vep/human/hgvs/%s?content-type=application/json' % self.hgvs.replace('chr','')\n        r=requests.get(url)\n        print(url)\n        d=r.json()\n        if 'error' in d: return None\n        d=d[0]\n        self.__dict__['transcripts']=list(set([csq['transcript_id'] for csq in d['transcript_consequences']]))\n        self.__dict__['genes']=list(set([csq['gene_id'] for csq in d['transcript_consequences']]))\n        print(self.save())\n        if not isinstance(d,list) and len(d) < 1: return None\n        return self.__dict__['transcripts']\n    @property\n    def genes(self):\n        if 'genes' in self.__dict__: return list(set(self.__dict__['genes']))\n        url='http://grch37.rest.ensembl.org/vep/human/hgvs/%s?content-type=application/json' % self.hgvs.replace('chr','')\n        
r=requests.get(url)\n print(url)\n d=r.json()[0]\n self.__dict__['genes']=list(set([csq['gene_id'] for csq in d['transcript_consequences']]))\n print(self.save())\n return self.__dict__['genes']\n @property\n def canonical_hgvsp(self):\n if 'canonical_hgvsp' in self.__dict__:\n return self.__dict__['canonical_hgvsp']\n else:\n return []\n @property\n def canonical_hgvsc(self):\n if 'canonical_hgvsc' in self.__dict__:\n return self.__dict__['canonical_hgvsc']\n else:\n return []\n @property\n def p_hgvs(self):\n \"\"\"\n Takes consequence dictionary, returns proper variant formatting for synonymous variants\n \"\"\"\n if '%3D' in csq['HGVSp']:\n try:\n amino_acids = ''.join([protein_letters_1to3[x] for x in csq['Amino_acids']])\n return \"p.\" + amino_acids + csq['Protein_position'] + amino_acids\n except Exception, e:\n print 'Could not create HGVS for: %s' % csq\n return csq['HGVSp'].split(':')[-1]\n @property\n def snpeff(self):\n if 'snpeff' in self.__dict__: return self.__dict__['snpeff']\n self.__dict__['snpeff'] = rest.mv.getvariant('chr%s:g.%s%s>%s'%(self.chrom,self.pos,self.ref,self.alt,),fields='snpeff')\n return self.__dict__['snpeff']\n @property\n def canonical_cadd(self):\n if 'canonical_cadd' in self.__dict__: return self.__dict__['canonical_cadd']\n return ''\n @property\n def cadd(self):\n if 'canonical_cadd' in self.__dict__: return self.__dict__['canonical_cadd']\n if 'cadd' in self.__dict__: return self.__dict__['cadd'].get('phred',None)\n cadd = rest.mv.getvariant('chr%s:g.%s%s>%s'%(self.chrom,self.pos,self.ref,self.alt,),fields='cadd')\n if cadd and 'cadd' in cadd:\n self.__dict__['cadd']=cadd['cadd']\n else:\n self.__dict__['cadd']={}\n print(self.save())\n return self.__dict__['cadd'].get('phred',None)\n @property\n def vep_annotations(self):\n if 'vep_annotations' in self.__dict__: return self.__dict__['vep_annotations']\n if 'transcript_consequences' in self.__dict__: return self.__dict__['transcript_consequences']\n self.__dict__['vep_annotations']=rest.vep_anno(self.chrom, self.pos, self.ref, self.alt)\n print('number of transcripts:', len(self.__dict__['vep_annotations']))\n self.__dict__['transcript_consequences']=self.__dict__['vep_annotations'][0]['transcript_consequences']\n self.__dict__['gene_name_upper']=self.__dict__['transcript_consequences'][0]['gene_symbol']\n print('gene_symbol', self.__dict__['gene_name_upper'])\n #print(self.__dict__['vep_annotations'])\n #self.__dict__['vep_annotations'] = order_vep_by_csq(self.__dict__['vep_annotations']) \n #self.ordered_csqs = [x['major_consequence'] for x in self.__dict__['vep_annotations']]\n # Close but not quite there\n #ordered_csqs = reduce(lambda x, y: ','.join([x, y]) if y not in x else x, ordered_csqs, '').split(',')\n #consequences = defaultdict(lambda: defaultdict(list))\n #for annotation in self.data['vep_annotations']:\n #annotation['HGVS'] = get_proper_hgvs(annotation)\n #consequences[annotation['major_consequence']][annotation['Gene']].append(annotation)\n return self.__dict__['vep_annotations']\n @property\n def transcript_consequences(self):\n if 'transcript_consequences' in self.__dict__: return self.__dict__['transcript_consequences']\n #print(self.vep_annotations)\n return self.__dict__['transcript_consequences']\n @vep_annotations.setter\n def vep_annotations(self,value):\n self.__dict__['vep_annotations']=value\n @property\n def in_exac(self):\n if 'EXAC' in self.__dict__ and self.__dict__['EXAC'] and len(self.__dict__['EXAC'])>0:\n self.__dict__['in_exac']=True\n else:\n 
self.__dict__['in_exac']=False\n        return self.__dict__['in_exac']\n    @property\n    def EXAC(self):\n        if 'EXAC' in self.__dict__ and self.__dict__['EXAC']:\n            if ',' in str(self.__dict__['EXAC']['AC_Hom']):\n                self.multi=True\n                self.__dict__['EXAC']['AC_Hom']=int(self.__dict__['EXAC']['AC_Hom'].split(',')[0])\n                print self.variant_id\n            self.__dict__['EXAC']['total_homs']=float(self.__dict__['EXAC']['AC_Hom'])/2\n            return self.__dict__['EXAC']\n        else:\n            return None\n        if 'EXAC_freq' in self.__dict__: return self.__dict__['EXAC_freq']\n        #self.__dict__['EXAC_freq']=rest.exac_anno(self.data['variant_id'],update=False)\n        if len(self.__dict__['EXAC_freq'])>0:\n            self.__dict__['in_exac']=True\n        else:\n            self.__dict__['in_exac']=False\n        #print(self.save())\n        return self.__dict__['EXAC_freq']\n    @EXAC.setter\n    def EXAC(self,value):\n        self.__dict__['ExAC_freq']=value\n        return self.__dict__['EXAC_freq']\n    @property\n    def ExAC_freq(self):\n        if 'ExAC_freq' in self.__dict__ and 'total_homs' in self.__dict__['ExAC_freq']: return self.__dict__['ExAC_freq']\n        self.__dict__['ExAC_freq']=rest.exac_anno(self.variant_id,update=False)\n        print(self.__dict__['ExAC_freq'].keys())\n        #print(self.save())\n        return self.__dict__['ExAC_freq']\n    @property\n    def WT_COUNT(self):\n        if 'WT_COUNT' in self.__dict__: return self.__dict__['WT_COUNT']\n        q=vcf.vcf_query(variant_str=self.variant_id)\n        if q is None: raise Exception('ERROR',self.variant_id)\n        self.__dict__.update(q)\n        print(self.save())\n        return self.__dict__['WT_COUNT']\n    @property\n    def HOM_COUNT(self):\n        if 'HOM_COUNT' in self.__dict__: return self.__dict__['HOM_COUNT']\n        q=vcf.vcf_query(variant_str=self.variant_id)\n        if q is None: raise Exception('ERROR',self.variant_id)\n        self.__dict__.update(q)\n        print(self.save())\n        return self.__dict__['HOM_COUNT']\n    @property\n    def allele_num(self):\n        if 'allele_num' in self.__dict__: return self.__dict__['allele_num']\n        q=vcf.vcf_query(variant_str=self.variant_id)\n        if q is None: raise Exception('ERROR',self.variant_id)\n        self.__dict__.update(q)\n        print(self.save())\n        return self.__dict__['allele_num']\n    def get_flags_from_variant(self):\n        flags = []\n        if 'mnps' in self.__dict__:\n            flags.append('MNP')\n        #lof_annotations = [x for x in variant['vep_annotations'] if x['LoF'] != '']\n        lof_annotations = []\n        if not len(lof_annotations): return flags\n        if all([x['LoF'] == 'LC' for x in lof_annotations]):\n            flags.append('LC LoF')\n        if all([x['LoF_flags'] != '' for x in lof_annotations]):\n            flags.append('LoF flag')\n        return flags\n    @property\n    def HUGO(self):\n        if 'gene_name_upper' in self.__dict__: return self.__dict__['gene_name_upper']\n        if 'canonical_gene_name_upper' in self.__dict__: return self.__dict__['canonical_gene_name_upper'][0]\n        else: print(self.variant_id)\n        return ''\n        #self.vep_annotations\n        #print(self.save())\n        return self.__dict__['gene_name_upper']\n    @property\n    def description(self):\n        if 'description' in self.__dict__: return self.__dict__['description']\n        g=Variant.db.genes.find_one({'gene_name_upper':self.HUGO})\n        self.__dict__['description']=g.get('full_gene_name','')\n        return self.__dict__['description']\n    @property\n    def OMIM(self):\n        if 'OMIM' in self.__dict__: return self.__dict__['OMIM']\n        #self.__dict__['OMIM']=self.vep_annotations[0]['SYMBOL']\n        #print(self.save())\n        #return self.__dict__['OMIM']\n        return ''\n    @property\n    def p_change(self):\n        if 'p_change' in self.__dict__: return self.__dict__['p_change']\n        if 'HGVSp' in self.__dict__: return self.__dict__['HGVSp']\n        #if 'canonical_hgvsp' in self__dict__: return 
self.__dict__['canonical_hgvsp']\n self.__dict__['p_change']=dict()\n #self.__dict__['p_change']=\n #trans['hgvsp'].split(':')[1]\n self.__dict__['p_change']['exon']=''\n self.__dict__['p_change']['gene_id']=self.genes[0]\n self.__dict__['p_change']['transcript_id']=self.canonical_transcript[0]\n self.__dict__['p_change']['hgvs_c']=self.canonical_hgvsc[0]\n self.__dict__['p_change']['hgvs_p']=self.canonical_hgvsp[0]\n return self.__dict__['p_change']\n # get db\n def stuff():\n if 'consequence' in self.__dict__ and len(self.__dict__['consequence']): return self.__dict__['consequence']\n pp = pprint.PrettyPrinter(indent=10)\n v['Consequence']=[transcript['consequence_terms'][0] for transcript in v['vep_annotations']['transcript_consequences']]\n v['vep_annotations']['Consequence']=[csq for csq in v['Consequence']]\n print ('CSQ')\n print( v['vep_annotations']['Consequence'] )\n worst_csq = worst_csq_with_vep(variant['vep_annotations'])\n if worst_csq is None: return\n variant['major_consequence'] = worst_csq['major_consequence']\n variant['HGVSp'] = get_protein_hgvs(worst_csq)\n variant['HGVSc'] = get_transcript_hgvs(worst_csq)\n variant['HGVS'] = get_proper_hgvs(worst_csq)\n variant['CANONICAL'] = worst_csq['CANONICAL']\n variant['flags'] = get_flags_from_variant(variant)\n if csq_order_dict[variant['major_consequence']] <= csq_order_dict[\"frameshift_variant\"]:\n variant['category'] = 'lof_variant'\n elif csq_order_dict[variant['major_consequence']] <= csq_order_dict[\"missense_variant\"]:\n # Should be noted that this grabs inframe deletion, etc.\n variant['category'] = 'missense_variant'\n elif csq_order_dict[variant['major_consequence']] <= csq_order_dict[\"synonymous_variant\"]:\n variant['category'] = 'synonymous_variant'\n else:\n variant['category'] = 'other_variant'\n def worst_csq_with_vep(self, annotation_list):\n \"\"\"\n Takes list of VEP annotations [{'Consequence': 'frameshift', Feature: 'ENST'}, ...]\n Returns most severe annotation (as full VEP annotation [{'Consequence': 'frameshift', Feature: 'ENST'}])\n Also tacks on worst consequence for that annotation (i.e. 
worst_csq_from_csq)\n :param annotation_list:\n :return worst_annotation:\n \"\"\"\n if len(annotation_list) == 0: return None\n worst = annotation_list[0]\n for annotation in annotation_list:\n if compare_two_consequences(annotation['Consequence'], worst['Consequence']) < 0:\n worst = annotation\n elif compare_two_consequences(annotation['Consequence'], worst['Consequence']) == 0 and annotation['CANONICAL'] == 'YES':\n worst = annotation\n worst['major_consequence'] = worst_csq_from_csq(worst['Consequence'])\n return worst\n \n\n\n\n", "id": "9833117", "language": "Python", "matching_score": 3.9702610969543457, "max_stars_count": 24, "path": "orm/variant.py" }, { "content": "from operator import itemgetter\nimport lookups\n\nAF_BUCKETS = [0.0001, 0.0002, 0.0005, 0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1]\nMETRICS = [\n 'BaseQRankSum',\n 'ClippingRankSum',\n 'DP',\n 'FS',\n 'InbreedingCoeff',\n 'MQ',\n 'MQRankSum',\n 'QD',\n 'ReadPosRankSum',\n 'VQSLOD'\n]\n\n\ndef add_transcript_coordinate_to_variants(db, variant_list, transcript_id):\n \"\"\"\n Each variant has a 'xpos' and 'pos' positional attributes.\n This method takes a list of variants and adds a third position: the \"transcript coordinates\".\n This is defined as the distance from the start of the transcript, in coding bases.\n So a variant in the 7th base of the 6th exon of a transcript will have a transcript coordinate of\n the sum of the size of the first 5 exons) + 7\n This is 0-based, so a variant in the first base of the first exon has a transcript coordinate of 0.\n\n You may want to add transcript coordinates for multiple transcripts, so this is stored in a variant as\n variant['transcript_coordinates'][transcript_id]\n\n If a variant in variant_list does not have a `transcript_coordinates` dictionary, we create one\n\n If a variant start position for some reason does not fall in any exons in this transcript, its coordinate is 0.\n This is perhaps logically inconsistent,\n but it allows you to spot errors quickly if there's a pileup at the first base.\n `None` would just break things.\n\n Consider the behavior if a 20 base deletion deletes parts of two exons.\n I think the behavior in this method is consistent, but beware that it might break things downstream.\n\n Edits variant_list in place; no return val\n \"\"\"\n # make sure exons is sorted by (start, end)\n exons = sorted(lookups.get_exons_in_transcript(db, transcript_id), key=itemgetter('start', 'stop'))\n # offset from start of base for exon in ith position (so first item in this list is always 0)\n exon_offsets = [0 for i in range(len(exons))]\n for i, exon in enumerate(exons):\n for j in range(i+1, len(exons)):\n exon_offsets[j] += exon['stop'] - exon['start']\n\n for variant in variant_list:\n if 'transcript_coordinates' not in variant:\n variant['transcript_coordinates'] = {}\n variant['transcript_coordinates'][transcript_id] = 0\n for i, exon in enumerate(exons):\n if exon['start'] <= variant['pos'] <= exon['stop']:\n variant['transcript_coordinates'][transcript_id] = exon_offsets[i] + variant['pos'] - exon['start']\n\n\n\n\nprotein_letters_1to3 = {\n 'A': 'Ala', 'C': 'Cys', 'D': 'Asp', 'E': 'Glu',\n 'F': 'Phe', 'G': 'Gly', 'H': 'His', 'I': 'Ile',\n 'K': 'Lys', 'L': 'Leu', 'M': 'Met', 'N': 'Asn',\n 'P': 'Pro', 'Q': 'Gln', 'R': 'Arg', 'S': 'Ser',\n 'T': 'Thr', 'V': 'Val', 'W': 'Trp', 'Y': 'Tyr',\n 'X': 'Ter', '*': 'Ter'\n}\n\n\ndef get_proper_hgvs(csq):\n # Needs major_consequence\n if csq['major_consequence'] in ('splice_donor_variant', 
'splice_acceptor_variant', 'splice_region_variant'):\n return get_transcript_hgvs(csq)\n else:\n return get_protein_hgvs(csq)\n\n\ndef get_transcript_hgvs(csq):\n return csq['HGVSc'].split(':')[-1]\n\nCHROMOSOMES = ['chr%s' % x for x in range(1, 23)]\nCHROMOSOMES.extend(['chrX', 'chrY', 'chrM'])\nCHROMOSOME_TO_CODE = { item: i+1 for i, item in enumerate(CHROMOSOMES) }\n\ndef get_single_location(chrom, pos):\n \"\"\"\n Gets a single location from chromosome and position\n chr must be actual chromosme code (chrY) and pos must be integer\n\n Borrowed from xbrowse\n \"\"\"\n return CHROMOSOME_TO_CODE[chrom] * int(1e9) + pos\n\n\ndef get_xpos(chrom, pos):\n \"\"\"\n Borrowed from xbrowse\n \"\"\"\n if not chrom.startswith('chr'):\n chrom = 'chr{}'.format(chrom)\n return get_single_location(chrom, int(pos))\n\n\n\n", "id": "11410644", "language": "Python", "matching_score": 0.6979855895042419, "max_stars_count": 24, "path": "utils.py" }, { "content": "from views import *\nfrom lookups import *\nimport requests\nimport re\nfrom utils import *\nimport itertools\nfrom config import config\nif config.IMPORT_PYSAM_PRIMER3:\n import pysam\nimport csv\n#hpo lookup\nimport orm\nimport subprocess\n\[email protected]('/register',methods=['POST'])\ndef register():\n name=request.form.get('name').replace(' ','')\n affiliation=request.form.get('affiliation')\n email=request.form.get('email')\n groups=request.form.getlist('group[]')\n user=orm.User(user_db=get_db(app.config['DB_NAME_USERS']),user=name,groups=groups,email=email,affiliation=affiliation)\n print(user.json())\n print(user.status)\n return jsonify(message=user.status['message']), user.status['http_code']\n\n\[email protected]('/', methods=['GET'])\ndef homepage():\n cache_key = 't-home'\n db=get_db()\n patients_db=get_db(app.config['DB_NAME_PATIENTS']) \n total_variants=db.variants.count()\n total_variants=db.variants.count()\n print('total_variants',total_variants,)\n total_patients=patients_db.patients.count()\n print('total_patients',total_patients,)\n male_patients=patients_db.patients.find( {'sex':'M'}).count()\n print('male_patients',male_patients,)\n female_patients=patients_db.patients.find( {'sex':'F'}).count()\n print('female_patients',female_patients,)\n unknown_patients=patients_db.patients.find( {'sex':'U'}).count()\n if config.LOCAL:\n hpo_json={}\n else:\n hpo_file='uclex_stats/overall_hpo_2016_Aug_2.json'\n hpo_json = json.load(open(hpo_file,'r'))\n exac_variants=0\n print('exac_variants',exac_variants,)\n pass_variants=db.variants.find({'FILTER':'PASS'}).count()\n print('pass_variants',pass_variants,)\n #pass_exac_variants=db.variants.find({'in_exac':True,'filter':'PASS'}).count()\n #pass_exac_variants=db.variants.find({'in_exac':True,'filter':'PASS'}).count()\n pass_exac_variants=0\n print('pass_exac_variants',pass_exac_variants,)\n #pass_exac_variants=db.variants.find({'in_exac':True,'filter':'PASS'}).count()\n pass_exac_variants=0\n #nonexac_variants=db.variants.find({'in_exac':False}).count()\n nonexac_variants=0\n #pass_nonexac_variants=db.variants.find({'in_exac':False,'filter':'PASS'}).count()\n pass_nonexac_variants=0\n nonpass_variants=(total_variants-pass_variants)\n nonpass_nonexac_variants=nonexac_variants-pass_nonexac_variants\n try:\n version_number = subprocess.check_output(['git', 'describe', '--exact-match'])\n except:\n version_number = None\n print('Version number is:-')\n print(version_number)\n labnames= ['black','brogan','elliott','gosgene','hardcastle','humphries','kelsell',\n 
'lachmann','marks','mead','moosajee','nejentsev','rahman','segal',\n 'sisodiya','arvc','syrris','ukirdc','vulliamy','webster']\n username = ''\n if session and 'user' in session:\n username = session['user']\n return render_template('home.html', title='Phenopolis - Home Page',\n total_patients=total_patients,\n male_patients=male_patients,\n female_patients=female_patients,\n unknown_patients=unknown_patients,\n hpo_json=json.dumps(hpo_json),\n total_variants=total_variants,\n exac_variants=exac_variants,\n pass_variants=pass_variants,\n nonpass_variants=nonpass_variants,\n pass_exac_variants=pass_exac_variants,\n pass_nonexac_variants=pass_nonexac_variants,\n version_number=version_number,\n labnames=labnames,\n username=username)\n\n", "id": "550331", "language": "Python", "matching_score": 5.59880256652832, "max_stars_count": 24, "path": "views/home.py" }, { "content": "from views import *\nfrom lookups import *\nimport requests\nimport re\nfrom utils import *\nimport itertools\nfrom config import config\nif config.IMPORT_PYSAM_PRIMER3:\n import pysam\nimport csv\n#hpo lookup\nimport orm\nimport subprocess\n\n\n\[email protected]('/search', methods=['GET','POST'])\n@requires_auth\ndef search():\n cache_key = 't-homepage'\n #t = cache.get(cache_key)\n #if t: return t\n db=get_db()\n patients_db=get_db(app.config['DB_NAME_PATIENTS']) \n total_variants=db.variants.count()\n print('total_variants',total_variants,)\n total_patients=patients_db.patients.count()\n print('total_patients',total_patients,)\n male_patients=patients_db.patients.find( {'sex':'M'}).count()\n print('male_patients',male_patients,)\n female_patients=patients_db.patients.find( {'sex':'F'}).count()\n print('female_patients',female_patients,)\n unknown_patients=patients_db.patients.find( {'sex':'U'}).count()\n if config.LOCAL:\n hpo_json={}\n else:\n hpo_file='uclex_stats/overall_hpo_2016_Aug_2.json'\n hpo_json = json.load(open(hpo_file,'r'))\n exac_variants=0\n print('exac_variants',exac_variants,)\n pass_variants=db.variants.find({'FILTER':'PASS'}).count()\n print('pass_variants',pass_variants,)\n #pass_exac_variants=db.variants.find({'in_exac':True,'filter':'PASS'}).count()\n #pass_exac_variants=db.variants.find({'in_exac':True,'filter':'PASS'}).count()\n pass_exac_variants=0\n print('pass_exac_variants',pass_exac_variants,)\n #pass_exac_variants=db.variants.find({'in_exac':True,'filter':'PASS'}).count()\n pass_exac_variants=0\n #nonexac_variants=db.variants.find({'in_exac':False}).count()\n nonexac_variants=0\n #pass_nonexac_variants=db.variants.find({'in_exac':False,'filter':'PASS'}).count()\n pass_nonexac_variants=0\n nonpass_variants=(total_variants-pass_variants)\n nonpass_nonexac_variants=nonexac_variants-pass_nonexac_variants\n #labels = 'PASS', 'non-PASS',\n #sizes =[100*pass_variants/float(total_variants),100*(nonpass_variants)/float(total_variants)]\n #print(sizes)\n #colors = ['yellowgreen', 'red']\n #explode = (0.1, 0)\n #plt.figure(figsize=(5,5))\n #plt.margins(1, 1)\n #plt.pie(sizes, explode=explode, labels=labels, colors=colors, autopct='%1.1f%%', shadow=True, startangle=90)\n ## Set aspect ratio to be equal so that pie is drawn as a circle.\n #plt.axis('equal')\n #plt.axis('off')\n #plt.show()\n # word cloud\n #from os import path\n #from wordcloud import WordCloud\n #text = 'HPO HPO HPO HPO all day'\n ## Read the whole text.\n ## take relative word frequencies into account, lower max_font_size\n #wordcloud = WordCloud().generate(text)\n #plt.figure()\n #plt.imshow(wordcloud)\n #plt.axis(\"off\")\n 
#plt.show()\n #imgdata = StringIO.StringIO()\n #plt.savefig(imgdata, format='svg')\n #imgdata.seek(0) # rewind the data\n #import urllib\n #image=urllib.quote(base64.b64encode(imgdata.buf))\n #image=imgdata.buf\n #image = '<svg' + image.split('<svg')[1]\n\n try:\n version_number = subprocess.check_output(['git', 'describe', '--exact-match'])\n except:\n version_number = None\n print('Version number is:-')\n print(version_number)\n\n t = render_template('search.html',\n title='home',\n total_patients=total_patients,\n male_patients=male_patients,\n female_patients=female_patients,\n unknown_patients=unknown_patients,\n hpo_json=json.dumps(hpo_json),\n total_variants=total_variants,\n exac_variants=exac_variants,\n pass_variants=pass_variants,\n nonpass_variants=nonpass_variants,\n pass_exac_variants=pass_exac_variants,\n pass_nonexac_variants=pass_nonexac_variants,\n #image=image.decode('utf8'))\n image=\"\",\n version_number=version_number)\n #cache.set(cache_key, t)\n return t\n\n\n", "id": "10053721", "language": "Python", "matching_score": 1.9482859373092651, "max_stars_count": 24, "path": "views/search.py" }, { "content": "import json\nimport pymongo\nfrom pymongo import MongoClient\nfrom flask import Flask, current_app\nimport views\nfrom config import config\n\n# Load the test data set.\ndef load_data():\n app = Flask(__name__)\n with app.app_context():\n indexes = ['id', 'name']\n import_data('test_hpo', 'hpo', \"./tests/data/hpo-hpo.json\", indexes)\n indexes = ['gene_id', 'gene_name_upper', 'gene_name', 'other_names', 'xstart', 'xstop']\n import_data('test_uclex', 'genes', \"./tests/data/uclex-genes.json\", indexes)\n indexes = ['gene', 'mode', 'p']\n import_data('test_uclex', 'simreg', \"./tests/data/uclex-simreg-TTLL5.json\", indexes)\n # 'EXAC' should also be made an index but it throws an error - key too long.\n indexes = ['variant_id', 'CHROM', 'canonical_cadd', 'FILTER', 'canonical_transcript', 'hom_samples', 'het_samples', 'canonical_gene_name_upper']\n #indexes = ['variant_id', 'CHROM', 'canonical_cadd', 'EXAC', 'FILTER', 'canonical_transcript', 'hom_samples', 'het_samples', 'canonical_gene_name_upper']\n import_data('test_uclex', 'variants', \"./tests/data/uclex-variant-TTLL5.json\", indexes)\n indexes = ['gene_id']\n import_data('test_uclex', 'gene_hpo', \"./tests/data/uclex-gene_hpo-TTLL5.json\", indexes)\n indexes = ['gene_id', 'gene_name']\n import_data('test_hpo', 'gene_hpo', \"./tests/data/hpo-gene_hpo-TTLL5.json\", indexes)\n indexes = ['gene', 'hpo']\n import_data('test_hpo', 'genes_pheno', \"./tests/data/hpo-genes_pheno-TTLL5.json\", indexes)\n indexes = ['external_id', 'report_id', 'features.id', 'sex', 'genes.gene', 'solved', 'clinicalStatus.clinicalStatus', 'specificity.score']\n import_data('test_patients', 'patients', \"./tests/data/patients-patients-hidden.json\", indexes)\n import_data('test_users', 'users', \"./tests/data/users.json\")\n\n# Load the test data set needed for test_login.\ndef load_user_data():\n app = Flask(__name__)\n with app.app_context():\n import_data('test_users', 'users', \"./tests/data/users.json\")\n\n# Create a collection in the db, drop any existing data, add data from file.\n# Create indexes in to the data.\ndef import_data(db_name, collection_name, file_location, indexes=None):\n db = views.get_db(db_name)\n collection = db.get_collection(collection_name)\n collection.drop()\n with open(file_location, 'r') as json_data:\n for line in json_data:\n dataset = json.loads(line.replace(\"$oid\", \"ObjectId\").replace(\"$numberLong\", 
\"NumberLong\"))\n collection.insert(dataset,check_keys=False)\n if indexes:\n for item in indexes:\n collection.create_index(item)\n \n", "id": "10775233", "language": "Python", "matching_score": 2.8305881023406982, "max_stars_count": 24, "path": "tests/load_data.py" }, { "content": "import sys\nimport pymongo\nimport json\n\n'''\nBuild an HPO to patient id cache to facilitate rapid lookup of individuals by HPO term.\n'''\n\n\nconn = pymongo.MongoClient(host='phenotips', port=27017)\ndb = conn['uclex']\npatient_db=conn['patients']\n\ndb.solved_patients.drop()\n\ndb.solved_patients.create_index('external_id',unique=True)\ndb.solved_patients.create_index('genes')\n\nfor p in patient_db.patients.find():\n if 'external_id' not in p: continue\n #print p['external_id'], p['solved'], p.get('genes',[])\n if 'genes' not in p: continue\n solved_genes=dict()\n for g in p['genes']:\n het=[]\n hom=[]\n for var in db.variants.find({'canonical_gene_name_upper':g['gene']}):\n if p['external_id'] in var['het_samples'] and var['HET_COUNT']<20:\n het+=[var['variant_id']]\n print(p['external_id'], g['gene'], 'het',var['variant_id'],var['most_severe_consequence'],var['HET_COUNT'],var['canonical_cadd'],var.get('kaviar',''))\n if p['external_id'] in var['hom_samples'] and var['HET_COUNT']<20:\n hom+=[var['variant_id']]\n print(p['external_id'], g['gene'],'hom',var['variant_id'],var['most_severe_consequence'],var['HET_COUNT'],var['canonical_cadd'],var.get('kaviar',''))\n solved_genes[g['gene']]={'het':het, 'hom':hom}\n print(db.solved_patients.insert({'external_id':p['external_id'], 'genes':solved_genes}))\n\n", "id": "4732491", "language": "Python", "matching_score": 2.249823570251465, "max_stars_count": 24, "path": "mongodb/solved_patients.py" }, { "content": "from views import *\nfrom lookups import *\nimport requests\nimport re\nfrom utils import *\nimport itertools\nfrom config import config\nif config.IMPORT_PYSAM_PRIMER3:\n import pysam\nimport csv\n#hpo lookup\nimport orm\nfrom pprint import pprint\nimport os\nimport json\nimport pymongo\nimport sys\nimport re\nimport itertools\nfrom urllib2 import HTTPError, URLError\nimport csv\nfrom collections import defaultdict, Counter\n#import rest as annotation\nfrom optparse import OptionParser\nimport mygene\nimport lookups\nfrom orm import Patient\nimport requests\n\nfrom neo4j.v1 import GraphDatabase, basic_auth\n\ndef individuals_update(external_ids):\n patients_db=get_db(app.config['DB_NAME_PATIENTS'])\n users_db=get_db(app.config['DB_NAME_USERS'])\n def f(eid):\n p=patients_db.patients.find_one({'external_id':eid},{'_id':False})\n print p['external_id']\n p['features']=[f for f in p.get('features',[]) if f['observed']=='yes']\n if 'solved' in p:\n if 'gene' in p['solved']:\n p['solved']=[p['solved']['gene']]\n else:\n p['solved']=[]\n else: p['solved']=[]\n if 'genes' in p: p['genes']=[x['gene'] for x in p['genes'] if 'gene' in x]\n else: p['genes']=[]\n p['genes']=list(frozenset(p['genes']+p['solved']))\n p2=get_db().patients.find_one({'external_id':p['external_id']},{'rare_homozygous_variants_count':1,'rare_compound_hets_count':1, 'rare_variants_count':1,'total_variant_count':1})\n if not p2: return p\n p['rare_homozygous_variants_count']=p2.get('rare_homozygous_variants_count','')\n p['rare_compound_hets_count']=p2.get('rare_compound_hets_count','')\n p['rare_variants_count']=p2.get('rare_variants_count','')\n p['total_variant_count']=p2.get('total_variant_count','')\n 
#p['all_variants_count']=get_db().patients.find_one({'external_id':p['external_id']},{'_id':0,'all_variants_count':1})['all_variants_count']\n #db.cache.find_one({\"key\" : \"%s_blindness,macula,macular,retina,retinal,retinitis,stargardt_\" % })\n if '_id' in p: del p['_id']\n return p\n new_individuals=[f(eid) for eid in external_ids]\n old_individuals=users_db.users.find_one({'user':session['user']}).get('individuals',[])\n old_individuals=[ind for ind in old_individuals if ind['external_id'] not in external_ids]\n individuals=new_individuals+old_individuals\n users_db.users.update_one({'user':session['user']},{'$set':{'individuals':individuals}})\n return individuals\n\n\[email protected]('/update_patient_data/<individual>',methods=['POST'])\n@requires_auth\ndef update_patient_data(individual):\n if session['user']=='demo': return 'not permitted'\n print(request.form)\n consanguinity=request.form.getlist('consanguinity_edit[]')[0]\n gender=request.form.getlist('gender_edit[]')[0]\n genes=request.form.getlist('genes[]')\n features=request.form.getlist('feature[]')\n print('INDIVIDUAL',individual)\n print('GENDER',gender)\n print('CONSANGUINITY',consanguinity)\n print('GENES',genes)\n print('FEATURES',features)\n print(individual)\n external_id=individual\n individual=get_db(app.config['DB_NAME_PATIENTS']).patients.find_one({'external_id':external_id})\n print('edit patient gender')\n print(get_db(app.config['DB_NAME_PATIENTS']).patients.update_one({'external_id':external_id},{'$set':{'sex':{'female':'F','male':'M','unknown':'U'}[gender]}}))\n print('edit patient genes')\n individual['genes']=[]\n for g in genes:\n gene=get_db(app.config['DB_NAME']).genes.find_one({'gene_name_upper':g})\n print(gene)\n if gene in [g['gene'] for g in individual['genes']]: continue\n if not gene: continue\n individual['genes'].append({'gene':g, 'status':'candidate'})\n print(individual['genes'])\n print(get_db(app.config['DB_NAME_PATIENTS']).patients.update_one({'external_id':external_id},{'$set':{'genes':individual['genes']}}))\n print('edit patient features')\n individual['features']=[]\n for f in features:\n hpo=get_db(app.config['DB_NAME_HPO']).hpo.find_one({'name':re.compile('^'+f+'$',re.IGNORECASE)})\n if not hpo: continue\n if hpo in [h['label'] for h in individual['features']]: continue\n individual['features'].append({'id':hpo['id'][0], 'label':hpo['name'][0], 'observed':'yes'})\n print(get_db(app.config['DB_NAME_PATIENTS']).patients.update_one({'external_id':external_id},{'$set':{'features':individual['features']}}))\n print(get_db(app.config['DB_NAME_PATIENTS']).patients.update_one({'external_id':external_id},{'$set':{'observed_features':[f for f in individual['features'] if f['observed']=='yes']}}))\n print('edit patient consanguinity')\n individual['family_history']=individual.get('family_history',{})\n if (consanguinity)=='unknown':\n individual['family_history']['consanguinity']=None\n elif consanguinity.lower()=='yes':\n individual['family_history']['consanguinity']=True\n elif consanguinity.lower()=='no':\n individual['family_history']['consanguinity']=False\n print(get_db(app.config['DB_NAME_PATIENTS']).patients.update_one({'external_id':external_id},{'$set':{'family_history':individual['family_history']}}))\n # also trigger refresh of that individual for individuals summary page\n patient=Patient(external_id,get_db(app.config['DB_NAME_PATIENTS']))\n print(patient.consanguinity)\n print(patient.observed_features)\n print(patient.genes)\n print(patient.gender)\n 
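# the prints above are debug output confirming the freshly saved Patient attributes;\n    # refreshing the per-user cache below keeps the individuals summary page in sync with the edits.\n    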
individuals_update([external_id])\n return jsonify({'success': True}), 200\n\n\[email protected]('/individual_json/<individual>')\n@requires_auth\ndef individual_json(individual):\n patient=Patient(individual,patient_db=get_db(app.config['DB_NAME_PATIENTS']))\n #PP.addPatientGenderInfo(data.result.sex); \n #PP.addPatientFeaturesInfo(data.result.observed_features);\n #PP.addPatientConsanguinityInfo(data.result.family_history);\n #PP.addPatientGenesInfo(data.result.genes);\n #PP.submitEditedIndividual(patientId);\n return patient.json()\n\n\[email protected]('/individual/<individual>')\n@requires_auth\n<EMAIL>(timeout=24*3600)\ndef individual_page(individual):\n patient=Patient(individual,patient_db=get_db(app.config['DB_NAME_PATIENTS']),variant_db=get_db(app.config['DB_NAME']),hpo_db=get_db(app.config['DB_NAME_HPO']))\n #if session['user']=='demo': individual=decrypt(str(individual))\n # make sure that individual is accessible by user\n if not lookup_patient(db=get_db(app.config['DB_NAME_USERS']),user=session['user'],external_id=individual): return 'Sorry you are not permitted to see this patient, please get in touch with us to access this information.'\n db=get_db()\n hpo_db=get_db(app.config['DB_NAME_HPO'])\n # TODO\n # mode of inheritance in hpo terms: HP:0000005\n #print lookups.get_hpo_children(hpo_db, 'HP:0000005')\n #patient['global_mode_of_inheritance']=patient2.get('global_mode_of_inheritance',None)\n # minimise it\n patient.__dict__['hpo_ids']=lookups.hpo_minimum_set(get_db(app.config['DB_NAME_HPO']), patient.hpo_ids)\n hpo_gene=get_hpo_gene(patient.hpo_ids)\n # get pubmedbatch scores\n pubmedbatch = {}\n genes = {}\n # is this still updating?\n if type(pubmedbatch) is dict:\n update_status = pubmedbatch.get('status', 0)\n else:\n update_status=0\n # get known and retnet genes\n known_genes=[x['gene_name'] for x in db.retnet.find()]\n RETNET = dict([(i['gene_name'],i) for i in db.retnet.find({},projection={'_id':False})])\n print 'get pubmed score and RETNET'\n gene_info=dict()\n individuals=dict()\n #\n genes=[]\n #genes['homozygous_variants']=[v['canonical_gene_name_upper'] for v in patient.homozygous_variants]\n #genes['compound_hets']=[v['canonical_gene_name_upper'] for v in patient.compound_het_variants]\n #genes['rare_variants']=[v['canonical_gene_name_upper'] for v in patient.rare_variants]\n # print(g, genes_pubmed[g])\n # figure out the order of columns from the variant row\n table_headers=re.findall(\"<td class='?\\\"?(.*)-cell'?\\\"?.*>\",file('templates/individual-page-tabs/individual_variant_row.tmpl','r').read())\n if session['user']=='demo': table_headers=table_headers[:-1]\n print table_headers\n # get a list of genes related to retinal dystrophy. only relevant to subset group of ppl. talk to Jing or Niko for other cohorts. Note that dominant p value only counts paitents with 1 qualified variant on the gene. 
\n # current setting: unrelated, exac_af 0.01 for recessive, 0.001 for dominant, cadd_phred 15\n print 'get phenogenon genes'\n retinal_genes = {}\n return render_template('individual.html', \n patient=patient,\n table_headers=table_headers,\n pubmedbatch=pubmedbatch,\n pubmed_db=get_db('pubmed_cache'),\n genes = genes,\n individuals=individuals,\n hpo_gene = hpo_gene,\n gene_info={},\n update_status = 0,\n retinal_genes = {},\n feature_venn = [])\n\n\ndef get_feature_venn(patient):\n s=\"\"\"\n MATCH (p:Person)-[:PersonToObservedTerm]->(t:Term)--(g:Gene)\n WHERE p.personId='%s'\n RETURN t.termId, t.name, g.gene_id, g.gene_name\n \"\"\" % patient\n print(s)\n with neo4j_driver.session() as neo4j_session:\n result=neo4j_session.run(s)\n\n data = []\n for r in result:\n data.append({\n 'hpo_id': r['t.termId'],\n 'hpo_term': r['t.name'],\n 'gene_id': r['g.gene_id'],\n 'gene_name': r['g.gene_name']\n })\n\n hpo_terms=[(k,v,) for k, v, in dict([(x['hpo_id'],x['hpo_term'],) for x in data]).items()]\n hpo_gene=dict()\n for x in data:\n hpo_gene[x['hpo_id']]=hpo_gene.get(x['hpo_id'],[])+[x['gene_name']]\n\n genes = {}\n feature_combo = []\n feature_venn = []\n print \"get combinatorics of features to draw venn diagram\"\n for i in range(len(hpo_terms[:5])):\n feature_combo.extend(itertools.combinations(range(len(hpo_terms)), i+1))\n print 'calculate Venn diagram'\n for combo in feature_combo:\n # construct features_venn key\n #venn_ind += 1\n dic_key = [hpo_terms[i][1] for i in combo]\n for ind in range(len(combo)):\n if ind == 0:\n x=hpo_terms[combo[ind]][0]\n feature_venn.append({'key': dic_key, 'value':list(frozenset(hpo_gene.get(x,\"\")))})\n else:\n tem = feature_venn[-1]['value']\n feature_venn[-1]['value'] = list(frozenset(feature_venn[-1]['value']) & frozenset(hpo_gene[hpo_terms[combo[ind]][0]]))\n return feature_venn\n\n\n\[email protected]('/venn_json/<individual>')\n@requires_auth\ndef venn_json(individual):\n feature_venn=get_feature_venn(individual)\n return jsonify(result=feature_venn)\n\n\ndef patient_variants():\n # add known gene and retnet gene labels, and re-calculate pubmed_score\n for mm in ['rare_variants','homozygous_variants','compound_het_variants']:\n for v in patient.__dict__[mm]:\n if 'canonical_gene_name_upper' not in v: v['canonical_gene_name_upper']=v['Gene']\n gene=v['canonical_gene_name_upper']\n pubmed_key = '_'.join([gene,patient.get('pubmed_key','')])\n gene_info[gene]=dict()\n if gene in known_genes: \n gene_info[gene]['known']=True\n pubmedbatch[pubmed_key] = max(1,pubmedbatch.get('pubmed_key',0))\n if gene not in RETNET: continue\n gene_info[gene]['disease'] = RETNET[gene]['disease']\n gene_info[gene]['omim'] = RETNET[gene]['omim']\n gene_info[gene]['mode'] = RETNET[gene]['mode']\n pubmedbatch[pubmed_key] = max(1,pubmedbatch.get('pubmed_key',0))\n if mm != 'rare_variants' or ('d' in gene_info[gene]['mode'] and mm == 'rare_variants') :\n pubmedbatch[pubmed_key] = max(100,pubmedbatch[pubmed_key])\n if gene=='DRAM2':\n print pubmed_key\n print pubmedbatch[pubmed_key]\n if 'het_samples' not in v: print(v)\n for s in v['het_samples']:\n if v['HET_COUNT'] < 10:\n individuals[s]=individuals.get(s,[])+[v]\n\n\n\ndef get_hpo_gene(hpo_ids):\n hpo_db=get_db(app.config['DB_NAME_HPO'])\n hpo_terms = [(i, hpo_db.hpo.find_one({'id':i})['name'][0]) for i in hpo_ids]\n # this has missing HPO ids. 
see IRDC_batch2_OXF_3001 and #HP:0000593\n hpo_gene=dict()\n for hpo_id,hpo_term, in hpo_terms:\n hpo_gene[hpo_id] = []\n for gene_name in [x['Gene-Name'] for x in hpo_db.ALL_SOURCES_ALL_FREQUENCIES_phenotype_to_genes.find({'HPO-ID':hpo_id},{'Gene-Name':1,'_id':0})]:\n #gene_hpo[gene_name]=gene_hpo.get(gene_name,[])+[{'hpo_id':hpo_id,'hpo_term':hpo_term}]\n hpo_gene[hpo_id]=hpo_gene.get(hpo_id,[])+[gene_name]\n for k in hpo_gene: hpo_gene[k]=list(frozenset(list(hpo_gene[k])))\n return hpo_gene\n\n\ndef find_item(obj, key):\n if key in obj:\n return obj[key]\n if isinstance(obj, dict):\n for k in obj:\n if isinstance(obj[k], dict):\n item = find_item(obj[k], key)\n if item is not None:\n return item\n elif isinstance(obj[k], list):\n for i in obj[k]:\n if isinstance(i, str):\n continue\n item = find_item(i, key)\n if item is not None:\n return item\n elif isinstance(obj, list):\n for k in obj:\n if isinstance(k, dict):\n item = find_item(k, key)\n if item is not None:\n return item\n elif isinstance(k, list):\n for i in k:\n if isinstance(i, str):\n continue\n item = find_item(i, key)\n if item is not None:\n return item\n\ndef exomiser(individual):\n patient_hpo_terms=lookups.get_patient_hpo(hpo_db, patient_db, individual, ancestors=False)\n patient_hpo_terms = dict([(hpo['id'][0],{'id':hpo['id'][0],'name':hpo['name'][0], 'is_a':hpo.get('is_a',[])}) for hpo in patient_hpo_terms])\n patient_hpo_ids=patient_hpo_terms.keys()\n x['exomiser']=[]\n for g in list(set(x['genes'])):\n r=db.ensembl_entrez.find_one({'Ensembl Gene ID':g})\n if not r or not r['EntrezGene ID']: continue\n x['entrezgeneid']=r['EntrezGene ID']\n #url='http://localhost:8085/exomiser/api/prioritise/?phenotypes=%s&prioritiser=hiphive&genes=%s&prioritiser-params=human,mouse,fish'%(','.join(patient_hpo_terms.keys()), x['entrezgeneid'])\n url='http://monarch-exomiser-prod.monarchinitiative.org/exomiser/api/prioritise/?phenotypes=%s&prioritiser=hiphive&genes=%s&prioritiser-params=human,mouse,fish'%(','.join(patient_hpo_terms.keys()), x['entrezgeneid'])\n print(url)\n r=requests.get(url)\n if isinstance(r.json(),list):\n x['exomiser']+=r.json()[0]['results']\n else:\n x['exomiser']+=r.json()['results']\n if len(x['exomiser'])<1: x['exomiser']=[{'score':-1}]\n exomiser_scores=[xx['score'] for xx in x['exomiser']]\n i=exomiser_scores.index(max(exomiser_scores))\n x['exomiser']=x['exomiser'][i]\n\n\[email protected]('/homozygous_variants_json/<individual>')\n@requires_auth\ndef homozgous_variants(individual):\n patient=Patient(individual,patient_db=get_db(app.config['DB_NAME_PATIENTS']),variant_db=get_db(app.config['DB_NAME']),hpo_db=get_db(app.config['DB_NAME_HPO']))\n return jsonify(result=patient.homozygous_variants)\n\n\ndef merge_dicts(*dict_args):\n \"\"\"\n Given any number of dicts, shallow copy and merge into a new dict,\n precedence goes to key value pairs in latter dicts.\n \"\"\"\n result = {}\n for dictionary in dict_args:\n result.update(dictionary)\n return result\n\[email protected]('/homozygous_variants_json2/<individual>')\n@requires_auth\ndef homozygous_variants2(individual):\n allele_freq=float(request.args.get('allele_freq',0.001))\n kaviar_AF=float(request.args.get('kaviar_AF',0.001))\n s=\"\"\" MATCH\n (p)-[:PersonToObservedTerm]-(t:Term),\n (t)--(g:Gene)--(gv:GeneticVariant)-[:HomVariantToPerson]-(p:Person), \n (gv)--(tv:TranscriptVariant)\n WHERE p.personId='%s' AND gv.kaviar_AF < %f AND gv.allele_freq < %f\n WITH gv, g, t, tv\n OPTIONAL\n MATCH\n (gv)-[:HetVariantToPerson]-(p2:Person)\n OPTIONAL\n MATCH\n 
(gv)-[:HomVariantToPerson]-(p3:Person)\n RETURN gv,\n collect(distinct g),\n collect(distinct t),\n collect(distinct tv),\n collect(distinct p2),\n collect(distinct p3)\n \"\"\" % (individual,kaviar_AF,allele_freq,)\n print(s)\n with neo4j_driver.session() as neo4j_session: \n result=neo4j_session.run(s)\n return jsonify(result=[merge_dicts(dict(r[0]),\n {'genes':[dict(x) for x in r[1]]},\n {'terms':[dict(x) for x in r[2]]},\n {'transcript_variants':[dict(x) for x in r[3]]},\n {'het_individuals':[dict(x) for x in r[4]]},\n {'hom_individuals':[dict(x) for x in r[5]]}\n ) for r in result])\n\n\[email protected]('/compound_het_variants_json2/<individual>',methods=['GET','POST'])\n@requires_auth\ndef compound_het_variants2(individual):\n kaviar_AF=float(request.args.get('kaviar_AF',0.01))\n allele_freq=float(request.args.get('allele_freq',0.01))\n s=\"\"\"\n MATCH\n (p)-[:PersonToObservedTerm]-(t:Term),\n (g:Gene)--(gv:GeneticVariant)-[:HetVariantToPerson]-(p:Person)\n WHERE p.personId='%s' AND gv.kaviar_AF<%f and gv.allele_freq < %f\n WITH g, collect(distinct gv) AS cgv\n WHERE length(cgv) > 1\n UNWIND cgv as v\n OPTIONAL\n MATCH\n (v)-[:HetVariantToPerson]-(p2:Person)\n OPTIONAL\n MATCH\n (v)-[:HomVariantToPerson]-(p3:Person)\n RETURN v,\n collect(distinct g),\n collect(distinct p2),\n collect(distinct p3)\n \"\"\" % (individual,kaviar_AF,allele_freq)\n print(s)\n with neo4j_driver.session() as neo4j_session:\n result=neo4j_session.run(s)\n return jsonify(result=[ merge_dicts(\n dict(r[0]),\n {'terms':[]},\n {'genes':[dict(x) for x in r[1]]},\n {'transcript_variants':[]},\n {'het_individuals':[dict(x) for x in r[2]]},\n {'hom_individuals':[dict(x) for x in r[3]]}\n ) for r in result])\n \[email protected]('/compound_het_variants_json/<individual>')\n@requires_auth\ndef compound_het_variants(individual):\n patient=Patient(individual,patient_db=get_db(app.config['DB_NAME_PATIENTS']),variant_db=get_db(app.config['DB_NAME']),hpo_db=get_db(app.config['DB_NAME_HPO']))\n return jsonify(result=patient.compound_het_variants)\n\[email protected]('/rare_variants_json2/<individual>')\n@requires_auth\ndef rare_variants2(individual):\n kaviar_AF=float(request.args.get('kaviar_AF',0.01))\n allele_freq=float(request.args.get('allele_freq',0.01))\n s=\"\"\" MATCH\n (p)-[:PersonToObservedTerm]-(t:Term),\n (t)--(g:Gene)--(gv:GeneticVariant)-[:HetVariantToPerson]-(p:Person), \n (gv)--(tv:TranscriptVariant)\n WHERE p.personId='%s' AND gv.kaviar_AF < %f AND gv.allele_freq < %f\n WITH gv, g, t, tv\n OPTIONAL\n MATCH\n (gv)-[:HetVariantToPerson]-(p2:Person)\n OPTIONAL\n MATCH\n (gv)-[:HomVariantToPerson]-(p3:Person)\n RETURN gv,\n collect(distinct g),\n collect(distinct t),\n collect(distinct tv),\n collect(distinct p2),\n collect(distinct p3)\n \"\"\" % (individual,kaviar_AF,allele_freq,)\n print(s)\n with neo4j_driver.session() as neo4j_session:\n result=neo4j_session.run(s)\n return jsonify(result=[ merge_dicts(\n dict(r[0]),\n {'genes':[dict(x) for x in r[1]]},\n {'terms':[dict(x) for x in r[2]]},\n {'transcript_variants':[dict(x) for x in r[3]]},\n {'het_individuals':[dict(x) for x in r[4]]},\n {'hom_individuals':[dict(x) for x in r[5]]}\n ) for r in result])\n \n\n\ndef load_patient(individual,auth,pubmed_key,hpo='HP:0000001'):\n hpo_db=get_db(app.config['DB_NAME_HPO'])\n db = get_db()\n patient_db=get_db(app.config['DB_NAME_PATIENTS'])\n patient_id=individual\n patient={u'features': {u'observed': u'yes', u'type': u'phenotype', u'id': hpo}, 'clinicalStatus': {u'clinicalStatus': u'affected'}, u'ethnicity': 
{u'maternal_ethnicity': [], u'paternal_ethnicity': []}, u'family_history': {}, u'disorders': [], u'life_status': u'alive', u'reporter': u'', u'genes': [], u'prenatal_perinatal_phenotype': {u'prenatal_phenotype': [], u'negative_prenatal_phenotype': []}, u'prenatal_perinatal_history': {u'twinNumber': u''}, u'sex': u'U', u'solved': {u'status': u'unsolved'}}\n eid=patient_id\n if p: patient.update(p)\n #patient_hpo_terms=','.join([f['id'] for f in patient['features'] if f['observed']=='yes'])\n gene_counter=Counter([var['canonical_gene_name_upper'] for var in patient.rare_variants])\n for var in patient['rare_variants']: var['gene_count']=gene_counter[var['canonical_gene_name_upper']]\n patient[\"pubmedbatch_status\"]=0\n pubmed_key=\"blindness-macula-macular-pigmentosa-retina-retinal-retinitis-stargardt\"\n patient[\"pubmed_key\"]=pubmed_key\n #db.patients.update({'external_id':patient_id}, patient, upsert=True)\n\n\n\[email protected]('/individual_update/<individual>')\n@requires_auth\ndef individual_update(individual):\n print 'UPDATE'\n print p\n print get_db(app.config['DB_NAME_PATIENTS']).patients.update({'external_id':individual},{'$set':p})\n print 'DB'\n print get_db(app.config['DB_NAME_PATIENTS']).patients.find_one({'external_id':individual})\n if request.referrer:\n referrer=request.referrer\n u = urlparse(referrer)\n referrer='%s://%s' % (u.scheme,u.hostname,)\n if u.port: referrer='%s:%s' % (referrer,u.port,)\n return redirect(referrer+'/individual/'+individual)\n else:\n return 'done'\n\n\n'''\nprogress bar query\n'''\[email protected]('/pubmedbatch_progress_bar/<id>')\ndef pubmedbatch_progress(id):\n user = session.get('user') or app.config['DEFAULT_USER']\n progress_id = user + id\n return jsonify(PROGRESS_BAR[progress_id])\n\n'''\nget pubmedbatch cache results based on pubmedkey\n'''\[email protected]('/pubmedbatch-cache/<pubmedkey>')\ndef pubmedbatch_getcache(pubmedkey):\n db = get_db('pubmedbatch') \n result = db.cache.find_one({'key':pubmedkey},{'_id':False})\n if result: return jsonify(result)\n else: return jsonify('')\n\n\[email protected]('/homozygous_individuals_json/<variant_id>')\n@requires_auth\ndef get_homozygous_individuals(variant_id):\n s=\"\"\"\n MATCH\n (v)-[:HomVariantToPerson]-(p:Person)\n WHERE v.variantId='%s'\n RETURN p\n \"\"\" % variant_id\n with neo4j_driver.session() as neo4j_session:\n result=neo4j_session.run(s)\n return jsonify(result=[ merge_dicts(\n dict(r[0])) for r in result])\n\n\[email protected]('/heterozygous_individuals_json/<variant_id>')\n@requires_auth\ndef get_heterozygous_individuals(variant_id):\n s=\"\"\"\n MATCH\n (v)-[:HetVariantToPerson]-(p:Person)\n WHERE v.variantId='%s'\n RETURN p\n \"\"\" % variant_id\n db_session = neo4j_driver.session()\n result=db_session.run(s)\n return jsonify(result=[ merge_dicts(\n dict(r[0])) for r in result])\n\n\n\n\n\n", "id": "6249402", "language": "Python", "matching_score": 6.5427350997924805, "max_stars_count": 24, "path": "views/individual.py" }, { "content": "from views import *\nfrom lookups import *\nimport requests\nimport re\nfrom utils import *\nimport itertools\nfrom config import config\nif config.IMPORT_PYSAM_PRIMER3:\n import pysam\nimport csv\n#hpo lookup\nimport orm\n\n\ndef individuals_update(external_ids):\n patients_db=get_db(app.config['DB_NAME_PATIENTS'])\n users_db=get_db(app.config['DB_NAME_USERS'])\n def f(eid):\n p=patients_db.patients.find_one({'external_id':eid},{'_id':False})\n print p['external_id']\n p['features']=[f for f in p.get('features',[]) if 
f['observed']=='yes']\n if 'solved' in p:\n if 'gene' in p['solved']:\n p['solved']=[p['solved']['gene']]\n else:\n p['solved']=[]\n else: p['solved']=[]\n if 'genes' in p: p['genes']=[x['gene'] for x in p['genes'] if 'gene' in x]\n else: p['genes']=[]\n p['genes']=list(frozenset(p['genes']+p['solved']))\n p2=get_db().patients.find_one({'external_id':p['external_id']},{'rare_homozygous_variants_count':1,'rare_compound_hets_count':1, 'rare_variants_count':1,'total_variant_count':1})\n if not p2: return p\n p['rare_homozygous_variants_count']=p2.get('rare_homozygous_variants_count','')\n p['rare_compound_hets_count']=p2.get('rare_compound_hets_count','')\n p['rare_variants_count']=p2.get('rare_variants_count','')\n p['total_variant_count']=p2.get('total_variant_count','')\n #p['all_variants_count']=get_db().patients.find_one({'external_id':p['external_id']},{'_id':0,'all_variants_count':1})['all_variants_count']\n #db.cache.find_one({\"key\" : \"%s_blindness,macula,macular,retina,retinal,retinitis,stargardt_\" % })\n if '_id' in p: del p['_id']\n return p\n new_individuals=[f(eid) for eid in external_ids]\n old_individuals=users_db.users.find_one({'user':session['user']}).get('individuals',[])\n old_individuals=[ind for ind in old_individuals if ind['external_id'] not in external_ids]\n individuals=new_individuals+old_individuals\n users_db.users.update_one({'user':session['user']},{'$set':{'individuals':individuals}})\n return individuals\n\n\ndef get_individuals(user):\n s=\"\"\"\n MATCH (u:User {user:'%s'})--(p:Person)-[:PersonToObservedTerm]->(t:Term),\n (p)-[:CandidateGene]-(g:Gene)\n RETURN p.personId as individual,\n p.gender as gender,\n collect(DISTINCT t) as phenotypes,\n p.score as phenotypeScore,\n size((p)<-[:HomVariantToPerson]-()) as hom_count,\n size((p)<-[:HetVariantToPerson]-()) as het_count,\n collect(DISTINCT g.gene_name) as genes;\n \"\"\" % user\n\n with neo4j_driver.session() as db_session: \n result=db_session.run(s)\n data = []\n for r in result:\n data.append({\n 'individual': r['individual'],\n 'gender': r['gender'],\n 'phenotypes': [dict(x) for x in r['phenotypes']],\n 'phenotypeScore': r['phenotypeScore'],\n 'hom_count': r['hom_count'],\n 'het_count': r['het_count'],\n 'genes': [y for y in r['genes']]\n })\n return data\n\n\[email protected]('/my_patients_json')\n@requires_auth\ndef my_patients_json():\n users_db=get_db(app.config['DB_NAME_USERS'])\n user=users_db.users.find_one({'user':session['user']})\n individuals=get_individuals(user['user'])\n return(jsonify(result=individuals))\n\n\n# shows each patients, \n# all_individuals\[email protected]('/my_patients')\n@requires_auth\ndef my_patients():\n return render_template('my_patients.html')\n\n# shows each individual, \n# all_individuals\[email protected]('/individuals_csv')\n@requires_auth\ndef individuals_csv():\n page=int(request.args.get('page',0))\n number=int(request.args.get('number',200))\n hpo_db=get_db(app.config['DB_NAME_HPO'])\n def f(p):\n print p['external_id']\n p['features']=[f for f in p.get('features',[]) if f['observed']=='yes']\n if 'solved' in p:\n if 'gene' in p['solved']:\n p['solved']=[p['solved']['gene']]\n else:\n p['solved']=[]\n else: p['solved']=[]\n if 'genes' in p: p['genes']=[x['gene'] for x in p['genes'] if 'gene' in x]\n else: p['genes']=[]\n p['genes']=list(frozenset(p['genes']+p['solved']))\n p2=get_db().patients.find_one({'external_id':p['external_id']},{'rare_homozygous_variants_count':1,'rare_compound_hets_count':1, 'rare_variants_count':1,'total_variant_count':1})\n if 
not p2: return p\n p['rare_homozygous_variants_count']=p2.get('rare_homozygous_variants_count','')\n p['rare_compound_hets_count']=p2.get('rare_compound_hets_count','')\n p['rare_variants_count']=p2.get('rare_variants_count','')\n p['total_variant_count']=p2.get('total_variant_count','')\n #p['all_variants_count']=get_db().patients.find_one({'external_id':p['external_id']},{'_id':0,'all_variants_count':1})['all_variants_count']\n #db.cache.find_one({\"key\" : \"%s_blindness,macula,macular,retina,retinal,retinitis,stargardt_\" % })\n return p\n conn=PhenotipsClient()\n all_patients=conn.get_patient(session=session).get('patientSummaries',[]) \n all_eids=[p['eid'] for p in all_patients if p['eid']]\n total=len(all_eids)\n print('TOTAL NUMBER OF PATIENTS',total)\n patients=conn.get_patient(session=session,start=page*number,number=number).get('patientSummaries',[])\n eids=[p['eid'] for p in patients if p['eid']]\n print(eids)\n patients=get_db(app.config['DB_NAME_PATIENTS']).patients.find({'external_id':{'$in':eids}})\n #patients=get_db(app.config['DB_NAME_PATIENTS']).patients.find({'external_id':re.compile('^IRDC')},{'pubmedBatch':0})\n individuals=[f(p) for p in patients if 'external_id' in p]\n # family_history\":{\"consanguinity\":true}\n #if session['user']=='demo': for ind in individuals: ind['external_id']=encrypt(ind['external_id'])\n #return render_template('individuals_page.html',individuals=individuals,page=page,number=number,total=total)\n return '\\n'.join([','.join([ind['external_id'],ind['total_variant_count'],ind['rare_variants_count']]) for ind in individuals])\n", "id": "10271350", "language": "Python", "matching_score": 2.7938649654388428, "max_stars_count": 24, "path": "views/my_patients.py" }, { "content": "\nimport requests\nimport json\nimport pymongo\nimport sys\n\nclient = pymongo.MongoClient(port=27017)\npatients_db = client['patients']\n\n\ndef score_phenotype(external_id):\n p=patients_db.patients.find_one({'external_id':external_id})\n x=dict()\n x['id']=p['report_id']\n x['features']=p['features']\n for f in p['features']:\n f['isPresent']={'yes':'true','no':'false'}[f['observed']]\n del f['observed']\n del f['label']\n del f['type']\n #print x\n #x={\"features\" : [{\"id\":\"HP:0000505\",'isPresent' : True},{\"id\":\"HP:0000479\", 'isPresent' : False}, {\"id\":\"HP:0001010\",'isPresent' : True},{\"id\":\"HP:0000044\", 'isPresent' : False}]}\n x=json.dumps(x)\n url='https://monarchinitiative.org/score/?annotation_profile={}'.format(x)\n print url\n r=requests.get(url,headers={'Content-Type':'application/json'})\n return r.json()\n\ndef hpo_terms(p):\n x=dict()\n x['id']=p['report_id']\n x['features']=p['features']\n for f in p['features']:\n f['isPresent']={'yes':'true','no':'false'}[f['observed']]\n del f['observed']\n del f['label']\n del f['type']\n return x['features']\n\n\ndef compare(individual,individual2):\n individual=conn.get_patient(eid=individual,session=session)\n individual2=conn.get_patient(eid=individual2,session=session)\n hpo1='+'.join([h['id'] for h in hpo_terms(individual)])\n hpo2='+'.join([h['id'] for h in hpo_terms(individual2)])\n url='https://monarchinitiative.org/compare/{}/{}.json'.format(hpo1,hpo2)\n print(url)\n r=requests.get(url,headers={'Content-Type':'application/json'})\n return r.json()\n\ndef get_phenotype_score(hpo):\n client = pymongo.MongoClient(port=27017)\n db = client['patients']\n for ind in db.patients.find():\n hpo2='+'.join([h['id'] for h in hpo_terms(ind)])\n 
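# hpo is the query profile, hpo2 the per-patient profile; the Monarch /compare endpoint\n        # (e.g. /compare/HP:0000505+HP:0000479/HP:0001010.json, illustrative ids)\n        # returns pairwise similarity, read below from x['b'][0]['score']['score'].\n        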
url='https://monarchinitiative.org/compare/{}/{}.json'.format(hpo,hpo2)\n r=requests.get(url,headers={'Content-Type':'application/json'})\n x=r.json()\n if 'b' not in x:\n print 'SCORE', hpo, ind['external_id'], 0\n else:\n print 'SCORE', hpo, ind['external_id'], x['b'][0]['score']['score']\n\n\n\n", "id": "7151251", "language": "Python", "matching_score": 2.0663955211639404, "max_stars_count": 24, "path": "rest/monarch.py" }, { "content": "\n\nfrom os import listdir, chdir\nfrom os.path import isfile, join\nimport pymongo\n\nclass Individual(object):\n def __init__(self, filename, db=None, hpo='HP:0000001'):\n pass\n def load_individual(self):\n conn = pymongo.MongoClient(host='localhost', port=27017)\n db=conn['uclex-old']\n for p in db.patients.find():\n eid=p['external_id']\n print eid\n for k in ['all_variants', 'rare_variants', 'compound_hets', 'homozygous_variants']:\n if k in p:\n variants_count=len(p[k])\n print k, variants_count\n print db.patients.update({'external_id':eid},{'$set':{k+'_count':variants_count}},upsert=True)\n \n def get_patient_observed_hpo(self):\n # returns [('HP:0000001', 'hell yeah')]\n this_patient = patient_db.patients.find_one({'external_id':patient}) \n result = [(None, None)]\n if not this_patient:\n #print 'ERROR: %s not in patients db' % patient\n pass\n else:\n if 'features' not in this_patient:\n print 'WARNING: features not in ' + patient\n p_features = this_patient.get('features', [{'id':'HP:0000001', 'label':'All', 'observed': 'yes' }])\n result = [(f['id'], f['label']) for f in p_features if f['observed']=='yes']\n return result\n\n\n\n", "id": "6016039", "language": "Python", "matching_score": 0.3329143822193146, "max_stars_count": 24, "path": "orm/individual.py" }, { "content": "#!/usr/bin/env python\n\"\"\"Import an individual's VAR.tsv file\"\"\"\n\nimport os\nimport re\nimport sys\nimport atexit\nimport logging\nimport tempfile\nfrom argparse import ArgumentParser, RawDescriptionHelpFormatter\nfrom urllib.parse import urlparse\n\nimport boto3\nimport psycopg2 # type: ignore\nfrom psycopg2 import sql\nfrom botocore.exceptions import ClientError\n\nlogger = logging.getLogger()\nlogging.basicConfig(level=logging.DEBUG, format=\"%(asctime)s %(levelname)s %(message)s\")\n\nIMPORT_TABLE = sql.Identifier(\"iv_import\")\n\n\nclass ScriptError(Exception):\n \"\"\"Controlled exception raised by the script.\"\"\"\n\n\ndef main():\n opt = parse_cmdline()\n logger.setLevel(opt.loglevel)\n\n if opt.resource.startswith(\"s3://\"):\n download_from_aws(opt)\n else:\n opt.file = opt.resource\n\n with psycopg2.connect(opt.dsn) as conn:\n create_temp_table(opt, conn)\n import_temp_table(opt, conn)\n # upsert_individual(opt, conn) # if we will need it\n import_variant(opt, conn)\n import_variant_gene(opt, conn)\n import_individual_variant(opt, conn)\n\n\ndef download_from_aws(opt):\n \"\"\"\n Download opt.resource from aws into a temp file.\n\n After download store the file name in `opt.file`.\n \"\"\"\n check_aws_config(opt)\n\n # s3://phenopolis-individuals/PH00009704/VAR.tsv\n # ^ ^\n # bucket filename\n\n parts = urlparse(opt.resource)\n path = parts.path.lstrip(\"/\")\n indid = path.split(\"/\", 1)[0]\n if not indid.startswith(\"PH\"):\n raise ScriptError(f\"cannot see an individual id in the {opt.resource} url\")\n if not opt.individual:\n opt.individual = indid\n\n # Download the s3 file into the temporary file\n s3 = boto3.resource(\"s3\", endpoint_url=\"https://s3.eu-central-1.wasabisys.com\")\n bucket = s3.Bucket(parts.netloc)\n with 
tempfile.NamedTemporaryFile(delete=False) as f:\n opt.file = f.name\n atexit.register(drop_temp_file, f.name)\n logger.info(\"downloading %s into temp file %s\", opt.resource, f.name)\n try:\n bucket.download_fileobj(path, f)\n except ClientError as exc:\n raise ScriptError(f\"error downloading file: {exc}\")\n\n\ndef drop_temp_file(filename):\n if os.path.exists(filename):\n logger.info(\"dropping temp file %s\", filename)\n os.remove(filename)\n else:\n logger.warn(\"file name %s not found\", filename)\n\n\ndef check_aws_config(opt):\n \"\"\"Bail out if there's something obviously broken in aws.\"\"\"\n for varname in (\"AWS_SECRET_ACCESS_KEY\", \"AWS_ACCESS_KEY_ID\"):\n if not os.environ.get(varname):\n raise ScriptError(f\"env var {varname} not set: this is not gonna work\")\n\n\ndef create_temp_table(opt, conn):\n temp = sql.SQL(\"temp \" if not opt.keep_temp else \"\")\n logger.info(\"creating %stable %s\", temp.as_string(conn), IMPORT_TABLE.as_string(conn))\n\n titles = get_tsv_titles(opt)\n parts = []\n\n parts.append(sql.SQL(\"create {}table {} (\").format(temp, IMPORT_TABLE))\n types = {\n \"pos\": \"bigint\",\n \"dann\": \"float4\",\n \"cadd_phred\": \"float4\",\n \"revel\": \"float4\",\n \"fathmm_score\": \"text\",\n \"canonical\": \"int\",\n \"dp\": \"int\",\n \"fs\": \"float4\",\n \"mq\": \"float4\",\n \"qd\": \"float4\",\n \"het\": \"bool\",\n \"hom\": \"bool\",\n \"strand\": \"smallint\",\n }\n\n for title in titles:\n parts.append(sql.Identifier(title))\n parts.append(sql.SQL(types.get(title, \"text\")))\n parts.append(sql.SQL(\",\"))\n\n parts[-1] = sql.SQL(\")\")\n\n cur = conn.cursor()\n try:\n cur.execute(sql.SQL(\" \").join(parts))\n except psycopg2.errors.DuplicateTable:\n raise ScriptError(\n f\"table {IMPORT_TABLE.strings[0]} already exists: if you used '--keep-temp' you should remove it\"\n )\n\n if opt.keep_temp:\n conn.commit()\n\n\ndef import_temp_table(opt, conn):\n logger.info(\"importing %s into %s\", opt.file, IMPORT_TABLE.as_string(conn))\n\n cur = conn.cursor()\n with open(opt.file) as f:\n stmt = sql.SQL(\"copy {} from stdin (format csv, header true, delimiter '\\t')\").format(IMPORT_TABLE)\n cur.copy_expert(stmt, f)\n\n cur.execute(sql.SQL(\"analyze {}\").format(IMPORT_TABLE))\n\n if opt.keep_temp:\n conn.commit()\n\n\n# def upsert_individual(opt, conn):\n# indid = get_individual_id(opt)\n# cur = conn.cursor()\n# cur.execute(\n# \"select id from phenopolis.individual where phenopolis_id = %s\", (indid,),\n# )\n# rec = cur.fetchone()\n# if not rec:\n# # TODO: insert new?\n# raise ScriptError(f\"individual not found: {indid}\")\n\n# return rec[0]\n\n\ndef import_variant(opt, conn):\n cur = conn.cursor()\n cur.execute(\n sql.SQL(\n \"\"\"\ninsert into phenopolis.variant (\n chrom, pos, ref, alt, dbsnp, variant_class, dann, cadd_phred, revel, fathmm_score)\nselect\n iv.chrom, iv.pos, iv.ref, iv.alt, iv.dbsnp, iv.variant_class,\n iv.dann, iv.cadd_phred, iv.revel,\n string_to_array(iv.fathmm_score, ',', '.')::float4[]\nfrom {} iv\non conflict on constraint variant_key do nothing\n\"\"\"\n ).format(IMPORT_TABLE)\n )\n logger.info(\"variant records imported: %s\", cur.rowcount)\n\n\ndef import_individual_variant(opt, conn):\n cur = conn.cursor()\n indid = get_individual_id(opt)\n\n cur.execute(\n sql.SQL(\n \"\"\"\ninsert into phenopolis.individual_variant (\n individual_id, variant_id, chrom, pos, ref, alt,\n dp, fs, mq, qd, filter, zygosity\n )\nselect\n %s, v.id, iv.chrom, iv.pos, iv.ref, iv.alt,\n iv.dp, iv.fs, iv.mq, iv.qd, iv.filter,\n case when iv.het 
then 'HET' when iv.hom then 'HOM' end\nfrom {} iv\njoin phenopolis.variant v\n on (v.chrom, v.pos, v.ref, v.alt) = (iv.chrom, iv.pos, iv.ref, iv.alt)\non conflict on constraint individual_variant_pkey do nothing\n\"\"\"\n ).format(IMPORT_TABLE),\n (indid,),\n )\n logger.info(\"individual/variant records imported: %s\", cur.rowcount)\n\n\ndef import_variant_gene(opt, conn):\n cur = conn.cursor()\n\n cur.execute(\n sql.SQL(\n \"\"\"\ninsert into phenopolis.variant_gene (\n variant_id, gene_id, transcript_id, strand, exon, most_severe_consequence,\n impact, hgvs_c, hgvs_p, canonical)\nselect\n v.id, iv.gene_id, iv.transcript_id, iv.strand, iv.exon, iv.most_severe_consequence,\n lower(iv.impact), iv.hgvsc, iv.hgvsp, iv.canonical != 0\nfrom {} iv\njoin phenopolis.variant v\n on (v.chrom, v.pos, v.ref, v.alt) = (iv.chrom, iv.pos, iv.ref, iv.alt)\non conflict on constraint variant_gene_pkey do nothing\n\"\"\"\n ).format(IMPORT_TABLE)\n )\n logger.info(\"variant/gene records imported: %s\", cur.rowcount)\n\n\ndef get_tsv_titles(opt, __cache=[]):\n if __cache:\n return __cache[0]\n\n with open(opt.file) as f:\n line = f.readline()\n\n titles = line.lower().split()\n __cache.append(titles)\n return titles\n\n\ndef get_individual_id(opt, __cache=[]):\n if __cache:\n return __cache[0]\n\n if opt.individual:\n rv = int(opt.individual.replace(\"PH\", \"\"))\n\n else:\n m = re.search(r\"PH(\\d+)\", opt.file)\n if m:\n rv = int(m.group(1))\n\n if rv:\n __cache.append(rv)\n logger.info(\"importing data for individual %s\", rv)\n return rv\n else:\n raise ScriptError(\"no individual found in the resource or --individual\")\n\n\ndef parse_cmdline():\n parser = ArgumentParser(description=__doc__, formatter_class=RawDescriptionHelpFormatter)\n\n parser.add_argument(\"resource\", metavar=\"RES\", help=\"the resource to import (file, s3:// url)\")\n parser.add_argument(\"--dsn\", default=\"\", help=\"connection string to import into [default: %(default)r]\")\n parser.add_argument(\"--keep-temp\", action=\"store_true\", help=\"keep the temp table after import (for debugging)\")\n parser.add_argument(\"--individual\", help=\"individual id to import (otherwise try from the filename)\")\n\n g = parser.add_mutually_exclusive_group()\n g.add_argument(\n \"-q\",\n \"--quiet\",\n help=\"talk less\",\n dest=\"loglevel\",\n action=\"store_const\",\n const=logging.WARN,\n default=logging.INFO,\n )\n g.add_argument(\n \"-v\",\n \"--verbose\",\n help=\"talk more\",\n dest=\"loglevel\",\n action=\"store_const\",\n const=logging.DEBUG,\n default=logging.INFO,\n )\n\n opt = parser.parse_args()\n\n return opt\n\n\nif __name__ == \"__main__\":\n try:\n sys.exit(main())\n\n except ScriptError as e:\n logger.error(\"%s\", e)\n sys.exit(1)\n\n except Exception:\n logger.exception(\"unexpected error\")\n sys.exit(1)\n\n except KeyboardInterrupt:\n logger.info(\"user interrupt\")\n sys.exit(1)\n", "id": "9545265", "language": "Python", "matching_score": 8.490264892578125, "max_stars_count": 24, "path": "scripts/import_individual_variants.py" }, { "content": "#!/usr/bin/env python\n\"\"\"Import a variants csv file into the database.\n\"\"\"\n\nimport sys\nimport logging\nfrom argparse import ArgumentParser, RawDescriptionHelpFormatter\n\nimport psycopg2 # type: ignore\nfrom psycopg2 import sql\n\nlogger = logging.getLogger()\nlogging.basicConfig(level=logging.DEBUG, format=\"%(asctime)s %(levelname)s %(message)s\")\n\n\nclass ScriptError(Exception):\n \"\"\"Controlled exception raised by the script.\"\"\"\n\n\nIMPORT_TABLE = 
sql.Identifier(\"variant_csv\")\n\n\ndef main():\n opt = parse_cmdline()\n logger.setLevel(opt.loglevel)\n\n with psycopg2.connect(opt.dsn) as conn:\n create_temp_table(opt, conn)\n import_temp_table(opt, conn)\n insert_variants(opt, conn)\n\n\ndef create_temp_table(opt, conn):\n titles = get_csv_titles(opt)\n parts = []\n\n temp = sql.SQL(\"temp\" if not opt.keep_temp else \"\")\n parts.append(sql.SQL(\"create {} table {} (\").format(temp, IMPORT_TABLE))\n\n for title in titles:\n parts.append(sql.Identifier(title))\n parts.append(sql.SQL(\"text\"))\n parts.append(sql.SQL(\",\"))\n\n parts[-1] = sql.SQL(\")\")\n\n cur = conn.cursor()\n try:\n cur.execute(sql.SQL(\" \").join(parts))\n except psycopg2.errors.DuplicateTable:\n raise ScriptError(\n f\"table {IMPORT_TABLE.strings[0]} already exists: if you used '--keep-temp' you should remove it\"\n )\n\n\ndef import_temp_table(opt, conn):\n cur = conn.cursor()\n with open(opt.file) as f:\n stmt = sql.SQL(\"copy {} from stdin (format csv, header true)\").format(IMPORT_TABLE)\n cur.copy_expert(stmt, f)\n\n cur.execute(sql.SQL(\"analyze {}\").format(IMPORT_TABLE))\n\n\ndef insert_variants(opt, conn):\n cur = conn.cursor()\n\n # use more memory, less disk\n cur.execute(\"set local work_mem to '1 GB'\")\n\n cur.execute(\n sql.SQL(\n \"\"\"\n insert into phenopolis.variant (chrom, pos, ref, alt)\n select chrom, pos::bigint, ref, alt\n from {}\n where (hgvsc, hgvsp) != ('', '')\n group by 1, 2, 3, 4\n on conflict on constraint variant_key do nothing\n \"\"\"\n ).format(IMPORT_TABLE)\n )\n logger.info(\"imported %s new variant records\", cur.rowcount)\n\n # TODO: do we have to update existing values too?\n\n cur.execute(\n sql.SQL(\n \"\"\"\n insert into phenopolis.transcript_consequence\n (chrom, pos, ref, alt, hgvs_c, hgvs_p, consequence, gene_id)\n select * from (\n select\n chrom, pos::bigint, ref, alt,\n nullif(hgvsc, '') as hgvs_c,\n nullif(hgvsp, '') as hgvs_p,\n nullif(most_severe_consequence, '') as consequence,\n nullif(gene_id, '') as gene_id\n from {}\n where (hgvsc, hgvsp) != ('', '')\n ) s\n where not exists (\n select 1 from phenopolis.transcript_consequence t\n where (t.chrom, t.pos, t.ref, t.alt) = (s.chrom, s.pos, s.ref, s.alt)\n and (t.hgvs_c, t.hgvs_p, t.consequence)\n is not distinct from (s.hgvs_c, s.hgvs_p, s.consequence)\n )\n \"\"\"\n ).format(IMPORT_TABLE)\n )\n logger.info(\"imported %s new transcript consequence records\", cur.rowcount)\n\n cur.execute(\"analyze phenopolis.variant, phenopolis.transcript_consequence\")\n logger.info(\"variants tables stats updated\")\n\n\ndef get_csv_titles(opt, __cache=[]):\n if __cache:\n return __cache[0]\n\n with open(opt.file) as f:\n line = f.readline()\n\n titles = line.strip().replace('\"', \"\").lower().split(\",\")\n for t in \"chrom pos ref alt hgvsc hgvsp most_severe_consequence gene_id\".split():\n if t not in titles:\n raise ScriptError(f\"column {t} not found in the csv (available: {', '.join(titles)})\")\n\n __cache.append(titles)\n return titles\n\n\ndef parse_cmdline():\n parser = ArgumentParser(description=__doc__, formatter_class=RawDescriptionHelpFormatter)\n\n parser.add_argument(\"file\", metavar=\"FILE\", help=\"the file to import\")\n parser.add_argument(\"--dsn\", default=\"\", help=\"connection string to import into [default: %(default)r]\")\n parser.add_argument(\"--keep-temp\", action=\"store_true\", help=\"keep the temp table after import (for debugging)\")\n\n g = parser.add_mutually_exclusive_group()\n g.add_argument(\n \"-q\",\n \"--quiet\",\n help=\"talk 
less\",\n dest=\"loglevel\",\n action=\"store_const\",\n const=logging.WARN,\n default=logging.INFO,\n )\n g.add_argument(\n \"-v\",\n \"--verbose\",\n help=\"talk more\",\n dest=\"loglevel\",\n action=\"store_const\",\n const=logging.DEBUG,\n default=logging.INFO,\n )\n\n opt = parser.parse_args()\n\n return opt\n\n\nif __name__ == \"__main__\":\n try:\n sys.exit(main())\n\n except ScriptError as e:\n logger.error(\"%s\", e)\n sys.exit(1)\n\n except Exception:\n logger.exception(\"unexpected error\")\n sys.exit(1)\n\n except KeyboardInterrupt:\n logger.info(\"user interrupt\")\n sys.exit(1)\n", "id": "1719387", "language": "Python", "matching_score": 3.7050528526306152, "max_stars_count": 24, "path": "scripts/import_variants.py" }, { "content": "#!/usr/bin/env python3\n\"\"\"\nApply database patches.\n\nApplied patches are recorded in the schema_patch table of the database.\n\nThe dsn to connect to defaults to a local one (empty connection string). It can\nbe chosen using the command line or an environment variable. Patches\napplication is interactive by default.\n\nA script may be associated with a .pre and .post script, that may be written\nin any script language, they should just have a shebang (e.g. NAME.sql is\nassociated with NAME.pre.py and/or NAME.post.sh).\n\"\"\"\n\nimport logging\nimport os\nimport re\nimport shutil\nimport socket\nimport subprocess as sp\nimport sys\nfrom argparse import ArgumentParser\nfrom glob import glob\n\nimport psycopg2\nfrom psycopg2.extras import NamedTupleCursor\n\nlogging.basicConfig(level=logging.INFO, format=\"%(levelname)s %(message)s\")\nlogger = logging.getLogger()\n\n\nclass ScriptException(Exception):\n pass\n\n\nclass UserInterrupt(Exception):\n pass\n\n\nopt = None\n\n\ndef main():\n global opt\n opt = parse_cmdline()\n grab_lock()\n patches = find_patches()\n verify_patch_table(patches)\n patches = remove_applied_patches(patches)\n if not patches:\n return\n\n logger.info(\"applying patches to the database '%s'\" % opt.dsn)\n try:\n for patch in patches:\n apply_patch(patch)\n finally:\n patches = remove_applied_patches(patches)\n if patches:\n logger.warning(\"The following patches remain unapplied:\")\n for patch in patches:\n logger.warning(\"* %s\" % patch)\n\n\ndef parse_cmdline():\n parser = ArgumentParser(description=\"Apply patches to a database.\",)\n parser.add_argument(\"input\", nargs=\"+\", help=\"The files or directories where to look for patches\")\n parser.add_argument(\n \"--dsn\",\n metavar=\"STRING\",\n default=os.environ.get(\"PATCH_DSN\", \"\"),\n help=\"the database to connect to. 
Read from env var PATCH_DSN if set [default: '%(default)s']\",\n )\n parser.add_argument(\n \"--yes\", \"-y\", action=\"store_true\", help=\"assume affermative answer to all the questions\",\n )\n parser.add_argument(\"--dry-run\", \"-n\", action=\"store_true\", help=\"just pretend\")\n\n g = parser.add_mutually_exclusive_group()\n g.add_argument(\n \"-q\",\n \"--quiet\",\n help=\"Talk less\",\n dest=\"loglevel\",\n action=\"store_const\",\n const=logging.WARN,\n default=logging.INFO,\n )\n g.add_argument(\n \"-v\",\n \"--verbose\",\n help=\"Talk more\",\n dest=\"loglevel\",\n action=\"store_const\",\n const=logging.DEBUG,\n default=logging.INFO,\n )\n\n opt = parser.parse_args()\n logger.setLevel(opt.loglevel)\n return opt\n\n\ndef get_connection():\n # to be found in pg_stat_activity\n os.environ[\"PGAPPNAME\"] = \"patch_db on %s\" % socket.gethostname()\n if opt.dry_run:\n # will work for both psql and psycopg\n os.environ[\"PGOPTIONS\"] = \"-c default_transaction_read_only=on\"\n try:\n conn = psycopg2.connect(opt.dsn)\n conn.autocommit = True\n return conn\n except psycopg2.OperationalError as e:\n raise ScriptException(\n \"failed to connect to dev database: \"\n \"you should probably set the PATCH_DSN variable.\\n\"\n \"Error was: %s\" % e\n )\n\n\ndef grab_lock(_cnn=[]):\n \"\"\"Grab the lock and keep it until the end of the world (the process)\n \"\"\"\n logger.debug(\"trying to grab an advisory lock\")\n\n cid, oid = divmod(3733496049986286126, 2 ** 32)\n\n if _cnn:\n raise ValueError(\"attempted to grab the lock more than once\")\n cnn = get_connection()\n\n # keep this connection alive after return\n _cnn.append(cnn)\n\n # Try and grab the lock\n cur = cnn.cursor()\n cur.execute(\"select pg_try_advisory_lock(%s, %s)\", (cid, oid))\n if cur.fetchone()[0]:\n # lock acquired\n return\n\n # Lock failed, let's see who is in\n cur.execute(\n \"\"\"\n select s.application_name\n from pg_locks l\n join pg_stat_activity s on s.pid = l.pid\n where (l.classid, l.objid, l.objsubid) = (%s, %s, 2)\n and l.locktype = 'advisory'\n and s.datname = current_database();\n \"\"\",\n (cid, oid),\n )\n r = cur.fetchone()\n if not r:\n msg = \"they may have finished by now\"\n else:\n msg = r[0]\n if not msg:\n msg = \"don't know who\"\n\n raise ScriptException(\"couldn't lock the database: somebody else is patching it (%s)\" % msg)\n\n\ndef with_connection(f):\n def with_connection_(*args, **kwargs):\n if args and hasattr(args[0], \"cursor\"):\n return f(*args, **kwargs)\n\n cnn = get_connection()\n\n # extra paranoia\n if opt.dry_run:\n cur = cnn.cursor()\n cur.execute(\"set default_transaction_read_only=on\")\n os.environ[\"PGOPTIONS\"] = \"-c default_transaction_read_only=on\"\n\n try:\n return f(cnn, *args, **kwargs)\n finally:\n cnn.close()\n\n return with_connection_\n\n\ndef find_patches():\n files = []\n for entry in opt.input:\n if os.path.isdir(entry):\n logger.debug(\"looking for patches in %s\", entry)\n files.extend(glob(os.path.join(entry, \"*.sql\")))\n elif os.path.isfile(entry):\n logger.debug(\"got patch %s\", entry)\n files.append(entry)\n elif os.path.exists(entry):\n raise ScriptException(\"not a valid file or dir: %s\" % entry)\n else:\n raise ScriptException(\"input entry not found: %s\" % entry)\n\n files.sort(key=os.path.basename)\n return files\n\n\n@with_connection\ndef table_columns(cnn, name):\n cur = cnn.cursor()\n cur.execute(\n \"\"\"\n select array_agg(attname)\n from (\n select attname\n from pg_attribute join pg_class r on r.oid = attrelid\n where relname = %s\n and 
not attisdropped and attnum > 0\n order by attnum\n ) x\n \"\"\",\n (name,),\n )\n return cur.fetchone()[0]\n\n\n@with_connection\ndef verify_patch_table(cnn, patches):\n cols = table_columns(cnn, \"schema_patch\")\n\n if not cols:\n version = 0\n elif \"stage\" not in cols:\n version = 1\n else:\n version = 2\n\n if version == 2:\n return\n\n patches = {\n 1: \"\"\"\nbegin;\nalter table schema_patch add stage text check (stage = any('{pre,patch,post}'));\nalter table schema_patch drop constraint schema_patch_status_check;\nalter table schema_patch add check (status = any('{applying,applied,skipped,failed,assumed}'));\ncommit;\n\"\"\"\n }\n\n if version == 0:\n logger.warning(\n \"Patches table not found at dsn '%s': assuming all the patches in input have already been applied.\",\n opt.dsn,\n )\n confirm(\"Do you want to continue?\")\n if opt.dry_run:\n return\n\n cur = cnn.cursor()\n cur.execute(\n \"\"\"\n create table schema_patch (\n name text primary key,\n status text not null check (\n status = any('{applying,applied,skipped,failed,assumed}')),\n stage text check (stage = any('{pre,patch,post}'))\n status_date timestamp not null)\n \"\"\"\n )\n\n for patch in patches:\n register_patch(cnn, patch, status=\"assumed\")\n\n # Migrate from old schema of the table\n else:\n cur = cnn.cursor()\n while version in patches:\n confirm(f\"Upgrade patch table from version {version} to version {version + 1}?\")\n logger.info(\"upgrading to patch version %s\", version + 1)\n if not opt.dry_run:\n cur.execute(patches[version])\n version += 1\n\n\n@with_connection\ndef remove_applied_patches(cnn, patches):\n if not table_columns(cnn, \"schema_patch\"):\n # assume --dry-run with non existing table\n return []\n\n cur = cnn.cursor()\n cur.execute(\n \"\"\"\n select name from schema_patch\n where status in ('applied', 'skipped')\"\"\"\n )\n applied = {r[0] for r in cur.fetchall()}\n\n rv = []\n for patch in patches:\n if os.path.basename(patch) not in applied:\n rv.append(patch)\n\n return rv\n\n\n@with_connection\ndef apply_patch(cnn, filename):\n ans = confirm_patch(filename)\n if ans is SKIP:\n register_patch(cnn, filename, status=\"skipped\")\n return\n\n elif not ans:\n return\n\n verify_transaction(filename)\n\n run_script(cnn, filename, \"pre\")\n\n if not opt.dry_run:\n logger.info(\"applying patch '%s'\", filename)\n register_patch(cnn, filename, \"applying\", stage=\"patch\")\n run_psql(cnn, filename)\n else:\n logger.info(\"would apply patch '%s'\", filename)\n\n run_script(cnn, filename, \"post\")\n register_patch(cnn, filename)\n\n\n@with_connection\ndef run_script(cnn, filename, suffix):\n \"\"\"\n Execute a script associated to a db patch.\n\n The db patch /some/path/foo.sql may have a script called\n /some/path/foo.pre.py.\n \"\"\"\n name, ext = os.path.splitext(filename)\n script = glob(name + \".\" + suffix + \".*\")\n if script:\n # assume there's at most one\n script = script[0]\n else:\n return\n\n if not confirm_script(script):\n return\n\n if opt.dry_run:\n logger.info(\"would run script '%s'\", script)\n return\n\n register_patch(cnn, filename, \"applying\", stage=suffix)\n\n logger.info(\"running script '%s'\", script)\n\n # propagate the db dsn to the environment\n os.environ[\"PATCH_DSN\"] = opt.dsn\n\n # execute the script\n script = os.path.abspath(script)\n path = os.path.split(script)[0]\n try:\n sp.check_call(script, cwd=path)\n except sp.CalledProcessError as e:\n try:\n register_patch(cnn, filename, \"failed\", stage=suffix)\n except Exception as e:\n 
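# best-effort bookkeeping: a failure while marking the patch itself as 'failed' is only\n            # logged; the original script error is still surfaced via the ScriptException raised below.\n            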
logger.error(\"failed to register the patch as failed: %s\", e)\n raise ScriptException(e)\n\n\n@with_connection\ndef run_psql(cnn, filename):\n psql = shutil.which(\"psql\")\n dirname, basename = os.path.split(filename)\n cmdline = [\"psql\", \"-X\", \"-e\", \"--set\", \"ON_ERROR_STOP=1\", \"-f\", basename, opt.dsn]\n try:\n if not psql:\n raise ScriptException(\"psql executable not found\")\n try:\n sp.check_call(cmdline, cwd=dirname)\n except Exception:\n raise ScriptException(\"patch failed to apply: %s\" % basename)\n except Exception:\n # try to record the failed state and reraise\n try:\n register_patch(cnn, filename, \"failed\", stage=\"patch\")\n except Exception as e:\n logger.error(\"failed to register the patch as failed: %s\", e)\n raise\n\n\n@with_connection\ndef get_patch(cnn, filename):\n name = os.path.basename(filename)\n cur = cnn.cursor(cursor_factory=NamedTupleCursor)\n cur.execute(\n \"\"\"\n select name, status, stage, status_date\n from schema_patch\n where name = %s\n \"\"\",\n (name,),\n )\n rec = cur.fetchone()\n return rec\n\n\n@with_connection\ndef register_patch(cnn, filename, status=\"applied\", stage=None):\n logger.debug(\"registering patch '%s' as %s\", filename, status + (\"(%s)\" % stage if stage else \"\"))\n if opt.dry_run:\n return\n\n name = os.path.basename(filename)\n patch = get_patch(cnn, filename)\n if patch:\n if patch.status in (\"applied\", \"skipped\"):\n raise ScriptException(\"unexpected patch to apply in status %s\" % patch.status)\n\n cur = cnn.cursor()\n cur.execute(\n \"\"\"\n update schema_patch\n set (status, stage, status_date) = (%s, %s, now())\n where name = %s\n \"\"\",\n (status, stage, name),\n )\n else:\n cur = cnn.cursor()\n cur.execute(\n \"\"\"\n insert into schema_patch (name, status, stage, status_date)\n values (%s, %s, %s, now())\"\"\",\n (name, status, stage),\n )\n\n\ndef verify_transaction(filename):\n \"\"\"Make sure that the script contains a BEGIN\n\n We cannot run psql in single transaction mode or it becomes impossible to\n run certain operations.\n\n Make sure a BEGIN is used \"for real\", but the patch may span outside the\n single transaction if needed.\n \"\"\"\n with open(filename) as f:\n script = f.read()\n\n if not re.search(r\"\\bbegin\\b\", script, re.I):\n raise ScriptException(\"'BEGIN' not found in the patch %s\" % filename)\n if not re.search(r\"\\bcommit\\b\", script, re.I):\n raise ScriptException(\"'COMMIT' not found in the patch %s\" % filename)\n\n\ndef confirm(prompt):\n if opt.yes:\n return\n\n while 1:\n logger.info(\"%s [Y/n]\" % prompt)\n ans = input()\n ans = (ans or \"y\")[0].lower()\n if ans == \"n\":\n raise UserInterrupt\n if ans == \"y\":\n break\n\n\nSKIP = object()\n\n\ndef confirm_patch(filename, _all=[], _warned=[]):\n if opt.yes or _all:\n return True\n\n while 1:\n logger.info(\"Do you want to apply '%s'? 
\" \"(Y)es, (n)o, (v)iew, (s)kip forever, (a)ll, (q)uit\" % filename)\n ans = input()\n ans = (ans or \"y\")[0].lower()\n if ans == \"q\":\n raise UserInterrupt\n if ans == \"n\":\n logger.warning(\"skipping patch '%s'\", filename)\n if not _warned:\n logger.warning(\"following patches may fail to apply\")\n _warned.append(True)\n return False\n if ans == \"v\":\n print(\"Content of the patch '%s':\" % filename, file=sys.stderr)\n with open(filename) as f:\n print(f.read(), file=sys.stderr)\n if ans == \"y\":\n return True\n if ans == \"s\":\n return SKIP\n if ans == \"a\":\n _all.append(True)\n return True\n\n\ndef confirm_script(filename):\n if opt.yes:\n return True\n\n while 1:\n logger.info(\"Do you want to run the script '%s'? (Y)es, (n)o, (v)iew, (q)uit\" % filename)\n ans = input()\n ans = (ans or \"y\")[0].lower()\n if ans == \"q\":\n raise UserInterrupt\n if ans == \"n\":\n logger.warning(\"skipping script '%s'\", filename)\n return False\n if ans == \"v\":\n print(\"Content of the script '%s':\" % filename, file=sys.stderr)\n with open(filename) as f:\n print(f.read(), file=sys.stderr)\n if ans == \"y\":\n return True\n\n\nif __name__ == \"__main__\":\n try:\n sys.exit(main())\n\n except UserInterrupt:\n logger.info(\"user interrupt\")\n sys.exit(1)\n\n except ScriptException as e:\n logger.error(\"%s\", e)\n sys.exit(1)\n\n except Exception as e:\n logger.exception(\"Unexpected error: %s - %s\", e.__class__.__name__, e)\n sys.exit(1)\n\n except KeyboardInterrupt:\n logger.info(\"user interrupt\")\n sys.exit(1)\n", "id": "1517693", "language": "Python", "matching_score": 3.5898211002349854, "max_stars_count": 3, "path": "scripts/patch_db.py" }, { "content": "#!/usr/bin/env python\nr\"\"\"Import a gnomad file.\n\nRead a resource (file, url) and print on stdout a stream of data suitable\nfor COPY. 
We'll see later what to do with it...\n\nExample usage:\n\n import_gnomad.py -v https://example.com/gnomad.genomes.r3.0.sites.chr22.vcf.bgz \\\n | psql -c \"copy gnomad.annotation_v3 from stdin\" \\\n \"host=$(dchost db) dbname=phenopolis_db user=phenopolis_api\"\n\"\"\"\n\nimport re\nimport os\nimport sys\nimport gzip\nimport logging\nfrom urllib.parse import quote\nfrom urllib.request import urlopen\nfrom argparse import ArgumentParser, RawDescriptionHelpFormatter\n\nlogger = logging.getLogger()\nlogging.basicConfig(level=logging.DEBUG, format=\"%(asctime)s %(levelname)s %(message)s\")\n\n\nclass ScriptError(Exception):\n \"\"\"Controlled exception raised by the script.\"\"\"\n\n\nclass VCFTransform:\n \"\"\"\n Transform a stream of VCF data into data suitable for PostgreSQL COPY.\n \"\"\"\n\n def __init__(self, fields):\n self.fields = fields\n self._field_regexps = [re.compile(rf\"\\b{re.escape(f)}=([^;]*)\".encode(\"ascii\")) for f in fields]\n self.convert_line = self.convert_line_start\n self.headers = {} # map title -> col idx\n\n def convert_line_start(self, line):\n if line.startswith(b\"##INFO\"):\n self.parse_info(line)\n elif line.startswith(b\"##\"):\n pass\n elif line.startswith(b\"#CHROM\"):\n self.parse_header(line)\n self.convert_line = self.convert_line_data\n return None\n\n def parse_info(self, line):\n # TODO, in case we need other types than int and float\n pass\n\n CHROM = 0\n POS = 1\n REF = 3\n ALT = 4\n INFO = 7\n\n def parse_header(self, line):\n headers = line.decode(\"ascii\").lstrip(\"#\").rstrip().split(\"\\t\")\n self.headers = {col: i for i, col in enumerate(headers)}\n\n # If these fail the converter must become more generic\n assert self.headers[\"CHROM\"] == self.CHROM\n assert self.headers[\"POS\"] == self.POS\n assert self.headers[\"REF\"] == self.REF\n assert self.headers[\"ALT\"] == self.ALT\n assert self.headers[\"INFO\"] == self.INFO\n\n def convert_line_data(self, line):\n fields = line.split(b\"\\t\")\n fields[-1] = fields[-1].rstrip()\n out = [fields[self.CHROM][3:], fields[self.POS], fields[self.REF], fields[self.ALT]]\n out.extend(self.parse_info_field(fields[self.INFO]))\n return b\"\\t\".join(out) + b\"\\n\"\n\n def parse_info_field(self, data):\n rv = []\n for rex in self._field_regexps:\n m = rex.search(data)\n rv.append(m.group(1) if m is not None else b\"\\\\N\")\n return rv\n\n\ndef main():\n opt = parse_cmdline()\n logger.setLevel(opt.loglevel)\n url = as_url(opt.file)\n # TODO: make it configurable\n tx = VCFTransform(fields=[\"AC\", \"AF\"])\n logger.info(\"reading from %s\", url)\n with urlopen(url) as f:\n if os.path.splitext(url)[-1] in (\".bgz\", \".gz\"):\n f = gzip.GzipFile(fileobj=f)\n nr = nw = 0\n for line in f:\n nr += 1\n if nr % 100_000 == 0:\n logger.debug(\"%s lines read\", nr)\n line = tx.convert_line(line)\n if line is not None:\n nw += 1\n sys.stdout.buffer.write(line)\n\n logger.info(\"%s lines written\", nw)\n\n\ndef as_url(name):\n if \"://\" in name:\n # looks like an url to me\n return name\n\n # if it's a file, does it even exist?\n if not os.path.isfile(name):\n raise ScriptError(f\"not a valid file: {name}\")\n\n name = quote(os.path.abspath(name))\n return f\"file://{name}\"\n\n\ndef parse_cmdline():\n parser = ArgumentParser(description=__doc__, formatter_class=RawDescriptionHelpFormatter)\n parser.add_argument(\"file\", metavar=\"FILE_OR_URL\", help=\"the resource to parse\")\n\n g = parser.add_mutually_exclusive_group()\n g.add_argument(\n \"-q\",\n \"--quiet\",\n help=\"talk less\",\n 
dest=\"loglevel\",\n action=\"store_const\",\n const=logging.WARN,\n default=logging.INFO,\n )\n g.add_argument(\n \"-v\",\n \"--verbose\",\n help=\"talk more\",\n dest=\"loglevel\",\n action=\"store_const\",\n const=logging.DEBUG,\n default=logging.INFO,\n )\n\n opt = parser.parse_args()\n\n return opt\n\n\nif __name__ == \"__main__\":\n try:\n sys.exit(main())\n\n except ScriptError as e:\n logger.error(\"%s\", e)\n sys.exit(1)\n\n except Exception:\n logger.exception(\"unexpected error\")\n sys.exit(1)\n\n except KeyboardInterrupt:\n logger.info(\"user interrupt\")\n sys.exit(1)\n", "id": "11855638", "language": "Python", "matching_score": 2.898437261581421, "max_stars_count": 24, "path": "scripts/import_gnomad.py" }, { "content": "#!/usr/bin/env python\n\"\"\"Import an Human Phenotype Ontology data .obo file into the database\n\nSee https://hpo.jax.org/app/download/ontology\n\"\"\"\n\nimport logging\nimport re\nimport sys\n\nimport obonet\nimport psycopg2\n\nlogger = logging.getLogger()\nlogging.basicConfig(level=logging.INFO, format=\"%(asctime)s %(levelname)s %(message)s\")\n\nDEFAULT_URL = \"https://raw.githubusercontent.com/obophenotype/human-phenotype-ontology/master/hp.obo\"\n\n\ndef main():\n opt = parse_cmdline()\n\n logger.info(\"reading %s\", opt.input)\n net = obonet.read_obo(opt.input)\n\n logger.info(\"connecting to %s\", opt.dsn)\n with psycopg2.connect(opt.dsn) as conn:\n cur = conn.cursor()\n\n logger.info(\"importing terms\")\n for k, node in net.nodes.items():\n import_term(cur, k, node)\n\n logger.info(\"importing other details\")\n for k, node in net.nodes.items():\n import_is_a(cur, k, node)\n import_xref(cur, k, node)\n import_synonym(cur, k, node)\n import_alt(cur, k, node)\n\n logger.info(\"refreshing path matview\")\n cur.execute(\"refresh materialized view hpo.is_a_path\")\n\n\ndef import_term(cur, k, node):\n descr = node.get(\"def\")\n if descr is not None:\n descr = dequote(descr)\n\n args = [id_to_int(k), k, node[\"name\"], descr, node.get(\"comment\")]\n\n cur.execute(\n \"\"\"\ninsert into hpo.term (id, hpo_id, name, description, comment)\nvalues (%s, %s, %s, %s, %s)\non conflict on constraint term_pkey do update set\n name = excluded.name,\n description = excluded.description,\n comment = excluded.comment\n\"\"\",\n args,\n )\n\n\ndef import_is_a(cur, k, node):\n isas = node.get(\"is_a\")\n if not isas:\n return\n\n for isa in isas:\n if \"!\" in isa:\n isa = isa.split(\"!\")[0].strip()\n\n cur.execute(\n \"\"\"\ninsert into hpo.is_a (term_id, is_a_id) values (%s, %s)\non conflict on constraint is_a_pkey do nothing\n\"\"\",\n [id_to_int(k), id_to_int(isa)],\n )\n\n\ndef import_xref(cur, k, node):\n xrefs = node.get(\"xref\")\n if not xrefs:\n return\n\n for xref in xrefs:\n if \" \" in xref:\n xref, descr = xref.split(\" \", 1)\n descr = dequote(descr)\n else:\n descr = None\n\n cur.execute(\n \"\"\"\ninsert into hpo.xref (term_id, xref, description) values (%s, %s, %s)\non conflict on constraint xref_pkey do update\n set description = excluded.description\n\"\"\",\n [id_to_int(k), xref, descr],\n )\n\n\ndef import_synonym(cur, k, node):\n syns = node.get(\"synonym\")\n if not syns:\n return\n\n for syn in syns:\n syn = dequote(syn)\n\n cur.execute(\n \"\"\"\ninsert into hpo.synonym (term_id, description) values (%s, %s)\non conflict on constraint synonym_term_id_description_key do nothing\n\"\"\",\n [id_to_int(k), syn],\n )\n\n\ndef import_alt(cur, k, node):\n alts = node.get(\"alt_id\")\n if not alts:\n return\n\n for alt in alts:\n cur.execute(\n 
\"\"\"\ninsert into hpo.alt (id, alt_id, term_id) values (%s, %s, %s)\non conflict on constraint alt_pkey do nothing\n\"\"\",\n [id_to_int(alt), alt, id_to_int(k)],\n )\n\n\ndef id_to_int(s):\n assert s.startswith(\"HP:\")\n return int(s.split(\":\")[1])\n\n\ndef dequote(s):\n assert s.startswith('\"')\n rv = re.sub(r'^\"([^\"]+)\".*$', r\"\\1\", s)\n assert '\"' not in rv\n return rv\n\n\ndef parse_cmdline():\n from argparse import ArgumentParser\n\n parser = ArgumentParser(description=__doc__)\n parser.add_argument(\n \"input\", nargs=\"?\", default=DEFAULT_URL, help=\"file name or url to import from [default: %(default)s]\",\n )\n parser.add_argument(\n \"--dsn\", default=\"\", help=\"connection string to import into [default: %(default)r]\",\n )\n\n opt = parser.parse_args()\n\n return opt\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n", "id": "12013869", "language": "Python", "matching_score": 0.8289655447006226, "max_stars_count": 3, "path": "scripts/import_hpo.py" }, { "content": "\"\"\"\nReceived Uploaded Files\n\"\"\"\nimport os\n\nimport boto3\nfrom botocore.client import Config\nfrom flask import jsonify, request\n\nfrom views import application\nfrom views.auth import requires_admin\nfrom views.exceptions import PhenopolisException\n\nUPLOAD_FOLDER = \"upload\"\n\nS3_KEY = os.getenv(\"VCF_S3_KEY\")\nSECRET_ACCESS_KEY = os.environ.get(\"VCF_S3_SECRET\")\nDOWNLOAD_SIGNED_URL_TIME = 300\n\ns3_client = boto3.client(\n \"s3\",\n aws_access_key_id=S3_KEY,\n aws_secret_access_key=SECRET_ACCESS_KEY,\n config=Config(signature_version=\"s3v4\", region_name=\"eu-west-2\"),\n)\n\n\[email protected](\"/preSignS3URL\", methods=[\"GET\", \"POST\"])\n@requires_admin\ndef presign_S3():\n data = request.get_json()\n filename = data.get(\"filename\")\n prefix = data.get(\"prefix\")\n\n try:\n response = s3_client.generate_presigned_post(\n Bucket=\"phenopolis-website-uploads\", Key=prefix + \"/\" + filename, ExpiresIn=3600\n )\n except PhenopolisException as e:\n application.logger.error(str(e))\n return None\n\n return jsonify(response), 200\n\n\[email protected](\"/files/<individual_id>\", methods=[\"GET\", \"POST\"])\n@requires_admin\ndef getUploadedFile(individual_id):\n try:\n response = s3_client.list_objects_v2(Bucket=\"phenopolis-website-uploads\", Prefix=individual_id, MaxKeys=100)\n\n if response[\"KeyCount\"] == 0:\n return jsonify(response), 404\n except PhenopolisException as e:\n application.logger.error(str(e))\n return None\n message = \"get Uploaded File Success\"\n return jsonify(message=message, response=response), 200\n\n\[email protected](\"/files\", methods=[\"DELETE\"])\n@requires_admin\ndef delete_file():\n data = request.get_json()\n fileKey = data.get(\"fileKey\")\n response = s3_client.delete_object(Bucket=\"phenopolis-website-uploads\", Key=fileKey)\n\n return jsonify(message=\"Delete File Success\", response=response), 200\n\n\[email protected](\"/file_download\", methods=[\"POST\"])\n@requires_admin\ndef download_file():\n data = request.get_json()\n fileKey = data.get(\"fileKey\")\n response = s3_client.generate_presigned_url(\n \"get_object\",\n Params={\"Bucket\": \"phenopolis-website-uploads\", \"Key\": fileKey},\n ExpiresIn=DOWNLOAD_SIGNED_URL_TIME,\n )\n return jsonify(filename=fileKey, response=response), 200\n", "id": "12226126", "language": "Python", "matching_score": 2.0529286861419678, "max_stars_count": 3, "path": "views/upload.py" }, { "content": "def test_presign_S3(_admin_client):\n patientID = \"PH0001\"\n payload = {\n \"prefix\": 
patientID,\n \"filename\": patientID + \"_\" + \"a_file.vcf\",\n \"contentType\": \"multipart/form-data\",\n }\n resp = _admin_client.post(\"/preSignS3URL\", json=payload, content_type=\"application/json\")\n assert resp.status_code == 200\n assert \"x-amz-credential\" in str(resp.json)\n assert resp.json.get(\"fields\").get(\"key\") == f\"{patientID}/{payload['filename']}\"\n", "id": "12364855", "language": "Python", "matching_score": 0.9253958463668823, "max_stars_count": 24, "path": "tests/test_upload.py" }, { "content": "from flask import request\n\nfrom views import application\nfrom views.exceptions import PhenopolisException\n\n\ndef _get_json_payload(clazz=None):\n if not request.is_json:\n raise PhenopolisException(\"Only mimetype application/json is accepted\", 400)\n payload = request.get_json(silent=True)\n if not payload:\n raise PhenopolisException(\"Empty payload or wrong formatting\", 400)\n if not isinstance(payload, list) and not isinstance(payload, dict):\n raise PhenopolisException(f\"Payload of unexpected type: {type(payload)}\", 400)\n application.logger.debug(payload)\n if clazz is not None:\n return _parse_payload(payload, clazz)\n return payload\n\n\ndef _parse_payload(payload, model_class):\n if isinstance(payload, dict):\n objects = [model_class(**payload)]\n elif isinstance(payload, list):\n objects = [model_class(**p) for p in payload]\n else:\n raise PhenopolisException(f\"Payload of unexpected type: {type(payload)}\", 400)\n return objects\n", "id": "7421644", "language": "Python", "matching_score": 0.5583513975143433, "max_stars_count": 24, "path": "views/helpers.py" }, { "content": "# Uncomment to run this module directly. TODO comment out.\n#import sys, os\n#sys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n# End of uncomment.\n\nimport unittest\nimport subprocess\nimport runserver\nfrom flask import Flask, current_app, jsonify\nfrom views import neo4j_driver\nfrom views import my_patients\nfrom views import session\nimport helper\nimport json\n\n\n\nclass MyPatientsPageTestCase(unittest.TestCase):\n\n def setUp(self):\n runserver.app.config['TESTING'] = True\n runserver.app.config['DB_NAME_USERS'] = 'test_users'\n self.app = runserver.app.test_client()\n #helper.create_neo4j_demo_user()\n helper.login(self.app)\n helper.my_patients_neo4j_data()\n\n\n def tearDown(self):\n self.app.get('/logout', follow_redirects=True)\n\n\n def test_my_patients_page(self): \n page = self.app.get('/my_patients', follow_redirects=True)\n assert page.status_code == 200 # NB this test doesn't wait for the data to load.\n\n \n def test_my_patients_functionality(self): \n app = Flask(__name__)\n with app.test_request_context():\n records = my_patients.get_individuals('demo')\n # Here we create the Flask Response object, containing json, \n # that the /my_patients page receives. 
We then test \n # that the expected data is available.\n data=jsonify(result=records)\n assert data.status == '200 OK'\n parsed_json = json.loads(data.data)\n # First person.\n i=0\n assert parsed_json['result'][i]['individual'] == 'person2'\n assert parsed_json['result'][i]['gender'] == 'F'\n for pheno in parsed_json['result'][i]['phenotypes'] :\n assert (pheno['name'] == 'Abnormality of the retina' or \n pheno['name'] == 'Visual impairment' or \n pheno['name'] == 'Macular dystrophy') \n assert parsed_json['result'][i]['phenotypeScore'] == 0.69\n assert parsed_json['result'][i]['hom_count'] == 1\n assert parsed_json['result'][i]['het_count'] == 2\n for gene in parsed_json['result'][i]['genes'] :\n assert gene == 'RPGR' or gene == 'TTLL5' or gene == 'DRAM2' or gene == 'TRIM32'\n # Next person.\n i=1\n assert parsed_json['result'][i]['individual'] == 'person1'\n assert parsed_json['result'][i]['gender'] == 'M'\n assert parsed_json['result'][i]['phenotypes'][0]['name'] == 'Visual impairment'\n assert parsed_json['result'][i]['phenotypeScore'] == 0.69\n assert parsed_json['result'][i]['hom_count'] == 1\n assert parsed_json['result'][i]['het_count'] == 1\n assert parsed_json['result'][i]['genes'][0] == 'TTLL5'\n\n \n\nif __name__ == '__main__':\n unittest.main()\n\n", "id": "10900349", "language": "Python", "matching_score": 4.324718952178955, "max_stars_count": 24, "path": "tests/test_my_patients.py" }, { "content": "# Uncomment to run this module directly. TODO comment out.\n#import sys, os\n#sys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n# End of uncomment.\n\nimport unittest\nimport subprocess\nimport runserver\nfrom flask import Flask, current_app\nfrom views import neo4j_driver\nfrom views import individual\nimport helper\nimport json\n\n\n\n\nclass IndividualPageTestCase(unittest.TestCase):\n\n def setUp(self):\n runserver.app.config['TESTING'] = True\n runserver.app.config['DB_NAME_USERS'] = 'test_users'\n self.app = runserver.app.test_client()\n helper.login(self.app)\n helper.my_patients_neo4j_data()\n\n\n def tearDown(self):\n self.app.get('/logout', follow_redirects=True)\n\n\n def test_venn_json_page(self): \n page = self.app.get('/venn_json/person2', follow_redirects=True)\n assert page.status_code == 200 \n parsed_json = json.loads(page.data)\n # Check some key points\n i=4\n assert parsed_json['result'][i]['key'][0] == 'Abnormality of the retina'\n assert parsed_json['result'][i]['key'][1] == 'Visual impairment'\n assert parsed_json['result'][i]['value'][0] == 'TTLL5'\n assert parsed_json['result'][i]['value'][1] == 'DRAM2'\n i=5\n assert parsed_json['result'][i]['key'][0] == 'Macular dystrophy'\n assert parsed_json['result'][i]['key'][1] == 'Visual impairment'\n assert parsed_json['result'][i]['value'][0] == 'TRIM32'\n i=6\n assert parsed_json['result'][i]['key'][0] == 'Abnormality of the retina'\n assert parsed_json['result'][i]['key'][1] == 'Macular dystrophy'\n assert parsed_json['result'][i]['key'][2] == 'Visual impairment'\n assert not parsed_json['result'][i]['value']\n \nif __name__ == '__main__':\n unittest.main()\n\n", "id": "589194", "language": "Python", "matching_score": 0.4483530819416046, "max_stars_count": 24, "path": "tests/test_individual.py" }, { "content": "### all the mongodb reading/writing code lives here now\n\n\ndef load_db():\n \"\"\"\n Load the database\n \"\"\"\n # Initialize database\n # Don't need to explicitly create tables with mongo, just indices\n confirm = raw_input('This will drop the database and reload. 
Are you sure you want to continue? [no] ')\n if not confirm.startswith('y'):\n print('Exiting...')\n sys.exit(1)\n all_procs = []\n for load_function in [load_variants_file, load_dbsnp_file, load_base_coverage, load_gene_models, load_constraint_information]:\n procs = load_function()\n all_procs.extend(procs)\n print(\"Started %s processes to run %s\" % (len(procs), load_function.__name__))\n\n [p.join() for p in all_procs]\n print('Done! Loading MNPs...')\n load_mnps()\n print('Done! Creating cache...')\n create_cache()\n print('Done!')\n\n\ndef load_base_coverage():\n \"\"\" \"\"\"\n def load_coverage(coverage_files, i, n, db):\n coverage_generator = parse_tabix_file_subset(coverage_files, i, n, get_base_coverage_from_file)\n try:\n db.base_coverage.insert(coverage_generator, w=0)\n except pymongo.errors.InvalidOperation, e:\n print(e)\n # handle error when coverage_generator is empty\n pass \n db = get_db()\n db.base_coverage.drop()\n print(\"Dropped db.base_coverage\")\n # load coverage first; variant info will depend on coverage\n db.base_coverage.ensure_index('xpos')\n procs = []\n coverage_files = app.config['BASE_COVERAGE_FILES']\n num_procs = app.config['LOAD_DB_PARALLEL_PROCESSES']\n random.shuffle(app.config['BASE_COVERAGE_FILES'])\n for i in range(num_procs):\n p = Process(target=load_coverage, args=(coverage_files, i, num_procs, db))\n p.start()\n procs.append(p)\n return procs\n #print 'Done loading coverage. Took %s seconds' % int(time.time() - start_time)\n\n\ndef load_variants_file():\n def load_variants(sites_file, i, n, db):\n for f in sites_file:\n variants_generator = parse_tabix_file_subset([f], i, n, get_variants_from_sites_vcf)\n try:\n db.variants.insert(variants_generator, w=0)\n except pymongo.errors.InvalidOperation:\n pass # handle error when variant_generator is empty\n db = get_db()\n db.variants.drop()\n print(\"Dropped db.variants\")\n # grab variants from sites VCF\n db.variants.ensure_index('xpos')\n db.variants.ensure_index('xstart')\n db.variants.ensure_index('xstop')\n db.variants.ensure_index('rsid')\n db.variants.ensure_index('genes')\n db.variants.ensure_index('transcripts')\n sites_vcfs = app.config['SITES_VCFS']\n print(sites_vcfs)\n #if len(sites_vcfs) > 1: raise Exception(\"More than one sites vcf file found: %s\" % sites_vcfs)\n procs = []\n num_procs = app.config['LOAD_DB_PARALLEL_PROCESSES']\n #pdb.set_trace()\n for i in range(num_procs):\n p = Process(target=load_variants, args=(sites_vcfs, i, num_procs, db))\n p.start()\n procs.append(p)\n return procs\n\n #print 'Done loading variants. Took %s seconds' % int(time.time() - start_time)\n\n\ndef load_constraint_information():\n db = get_db()\n\n db.constraint.drop()\n print 'Dropped db.constraint.'\n\n start_time = time.time()\n\n with gzip.open(app.config['CONSTRAINT_FILE']) as constraint_file:\n for transcript in get_constraint_information(constraint_file):\n db.constraint.insert(transcript, w=0)\n\n db.constraint.ensure_index('transcript')\n print 'Done loading constraint info. 
Took %s seconds' % int(time.time() - start_time)\n\n\ndef load_mnps():\n db = get_db()\n start_time = time.time()\n\n db.variants.ensure_index('has_mnp')\n print 'Done indexing.'\n while db.variants.find_and_modify({'has_mnp' : True}, {'$unset': {'has_mnp': '', 'mnps': ''}}): pass\n print 'Deleted MNP data.'\n\n with gzip.open(app.config['MNP_FILE']) as mnp_file:\n for mnp in get_mnp_data(mnp_file):\n variant = lookups.get_raw_variant(db, mnp['xpos'], mnp['ref'], mnp['alt'], True)\n db.variants.find_and_modify({'_id': variant['_id']}, {'$set': {'has_mnp': True}, '$push': {'mnps': mnp}}, w=0)\n\n db.variants.ensure_index('has_mnp')\n print 'Done loading MNP info. Took %s seconds' % int(time.time() - start_time)\n\n\ndef load_gene_models():\n db = get_db()\n\n db.genes.drop()\n db.transcripts.drop()\n db.exons.drop()\n print 'Dropped db.genes, db.transcripts, and db.exons.'\n\n start_time = time.time()\n\n canonical_transcripts = {}\n with gzip.open(app.config['CANONICAL_TRANSCRIPT_FILE']) as canonical_transcript_file:\n for gene, transcript in get_canonical_transcripts(canonical_transcript_file):\n canonical_transcripts[gene] = transcript\n\n omim_annotations = {}\n with gzip.open(app.config['OMIM_FILE']) as omim_file:\n for fields in get_omim_associations(omim_file):\n if fields is None:\n continue\n gene, transcript, accession, description = fields\n omim_annotations[gene] = (accession, description)\n\n dbnsfp_info = {}\n with gzip.open(app.config['DBNSFP_FILE']) as dbnsfp_file:\n for dbnsfp_gene in get_dbnsfp_info(dbnsfp_file):\n other_names = [other_name.upper() for other_name in dbnsfp_gene['gene_other_names']]\n dbnsfp_info[dbnsfp_gene['ensembl_gene']] = (dbnsfp_gene['gene_full_name'], other_names)\n\n print 'Done loading metadata. Took %s seconds' % int(time.time() - start_time)\n\n # grab genes from GTF\n start_time = time.time()\n with gzip.open(app.config['GENCODE_GTF']) as gtf_file:\n for gene in get_genes_from_gencode_gtf(gtf_file):\n gene_id = gene['gene_id']\n if gene_id in canonical_transcripts:\n gene['canonical_transcript'] = canonical_transcripts[gene_id]\n if gene_id in omim_annotations:\n gene['omim_accession'] = omim_annotations[gene_id][0]\n gene['omim_description'] = omim_annotations[gene_id][1]\n if gene_id in dbnsfp_info:\n gene['full_gene_name'] = dbnsfp_info[gene_id][0]\n gene['other_names'] = dbnsfp_info[gene_id][1]\n db.genes.insert(gene, w=0)\n\n print 'Done loading genes. Took %s seconds' % int(time.time() - start_time)\n\n start_time = time.time()\n db.genes.ensure_index('gene_id')\n db.genes.ensure_index('gene_name_upper')\n db.genes.ensure_index('gene_name')\n db.genes.ensure_index('other_names')\n db.genes.ensure_index('xstart')\n db.genes.ensure_index('xstop')\n print 'Done indexing gene table. Took %s seconds' % int(time.time() - start_time)\n\n # and now transcripts\n start_time = time.time()\n with gzip.open(app.config['GENCODE_GTF']) as gtf_file:\n db.transcripts.insert((transcript for transcript in get_transcripts_from_gencode_gtf(gtf_file)), w=0)\n print 'Done loading transcripts. Took %s seconds' % int(time.time() - start_time)\n\n start_time = time.time()\n db.transcripts.ensure_index('transcript_id')\n db.transcripts.ensure_index('gene_id')\n print 'Done indexing transcript table. 
Took %s seconds' % int(time.time() - start_time)\n\n # Building up gene definitions\n start_time = time.time()\n with gzip.open(app.config['GENCODE_GTF']) as gtf_file:\n db.exons.insert((exon for exon in get_exons_from_gencode_gtf(gtf_file)), w=0)\n print 'Done loading exons. Took %s seconds' % int(time.time() - start_time)\n\n start_time = time.time()\n db.exons.ensure_index('exon_id')\n db.exons.ensure_index('transcript_id')\n db.exons.ensure_index('gene_id')\n print 'Done indexing exon table. Took %s seconds' % int(time.time() - start_time)\n\n return []\n\n\ndef load_dbsnp_file():\n db = get_db()\n def load_dbsnp(dbsnp_file, i, n, db):\n if os.path.isfile(dbsnp_file + \".tbi\"):\n dbsnp_record_generator = parse_tabix_file_subset([dbsnp_file], i, n, get_snp_from_dbsnp_file)\n try:\n db.dbsnp.insert(dbsnp_record_generator, w=0)\n except pymongo.errors.InvalidOperation:\n pass # handle error when coverage_generator is empty\n\n else:\n with gzip.open(dbsnp_file) as f:\n db.dbsnp.insert((snp for snp in get_snp_from_dbsnp_file(f)), w=0)\n db.dbsnp.drop()\n db.dbsnp.ensure_index('rsid')\n db.dbsnp.ensure_index('xpos')\n start_time = time.time()\n dbsnp_file = app.config['DBSNP_FILE']\n\n print \"Loading dbsnp from %s\" % dbsnp_file\n if os.path.isfile(dbsnp_file + \".tbi\"):\n num_procs = app.config['LOAD_DB_PARALLEL_PROCESSES']\n else:\n # see if non-tabixed .gz version exists\n if os.path.isfile(dbsnp_file):\n print((\"WARNING: %(dbsnp_file)s.tbi index file not found. Will use single thread to load dbsnp.\"\n \"To create a tabix-indexed dbsnp file based on UCSC dbsnp, do: \\n\"\n \" wget http://hgdownload.soe.ucsc.edu/goldenPath/hg19/database/snp141.txt.gz \\n\"\n \" gzcat snp141.txt.gz | cut -f 1-5 | bgzip -c > snp141.txt.bgz \\n\"\n \" tabix -0 -s 2 -b 3 -e 4 snp141.txt.bgz\") % locals())\n num_procs = 1\n else:\n raise Exception(\"dbsnp file %s(dbsnp_file)s not found.\" % locals())\n\n procs = []\n for i in range(num_procs):\n p = Process(target=load_dbsnp, args=(dbsnp_file, i, num_procs, db))\n p.start()\n procs.append(p)\n\n return procs\n #print 'Done loading dbSNP. Took %s seconds' % int(time.time() - start_time)\n #start_time = time.time()\n #db.dbsnp.ensure_index('rsid')\n #print 'Done indexing dbSNP table. 
Took %s seconds' % int(time.time() - start_time)\n\n\n\n", "id": "8383107", "language": "Python", "matching_score": 2.128678560256958, "max_stars_count": 24, "path": "mongodb/__init__.py" }, { "content": "#!/usr/bin/env python3\nfrom typing import List, Tuple\n\nfrom pybiomart import Server\nimport pandas as pd\nimport re\nimport logging\nimport time\n\nSYNONYM = \"external_synonym\"\nTRANSCRIPT_VERSION = \"transcript_version\"\nGENE_GC_CONTENT = \"percentage_gene_gc_content\"\nHGNC_SYMBOL = \"hgnc_symbol\"\nHGNC_ID = \"hgnc_id\"\nGENE_BIOTYPE = \"gene_biotype\"\nBAND = \"band\"\nEND_POSITION = \"end_position\"\nSTART_POSITION = \"start_position\"\nDESCRIPTION = \"description\"\nPEPTIDE_VERSION = \"peptide_version\"\nVERSION = \"version\"\nTRANSCRIPT_LENGTH = \"transcript_length\"\nTRANSCRIPTION_START_SITE = \"transcription_start_site\"\nEND_PHASE = \"end_phase\"\nPHASE = \"phase\"\nRANK = \"rank\"\nCONSTITUTIVE = \"is_constitutive\"\nEXON_CHROM_END = \"exon_chrom_end\"\nEXON_CHROM_START = \"exon_chrom_start\"\nENSEMBL_EXON_ID = \"ensembl_exon_id\"\nCANONICAL = \"canonical\"\nASSEMBLY = \"assembly\"\nUNIPROTSWISSPROT = \"uniprotswissprot\"\nUNIPARC = \"uniparc\"\nTRANSCRIPT_BIOTYPE = \"transcript_biotype\"\nCDS_LENGTH = \"cds_length\"\nTRANSCRIPT_END = \"transcript_end\"\nTRANSCRIPT_START = \"transcript_start\"\nSTRAND = \"strand\"\nCHROMOSOME_NAME = \"chromosome_name\"\nENSEMBL_PEPTIDE_ID = \"ensembl_peptide_id\"\nENSEMBL_TRANSCRIPT_ID = \"ensembl_transcript_id\"\nENSEMBL_GENE_ID = \"ensembl_gene_id\"\n\n\nclass BiomartReader:\n\n BIOMART_SERVER_URL_GRCH38 = \"http://www.ensembl.org\" # use GRCh38\n DATASET = \"hsapiens_gene_ensembl\"\n MART = \"ENSEMBL_MART_ENSEMBL\"\n BIOMART_SERVER_URL_GRCH37 = \"http://grch37.ensembl.org\" # use GRCh37\n\n def __init__(self, filters):\n self.filters = filters\n server_grch37 = Server(host=self.BIOMART_SERVER_URL_GRCH37)\n self.dataset_grch37 = server_grch37.marts[self.MART].datasets[self.DATASET]\n server_grch38 = Server(host=self.BIOMART_SERVER_URL_GRCH38)\n self.dataset_grch38 = server_grch38.marts[self.MART].datasets[self.DATASET]\n\n def get_genes(self) -> pd.DataFrame:\n genes_attributes = [\n ENSEMBL_GENE_ID,\n VERSION,\n DESCRIPTION,\n CHROMOSOME_NAME,\n START_POSITION,\n END_POSITION,\n STRAND,\n BAND,\n GENE_BIOTYPE,\n HGNC_ID,\n HGNC_SYMBOL,\n GENE_GC_CONTENT,\n ]\n\n genes_grch37, genes_grch38 = self._get_attributes(genes_attributes)\n\n # flags the latest version genes to avoid repetitions\n genes_grch37[\"latest\"] = self._add_latest_flag(df=genes_grch37, id_field=ENSEMBL_GENE_ID, version_field=VERSION)\n genes_grch38[\"latest\"] = self._add_latest_flag(df=genes_grch38, id_field=ENSEMBL_GENE_ID, version_field=VERSION)\n\n genes = pd.concat([genes_grch37, genes_grch38])\n genes.reset_index(drop=True, inplace=True)\n\n # filter out non latest genes\n genes = genes[genes.latest]\n genes.drop(\"latest\", axis=1, inplace=True)\n\n # remove duplicates\n genes.drop_duplicates([ENSEMBL_GENE_ID, ASSEMBLY], inplace=True)\n\n # edit description to remove information between brackets\n genes[DESCRIPTION] = genes[DESCRIPTION].transform(\n lambda x: re.sub(r\"\\[.*\\]\", \"\", x).strip() if isinstance(x, str) else x\n )\n\n # some column renaming\n genes.rename(\n {GENE_BIOTYPE: \"biotype\", START_POSITION: \"start\", END_POSITION: \"end\", CHROMOSOME_NAME: \"chromosome\"},\n axis=1,\n inplace=True,\n )\n\n self._genes_sanity_checks(genes)\n\n return genes\n\n def get_transcripts(self):\n transcripts_attributes = [\n ENSEMBL_GENE_ID,\n 
ENSEMBL_TRANSCRIPT_ID,\n TRANSCRIPT_VERSION,\n ENSEMBL_PEPTIDE_ID,\n PEPTIDE_VERSION,\n CHROMOSOME_NAME,\n TRANSCRIPT_START,\n TRANSCRIPT_END,\n TRANSCRIPTION_START_SITE,\n STRAND,\n TRANSCRIPT_LENGTH,\n CDS_LENGTH,\n TRANSCRIPT_BIOTYPE,\n UNIPARC,\n ]\n\n transcripts_grch37, transcripts_grch38 = self._get_attributes(transcripts_attributes)\n\n # adds the canonical flag for both assemblies independently\n transcripts_grch37[CANONICAL] = self._add_canonical_transcript_flag(transcripts_grch37)\n transcripts_grch38[CANONICAL] = self._add_canonical_transcript_flag(transcripts_grch38)\n\n # flags the latest version genes to avoid repetitions\n transcripts_grch37[\"latest\"] = self._add_latest_flag(\n df=transcripts_grch37, id_field=ENSEMBL_TRANSCRIPT_ID, version_field=TRANSCRIPT_VERSION\n )\n transcripts_grch38[\"latest\"] = self._add_latest_flag(\n df=transcripts_grch38, id_field=ENSEMBL_TRANSCRIPT_ID, version_field=TRANSCRIPT_VERSION\n )\n\n transcripts = pd.concat([transcripts_grch37, transcripts_grch38])\n transcripts.reset_index(drop=True, inplace=True)\n\n # filter out non latest genes\n transcripts = transcripts[transcripts.latest]\n transcripts.drop(\"latest\", axis=1, inplace=True)\n\n # some column renaming\n transcripts.rename(\n {\n TRANSCRIPT_VERSION: VERSION,\n TRANSCRIPT_BIOTYPE: \"biotype\",\n TRANSCRIPT_START: \"start\",\n TRANSCRIPT_END: \"end\",\n CHROMOSOME_NAME: \"chromosome\",\n },\n axis=1,\n inplace=True,\n )\n\n self._transcripts_sanity_checks(transcripts)\n\n return transcripts\n\n def get_exons(self):\n exons_attributes = [\n ENSEMBL_GENE_ID,\n ENSEMBL_TRANSCRIPT_ID,\n ENSEMBL_EXON_ID,\n CHROMOSOME_NAME,\n EXON_CHROM_START,\n EXON_CHROM_END,\n CONSTITUTIVE,\n RANK,\n PHASE,\n END_PHASE,\n ]\n\n exons_grch37, exons_grch38 = self._get_attributes(exons_attributes)\n exons = pd.concat([exons_grch37, exons_grch38])\n exons.reset_index(drop=True, inplace=True)\n\n # some column renaming\n exons.rename(\n {EXON_CHROM_START: \"start\", EXON_CHROM_END: \"end\", CHROMOSOME_NAME: \"chromosome\"}, axis=1, inplace=True\n )\n\n self._exons_sanity_checks(exons)\n\n return exons\n\n def get_gene_synonyms(self, genes: pd.DataFrame) -> pd.DataFrame:\n synonym_attributes = [ENSEMBL_GENE_ID, SYNONYM]\n synonyms_grch37, synonyms_grch38 = self._get_attributes(synonym_attributes)\n synonyms = pd.concat([synonyms_grch37, synonyms_grch38])\n # map synonyms to internal gene ids\n synonyms = genes[[\"identifier\", ENSEMBL_GENE_ID, ASSEMBLY]].join(\n synonyms[[SYNONYM, ENSEMBL_GENE_ID, ASSEMBLY]].set_index([ENSEMBL_GENE_ID, ASSEMBLY]),\n on=[ENSEMBL_GENE_ID, ASSEMBLY],\n )[[\"identifier\", SYNONYM]]\n # remove empty synonyms\n synonyms.rename({\"identifier\": \"gene\"}, axis=1, inplace=True)\n synonyms.dropna(inplace=True)\n return synonyms\n\n def get_uniprot(self, trancripts: pd.DataFrame) -> pd.DataFrame:\n uniprot_attributes = [ENSEMBL_TRANSCRIPT_ID, UNIPROTSWISSPROT]\n uniprot_grch37, uniprot_grch38 = self._get_attributes(uniprot_attributes)\n uniprot = pd.concat([uniprot_grch37, uniprot_grch38])\n # map synonyms to internal gene ids\n uniprot = trancripts[[\"identifier\", ENSEMBL_TRANSCRIPT_ID, ASSEMBLY]].join(\n uniprot[[UNIPROTSWISSPROT, ENSEMBL_TRANSCRIPT_ID, ASSEMBLY]].set_index([ENSEMBL_TRANSCRIPT_ID, ASSEMBLY]),\n on=[ENSEMBL_TRANSCRIPT_ID, ASSEMBLY],\n )[[\"identifier\", UNIPROTSWISSPROT]]\n # remove empty synonyms\n uniprot.rename({\"identifier\": \"transcript\"}, axis=1, inplace=True)\n uniprot.dropna(inplace=True)\n return uniprot\n\n def _add_canonical_transcript_flag(self, 
transcripts: pd.DataFrame) -> pd.Series:\n \"\"\"\n Adds a column indicating whether the transcript is canonical\n If more than one transcript chooses the one with the longest CDS trying to replicate the\n definition here http://www.ensembl.org/Help/Glossary\n \"\"\"\n canonical_transcripts = transcripts.groupby(ENSEMBL_GENE_ID)[[ENSEMBL_TRANSCRIPT_ID, CDS_LENGTH]].max()\n canonical_transcripts.reset_index(inplace=True)\n return transcripts[ENSEMBL_TRANSCRIPT_ID].isin(canonical_transcripts[ENSEMBL_TRANSCRIPT_ID])\n\n def _add_latest_flag(self, df: pd.DataFrame, id_field: str, version_field: str) -> pd.Series:\n \"\"\"\n Adds a column indicating whether the gene version is the latest in this table\n \"\"\"\n id_with_version = \"id_with_version\"\n df[id_with_version] = df[[id_field, version_field]].apply(lambda x: \"{}.{}\".format(x[0], x[1]), axis=1)\n latest = df.groupby(id_field)[[id_with_version, version_field]].max()\n latest.reset_index(inplace=True)\n is_latest = df[id_with_version].isin(latest[id_with_version])\n df.drop(id_with_version, axis=1, inplace=True)\n return is_latest\n\n def _get_attributes(self, attributes: List) -> Tuple[pd.DataFrame, pd.DataFrame]:\n # reads the transcripts from biomart\n transcripts_grch37 = self.dataset_grch37.query(attributes=attributes, filters=self.filters, use_attr_names=True)\n transcripts_grch38 = self.dataset_grch38.query(attributes=attributes, filters=self.filters, use_attr_names=True)\n # sets the assembly for each\n transcripts_grch37[ASSEMBLY] = \"GRCh37\"\n transcripts_grch38[ASSEMBLY] = \"GRCh38\"\n return transcripts_grch37, transcripts_grch38\n\n def _genes_sanity_checks(self, genes: pd.DataFrame) -> None:\n unique_genes = (\n genes[[ENSEMBL_GENE_ID, ASSEMBLY]].apply(lambda x: \"{}.{}\".format(x[0], x[1]), axis=1).value_counts()\n )\n assert unique_genes[unique_genes > 1].shape[0] == 0, \"Found non unique genes: {}\".format(\n unique_genes[unique_genes > 1]\n )\n assert genes.ensembl_gene_id.isna().sum() == 0, \"Found entry without ensembl id\"\n assert genes.assembly.isna().sum() == 0, \"Found entry without assembly\"\n assert genes.chromosome.isna().sum() == 0, \"Found entry without chromosome\"\n assert genes.start.isna().sum() == 0, \"Found entry without start\"\n assert genes.end.isna().sum() == 0, \"Found entry without end\"\n assert genes[genes.start > genes.end].shape[0] == 0, \"Start and end positions incoherent\"\n\n def _transcripts_sanity_checks(self, transcripts: pd.DataFrame) -> None:\n unique_transcripts = (\n transcripts[[ENSEMBL_TRANSCRIPT_ID, ASSEMBLY]]\n .apply(lambda x: \"{}.{}\".format(x[0], x[1]), axis=1)\n .value_counts()\n )\n assert unique_transcripts[unique_transcripts > 1].shape[0] == 0, \"Found non unique genes: {}\".format(\n unique_transcripts[unique_transcripts > 1]\n )\n assert transcripts.ensembl_transcript_id.isna().sum() == 0, \"Found entry without ensembl id\"\n assert transcripts.ensembl_gene_id.isna().sum() == 0, \"Found entry without gene ensembl id\"\n assert transcripts.assembly.isna().sum() == 0, \"Found entry without assembly\"\n assert transcripts.chromosome.isna().sum() == 0, \"Found entry without chromosome\"\n assert transcripts.start.isna().sum() == 0, \"Found entry without start\"\n assert transcripts.end.isna().sum() == 0, \"Found entry without end\"\n assert transcripts[transcripts.start > transcripts.end].shape[0] == 0, \"Start and end positions incoherent\"\n\n def _exons_sanity_checks(self, exons: pd.DataFrame) -> None:\n unique_exons = (\n exons[[ENSEMBL_TRANSCRIPT_ID, 
ENSEMBL_EXON_ID, ASSEMBLY]]\n .apply(lambda x: \"{}.{}.{}\".format(x[0], x[1], x[2]), axis=1)\n .value_counts()\n )\n assert unique_exons[unique_exons > 1].shape[0] == 0, \"Found non unique exons: {}\".format(\n unique_exons[unique_exons > 1]\n )\n unique_exons_by_rank = (\n exons[[ENSEMBL_TRANSCRIPT_ID, \"rank\", ASSEMBLY]]\n .apply(lambda x: \"{}.{}.{}\".format(x[0], x[1], x[2]), axis=1)\n .value_counts()\n )\n assert (\n unique_exons_by_rank[unique_exons_by_rank > 1].shape[0] == 0\n ), \"Found non unique exons by rank: {}\".format(unique_exons_by_rank[unique_exons_by_rank > 1])\n assert exons.ensembl_exon_id.isna().sum() == 0, \"Found entry without ensembl id\"\n assert exons.ensembl_transcript_id.isna().sum() == 0, \"Found entry without transcript ensembl id\"\n assert exons.ensembl_gene_id.isna().sum() == 0, \"Found entry without gene ensembl id\"\n assert exons.assembly.isna().sum() == 0, \"Found entry without assembly\"\n assert exons.chromosome.isna().sum() == 0, \"Found entry without chromosome\"\n assert exons.start.isna().sum() == 0, \"Found entry without start\"\n assert exons.end.isna().sum() == 0, \"Found entry without end\"\n assert exons[exons.start > exons.end].shape[0] == 0, \"Start and end positions incoherent\"\n\n\nif __name__ == \"__main__\":\n # possible transcript biotypes\n # \"[3prime_overlapping_ncRNA,antisense_RNA,bidirectional_promoter_lncRNA,IG_C_gene,IG_C_pseudogene,\n # IG_D_gene,IG_J_gene,IG_J_pseudogene,IG_pseudogene,IG_V_gene,IG_V_pseudogene,lincRNA,macro_lncRNA,\n # miRNA,misc_RNA,Mt_rRNA,Mt_tRNA,non_coding,polymorphic_pseudogene,processed_pseudogene,processed_transcript,\n # protein_coding,pseudogene,ribozyme,rRNA,scaRNA,scRNA,sense_intronic,sense_overlapping,snoRNA,snRNA,sRNA,\n # TEC,transcribed_processed_pseudogene,transcribed_unitary_pseudogene,transcribed_unprocessed_pseudogene,\n # translated_processed_pseudogene,TR_C_gene,TR_D_gene,TR_J_gene,TR_J_pseudogene,TR_V_gene,TR_V_pseudogene,\n # unitary_pseudogene,unprocessed_pseudogene,vaultRNA]\"\n # from http://www.ensembl.org/info/genome/genebuild/biotypes.html\n # GENCODE basic: \"A subset of the GENCODE transcript set, containing only 5' and 3' complete transcripts.\"\n # from http://www.ensembl.org/Help/Glossary\n filters = {\n \"transcript_biotype\": [\n \"protein_coding\",\n \"IG_C_gene\",\n \"IG_D_gene\",\n \"IG_J_gene\",\n \"IG_V_gene\",\n \"TR_C_gene\",\n \"TR_D_gene\",\n \"TR_J_gene\",\n \"TR_V_gene\",\n ],\n \"transcript_gencode_basic\": \"only\",\n # \"chromosome_name\": \"22\" # use for testing\n }\n logging.warning(\"Starting...\")\n start_time = time.time()\n reader = BiomartReader(filters=filters)\n\n genes = reader.get_genes()\n genes.index.rename(\"identifier\", inplace=True)\n genes.index += 1\n genes.to_csv(\"genes.csv\", index=True, header=True)\n\n transcripts = reader.get_transcripts()\n transcripts.index.rename(\"identifier\", inplace=True)\n transcripts.index += 1\n transcripts.to_csv(\"transcripts.csv\", index=True, header=True)\n\n exons = reader.get_exons()\n exons.index.rename(\"identifier\", inplace=True)\n exons.index += 1\n exons.to_csv(\"exons.csv\", index=True, header=True)\n\n genes.reset_index(inplace=True)\n transcripts.reset_index(inplace=True)\n exons.reset_index(inplace=True)\n\n synonyms = reader.get_gene_synonyms(genes=genes)\n synonyms.to_csv(\"gene_synonyms.csv\", index=False, header=True)\n\n uniprot = reader.get_uniprot(trancripts=transcripts)\n uniprot.to_csv(\"transcripts_uniprot.csv\", index=False, header=True)\n\n genes_transcripts = 
genes[[\"identifier\", ENSEMBL_GENE_ID]].join(\n transcripts[[\"identifier\", ENSEMBL_GENE_ID]].set_index(ENSEMBL_GENE_ID),\n on=ENSEMBL_GENE_ID,\n lsuffix=\"_gene\",\n rsuffix=\"_transcript\",\n )[[\"identifier_gene\", \"identifier_transcript\"]]\n genes_transcripts.rename({\"identifier_gene\": \"gene\", \"identifier_transcript\": \"transcript\"}, axis=1, inplace=True)\n genes_transcripts.to_csv(\"genes_transcripts.csv\", index=False, header=True)\n\n transcripts_exons = transcripts[[\"identifier\", ENSEMBL_TRANSCRIPT_ID]].join(\n exons[[\"identifier\", ENSEMBL_TRANSCRIPT_ID]].set_index(ENSEMBL_TRANSCRIPT_ID),\n on=ENSEMBL_TRANSCRIPT_ID,\n lsuffix=\"_transcript\",\n rsuffix=\"_exon\",\n )[[\"identifier_transcript\", \"identifier_exon\"]]\n transcripts_exons.rename({\"identifier_exon\": \"exon\", \"identifier_transcript\": \"transcript\"}, axis=1, inplace=True)\n transcripts_exons.to_csv(\"transcripts_exons.csv\", index=False, header=True)\n\n end_time = time.time()\n logging.warning(\"Finished in {} seconds\".format(end_time - start_time))\n", "id": "4954150", "language": "Python", "matching_score": 1.5448100566864014, "max_stars_count": 24, "path": "scripts/import_ensembl.py" }, { "content": "from __future__ import print_function\nimport sys\nimport pymongo\nimport json\nimport re\n\nhost='phenotips.cs.ucl.ac.uk'\n\nconn = pymongo.MongoClient(host=host, port=27017)\ndb=conn['patients']\n\nheaders=[\"Internal reference number or ID\",\"Chromosome\",\"Start\",\"Genome assembly\",\"Reference allele\",\"Alternate allele\",\"Transcript\",\"Gene name\",\"Intergenic\",\"Chromosomal sex\",\"Open-access consent\",\"Age at last clinical assessment\",\"Prenatal age in weeks\",\"Note\",\"Inheritance\",\"Pathogenicity\",\"Phenotypes\",\"HGVS code\",\"Genotype\",\"Responsible contact\"]\n#headers=[\"Internal reference number or ID\",\"Genome assembly\",\"Gene name\",\"Open-access consent\",\"Phenotypes\",\"Responsible contact\"]\n\nprint(','.join(map(lambda x: '\"%s\"'%x,headers)))\nfor p in db.patients.find({'external_id':{'$regex':re.compile('IRDC_.*_LON_.*')}}):\n r=dict()\n if 'GC' not in p['external_id']: continue\n if 'genes' in p:\n r[\"Gene name\"]= ', '.join([g['gene'] for g in p['genes']])\n else:\n r[\"Gene name\"]= ''\n r[\"Internal reference number or ID\"]=re.match('.*_(GC.*)',p['external_id']).group(1)\n r[\"Chromosome\"]=''\n r[\"Start\"]=''\n r[\"Genome assembly\"]='GRCh37/hg19'\n r[\"Reference allele\"]=''\n r[\"Alternate allele\"]=''\n r[\"Transcript\"]=''\n r[\"Intergenic\"]=''\n r[\"Chromosomal sex\"]=''\n r[\"Open-access consent\"]='No'\n r[\"Age at last clinical assessment\"]=''\n r[\"Prenatal age in weeks\"]=''\n r[\"Note\"]=''\n r[\"Inheritance\"]=''\n r[\"Pathogenicity\"]=''\n r[\"Phenotypes\"]=', '.join([f['id'] for f in p['features'] if f['observed']=='yes'])\n r[\"HGVS code\"]=''\n r[\"Genotype\"]=''\n r[\"Responsible contact\"]='<NAME>'\n print(','.join(['\"%s\"' % r[k] for k in headers]))\n\n", "id": "6619977", "language": "Python", "matching_score": 0.7962878942489624, "max_stars_count": 24, "path": "decipher/export_patients.py" }, { "content": "import bs4\nimport functools\nfrom htmlmin.minify import html_minify\n\ndef prettify(route_function):\n @functools.wraps(route_function)\n def wrapped(*args, **kwargs):\n yielded_html = route_function(*args, **kwargs)\n soup = bs4.BeautifulSoup(yielded_html, 'lxml')\n return soup.prettify()\n\n return wrapped\n\ndef uglify(route_function):\n @functools.wraps(route_function)\n def wrapped(*args, **kwargs):\n yielded_html = 
route_function(*args, **kwargs)\n minified_html = html_minify(yielded_html)\n return minified_html\n\n return wrapped\n", "id": "673949", "language": "Python", "matching_score": 0.012983581982553005, "max_stars_count": 24, "path": "views/minify_output.py" }, { "content": "\"\"\"\nSave configurations for a user\n\"\"\"\nimport psycopg2\nimport ujson as json\nfrom flask import jsonify, request, session\n\nimport db.helpers\nfrom views import application\nfrom views.auth import USER, requires_auth\nfrom views.postgres import get_db, postgres_cursor\n\n\[email protected](\"/<language>/save_configuration/<pageType>/<pagePart>\", methods=[\"POST\"])\[email protected](\"/save_configuration/<pageType>/<pagePart>\", methods=[\"POST\"])\n@requires_auth\ndef save_configuration(pageType, pagePart, language=\"en\"):\n config = db.helpers.legacy_query_user_config(language=language, entity=pageType)\n application.logger.debug(pageType)\n application.logger.debug(pagePart)\n if pageType == \"my_patients\":\n pageType = \"hpo\"\n application.logger.debug(config)\n for col in config[0][pagePart][\"colNames\"]:\n if col[\"key\"] in request.form.getlist(\"colNames[]\"):\n application.logger.debug(col[\"key\"], True)\n col[\"default\"] = True\n else:\n application.logger.debug(col[\"key\"], False)\n col[\"default\"] = False\n c = postgres_cursor()\n try:\n c.execute(\n \"UPDATE user_config SET config=%s WHERE user_name=%s AND language=%s AND page=%s\",\n (json.dumps(config), session[USER], language, pageType),\n )\n get_db().commit()\n c.close()\n except (Exception, psycopg2.DatabaseError) as error:\n application.logger.exception(error)\n get_db().rollback()\n return jsonify(\"save configuration failed\"), 500\n finally:\n c.close()\n return jsonify(success=\"\"), 200\n", "id": "11463960", "language": "Python", "matching_score": 2.871931791305542, "max_stars_count": 3, "path": "views/save_configuration.py" }, { "content": "from flask import session\nfrom sqlalchemy import and_\nfrom sqlalchemy.orm import Session\nfrom db.model import UserConfig\nfrom views.postgres import postgres_cursor\n\n\ndef query_user_config(db_session: Session, language, entity):\n\n user_config = (\n db_session.query(UserConfig)\n .filter(\n and_(UserConfig.user_name == session[\"user\"], UserConfig.language == language, UserConfig.page == entity)\n )\n .first()\n )\n return user_config.config\n\n\n# TODO: get rid of this method!\ndef legacy_query_user_config(language, entity):\n cursor = postgres_cursor()\n cursor.execute(\n \"select config from user_config u where u.user_name=%(user)s and u.language=%(language)s and \"\n \"u.page=%(entity)s limit 1\",\n {\"user\": session[\"user\"], \"language\": language, \"entity\": entity},\n )\n config = cursor.fetchone()[0]\n cursor.close()\n return config\n\n\ndef cursor2dict(cursor):\n headers = [h[0] for h in cursor.description]\n return [dict(zip(headers, r)) for r in cursor.fetchall()]\n", "id": "5436837", "language": "Python", "matching_score": 0.9790798425674438, "max_stars_count": 24, "path": "db/helpers.py" }, { "content": "\"\"\"\nPostgres module\n\"\"\"\nfrom contextlib import contextmanager\n\nimport psycopg2\nfrom flask import g\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.engine import Engine\nfrom sqlalchemy.orm import Session, sessionmaker\n\nfrom views import ENV_LOG_FLAG, application\n\n\ndef get_db():\n if \"db\" not in g:\n g.db = psycopg2.connect(\n host=application.config[\"DB_HOST\"],\n database=application.config[\"DB_DATABASE\"],\n 
user=application.config[\"DB_USER\"],\n password=application.config[\"DB_PASSWORD\"],\n port=application.config[\"DB_PORT\"],\n )\n return g.db\n\n\ndef close_db():\n adb = g.pop(\"db\", None)\n if adb is not None:\n adb.close()\n\n\ndef postgres_cursor():\n cursor = get_db().cursor()\n return cursor\n\n\ndef get_db_engine() -> Engine:\n \"\"\"\n Opens a new database connection if there is none yet for the\n current application context.\n \"\"\"\n if not hasattr(g, \"dbengine\"):\n engine = create_engine(application.config[\"SQLALCHEMY_DATABASE_URI\"], echo=ENV_LOG_FLAG)\n engine.connect()\n g.dbengine = engine\n return g.dbengine\n\n\n@contextmanager\ndef session_scope() -> Session:\n \"\"\"Provide a transactional scope around a series of operations.\"\"\"\n engine = get_db_engine()\n # TODO: do we want to tweak the session pool config here?\n DbSession = sessionmaker(bind=engine)\n DbSession.configure(bind=engine)\n session = DbSession()\n try:\n yield session\n session.commit()\n except BaseException as e:\n session.rollback()\n raise e\n finally:\n session.close()\n", "id": "10246901", "language": "Python", "matching_score": 3.567981719970703, "max_stars_count": 3, "path": "views/postgres.py" }, { "content": "\"\"\"\nPackage to init views\n\"\"\"\nimport os\nimport datetime\nfrom flask import Flask\nfrom flask_sessionstore import SqlAlchemySessionInterface\nfrom flask_compress import Compress\nfrom flask_caching import Cache\nfrom flask_mail import Mail\nfrom flask_sqlalchemy import SQLAlchemy\nimport logging\nfrom logging.config import dictConfig\nfrom flask.logging import default_handler\nfrom cyvcf2 import VCF\nfrom subprocess import Popen, STDOUT, PIPE\nimport psycopg2\n\n# Options are: prod, dev, debug (default)\nAPP_ENV = os.getenv(\"APP_ENV\", \"debug\")\n\nHG_ASSEMBLY = os.getenv(\"HG_ASSEMBLY\", \"GRCh37\")\n\nMAIL_USERNAME = os.getenv(\"MAIL_USERNAME\", \"<EMAIL>\")\n\nMAX_PAGE_SIZE = 100000\n\nVERSION = Popen(\"git describe --tags --always\", shell=True, stderr=STDOUT, stdout=PIPE).communicate()[0][:-1].decode()\nif \"command not found\" in VERSION:\n VERSION = \"$Format:%H$\"[:7]\n\nENV_LOG_FLAG = True\nif APP_ENV in [\"prod\"]:\n ENV_LOG_FLAG = False\n\nvariant_file = VCF(os.getenv(\"VCF_FILE\"))\n\n\ndef _configure_logs():\n application_environment = APP_ENV\n log_level = logging.DEBUG if application_environment == \"debug\" else logging.ERROR\n dictConfig(\n {\n \"version\": 1,\n \"formatters\": {\n \"default\": {\"format\": \"%(asctime)s-%(levelname)s-%(name)s::%(module)s|%(lineno)s:: %(message)s\"}\n },\n \"handlers\": {\n \"wsgi\": {\n \"class\": \"logging.StreamHandler\",\n \"stream\": \"ext://flask.logging.wsgi_errors_stream\",\n \"formatter\": \"default\",\n },\n \"info_rotating_file_handler\": {\n \"level\": log_level,\n \"formatter\": \"default\",\n \"class\": \"logging.handlers.RotatingFileHandler\",\n \"filename\": \"phenopolis.log\",\n \"mode\": \"a\",\n \"maxBytes\": 1048576,\n \"backupCount\": 10,\n },\n },\n \"root\": {\"level\": log_level, \"handlers\": [\"wsgi\"]},\n }\n )\n # add SQLalchemy logs\n logging.getLogger(\"sqlalchemy\").addHandler(default_handler)\n\n\ndef _load_config():\n application.config[\"SERVED_URL\"] = os.getenv(\"SERVED_URL\", \"127.0.0.1\")\n application.config[\"MAIL_SERVER\"] = os.getenv(\"MAIL_SERVER\", \"smtp.gmail.com\")\n application.config[\"MAIL_PORT\"] = os.getenv(\"MAIL_PORT\", \"587\")\n application.config[\"MAIL_USERNAME\"] = MAIL_USERNAME\n application.config[\"MAIL_PASSWORD\"] = os.getenv(\"MAIL_PASSWORD\", 
\"<PASSWORD>\")\n application.config[\"MAIL_USE_TLS\"] = os.getenv(\"MAIL_USE_TLS\", \"true\") == \"true\"\n application.config[\"MAIL_USE_SSL\"] = os.getenv(\"MAIL_USE_SSL\", \"false\") == \"true\"\n application.config[\"MAIL_SUPPRESS_SEND\"] = os.getenv(\"MAIL_SUPPRESS_SEND\", \"true\") == \"true\"\n application.config[\"DB_HOST\"] = os.getenv(\"PH_DB_HOST\", \"0.0.0.0\")\n application.config[\"DB_DATABASE\"] = os.getenv(\"PH_DB_NAME\", \"phenopolis_db\")\n application.config[\"DB_USER\"] = os.getenv(\"PH_DB_USER\", \"phenopolis_api\")\n application.config[\"DB_PASSWORD\"] = os.getenv(\"PH_DB_PASSWORD\", \"phenopolis_api\")\n application.config[\"DB_PORT\"] = os.getenv(\"PH_DB_PORT\", \"5432\")\n application.config[\"SECRET_KEY\"] = os.getenv(\"PH_SECRET_KEY\", \"my_precious\")\n application.config[\"SECURITY_PASSWORD_SALT\"] = os.getenv(\"PH_SECURITY_PASSWORD_SALT\", \"<PASSWORD>\")\n application.config[\"TOKEN_EXPIRY_SECONDS\"] = int(os.getenv(\"PH_TOKEN_EXPIRY_SECONDS\", 172800))\n application.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = True\n db_uri = \"postgresql+psycopg2://%s:%s@%s:%s/%s\" % (\n application.config[\"DB_USER\"],\n application.config[\"DB_PASSWORD\"],\n application.config[\"DB_HOST\"],\n application.config[\"DB_PORT\"],\n application.config[\"DB_DATABASE\"],\n )\n application.config[\"SQLALCHEMY_DATABASE_URI\"] = db_uri\n\n\ndef _init_sqlalchemy():\n database = SQLAlchemy(application)\n database.init_app(application)\n application.session_interface = SqlAlchemySessionInterface(application, database, \"test_sessions\", \"test_sess_\")\n application.permanent_session_lifetime = datetime.timedelta(hours=1)\n\n\n_configure_logs() # NOTE: this needs to happen before starting the application\n# Load default config and override config from an environment variable\napplication = Flask(__name__)\n_load_config()\n_init_sqlalchemy()\n\nCompress(application)\ncache = Cache(application, config={\"CACHE_TYPE\": \"simple\"})\nmail = Mail(application)\n\ntry:\n db = psycopg2.connect(\n host=application.config[\"DB_HOST\"],\n database=application.config[\"DB_DATABASE\"],\n user=application.config[\"DB_USER\"],\n password=application.config[\"DB_PASSWORD\"],\n port=application.config[\"DB_PORT\"],\n )\n c = db.cursor()\n c.execute(\"select external_id, internal_id from individuals\")\n headers = [h[0] for h in c.description]\n\n pheno_ids = [dict(zip(headers, r)) for r in c.fetchall()]\n phenoid_mapping = {ind[\"external_id\"]: ind[\"internal_id\"] for ind in pheno_ids}\nexcept Exception:\n phenoid_mapping = {}\n\n# NOTE: These imports must be placed at the end of this file\n# flake8: noqa E402\nimport views.general\nimport views.postgres\nimport views.auth\nimport views.statistics\nimport views.gene\nimport views.variant\nimport views.individual\nimport views.hpo\nimport views.users\nimport views.user_individuals\nimport views.autocomplete\nimport views.save_configuration\nimport views.variant_classification\nimport views.upload\n", "id": "6785323", "language": "Python", "matching_score": 2.6231558322906494, "max_stars_count": 24, "path": "views/__init__.py" }, { "content": "\"\"\"\nFlask app\n\"\"\"\nfrom views import application, APP_ENV\n\nif __name__ == \"__main__\":\n application.debug = False\n if APP_ENV == \"debug\":\n application.debug = True\n application.run()\n", "id": "4688338", "language": "Python", "matching_score": 2.6231558322906494, "max_stars_count": 24, "path": "application.py" }, { "content": "\"\"\"\nFlask app\n\"\"\"\nfrom views import APP_ENV, 
application\n\nif __name__ == \"__main__\":\n application.debug = False\n if APP_ENV == \"debug\":\n application.debug = True\n application.run()\n", "id": "12844032", "language": "Python", "matching_score": 2.6231558322906494, "max_stars_count": 3, "path": "application.py" } ]
2.21132
tb205gti
[ { "content": "#!/usr/bin/env python\nfrom cereal import car,tesla\nimport time\nimport os\nfrom selfdrive.can.parser import CANParser\nfrom common.realtime import sec_since_boot\nfrom selfdrive.services import service_list\nimport selfdrive.messaging as messaging\nfrom selfdrive.car.tesla.readconfig import CarSettings\nfrom selfdrive.tinklad.tinkla_interface import TinklaClient\n\n#RADAR_A_MSGS = list(range(0x371, 0x37F , 3))\n#RADAR_B_MSGS = list(range(0x372, 0x37F, 3))\nBOSCH_MAX_DIST = 150. #max distance for radar\nRADAR_A_MSGS = list(range(0x310, 0x36F , 3))\nRADAR_B_MSGS = list(range(0x311, 0x36F, 3))\nOBJECT_MIN_PROBABILITY = 20.\nCLASS_MIN_PROBABILITY = 20.\nRADAR_MESSAGE_FREQUENCY = 0.050 * 1e9 #time in ns, radar sends data at 0.06 s\nVALID_MESSAGE_COUNT_THRESHOLD = 4\n\n\n# Tesla Bosch firmware has 32 objects in all objects or a selected set of the 5 we should look at\n# definetly switch to all objects when calibrating but most likely use select set of 5 for normal use\nUSE_ALL_OBJECTS = True\n\ndef _create_radard_can_parser():\n dbc_f = 'teslaradar.dbc'\n\n msg_a_n = len(RADAR_A_MSGS)\n msg_b_n = len(RADAR_B_MSGS)\n\n signals = zip(['LongDist'] * msg_a_n + ['LatDist'] * msg_a_n +\n ['LongSpeed'] * msg_a_n + ['LongAccel'] * msg_a_n + \n ['Valid'] * msg_a_n + ['Tracked'] * msg_a_n + \n ['Meas'] * msg_a_n + ['ProbExist'] * msg_a_n + \n ['Index'] * msg_a_n + ['ProbObstacle'] * msg_a_n + \n ['LatSpeed'] * msg_b_n + ['Index2'] * msg_b_n +\n ['Class'] * msg_b_n + ['ProbClass'] * msg_b_n + \n ['Length'] * msg_b_n + ['dZ'] * msg_b_n + ['MovingState'] * msg_b_n,\n RADAR_A_MSGS * 10 + RADAR_B_MSGS * 7,\n [255.] * msg_a_n + [0.] * msg_a_n + [0.] * msg_a_n + [0.] * msg_a_n + \n [0] * msg_a_n + [0] * msg_a_n + [0] * msg_a_n + [0.] * msg_a_n +\n [0] * msg_a_n + [0.] * msg_a_n + [0.] * msg_b_n + [0] * msg_b_n +\n [0] * msg_b_n + [0.] * msg_b_n + [0.] * msg_b_n +[0.] 
* msg_b_n + [0]* msg_b_n)\n\n checks = zip(RADAR_A_MSGS + RADAR_B_MSGS, [60]*(msg_a_n + msg_b_n))\n\n return CANParser(os.path.splitext(dbc_f)[0], signals, checks, 1)\n\n\nclass RadarInterface(object):\n\n tinklaClient = TinklaClient()\n\n def __init__(self,CP):\n # radar\n self.pts = {}\n self.extPts = {}\n self.delay = 0.1\n self.useTeslaRadar = CarSettings().get_value(\"useTeslaRadar\")\n self.TRACK_LEFT_LANE = True\n self.TRACK_RIGHT_LANE = True\n self.updated_messages = set()\n self.canErrorCounter = 0\n if self.useTeslaRadar:\n self.pts = {}\n self.extPts = {}\n self.valid_cnt = {key: 0 for key in RADAR_A_MSGS}\n self.delay = 0.1 # Delay of radar\n self.rcp = _create_radard_can_parser()\n self.logcan = messaging.sub_sock(service_list['can'].port)\n self.radarOffset = CarSettings().get_value(\"radarOffset\")\n self.trackId = 1\n self.trigger_start_msg = RADAR_A_MSGS[0]\n self.trigger_end_msg = RADAR_B_MSGS[-1]\n\n\n\n def update(self, can_strings):\n # radard at 20Hz and return no points\n if not self.useTeslaRadar:\n time.sleep(0.05)\n return car.RadarData.new_message(),self.extPts.values()\n\n\n tm = int(sec_since_boot() * 1e9)\n if can_strings != None:\n vls = self.rcp.update_strings(tm, can_strings)\n self.updated_messages.update(vls)\n\n\n if self.trigger_start_msg not in self.updated_messages:\n return None,None\n\n if self.trigger_end_msg not in self.updated_messages:\n return None,None\n\n rr,rrext = self._update(self.updated_messages)\n #self.updated_messages.clear()\n return rr,rrext\n\n\n def _update(self, updated_messages):\n ret = car.RadarData.new_message()\n\n for message in updated_messages:\n if not(message in RADAR_A_MSGS):\n if message in self.pts:\n del self.pts[message]\n del self.extPts[message]\n continue\n cpt = self.rcp.vl[message]\n cpt2 = self.rcp.vl[message+1]\n # ensure the two messages are from the same frame reading\n if cpt['Index'] != cpt2['Index2']:\n continue\n if (cpt['LongDist'] >= BOSCH_MAX_DIST) or (cpt['LongDist']==0) or (not cpt['Tracked']):\n self.valid_cnt[message] = 0 # reset counter\n if message in self.pts:\n del self.pts[message]\n del self.extPts[message]\n elif cpt['Valid'] and (cpt['LongDist'] < BOSCH_MAX_DIST) and (cpt['LongDist'] > 0) and (cpt['ProbExist'] >= OBJECT_MIN_PROBABILITY):\n self.valid_cnt[message] += 1\n else:\n self.valid_cnt[message] = max(self.valid_cnt[message] -1, 0)\n if (self.valid_cnt[message]==0) and (message in self.pts):\n del self.pts[message]\n del self.extPts[message]\n\n # radar point only valid if it's a valid measurement and score is above 50\n # bosch radar data needs to match Index and Index2 for validity\n # also for now ignore construction elements\n if (cpt['Valid'] or cpt['Tracked'])and (cpt['LongDist']>0) and (cpt['LongDist'] < BOSCH_MAX_DIST) and \\\n (self.valid_cnt[message] > VALID_MESSAGE_COUNT_THRESHOLD) and (cpt['ProbExist'] >= OBJECT_MIN_PROBABILITY): \n if message not in self.pts and ( cpt['Tracked']):\n self.pts[message] = car.RadarData.RadarPoint.new_message()\n self.pts[message].trackId = self.trackId \n self.extPts[message] = tesla.TeslaRadarPoint.new_message()\n self.extPts[message].trackId = self.trackId \n self.trackId = (self.trackId + 1) & 0xFFFFFFFFFFFFFFFF\n if self.trackId ==0:\n self.trackId = 1\n if message in self.pts:\n self.pts[message].dRel = cpt['LongDist'] # from front of car\n self.pts[message].yRel = cpt['LatDist'] - self.radarOffset # in car frame's y axis, left is positive\n self.pts[message].vRel = cpt['LongSpeed']\n self.pts[message].aRel = cpt['LongAccel']\n 
self.pts[message].yvRel = cpt2['LatSpeed']\n self.pts[message].measured = bool(cpt['Meas'])\n self.extPts[message].dz = cpt2['dZ']\n self.extPts[message].movingState = cpt2['MovingState']\n self.extPts[message].length = cpt2['Length']\n self.extPts[message].obstacleProb = cpt['ProbObstacle']\n self.extPts[message].timeStamp = int(self.rcp.ts[message+1]['Index2'])\n if self.rcp.vl[message+1]['Class'] >= CLASS_MIN_PROBABILITY:\n self.extPts[message].objectClass = cpt2['Class']\n # for now we will use class 0- unknown stuff to show trucks\n # we will base that on being a class 1 and length of 2 (hoping they meant width not length, but as germans could not decide)\n # 0-unknown 1-four wheel vehicle 2-two wheel vehicle 3-pedestrian 4-construction element\n # going to 0-unknown 1-truck 2-car 3/4-motorcycle/bicycle 5 pedestrian - we have two bits so\n if self.extPts[message].objectClass == 0:\n self.extPts[message].objectClass = 1\n if (self.extPts[message].objectClass == 1) and ((self.extPts[message].length >= 1.8) or (1.6 < self.extPts[message].dz < 4.5)):\n self.extPts[message].objectClass = 0\n if self.extPts[message].objectClass == 4:\n self.extPts[message].objectClass = 1\n else:\n self.extPts[message].objectClass = 1\n\n ret.points = self.pts.values()\n errors = []\n if not self.rcp.can_valid:\n errors.append(\"canError\")\n self.tinklaClient.logCANErrorEvent(source=\"radar_interface\", canMessage=0, additionalInformation=\"Invalid CAN Count\")\n self.canErrorCounter += 1\n else:\n self.canErrorCounter = 0\n #BB: Only trigger canError for 3 consecutive errors\n if self.canErrorCounter > 2:\n ret.errors = errors\n else:\n ret.errors = []\n return ret,self.extPts.values()\n\n# radar_interface standalone tester\nif __name__ == \"__main__\":\n CP = None\n RI = RadarInterface(CP)\n while 1:\n ret,retext = RI.update(can_strings = None)\n print(chr(27) + \"[2J\")\n print(ret,retext)\n", "id": "10151869", "language": "Python", "matching_score": 8.071280479431152, "max_stars_count": 1, "path": "selfdrive/car/tesla/radar_interface.py" }, { "content": "#!/usr/bin/env python\r\nimport sys\r\ntry:\r\n sys.path.index('/data/openpilot/')\r\nexcept ValueError:\r\n sys.path.append('/data/openpilot/')\r\n\r\nfrom cereal import car\r\nimport time\r\nimport os\r\nimport zmq\r\nfrom selfdrive.can.parser import CANParser\r\nfrom common.realtime import sec_since_boot\r\nfrom selfdrive.services import service_list\r\nimport selfdrive.messaging as messaging\r\nfrom selfdrive.car.tesla.readconfig import read_config_file,CarSettings\r\n\r\n#RADAR_A_MSGS = list(range(0x371, 0x37F , 3))\r\n#RADAR_B_MSGS = list(range(0x372, 0x37F, 3))\r\nBOSCH_MAX_DIST = 150. 
#max distance for radar\r\nRADAR_A_MSGS = list(range(0x310, 0x36F , 3))\r\nRADAR_B_MSGS = list(range(0x311, 0x36F, 3))\r\nOBJECT_MIN_PROBABILITY = 20.\r\nCLASS_MIN_PROBABILITY = 20.\r\n#for calibration we only want fixed objects within 1 m of the center line and between 2.5 and 4.5 m far from radar\r\nMINX = 2.5 \r\nMAXX = 4.5\r\nMINY = -1.0\r\nMAXY = 1.0\r\n\r\n\r\n# Tesla Bosch firmware has 32 objects in all objects or a selected set of the 5 we should look at\r\n# definetly switch to all objects when calibrating but most likely use select set of 5 for normal use\r\nUSE_ALL_OBJECTS = True\r\n\r\ndef _create_radard_can_parser():\r\n dbc_f = 'teslaradar.dbc'\r\n\r\n msg_a_n = len(RADAR_A_MSGS)\r\n msg_b_n = len(RADAR_B_MSGS)\r\n\r\n signals = zip(['LongDist'] * msg_a_n + ['LatDist'] * msg_a_n +\r\n ['LongSpeed'] * msg_a_n + ['LongAccel'] * msg_a_n + \r\n ['Valid'] * msg_a_n + ['Tracked'] * msg_a_n + \r\n ['Meas'] * msg_a_n + ['ProbExist'] * msg_a_n + \r\n ['Index'] * msg_a_n + ['ProbObstacle'] * msg_a_n + \r\n ['LatSpeed'] * msg_b_n + ['Index2'] * msg_b_n +\r\n ['Class'] * msg_b_n + ['ProbClass'] * msg_b_n + \r\n ['Length'] * msg_b_n + ['dZ'] * msg_b_n + ['MovingState'] * msg_b_n,\r\n RADAR_A_MSGS * 10 + RADAR_B_MSGS * 7,\r\n [255.] * msg_a_n + [0.] * msg_a_n + [0.] * msg_a_n + [0.] * msg_a_n + \r\n [0] * msg_a_n + [0] * msg_a_n + [0] * msg_a_n + [0.] * msg_a_n +\r\n [0] * msg_a_n + [0.] * msg_a_n + [0.] * msg_b_n + [0] * msg_b_n +\r\n [0] * msg_b_n + [0.] * msg_b_n + [0.] * msg_b_n +[0.] * msg_b_n + [0]* msg_b_n)\r\n\r\n checks = zip(RADAR_A_MSGS + RADAR_B_MSGS, [20]*(msg_a_n + msg_b_n))\r\n\r\n return CANParser(os.path.splitext(dbc_f)[0], signals, checks, 1)\r\n\r\n\r\nclass RadarInterface(object):\r\n def __init__(self):\r\n # radar\r\n self.pts = {}\r\n self.delay = 0.1\r\n self.useTeslaRadar = CarSettings().get_value(\"useTeslaRadar\")\r\n self.TRACK_LEFT_LANE = True\r\n self.TRACK_RIGHT_LANE = True\r\n if self.useTeslaRadar:\r\n self.pts = {}\r\n self.valid_cnt = {key: 0 for key in RADAR_A_MSGS}\r\n self.delay = 0.05 # Delay of radar\r\n self.rcp = _create_radard_can_parser()\r\n self.logcan = messaging.sub_sock(service_list['can'].port)\r\n\r\n def update(self):\r\n\r\n ret = car.RadarData.new_message()\r\n if not self.useTeslaRadar:\r\n time.sleep(0.05)\r\n return ret\r\n\r\n canMonoTimes = []\r\n updated_messages = set()\r\n while 1:\r\n tm = int(sec_since_boot() * 1e9)\r\n _ , vls = self.rcp.update(tm, True)\r\n updated_messages.update(vls)\r\n if RADAR_B_MSGS[-1] in updated_messages:\r\n break\r\n errors = []\r\n if not self.rcp.can_valid:\r\n errors.append(\"commIssue\")\r\n ret.errors = errors\r\n ret.canMonoTimes = canMonoTimes\r\n for ii in updated_messages:\r\n if ii in RADAR_A_MSGS:\r\n cpt = self.rcp.vl[ii]\r\n if (cpt['LongDist'] >= BOSCH_MAX_DIST) or (cpt['LongDist']==0) or (not cpt['Tracked']):\r\n self.valid_cnt[ii] = 0 # reset counter\r\n elif cpt['Valid'] and (cpt['LongDist'] < BOSCH_MAX_DIST) and (cpt['LongDist'] > 0) and (cpt['ProbExist'] >= OBJECT_MIN_PROBABILITY):\r\n self.valid_cnt[ii] += 1\r\n else:\r\n self.valid_cnt[ii] = max(self.valid_cnt[ii] -1, 0)\r\n\r\n if (cpt['Valid'] or cpt['Tracked'])and (cpt['LongDist']>=MINX) and (cpt['LongDist'] <= MAXX) and \\\r\n (cpt['Index'] == self.rcp.vl[ii+1]['Index2']) and (self.valid_cnt[ii] > 4) and \\\r\n (cpt['ProbExist'] >= OBJECT_MIN_PROBABILITY) and (cpt['LatDist']>=MINY) and (cpt['LatDist']<=MAXY):\r\n if ii not in self.pts and ( cpt['Tracked']):\r\n self.pts[ii] = car.RadarData.RadarPoint.new_message()\r\n 
self.pts[ii].trackId = int((ii - 0x310)/3) \r\n if ii in self.pts:\r\n self.pts[ii].dRel = cpt['LongDist'] # from front of car\r\n self.pts[ii].yRel = cpt['LatDist'] # in car frame's y axis, left is positive\r\n self.pts[ii].vRel = cpt['LongSpeed']\r\n self.pts[ii].aRel = cpt['LongAccel']\r\n self.pts[ii].yvRel = self.rcp.vl[ii+1]['LatSpeed']\r\n self.pts[ii].measured = bool(cpt['Meas'])\r\n self.pts[ii].dz = self.rcp.vl[ii+1]['dZ']\r\n self.pts[ii].movingState = self.rcp.vl[ii+1]['MovingState']\r\n self.pts[ii].length = self.rcp.vl[ii+1]['Length']\r\n self.pts[ii].obstacleProb = cpt['ProbObstacle']\r\n if self.rcp.vl[ii+1]['Class'] >= CLASS_MIN_PROBABILITY:\r\n self.pts[ii].objectClass = self.rcp.vl[ii+1]['Class']\r\n # for now we will use class 0- unknown stuff to show trucks\r\n # we will base that on being a class 1 and length of 2 (hoping they meant width not length, but as germans could not decide)\r\n # 0-unknown 1-four wheel vehicle 2-two wheel vehicle 3-pedestrian 4-construction element\r\n # going to 0-unknown 1-truck 2-car 3/4-motorcycle/bicycle 5 pedestrian - we have two bits so\r\n if self.pts[ii].objectClass == 0:\r\n self.pts[ii].objectClass = 1\r\n if (self.pts[ii].objectClass == 1) and ((self.pts[ii].length >= 1.8) or (1.6 < self.pts[ii].dz < 4.5)):\r\n self.pts[ii].objectClass = 0\r\n if self.pts[ii].objectClass == 4:\r\n self.pts[ii].objectClass = 1\r\n else:\r\n self.pts[ii].objectClass = 1\r\n else:\r\n if ii in self.pts:\r\n del self.pts[ii]\r\n\r\n ret.points = self.pts.values()\r\n return ret\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n RI = RadarInterface()\r\n while 1:\r\n ret = RI.update()\r\n print(chr(27) + \"[2J\")\r\n print ret\r\n", "id": "4009818", "language": "Python", "matching_score": 1.9761732816696167, "max_stars_count": 1, "path": "selfdrive/car/tesla/radar_tools/calibrateRadar.py" }, { "content": "#!/usr/bin/env python\nimport numpy as np\nimport numpy.matlib\nimport importlib\nimport zmq\nfrom collections import defaultdict, deque\n\nimport selfdrive.messaging as messaging\nfrom selfdrive.services import service_list\nfrom selfdrive.controls.lib.radar_helpers import Track, Cluster\nfrom selfdrive.config import RADAR_TO_CENTER\nfrom selfdrive.controls.lib.cluster.fastcluster_py import cluster_points_centroid\nfrom selfdrive.swaglog import cloudlog\nfrom cereal import car,log,tesla\nfrom common.params import Params\nfrom common.realtime import set_realtime_priority, Ratekeeper, DT_MDL\nfrom selfdrive.car.tesla.readconfig import read_config_file,CarSettings\n\nDEBUG = False\n\n#vision point\nDIMSV = 2\nXV, SPEEDV = 0, 1\nVISION_POINT = -1\nRDR_TO_LDR = 0.\n\n# Time-alignment\nrate = 1. 
/ DT_MDL # model and radar are both at 20Hz\nv_len = 20 # how many speed data points to remember for t alignment with rdr data\n\n\ndef laplacian_cdf(x, mu, b):\n b = np.max([b, 1e-4])\n return np.exp(-abs(x-mu)/b)\n\n\ndef match_vision_to_cluster(v_ego, lead, clusters):\n # match vision point to best statistical cluster match\n probs = []\n offset_vision_dist = lead.dist - RADAR_TO_CENTER\n for c in clusters:\n prob_d = laplacian_cdf(c.dRel, offset_vision_dist, lead.std)\n prob_y = laplacian_cdf(c.yRel, lead.relY, lead.relYStd)\n prob_v = laplacian_cdf(c.vRel, lead.relVel, lead.relVelStd)\n # This is isn't exactly right, but good heuristic\n combined_prob = prob_d * prob_y * prob_v\n probs.append(combined_prob)\n idx = np.argmax(probs)\n # if no 'sane' match is found return -1\n # stationary radar points can be false positives\n dist_sane = abs(clusters[idx].dRel - offset_vision_dist) < max([(offset_vision_dist)*.25, 5.0])\n vel_sane = (abs(clusters[idx].vRel - lead.relVel) < 10) or (v_ego + clusters[idx].vRel > 2)\n if dist_sane and vel_sane:\n return idx\n else:\n return None\n\ndef get_rrext_by_trackId(rrext,trackId):\n if rrext is not None:\n for p in rrext:\n if p.trackId == trackId:\n return p\n return None\n\ndef get_lead(v_ego, ready, clusters, lead_msg, low_speed_override=True):\n # Determine leads, this is where the essential logic happens\n if len(clusters) > 0 and ready and lead_msg.prob > .5:\n lead_idx = match_vision_to_cluster(v_ego, lead_msg, clusters)\n else:\n lead_idx = None\n\n lead_dict = {'status': False}\n lead_dict_ext = {'trackId': 1, 'oClass': 0, 'length': 0.}\n if lead_idx is not None:\n lead_dict,lead_dict_ext = clusters[lead_idx].get_RadarState(lead_msg.prob)\n elif (lead_idx is None) and ready and (lead_msg.prob > .5):\n lead_dict = Cluster().get_RadarState_from_vision(lead_msg, v_ego)\n\n if low_speed_override:\n low_speed_clusters = [c for c in clusters if c.potential_low_speed_lead(v_ego)]\n if len(low_speed_clusters) > 0:\n lead_idx = np.argmin([c.dRel for c in low_speed_clusters])\n if (not lead_dict['status']) or (low_speed_clusters[lead_idx].dRel < lead_dict['dRel']):\n lead_dict,lead_dict_ext = low_speed_clusters[lead_idx].get_RadarState()\n\n return lead_dict,lead_dict_ext\n\n\nclass RadarD(object):\n def __init__(self, mocked, RI):\n self.current_time = 0\n self.mocked = mocked\n self.RI = RI\n self.tracks = defaultdict(dict)\n\n self.last_md_ts = 0\n self.last_controls_state_ts = 0\n\n self.active = 0\n\n # v_ego\n self.v_ego = 0.\n self.v_ego_hist_t = deque([0], maxlen=v_len)\n self.v_ego_hist_v = deque([0], maxlen=v_len)\n self.v_ego_t_aligned = 0.\n self.ready = False\n self.icCarLR = None\n if (RI.TRACK_RIGHT_LANE or RI.TRACK_LEFT_LANE) and CarSettings().get_value(\"useTeslaRadar\"):\n self.icCarLR = messaging.pub_sock(service_list['uiIcCarLR'].port)\n \n self.lane_width = 3.0\n #only used for left and right lanes\n self.path_x = np.arange(0.0, 160.0, 0.1) # 160 meters is max\n self.poller = zmq.Poller()\n self.pathPlanSocket = messaging.sub_sock(service_list['pathPlan'].port, conflate=True, poller=self.poller)\n self.dPoly = [0.,0.,0.,0.]\n\n def update(self, frame, delay, sm, rr, has_radar,rrext):\n self.current_time = 1e-9*max([sm.logMonoTime[key] for key in sm.logMonoTime.keys()])\n use_tesla_radar = CarSettings().get_value(\"useTeslaRadar\")\n if sm.updated['controlsState']:\n self.active = sm['controlsState'].active\n self.v_ego = sm['controlsState'].vEgo\n self.v_ego_hist_v.append(self.v_ego)\n 
self.v_ego_hist_t.append(float(frame)/rate)\n if sm.updated['model']:\n self.ready = True\n\n for socket, _ in self.poller.poll(0):\n if socket is self.pathPlanSocket:\n pp = messaging.recv_one(self.pathPlanSocket).pathPlan\n self.lane_width = pp.laneWidth\n self.dPoly = pp.dPoly\n\n path_y = np.polyval(self.dPoly, self.path_x)\n\n ar_pts = {}\n for pt in rr.points:\n extpt = get_rrext_by_trackId(rrext,pt.trackId)\n ar_pts[pt.trackId] = [pt.dRel + RDR_TO_LDR, pt.yRel, pt.vRel, pt.measured, pt.aRel, pt.yvRel, extpt.objectClass, extpt.length, pt.trackId+2, extpt.movingState]\n # *** remove missing points from meta data ***\n for ids in self.tracks.keys():\n if ids not in ar_pts:\n self.tracks.pop(ids, None)\n\n # *** compute the tracks ***\n for ids in ar_pts:\n rpt = ar_pts[ids]\n\n # align v_ego by a fixed time to align it with the radar measurement\n cur_time = float(frame)/rate\n self.v_ego_t_aligned = np.interp(cur_time - delay, self.v_ego_hist_t, self.v_ego_hist_v)\n\n # distance relative to path\n d_path = np.sqrt(np.amin((self.path_x - rpt[0]) ** 2 + (path_y - rpt[1]) ** 2))\n # add sign\n d_path *= np.sign(rpt[1] - np.interp(rpt[0], self.path_x, path_y))\n\n # create the track if it doesn't exist or it's a new track\n if ids not in self.tracks:\n self.tracks[ids] = Track()\n self.tracks[ids].update(rpt[0], rpt[1], rpt[2], rpt[3], rpt[4],rpt[5],rpt[6],rpt[7],rpt[8],rpt[9], d_path, self.v_ego_t_aligned,use_tesla_radar)\n\n idens = list(self.tracks.keys())\n track_pts = np.array([self.tracks[iden].get_key_for_cluster() for iden in idens])\n\n\n # If we have multiple points, cluster them\n if len(track_pts) > 1:\n cluster_idxs = cluster_points_centroid(track_pts, 2.5)\n clusters = [None] * (max(cluster_idxs) + 1)\n\n for idx in xrange(len(track_pts)):\n cluster_i = cluster_idxs[idx]\n if clusters[cluster_i] is None:\n clusters[cluster_i] = Cluster()\n clusters[cluster_i].add(self.tracks[idens[idx]])\n elif len(track_pts) == 1:\n # FIXME: cluster_point_centroid hangs forever if len(track_pts) == 1\n cluster_idxs = [0]\n clusters = [Cluster()]\n clusters[0].add(self.tracks[idens[0]])\n else:\n clusters = []\n\n # if a new point, reset accel to the rest of the cluster\n for idx in xrange(len(track_pts)):\n if self.tracks[idens[idx]].cnt <= 1:\n aLeadK = clusters[cluster_idxs[idx]].aLeadK\n aLeadTau = clusters[cluster_idxs[idx]].aLeadTau\n self.tracks[idens[idx]].reset_a_lead(aLeadK, aLeadTau)\n \n ### START REVIEW SECTION\n\n #################################################################\n #BB For Tesla integration we will also track Left and Right lanes\n #################################################################\n if (self.RI.TRACK_RIGHT_LANE or self.RI.TRACK_LEFT_LANE) and use_tesla_radar:\n datrl = tesla.ICCarsLR.new_message()\n datrl.v1Type = int(0)\n datrl.v1Dx = float(0.)\n datrl.v1Vrel = float(0.)\n datrl.v1Dy = float(0.)\n datrl.v1Id = int(0)\n datrl.v2Type = int(0) \n datrl.v2Dx = float(0.)\n datrl.v2Vrel = float(0.)\n datrl.v2Dy = float(0.)\n datrl.v2Id = int(0)\n datrl.v3Type = int(0)\n datrl.v3Dx = float(0.)\n datrl.v3Vrel = float(0.)\n datrl.v3Dy = float(0.)\n datrl.v3Id = int(0)\n datrl.v4Type = int(0) \n datrl.v4Dx = float(0.)\n datrl.v4Vrel = float(0.)\n datrl.v4Dy = float(0.)\n datrl.v4Id = int(0)\n lane_offset = 0. 
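# currently zero: no extra lateral offset is applied to the adjacent-lane car positions published for the IC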
\n #LEFT LANE\n if self.RI.TRACK_LEFT_LANE and use_tesla_radar:\n ll_track_pts = np.array([self.tracks[iden].get_key_for_cluster_dy(-self.lane_width) for iden in idens])\n # If we have multiple points, cluster them\n if len(ll_track_pts) > 1:\n ll_cluster_idxs = cluster_points_centroid(ll_track_pts, 2.5)\n ll_clusters = [None] * (max(ll_cluster_idxs) + 1)\n\n for idx in xrange(len(ll_track_pts)):\n ll_cluster_i = ll_cluster_idxs[idx]\n\n if ll_clusters[ll_cluster_i] == None:\n ll_clusters[ll_cluster_i] = Cluster()\n ll_clusters[ll_cluster_i].add(self.tracks[idens[idx]])\n elif len(ll_track_pts) == 1:\n # TODO: why do we need this?\n ll_clusters = [Cluster()]\n ll_clusters[0].add(self.tracks[idens[0]])\n else:\n ll_clusters = []\n if DEBUG:\n for i in ll_clusters:\n print(i)\n # *** extract the lead car ***\n ll_lead_clusters = [c for c in ll_clusters\n if c.is_potential_lead_dy(self.v_ego,-self.lane_width)]\n ll_lead_clusters.sort(key=lambda x: x.dRel)\n ll_lead_len = len(ll_lead_clusters)\n ll_lead1_truck = (len([c for c in ll_lead_clusters\n if c.is_truck(ll_lead_clusters)]) > 0)\n\n # *** extract the second lead from the whole set of leads ***\n ll_lead2_clusters = [c for c in ll_lead_clusters\n if c.is_potential_lead2(ll_lead_clusters)]\n ll_lead2_clusters.sort(key=lambda x: x.dRel)\n ll_lead2_len = len(ll_lead2_clusters)\n ll_lead2_truck = (len([c for c in ll_lead_clusters\n if c.is_truck(ll_lead2_clusters)]) > 0)\n # publish data\n if ll_lead_len > 0:\n datrl.v1Type = int(ll_lead_clusters[0].oClass)\n if datrl.v1Type == 1 and ll_lead1_truck:\n datrl.v1Type = 0\n datrl.v1Dx = float(ll_lead_clusters[0].dRel)\n datrl.v1Vrel = float(ll_lead_clusters[0].vRel)\n datrl.v1Dy = float(-ll_lead_clusters[0].yRel - lane_offset)\n datrl.v1Id = int(ll_lead_clusters[0].track_id % 32)\n if ll_lead2_len > 0:\n datrl.v2Type = int(ll_lead2_clusters[0].oClass)\n if datrl.v2Type == 1 and ll_lead2_truck:\n datrl.v2Type = 0\n datrl.v2Dx = float(ll_lead2_clusters[0].dRel)\n datrl.v2Vrel = float(ll_lead2_clusters[0].vRel)\n datrl.v2Dy = float(-ll_lead2_clusters[0].yRel - lane_offset) \n datrl.v2Id = int(ll_lead2_clusters[0].track_id % 32)\n #RIGHT LANE\n if self.RI.TRACK_RIGHT_LANE and use_tesla_radar:\n rl_track_pts = np.array([self.tracks[iden].get_key_for_cluster_dy(self.lane_width) for iden in idens])\n # If we have multiple points, cluster them\n if len(rl_track_pts) > 1:\n rl_cluster_idxs = cluster_points_centroid(rl_track_pts, 2.5)\n rl_clusters = [None] * (max(rl_cluster_idxs) + 1)\n\n for idx in xrange(len(rl_track_pts)):\n rl_cluster_i = rl_cluster_idxs[idx]\n\n if rl_clusters[rl_cluster_i] == None:\n rl_clusters[rl_cluster_i] = Cluster()\n rl_clusters[rl_cluster_i].add(self.tracks[idens[idx]])\n elif len(rl_track_pts) == 1:\n # TODO: why do we need this?\n rl_clusters = [Cluster()]\n rl_clusters[0].add(self.tracks[idens[0]])\n else:\n rl_clusters = []\n if DEBUG:\n for i in rl_clusters:\n print(i)\n # *** extract the lead car ***\n rl_lead_clusters = [c for c in rl_clusters\n if c.is_potential_lead_dy(self.v_ego,self.lane_width)]\n rl_lead_clusters.sort(key=lambda x: x.dRel)\n rl_lead_len = len(rl_lead_clusters)\n rl_lead1_truck = (len([c for c in rl_lead_clusters\n if c.is_truck(rl_lead_clusters)]) > 0)\n # *** extract the second lead from the whole set of leads ***\n rl_lead2_clusters = [c for c in rl_lead_clusters\n if c.is_potential_lead2(rl_lead_clusters)]\n rl_lead2_clusters.sort(key=lambda x: x.dRel)\n rl_lead2_len = len(rl_lead2_clusters)\n rl_lead2_truck = (len([c for c in 
rl_lead_clusters\n if c.is_truck(rl_lead2_clusters)]) > 0)\n # publish data\n if rl_lead_len > 0:\n datrl.v3Type = int(rl_lead_clusters[0].oClass) \n if datrl.v3Type == 1 and rl_lead1_truck:\n datrl.v3Type = 0\n datrl.v3Dx = float(rl_lead_clusters[0].dRel)\n datrl.v3Vrel = float(rl_lead_clusters[0].vRel)\n datrl.v3Dy = float(-rl_lead_clusters[0].yRel+ lane_offset)\n datrl.v3Id = int(rl_lead_clusters[0].track_id % 32)\n if rl_lead2_len > 0:\n datrl.v4Type = int(rl_lead2_clusters[0].oClass)\n if datrl.v4Type == 1 and rl_lead2_truck:\n datrl.v4Type = 0\n datrl.v4Dx = float(rl_lead2_clusters[0].dRel)\n datrl.v4Vrel = float(rl_lead2_clusters[0].vRel)\n datrl.v4Dy = float(-rl_lead2_clusters[0].yRel + lane_offset)\n datrl.v4Id = int(rl_lead2_clusters[0].track_id % 32)\n if (self.RI.TRACK_RIGHT_LANE or self.RI.TRACK_LEFT_LANE) and use_tesla_radar:\n self.icCarLR.send(datrl.to_bytes()) \n\n ### END REVIEW SECTION\n \n\n # *** publish radarState ***\n dat = messaging.new_message()\n dat.init('radarState')\n dat.valid = sm.all_alive_and_valid(service_list=['controlsState', 'model'])\n dat.radarState.mdMonoTime = self.last_md_ts\n dat.radarState.canMonoTimes = list(rr.canMonoTimes)\n dat.radarState.radarErrors = list(rr.errors)\n dat.radarState.controlsStateMonoTime = self.last_controls_state_ts\n\n datext = tesla.ICLeads.new_message()\n l1x = tesla.TeslaLeadPoint.new_message()\n l2x = tesla.TeslaLeadPoint.new_message()\n if has_radar:\n l1d,l1x = get_lead(self.v_ego, self.ready, clusters, sm['model'].lead, low_speed_override=True)\n l2d,l2x = get_lead(self.v_ego, self.ready, clusters, sm['model'].leadFuture, low_speed_override=False)\n dat.radarState.leadOne = l1d\n dat.radarState.leadTwo = l2d\n \n datext.lead1trackId = l1x['trackId']\n datext.lead1oClass = l1x['oClass']\n datext.lead1length = l1x['length']\n datext.lead2trackId = l2x['trackId']\n datext.lead2oClass = l2x['oClass']\n datext.lead2length = l2x['length']\n return dat, datext\n\n\n# fuses camera and radar data for best lead detection\ndef radard_thread(gctx=None):\n set_realtime_priority(2)\n\n # wait for stats about the car to come in from controls\n cloudlog.info(\"radard is waiting for CarParams\")\n CP = car.CarParams.from_bytes(Params().get(\"CarParams\", block=True))\n use_tesla_radar = CarSettings().get_value(\"useTeslaRadar\")\n mocked = (CP.carName == \"mock\") or ((CP.carName == \"tesla\") and not use_tesla_radar)\n cloudlog.info(\"radard got CarParams\")\n\n # import the radar from the fingerprint\n cloudlog.info(\"radard is importing %s\", CP.carName)\n RadarInterface = importlib.import_module('selfdrive.car.%s.radar_interface' % CP.carName).RadarInterface\n\n can_sock = messaging.sub_sock(service_list['can'].port)\n sm = messaging.SubMaster(['model', 'controlsState', 'liveParameters'])\n\n RI = RadarInterface(CP)\n\n # *** publish radarState and liveTracks\n radarState = messaging.pub_sock(service_list['radarState'].port)\n liveTracks = messaging.pub_sock(service_list['liveTracks'].port)\n icLeads = messaging.pub_sock(service_list['uiIcLeads'].port)\n\n rk = Ratekeeper(rate, print_delay_threshold=None)\n RD = RadarD(mocked, RI)\n\n has_radar = not CP.radarOffCan or mocked\n last_md_ts = 0.\n v_ego = 0.\n\n while 1:\n can_strings = messaging.drain_sock_raw(can_sock, wait_for_one=True)\n rr,rrext = RI.update(can_strings)\n\n if rr is None:\n continue\n\n sm.update(0)\n\n if sm.updated['controlsState']:\n v_ego = sm['controlsState'].vEgo\n\n \n\n dat,datext = RD.update(rk.frame, RI.delay, sm, rr, has_radar, rrext)\n 
dat.radarState.cumLagMs = -rk.remaining*1000.\n\n radarState.send(dat.to_bytes())\n icLeads.send(datext.to_bytes())\n\n # *** publish tracks for UI debugging (keep last) ***\n tracks = RD.tracks\n dat = messaging.new_message()\n dat.init('liveTracks', len(tracks))\n\n for cnt, ids in enumerate(tracks.keys()):\n dat.liveTracks[cnt] = {\n \"trackId\": ids,\n \"dRel\": float(tracks[ids].dRel),\n \"yRel\": float(tracks[ids].yRel),\n \"vRel\": float(tracks[ids].vRel),\n }\n liveTracks.send(dat.to_bytes())\n\n rk.monitor_time()\n\n\ndef main(gctx=None):\n radard_thread(gctx)\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "478127", "language": "Python", "matching_score": 4.729629993438721, "max_stars_count": 1, "path": "selfdrive/controls/radard.py" }, { "content": "from common.realtime import DT_MDL\nfrom common.kalman.simple_kalman import KF1D\nfrom selfdrive.car.tesla.readconfig import CarSettings\nfrom selfdrive.config import RADAR_TO_CENTER\nfrom common.numpy_fast import clip, interp\n\n# the longer lead decels, the more likely it will keep decelerating\n# TODO is this a good default?\n_LEAD_ACCEL_TAU = 1.5\n\n# radar tracks\nSPEED, ACCEL = 0, 1 # Kalman filter states enum\n\n# stationary qualification parameters\nv_ego_stationary = 4. # no stationary object flag below this speed\n\n# Lead Kalman Filter params\n_VLEAD_A = [[1.0, DT_MDL], [0.0, 1.0]]\n_VLEAD_C = [1.0, 0.0]\n#_VLEAD_Q = np.matrix([[10., 0.0], [0.0, 100.]])\n#_VLEAD_R = 1e3\n#_VLEAD_K = np.matrix([[ 0.05705578], [ 0.03073241]])\n_VLEAD_K = [[0.1988689], [0.28555364]]\n\nclass Track(object):\n def __init__(self):\n self.ekf = None\n self.cnt = 0\n\n def update(self, d_rel, y_rel, v_rel,measured, a_rel, vy_rel, oClass, length, track_id,movingState, d_path, v_ego_t_aligned,use_tesla_radar):\n \n # relative values, copy\n self.dRel = d_rel # LONG_DIST\n self.yRel = y_rel # -LAT_DIST\n self.vRel = v_rel # REL_SPEED\n self.aRel = a_rel # rel acceleration\n self.vLat = vy_rel # rel lateral speed\n self.oClass = oClass # object class\n self.length = length #length\n self.measured = measured # measured or estimate\n self.track_id = track_id\n self.dPath = d_path\n self.stationary = (movingState == 3)\n\n # computed velocity and accelerations\n self.vLead = self.vRel + v_ego_t_aligned\n\n \n if self.cnt == 0:\n self.kf = KF1D([[self.vLead], [0.0]], _VLEAD_A, _VLEAD_C, _VLEAD_K)\n else:\n self.kf.update(self.vLead)\n\n self.cnt += 1\n\n self.vLeadK = float(self.kf.x[SPEED][0])\n self.aLeadK = float(self.kf.x[ACCEL][0])\n\n # Learn if constant acceleration\n if abs(self.aLeadK) < 0.5:\n self.aLeadTau = _LEAD_ACCEL_TAU\n else:\n self.aLeadTau *= 0.9\n\n def get_key_for_cluster(self):\n # Weigh y higher since radar is inaccurate in this dimension\n return [self.dRel, self.yRel*2, self.vRel]\n\n def get_key_for_cluster_dy(self, dy):\n # Weigh y higher since radar is inaccurate in this dimension\n return [self.dRel, (self.yRel-dy)*2, self.vRel]\n\n def reset_a_lead(self, aLeadK, aLeadTau):\n self.kf = KF1D([[self.vLead], [aLeadK]], _VLEAD_A, _VLEAD_C, _VLEAD_K)\n self.aLeadK = aLeadK\n self.aLeadTau = aLeadTau\n\ndef mean(l):\n return sum(l) / len(l)\n\n\nclass Cluster(object):\n def __init__(self):\n self.tracks = set()\n #BB frame delay for dRel calculation, in seconds\n self.frame_delay = 0.2\n self.useTeslaRadar = CarSettings().get_value(\"useTeslaRadar\")\n\n def add(self, t):\n # add the first track\n self.tracks.add(t)\n\n # TODO: make generic\n @property\n def dRel(self):\n return min([t.dRel for t in self.tracks])\n\n 
@property\n def yRel(self):\n return mean([t.yRel for t in self.tracks])\n\n @property\n def vRel(self):\n return mean([t.vRel for t in self.tracks])\n\n @property\n def aRel(self):\n return mean([t.aRel for t in self.tracks])\n\n @property\n def vLead(self):\n return mean([t.vLead for t in self.tracks])\n\n @property\n def dPath(self):\n return mean([t.dPath for t in self.tracks])\n\n @property\n def vLat(self):\n return mean([t.vLat for t in self.tracks])\n\n @property\n def vLeadK(self):\n return mean([t.vLeadK for t in self.tracks])\n\n @property\n def aLeadK(self):\n if all(t.cnt <= 1 for t in self.tracks):\n return 0.\n else:\n return mean([t.aLeadK for t in self.tracks if t.cnt > 1])\n\n @property\n def aLeadTau(self):\n if all(t.cnt <= 1 for t in self.tracks):\n return _LEAD_ACCEL_TAU\n else:\n return mean([t.aLeadTau for t in self.tracks if t.cnt > 1])\n\n @property\n def measured(self):\n return any([t.measured for t in self.tracks])\n\n @property\n def oClass(self):\n return all([t.oClass for t in self.tracks])\n\n @property\n def length(self):\n return max([t.length for t in self.tracks])\n \n @property\n def track_id(self):\n return mean([t.track_id for t in self.tracks])\n \n @property\n def stationary(self):\n return all([t.stationary for t in self.tracks])\n\n def get_RadarState(self, model_prob=0.0):\n dRel_delta_estimate = 0.\n if self.useTeslaRadar:\n dRel_delta_estimate = (self.vRel + self.aRel * self.frame_delay / 2.) * self.frame_delay\n return {\n \"dRel\": float(self.dRel + dRel_delta_estimate),\n \"yRel\": float(self.yRel),\n \"vRel\": float(self.vRel),\n \"vLead\": float(self.vLead),\n \"vLeadK\": float(self.vLeadK),\n \"aLeadK\": float(self.aLeadK),\n \"status\": True,\n \"fcw\": self.is_potential_fcw(model_prob),\n \"aLeadTau\": float(self.aLeadTau),\n \"modelProb\": model_prob,\n \"radar\": True,\n }, {\n \"trackId\": int(self.track_id % 32),\n \"oClass\": int(self.oClass),\n \"length\": float(self.length),\n }\n\n def get_RadarState_from_vision(self, lead_msg, v_ego):\n return {\n \"dRel\": float(lead_msg.dist - RADAR_TO_CENTER),\n \"yRel\": float(lead_msg.relY),\n \"vRel\": float(lead_msg.relVel),\n \"vLead\": float(v_ego + lead_msg.relVel),\n \"vLeadK\": float(v_ego + lead_msg.relVel),\n \"aLeadK\": float(0),\n \"aLeadTau\": _LEAD_ACCEL_TAU,\n \"fcw\": False,\n \"modelProb\": float(lead_msg.prob),\n \"radar\": False,\n \"status\": True\n }\n\n def __str__(self):\n ret = \"x: %4.1f y: %4.1f v: %4.1f a: %4.1f\" % (self.dRel, self.yRel, self.vRel, self.aLeadK)\n return ret\n\n def is_potential_lead(self, v_ego):\n # predict cut-ins by extrapolating lateral speed by a lookahead time\n # lookahead time depends on cut-in distance. more attentive for close cut-ins\n # also, above 50 meters the predicted path isn't very reliable\n\n # the distance at which v_lat matters is higher at higher speed\n lookahead_dist = 40. + v_ego/1.2 #40m at 0mph, ~70m at 80mph\n\n t_lookahead_v = [1., 0.]\n t_lookahead_bp = [10., lookahead_dist]\n\n # average dist\n d_path = self.dPath\n\n # lat_corr used to be gated on enabled, now always running\n t_lookahead = interp(self.dRel, t_lookahead_bp, t_lookahead_v)\n\n # correct d_path for lookahead time, considering only cut-ins and no more than 1m impact.\n lat_corr = 0. # BB disables for now : clip(t_lookahead * self.vLat, -1., 1.) 
if self.measured else 0.\n\n # consider only cut-ins\n d_path = clip(d_path + lat_corr, min(0., d_path), max(0.,d_path))\n\n return abs(d_path) < 1.5 and not self.stationary and not self.oncoming\n\n def is_potential_lead_dy(self, v_ego,dy):\n # predict cut-ins by extrapolating lateral speed by a lookahead time\n # lookahead time depends on cut-in distance. more attentive for close cut-ins\n # also, above 50 meters the predicted path isn't very reliable\n\n # the distance at which v_lat matters is higher at higher speed\n lookahead_dist = 40. + v_ego/1.2 #40m at 0mph, ~70m at 80mph\n\n t_lookahead_v = [1., 0.]\n t_lookahead_bp = [10., lookahead_dist]\n\n # average dist\n d_path = self.dPath - dy\n\n # lat_corr used to be gated on enabled, now always running\n t_lookahead = interp(self.dRel, t_lookahead_bp, t_lookahead_v)\n\n # correct d_path for lookahead time, considering only cut-ins and no more than 1m impact.\n lat_corr = clip(t_lookahead * self.vLat, -1., 1.) if self.measured else 0.\n\n # consider only cut-ins\n d_path = clip(d_path + lat_corr, min(0., d_path), max(0.,d_path))\n\n return abs(d_path) < abs(dy/2.) and not self.stationary #and not self.oncoming\n\n def is_truck(self,lead_clusters):\n return False\n if len(lead_clusters) > 0:\n lead_cluster = lead_clusters[0]\n # check if the new lead is too close and roughly at the same speed of the first lead:\n # it might just be the second axle of the same vehicle\n return (self.dRel - lead_cluster.dRel < 4.5) and (self.dRel - lead_cluster.dRel > 0.5) and (abs(self.yRel - lead_cluster.yRel) < 2.) and (abs(self.vRel - lead_cluster.vRel) < 0.2)\n else:\n return False\n\n def is_potential_lead2(self, lead_clusters):\n if len(lead_clusters) > 0:\n lead_cluster = lead_clusters[0]\n return ((self.dRel - lead_cluster.dRel > 8.) and (lead_cluster.oClass > 0)) or ((self.dRel - lead_cluster.dRel > 15.) 
and (lead_cluster.oClass == 0)) or abs(self.vRel - lead_cluster.vRel) > 1.\n else:\n return False\n\n \n def potential_low_speed_lead(self, v_ego):\n # stop for stuff in front of you and low speed, even without model confirmation\n return abs(self.yRel) < 1.5 and (v_ego < v_ego_stationary) and self.dRel < 25\n\n def is_potential_fcw(self, model_prob):\n return model_prob > .9\n", "id": "9403109", "language": "Python", "matching_score": 2.7903645038604736, "max_stars_count": 1, "path": "selfdrive/controls/lib/radar_helpers.py" }, { "content": "import datetime\nfrom cereal import log,tesla\nfrom common.params import Params\nfrom collections import namedtuple\nfrom common.numpy_fast import clip, interp\nfrom common.realtime import DT_CTRL\nfrom selfdrive.car.tesla import teslacan\nfrom selfdrive.car.tesla.values import AH, CM\nfrom selfdrive.can.packer import CANPacker\nfrom selfdrive.config import Conversions as CV\nfrom selfdrive.car.modules.ALCA_module import ALCAController\nfrom selfdrive.car.modules.GYRO_module import GYROController\nfrom selfdrive.car.tesla.ACC_module import ACCController\nfrom selfdrive.car.tesla.PCC_module import PCCController\nfrom selfdrive.car.tesla.HSO_module import HSOController\nfrom selfdrive.car.tesla.movingaverage import MovingAverage\nimport zmq\nimport selfdrive.messaging as messaging\nfrom selfdrive.services import service_list\n\n# Steer angle limits\nANGLE_MAX_BP = [0., 27., 36.]\nANGLE_MAX_V = [410., 92., 36.]\n\nANGLE_DELTA_BP = [0., 5., 15.]\nANGLE_DELTA_V = [5., .8, .25] # windup limit\nANGLE_DELTA_VU = [5., 3.5, 0.8] # unwind limit\n#steering adjustment with speed\nDES_ANGLE_ADJUST_FACTOR_BP = [0.,13., 44.]\nDES_ANGLE_ADJUST_FACTOR = [1.0, 1.0, 1.0]\n\n#LDW WARNING LEVELS\nLDW_WARNING_1 = 1.0\nLDW_WARNING_2 = 0.9\nLDW_LANE_PROBAB = 0.3\n\ndef gen_solution(CS):\n fix = 0\n if CS.gpsAccuracy < 2:\n fix = 1\n timestamp = int(((datetime.datetime.now() - datetime.datetime(1970,1,1)).total_seconds())*1e+03)\n gps_fix = {'bearing': CS.gpsHeading, # heading of motion in degrees\n 'altitude': CS.gpsElevation, # altitude above ellipsoid\n 'latitude': CS.gpsLatitude, # latitude in degrees\n 'longitude': CS.gpsLongitude, # longitude in degrees\n 'speed': CS.gpsVehicleSpeed, # ground speed in meters\n 'accuracy': CS.gpsAccuracy, # horizontal accuracy (1 sigma?)\n 'timestamp': timestamp, # UTC time in ms since start of UTC stime\n 'vNED': [0.,0.,0.], # velocity in NED frame in m/s\n 'speedAccuracy': 0., # speed accuracy in m/s\n 'verticalAccuracy': 0., # vertical accuracy in meters\n 'bearingAccuracy': 0., # heading accuracy in degrees\n 'source': 'ublox',\n 'flags': fix, # 1 of gpsAccuracy less than 2 meters\n }\n return log.Event.new_message(gpsLocationExternal=gps_fix)\n\ndef process_hud_alert(hud_alert):\n # initialize to no alert\n fcw_display = 0\n steer_required = 0\n acc_alert = 0\n if hud_alert == AH.NONE: # no alert\n pass\n elif hud_alert == AH.FCW: # FCW\n fcw_display = hud_alert[1]\n elif hud_alert == AH.STEER: # STEER\n steer_required = hud_alert[1]\n else: # any other ACC alert\n acc_alert = hud_alert[1]\n\n return fcw_display, steer_required, acc_alert\n\n\nHUDData = namedtuple(\"HUDData\",\n [\"pcm_accel\", \"v_cruise\", \"mini_car\", \"car\", \"X4\",\n \"lanes\", \"beep\", \"chime\", \"fcw\", \"acc_alert\", \"steer_required\"])\n\n\n\nclass CarController(object):\n def __init__(self, dbc_name):\n self.alcaStateData = None\n self.icLeadsData = None\n self.params = Params()\n self.braking = False\n self.brake_steady = 0.\n self.brake_last = 0.\n 
self.packer = CANPacker(dbc_name)\n self.epas_disabled = True\n self.last_angle = 0.\n self.last_accel = 0.\n self.ALCA = ALCAController(self,True,True) # Enabled and SteerByAngle both True\n self.ACC = ACCController(self)\n self.PCC = PCCController(self)\n self.HSO = HSOController(self)\n self.GYRO = GYROController()\n self.sent_DAS_bootID = False\n self.poller = zmq.Poller()\n self.speedlimit = None\n self.trafficevents = messaging.sub_sock(service_list['trafficEvents'].port, conflate=True, poller=self.poller)\n self.pathPlan = messaging.sub_sock(service_list['pathPlan'].port, conflate=True, poller=self.poller)\n self.radarState = messaging.sub_sock(service_list['radarState'].port, conflate=True, poller=self.poller)\n self.icLeads = messaging.sub_sock(service_list['uiIcLeads'].port, conflate=True, poller=self.poller)\n self.icCarLR = messaging.sub_sock(service_list['uiIcCarLR'].port, conflate=True, poller=self.poller)\n self.alcaState = messaging.sub_sock(service_list['alcaState'].port, conflate=True, poller=self.poller)\n self.gpsLocationExternal = None \n self.speedlimit_ms = 0.\n self.speedlimit_valid = False\n self.speedlimit_units = 0\n self.opState = 0 # 0-disabled, 1-enabled, 2-disabling, 3-unavailable, 5-warning\n self.accPitch = 0.\n self.accRoll = 0.\n self.accYaw = 0.\n self.magPitch = 0.\n self.magRoll = 0.\n self.magYaw = 0.\n self.gyroPitch = 0.\n self.gyroRoll = 0.\n self.gyroYaw = 0.\n self.set_speed_limit_active = False\n self.speed_limit_offset = 0.\n self.speed_limit_for_cc = 0.\n\n # for warnings\n self.warningCounter = 0\n self.DAS_206_apUnavailable = 0\n self.DAS_222_accCameraBlind = 0 #DAS_206 lkas not ebabled\n self.DAS_219_lcTempUnavailableSpeed = 0\n self.DAS_220_lcTempUnavailableRoad = 0\n self.DAS_221_lcAborting = 0\n self.DAS_211_accNoSeatBelt = 0\n self.DAS_207_lkasUnavailable = 0 #use for manual steer?\n self.DAS_208_rackDetected = 0 #use for low battery?\n self.DAS_202_noisyEnvironment = 0 #use for planner error?\n self.DAS_025_steeringOverride = 0 #another one to use for manual steer?\n self.warningNeeded = 0\n\n # items for IC integration for Lane and Lead Car\n self.average_over_x_pathplan_values = 1 \n self.curv0Matrix = MovingAverage(self.average_over_x_pathplan_values)\n self.curv1Matrix = MovingAverage(self.average_over_x_pathplan_values) \n self.curv2Matrix = MovingAverage(self.average_over_x_pathplan_values) \n self.curv3Matrix = MovingAverage(self.average_over_x_pathplan_values)\n self.leadDxMatrix = MovingAverage(self.average_over_x_pathplan_values)\n self.leadDyMatrix = MovingAverage(self.average_over_x_pathplan_values)\n self.leadDx = 0.\n self.leadDy = 0.\n self.leadClass = 0\n self.leadVx = 0.\n self.leadId = 0\n self.lead2Dx = 0.\n self.lead2Dy = 0.\n self.lead2Class = 0\n self.lead2Vx = 0.\n self.lead2Id = 0\n self.lLine = 0\n self.rLine = 0\n self.curv0 = 0. \n self.curv1 = 0. \n self.curv2 = 0. \n self.curv3 = 0. \n self.visionCurvC0 = 0.\n self.laneRange = 50 #max is 160m but OP has issues with precision beyond 50\n self.useZeroC0 = False\n self.useMap = False\n self.clipC0 = False\n self.useMapOnly = False\n self.laneWidth = 0.\n\n self.stopSign_visible = False\n self.stopSign_distance = 1000.\n self.stopSign_action = 0\n self.stopSign_resume = False\n\n self.stopLight_visible = False\n self.stopLight_distance = 1000.\n self.stopLight_action = 0\n self.stopLight_resume = False\n self.stopLight_color = 0. 
#0-unknown, 1-red, 2-yellow, 3-green\n\n self.stopSignWarning = 0\n self.stopLightWarning = 0\n self.stopSignWarning_last = 0\n self.stopLightWarning_last = 0\n self.roadSignType = 0xFF\n self.roadSignStopDist = 1000.\n self.roadSignColor = 0.\n self.roadSignControlActive = 0\n self.roadSignType_last = 0xFF\n\n self.roadSignDistanceWarning = 50.\n\n self.alca_enabled = False\n self.ldwStatus = 0\n self.prev_ldwStatus = 0\n\n self.radarVin_idx = 0\n\n self.isMetric = (self.params.get(\"IsMetric\") == \"1\")\n\n def reset_traffic_events(self):\n self.stopSign_visible = False\n self.stopSign_distance = 1000.\n self.stopSign_action = 0\n self.stopSign_resume = False\n\n self.stopLight_visible = False\n self.stopLight_distance = 1000.\n self.stopLight_action = 0\n self.stopLight_resume = False\n self.stopLight_color = 0. #0-unknown, 1-red, 2-yellow, 3-green\n\n def checkWhichSign(self):\n self.stopSignWarning = 0\n self.stopLightWarning = 0\n self.roadSignType_last = self.roadSignType\n self.roadSignType = 0xFF\n self.roadSignStopDist = 1000.\n self.roadSignColor = 0\n self.roadSignControlActive = 0\n if (self.stopSign_distance < self.stopLight_distance):\n self.roadSignType = 0x00\n self.roadSignStopDist = self.stopSign_distance\n self.roadSignColor = 0\n self.roadSignControlActive = self.stopSign_resume\n if (self.stopSign_distance < self.roadSignDistanceWarning ):\n self.stopSignWarning = 1\n elif (self.stopLight_distance < self.stopSign_distance ):\n self.roadSignType = 0x01\n self.roadSignStopDist = self.stopLight_distance\n self.roadSignColor = self.stopLight_color\n self.roadSignControlActive = self.stopLight_resume\n if (self.stopLight_distance < self.roadSignDistanceWarning ) and (self.roadSignColor == 1):\n self.stopLightWarning = 1\n \n def update(self, enabled, CS, frame, actuators, \\\n pcm_speed, pcm_override, pcm_cancel_cmd, pcm_accel, \\\n hud_v_cruise, hud_show_lanes, hud_show_car, hud_alert, \\\n snd_beep, snd_chime,leftLaneVisible,rightLaneVisible):\n\n if (not enabled) and (self.ALCA.laneChange_cancelled):\n self.ALCA.laneChange_cancelled = False\n self.ALCA.laneChange_cancelled_counter = 0\n self.warningNeeded = 1\n if self.warningCounter > 0:\n self.warningCounter = self.warningCounter - 1\n if self.warningCounter == 0:\n self.warningNeeded = 1\n if self.warningCounter == 0 or not enabled:\n # when zero reset all warnings\n self.DAS_222_accCameraBlind = 0 #we will see what we can use this for\n self.DAS_219_lcTempUnavailableSpeed = 0\n self.DAS_220_lcTempUnavailableRoad = 0\n self.DAS_221_lcAborting = 0\n self.DAS_211_accNoSeatBelt = 0\n self.DAS_207_lkasUnavailable = 0 #use for manual not in drive?\n self.DAS_208_rackDetected = 0 #use for low battery?\n self.DAS_202_noisyEnvironment = 0 #use for planner error?\n self.DAS_025_steeringOverride = 0 #use for manual steer?\n self.DAS_206_apUnavailable = 0 #Ap disabled from CID\n\n if CS.keepEonOff:\n if CS.cstm_btns.get_button_status(\"dsp\") != 9:\n CS.cstm_btns.set_button_status(\"dsp\",9)\n else:\n if CS.cstm_btns.get_button_status(\"dsp\") != 1:\n CS.cstm_btns.set_button_status(\"dsp\",1) \n # \"\"\" Controls thread \"\"\"\n\n if not CS.useTeslaMapData:\n if self.speedlimit is None:\n self.speedlimit = messaging.sub_sock(service_list['liveMapData'].port, conflate=True, poller=self.poller)\n\n\n # *** no output if not enabled ***\n if not enabled and CS.pcm_acc_status:\n # send pcm acc cancel cmd if drive is disabled but pcm is still on, or if the system can't be activated\n pcm_cancel_cmd = True\n\n # vehicle hud display, 
wait for one update from 10Hz 0x304 msg\n if hud_show_lanes:\n hud_lanes = 1\n else:\n hud_lanes = 0\n\n # TODO: factor this out better\n if enabled:\n if hud_show_car:\n hud_car = 2\n else:\n hud_car = 1\n else:\n hud_car = 0\n \n # For lateral control-only, send chimes as a beep since we don't send 0x1fa\n #if CS.CP.radarOffCan:\n\n #print chime, alert_id, hud_alert\n fcw_display, steer_required, acc_alert = process_hud_alert(hud_alert)\n\n hud = HUDData(int(pcm_accel), int(round(hud_v_cruise)), 1, hud_car,\n 0xc1, hud_lanes, int(snd_beep), snd_chime, fcw_display, acc_alert, steer_required)\n \n if not all(isinstance(x, int) and 0 <= x < 256 for x in hud):\n print \"INVALID HUD\", hud\n hud = HUDData(0xc6, 255, 64, 0xc0, 209, 0x40, 0, 0, 0, 0)\n\n # **** process the car messages ****\n\n # *** compute control surfaces ***\n\n STEER_MAX = 420\n # Prevent steering while stopped\n MIN_STEERING_VEHICLE_VELOCITY = 0.05 # m/s\n vehicle_moving = (CS.v_ego >= MIN_STEERING_VEHICLE_VELOCITY)\n \n # Basic highway lane change logic\n changing_lanes = CS.right_blinker_on or CS.left_blinker_on\n\n #upodate custom UI buttons and alerts\n CS.UE.update_custom_ui()\n \n if (frame % 100 == 0):\n CS.cstm_btns.send_button_info()\n #read speed limit params\n if CS.hasTeslaIcIntegration:\n self.set_speed_limit_active = True\n self.speed_limit_offset = CS.userSpeedLimitOffsetKph\n self.speed_limit_for_cc = CS.userSpeedLimitKph\n #print self.speed_limit_for_cc\n else:\n self.set_speed_limit_active = (self.params.get(\"SpeedLimitOffset\") is not None) and (self.params.get(\"LimitSetSpeed\") == \"1\")\n if self.set_speed_limit_active:\n self.speed_limit_offset = float(self.params.get(\"SpeedLimitOffset\"))\n else:\n self.speed_limit_offset = 0.\n if not self.isMetric:\n self.speed_limit_offset = self.speed_limit_offset * CV.MPH_TO_MS\n if CS.useTeslaGPS:\n if self.gpsLocationExternal is None:\n self.gpsLocationExternal = messaging.pub_sock(service_list['gpsLocationExternal'].port)\n sol = gen_solution(CS)\n sol.logMonoTime = int(frame * DT_CTRL * 1e9)\n self.gpsLocationExternal.send(sol.to_bytes())\n\n #get pitch/roll/yaw every 0.1 sec\n if (frame %10 == 0):\n (self.accPitch, self.accRoll, self.accYaw),(self.magPitch, self.magRoll, self.magYaw),(self.gyroPitch, self.gyroRoll, self.gyroYaw) = self.GYRO.update(CS.v_ego,CS.a_ego,CS.angle_steers)\n CS.UE.uiGyroInfoEvent(self.accPitch, self.accRoll, self.accYaw,self.magPitch, self.magRoll, self.magYaw,self.gyroPitch, self.gyroRoll, self.gyroYaw)\n\n # Update statuses for custom buttons every 0.1 sec.\n if (frame % 10 == 0):\n #self.ALCA.update_status(False) \n self.ALCA.update_status((CS.cstm_btns.get_button_status(\"alca\") > 0) and ((CS.enableALCA and not CS.hasTeslaIcIntegration) or (CS.hasTeslaIcIntegration and CS.alcaEnabled)))\n \n pedal_can_sends = []\n \n if CS.pedal_interceptor_available:\n #update PCC module info\n pedal_can_sends = self.PCC.update_stat(CS, True)\n self.ACC.enable_adaptive_cruise = False\n else:\n # Update ACC module info.\n self.ACC.update_stat(CS, True)\n self.PCC.enable_pedal_cruise = False\n \n # Update HSO module info.\n human_control = False\n\n # update CS.v_cruise_pcm based on module selected.\n if self.ACC.enable_adaptive_cruise:\n CS.v_cruise_pcm = self.ACC.acc_speed_kph\n elif self.PCC.enable_pedal_cruise:\n CS.v_cruise_pcm = self.PCC.pedal_speed_kph\n else:\n CS.v_cruise_pcm = max(0.,CS.v_ego * CV.MS_TO_KPH +0.5) #BB try v_ego to reduce the false FCW warnings; was: vCS.v_cruise_actual\n # Get the turn signal from ALCA.\n 
turn_signal_needed, self.alca_enabled = self.ALCA.update(enabled, CS, actuators)\n apply_angle = -actuators.steerAngle # Tesla is reversed vs OP.\n human_control = self.HSO.update_stat(self,CS, enabled, actuators, frame)\n human_lane_changing = changing_lanes and not self.alca_enabled\n enable_steer_control = (enabled\n and not human_lane_changing\n and not human_control \n and vehicle_moving)\n \n angle_lim = interp(CS.v_ego, ANGLE_MAX_BP, ANGLE_MAX_V)\n apply_angle = clip(apply_angle, -angle_lim, angle_lim)\n # Windup slower.\n if self.last_angle * apply_angle > 0. and abs(apply_angle) > abs(self.last_angle):\n angle_rate_lim = interp(CS.v_ego, ANGLE_DELTA_BP, ANGLE_DELTA_V)\n else:\n angle_rate_lim = interp(CS.v_ego, ANGLE_DELTA_BP, ANGLE_DELTA_VU)\n\n des_angle_factor = interp(CS.v_ego, DES_ANGLE_ADJUST_FACTOR_BP, DES_ANGLE_ADJUST_FACTOR )\n if self.alca_enabled or not CS.enableSpeedVariableDesAngle:\n des_angle_factor = 1.\n #BB disable limits to test 0.5.8\n # apply_angle = clip(apply_angle * des_angle_factor, self.last_angle - angle_rate_lim, self.last_angle + angle_rate_lim) \n # If human control, send the steering angle as read at steering wheel.\n if human_control:\n apply_angle = CS.angle_steers\n\n # Send CAN commands.\n can_sends = []\n\n #if using radar, we need to send the VIN\n if CS.useTeslaRadar and (frame % 100 == 0):\n useRadar=0\n if CS.useTeslaRadar:\n useRadar=1\n can_sends.append(teslacan.create_radar_VIN_msg(self.radarVin_idx,CS.radarVIN,1,0x108,useRadar,CS.radarPosition,CS.radarEpasType))\n self.radarVin_idx += 1\n self.radarVin_idx = self.radarVin_idx % 3\n\n #First we emulate DAS.\n # DAS_longC_enabled (1),DAS_speed_override (1),DAS_apUnavailable (1), DAS_collision_warning (1), DAS_op_status (4)\n # DAS_speed_kph(8), \n # DAS_turn_signal_request (2),DAS_forward_collision_warning (2), DAS_hands_on_state (4), \n # DAS_cc_state (2), DAS_usingPedal(1),DAS_alca_state (5),\n # DAS_acc_speed_limit_mph (8), \n # DAS_speed_limit_units(8)\n #send fake_das data as 0x553\n # TODO: forward collission warning\n\n if CS.hasTeslaIcIntegration:\n self.set_speed_limit_active = True\n self.speed_limit_offset = CS.userSpeedLimitOffsetKph\n # only change the speed limit when we have a valid vaue\n if CS.userSpeedLimitKph >= 10:\n self.speed_limit_for_cc = CS.userSpeedLimitKph\n\n if CS.useTeslaMapData: \n self.speedlimit_ms = CS.speedLimitKph * CV.KPH_TO_MS\n self.speedlimit_valid = True\n if self.speedlimit_ms == 0:\n self.speedlimit_valid = False\n self.speedlimit_units = self.speedUnits(fromMetersPerSecond = self.speedlimit_ms)\n if frame % 10 == 0:\n for socket, _ in self.poller.poll(1):\n if socket is self.speedlimit and not CS.useTeslaMapData:\n #get speed limit\n lmd = messaging.recv_one(socket).liveMapData\n self.speedlimit_ms = lmd.speedLimit\n self.speedlimit_valid = lmd.speedLimitValid\n self.speedlimit_units = self.speedUnits(fromMetersPerSecond = self.speedlimit_ms)\n self.speed_limit_for_cc = self.speedlimit_ms * CV.MS_TO_KPH\n elif socket is self.icLeads:\n self.icLeadsData = tesla.ICLeads.from_bytes(socket.recv())\n elif socket is self.radarState:\n #to show lead car on IC\n if self.icLeadsData is not None:\n can_messages = self.showLeadCarOnICCanMessage(radarSocket = socket)\n can_sends.extend(can_messages)\n elif socket is self.alcaState:\n self.alcaStateData = tesla.ALCAState.from_bytes(socket.recv())\n elif socket is self.pathPlan:\n #to show curvature and lanes on IC\n if self.alcaStateData is not None:\n self.handlePathPlanSocketForCurvatureOnIC(pathPlanSocket 
= socket, alcaStateData = self.alcaStateData,CS = CS)\n elif socket is self.icCarLR:\n can_messages = self.showLeftAndRightCarsOnICCanMessages(icCarLRSocket = socket)\n can_sends.extend(can_messages)\n elif socket is self.trafficevents:\n can_messages = self.handleTrafficEvents(trafficEventsSocket = socket)\n can_sends.extend(can_messages)\n\n if (CS.roadCurvRange > 20) and self.useMap:\n if self.useZeroC0:\n self.curv0 = 0.\n elif self.clipC0:\n self.curv0 = -clip(CS.roadCurvC0,-0.5,0.5)\n #else:\n # self.curv0 = -CS.roadCurvC0\n #if CS.v_ego > 9:\n # self.curv1 = -CS.roadCurvC1\n #else:\n # self.curv1 = 0.\n self.curv2 = -CS.roadCurvC2\n self.curv3 = -CS.roadCurvC3\n self.laneRange = CS.roadCurvRange\n #else:\n # self.curv0 = 0.\n # self.curv1 = 0.\n # self.curv2 = 0.\n # self.curv3 = 0.\n # self.laneRange = 0\n \n if (CS.csaRoadCurvRange > 2.) and self.useMap and not self.useMapOnly:\n self.curv2 = -CS.csaRoadCurvC2\n self.curv3 = -CS.csaRoadCurvC3\n #if self.laneRange > 0:\n # self.laneRange = min(self.laneRange,CS.csaRoadCurvRange)\n #else:\n self.laneRange = CS.csaRoadCurvRange\n elif (CS.csaOfframpCurvRange > 2.) and self.useMap and not self.useMapOnly:\n #self.curv2 = -CS.csaOfframpCurvC2\n #self.curv3 = -CS.csaOfframpCurvC3\n #self.curv0 = 0.\n #self.curv1 = 0.\n #if self.laneRange > 0:\n # self.laneRange = min(self.laneRange,CS.csaOfframpCurvRange)\n #else:\n self.laneRange = CS.csaOfframpCurvRange\n else:\n self.laneRange = 50\n self.laneRange = int(clip(self.laneRange,0,159))\n op_status = 0x02\n hands_on_state = 0x00\n forward_collision_warning = 0 #1 if needed\n if hud_alert == AH.FCW:\n forward_collision_warning = hud_alert[1]\n if forward_collision_warning > 1:\n forward_collision_warning = 1\n #cruise state: 0 unavailable, 1 available, 2 enabled, 3 hold\n cc_state = 1 \n speed_limit_to_car = int(self.speedlimit_units)\n alca_state = 0x00 \n \n speed_override = 0\n collision_warning = 0x00\n acc_speed_limit_mph = 0\n speed_control_enabled = 0\n accel_min = -15\n accel_max = 5\n acc_speed_kph = 0\n if enabled:\n #self.opState 0-disabled, 1-enabled, 2-disabling, 3-unavailable, 5-warning\n if self.opState == 0:\n op_status = 0x02\n if self.opState == 1:\n op_status = 0x03\n if self.opState == 2:\n op_status = 0x08\n if self.opState == 3:\n op_status = 0x01\n if self.opState == 5:\n op_status = 0x03\n alca_state = 0x08 + turn_signal_needed\n #canceled by user\n if self.ALCA.laneChange_cancelled and (self.ALCA.laneChange_cancelled_counter > 0):\n alca_state = 0x14\n #min speed for ALCA\n if CS.CL_MIN_V > CS.v_ego:\n alca_state = 0x05\n if not enable_steer_control:\n #op_status = 0x08\n hands_on_state = 0x02\n if hud_alert == AH.STEER:\n if snd_chime == CM.MUTE:\n hands_on_state = 0x03\n else:\n hands_on_state = 0x05\n acc_speed_limit_mph = max(self.ACC.acc_speed_kph * CV.KPH_TO_MPH,1)\n if CS.pedal_interceptor_available:\n acc_speed_limit_mph = max(self.PCC.pedal_speed_kph * CV.KPH_TO_MPH,1)\n acc_speed_kph = self.PCC.pedal_speed_kph\n if hud_alert == AH.FCW:\n collision_warning = hud_alert[1]\n if collision_warning > 1:\n collision_warning = 1\n #use disabling for alerts/errors to make them aware someting is goin going on\n if (snd_chime == CM.DOUBLE) or (hud_alert == AH.FCW):\n op_status = 0x08\n if self.ACC.enable_adaptive_cruise:\n acc_speed_kph = self.ACC.new_speed #pcm_speed * CV.MS_TO_KPH\n if (CS.pedal_interceptor_available and self.PCC.enable_pedal_cruise) or (self.ACC.enable_adaptive_cruise):\n speed_control_enabled = 1\n cc_state = 2\n CS.speed_control_enabled = 1\n 
else:\n CS.speed_control_enabled = 0\n if (CS.pcm_acc_status == 4):\n #car CC enabled but not OP, display the HOLD message\n cc_state = 3\n\n send_fake_msg = False\n send_fake_warning = False\n\n if enabled:\n if frame % 2 == 0:\n send_fake_msg = True\n if frame % 25 == 0:\n send_fake_warning = True\n else:\n if frame % 23 == 0:\n send_fake_msg = True\n if frame % 60 == 0:\n send_fake_warning = True\n if frame % 10 == 0:\n can_sends.append(teslacan.create_fake_DAS_obj_lane_msg(self.leadDx,self.leadDy,self.leadClass,self.rLine,self.lLine,self.curv0,self.curv1,self.curv2,self.curv3,self.laneRange,self.laneWidth))\n speed_override = 0\n if (CS.pedal_interceptor_value > 10) and (cc_state > 1):\n speed_override = 0 #force zero for now\n if (not enable_steer_control) and op_status == 3:\n #hands_on_state = 0x03\n self.DAS_219_lcTempUnavailableSpeed = 1\n self.warningCounter = 100\n self.warningNeeded = 1\n if enabled and self.ALCA.laneChange_cancelled and (not CS.steer_override) and (not CS.blinker_on) and (self.ALCA.laneChange_cancelled_counter > 0): \n self.DAS_221_lcAborting = 1\n self.warningCounter = 300\n self.warningNeeded = 1\n if send_fake_msg:\n if enable_steer_control and op_status == 3:\n op_status = 0x5\n can_sends.append(teslacan.create_fake_DAS_msg(speed_control_enabled,speed_override,self.DAS_206_apUnavailable, collision_warning, op_status, \\\n acc_speed_kph, \\\n turn_signal_needed,forward_collision_warning,hands_on_state, \\\n cc_state, 1 if (CS.pedal_interceptor_available) else 0,alca_state, \\\n #acc_speed_limit_mph,\n CS.v_cruise_pcm * CV.KPH_TO_MPH, \n speed_limit_to_car,\n apply_angle,\n 1 if enable_steer_control else 0))\n if send_fake_warning or (self.opState == 2) or (self.opState == 5) or (self.stopSignWarning != self.stopSignWarning_last) or (self.stopLightWarning != self.stopLightWarning_last) or (self.warningNeeded == 1) or (frame % 100 == 0):\n #if it's time to send OR we have a warning or emergency disable\n can_sends.append(teslacan.create_fake_DAS_warning(self.DAS_211_accNoSeatBelt, CS.DAS_canErrors, \\\n self.DAS_202_noisyEnvironment, CS.DAS_doorOpen, CS.DAS_notInDrive, CS.enableDasEmulation, CS.enableRadarEmulation, \\\n self.stopSignWarning, self.stopLightWarning, \\\n self.DAS_222_accCameraBlind, self.DAS_219_lcTempUnavailableSpeed, self.DAS_220_lcTempUnavailableRoad, self.DAS_221_lcAborting, \\\n self.DAS_207_lkasUnavailable,self.DAS_208_rackDetected, self.DAS_025_steeringOverride,self.ldwStatus,0,CS.useWithoutHarness))\n self.stopLightWarning_last = self.stopLightWarning\n self.stopSignWarning_last = self.stopSignWarning\n self.warningNeeded = 0\n # end of DAS emulation \"\"\"\n if frame % 100 == 0: # and CS.hasTeslaIcIntegration:\n #IF WE HAVE softPanda RUNNING, send a message every second to say we are still awake\n can_sends.append(teslacan.create_fake_IC_msg())\n idx = frame % 16\n cruise_btn = None\n # send enabled ethernet every 0.2 sec\n if frame % 20 == 0:\n can_sends.append(teslacan.create_enabled_eth_msg(1))\n if self.ACC.enable_adaptive_cruise and not CS.pedal_interceptor_available:\n cruise_btn = self.ACC.update_acc(enabled, CS, frame, actuators, pcm_speed, \\\n self.speed_limit_for_cc, self.speedlimit_valid, \\\n self.set_speed_limit_active, self.speed_limit_offset)\n if cruise_btn:\n cruise_msg = teslacan.create_cruise_adjust_msg(\n spdCtrlLvr_stat=cruise_btn,\n turnIndLvr_Stat= 0, #turn_signal_needed,\n real_steering_wheel_stalk=CS.steering_wheel_stalk)\n # Send this CAN msg first because it is racing against the real stalk.\n 
can_sends.insert(0, cruise_msg)\n apply_accel = 0.\n if CS.pedal_interceptor_available and frame % 5 == 0: # pedal processed at 20Hz\n apply_accel, accel_needed, accel_idx = self.PCC.update_pdl(enabled, CS, frame, actuators, pcm_speed, \\\n self.speed_limit_for_cc * CV.KPH_TO_MS, self.speedlimit_valid, \\\n self.set_speed_limit_active, self.speed_limit_offset * CV.KPH_TO_MS, self.alca_enabled)\n can_sends.append(teslacan.create_pedal_command_msg(apply_accel, int(accel_needed), accel_idx))\n self.last_angle = apply_angle\n self.last_accel = apply_accel\n \n return pedal_can_sends + can_sends\n\n #to show lead car on IC\n def showLeadCarOnICCanMessage(self, radarSocket):\n messages = []\n leads = messaging.recv_one(radarSocket).radarState\n if leads is None:\n return messages\n lead_1 = leads.leadOne\n lead_2 = leads.leadTwo\n if (lead_1 is not None) and lead_1.status:\n self.leadDx = lead_1.dRel\n self.leadDy = self.curv0-lead_1.yRel\n self.leadId = self.icLeadsData.lead1trackId\n self.leadClass = self.icLeadsData.lead1oClass \n self.leadVx = lead_1.vRel\n if (self.leadId <= 0) or (self.leadId == 63):\n self.leadId = 61\n else:\n self.leadDx = 0.\n self.leadDy = 0.\n self.leadClass = 0\n self.leadId = 0\n self.leadVx = 0xF\n if (lead_2 is not None) and lead_2.status:\n self.lead2Dx = lead_2.dRel\n self.lead2Dy = self.curv0-lead_2.yRel\n self.lead2Id = self.icLeadsData.lead2trackId\n self.lead2Class = self.icLeadsData.lead2oClass \n self.lead2Vx = lead_2.vRel\n if (self.lead2Id <= 0) or (self.lead2Id == 63):\n self.leadId = 62\n else:\n self.lead2Dx = 0.\n self.lead2Dy = 0.\n self.lead2Class = 0\n self.lead2Id = 0\n self.lead2Vx = 0xF\n messages.append(teslacan.create_DAS_LR_object_msg(0,self.leadClass, self.leadId,\n self.leadDx,self.leadDy,self.leadVx,self.lead2Class,\n self.lead2Id,self.lead2Dx,self.lead2Dy,self.lead2Vx))\n return messages\n\n def handlePathPlanSocketForCurvatureOnIC(self, pathPlanSocket, alcaStateData, CS):\n pp = messaging.recv_one(pathPlanSocket).pathPlan\n if pp.paramsValid:\n if pp.lProb > 0.75:\n self.lLine = 3\n elif pp.lProb > 0.5:\n self.lLine = 2\n elif pp.lProb > 0.25:\n self.lLine = 1\n else:\n self.lLine = 0\n if pp.rProb > 0.75:\n self.rLine = 3\n elif pp.rProb > 0.5:\n self.rLine = 2\n elif pp.rProb > 0.25:\n self.rLine = 1\n else:\n self.rLine = 0\n #first we clip to the AP limits of the coefficients\n self.curv0 = -clip(pp.dPoly[3],-3.5,3.5) #self.curv0Matrix.add(-clip(pp.cPoly[3],-3.5,3.5))\n self.curv1 = -clip(pp.dPoly[2],-0.2,0.2) #self.curv1Matrix.add(-clip(pp.cPoly[2],-0.2,0.2))\n self.curv2 = -clip(pp.dPoly[1],-0.0025,0.0025) #self.curv2Matrix.add(-clip(pp.cPoly[1],-0.0025,0.0025))\n self.curv3 = -clip(pp.dPoly[0],-0.00003,0.00003) #self.curv3Matrix.add(-clip(pp.cPoly[0],-0.00003,0.00003))\n self.laneWidth = pp.laneWidth\n self.laneRange = 50 # it is fixed in OP at 50m pp.viewRange\n self.visionCurvC0 = self.curv0\n self.prev_ldwStatus = self.ldwStatus\n self.ldwStatus = 0\n if (self.ALCA.laneChange_direction != 0) and alcaStateData.alcaError:\n self.ALCA.stop_ALCA(CS)\n if self.alca_enabled:\n #exagerate position a little during ALCA to make lane change look smoother on IC\n if self.ALCA.laneChange_over_the_line:\n self.curv0 = self.ALCA.laneChange_direction * self.laneWidth - self.curv0\n self.curv0 = clip(self.curv0, -3.5, 3.5)\n else:\n if CS.enableLdw and (not CS.blinker_on) and (CS.v_ego > 15.6) and (not CS.steer_override):\n if pp.lProb > LDW_LANE_PROBAB:\n lLaneC0 = -pp.lPoly[3]\n if abs(lLaneC0) < LDW_WARNING_2:\n self.ldwStatus = 3\n elif 
abs(lLaneC0) < LDW_WARNING_1:\n self.ldwStatus = 1\n if pp.rProb > LDW_LANE_PROBAB:\n rLaneC0 = -pp.rPoly[3]\n if abs(rLaneC0) < LDW_WARNING_2:\n self.ldwStatus = 3\n elif abs(rLaneC0) < LDW_WARNING_1:\n self.ldwStatus = 1\n if not(self.prev_ldwStatus == self.ldwStatus):\n self.warningNeeded = 1\n if self.ldwStatus > 0:\n self.warningCounter = 50\n else:\n self.lLine = 0\n self.rLine = 0\n self.curv0 = self.curv0Matrix.add(0.)\n self.curv1 = self.curv1Matrix.add(0.)\n self.curv2 = self.curv2Matrix.add(0.)\n self.curv3 = self.curv3Matrix.add(0.)\n\n # Generates IC messages for the Left and Right radar identified cars from radard\n def showLeftAndRightCarsOnICCanMessages(self, icCarLRSocket):\n messages = []\n icCarLR_msg = tesla.ICCarsLR.from_bytes(icCarLRSocket.recv())\n if icCarLR_msg is not None:\n #for icCarLR_msg in icCarLR_list:\n messages.append(teslacan.create_DAS_LR_object_msg(1,icCarLR_msg.v1Type,icCarLR_msg.v1Id,\n icCarLR_msg.v1Dx,icCarLR_msg.v1Dy,icCarLR_msg.v1Vrel,icCarLR_msg.v2Type,\n icCarLR_msg.v2Id,icCarLR_msg.v2Dx,icCarLR_msg.v2Dy,icCarLR_msg.v2Vrel))\n messages.append(teslacan.create_DAS_LR_object_msg(2,icCarLR_msg.v3Type,icCarLR_msg.v3Id,\n icCarLR_msg.v3Dx,icCarLR_msg.v3Dy,icCarLR_msg.v3Vrel,icCarLR_msg.v4Type,\n icCarLR_msg.v4Id,icCarLR_msg.v4Dx,icCarLR_msg.v4Dy,icCarLR_msg.v4Vrel))\n return messages\n\n def handleTrafficEvents(self, trafficEventsSocket):\n messages = []\n self.reset_traffic_events()\n tr_ev_list = messaging.recv_sock(trafficEventsSocket)\n if tr_ev_list is not None:\n for tr_ev in tr_ev_list.trafficEvents:\n if tr_ev.type == 0x00:\n if (tr_ev.distance < self.stopSign_distance):\n self.stopSign_visible = True\n self.stopSign_distance = tr_ev.distance \n self.stopSign_action = tr_ev.action\n self.stopSign_resume = tr_ev.resuming\n if tr_ev.type == 0x04:\n if (tr_ev.distance < self.stopLight_distance):\n self.stopLight_visible = True\n self.stopLight_distance = tr_ev.distance\n self.stopLight_action = tr_ev.action\n self.stopLight_resume = tr_ev.resuming\n self.stopLight_color = 1. #0-unknown, 1-red, 2-yellow, 3-green\n if tr_ev.type == 0x01:\n if (tr_ev.distance < self.stopLight_distance):\n self.stopLight_visible = True\n self.stopLight_distance = tr_ev.distance\n self.stopLight_action = tr_ev.action\n self.stopLight_resume = tr_ev.resuming\n self.stopLight_color = 1. #0-unknown, 1-red, 2-yellow, 3-green\n if tr_ev.type == 0x02:\n if (tr_ev.distance < self.stopLight_distance):\n self.stopLight_visible = True\n self.stopLight_distance = tr_ev.distance\n self.stopLight_action = tr_ev.action\n self.stopLight_resume = tr_ev.resuming\n self.stopLight_color = 2. #0-unknown, 1-red, 2-yellow, 3-green\n if tr_ev.type == 0x03:\n if (tr_ev.distance < self.stopLight_distance):\n self.stopLight_visible = True\n self.stopLight_distance = tr_ev.distance\n self.stopLight_action = tr_ev.action\n self.stopLight_resume = tr_ev.resuming\n self.stopLight_color = 3. 
#0-unknown, 1-red, 2-yellow, 3-green\n self.checkWhichSign()\n if not ((self.roadSignType_last == self.roadSignType) and (self.roadSignType == 0xFF)):\n messages.append(teslacan.create_fake_DAS_sign_msg(self.roadSignType,self.roadSignStopDist,self.roadSignColor,self.roadSignControlActive))\n return messages\n\n # Returns speed as it needs to be displayed on the IC\n def speedUnits(self, fromMetersPerSecond):\n return fromMetersPerSecond * (CV.MS_TO_KPH if self.isMetric else CV.MS_TO_MPH) + 0.5\n", "id": "2260255", "language": "Python", "matching_score": 6.144045352935791, "max_stars_count": 1, "path": "selfdrive/car/tesla/carcontroller.py" }, { "content": "from selfdrive.services import service_list\r\nfrom selfdrive.car.tesla.values import CruiseButtons, CruiseState\r\nfrom selfdrive.config import Conversions as CV\r\nimport selfdrive.messaging as messaging\r\nimport sys\r\nimport time\r\nimport zmq\r\nfrom selfdrive.car.tesla.movingaverage import MovingAverage\r\n \r\n\r\nclass ACCState(object):\r\n # Possible states of the ACC system, following the DI_cruiseState naming\r\n # scheme.\r\n OFF = 0 # Disabled by UI.\r\n STANDBY = 1 # Ready to be enaged.\r\n ENABLED = 2 # Engaged.\r\n NOT_READY = 9 # Not ready to be engaged due to the state of the car.\r\n \r\nclass _Mode(object):\r\n def __init__(self, label, autoresume, state):\r\n self.label = label\r\n self.autoresume = autoresume\r\n self.state = state\r\n self.next = None\r\n \r\nclass ACCMode(object):\r\n # Possible ACC modes, controlling how ACC behaves.\r\n # This is separate from ACC state. For example, you could\r\n # have ACC in \"Autoresume\" mode in \"Standby\" state.\r\n FOLLOW = _Mode(label=\"follow\", autoresume=False, state=ACCState.STANDBY)\r\n AUTO = _Mode(label=\"auto\", autoresume=True, state=ACCState.STANDBY)\r\n \r\n BUTTON_NAME = 'acc'\r\n BUTTON_ABREVIATION = 'ACC'\r\n \r\n # Toggle order: OFF -> ON -> AUTO -> OFF\r\n _all_modes = [FOLLOW, AUTO]\r\n for index, mode in enumerate(_all_modes):\r\n mode.next = _all_modes[(index + 1) % len(_all_modes)]\r\n \r\n # Map labels to modes for fast lookup by label.\r\n _label_to_mode = {mode.label: mode for mode in _all_modes}\r\n @ classmethod\r\n def from_label(cls, label):\r\n return cls._label_to_mode.get(label, cls.FOLLOW)\r\n \r\n @ classmethod\r\n def labels(cls):\r\n return [mode.label for mode in cls._all_modes]\r\n\r\ndef _current_time_millis():\r\n return int(round(time.time() * 1000))\r\n\r\n\r\nclass ACCController(object):\r\n \r\n # Tesla cruise only functions above 17 MPH\r\n MIN_CRUISE_SPEED_MS = 17.1 * CV.MPH_TO_MS\r\n \r\n def __init__(self,carcontroller):\r\n self.CC = carcontroller\r\n self.human_cruise_action_time = 0\r\n self.automated_cruise_action_time = 0\r\n self.poller = zmq.Poller()\r\n self.radarState = messaging.sub_sock(service_list['radarState'].port, conflate=True, poller=self.poller)\r\n self.last_update_time = 0\r\n self.enable_adaptive_cruise = False\r\n self.prev_enable_adaptive_cruise = False\r\n # Whether to re-engage automatically after being paused due to low speed or\r\n # user-initated deceleration.\r\n self.autoresume = False\r\n self.last_cruise_stalk_pull_time = 0\r\n self.prev_cruise_buttons = CruiseButtons.IDLE\r\n self.prev_pcm_acc_status = 0\r\n self.acc_speed_kph = 0.\r\n self.speed_limit_kph = 0.\r\n self.prev_speed_limit_kph = 0.\r\n self.user_has_braked = False\r\n self.has_gone_below_min_speed = False\r\n self.fast_decel_time = 0\r\n self.lead_last_seen_time_ms = 0\r\n # BB speed for testing\r\n self.new_speed = 0\r\n 
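The carcontroller.py record ends just above with speedUnits(), which converts a speed in m/s into the unit the instrument cluster expects and adds 0.5 so that the later int() cast (used for speed_limit_to_car) rounds to the nearest whole unit instead of truncating. A minimal standalone sketch of that conversion, assuming the usual conversion factors written out explicitly (the real code pulls them from selfdrive.config.Conversions as CV):

    MS_TO_KPH = 3.6          # same factor CV.MS_TO_KPH provides
    MS_TO_MPH = 2.23693629   # same factor CV.MS_TO_MPH provides

    def speed_units_for_ic(speed_ms, is_metric):
        # Convert m/s to km/h or mph and bias by 0.5 so a later int()
        # cast rounds to the nearest whole unit rather than truncating.
        factor = MS_TO_KPH if is_metric else MS_TO_MPH
        return speed_ms * factor + 0.5

    # Example: 25 m/s on a metric car -> 90.5, shown as 90 km/h after int()
    print(speed_units_for_ic(25.0, True))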
self.average_speed_over_x_suggestions = 20 #2 seconds.... 10x a second\r\n self.maxsuggestedspeed_avg = MovingAverage(self.average_speed_over_x_suggestions)\r\n\r\n # Updates the internal state of this controller based on user input,\r\n # specifically the steering wheel mounted cruise control stalk, and OpenPilot\r\n # UI buttons.\r\n def update_stat(self, CS, enabled):\r\n # Check if the cruise stalk was double pulled, indicating that adaptive\r\n # cruise control should be enabled. Twice in .75 seconds counts as a double\r\n # pull.\r\n self.prev_enable_adaptive_cruise = self.enable_adaptive_cruise\r\n acc_string = CS.cstm_btns.get_button_label2(ACCMode.BUTTON_NAME)\r\n acc_mode = ACCMode.from_label(acc_string)\r\n CS.cstm_btns.get_button(ACCMode.BUTTON_NAME).btn_label2 = acc_mode.label\r\n self.autoresume = acc_mode.autoresume\r\n curr_time_ms = _current_time_millis()\r\n # Handle pressing the enable button.\r\n if (CS.cruise_buttons == CruiseButtons.MAIN and\r\n self.prev_cruise_buttons != CruiseButtons.MAIN):\r\n double_pull = curr_time_ms - self.last_cruise_stalk_pull_time < 750\r\n self.last_cruise_stalk_pull_time = curr_time_ms\r\n ready = (CS.cstm_btns.get_button_status(ACCMode.BUTTON_NAME) > ACCState.OFF\r\n and enabled\r\n and CruiseState.is_enabled_or_standby(CS.pcm_acc_status)\r\n and CS.v_ego > self.MIN_CRUISE_SPEED_MS)\r\n if ready and double_pull:\r\n # A double pull enables ACC. updating the max ACC speed if necessary.\r\n self.enable_adaptive_cruise = True\r\n # Increase ACC speed to match current, if applicable.\r\n self.acc_speed_kph = max(CS.v_ego_raw * CV.MS_TO_KPH, self.speed_limit_kph)\r\n self.user_has_braked = False\r\n self.has_gone_below_min_speed = False\r\n else:\r\n # A single pull disables ACC (falling back to just steering).\r\n self.enable_adaptive_cruise = False\r\n # Handle pressing the cancel button.\r\n elif CS.cruise_buttons == CruiseButtons.CANCEL:\r\n self.enable_adaptive_cruise = False\r\n self.acc_speed_kph = 0. 
\r\n self.last_cruise_stalk_pull_time = 0\r\n # Handle pressing up and down buttons.\r\n elif (self.enable_adaptive_cruise and\r\n CS.cruise_buttons != self.prev_cruise_buttons):\r\n self._update_max_acc_speed(CS)\r\n \r\n if CS.brake_pressed:\r\n self.user_has_braked = True\r\n if not self.autoresume:\r\n self.enable_adaptive_cruise = False\r\n \r\n if CS.v_ego < self.MIN_CRUISE_SPEED_MS:\r\n self.has_gone_below_min_speed = True\r\n \r\n # If autoresume is not enabled, manually steering or slowing disables ACC.\r\n if not self.autoresume:\r\n if not enabled or self.user_has_braked or self.has_gone_below_min_speed:\r\n self.enable_adaptive_cruise = False\r\n \r\n # Notify if ACC was toggled\r\n if self.prev_enable_adaptive_cruise and not self.enable_adaptive_cruise:\r\n CS.UE.custom_alert_message(3, \"ACC Disabled\", 150, 4)\r\n CS.cstm_btns.set_button_status(ACCMode.BUTTON_NAME, ACCState.STANDBY)\r\n elif self.enable_adaptive_cruise:\r\n CS.cstm_btns.set_button_status(ACCMode.BUTTON_NAME, ACCState.ENABLED)\r\n if not self.prev_enable_adaptive_cruise:\r\n CS.UE.custom_alert_message(2, \"ACC Enabled\", 150)\r\n\r\n # Update the UI to show whether the current car state allows ACC.\r\n if CS.cstm_btns.get_button_status(ACCMode.BUTTON_NAME) in [ACCState.STANDBY, ACCState.NOT_READY]:\r\n if (enabled\r\n and CruiseState.is_enabled_or_standby(CS.pcm_acc_status)\r\n and CS.v_ego > self.MIN_CRUISE_SPEED_MS):\r\n CS.cstm_btns.set_button_status(ACCMode.BUTTON_NAME, ACCState.STANDBY)\r\n else:\r\n CS.cstm_btns.set_button_status(ACCMode.BUTTON_NAME, ACCState.NOT_READY)\r\n \r\n # Update prev state after all other actions.\r\n self.prev_cruise_buttons = CS.cruise_buttons\r\n self.prev_pcm_acc_status = CS.pcm_acc_status\r\n \r\n def _update_max_acc_speed(self, CS):\r\n # Adjust the max ACC speed based on user cruise stalk actions.\r\n half_press_kph, full_press_kph = self._get_cc_units_kph(CS.imperial_speed_units)\r\n speed_change_map = {\r\n CruiseButtons.RES_ACCEL: half_press_kph,\r\n CruiseButtons.RES_ACCEL_2ND: full_press_kph,\r\n CruiseButtons.DECEL_SET: -1 * half_press_kph,\r\n CruiseButtons.DECEL_2ND: -1 * full_press_kph\r\n }\r\n self.acc_speed_kph += speed_change_map.get(CS.cruise_buttons, 0)\r\n\r\n # Clip ACC speed between 0 and 170 KPH.\r\n self.acc_speed_kph = min(self.acc_speed_kph, 170)\r\n self.acc_speed_kph = max(self.acc_speed_kph, 0)\r\n \r\n def max_v_by_speed_limit(self, acc_set_speed_ms, CS):\r\n # if more than 10 kph / 2.78 ms, consider we have speed limit\r\n if (CS.maxdrivespeed > 0) and CS.useTeslaMapData and (CS.mapAwareSpeed or (CS.baseMapSpeedLimitMPS <2.7)):\r\n #do we know the based speed limit?\r\n sl1 = 0.\r\n if CS.baseMapSpeedLimitMPS >= 2.7:\r\n #computer adjusted maxdrive based on set speed\r\n sl1 = min (acc_set_speed_ms * CS.maxdrivespeed / CS.baseMapSpeedLimitMPS, acc_set_speed_ms)\r\n sl1 = self.maxsuggestedspeed_avg.add(sl1)\r\n else:\r\n sl1 = self.maxsuggestedspeed_avg.add(CS.maxdrivespeed)\r\n return min(acc_set_speed_ms, sl1)\r\n else:\r\n return acc_set_speed_ms\r\n\r\n # Decide which cruise control buttons to simluate to get the car to the\r\n # desired speed.\r\n def update_acc(self, enabled, CS, frame, actuators, pcm_speed, speed_limit_kph, speed_limit_valid, set_speed_limit_active, speed_limit_offset):\r\n # Adaptive cruise control\r\n self.prev_speed_limit_kph = self.speed_limit_kph\r\n if speed_limit_valid and set_speed_limit_active and (speed_limit_kph >= 10):\r\n self.speed_limit_kph = speed_limit_kph + speed_limit_offset\r\n if not 
(int(self.prev_speed_limit_kph) == int(self.speed_limit_kph)):\r\n self.acc_speed_kph = self.speed_limit_kph\r\n self.maxsuggestedspeed_avg.reset()\r\n current_time_ms = _current_time_millis()\r\n if CruiseButtons.should_be_throttled(CS.cruise_buttons):\r\n self.human_cruise_action_time = current_time_ms\r\n button_to_press = None\r\n \r\n # If ACC is disabled, disengage traditional cruise control.\r\n if (self.prev_enable_adaptive_cruise and not self.enable_adaptive_cruise\r\n and CS.pcm_acc_status == CruiseState.ENABLED):\r\n button_to_press = CruiseButtons.CANCEL\r\n\r\n lead_1 = None\r\n #if enabled:\r\n for socket, _ in self.poller.poll(0):\r\n if socket is self.radarState:\r\n lead_1 = messaging.recv_one(socket).radarState.leadOne\r\n if lead_1.dRel:\r\n self.lead_last_seen_time_ms = current_time_ms\r\n if self.enable_adaptive_cruise and enabled:\r\n if CS.cstm_btns.get_button_label2(ACCMode.BUTTON_NAME) in [\"OP\", \"AutoOP\"]: \r\n button_to_press = self._calc_button(CS, pcm_speed)\r\n self.new_speed = pcm_speed * CV.MS_TO_KPH\r\n else:\r\n # Alternative speed decision logic that uses the lead car's distance\r\n # and speed more directly.\r\n # Bring in the lead car distance from the radarState feed\r\n \r\n button_to_press = self._calc_follow_button(CS, lead_1,speed_limit_kph, speed_limit_valid, set_speed_limit_active, speed_limit_offset)\r\n if button_to_press:\r\n self.automated_cruise_action_time = current_time_ms\r\n # If trying to slow below the min cruise speed, just cancel cruise.\r\n # This prevents a SCCM crash which is triggered by repeatedly pressing\r\n # stalk-down when already at min cruise speed.\r\n if (CruiseButtons.is_decel(button_to_press)\r\n and CS.v_cruise_actual - 1 < self.MIN_CRUISE_SPEED_MS * CV.MS_TO_KPH):\r\n button_to_press = CruiseButtons.CANCEL\r\n if button_to_press == CruiseButtons.CANCEL:\r\n self.fast_decel_time = current_time_ms\r\n # Debug logging (disable in production to reduce latency of commands)\r\n #print \"***ACC command: %s***\" % button_to_press\r\n return button_to_press\r\n\r\n # function to calculate the cruise button based on a safe follow distance\r\n def _calc_follow_button(self, CS, lead_car,speed_limit_kph, speed_limit_valid, set_speed_limit_active, speed_limit_offset):\r\n if lead_car is None:\r\n return None\r\n # Desired gap (in seconds) between cars.\r\n follow_time_s = CS.apFollowTimeInS\r\n # v_ego is in m/s, so safe_dist_m is in meters.\r\n safe_dist_m = CS.v_ego * follow_time_s\r\n current_time_ms = _current_time_millis()\r\n # Make sure we were able to populate lead_1.\r\n # dRel is in meters.\r\n lead_dist_m = lead_car.dRel\r\n lead_speed_kph = (lead_car.vRel + CS.v_ego) * CV.MS_TO_KPH\r\n # Relative velocity between the lead car and our set cruise speed.\r\n future_vrel_kph = lead_speed_kph - CS.v_cruise_actual\r\n # How much we can accelerate without exceeding the max allowed speed.\r\n max_acc_speed_kph = self.max_v_by_speed_limit(self.acc_speed_kph * CV.KPH_TO_MS, CS) * CV.MS_TO_KPH\r\n available_speed_kph = max_acc_speed_kph - CS.v_cruise_actual\r\n half_press_kph, full_press_kph = self._get_cc_units_kph(CS.imperial_speed_units)\r\n # button to issue\r\n button = None\r\n # debug msg\r\n msg = None\r\n\r\n # Automatically engage traditional cruise if ACC is active.\r\n if self._should_autoengage_cc(CS, lead_car=lead_car) and self._no_action_for(milliseconds=100):\r\n button = CruiseButtons.RES_ACCEL\r\n # If traditional cruise is engaged, then control it.\r\n elif CS.pcm_acc_status == CruiseState.ENABLED:\r\n \r\n 
# Disengage cruise control if a slow object is seen ahead. This triggers\r\n # full regen braking, which is stronger than the braking that happens if\r\n # you just reduce cruise speed.\r\n if self._fast_decel_required(CS, lead_car) and self._no_human_action_for(milliseconds=500):\r\n msg = \"Off (Slow traffic)\"\r\n button = CruiseButtons.CANCEL\r\n self.new_speed = 1\r\n \r\n # if cruise is set to faster than the max speed, slow down\r\n elif CS.v_cruise_actual > max_acc_speed_kph and self._no_action_for(milliseconds=300):\r\n msg = \"Slow to max\"\r\n button = CruiseButtons.DECEL_SET\r\n self.new_speed = max_acc_speed_kph \r\n \r\n elif (# if we have a populated lead_distance\r\n lead_dist_m > 0\r\n and self._no_action_for(milliseconds=300)\r\n # and we're moving\r\n and CS.v_cruise_actual > full_press_kph):\r\n ### Slowing down ###\r\n # Reduce speed significantly if lead_dist < safe dist\r\n # and if the lead car isn't already pulling away.\r\n if lead_dist_m < safe_dist_m * .5 and future_vrel_kph < 2:\r\n msg = \"-5 (Significantly too close)\"\r\n button = CruiseButtons.DECEL_2ND\r\n self.new_speed = CS.v_ego * CV.MS_TO_KPH - full_press_kph\r\n # Don't rush up to lead car\r\n elif future_vrel_kph < -15:\r\n msg = \"-5 (approaching too fast)\"\r\n button = CruiseButtons.DECEL_2ND\r\n self.new_speed = CS.v_ego * CV.MS_TO_KPH - full_press_kph\r\n elif future_vrel_kph < -8:\r\n msg = \"-1 (approaching too fast)\"\r\n button = CruiseButtons.DECEL_SET\r\n self.new_speed = CS.v_ego * CV.MS_TO_KPH - half_press_kph\r\n elif lead_dist_m < safe_dist_m and future_vrel_kph <= 0:\r\n msg = \"-1 (Too close)\"\r\n button = CruiseButtons.DECEL_SET\r\n self.new_speed = CS.v_ego * CV.MS_TO_KPH - half_press_kph\r\n # Make slow adjustments if close to the safe distance.\r\n # only adjust every 1 secs\r\n elif (lead_dist_m < safe_dist_m * 1.3\r\n and future_vrel_kph < -1 * half_press_kph\r\n and self._no_action_for(milliseconds=1000)):\r\n msg = \"-1 (Near safe distance)\"\r\n button = CruiseButtons.DECEL_SET\r\n self.new_speed = CS.v_ego * CV.MS_TO_KPH - half_press_kph\r\n\r\n ### Speed up ###\r\n elif (available_speed_kph > half_press_kph\r\n and lead_dist_m > safe_dist_m\r\n and self._no_human_action_for(milliseconds=1000)):\r\n lead_is_far = lead_dist_m > safe_dist_m * 1.75\r\n closing = future_vrel_kph < -2\r\n lead_is_pulling_away = future_vrel_kph > 4\r\n if lead_is_far and not closing or lead_is_pulling_away:\r\n msg = \"+1 (Beyond safe distance and speed)\"\r\n button = CruiseButtons.RES_ACCEL\r\n self.new_speed = CS.v_ego * CV.MS_TO_KPH + half_press_kph\r\n \r\n # If lead_dist is reported as 0, no one is detected in front of you so you\r\n # can speed up. 
Only accel on straight-aways; vision radar often\r\n # loses lead car in a turn.\r\n elif (lead_dist_m == 0\r\n and CS.angle_steers < 2.0\r\n and half_press_kph < available_speed_kph\r\n and self._no_action_for(milliseconds=500)\r\n and self._no_human_action_for(milliseconds=1000)\r\n and current_time_ms > self.lead_last_seen_time_ms + 4000):\r\n msg = \"+1 (road clear)\"\r\n button = CruiseButtons.RES_ACCEL\r\n self.new_speed = CS.v_ego * CV.MS_TO_KPH + half_press_kph\r\n\r\n if (current_time_ms > self.last_update_time + 1000):\r\n ratio = 0\r\n if safe_dist_m > 0:\r\n ratio = (lead_dist_m / safe_dist_m) * 100\r\n print \"Ratio: {0:.1f}% lead: {1:.1f}m avail: {2:.1f}kph vRel: {3:.1f}kph Angle: {4:.1f}deg\".format(\r\n ratio, lead_dist_m, available_speed_kph, lead_car.vRel * CV.MS_TO_KPH, CS.angle_steers)\r\n self.last_update_time = current_time_ms\r\n if msg != None:\r\n print \"ACC: \" + msg\r\n return button\r\n \r\n def _should_autoengage_cc(self, CS, lead_car=None):\r\n # Automatically (re)engage cruise control so long as \r\n # 1) The carstate allows cruise control\r\n # 2) There is no imminent threat of collision\r\n # 3) The user did not cancel ACC by pressing the brake\r\n cruise_ready = (self.enable_adaptive_cruise\r\n and CS.pcm_acc_status == CruiseState.STANDBY\r\n and CS.v_ego >= self.MIN_CRUISE_SPEED_MS\r\n and _current_time_millis() > self.fast_decel_time + 2000)\r\n \r\n slow_lead = lead_car and lead_car.dRel > 0 and lead_car.vRel < 0 or self._fast_decel_required(CS, lead_car)\r\n \r\n # \"Autoresume\" mode allows cruise to engage even after brake events, but\r\n # shouldn't trigger DURING braking.\r\n autoresume_ready = self.autoresume and CS.a_ego >= 0.1\r\n \r\n braked = self.user_has_braked or self.has_gone_below_min_speed\r\n \r\n return cruise_ready and not slow_lead and (autoresume_ready or not braked)\r\n \r\n def _fast_decel_required(self, CS, lead_car):\r\n \"\"\" Identifies situations which call for rapid deceleration. \"\"\"\r\n if not lead_car or not lead_car.dRel:\r\n return False\r\n\r\n collision_imminent = self._seconds_to_collision(CS, lead_car) < 4\r\n \r\n lead_absolute_speed_ms = lead_car.vRel + CS.v_ego\r\n lead_too_slow = lead_absolute_speed_ms < self.MIN_CRUISE_SPEED_MS\r\n \r\n return collision_imminent or lead_too_slow\r\n \r\n def _seconds_to_collision(self, CS, lead_car):\r\n if not lead_car or not lead_car.dRel:\r\n return sys.maxint\r\n elif lead_car.vRel >= 0:\r\n return sys.maxint\r\n return abs(float(lead_car.dRel) / lead_car.vRel)\r\n \r\n def _get_cc_units_kph(self, is_imperial_units):\r\n # Cruise control buttons behave differently depending on whether the car\r\n # is configured for metric or imperial units.\r\n if is_imperial_units:\r\n # Imperial unit cars adjust cruise in units of 1 and 5 mph.\r\n half_press_kph = 1 * CV.MPH_TO_KPH\r\n full_press_kph = 5 * CV.MPH_TO_KPH\r\n else:\r\n # Metric cars adjust cruise in units of 1 and 5 kph.\r\n half_press_kph = 1\r\n full_press_kph = 5\r\n return half_press_kph, full_press_kph\r\n \r\n # Adjust speed based off OP's longitudinal model. As of OpenPilot 0.5.3, this\r\n # is inoperable because the planner crashes when given only visual radar\r\n # inputs. 
(Perhaps this can be used in the future with a radar install, or if\r\n # OpenPilot planner changes.)\r\n def _calc_button(self, CS, desired_speed_ms):\r\n button_to_press = None\r\n # Automatically engange traditional cruise if appropriate.\r\n if self._should_autoengage_cc(CS) and desired_speed_ms >= CS.v_ego:\r\n button_to_press = CruiseButtons.RES_ACCEL\r\n # If traditional cruise is engaged, then control it.\r\n elif (CS.pcm_acc_status == CruiseState.ENABLED\r\n # But don't make adjustments if a human has manually done so in\r\n # the last 3 seconds. Human intention should not be overridden.\r\n and self._no_human_action_for(milliseconds=3000)\r\n and self._no_automated_action_for(milliseconds=500)):\r\n # The difference between OP's target speed and the current cruise\r\n # control speed, in KPH.\r\n speed_offset_kph = (desired_speed_ms * CV.MS_TO_KPH - CS.v_cruise_actual)\r\n \r\n half_press_kph, full_press_kph = self._get_cc_units_kph(CS.imperial_speed_units)\r\n \r\n # Reduce cruise speed significantly if necessary. Multiply by a % to\r\n # make the car slightly more eager to slow down vs speed up.\r\n if desired_speed_ms < self.MIN_CRUISE_SPEED_MS:\r\n button_to_press = CruiseButtons.CANCEL\r\n if speed_offset_kph < -2 * full_press_kph and CS.v_cruise_actual > 0:\r\n button_to_press = CruiseButtons.CANCEL\r\n elif speed_offset_kph < -0.6 * full_press_kph and CS.v_cruise_actual > 0:\r\n # Send cruise stalk dn_2nd.\r\n button_to_press = CruiseButtons.DECEL_2ND\r\n # Reduce speed slightly if necessary.\r\n elif speed_offset_kph < -0.9 * half_press_kph and CS.v_cruise_actual > 0:\r\n # Send cruise stalk dn_1st.\r\n button_to_press = CruiseButtons.DECEL_SET\r\n # Increase cruise speed if possible.\r\n elif CS.v_ego > self.MIN_CRUISE_SPEED_MS:\r\n # How much we can accelerate without exceeding max allowed speed.\r\n available_speed_kph = self.acc_speed_kph - CS.v_cruise_actual\r\n if speed_offset_kph >= full_press_kph and full_press_kph < available_speed_kph:\r\n # Send cruise stalk up_2nd.\r\n button_to_press = CruiseButtons.RES_ACCEL_2ND\r\n elif speed_offset_kph >= half_press_kph and half_press_kph < available_speed_kph:\r\n # Send cruise stalk up_1st.\r\n button_to_press = CruiseButtons.RES_ACCEL\r\n return button_to_press\r\n \r\n def _no_human_action_for(self, milliseconds):\r\n return _current_time_millis() > self.human_cruise_action_time + milliseconds\r\n \r\n def _no_automated_action_for(self, milliseconds):\r\n return _current_time_millis() > self.automated_cruise_action_time + milliseconds\r\n \r\n def _no_action_for(self, milliseconds):\r\n return self._no_human_action_for(milliseconds) and self._no_automated_action_for(milliseconds)\r\n", "id": "6981934", "language": "Python", "matching_score": 1.655381202697754, "max_stars_count": 1, "path": "selfdrive/car/tesla/ACC_module.py" }, { "content": "\nAIRTABLE_API_KEY = '<KEY>'\nAIRTABLE_BASE_ID = 'appht7GB4aJS2A0LD'\n\nUSERS_TABLE = 'Users'\nEVENTS_TABLE = 'Events'\n\nLOG_PREFIX = \"tinklad.airtable_publisher: \"\n\nclass AirtableUsersKeys():\n openPilotId = \"openPilotId\"\n timestamp = \"timestamp\"\n userHandle = \"userHandle\"\n gitRemote = \"gitRemote\"\n gitBranch = \"gitBranch\"\n\nclass AirtableEventKeys():\n openPilotId = \"openPilotId\"\n timestamp = \"timestamp\"\n source = \"source\"\n category = \"category\"\n name = \"name\"\n value = \"value\"\n\n# This needs to match tinkla.capnp\nclass TinklaEventValueTypes():\n boolValue = 'boolValue'\n textValue = 'textValue'\n intValue = 'intValue'\n floatValue = 
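The ACC_module.py record above ends here. Its _calc_button() path reduces to comparing the offset between OpenPilot's desired speed and the current cruise set speed against the stalk's half-press (1 mph or 1 km/h) and full-press (5 mph or 5 km/h) step sizes. A simplified, self-contained sketch of that decision; button names are plain strings here rather than the CruiseButtons constants, and the real code additionally checks the max allowed ACC speed, the minimum cruise speed, and rate-limits by recent human/automated actions:

    MPH_TO_KPH = 1.609344

    def cc_step_sizes_kph(is_imperial):
        # Imperial cars step cruise in 1 and 5 mph; metric cars in 1 and 5 km/h.
        if is_imperial:
            return 1 * MPH_TO_KPH, 5 * MPH_TO_KPH
        return 1.0, 5.0

    def pick_button(desired_kph, cruise_set_kph, is_imperial):
        half, full = cc_step_sizes_kph(is_imperial)
        offset = desired_kph - cruise_set_kph
        if offset < -2 * full:
            return "CANCEL"         # far too fast: drop out of cruise entirely
        if offset < -0.6 * full:
            return "DECEL_2ND"      # big step down
        if offset < -0.9 * half:
            return "DECEL_SET"      # small step down
        if offset >= full:
            return "RES_ACCEL_2ND"  # big step up
        if offset >= half:
            return "RES_ACCEL"      # small step up
        return None                 # close enough, press nothing

    # Example: cruise set to 100 km/h, OP wants 98 km/h on a metric car
    print(pick_button(98.0, 100.0, False))   # -> "DECEL_SET"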
'floatValue'\n\nclass Publisher():\n openPilotId = None\n latest_info_dict = None # current info published\n userRecordId = None\n\n def send_info(self, info, isData= False):\n data_dict = None\n if isData:\n data_dict = info\n else:\n data_dict = self.__generate_airtable_user_info_dict(info)\n\n # Early return if no changes\n if self.latest_info_dict != None:\n print(LOG_PREFIX + \"latest_info. data=%s\" % (self.latest_info_dict)) \n if data_dict == self.latest_info_dict:\n print(LOG_PREFIX + \"send_info no update necessary*\")\n return\n\n print(LOG_PREFIX + \"Sending info. data=%s\" % (data_dict))\n if self.userRecordId != None:\n self.__update_user(data_dict)\n\n if info.openPilotId != None and info.openPilotId != '':\n self.openPilotId = info.openPilotId\n\n response = self.at.get(USERS_TABLE, limit=1, filter_by_formula=(\"{openPilotId} = '%s'\" % (self.openPilotId)))\n if self.__is_notfound_response(response): # Not found, create:\n print(LOG_PREFIX + \"Creating record for openPilotId='%s'\" % (info.openPilotId))\n response = self.at.create(USERS_TABLE, data_dict)\n if self.__is_error_response(response):\n raise Exception(response)\n elif self.__is_error_response(response): #Unsupported error\n print(LOG_PREFIX + \"Error retrieving data: '%s'\" % (response))\n raise Exception(response)\n else:\n self.userRecordId = response[\"records\"][0][\"id\"]\n self.__update_user(data_dict)\n \n self.latest_info_dict = data_dict\n print(LOG_PREFIX + \"*send_info competed*\")\n\n def send_event(self, event):\n if self.openPilotId is None and self.latest_info_dict != None:\n self.openPilotId = self.latest_info_dict[self.userKeys.openPilotId]\n\n event_dict = self.__generate_airtable_user_event_dict(event)\n print(LOG_PREFIX + \"Sending event. data=%s\" % (event_dict))\n response = self.at.create(EVENTS_TABLE, event_dict)\n if self.__is_error_response(response):\n print(LOG_PREFIX + \"Error sending airtable event. 
%s\" % (response))\n raise Exception(response)\n print(LOG_PREFIX + \"*send_event competed*\")\n\n\n\n def __generate_airtable_user_info_dict(self, info):\n dictionary = info.to_dict()\n dictionary.pop(self.userKeys.timestamp, None)\n return dictionary\n\n def __generate_airtable_user_event_dict(self, event):\n value = event.value.which()\n if value == self.eventValueTypes.boolValue:\n value = event.value.boolValue\n elif value == self.eventValueTypes.textValue:\n value = event.value.textValue\n elif value == self.eventValueTypes.intValue:\n value = event.value.intValue\n elif value == self.eventValueTypes.floatValue:\n value = event.value.floatValue\n openPilotId = self.openPilotId if (self.openPilotId != None) else \"\"\n dictionary = event.to_dict()\n dictionary[self.eventKeys.value] = value\n dictionary[self.eventKeys.openPilotId] = openPilotId\n # dictionary.pop(\"timestamp\", None)\n return dictionary\n\n def __update_user(self, data):\n print(LOG_PREFIX + \"Updating userRecordId='%s'\" % (self.userRecordId))\n response = self.at.update(USERS_TABLE, self.userRecordId, data)\n if self.__is_error_response(response):\n raise Exception(response)\n\n def __is_notfound_response(self, response):\n try:\n return response[\"error\"] != None and response[\"error\"][\"code\"] == 422\n except: # pylint: disable=bare-except \n count = response[\"records\"].__len__()\n return count == 0\n\n def __is_error_response(self, response):\n try:\n return response[\"error\"] != None\n except: # pylint: disable=bare-except \n return False\n\n def __init__(self):\n self.eventValueTypes = TinklaEventValueTypes()\n self.userKeys = AirtableUsersKeys()\n self.eventKeys = AirtableEventKeys()\n self.at = Airtable(AIRTABLE_BASE_ID, AIRTABLE_API_KEY)\n\n################################################################\n# airtable.py - https://github.com/josephbestjames/airtable.py #\n################################################################\n\nimport json\nimport posixpath \nimport requests\nimport six\nfrom collections import OrderedDict\n\nAPI_URL = 'https://api.airtable.com/v%s/'\nAPI_VERSION = '0'\n\n\nclass IsNotInteger(Exception):\n pass\n\n\nclass IsNotString(Exception):\n pass\n\n\ndef check_integer(n):\n if not n:\n return False\n elif not isinstance(n, six.integer_types):\n raise IsNotInteger('Expected an integer')\n else:\n return True\n\n\ndef check_string(s):\n if not s:\n return False\n elif not isinstance(s, six.string_types):\n raise IsNotString('Expected a string')\n else:\n return True\n\n\ndef create_payload(data):\n return {'fields': data}\n\n\nclass Airtable(object):\n def __init__(self, base_id, api_key, dict_class=OrderedDict):\n \"\"\"Create a client to connect to an Airtable Base.\n\n Args:\n - base_id: The ID of the base, e.g. \"appA0CDAE34F\"\n - api_key: The API secret key, e.g. \"keyBAAE123C\"\n - dict_class: the class to use to build dictionaries for returning\n fields. 
By default the fields are kept in the order they were\n returned by the API using an OrderedDict, but you can switch\n to a simple dict if you prefer.\n \"\"\"\n self.airtable_url = API_URL % API_VERSION\n self.base_url = posixpath.join(self.airtable_url, base_id)\n self.headers = {'Authorization': 'Bearer %s' % api_key}\n self._dict_class = dict_class\n\n def __request(self, method, url, params=None, payload=None):\n if method in ['POST', 'PUT', 'PATCH']:\n self.headers.update({'Content-type': 'application/json'})\n r = requests.request(method,\n posixpath.join(self.base_url, url),\n params=params,\n data=payload,\n headers=self.headers)\n if r.status_code == requests.codes.ok: # pylint: disable=no-member\n return r.json(object_pairs_hook=self._dict_class)\n else:\n try:\n message = None\n r.raise_for_status()\n except requests.exceptions.HTTPError as e:\n message = str(e)\n return {\n 'error': dict(code=r.status_code, message=message)\n }\n\n def get( # pylint: disable=dangerous-default-value\n self, table_name, record_id=None, limit=0, offset=None,\n filter_by_formula=None, view=None, max_records=0, fields=[]):\n params = {}\n if check_string(record_id):\n url = posixpath.join(table_name, record_id)\n else:\n url = table_name\n if limit and check_integer(limit):\n params.update({'pageSize': limit})\n if offset and check_string(offset):\n params.update({'offset': offset})\n if filter_by_formula is not None:\n params.update({'filterByFormula': filter_by_formula})\n if view is not None:\n params.update({'view': view})\n if max_records and check_integer(max_records):\n params.update({'maxRecords': max_records})\n if fields and type(fields) is list: # pylint: disable=unidiomatic-typecheck\n for field in fields: check_string(field)\n params.update({'fields': fields})\n\n return self.__request('GET', url, params)\n\n def iterate( # pylint: disable=dangerous-default-value\n self, table_name, batch_size=0, filter_by_formula=None, \n view=None, max_records=0, fields=[]):\n \"\"\"Iterate over all records of a table.\n\n Args:\n table_name: the name of the table to list.\n batch_size: the number of records to fetch per request. The default\n (0) is using the default of the API which is (as of 2016-09)\n 100. Note that the API does not allow more than that (but\n allow for less).\n filter_by_formula: a formula used to filter records. The formula\n will be evaluated for each record, and if the result is not 0,\n false, \"\", NaN, [], or #Error! the record will be included in\n the response. If combined with view, only records in that view\n which satisfy the formula will be returned.\n view: the name or ID of a view in the table. If set, only the\n records in that view will be returned. 
The records will be\n sorted according to the order of the view.\n Yields:\n A dict for each record containing at least three fields: \"id\",\n \"createdTime\" and \"fields\".\n \"\"\"\n offset = None\n while True:\n response = self.get(\n table_name, limit=batch_size, offset=offset, max_records=max_records, \n fields=fields, filter_by_formula=filter_by_formula, view=view)\n for record in response.pop('records'):\n yield record\n if 'offset' in response:\n offset = response['offset'].encode('ascii','ignore')\n else:\n break\n\n def create(self, table_name, data): # pylint: disable=inconsistent-return-statements\n if check_string(table_name):\n payload = create_payload(data)\n return self.__request('POST', table_name,\n payload=json.dumps(payload))\n\n def update(self, table_name, record_id, data): # pylint: disable=inconsistent-return-statements\n if check_string(table_name) and check_string(record_id):\n url = posixpath.join(table_name, record_id)\n payload = create_payload(data)\n return self.__request('PATCH', url,\n payload=json.dumps(payload))\n\n def update_all(self, table_name, record_id, data): # pylint: disable=inconsistent-return-statements\n if check_string(table_name) and check_string(record_id):\n url = posixpath.join(table_name, record_id)\n payload = create_payload(data)\n return self.__request('PUT', url,\n payload=json.dumps(payload))\n\n def delete(self, table_name, record_id): # pylint: disable=inconsistent-return-statements\n if check_string(table_name) and check_string(record_id):\n url = posixpath.join(table_name, record_id)\n return self.__request('DELETE', url)\n", "id": "2862819", "language": "Python", "matching_score": 3.6324241161346436, "max_stars_count": 1, "path": "selfdrive/tinklad/airtable_publisher.py" }, { "content": "#!/usr/bin/env python2.7\n\nfrom cereal import tinkla\nfrom tinkla_interface import TinklaClient\nimport time\nfrom selfdrive.car.tesla.readconfig import CarSettings\n\nclass TinklaTestClient():\n\n def __init__(self):\n #self.start_server()\n self.tinklaClient = TinklaClient()\n openPilotId = \"test_openpilotId\"\n source = \"tinkladTestClient\"\n userHandle = \"test_user_handle\"\n\n info = tinkla.Interface.UserInfo.new_message(\n openPilotId=openPilotId,\n userHandle=userHandle,\n gitRemote=\"test_github.com/something\",\n gitBranch=\"test_gitbranch\",\n gitHash=\"test_123456\"\n )\n start_time = time.time()\n self.tinklaClient.setUserInfo(info)\n elapsed_time_us = (time.time() - start_time) * 1000 * 1000\n print(\"Info Time Elapsed = %d\" % (elapsed_time_us))\n\n event = tinkla.Interface.Event.new_message(\n openPilotId=openPilotId,\n source=source,\n category=self.tinklaClient.eventCategoryKeys.userAction,\n name=\"pull_stalk\",\n )\n event.value.textValue=\"up\"\n start_time = time.time()\n self.tinklaClient.logUserEvent(event)\n elapsed_time_us = (time.time() - start_time) * 1000 * 1000\n print(\"Event Time Elapsed = %d\" % (elapsed_time_us))\n\n carsettings = CarSettings(\"./bb_openpilot_config.cfg\")\n carsettings.userHandle = userHandle\n print(\"userHandle = '%s'\" % (userHandle))\n\n print(\"attemptToSendPendingMessages\")\n self.tinklaClient.attemptToSendPendingMessages()\n\n print(\"send crash log\")\n self.tinklaClient.logCrashStackTraceEvent(openPilotId=openPilotId)\n\n print(\"send can error\")\n self.tinklaClient.logCANErrorEvent(source=source, canMessage=1, additionalInformation=\"test can error logging\", openPilotId=openPilotId)\n time.sleep(1)\n self.tinklaClient.logCANErrorEvent(source=source, canMessage=2, 
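The airtable.py wrapper bundled into the record above is a thin requests client; note that __request returns a dict of the form {'error': {...}} on HTTP failures rather than raising. A short usage sketch against the Users table defined at the top of the record, assuming the module is importable from the path shown; the base ID, API key and field values here are placeholders, not real credentials:

    from selfdrive.tinklad.airtable_publisher import Airtable, USERS_TABLE

    at = Airtable("appEXAMPLEBASE", "keyEXAMPLE")  # placeholder credentials

    # Look up a user record by openPilotId, same formula style Publisher.send_info uses.
    resp = at.get(USERS_TABLE, limit=1,
                  filter_by_formula="{openPilotId} = 'some_id'")

    if resp.get("records"):
        record_id = resp["records"][0]["id"]
        at.update(USERS_TABLE, record_id, {"gitBranch": "my_branch"})   # PATCH one field
    else:
        at.create(USERS_TABLE, {"openPilotId": "some_id", "gitBranch": "my_branch"})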
additionalInformation=\"test can error logging\", openPilotId=openPilotId)\n\n print(\"send process comm error\")\n self.tinklaClient.logProcessCommErrorEvent(source=source, processName=\"processNameWouldBeHere1\", count=10, eventType=\"Not Alive\", openPilotId=openPilotId)\n time.sleep(1)\n self.tinklaClient.logProcessCommErrorEvent(source=source, processName=\"processNameWouldBeHere2\", count=10, eventType=\"Not Alive\", openPilotId=openPilotId)\n\nif __name__ == \"__main__\":\n TinklaTestClient()\n", "id": "3254837", "language": "Python", "matching_score": 0.5585497617721558, "max_stars_count": 1, "path": "selfdrive/tinklad/tinkladTestClient.py" }, { "content": "#!/usr/bin/env python2.7\n\n# \n# https://github.com/balena/python-pqueue\n# \n# Some updates by Raf for tinklad.py 5/2019\n#\n\n\"\"\"A single process, persistent multi-producer, multi-consumer queue.\"\"\"\n\nimport os\nimport pickle\nimport tempfile\nimport shutil\n\n#python 2.7:\nfrom Queue import Queue as SyncQ\n#python 3:\n#from queue import Queue as SyncQ\n\n\ndef _truncate(fn, length):\n fd = os.open(fn, os.O_RDWR)\n os.ftruncate(fd, length)\n os.close(fd)\n\n\nclass Queue(SyncQ):\n def __init__(self, path, maxsize=0, chunksize=100, tempdir=None):\n \"\"\"Create a persistent queue object on a given path.\n\n The argument path indicates a directory where enqueued data should be\n persisted. If the directory doesn't exist, one will be created. If maxsize\n is <= 0, the queue size is infinite. The optional argument chunksize\n indicates how many entries should exist in each chunk file on disk.\n\n The tempdir parameter indicates where temporary files should be stored.\n The tempdir has to be located on the same disk as the enqueued data in\n order to obtain atomic operations.\n \"\"\"\n\n self.path = path\n self.chunksize = chunksize\n self.tempdir = tempdir\n if self.tempdir:\n if os.stat(self.path).st_dev != os.stat(self.tempdir).st_dev:\n raise ValueError(\"tempdir has to be located \"\n \"on same path filesystem\")\n\n SyncQ.__init__(self, maxsize)\n self.info = self._loadinfo()\n # truncate head case it contains garbage\n hnum, hcnt, hoffset = self.info['head']\n headfn = self._qfile(hnum)\n if os.path.exists(headfn):\n if hoffset < os.path.getsize(headfn):\n _truncate(headfn, hoffset)\n # let the head file open\n self.headf = self._openchunk(hnum, 'ab+')\n # let the tail file open\n tnum, _, toffset = self.info['tail']\n self.tailf = self._openchunk(tnum)\n self.tailf.seek(toffset)\n # update unfinished tasks with the current number of enqueued tasks\n self.unfinished_tasks = self.info['size']\n # optimize info file updates\n self.update_info = True\n\n def _init(self, maxsize):\n if not os.path.exists(self.path):\n os.makedirs(self.path)\n\n def _destroy(self):\n if os.path.exists(self.path):\n shutil.rmtree(self.path)\n os.makedirs(self.path)\n\n def _qsize(self, len=len): # pylint: disable=redefined-builtin\n return self.info['size']\n\n def _put(self, item):\n pickle.dump(item, self.headf)\n self.headf.flush()\n hnum, hpos, _ = self.info['head']\n hpos += 1\n if hpos == self.info['chunksize']:\n hpos = 0\n hnum += 1\n self.headf.close()\n self.headf = self._openchunk(hnum, 'ab+')\n self.info['size'] += 1\n self.info['head'] = [hnum, hpos, self.headf.tell()]\n self._saveinfo()\n\n def _get(self):\n tnum, tcnt, toffset = self.info['tail']\n hnum, hcnt, _ = self.info['head']\n if [tnum, tcnt] >= [hnum, hcnt]:\n return None\n data = pickle.load(self.tailf)\n toffset = self.tailf.tell()\n tcnt += 1\n if tcnt == 
self.info['chunksize'] and tnum <= hnum:\n tcnt = toffset = 0\n tnum += 1\n self.tailf.close()\n self.tailf = self._openchunk(tnum)\n self.info['size'] -= 1\n self.info['tail'] = [tnum, tcnt, toffset]\n self.update_info = True\n return data\n\n def task_done(self):\n try:\n SyncQ.task_done(self)\n except: # pylint: disable=bare-except \n pass\n if self.update_info:\n self._saveinfo()\n self.update_info = False\n\n def _openchunk(self, number, mode='rb'):\n return open(self._qfile(number), mode)\n\n def _loadinfo(self):\n infopath = self._infopath()\n if os.path.exists(infopath):\n with open(infopath, 'rb') as f:\n info = pickle.load(f)\n else:\n info = {\n 'chunksize': self.chunksize,\n 'size': 0,\n 'tail': [0, 0, 0],\n 'head': [0, 0, 0],\n }\n return info\n\n def _gettempfile(self):\n if self.tempdir:\n return tempfile.mkstemp(dir=self.tempdir)\n else:\n return tempfile.mkstemp()\n\n def _saveinfo(self):\n tmpfd, tmpfn = self._gettempfile()\n os.write(tmpfd, pickle.dumps(self.info))\n os.close(tmpfd)\n # POSIX requires that 'rename' is an atomic operation\n os.rename(tmpfn, self._infopath())\n self._clear_old_file()\n\n def _clear_old_file(self):\n tnum, _, _ = self.info['tail']\n while tnum >= 1:\n tnum -= 1\n path = self._qfile(tnum)\n if os.path.exists(path):\n os.remove(path)\n else:\n break\n\n def _qfile(self, number):\n return os.path.join(self.path, 'q%05d' % number)\n\n def _infopath(self):\n return os.path.join(self.path, 'info')\n", "id": "8024901", "language": "Python", "matching_score": 0.8525034785270691, "max_stars_count": 1, "path": "selfdrive/tinklad/pqueue.py" }, { "content": "import ConfigParser\r\n\r\ndefault_config_file_path = '/data/bb_openpilot.cfg'\r\n\r\nclass ConfigFile(object):\r\n config_file_r = 'r'\r\n config_file_w = 'wb'\r\n\r\n ### Do NOT modify here, modify in /data/bb_openpilot.cfg and reboot\r\n def read(self, into, config_path):\r\n configr = ConfigParser.ConfigParser()\r\n file_changed = False\r\n\r\n try:\r\n configr.read(config_path)\r\n fd = open(config_path, \"r\")\r\n prev_file_contents = fd.read()\r\n fd.close()\r\n except IOError:\r\n prev_file_contents = \"\"\r\n print(\"no config file, creating with defaults...\")\r\n\r\n main_section = 'OP_CONFIG'\r\n config = ConfigParser.RawConfigParser(allow_no_value=True)\r\n config.add_section(main_section)\r\n\r\n #user_handle -> userHandle\r\n into.userHandle, didUpdate = self.read_config_entry(\r\n config, configr, prev_file_contents, section = main_section,\r\n entry = 'user_handle', entry_type = str,\r\n default_value = 'your_tinkla_username',\r\n comment = 'Username at tinkla.com, for dashboard data and support. 
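The pqueue.py record above subclasses the standard library Queue but pickles every item into chunk files under a directory, so queued items survive a process restart; task_done() is what persists the consumer's tail position back to disk and lets fully consumed chunk files be deleted. A minimal usage sketch, with a hypothetical /tmp path used only for illustration:

    from selfdrive.tinklad.pqueue import Queue

    q = Queue("/tmp/example_pqueue")   # directory is created if it does not exist

    q.put({"name": "pull_stalk", "value": "up"})   # pickled and flushed to the head chunk file

    item = q.get()        # read back from the tail chunk file
    # ... hand the item off to whatever consumes it ...
    q.task_done()         # persist the new tail position to the info file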
If you don\\'t have a username, ask for one on Discord, or just enter your Discord handle here.'\r\n )\r\n file_changed |= didUpdate\r\n\r\n #force_fingerprint_tesla -> forceFingerprintTesla\r\n into.forceFingerprintTesla, didUpdate = self.read_config_entry(\r\n config, configr, prev_file_contents, section = main_section,\r\n entry = 'force_fingerprint_tesla', entry_type = bool,\r\n default_value = False,\r\n comment = 'Forces the fingerprint to Tesla Model S if OpenPilot fails to identify car via fingerprint.'\r\n )\r\n file_changed |= didUpdate\r\n\r\n #eon_to_front -> eonToFront\r\n into.eonToFront, didUpdate = self.read_config_entry(\r\n config, configr, prev_file_contents, section = main_section,\r\n entry = 'eon_to_front', entry_type = float,\r\n default_value = 0.9,\r\n comment = 'Distance between EON plane and front of the car.'\r\n )\r\n file_changed |= didUpdate\r\n\r\n #force_pedal_over_cc -> forcePedalOverCC\r\n into.forcePedalOverCC, didUpdate = self.read_config_entry(\r\n config, configr, prev_file_contents, section = main_section,\r\n entry = 'force_pedal_over_cc', entry_type = bool,\r\n default_value = False,\r\n comment = 'Forces the use of Tesla Pedal over ACC completely disabling the Tesla CC'\r\n )\r\n file_changed |= didUpdate\r\n \r\n #enable_hso -> enableHSO\r\n into.enableHSO, didUpdate = self.read_config_entry(\r\n config, configr, prev_file_contents, section = main_section,\r\n entry = 'enable_hso', entry_type = bool,\r\n default_value = True,\r\n comment = 'Enables Human Steering Override (HSO) feature which allows you to take control of the steering wheel and correct the course of the car without disengaging OpenPilot lane keep assis (LKS, lateral control)'\r\n )\r\n file_changed |= didUpdate\r\n\r\n #enable_das_emulation -> enableDasEmulation\r\n into.enableALCA, didUpdate = self.read_config_entry(\r\n config, configr, prev_file_contents, section = main_section,\r\n entry = 'enable_alca', entry_type = bool,\r\n default_value = True,\r\n comment = 'Enables the Adaptive Lane Change Assist (ALCA) feature which will automatically change lanes when driving above 18 MPH (29 km/h) by just pushing 1/2 way on your turn signal stalk; turn signal will remain on for the duration of lane change'\r\n )\r\n file_changed |= didUpdate\r\n\r\n #enable_das_emulation -> enableDasEmulation\r\n into.enableDasEmulation, didUpdate = self.read_config_entry(\r\n config, configr, prev_file_contents, section = main_section,\r\n entry = 'enable_das_emulation', entry_type = bool,\r\n default_value = False,\r\n comment = 'The secret sauce of IC/CID integration; this feature makes the Panda generate all the CAN messages needed for IC/CID integration that mimiinto the AP interface'\r\n )\r\n file_changed |= didUpdate\r\n\r\n #enable_radar_emulation -> enableRadarEmulation\r\n into.enableRadarEmulation, didUpdate = self.read_config_entry(\r\n config, configr, prev_file_contents, section = main_section,\r\n entry = 'enable_radar_emulation', entry_type = bool,\r\n default_value = False,\r\n comment = 'The secret sauce to make the Tesla Radar work; this feature make the Panda generate all the CAN messages needed by the Tesla Bosch Radar to operate'\r\n )\r\n file_changed |= didUpdate\r\n\r\n #enable_roll_angle_correction -> enableRollAngleCorrection\r\n into.enableSpeedVariableDesAngle, didUpdate = self.read_config_entry(\r\n config, configr, prev_file_contents, section = main_section,\r\n entry = 'enable_speed_variable_angle', entry_type = bool,\r\n default_value = True,\r\n comment = ''\r\n 
)\r\n file_changed |= didUpdate\r\n\r\n #enable_roll_angle_correction -> enableRollAngleCorrection\r\n into.enableRollAngleCorrection, didUpdate = self.read_config_entry(\r\n config, configr, prev_file_contents, section = main_section,\r\n entry = 'enable_roll_angle_correction', entry_type = bool,\r\n default_value = False,\r\n comment = ''\r\n )\r\n file_changed |= didUpdate\r\n\r\n #enable_feed_forward_angle_correction -> enableFeedForwardAngleCorrection\r\n into.enableFeedForwardAngleCorrection, didUpdate = self.read_config_entry(\r\n config, configr, prev_file_contents, section = main_section,\r\n entry = 'enable_feed_forward_angle_correction', entry_type = bool,\r\n default_value = True,\r\n comment = ''\r\n )\r\n file_changed |= didUpdate\r\n\r\n #enable_driver_monitor -> enableDriverMonitor\r\n into.enableDriverMonitor, didUpdate = self.read_config_entry(\r\n config, configr, prev_file_contents, section = main_section,\r\n entry = 'enable_driver_monitor', entry_type = bool,\r\n default_value = True,\r\n comment = 'When turned off, the OpenPilot is tricked into thinking you have the hands on the sterring wheel all the time'\r\n )\r\n file_changed |= didUpdate\r\n\r\n #enable_show_car -> enableShowCar\r\n into.enableShowCar, didUpdate = self.read_config_entry(\r\n config, configr, prev_file_contents, section = main_section,\r\n entry = 'enable_show_car', entry_type = bool,\r\n default_value = True,\r\n comment = 'Shows a Tesla car in the limitted UI mode instead of the triangle that identifies the lead car; this is only used if you do not have IC/CID integration'\r\n )\r\n file_changed |= didUpdate\r\n\r\n #enable_show_logo -> enableShowLogo\r\n into.enableShowLogo, didUpdate = self.read_config_entry(\r\n config, configr, prev_file_contents, section = main_section,\r\n entry = 'enable_show_logo', entry_type = bool,\r\n default_value = True,\r\n comment = 'Shows a Tesla red logo on the EON screen when OP is not enabled'\r\n )\r\n file_changed |= didUpdate\r\n\r\n #has_noctua_fan -> hasNoctuaFan\r\n into.hasNoctuaFan, didUpdate = self.read_config_entry(\r\n config, configr, prev_file_contents, section = main_section,\r\n entry = 'has_noctua_fan', entry_type = bool,\r\n default_value = False,\r\n comment = 'Enables control of Noctua fan (at higher RPMS) when you have a Noctua fan installed'\r\n )\r\n file_changed |= didUpdate\r\n\r\n #limit_battery_minmax -> limitBatteryMinMax\r\n into.limitBatteryMinMax, didUpdate = self.read_config_entry(\r\n config, configr, prev_file_contents, section = main_section,\r\n entry = 'limit_battery_minmax', entry_type = bool,\r\n default_value = True,\r\n comment = 'Enables battery charging limits; the battery will start charging when battery percentage is below limit_battery_min and will stop charging when battery percentage is above limit_battery_max'\r\n )\r\n file_changed |= didUpdate\r\n\r\n #limit_battery_min -> limitBattery_Min\r\n into.limitBattery_Min, didUpdate = self.read_config_entry(\r\n config, configr, prev_file_contents, section = main_section,\r\n entry = 'limit_battery_min', entry_type = int,\r\n default_value = 60,\r\n comment = 'See limit_battery_minmax'\r\n )\r\n file_changed |= didUpdate\r\n\r\n #limitBattery_Max -> limitBattery_Max\r\n into.limitBattery_Max, didUpdate = self.read_config_entry(\r\n config, configr, prev_file_contents, section = main_section,\r\n entry = 'limit_battery_max', entry_type = int,\r\n default_value = 80,\r\n comment = 'See limit_battery_minmax'\r\n )\r\n file_changed |= didUpdate\r\n\r\n 
#block_upload_while_tethering -> blockUploadWhileTethering\r\n into.blockUploadWhileTethering, didUpdate = self.read_config_entry(\r\n config, configr, prev_file_contents, section = main_section,\r\n entry = 'block_upload_while_tethering', entry_type = bool,\r\n default_value = False,\r\n comment = 'This setting will block uploading OP videos to Comma when you are tethering through the phone. You should set the tether_ip to the first 3 values that your phone provides as IP when you tether. This is phone/carrier specific. For example iPhone give addresses like 172.20.10.x so you would enter 172.20.10.'\r\n )\r\n file_changed |= didUpdate\r\n\r\n #tether_ip -> tetherIP\r\n into.tetherIP, didUpdate = self.read_config_entry(\r\n config, configr, prev_file_contents, section = main_section,\r\n entry = 'tether_ip', entry_type = str,\r\n default_value = \"127.0.0.\",\r\n comment = 'See block_upload_while_tethering'\r\n )\r\n file_changed |= didUpdate\r\n\r\n #use_tesla_gps -> useTeslaGPS\r\n into.useTeslaGPS, didUpdate = self.read_config_entry(\r\n config, configr, prev_file_contents, section = main_section,\r\n entry = 'use_tesla_gps', entry_type = bool,\r\n default_value = False,\r\n comment = 'This setting makes OP to use Tesla GPS data instead of the GPS that comes with the gray panda; both GPS systems use Ublox and both are very close in accuracy; this also allows one to use a White Panda and still have map integration'\r\n )\r\n file_changed |= didUpdate\r\n\r\n #use_tesla_map_data -> useTeslaMapData\r\n into.useTeslaMapData, didUpdate = self.read_config_entry(\r\n config, configr, prev_file_contents, section = main_section,\r\n entry = 'use_tesla_map_data', entry_type = bool,\r\n default_value = False,\r\n comment = 'This setting (which requires root) allows OP to use Tesla navigation map data (under development)'\r\n )\r\n file_changed |= didUpdate\r\n\r\n #has_tesla_ic_integration -> hasTeslaIcIntegration\r\n into.hasTeslaIcIntegration, didUpdate = self.read_config_entry(\r\n config, configr, prev_file_contents, section = main_section,\r\n entry = 'has_tesla_ic_integration', entry_type = bool,\r\n default_value = False,\r\n comment = 'This setting (in conjunction with enable_radar_emulation) help create the IC integration'\r\n )\r\n file_changed |= didUpdate\r\n\r\n #use_tesla_radar -> useTeslaRadar\r\n into.useTeslaRadar, didUpdate = self.read_config_entry(\r\n config, configr, prev_file_contents, section = main_section,\r\n entry = 'use_tesla_radar', entry_type = bool,\r\n default_value = False,\r\n comment = 'Set this setting to True if you have a Tesla Bosch Radar installed (works in conjunction with enable_radar_emulation)'\r\n )\r\n file_changed |= didUpdate\r\n\r\n #use_without_harness = useWithoutHarness\r\n into.useWithoutHarness, didUpdate = self.read_config_entry(\r\n config, configr, prev_file_contents, section = main_section,\r\n entry = 'use_without_harness', entry_type = bool,\r\n default_value = False,\r\n comment = 'Not used at the moment; should be False'\r\n )\r\n file_changed |= didUpdate\r\n\r\n #radar_vin -> into.radarVIN\r\n default_radar_vin = '\" \"'\r\n into.radarVIN, didUpdate = self.read_config_entry(\r\n config, configr, prev_file_contents, section = main_section,\r\n entry = 'radar_vin', entry_type = str,\r\n default_value = default_radar_vin,\r\n comment = 'If you used an aftermarket Tesla Bosch Radar that already has a coded VIN, you will have to enter that VIN value here'\r\n )\r\n file_changed |= didUpdate\r\n if into.radarVIN == '':\r\n into.radarVIN = 
default_radar_vin\r\n file_changed = True\r\n\r\n #enable_ldw = enableLdw\r\n into.enableLdw, didUpdate = self.read_config_entry(\r\n config, configr, prev_file_contents, section = main_section,\r\n entry = 'enable_ldw', entry_type = bool,\r\n default_value = True,\r\n comment = 'Enable the Lane Departure Warning (LDW) feature; this feature warns the driver is the car gets too close to one of the lines when driving above 45 MPH (72 km/h) without touching the steering wheel and when the turn signal is off'\r\n )\r\n file_changed |= didUpdate\r\n\r\n #radar_offset -> radarOffset\r\n into.radarOffset, didUpdate = self.read_config_entry(\r\n config, configr, prev_file_contents, section = main_section,\r\n entry = 'radar_offset', entry_type = float,\r\n default_value = 0,\r\n comment = 'If your Tesla Bosch Radar is not centered on the car, this value will allow to enter a correction offset'\r\n )\r\n file_changed |= didUpdate\r\n\r\n #radar_epas_type -> radarEpasType\r\n into.radarEpasType, didUpdate = self.read_config_entry(\r\n config, configr, prev_file_contents, section = main_section,\r\n entry = 'radar_epas_type', entry_type = int,\r\n default_value = 0,\r\n comment = 'Depending on the source of your Tesla Bosch Radar (older or newer Model S or Model X), this setting has to match what the radar was programmed to recognize as EPAS; values are between 0 and 4; finding the right one is trial and error'\r\n )\r\n file_changed |= didUpdate\r\n\r\n #radar_position -> radarPosition\r\n into.radarPosition, didUpdate = self.read_config_entry(\r\n config, configr, prev_file_contents, section = main_section,\r\n entry = 'radar_position', entry_type = int,\r\n default_value = 0,\r\n comment = 'Depending on the source of your Tesla Bosch Radar (older or newer Model S or Model X), this setting has to match what the radar was programmed to have a position (Model S, Model S facelift, Model X); values are between 0 and 3; finding the right one is trial and error'\r\n )\r\n file_changed |= didUpdate\r\n\r\n #fix_1916 -> fix1916\r\n into.fix1916, didUpdate = self.read_config_entry(\r\n config, configr, prev_file_contents, section = main_section,\r\n entry = 'fix_1916', entry_type = bool,\r\n default_value = False,\r\n comment = 'Set this value to True if you are running Tesla software v2019.16 and above. 
This fixes the DI_state can message change for DI_cruiseSet which changed from 9 bits to 8 bits'\r\n )\r\n file_changed |= didUpdate\r\n\r\n #do_auto_update -> doAutoUpdate\r\n into.doAutoUpdate, didUpdate = self.read_config_entry(\r\n config, configr, prev_file_contents, section = main_section,\r\n entry = 'do_auto_update', entry_type = bool,\r\n default_value = True,\r\n comment = 'Set this setting to False if you do not want OP to autoupdate every time you reboot and there is a change on the repo'\r\n )\r\n file_changed |= didUpdate\r\n\r\n if file_changed:\r\n did_write = True\r\n with open(config_path, self.config_file_w) as configfile:\r\n config.write(configfile)\r\n else:\r\n did_write = False\r\n\r\n # Remove double quotes from VIN (they are required for empty case)\r\n into.radarVIN = into.radarVIN.replace('\"', '')\r\n return did_write\r\n\r\n def read_config_entry(self, config, configr, prev_file_contents, section, entry, entry_type, default_value, comment):\r\n updated = self.update_comment(config, prev_file_contents, section, entry, default_value, comment)\r\n result = None\r\n try:\r\n if entry_type == bool:\r\n result = configr.getboolean(section, entry)\r\n elif entry_type == int:\r\n result = configr.getint(section, entry)\r\n elif entry_type == float:\r\n result = configr.getfloat(section, entry)\r\n else:\r\n result = configr.get(section, entry)\r\n except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):\r\n result = default_value\r\n updated = True\r\n config.set(section, entry, result)\r\n return result, updated\r\n\r\n def update_comment(self, config, prev_file_contents, section, entry, default_value, comment):\r\n new_comment = (\"# \" + entry + \": \" + comment + \" (Default: \" + str(default_value) + \")\").lower()\r\n config.set(section, new_comment)\r\n updated = (prev_file_contents.find(new_comment) == -1)\r\n return updated\r\n\r\nclass CarSettings(object):\r\n\r\n userHandle = None\r\n forceFingerprintTesla = None\r\n eonToFront = None\r\n forcePedalOverCC = None\r\n enableHSO = None\r\n enableALCA = None\r\n enableDasEmulation = None\r\n enableRadarEmulation = None\r\n enableSpeedVariableDesAngle = None\r\n enableRollAngleCorrection = None\r\n enableFeedForwardAngleCorrection = None\r\n enableDriverMonitor = None\r\n enableShowCar = None\r\n enableShowLogo = None\r\n hasNoctuaFan = None\r\n limitBatteryMinMax = None\r\n limitBattery_Min = None\r\n limitBattery_Max = None\r\n blockUploadWhileTethering = None\r\n tetherIP = None\r\n useTeslaGPS = None\r\n useTeslaMapData = None\r\n hasTeslaIcIntegration = None\r\n useTeslaRadar = None\r\n useWithoutHarness = None\r\n radarVIN = None\r\n enableLdw = None\r\n radarOffset = None\r\n radarEpasType = None\r\n radarPosition = None\r\n fix1916 = None\r\n doAutoUpdate = None\r\n\r\n def __init__(self, optional_config_file_path = default_config_file_path):\r\n config_file = ConfigFile()\r\n self.did_write_file = config_file.read(self, config_path = optional_config_file_path)\r\n\r\n def get_value(self, name_of_variable):\r\n return self.__dict__[name_of_variable]\r\n\r\n# Legacy support\r\ndef read_config_file(into, config_path = default_config_file_path):\r\n config_file = ConfigFile()\r\n config_file.read(into, config_path)\r\n", "id": "7789433", "language": "Python", "matching_score": 1.9804346561431885, "max_stars_count": 1, "path": "selfdrive/car/tesla/readconfig.py" }, { "content": "import numpy as np\nfrom common.kalman.simple_kalman import KF1D\nfrom selfdrive.can.can_define import CANDefine\nfrom 
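# --- Illustrative sketch, not part of the original repository ---
# read_config_entry() in readconfig.py above looks a value up in an INI file,
# falls back to a typed default when the key is missing, and tells the caller
# whether the file needs to be rewritten. A minimal stand-alone version of that
# pattern (the section and option names used here are hypothetical):
try:
    import configparser                      # Python 3
except ImportError:
    import ConfigParser as configparser      # Python 2, as used in the module above

def read_with_default(config, section, option, default):
    """Return (value, file_needs_rewrite) for config[section][option]."""
    try:
        if isinstance(default, bool):        # check bool before int: bool subclasses int
            return config.getboolean(section, option), False
        if isinstance(default, int):
            return config.getint(section, option), False
        if isinstance(default, float):
            return config.getfloat(section, option), False
        return config.get(section, option), False
    except (configparser.NoSectionError, configparser.NoOptionError):
        if not config.has_section(section):
            config.add_section(section)
        config.set(section, option, str(default))
        return default, True                 # caller should write the file back

if __name__ == "__main__":
    cfg = configparser.RawConfigParser()
    val, changed = read_with_default(cfg, "OP_CONFIG", "use_tesla_radar", False)
    print(val, changed)                      # -> False True (default was injected)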
selfdrive.can.parser import CANParser\nfrom selfdrive.config import Conversions as CV\nfrom selfdrive.car.toyota.values import CAR, DBC, STEER_THRESHOLD, TSS2_CAR, NO_DSU_CAR\n\ndef parse_gear_shifter(gear, vals):\n\n val_to_capnp = {'P': 'park', 'R': 'reverse', 'N': 'neutral',\n 'D': 'drive', 'B': 'brake'}\n try:\n return val_to_capnp[vals[gear]]\n except KeyError:\n return \"unknown\"\n\n\ndef get_can_parser(CP):\n\n signals = [\n # sig_name, sig_address, default\n (\"STEER_ANGLE\", \"STEER_ANGLE_SENSOR\", 0),\n (\"GEAR\", \"GEAR_PACKET\", 0),\n (\"BRAKE_PRESSED\", \"BRAKE_MODULE\", 0),\n (\"GAS_PEDAL\", \"GAS_PEDAL\", 0),\n (\"WHEEL_SPEED_FL\", \"WHEEL_SPEEDS\", 0),\n (\"WHEEL_SPEED_FR\", \"WHEEL_SPEEDS\", 0),\n (\"WHEEL_SPEED_RL\", \"WHEEL_SPEEDS\", 0),\n (\"WHEEL_SPEED_RR\", \"WHEEL_SPEEDS\", 0),\n (\"DOOR_OPEN_FL\", \"SEATS_DOORS\", 1),\n (\"DOOR_OPEN_FR\", \"SEATS_DOORS\", 1),\n (\"DOOR_OPEN_RL\", \"SEATS_DOORS\", 1),\n (\"DOOR_OPEN_RR\", \"SEATS_DOORS\", 1),\n (\"SEATBELT_DRIVER_UNLATCHED\", \"SEATS_DOORS\", 1),\n (\"TC_DISABLED\", \"ESP_CONTROL\", 1),\n (\"STEER_FRACTION\", \"STEER_ANGLE_SENSOR\", 0),\n (\"STEER_RATE\", \"STEER_ANGLE_SENSOR\", 0),\n (\"CRUISE_ACTIVE\", \"PCM_CRUISE\", 0),\n (\"CRUISE_STATE\", \"PCM_CRUISE\", 0),\n (\"MAIN_ON\", \"PCM_CRUISE_2\", 0),\n (\"SET_SPEED\", \"PCM_CRUISE_2\", 0),\n (\"LOW_SPEED_LOCKOUT\", \"PCM_CRUISE_2\", 0),\n (\"STEER_TORQUE_DRIVER\", \"STEER_TORQUE_SENSOR\", 0),\n (\"STEER_TORQUE_EPS\", \"STEER_TORQUE_SENSOR\", 0),\n (\"TURN_SIGNALS\", \"STEERING_LEVERS\", 3), # 3 is no blinkers\n (\"LKA_STATE\", \"EPS_STATUS\", 0),\n (\"IPAS_STATE\", \"EPS_STATUS\", 1),\n (\"BRAKE_LIGHTS_ACC\", \"ESP_CONTROL\", 0),\n (\"AUTO_HIGH_BEAM\", \"LIGHT_STALK\", 0),\n ]\n\n checks = [\n (\"BRAKE_MODULE\", 40),\n (\"GAS_PEDAL\", 33),\n (\"WHEEL_SPEEDS\", 80),\n (\"STEER_ANGLE_SENSOR\", 80),\n (\"PCM_CRUISE\", 33),\n (\"PCM_CRUISE_2\", 33),\n (\"STEER_TORQUE_SENSOR\", 50),\n (\"EPS_STATUS\", 25),\n ]\n\n if CP.carFingerprint in NO_DSU_CAR:\n signals += [(\"STEER_ANGLE\", \"STEER_TORQUE_SENSOR\", 0)]\n\n if CP.carFingerprint == CAR.PRIUS:\n signals += [(\"STATE\", \"AUTOPARK_STATUS\", 0)]\n\n # add gas interceptor reading if we are using it\n if CP.enableGasInterceptor:\n signals.append((\"INTERCEPTOR_GAS\", \"GAS_SENSOR\", 0))\n signals.append((\"INTERCEPTOR_GAS2\", \"GAS_SENSOR\", 0))\n checks.append((\"GAS_SENSOR\", 50))\n\n return CANParser(DBC[CP.carFingerprint]['pt'], signals, checks, 0)\n\n\ndef get_cam_can_parser(CP):\n\n signals = []\n\n # use steering message to check if panda is connected to frc\n checks = [(\"STEERING_LKA\", 42)]\n\n return CANParser(DBC[CP.carFingerprint]['pt'], signals, checks, 2)\n\n\nclass CarState(object):\n def __init__(self, CP):\n #labels for buttons\n self.btns_init = [[\"alca\",\"ALC\",[\"MadMax\",\"Normal\",\"Wifey\"]], \\\n [\"\",\"\",[\"\"]], \\\n [\"\",\"\",[\"\"]], \\\n [\"sound\",\"SND\",[\"\"]], \\\n [\"\", \"\",[\"\"]], \\\n [\"\", \"\", [\"\"]]]\n #if (CP.carFingerprint == CAR.MODELS):\n # ALCA PARAMS\n # max REAL delta angle for correction vs actuator\n self.CL_MAX_ANGLE_DELTA_BP = [10., 44.]\n self.CL_MAX_ANGLE_DELTA = [1.8, .3]\n\n # adjustment factor for merging steer angle to actuator; should be over 4; the higher the smoother\n self.CL_ADJUST_FACTOR_BP = [10., 44.]\n self.CL_ADJUST_FACTOR = [16. , 8.]\n\n\n # reenrey angle when to let go\n self.CL_REENTRY_ANGLE_BP = [10., 44.]\n self.CL_REENTRY_ANGLE = [5. 
, 5.]\n\n # a jump in angle above the CL_LANE_DETECT_FACTOR means we crossed the line\n self.CL_LANE_DETECT_BP = [10., 44.]\n self.CL_LANE_DETECT_FACTOR = [1.5, 1.5]\n\n self.CL_LANE_PASS_BP = [10., 20., 44.]\n self.CL_LANE_PASS_TIME = [40.,10., 3.] \n\n # change lane delta angles and other params\n self.CL_MAXD_BP = [10., 32., 44.]\n self.CL_MAXD_A = [.358, 0.084, 0.042] #delta angle based on speed; needs fine tune, based on Tesla steer ratio of 16.75\n\n self.CL_MIN_V = 8.9 # do not turn if speed less than x m/2; 20 mph = 8.9 m/s\n\n # do not turn if actuator wants more than x deg for going straight; this should be interp based on speed\n self.CL_MAX_A_BP = [10., 44.]\n self.CL_MAX_A = [10., 10.] \n\n # define limits for angle change every 0.1 s\n # we need to force correction above 10 deg but less than 20\n # anything more means we are going to steep or not enough in a turn\n self.CL_MAX_ACTUATOR_DELTA = 2.\n self.CL_MIN_ACTUATOR_DELTA = 0. \n self.CL_CORRECTION_FACTOR = 1.\n\n #duration after we cross the line until we release is a factor of speed\n self.CL_TIMEA_BP = [10., 32., 44.]\n self.CL_TIMEA_T = [0.7 ,0.30, 0.20]\n\n #duration to wait (in seconds) with blinkers on before starting to turn\n self.CL_WAIT_BEFORE_START = 1\n #END OF ALCA PARAMS\n \n self.CP = CP\n self.can_define = CANDefine(DBC[CP.carFingerprint]['pt'])\n self.shifter_values = self.can_define.dv[\"GEAR_PACKET\"]['GEAR']\n self.left_blinker_on = 0\n self.right_blinker_on = 0\n self.angle_offset = 0.\n self.init_angle_offset = False\n\n # initialize can parser\n self.car_fingerprint = CP.carFingerprint\n\n # vEgo kalman filter\n dt = 0.01\n # Q = np.matrix([[10.0, 0.0], [0.0, 100.0]])\n # R = 1e3\n self.v_ego_kf = KF1D(x0=[[0.0], [0.0]],\n A=[[1.0, dt], [0.0, 1.0]],\n C=[1.0, 0.0],\n K=[[0.12287673], [0.29666309]])\n self.v_ego = 0.0\n \n\n def update(self, cp):\n # update prevs, update must run once per loop\n self.prev_left_blinker_on = self.left_blinker_on\n self.prev_right_blinker_on = self.right_blinker_on\n\n self.door_all_closed = not any([cp.vl[\"SEATS_DOORS\"]['DOOR_OPEN_FL'], cp.vl[\"SEATS_DOORS\"]['DOOR_OPEN_FR'],\n cp.vl[\"SEATS_DOORS\"]['DOOR_OPEN_RL'], cp.vl[\"SEATS_DOORS\"]['DOOR_OPEN_RR']])\n self.seatbelt = not cp.vl[\"SEATS_DOORS\"]['SEATBELT_DRIVER_UNLATCHED']\n\n self.brake_pressed = cp.vl[\"BRAKE_MODULE\"]['BRAKE_PRESSED']\n if self.CP.enableGasInterceptor:\n self.pedal_gas = (cp.vl[\"GAS_SENSOR\"]['INTERCEPTOR_GAS'] + cp.vl[\"GAS_SENSOR\"]['INTERCEPTOR_GAS2']) / 2.\n else:\n self.pedal_gas = cp.vl[\"GAS_PEDAL\"]['GAS_PEDAL']\n self.car_gas = self.pedal_gas\n self.esp_disabled = cp.vl[\"ESP_CONTROL\"]['TC_DISABLED']\n\n # calc best v_ego estimate, by averaging two opposite corners\n self.v_wheel_fl = cp.vl[\"WHEEL_SPEEDS\"]['WHEEL_SPEED_FL'] * CV.KPH_TO_MS\n self.v_wheel_fr = cp.vl[\"WHEEL_SPEEDS\"]['WHEEL_SPEED_FR'] * CV.KPH_TO_MS\n self.v_wheel_rl = cp.vl[\"WHEEL_SPEEDS\"]['WHEEL_SPEED_RL'] * CV.KPH_TO_MS\n self.v_wheel_rr = cp.vl[\"WHEEL_SPEEDS\"]['WHEEL_SPEED_RR'] * CV.KPH_TO_MS\n v_wheel = float(np.mean([self.v_wheel_fl, self.v_wheel_fr, self.v_wheel_rl, self.v_wheel_rr]))\n\n # Kalman filter\n if abs(v_wheel - self.v_ego) > 2.0: # Prevent large accelerations when car starts at non zero speed\n self.v_ego_kf.x = [[v_wheel], [0.0]]\n\n self.v_ego_raw = v_wheel\n v_ego_x = self.v_ego_kf.update(v_wheel)\n self.v_ego = float(v_ego_x[0])\n self.a_ego = float(v_ego_x[1])\n self.standstill = not v_wheel > 0.001\n\n if self.CP.carFingerprint in TSS2_CAR:\n self.angle_steers = 
cp.vl[\"STEER_TORQUE_SENSOR\"]['STEER_ANGLE']\n elif self.CP.carFingerprint in NO_DSU_CAR:\n # cp.vl[\"STEER_TORQUE_SENSOR\"]['STEER_ANGLE'] is zeroed to where the steering angle is at start.\n # need to apply an offset as soon as the steering angle measurements are both received\n self.angle_steers = cp.vl[\"STEER_TORQUE_SENSOR\"]['STEER_ANGLE'] - self.angle_offset\n angle_wheel = cp.vl[\"STEER_ANGLE_SENSOR\"]['STEER_ANGLE'] + cp.vl[\"STEER_ANGLE_SENSOR\"]['STEER_FRACTION']\n if abs(angle_wheel) > 1e-3 and abs(self.angle_steers) > 1e-3 and not self.init_angle_offset:\n self.init_angle_offset = True\n self.angle_offset = self.angle_steers - angle_wheel\n else:\n self.angle_steers = cp.vl[\"STEER_ANGLE_SENSOR\"]['STEER_ANGLE'] + cp.vl[\"STEER_ANGLE_SENSOR\"]['STEER_FRACTION']\n self.angle_steers_rate = cp.vl[\"STEER_ANGLE_SENSOR\"]['STEER_RATE']\n can_gear = int(cp.vl[\"GEAR_PACKET\"]['GEAR'])\n self.gear_shifter = parse_gear_shifter(can_gear, self.shifter_values)\n self.main_on = cp.vl[\"PCM_CRUISE_2\"]['MAIN_ON']\n self.left_blinker_on = cp.vl[\"STEERING_LEVERS\"]['TURN_SIGNALS'] == 1\n self.right_blinker_on = cp.vl[\"STEERING_LEVERS\"]['TURN_SIGNALS'] == 2\n\n # 2 is standby, 10 is active. TODO: check that everything else is really a faulty state\n self.steer_state = cp.vl[\"EPS_STATUS\"]['LKA_STATE']\n self.steer_error = cp.vl[\"EPS_STATUS\"]['LKA_STATE'] not in [1, 5]\n self.ipas_active = cp.vl['EPS_STATUS']['IPAS_STATE'] == 3\n self.brake_error = 0\n self.steer_torque_driver = cp.vl[\"STEER_TORQUE_SENSOR\"]['STEER_TORQUE_DRIVER']\n self.steer_torque_motor = cp.vl[\"STEER_TORQUE_SENSOR\"]['STEER_TORQUE_EPS']\n # we could use the override bit from dbc, but it's triggered at too high torque values\n self.steer_override = abs(self.steer_torque_driver) > STEER_THRESHOLD\n\n self.user_brake = 0\n self.v_cruise_pcm = cp.vl[\"PCM_CRUISE_2\"]['SET_SPEED']\n self.pcm_acc_status = cp.vl[\"PCM_CRUISE\"]['CRUISE_STATE']\n self.pcm_acc_active = bool(cp.vl[\"PCM_CRUISE\"]['CRUISE_ACTIVE'])\n self.low_speed_lockout = cp.vl[\"PCM_CRUISE_2\"]['LOW_SPEED_LOCKOUT'] == 2\n self.brake_lights = bool(cp.vl[\"ESP_CONTROL\"]['BRAKE_LIGHTS_ACC'] or self.brake_pressed)\n if self.CP.carFingerprint == CAR.PRIUS:\n self.generic_toggle = cp.vl[\"AUTOPARK_STATUS\"]['STATE'] != 0\n else:\n self.generic_toggle = bool(cp.vl[\"LIGHT_STALK\"]['AUTO_HIGH_BEAM'])\n", "id": "3472745", "language": "Python", "matching_score": 2.2196333408355713, "max_stars_count": 1, "path": "selfdrive/car/toyota/carstate.py" }, { "content": "\"\"\"\nCopyright 2018-2019 BB Solutions, LLC. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in\n the documentation and/or other materials provided with the\n distribution.\n\n * Neither the name of Google nor the names of its contributors may\n be used to endorse or promote products derived from this software\n without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. 
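# --- Illustrative sketch, not part of the original repository ---
# CarState.update() in carstate.py above averages the four wheel speeds and
# runs them through KF1D, a fixed-gain (steady-state) Kalman filter tracking
# [speed, acceleration]. A self-contained filter with the same A/C/K values
# (dt = 0.01 s) behaves like this:
class FixedGainKF1D(object):
    def __init__(self, x0, A, C, K):
        self.x = x0                          # state estimate: [[v], [a]]
        self.A = A                           # state transition matrix
        self.C = C                           # measurement row (only speed is observed)
        self.K = K                           # precomputed steady-state Kalman gain

    def update(self, meas):
        # predict: x_pred = A * x (2x2 times 2x1, written out element-wise)
        xp0 = self.A[0][0] * self.x[0][0] + self.A[0][1] * self.x[1][0]
        xp1 = self.A[1][0] * self.x[0][0] + self.A[1][1] * self.x[1][0]
        # innovation against the measured wheel speed
        err = meas - (self.C[0] * xp0 + self.C[1] * xp1)
        # correct with the constant gain
        self.x = [[xp0 + self.K[0][0] * err], [xp1 + self.K[1][0] * err]]
        return self.x

if __name__ == "__main__":
    dt = 0.01
    kf = FixedGainKF1D(x0=[[0.0], [0.0]],
                       A=[[1.0, dt], [0.0, 1.0]],
                       C=[1.0, 0.0],
                       K=[[0.12287673], [0.29666309]])
    for v_wheel in (0.0, 0.5, 1.0, 1.5, 2.0):    # made-up wheel-speed samples in m/s
        x = kf.update(v_wheel)
    print("v_ego=%.3f a_ego=%.3f" % (x[0][0], x[1][0]))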
IN NO EVENT SHALL THE COPYRIGHT\nHOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nHISTORY\n-------\nv4.0 - integrated into model_parser.py\nv3.6 - moved parameters to carstate.py\nv3.5 - changing the start angle to keep turning until we reach MAX_ANGLE_DELTA\nv3.4 - read steerRatio from each car parameters file\nv3.3 - re-entry logic changed for smoothness\nv3.2 - angle adjustment to compesate for road curvature change\nv3.1 - new angle logic for a smoother re-entry\nv3.0 - better lane dettection logic\nv2.0 - detection of lane crossing \nv1.0 - fixed angle move\n\"\"\"\n\nfrom common.numpy_fast import interp\nfrom selfdrive.controls.lib.pid import PIController\nfrom common.realtime import sec_since_boot\nfrom selfdrive.services import service_list\nimport selfdrive.messaging as messaging\nimport zmq\nimport numpy as np\nfrom cereal import tesla\n\n#wait time after turn complete before enabling smoother\nWAIT_TIME_AFTER_TURN = 2.0\n\n#ALCA\nALCA_line_check_low_limit = 0.25\nALCA_line_check_high_limit = 0.75\nALCA_line_min_prob = 0.01\nALCA_release_distance = 0.3\n\nALCA_DEBUG = True\nDEBUG_INFO = \"step {step} of {total_steps}: direction={ALCA_direction} | using visual = {ALCA_use_visual} | over line={ALCA_over_line} | lane width={ALCA_lane_width} | left to move={left_to_move} | from center ={from_center} | C2 offset = {ALCA_OFFSET_C2} | \"\n\nclass ALCAController(object):\n def __init__(self,carcontroller,alcaEnabled,steerByAngle):\n #import settings\n self.CC = carcontroller # added to start, will see if we need it actually\n # variables for lane change\n self.angle_offset = 0. #added when one needs to compensate for missalignment\n self.alcaEnabled = alcaEnabled\n self.alca_duration = [2., 3.5, 5.]\n self.laneChange_strStartFactor = 2.\n self.laneChange_strStartMultiplier = 1.5\n self.laneChange_steerByAngle = steerByAngle # steer only by angle; do not call PID\n self.laneChange_last_actuator_angle = 0.\n self.laneChange_last_actuator_delta = 0.\n self.laneChange_last_sent_angle = 0.\n self.laneChange_over_the_line = 0 # did we cross the line?\n self.laneChange_avg_angle = 0. # used if we do average entry angle over x frames\n self.laneChange_avg_count = 0. # used if we do average entry angle over x frames\n self.laneChange_enabled = 1 # set to zero for no lane change\n self.laneChange_counter = 0 # used to count frames during lane change\n self.laneChange_min_duration = 2. # min time to wait before looking for next lane\n self.laneChange_duration = 5.6 # how many max seconds to actually do the move; if lane not found after this then send error\n self.laneChange_after_lane_duration_mult = 1. # multiplier for time after we cross the line before we let OP take over; multiplied with CL_TIMEA_T \n self.laneChange_wait = 1 # how many seconds to wait before it starts the change\n self.laneChange_lw = 3.7 # lane width in meters\n self.laneChange_angle = 0. # saves the last angle from actuators before lane change starts\n self.laneChange_angled = 0. 
# angle delta\n self.laneChange_steerr = 15.75 # steer ratio for lane change starting with the Tesla one\n self.laneChange_direction = 0 # direction of the lane change \n self.prev_right_blinker_on = False # local variable for prev position\n self.prev_left_blinker_on = False # local variable for prev position\n self.keep_angle = False #local variable to keep certain angle delta vs. actuator\n self.last10delta = []\n self.laneChange_cancelled = False\n self.laneChange_cancelled_counter = 0\n self.last_time_enabled = 0\n\n\n def update_status(self,alcaEnabled):\n self.alcaEnabled = alcaEnabled\n\n\n def stop_ALCA(self, CS):\n # something is not right; ALCAModelParser is not engaged; cancel\n CS.UE.custom_alert_message(3,\"Auto Lane Change Canceled! (d)\",200,5)\n self.laneChange_cancelled = True\n self.laneChange_cancelled_counter = 200\n self.laneChange_enabled = 1\n self.laneChange_counter = 0\n self.laneChange_direction = 0\n CS.cstm_btns.set_button_status(\"alca\",1)\n\n\n def update(self,enabled,CS,actuators):\n cl_min_v = CS.CL_MIN_V\n cl_max_a = CS.CL_MAX_A\n alca_mode = CS.cstm_btns.get_button_label2_index(\"alca\")\n\n if self.laneChange_cancelled_counter > 0:\n self.laneChange_cancelled_counter -= 1\n if self.laneChange_cancelled_counter == 0:\n self.laneChange_cancelled = False\n\n # Basic highway lane change logic\n actuator_delta = 0.\n laneChange_angle = 0.\n turn_signal_needed = 0 # send 1 for left, 2 for right 0 for not needed\n\n if (not CS.right_blinker_on) and (not CS.left_blinker_on) and \\\n (self.laneChange_enabled == 4):\n self.laneChange_enabled =1\n self.laneChange_counter =0\n self.laneChange_direction =0\n CS.UE.custom_alert_message(-1,\"\",0)\n \n if (not CS.right_blinker_on) and (not CS.left_blinker_on) and \\\n (self.laneChange_enabled > 1):\n # no blinkers on but we are still changing lane, so we need to send blinker command\n if self.laneChange_direction == -1:\n turn_signal_needed = 1\n elif self.laneChange_direction == 1:\n turn_signal_needed = 2\n else:\n turn_signal_needed = 0\n\n if (CS.cstm_btns.get_button_status(\"alca\") > 0) and self.alcaEnabled and (self.laneChange_enabled == 1):\n if ((CS.v_ego < cl_min_v) or (abs(actuators.steerAngle) >= cl_max_a) or \\\n (abs(CS.angle_steers)>= cl_max_a) or (not enabled)): \n CS.cstm_btns.set_button_status(\"alca\",9)\n else:\n CS.cstm_btns.set_button_status(\"alca\",1)\n\n if self.alcaEnabled and enabled and (((not self.prev_right_blinker_on) and CS.right_blinker_on) or \\\n ((not self.prev_left_blinker_on) and CS.left_blinker_on)) and \\\n ((CS.v_ego < cl_min_v) or (abs(actuators.steerAngle) >= cl_max_a) or (abs(CS.angle_steers) >=cl_max_a)):\n # something is not right, the speed or angle is limitting\n CS.UE.custom_alert_message(3,\"Auto Lane Change Unavailable!\",500,3)\n CS.cstm_btns.set_button_status(\"alca\",9)\n\n\n if self.alcaEnabled and enabled and (((not self.prev_right_blinker_on) and CS.right_blinker_on) or \\\n ((not self.prev_left_blinker_on) and CS.left_blinker_on)) and \\\n (CS.v_ego >= cl_min_v) and (abs(actuators.steerAngle) < cl_max_a):\n # start blinker, speed and angle is within limits, let's go\n laneChange_direction = 1\n # changing lanes\n if CS.left_blinker_on:\n laneChange_direction = -1\n\n if (self.laneChange_enabled > 1) and (self.laneChange_direction <> laneChange_direction):\n # something is not right; signal in oposite direction; cancel\n CS.UE.custom_alert_message(3,\"Auto Lane Change Canceled! 
(s)\",200,5)\n self.laneChange_cancelled = True\n self.laneChange_cancelled_counter = 200\n self.laneChange_enabled = 1\n self.laneChange_counter = 0\n self.laneChange_direction = 0\n CS.cstm_btns.set_button_status(\"alca\",1)\n elif (self.laneChange_enabled == 1) :\n # compute angle delta for lane change\n CS.UE.custom_alert_message(2,\"Auto Lane Change Engaged!\",100)\n self.laneChange_enabled = 2\n self.laneChange_counter = 1\n self.laneChange_direction = laneChange_direction\n CS.cstm_btns.set_button_status(\"alca\",2)\n\n if (not self.alcaEnabled) and self.laneChange_enabled > 1:\n self.laneChange_enabled = 1\n self.laneChange_counter = 0\n self.laneChange_direction = 0\n\n # lane change in progress\n if self.laneChange_enabled > 1:\n if (CS.steer_override or (CS.v_ego < cl_min_v)):\n CS.UE.custom_alert_message(4,\"Auto Lane Change Canceled! (u)\",200,3)\n self.laneChange_cancelled = True\n self.laneChange_cancelled_counter = 200\n # if any steer override cancel process or if speed less than min speed\n self.laneChange_counter = 0\n self.laneChange_enabled = 1\n self.laneChange_direction = 0\n CS.cstm_btns.set_button_status(\"alca\",1)\n if self.laneChange_enabled == 2:\n if self.laneChange_counter == 1:\n CS.UE.custom_alert_message(2,\"Auto Lane Change Engaged! (1)\",self.laneChange_wait * 100)\n self.laneChange_counter += 1\n if self.laneChange_counter == self.laneChange_wait * 100:\n self.laneChange_enabled = 3\n self.laneChange_counter = 0\n if self.laneChange_enabled ==3:\n if self.laneChange_counter == 1:\n CS.UE.custom_alert_message(2,\"Auto Lane Change Engaged! (2)\",int(self.alca_duration[alca_mode] * 100))\n self.laneChange_counter += 1\n if self.laneChange_counter >= self.alca_duration[alca_mode] * 100:\n self.laneChange_enabled = 4\n self.laneChange_counter = 0\n if self.laneChange_enabled == 4:\n if self.laneChange_counter == 1:\n CS.UE.custom_alert_message(2,\"Auto Lane Change Complete!\",100)\n self.laneChange_enabled = 1\n self.laneChange_counter = 0\n\n CS.ALCA_enabled = (self.laneChange_enabled > 1) and self.alcaEnabled\n CS.ALCA_total_steps = int(20 * self.alca_duration[alca_mode])\n if self.laneChange_enabled == 3:\n CS.ALCA_direction = -self.laneChange_direction\n else:\n CS.ALCA_direction = 0\n\n return turn_signal_needed, self.laneChange_enabled > 1\n\nclass ALCAModelParser(object):\n def __init__(self):\n #ALCA params\n self.ALCA_error = False\n self.ALCA_lane_width = 3.6\n self.ALCA_direction = 0 # left 1, right -1\n self.ALCA_step = 0\n self.ALCA_total_steps = 20 * 5 #20 Hz, 5 seconds, wifey mode\n self.ALCA_cancelling = False\n self.ALCA_enabled = False\n self.ALCA_OFFSET_C3 = 0.\n self.ALCA_OFFSET_C2 = 0.\n self.ALCA_over_line = False\n self.prev_CS_ALCA_error = False\n self.ALCA_use_visual = True\n self.ALCA_vego = 0.\n self.ALCA_vego_prev = 0.\n self.poller = zmq.Poller()\n self.alcaStatus = messaging.sub_sock(service_list['alcaStatus'].port, conflate=True, poller=self.poller)\n self.alcaState = messaging.pub_sock(service_list['alcaState'].port)\n self.alcas = None\n\n\n def reset_alca (self):\n self.ALCA_step = 0\n self.ALCA_direction = 0\n self.ALCA_cancelling = False\n self.ALCA_error = True\n self.ALCA_enabled = False\n self.ALCA_OFFSET_C3 = 0.\n self.ALCA_OFFSET_C2 = 0.\n self.ALCA_over_line = False\n self.ALCA_use_visual = True\n self.ALCA_vego_prev = 0.\n self.alcas = None\n\n def debug_alca(self,message):\n if ALCA_DEBUG:\n print message\n\n def send_state(self):\n alca_state = tesla.ALCAState.new_message()\n #ALCA params\n 
alca_state.alcaDirection = int(self.ALCA_direction)\n alca_state.alcaError = bool(self.ALCA_error)\n alca_state.alcaCancelling = bool(self.ALCA_cancelling)\n alca_state.alcaEnabled = bool(self.ALCA_enabled)\n alca_state.alcaLaneWidth = float(self.ALCA_lane_width)\n alca_state.alcaStep = int(self.ALCA_step)\n alca_state.alcaTotalSteps = int(self.ALCA_total_steps)\n self.alcaState.send(alca_state.to_bytes())\n\n def update(self, v_ego, md, r_poly, l_poly, r_prob, l_prob, lane_width, p_poly):\n\n for socket, _ in self.poller.poll(0):\n if socket is self.alcaStatus:\n self.alcas = tesla.ALCAStatus.from_bytes(socket.recv())\n\n #if we don't have yet ALCA status, return same values\n if self.alcas is None:\n self.send_state()\n return np.array(r_poly),np.array(l_poly),r_prob, l_prob, lane_width, p_poly\n\n \n self.ALCA_direction = self.alcas.alcaDirection\n self.ALCA_enabled = self.alcas.alcaEnabled\n self.ALCA_total_steps = self.alcas.alcaTotalSteps\n self.ALCA_error = self.ALCA_error or (self.alcas.alcaError and not self.prev_CS_ALCA_error)\n self.prev_CS_ALCA_error = self.alcas.alcaError\n\n if not self.ALCA_enabled:\n self.send_state()\n return np.array(r_poly),np.array(l_poly),r_prob, l_prob, lane_width, p_poly\n\n #if error but no direction, the carcontroller component is fine and we need to reset\n if self.ALCA_error and (self.ALCA_direction == 0):\n self.ALCA_error = False\n\n\n #where are we in alca as %\n ALCA_perc_complete = float(self.ALCA_step) / float(self.ALCA_total_steps)\n if self.ALCA_error and self.ALCA_cancelling:\n self.debug_alca(\" Error and Cancelling -> resetting...\")\n self.reset_alca()\n if self.ALCA_error and not self.ALCA_cancelling:\n if (ALCA_perc_complete < 0.1) or (ALCA_perc_complete > 0.9):\n self.debug_alca(\" Error and less than 10% -> resetting...\")\n self.reset_alca()\n else:\n self.debug_alca(\" Error and not Cancelling -> rewinding...\")\n self.ALCA_cancelling = True\n self.ALCA_error = False\n\n if self.ALCA_enabled and not (self.ALCA_direction == 0):\n if ALCA_DEBUG:\n print ALCA_perc_complete, self.ALCA_step,self.ALCA_total_steps\n ALCA_increment = -3 if self.ALCA_cancelling else 1\n self.ALCA_step += ALCA_increment\n if (self.ALCA_step < 0) or (self.ALCA_step >= self.ALCA_total_steps):\n #done so end ALCA\n self.debug_alca(\" step out of bounds -> resetting...\")\n self.reset_alca()\n else:\n #if between 20% and 80% of change is done, let's check if we are over the line\n if ALCA_line_check_low_limit < ALCA_perc_complete < ALCA_line_check_high_limit :\n if self.ALCA_direction == -1:\n #if we are moving to the right\n if (l_prob > ALCA_line_min_prob ) and (0. <= l_poly[3] <= (self.ALCA_lane_width /2.)):\n self.ALCA_over_line = True\n if self.ALCA_direction == 1:\n #if we are moving to the left\n if (r_prob > ALCA_line_min_prob ) and ((-self.ALCA_lane_width / 2.) 
<= r_poly[3] <= 0 ):\n self.ALCA_over_line = True\n elif ALCA_perc_complete >= ALCA_line_check_high_limit :\n self.ALCA_over_line = True\n else:\n self.ALCA_over_line = False\n #make sure we always have the line we need in sight\n prev_ALCA_use_visual = self.ALCA_use_visual\n if (not self.ALCA_over_line) and (((self.ALCA_direction == 1) and (l_prob < ALCA_line_min_prob)) or ((self.ALCA_direction == -1) and (r_prob < ALCA_line_min_prob))):\n self.ALCA_use_visual = False\n elif self.ALCA_over_line and (((self.ALCA_direction == 1) and (r_prob < ALCA_line_min_prob)) or ((self.ALCA_direction == -1) and (l_prob < ALCA_line_min_prob))):\n self.ALCA_use_visual = False\n else:\n self.ALCA_use_visual = True\n\n #did we just switch between visual and non-visual?\n if prev_ALCA_use_visual != self.ALCA_use_visual:\n self.reset_alca()\n\n #compute offset\n from_center = 0.\n left_to_move = 0.\n if self.ALCA_enabled and not (self.ALCA_direction == 0):\n if self.ALCA_over_line:\n if self.ALCA_direction == 1:\n from_center = self.ALCA_lane_width / 2 - r_poly[3]\n else:\n from_center = self.ALCA_lane_width / 2 + l_poly[3]\n else:\n if self.ALCA_direction == 1:\n from_center = self.ALCA_lane_width / 2 - l_poly[3]\n else:\n from_center = self.ALCA_lane_width / 2 + r_poly[3]\n if from_center < 0.:\n from_center += self.ALCA_lane_width /2 \n left_to_move = self.ALCA_lane_width - from_center\n steps_left = self.ALCA_total_steps - self.ALCA_step\n self.ALCA_OFFSET_C2 = float(self.ALCA_direction * left_to_move) / float(steps_left * 0.05 * (self.ALCA_vego_prev + v_ego) / 2.)\n if ALCA_DEBUG:\n debug_string = DEBUG_INFO.format(step=self.ALCA_step,total_steps=self.ALCA_total_steps,ALCA_direction=self.ALCA_direction,ALCA_use_visual=self.ALCA_use_visual,ALCA_over_line=self.ALCA_over_line,ALCA_lane_width=self.ALCA_lane_width, left_to_move=left_to_move, from_center=from_center, ALCA_OFFSET_C2=self.ALCA_OFFSET_C2)\n self.debug_alca(debug_string)\n else:\n self.ALCA_OFFSET_C2 = 0.\n \n if (not self.ALCA_error) and self.ALCA_use_visual:\n if self.ALCA_over_line:\n if (self.ALCA_total_steps - self.ALCA_step <= 1) or (self.ALCA_over_line and ((self.ALCA_direction == 1) and (r_poly[3] < -ALCA_release_distance)) or ((self.ALCA_direction == -1) and (l_poly[3] > ALCA_release_distance))):\n self.reset_alca()\n self.ALCA_error = False\n\n if (self.ALCA_direction == 1 and not self.ALCA_over_line) or (self.ALCA_direction == -1 and self.ALCA_over_line):\n r_poly = np.array(l_poly)\n l_prob = 1\n r_prob = l_prob\n elif (self.ALCA_direction == -1 and not self.ALCA_over_line) or (self.ALCA_direction == 1 and self.ALCA_over_line):\n l_poly = np.array(r_poly)\n r_prob = 1\n l_prob = r_prob\n l_poly[3] = self.ALCA_lane_width / 2\n r_poly[3] = -self.ALCA_lane_width / 2\n p_poly[3] = 0\n l_poly[2] += self.ALCA_OFFSET_C2\n r_poly[2] += self.ALCA_OFFSET_C2\n p_poly[2] += self.ALCA_OFFSET_C2\n else:\n self.reset_alca()\n self.ALCA_error = False\n\n self.ALCA_vego_prev = v_ego\n\n if self.ALCA_enabled:\n if self.ALCA_direction == 0:\n self.ALCA_lane_width = lane_width\n else:\n lane_width = self.ALCA_lane_width\n\n self.send_state()\n return np.array(r_poly),np.array(l_poly),r_prob, l_prob, self.ALCA_lane_width, p_poly\n \n", "id": "9237344", "language": "Python", "matching_score": 1.880547285079956, "max_stars_count": 1, "path": "selfdrive/car/modules/ALCA_module.py" }, { "content": "from cereal import ui\r\nfrom common import realtime\r\nimport selfdrive.messaging as messaging\r\nfrom selfdrive.services import service_list\r\nimport 
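# --- Illustrative sketch, not part of the original repository ---
# ALCAModelParser.update() in ALCA_module.py above turns the lateral distance
# still left to move into a per-step C2 offset that is added to the lane
# polynomials:
#     offset_c2 = direction * left_to_move / (steps_left * 0.05 * v_avg)
# where 0.05 s is the 20 Hz model period and v_avg averages the previous and
# current ego speed. The same arithmetic as a stand-alone helper, with an extra
# guard against division by zero that the original relies on speed to avoid:
def alca_c2_offset(direction, left_to_move, steps_left, v_ego_prev, v_ego, dt=0.05):
    """Per-step lateral offset; direction is +1 (left) or -1 (right)."""
    v_avg = (v_ego_prev + v_ego) / 2.0
    remaining_dist = steps_left * dt * v_avg     # metres driven in the remaining steps
    if remaining_dist <= 0.0:
        return 0.0
    return float(direction * left_to_move) / remaining_dist

if __name__ == "__main__":
    # e.g. 1.8 m still to move, 40 of 100 steps left, driving about 25 m/s
    print(alca_c2_offset(direction=1, left_to_move=1.8, steps_left=40,
                         v_ego_prev=25.0, v_ego=25.2))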
zmq\r\n\r\nclass UIEvents(object):\r\n def __init__(self,carstate):\r\n self.CS = carstate\r\n self.buttons_poller = zmq.Poller()\r\n self.uiCustomAlert = messaging.pub_sock(service_list['uiCustomAlert'].port)\r\n self.uiButtonInfo = messaging.pub_sock(service_list['uiButtonInfo'].port)\r\n self.uiSetCar = messaging.pub_sock(service_list['uiSetCar'].port)\r\n self.uiPlaySound = messaging.pub_sock(service_list['uiPlaySound'].port)\r\n self.uiGyroInfo = messaging.pub_sock(service_list['uiGyroInfo'].port)\r\n self.uiButtonStatus = messaging.sub_sock(service_list['uiButtonStatus'].port, conflate=True, poller=self.buttons_poller)\r\n self.prev_cstm_message = \"\"\r\n self.prev_cstm_status = -1\r\n\r\n def uiCustomAlertEvent(self,status,message):\r\n dat = ui.UICustomAlert.new_message()\r\n dat.caStatus = status\r\n dat.caText = message+'\\0'\r\n self.uiCustomAlert.send(dat.to_bytes())\r\n \r\n def uiButtonInfoEvent(self,btnid,name,label,status,label2):\r\n dat = ui.UIButtonInfo.new_message()\r\n dat.btnId = btnid\r\n dat.btnName = name #+ '\\0'\r\n dat.btnLabel = label #+ '\\0'\r\n dat.btnStatus = status\r\n dat.btnLabel2 = label2 #+ '\\0'\r\n self.uiButtonInfo.send(dat.to_bytes())\r\n\r\n def uiGyroInfoEvent(self,accpitch,accroll,accyaw,magpitch,magroll,magyaw,gyropitch,gyroroll,gyroyaw):\r\n dat = ui.UIGyroInfo.new_message()\r\n dat.accPitch = accpitch\r\n dat.accRoll = accroll \r\n dat.accYaw = accyaw \r\n dat.magPitch = magpitch\r\n dat.magRoll = magroll \r\n dat.magYaw = magyaw \r\n dat.gyroPitch = gyropitch\r\n dat.gyroRoll = gyroroll\r\n dat.gyroYaw = gyroyaw \r\n self.uiGyroInfo.send(dat.to_bytes())\r\n \r\n def uiSetCarEvent(self,car_folder,car_name, showLogo, showCar):\r\n dat = ui.UISetCar.new_message()\r\n dat.icCarFolder = car_folder\r\n dat.icCarName = car_name\r\n dat.icShowCar = int(showCar)\r\n dat.icShowLogo = int(showLogo)\r\n self.uiSetCar.send(dat.to_bytes())\r\n\r\n def uiPlaySoundEvent(self,sound):\r\n if self.CS.cstm_btns.get_button_status(\"sound\") > 0:\r\n dat = ui.UIPlaySound.new_message()\r\n dat.sndSound = sound\r\n self.uiPlaySound.send(dat.to_bytes())\r\n\r\n # for status we will use one of these values\r\n # NO_STATUS_ALTERATION -1\r\n # STATUS_STOPPED 0\r\n # STATUS_DISENGAGED 1\r\n # STATUS_ENGAGED 2\r\n # STATUS_WARNING 3\r\n # STATUS_ALERT 4\r\n # STATUS_MAX 5\r\n\r\n #for sound we will use one of these values\r\n # NO_SOUND -1\r\n # disable.wav 1\r\n # enable.wav 2\r\n # info.wav 3\r\n # attention.wav 4\r\n # error.wav 5\r\n\r\n def custom_alert_message(self,status,message,duration,sound=-1):\r\n if (status > -1) and (self.prev_cstm_status > status) and \\\r\n (self.CS.custom_alert_counter > 55):\r\n #dont change lessage to a lower importance one if we still have more than half second of display time\r\n return\r\n if (sound > -1) and ((self.prev_cstm_message != message) or (self.prev_cstm_status != status)):\r\n self.uiPlaySoundEvent(sound)\r\n self.uiCustomAlertEvent(status,message)\r\n self.CS.custom_alert_counter = duration\r\n self.prev_cstm_message = message\r\n self.prev_cstm_status = status\r\n\r\n def update_custom_ui(self):\r\n btn_message = None\r\n for socket, event in self.buttons_poller.poll(0):\r\n if socket is self.uiButtonStatus:\r\n btn_message = ui.UIButtonStatus.from_bytes(socket.recv())\r\n if btn_message is not None:\r\n btn_id = btn_message.btnId\r\n self.CS.cstm_btns.set_button_status_from_ui(btn_id,btn_message.btnStatus)\r\n if (self.CS.custom_alert_counter > 0):\r\n self.CS.custom_alert_counter -= 1\r\n if 
(self.CS.custom_alert_counter ==0):\r\n self.custom_alert_message(-1,\"\",0)\r\n self.CS.custom_alert_counter = -1\r\n", "id": "6242207", "language": "Python", "matching_score": 0.9703682065010071, "max_stars_count": 1, "path": "selfdrive/car/modules/UIEV_module.py" }, { "content": "from __future__ import print_function\nimport os\nimport time\nimport random\nfrom panda import Panda\nfrom nose.tools import assert_equal, assert_less, assert_greater\nfrom helpers import time_many_sends, test_two_panda, test_two_black_panda, panda_type_to_serial, clear_can_buffers, panda_connect_and_init\n\n@test_two_panda\n@panda_type_to_serial\n@panda_connect_and_init\ndef test_send_recv(p_send, p_recv):\n p_send.set_safety_mode(Panda.SAFETY_ALLOUTPUT)\n p_recv.set_safety_mode(Panda.SAFETY_ALLOUTPUT)\n p_send.set_can_loopback(False)\n p_recv.set_can_loopback(False)\n\n assert not p_send.legacy\n assert not p_recv.legacy\n\n p_send.can_send_many([(0x1ba, 0, \"message\", 0)]*2)\n time.sleep(0.05)\n p_recv.can_recv()\n p_send.can_recv()\n\n busses = [0,1,2]\n\n for bus in busses:\n for speed in [100, 250, 500, 750, 1000]:\n p_send.set_can_speed_kbps(bus, speed)\n p_recv.set_can_speed_kbps(bus, speed)\n time.sleep(0.05)\n\n comp_kbps = time_many_sends(p_send, bus, p_recv, two_pandas=True)\n\n saturation_pct = (comp_kbps/speed) * 100.0\n assert_greater(saturation_pct, 80)\n assert_less(saturation_pct, 100)\n\n print(\"two pandas bus {}, 100 messages at speed {:4d}, comp speed is {:7.2f}, percent {:6.2f}\".format(bus, speed, comp_kbps, saturation_pct))\n\n@test_two_panda\n@panda_type_to_serial\n@panda_connect_and_init\ndef test_latency(p_send, p_recv):\n p_send.set_safety_mode(Panda.SAFETY_ALLOUTPUT)\n p_recv.set_safety_mode(Panda.SAFETY_ALLOUTPUT)\n p_send.set_can_loopback(False)\n p_recv.set_can_loopback(False)\n\n assert not p_send.legacy\n assert not p_recv.legacy\n\n p_send.set_can_speed_kbps(0, 100)\n p_recv.set_can_speed_kbps(0, 100)\n time.sleep(0.05)\n\n p_send.can_send_many([(0x1ba, 0, \"testmsg\", 0)]*10)\n time.sleep(0.05)\n p_recv.can_recv()\n p_send.can_recv()\n\n busses = [0,1,2]\n\n for bus in busses:\n for speed in [100, 250, 500, 750, 1000]:\n p_send.set_can_speed_kbps(bus, speed)\n p_recv.set_can_speed_kbps(bus, speed)\n time.sleep(0.1)\n\n #clear can buffers\n clear_can_buffers(p_send)\n clear_can_buffers(p_recv)\n\n latencies = []\n comp_kbps_list = []\n saturation_pcts = []\n\n num_messages = 100\n\n for i in range(num_messages):\n st = time.time()\n p_send.can_send(0x1ab, \"message\", bus)\n r = []\n while len(r) < 1 and (time.time() - st) < 5:\n r = p_recv.can_recv()\n et = time.time()\n r_echo = []\n while len(r_echo) < 1 and (time.time() - st) < 10:\n r_echo = p_send.can_recv()\n\n if len(r) == 0 or len(r_echo) == 0:\n print(\"r: {}, r_echo: {}\".format(r, r_echo))\n\n assert_equal(len(r),1)\n assert_equal(len(r_echo),1)\n\n et = (et - st)*1000.0\n comp_kbps = (1+11+1+1+1+4+8*8+15+1+1+1+7) / et\n latency = et - ((1+11+1+1+1+4+8*8+15+1+1+1+7) / speed)\n\n assert_less(latency, 5.0)\n\n saturation_pct = (comp_kbps/speed) * 100.0\n latencies.append(latency)\n comp_kbps_list.append(comp_kbps)\n saturation_pcts.append(saturation_pct)\n\n average_latency = sum(latencies)/num_messages\n assert_less(average_latency, 1.0)\n average_comp_kbps = sum(comp_kbps_list)/num_messages\n average_saturation_pct = sum(saturation_pcts)/num_messages\n\n print(\"two pandas bus {}, {} message average at speed {:4d}, latency is {:5.3f}ms, comp speed is {:7.2f}, percent {:6.2f}\"\\\n .format(bus, num_messages, 
speed, average_latency, average_comp_kbps, average_saturation_pct))\n\n@test_two_black_panda\n@panda_type_to_serial\n@panda_connect_and_init\ndef test_black_loopback(panda0, panda1):\n # disable safety modes\n panda0.set_safety_mode(Panda.SAFETY_ALLOUTPUT)\n panda1.set_safety_mode(Panda.SAFETY_ALLOUTPUT)\n\n # disable loopback\n panda0.set_can_loopback(False)\n panda1.set_can_loopback(False)\n\n # clear stuff\n panda0.can_send_many([(0x1ba, 0, \"testmsg\", 0)]*10)\n time.sleep(0.05)\n panda0.can_recv()\n panda1.can_recv()\n\n # test array (send bus, sender obd, reciever obd, expected busses)\n test_array = [\n (0, False, False, [0]),\n (1, False, False, [1]),\n (2, False, False, [2]),\n (0, False, True, [0, 1]),\n (1, False, True, []),\n (2, False, True, [2]),\n (0, True, False, [0]),\n (1, True, False, [0]),\n (2, True, False, [2]),\n (0, True, True, [0, 1]),\n (1, True, True, [0, 1]),\n (2, True, True, [2])\n ]\n\n # test functions\n def get_test_string():\n return b\"test\"+os.urandom(10)\n\n def _test_buses(send_panda, recv_panda, _test_array):\n for send_bus, send_obd, recv_obd, recv_buses in _test_array:\n print(\"\\nSend bus:\", send_bus, \" Send OBD:\", send_obd, \" Recv OBD:\", recv_obd)\n \n # set OBD on pandas\n send_panda.set_gmlan(True if send_obd else None)\n recv_panda.set_gmlan(True if recv_obd else None)\n\n # clear buffers\n clear_can_buffers(send_panda)\n clear_can_buffers(recv_panda)\n\n # send the characters\n at = random.randint(1, 2000)\n st = get_test_string()[0:8]\n send_panda.can_send(at, st, send_bus)\n time.sleep(0.1)\n\n # check for receive\n cans_echo = send_panda.can_recv()\n cans_loop = recv_panda.can_recv()\n\n loop_buses = []\n for loop in cans_loop:\n print(\" Loop on bus\", str(loop[3]))\n loop_buses.append(loop[3])\n if len(cans_loop) == 0:\n print(\" No loop\")\n \n # test loop buses\n recv_buses.sort()\n loop_buses.sort()\n assert recv_buses == loop_buses\n print(\" TEST PASSED\")\n print(\"\\n\")\n\n # test both orientations\n print(\"***************** TESTING (0 --> 1) *****************\")\n _test_buses(panda0, panda1, test_array)\n print(\"***************** TESTING (1 --> 0) *****************\")\n _test_buses(panda1, panda0, test_array)", "id": "10769177", "language": "Python", "matching_score": 1.4753763675689697, "max_stars_count": 3, "path": "tests/automated/6_two_panda.py" }, { "content": "from panda.tests.safety import libpandasafety_py\n\nMAX_WRONG_COUNTERS = 5\n\ndef make_msg(bus, addr, length=8):\n to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')\n if addr >= 0x800:\n to_send[0].RIR = (addr << 3) | 5\n else:\n to_send[0].RIR = (addr << 21) | 1\n to_send[0].RDTR = length\n to_send[0].RDTR |= bus << 4\n\n return to_send\n\nclass StdTest:\n @staticmethod\n def test_relay_malfunction(test, addr, bus=0):\n # input is a test class and the address that, if seen on specified bus, triggers\n # the relay_malfunction protection logic: both tx_hook and fwd_hook are\n # expected to return failure\n test.assertFalse(test.safety.get_relay_malfunction())\n test.safety.safety_rx_hook(make_msg(bus, addr, 8))\n test.assertTrue(test.safety.get_relay_malfunction())\n for a in range(1, 0x800):\n for b in range(0, 3):\n test.assertFalse(test.safety.safety_tx_hook(make_msg(b, a, 8)))\n test.assertEqual(-1, test.safety.safety_fwd_hook(b, make_msg(b, a, 8)))\n\n @staticmethod\n def test_manually_enable_controls_allowed(test):\n test.safety.set_controls_allowed(1)\n test.assertTrue(test.safety.get_controls_allowed())\n 
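# --- Illustrative sketch, not part of the original repository ---
# The two-panda latency test above converts a measured round-trip time into an
# effective bus speed using the bit count 1+11+1+1+1+4+8*8+15+1+1+1+7: the
# length, ignoring bit stuffing, of a classic CAN frame with a standard ID and
# 8 data bytes (SOF, ID, RTR, IDE, r0, DLC, data, CRC, CRC delimiter, ACK,
# ACK delimiter, EOF). Since bits per millisecond equal kilobits per second,
# dividing by the elapsed milliseconds gives comp_kbps directly:
def can_frame_bits(data_len=8):
    """Nominal bit length of a standard-ID classic CAN data frame (no stuffing)."""
    return 1 + 11 + 1 + 1 + 1 + 4 + 8 * data_len + 15 + 1 + 1 + 1 + 7

def effective_kbps(elapsed_ms, data_len=8):
    return can_frame_bits(data_len) / float(elapsed_ms)

if __name__ == "__main__":
    et_ms = 0.25                                  # hypothetical measured round trip
    kbps = effective_kbps(et_ms)
    print("frame bits:", can_frame_bits())        # 108 for an 8-byte frame
    print("comp speed: %.1f kbps (%.1f%% of a 500 kbps bus)" % (kbps, 100.0 * kbps / 500.0))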
test.safety.set_controls_allowed(0)\n test.assertFalse(test.safety.get_controls_allowed())\n\n @staticmethod\n def test_spam_can_buses(test, TX_MSGS):\n for addr in range(1, 0x800):\n for bus in range(0, 4):\n if all(addr != m[0] or bus != m[1] for m in TX_MSGS):\n test.assertFalse(test.safety.safety_tx_hook(make_msg(bus, addr, 8)))\n\n @staticmethod\n def test_allow_brake_at_zero_speed(test):\n # Brake was already pressed\n test.safety.safety_rx_hook(test._speed_msg(0))\n test.safety.safety_rx_hook(test._brake_msg(1))\n test.safety.set_controls_allowed(1)\n test.safety.safety_rx_hook(test._brake_msg(1))\n test.assertTrue(test.safety.get_controls_allowed())\n test.safety.safety_rx_hook(test._brake_msg(0))\n test.assertTrue(test.safety.get_controls_allowed())\n # rising edge of brake should disengage\n test.safety.safety_rx_hook(test._brake_msg(1))\n test.assertFalse(test.safety.get_controls_allowed())\n test.safety.safety_rx_hook(test._brake_msg(0)) # reset no brakes\n\n @staticmethod\n def test_not_allow_brake_when_moving(test, standstill_threshold):\n # Brake was already pressed\n test.safety.safety_rx_hook(test._brake_msg(1))\n test.safety.set_controls_allowed(1)\n test.safety.safety_rx_hook(test._speed_msg(standstill_threshold))\n test.safety.safety_rx_hook(test._brake_msg(1))\n test.assertTrue(test.safety.get_controls_allowed())\n test.safety.safety_rx_hook(test._speed_msg(standstill_threshold + 1))\n test.safety.safety_rx_hook(test._brake_msg(1))\n test.assertFalse(test.safety.get_controls_allowed())\n test.safety.safety_rx_hook(test._speed_msg(0))\n", "id": "5407304", "language": "Python", "matching_score": 1.4698697328567505, "max_stars_count": 40, "path": "tests/safety/common.py" }, { "content": "#!/usr/bin/env python\nimport time\nimport struct\nfrom enum import IntEnum\nfrom Queue import Queue, Empty\nimport threading\nfrom binascii import hexlify\n\nDEBUG = False\n\nclass SERVICE_TYPE(IntEnum):\n DIAGNOSTIC_SESSION_CONTROL = 0x10\n ECU_RESET = 0x11\n SECURITY_ACCESS = 0x27\n COMMUNICATION_CONTROL = 0x28\n TESTER_PRESENT = 0x3E\n ACCESS_TIMING_PARAMETER = 0x83\n SECURED_DATA_TRANSMISSION = 0x84\n CONTROL_DTC_SETTING = 0x85\n RESPONSE_ON_EVENT = 0x86\n LINK_CONTROL = 0x87\n READ_DATA_BY_IDENTIFIER = 0x22\n READ_MEMORY_BY_ADDRESS = 0x23\n READ_SCALING_DATA_BY_IDENTIFIER = 0x24\n READ_DATA_BY_PERIODIC_IDENTIFIER = 0x2A\n DYNAMICALLY_DEFINE_DATA_IDENTIFIER = 0x2C\n WRITE_DATA_BY_IDENTIFIER = 0x2E\n WRITE_MEMORY_BY_ADDRESS = 0x3D\n CLEAR_DIAGNOSTIC_INFORMATION = 0x14\n READ_DTC_INFORMATION = 0x19\n INPUT_OUTPUT_CONTROL_BY_IDENTIFIER = 0x2F\n ROUTINE_CONTROL = 0x31\n REQUEST_DOWNLOAD = 0x34\n REQUEST_UPLOAD = 0x35\n TRANSFER_DATA = 0x36\n REQUEST_TRANSFER_EXIT = 0x37\n\n_negative_response_codes = {\n 0x00: 'positive response',\n 0x10: 'general reject',\n 0x11: 'service not supported',\n 0x12: 'sub-function not supported',\n 0x13: 'incorrect message length or invalid format',\n 0x14: 'response too long',\n 0x21: 'busy repeat request',\n 0x22: 'conditions not correct',\n 0x24: 'request sequence error',\n 0x25: 'no response from subnet component',\n 0x26: 'failure prevents execution of requested action',\n 0x31: 'request out of range',\n 0x33: 'security access denied',\n 0x35: 'invalid key',\n 0x36: 'exceed numebr of attempts',\n 0x37: 'required time delay not expired',\n 0x70: 'upload download not accepted',\n 0x71: 'transfer data suspended',\n 0x72: 'general programming failure',\n 0x73: 'wrong block sequence counter',\n 0x78: 'request correctly received - response pending',\n 0x7e: 
'sub-function not supported in active session',\n 0x7f: 'service not supported in active session',\n 0x81: 'rpm too high',\n 0x82: 'rpm too low',\n 0x83: 'engine is running',\n 0x84: 'engine is not running',\n 0x85: 'engine run time too low',\n 0x86: 'temperature too high',\n 0x87: 'temperature too low',\n 0x88: 'vehicle speed too high',\n 0x89: 'vehicle speed too low',\n 0x8a: 'throttle/pedal too high',\n 0x8b: 'throttle/pedal too low',\n 0x8c: 'transmission not in neutral',\n 0x8d: 'transmission not in gear',\n 0x8f: 'brake switch(es) not closed',\n 0x90: 'shifter lever not in park',\n 0x91: 'torque converter clutch locked',\n 0x92: 'voltage too high',\n 0x93: 'voltage too low',\n}\n\nclass MessageTimeoutError(Exception):\n pass\n\nclass NegativeResponseError(Exception):\n def __init__(self, message, service_id, error_code):\n super(Exception, self).__init__(message)\n self.service_id = service_id\n self.error_code = error_code\n\nclass InvalidServiceIdError(Exception):\n pass\n\nclass InvalidSubFunctioneError(Exception):\n pass\n\ndef _isotp_thread(panda, bus, tx_addr, tx_queue, rx_queue):\n try:\n panda.set_safety_mode(Panda.SAFETY_ALLOUTPUT)\n if tx_addr < 0xFFF8:\n filter_addr = tx_addr+0x10\n elif tx_addr > 0x10000000 and tx_addr < 0xFFFFFFFF:\n filter_addr = (tx_addr & 0xFFFF0000) + (tx_addr<<8 & 0xFF00) + (tx_addr>>8 & 0xFF)\n else:\n raise ValueError(\"invalid tx_addr: {}\".format(tx_addr))\n rx_frame = {\"size\": 0, \"data\": \"\", \"idx\": 0, \"done\": True}\n tx_frame = {\"size\": 0, \"data\": \"\", \"idx\": 0, \"done\": True}\n\n # clear tx buffer\n panda.can_clear(bus)\n # clear rx buffer\n panda.can_clear(0xFFFF)\n time.sleep(1)\n while True:\n messages = panda.can_recv()\n for rx_addr, rx_ts, rx_data, rx_bus in messages:\n if rx_bus != bus or rx_addr != filter_addr or len(rx_data) == 0:\n continue\n rx_data = bytearray(rx_data)\n if (DEBUG): print(\"R: {} {}\".format(hex(rx_addr), hexlify(rx_data)))\n if rx_data[0] >> 4 == 0x0:\n # single rx_frame\n rx_frame[\"size\"] = rx_data[0] & 0xFF\n rx_frame[\"data\"] = rx_data[1:1+rx_frame[\"size\"]]\n rx_frame[\"idx\"] = 0\n rx_frame[\"done\"] = True\n rx_queue.put(rx_frame[\"data\"])\n elif rx_data[0] >> 4 == 0x1:\n # first rx_frame\n rx_frame[\"size\"] = ((rx_data[0] & 0x0F) << 8) + rx_data[1]\n rx_frame[\"data\"] = rx_data[2:]\n rx_frame[\"idx\"] = 0\n rx_frame[\"done\"] = False\n # send flow control message (send all bytes)\n msg = \"\\x30\\x00\\x00\".ljust(8, \"\\x00\")\n if (DEBUG): print(\"S: {} {}\".format(hex(tx_addr), hexlify(msg)))\n panda.can_send(tx_addr, msg, bus)\n elif rx_data[0] >> 4 == 0x2:\n # consecutive rx frame\n assert rx_frame[\"done\"] == False, \"rx: no active frame\"\n # validate frame index\n rx_frame[\"idx\"] += 1\n assert rx_frame[\"idx\"] & 0xF == rx_data[0] & 0xF, \"rx: invalid consecutive frame index\"\n rx_size = rx_frame[\"size\"] - len(rx_frame[\"data\"])\n rx_frame[\"data\"] += rx_data[1:1+min(rx_size, 7)]\n if rx_frame[\"size\"] == len(rx_frame[\"data\"]):\n rx_frame[\"done\"] = True\n rx_queue.put(rx_frame[\"data\"])\n elif rx_data[0] >> 4 == 0x3:\n # flow control\n assert tx_frame[\"done\"] == False, \"tx: no active frame\"\n # TODO: support non-zero block size and separate time\n assert rx_data[0] == 0x30, \"tx: flow-control requires: continue\"\n delay_ts = rx_data[2] & 0x7F\n # scale is 1 milliseconds if first bit == 0, 100 micro seconds if first bit == 1\n delay_div = 1000. 
if rx_data[2] & 0x80 == 0 else 100000.\t\n # first frame = 6 bytes, each consecutive frame = 7 bytes\n start = 6 + tx_frame[\"idx\"] * 7\n count = rx_data[1]\n end = start + count * 7 if count > 0 else tx_frame[\"size\"]\n for i in range(start, end, 7):\n tx_frame[\"idx\"] += 1\n # consecutive tx frames\n msg = (chr(0x20 | (tx_frame[\"idx\"] & 0xF)) + tx_frame[\"data\"][i:i+7]).ljust(8, \"\\x00\")\n if (DEBUG): print(\"S: {} {}\".format(hex(tx_addr), hexlify(msg)))\n panda.can_send(tx_addr, msg, bus)\n if delay_ts > 0:\n time.sleep(delay_ts / delay_div)\n tx_frame[\"done\"] = True\n\n if not tx_queue.empty():\n req = tx_queue.get(block=False)\n # reset rx and tx frames\n rx_frame = {\"size\": 0, \"data\": \"\", \"idx\": 0, \"done\": True}\n tx_frame = {\"size\": len(req), \"data\": req, \"idx\": 0, \"done\": False}\n if tx_frame[\"size\"] < 8:\n # single frame\n tx_frame[\"done\"] = True\n msg = (chr(tx_frame[\"size\"]) + tx_frame[\"data\"]).ljust(8, \"\\x00\")\n if (DEBUG): print(\"S: {} {}\".format(hex(tx_addr), hexlify(msg)))\n panda.can_send(tx_addr, msg, bus)\n else:\n # first rx_frame\n tx_frame[\"done\"] = False\n msg = (struct.pack(\"!H\", 0x1000 | tx_frame[\"size\"]) + tx_frame[\"data\"][:6]).ljust(8, \"\\x00\")\n if (DEBUG): print(\"S: {} {}\".format(hex(tx_addr), hexlify(msg)))\n panda.can_send(tx_addr, msg, bus)\n else:\n time.sleep(0.01)\n finally:\n panda.close()\n\n# generic uds request\ndef _uds_request(address, service_type, subfunction=None, data=None):\n req = chr(service_type)\n if subfunction is not None:\n req += chr(subfunction)\n if data is not None:\n req += data\n tx_queue.put(req)\n\n while True:\n try:\n resp = rx_queue.get(block=True, timeout=10)\n except Empty:\n raise MessageTimeoutError(\"timeout waiting for response\")\n resp_sid = resp[0] if len(resp) > 0 else None\n\n # negative response\n if resp_sid == 0x7F:\n service_id = resp[1] if len(resp) > 1 else -1\n try:\n service_desc = SERVICE_TYPE(service_id).name\n except Exception:\n service_desc = 'NON_STANDARD_SERVICE'\n error_code = resp[2] if len(resp) > 2 else -1\n try:\n error_desc = _negative_response_codes[error_code]\n except Exception:\n error_desc = 'unknown error'\n # wait for another message if response pending\n if error_code == 0x78:\n time.sleep(0.1)\n continue\n raise NegativeResponseError('{} - {}'.format(service_desc, error_desc), service_id, error_code)\n break\n\n # positive response\n if service_type+0x40 != resp_sid:\n resp_sid_hex = hex(resp_sid) if resp_sid is not None else None\n raise InvalidServiceIdError('invalid response service id: {}'.format(resp_sid_hex))\n\n if subfunction is not None:\n resp_sfn = resp[1] if len(resp) > 1 else None\n if subfunction != resp_sfn:\n resp_sfn_hex = hex(resp_sfn) if resp_sfn is not None else None\n raise InvalidSubFunctioneError('invalid response subfunction: {}'.format(hex(resp_sfn)))\n\n # return data (exclude service id and sub-function id)\n return resp[(1 if subfunction is None else 2):]\n\n# services\nclass SESSION_TYPE(IntEnum):\n DEFAULT = 1\n PROGRAMMING = 2\n EXTENDED_DIAGNOSTIC = 3\n SAFETY_SYSTEM_DIAGNOSTIC = 4\n\ndef diagnostic_session_control(address, session_type):\n _uds_request(address, SERVICE_TYPE.DIAGNOSTIC_SESSION_CONTROL, subfunction=session_type)\n\nclass RESET_TYPE(IntEnum):\n HARD = 1\n KEY_OFF_ON = 2\n SOFT = 3\n ENABLE_RAPID_POWER_SHUTDOWN = 4\n DISABLE_RAPID_POWER_SHUTDOWN = 5\n\ndef ecu_reset(address, reset_type):\n resp = _uds_request(address, SERVICE_TYPE.ECU_RESET, subfunction=reset_type)\n power_down_time = 
None\n if reset_type == RESET_TYPE.ENABLE_RAPID_POWER_SHUTDOWN:\n power_down_time = resp[0]\n return power_down_time\n\nclass ACCESS_TYPE(IntEnum):\n REQUEST_SEED = 1\n SEND_KEY = 2\n\ndef security_access2(address, access_type, security_key=None):\n request_seed = access_type % 2 != 0\n if request_seed and security_key is not None:\n raise ValueError('security_key not allowed')\n if not request_seed and security_key is None:\n raise ValueError('security_key is missing')\n resp = _uds_request(address, SERVICE_TYPE.DIAGNOSTIC_SESSION_CONTROL, subfunction=access_type, data=security_key)\n if request_seed:\n security_seed = resp\n return security_seed\n\ndef security_access(address, access_type, security_key=None):\n request_seed = access_type % 2 != 0\n if request_seed and security_key is not None:\n raise ValueError('security_key not allowed')\n if not request_seed and security_key is None:\n raise ValueError('security_key is missing')\n resp = _uds_request(address, SERVICE_TYPE.SECURITY_ACCESS, subfunction=access_type, data=security_key)\n if request_seed:\n security_seed = resp\n return security_seed\n\nclass CONTROL_TYPE(IntEnum):\n ENABLE_RX_ENABLE_TX = 0\n ENABLE_RX_DISABLE_TX = 1\n DISABLE_RX_ENABLE_TX = 2\n DISABLE_RX_DISABLE_TX = 3\n\nclass MESSAGE_TYPE(IntEnum):\n NORMAL = 1\n NETWORK_MANAGEMENT = 2\n NORMAL_AND_NETWORK_MANAGEMENT = 3\n\ndef communication_control(address, control_type, message_type):\n data = chr(message_type)\n _uds_request(address, SERVICE_TYPE.COMMUNICATION_CONTROL, subfunction=control_type, data=data)\n\ndef tester_present(address):\n _uds_request(address, SERVICE_TYPE.TESTER_PRESENT, subfunction=0x00)\n\nclass TIMING_PARAMETER_TYPE(IntEnum):\n READ_EXTENDED_SET = 1\n SET_TO_DEFAULT_VALUES = 2\n READ_CURRENTLY_ACTIVE = 3\n SET_TO_GIVEN_VALUES = 4\n\ndef access_timing_parameter(address, timing_parameter_type, parameter_values):\n write_custom_values = timing_parameter_type == ACCESS_TIMING_PARAMETER_TYPE.SET_TO_GIVEN_VALUES\n read_values = (\n timing_parameter_type == ACCESS_TIMING_PARAMETER_TYPE.READ_CURRENTLY_ACTIVE or\n timing_parameter_type == ACCESS_TIMING_PARAMETER_TYPE.READ_EXTENDED_SET\n )\n if not write_custom_values and parameter_values is not None:\n raise ValueError('parameter_values not allowed')\n if write_custom_values and parameter_values is None:\n raise ValueError('parameter_values is missing')\n resp = _uds_request(address, SERVICE_TYPE.ACCESS_TIMING_PARAMETER, subfunction=timing_parameter_type, data=parameter_values)\n if read_values:\n # TODO: parse response into values?\n parameter_values = resp\n return parameter_values\n\ndef secured_data_transmission(address, data):\n # TODO: split data into multiple input parameters?\n resp = _uds_request(address, SERVICE_TYPE.SECURED_DATA_TRANSMISSION, subfunction=None, data=data)\n # TODO: parse response into multiple output values?\n return resp\n\nclass DTC_SETTING_TYPE(IntEnum):\n ON = 1\n OFF = 2\n\ndef control_dtc_setting(address, dtc_setting_type):\n _uds_request(address, SERVICE_TYPE.CONTROL_DTC_SETTING, subfunction=dtc_setting_type)\n\nclass RESPONSE_EVENT_TYPE(IntEnum):\n STOP_RESPONSE_ON_EVENT = 0\n ON_DTC_STATUS_CHANGE = 1\n ON_TIMER_INTERRUPT = 2\n ON_CHANGE_OF_DATA_IDENTIFIER = 3\n REPORT_ACTIVATED_EVENTS = 4\n START_RESPONSE_ON_EVENT = 5\n CLEAR_RESPONSE_ON_EVENT = 6\n ON_COMPARISON_OF_VALUES = 7\n\ndef response_on_event(address, response_event_type, store_event, window_time, event_type_record, service_response_record):\n if store_event:\n response_event_type |= 0x20\n # TODO: split 
record parameters into arrays\n data = char(window_time) + event_type_record + service_response_record\n resp = _uds_request(address, SERVICE_TYPE.RESPONSE_ON_EVENT, subfunction=response_event_type, data=data)\n\n if response_event_type == REPORT_ACTIVATED_EVENTS:\n return {\n \"num_of_activated_events\": resp[0],\n \"data\": resp[1:], # TODO: parse the reset of response\n }\n\n return {\n \"num_of_identified_events\": resp[0],\n \"event_window_time\": resp[1],\n \"data\": resp[2:], # TODO: parse the reset of response\n }\n\nclass LINK_CONTROL_TYPE(IntEnum):\n VERIFY_BAUDRATE_TRANSITION_WITH_FIXED_BAUDRATE = 1\n VERIFY_BAUDRATE_TRANSITION_WITH_SPECIFIC_BAUDRATE = 2\n TRANSITION_BAUDRATE = 3\n\nclass BAUD_RATE_TYPE(IntEnum):\n PC9600 = 1\n PC19200 = 2\n PC38400 = 3\n PC57600 = 4\n PC115200 = 5\n CAN125000 = 16\n CAN250000 = 17\n CAN500000 = 18\n CAN1000000 = 19\n\ndef link_control(address, link_control_type, baud_rate_type=None):\n if LINK_CONTROL_TYPE.VERIFY_BAUDRATE_TRANSITION_WITH_FIXED_BAUDRATE:\n # baud_rate_type = BAUD_RATE_TYPE\n data = chr(baud_rate_type)\n elif LINK_CONTROL_TYPE.VERIFY_BAUDRATE_TRANSITION_WITH_SPECIFIC_BAUDRATE:\n # baud_rate_type = custom value (3 bytes big-endian)\n data = struct.pack('!I', baud_rate_type)[1:]\n else:\n data = None\n _uds_request(address, SERVICE_TYPE.LINK_CONTROL, subfunction=link_control_type, data=data)\n\nclass DATA_IDENTIFIER_TYPE(IntEnum):\n BOOT_SOFTWARE_IDENTIFICATION = 0XF180\n APPLICATION_SOFTWARE_IDENTIFICATION = 0XF181\n APPLICATION_DATA_IDENTIFICATION = 0XF182\n BOOT_SOFTWARE_FINGERPRINT = 0XF183\n APPLICATION_SOFTWARE_FINGERPRINT = 0XF184\n APPLICATION_DATA_FINGERPRINT = 0XF185\n ACTIVE_DIAGNOSTIC_SESSION = 0XF186\n VEHICLE_MANUFACTURER_SPARE_PART_NUMBER = 0XF187\n VEHICLE_MANUFACTURER_ECU_SOFTWARE_NUMBER = 0XF188\n VEHICLE_MANUFACTURER_ECU_SOFTWARE_VERSION_NUMBER = 0XF189\n SYSTEM_SUPPLIER_IDENTIFIER = 0XF18A\n ECU_MANUFACTURING_DATE = 0XF18B\n ECU_SERIAL_NUMBER = 0XF18C\n SUPPORTED_FUNCTIONAL_UNITS = 0XF18D\n VEHICLE_MANUFACTURER_KIT_ASSEMBLY_PART_NUMBER = 0XF18E\n VIN = 0XF190\n VEHICLE_MANUFACTURER_ECU_HARDWARE_NUMBER = 0XF191\n SYSTEM_SUPPLIER_ECU_HARDWARE_NUMBER = 0XF192\n SYSTEM_SUPPLIER_ECU_HARDWARE_VERSION_NUMBER = 0XF193\n SYSTEM_SUPPLIER_ECU_SOFTWARE_NUMBER = 0XF194\n SYSTEM_SUPPLIER_ECU_SOFTWARE_VERSION_NUMBER = 0XF195\n EXHAUST_REGULATION_OR_TYPE_APPROVAL_NUMBER = 0XF196\n SYSTEM_NAME_OR_ENGINE_TYPE = 0XF197\n REPAIR_SHOP_CODE_OR_TESTER_SERIAL_NUMBER = 0XF198\n PROGRAMMING_DATE = 0XF199\n CALIBRATION_REPAIR_SHOP_CODE_OR_CALIBRATION_EQUIPMENT_SERIAL_NUMBER = 0XF19A\n CALIBRATION_DATE = 0XF19B\n CALIBRATION_EQUIPMENT_SOFTWARE_NUMBER = 0XF19C\n ECU_INSTALLATION_DATE = 0XF19D\n ODX_FILE = 0XF19E\n ENTITY = 0XF19F\n\ndef read_data_by_identifier(address, data_identifier_type):\n # TODO: support list of identifiers\n data = struct.pack('!H', data_identifier_type)\n resp = _uds_request(address, SERVICE_TYPE.READ_DATA_BY_IDENTIFIER, subfunction=None, data=data)\n resp_id = struct.unpack('!H', resp[0:2])[0] if len(resp) >= 2 else None\n if resp_id != data_identifier_type:\n raise ValueError('invalid response data identifier: {}'.format(hex(resp_id)))\n return resp[2:]\n\ndef read_memory_by_address(address, memory_address, memory_size, memory_address_bytes=4, memory_size_bytes=1):\n if memory_address_bytes < 1 or memory_address_bytes > 4:\n raise ValueError('invalid memory_address_bytes: {}'.format(memory_address_bytes))\n if memory_size_bytes < 1 or memory_size_bytes > 4:\n raise ValueError('invalid memory_size_bytes: 
{}'.format(memory_size_bytes))\n data = chr(memory_size_bytes<<4 | memory_address_bytes)\n\n if memory_address >= 1<<(memory_address_bytes*8):\n raise ValueError('invalid memory_address: {}'.format(memory_address))\n data += struct.pack('!I', memory_address)[4-memory_address_bytes:]\n if memory_size >= 1<<(memory_size_bytes*8):\n raise ValueError('invalid memory_size: {}'.format(memory_size))\n data += struct.pack('!I', memory_size)[4-memory_size_bytes:]\n\n resp = _uds_request(address, SERVICE_TYPE.READ_MEMORY_BY_ADDRESS, subfunction=None, data=data)\n return resp\n\ndef read_scaling_data_by_identifier(address, data_identifier_type):\n data = struct.pack('!H', data_identifier_type)\n resp = _uds_request(address, SERVICE_TYPE.READ_SCALING_DATA_BY_IDENTIFIER, subfunction=None, data=data)\n resp_id = struct.unpack('!H', resp[0:2])[0] if len(resp) >= 2 else None\n if resp_id != data_identifier_type:\n raise ValueError('invalid response data identifier: {}'.format(hex(resp_id)))\n return resp[2:] # TODO: parse the response\n\nclass TRANSMISSION_MODE_TYPE(IntEnum):\n SEND_AT_SLOW_RATE = 1\n SEND_AT_MEDIUM_RATE = 2\n SEND_AT_FAST_RATE = 3\n STOP_SENDING = 4\n\ndef read_data_by_periodic_identifier(address, transmission_mode_type, periodic_data_identifier):\n # TODO: support list of identifiers\n data = chr(transmission_mode_type) + chr(periodic_data_identifier)\n _uds_request(address, SERVICE_TYPE.READ_DATA_BY_PERIODIC_IDENTIFIER, subfunction=None, data=data)\n\nclass DYNAMIC_DEFINITION_TYPE(IntEnum):\n DEFINE_BY_IDENTIFIER = 1\n DEFINE_BY_MEMORY_ADDRESS = 2\n CLEAR_DYNAMICALLY_DEFINED_DATA_IDENTIFIER = 3\n\ndef dynamically_define_data_identifier(address, dynamic_definition_type, dynamic_data_identifier, source_definitions, memory_address_bytes=4, memory_size_bytes=1):\n if memory_address_bytes < 1 or memory_address_bytes > 4:\n raise ValueError('invalid memory_address_bytes: {}'.format(memory_address_bytes))\n if memory_size_bytes < 1 or memory_size_bytes > 4:\n raise ValueError('invalid memory_size_bytes: {}'.format(memory_size_bytes))\n data = chr(memory_size_bytes<<4 | memory_address_bytes)\n\n data = struct.pack('!H', dynamic_data_identifier)\n if dynamic_definition_type == DYNAMIC_DEFINITION_TYPE.DEFINE_BY_IDENTIFIER:\n for s in source_definitions:\n data += struct.pack('!H', s[\"data_identifier\"]) + chr(s[\"position\"]) + chr(s[\"memory_size\"])\n elif dynamic_definition_type == DYNAMIC_DEFINITION_TYPE.DEFINE_BY_MEMORY_ADDRESS:\n data += chr(memory_size_bytes<<4 | memory_address_bytes)\n for s in source_definitions:\n if s[\"memory_address\"] >= 1<<(memory_address_bytes*8):\n raise ValueError('invalid memory_address: {}'.format(s[\"memory_address\"]))\n data += struct.pack('!I', memory_address)[4-memory_address_bytes:]\n if s[\"memory_size\"] >= 1<<(memory_size_bytes*8):\n raise ValueError('invalid memory_size: {}'.format(s[\"memory_size\"]))\n data += struct.pack('!I', s[\"memory_size\"])[4-memory_size_bytes:]\n elif dynamic_definition_type == DYNAMIC_DEFINITION_TYPE.CLEAR_DYNAMICALLY_DEFINED_DATA_IDENTIFIER:\n pass\n else:\n raise ValueError('invalid dynamic identifier type: {}'.format(hex(dynamic_definition_type)))\n _uds_request(address, SERVICE_TYPE.DYNAMICALLY_DEFINE_DATA_IDENTIFIER, subfunction=dynamic_definition_type, data=data)\n\ndef write_data_by_identifier(address, data_identifier_type, data_record):\n data = struct.pack('!H', data_identifier_type) + data_record\n resp = _uds_request(address, SERVICE_TYPE.WRITE_DATA_BY_IDENTIFIER, subfunction=None, data=data)\n resp_id = 
struct.unpack('!H', resp[0:2])[0] if len(resp) >= 2 else None\n if resp_id != data_identifier_type:\n raise ValueError('invalid response data identifier: {}'.format(hex(resp_id)))\n\ndef write_memory_by_address(address, memory_address, memory_size, data_record, memory_address_bytes=4, memory_size_bytes=1):\n if memory_address_bytes < 1 or memory_address_bytes > 4:\n raise ValueError('invalid memory_address_bytes: {}'.format(memory_address_bytes))\n if memory_size_bytes < 1 or memory_size_bytes > 4:\n raise ValueError('invalid memory_size_bytes: {}'.format(memory_size_bytes))\n data = chr(memory_size_bytes<<4 | memory_address_bytes)\n\n if memory_address >= 1<<(memory_address_bytes*8):\n raise ValueError('invalid memory_address: {}'.format(memory_address))\n data += struct.pack('!I', memory_address)[4-memory_address_bytes:]\n if memory_size >= 1<<(memory_size_bytes*8):\n raise ValueError('invalid memory_size: {}'.format(memory_size))\n data += struct.pack('!I', memory_size)[4-memory_size_bytes:]\n\n data += data_record\n _uds_request(address, SERVICE_TYPE.WRITE_MEMORY_BY_ADDRESS, subfunction=0x00, data=data)\n\nclass DTC_GROUP_TYPE(IntEnum):\n EMISSIONS = 0x000000\n ALL = 0xFFFFFF\n\ndef clear_diagnostic_information(address, dtc_group_type):\n data = struct.pack('!I', dtc_group_type)[1:] # 3 bytes\n _uds_request(address, SERVICE_TYPE.CLEAR_DIAGNOSTIC_INFORMATION, subfunction=None, data=data)\n\nclass DTC_REPORT_TYPE(IntEnum):\n NUMBER_OF_DTC_BY_STATUS_MASK = 0x01\n DTC_BY_STATUS_MASK = 0x02\n DTC_SNAPSHOT_IDENTIFICATION = 0x03\n DTC_SNAPSHOT_RECORD_BY_DTC_NUMBER = 0x04\n DTC_SNAPSHOT_RECORD_BY_RECORD_NUMBER = 0x05\n DTC_EXTENDED_DATA_RECORD_BY_DTC_NUMBER = 0x06\n NUMBER_OF_DTC_BY_SEVERITY_MASK_RECORD = 0x07\n DTC_BY_SEVERITY_MASK_RECORD = 0x08\n SEVERITY_INFORMATION_OF_DTC = 0x09\n SUPPORTED_DTC = 0x0A\n FIRST_TEST_FAILED_DTC = 0x0B\n FIRST_CONFIRMED_DTC = 0x0C\n MOST_RECENT_TEST_FAILED_DTC = 0x0D\n MOST_RECENT_CONFIRMED_DTC = 0x0E\n MIRROR_MEMORY_DTC_BY_STATUS_MASK = 0x0F\n MIRROR_MEMORY_DTC_EXTENDED_DATA_RECORD_BY_DTC_NUMBER = 0x10\n NUMBER_OF_MIRROR_MEMORY_DTC_BY_STATUS_MASK = 0x11\n NUMBER_OF_EMISSIONS_RELATED_OBD_DTC_BY_STATUS_MASK = 0x12\n EMISSIONS_RELATED_OBD_DTC_BY_STATUS_MASK = 0x13\n DTC_FAULT_DETECTION_COUNTER = 0x14\n DTC_WITH_PERMANENT_STATUS = 0x15\n\nclass DTC_STATUS_MASK_TYPE(IntEnum):\n TEST_FAILED = 0x01\n TEST_FAILED_THIS_OPERATION_CYCLE = 0x02\n PENDING_DTC = 0x04\n CONFIRMED_DTC = 0x08\n TEST_NOT_COMPLETED_SINCE_LAST_CLEAR = 0x10\n TEST_FAILED_SINCE_LAST_CLEAR = 0x20\n TEST_NOT_COMPLETED_THIS_OPERATION_CYCLE = 0x40\n WARNING_INDICATOR_uds_requestED = 0x80\n ALL = 0xFF\n\nclass DTC_SEVERITY_MASK_TYPE(IntEnum):\n MAINTENANCE_ONLY = 0x20\n CHECK_AT_NEXT_HALT = 0x40\n CHECK_IMMEDIATELY = 0x80\n ALL = 0xE0\n\ndef read_dtc_information(address, dtc_report_type, dtc_status_mask_type=DTC_STATUS_MASK_TYPE.ALL, dtc_severity_mask_type=DTC_SEVERITY_MASK_TYPE.ALL, dtc_mask_record=0xFFFFFF, dtc_snapshot_record_num=0xFF, dtc_extended_record_num=0xFF):\n data = ''\n # dtc_status_mask_type\n if dtc_report_type == DTC_REPORT_TYPE.NUMBER_OF_DTC_BY_STATUS_MASK or \\\n dtc_report_type == DTC_REPORT_TYPE.DTC_BY_STATUS_MASK or \\\n dtc_report_type == DTC_REPORT_TYPE.MIRROR_MEMORY_DTC_BY_STATUS_MASK or \\\n dtc_report_type == DTC_REPORT_TYPE.NUMBER_OF_MIRROR_MEMORY_DTC_BY_STATUS_MASK or \\\n dtc_report_type == DTC_REPORT_TYPE.NUMBER_OF_EMISSIONS_RELATED_OBD_DTC_BY_STATUS_MASK or \\\n dtc_report_type == DTC_REPORT_TYPE.EMISSIONS_RELATED_OBD_DTC_BY_STATUS_MASK:\n data += 
chr(dtc_status_mask_type)\n # dtc_mask_record\n if dtc_report_type == DTC_REPORT_TYPE.DTC_SNAPSHOT_IDENTIFICATION or \\\n dtc_report_type == DTC_REPORT_TYPE.DTC_SNAPSHOT_RECORD_BY_DTC_NUMBER or \\\n dtc_report_type == DTC_REPORT_TYPE.DTC_EXTENDED_DATA_RECORD_BY_DTC_NUMBER or \\\n dtc_report_type == DTC_REPORT_TYPE.MIRROR_MEMORY_DTC_EXTENDED_DATA_RECORD_BY_DTC_NUMBER or \\\n dtc_report_type == DTC_REPORT_TYPE.SEVERITY_INFORMATION_OF_DTC:\n data += struct.pack('!I', dtc_mask_record)[1:] # 3 bytes\n # dtc_snapshot_record_num\n if dtc_report_type == DTC_REPORT_TYPE.DTC_SNAPSHOT_IDENTIFICATION or \\\n dtc_report_type == DTC_REPORT_TYPE.DTC_SNAPSHOT_RECORD_BY_DTC_NUMBER or \\\n dtc_report_type == DTC_REPORT_TYPE.DTC_SNAPSHOT_RECORD_BY_RECORD_NUMBER:\n data += ord(dtc_snapshot_record_num)\n # dtc_extended_record_num\n if dtc_report_type == DTC_REPORT_TYPE.DTC_EXTENDED_DATA_RECORD_BY_DTC_NUMBER or \\\n dtc_report_type == DTC_REPORT_TYPE.MIRROR_MEMORY_DTC_EXTENDED_DATA_RECORD_BY_DTC_NUMBER:\n data += chr(dtc_extended_record_num)\n # dtc_severity_mask_type\n if dtc_report_type == DTC_REPORT_TYPE.NUMBER_OF_DTC_BY_SEVERITY_MASK_RECORD or \\\n dtc_report_type == DTC_REPORT_TYPE.DTC_BY_SEVERITY_MASK_RECORD:\n data += chr(dtc_severity_mask_type) + chr(dtc_status_mask_type)\n \n resp = _uds_request(address, SERVICE_TYPE.READ_DTC_INFORMATION, subfunction=dtc_report_type, data=data)\n\n # TODO: parse response\n return resp\n\nclass CONTROL_OPTION_TYPE(IntEnum):\n RETURN_CONTROL_TO_ECU = 0\n RESET_TO_DEFAULT = 1\n FREEZE_CURRENT_STATE = 2\n SHORT_TERM_ADJUSTMENT = 3\n\ndef input_output_control_by_identifier(address, data_identifier_type, control_option_record, control_enable_mask_record=''):\n data = struct.pack('!H', data_identifier_type) + control_option_record + control_enable_mask_record\n resp = _uds_request(address, SERVICE_TYPE.INPUT_OUTPUT_CONTROL_BY_IDENTIFIER, subfunction=None, data=data)\n resp_id = struct.unpack('!H', resp[0:2])[0] if len(resp) >= 2 else None\n if resp_id != data_identifier_type:\n raise ValueError('invalid response data identifier: {}'.format(hex(resp_id)))\n return resp[2:]\n\nclass ROUTINE_CONTROL_TYPE(IntEnum):\n START = 1\n STOP = 2\n REQUEST_RESULTS = 3\n\nclass ROUTINE_IDENTIFIER_TYPE(IntEnum):\n ERASE_MEMORY = 0xFF00\n CHECK_PROGRAMMING_DEPENDENCIES = 0xFF01\n ERASE_MIRROR_MEMORY_DTCS = 0xFF02\n\ndef routine_control(address, routine_control_type, routine_identifier_type, routine_option_record=''):\n data = struct.pack('!H', routine_identifier_type) + routine_option_record\n resp = _uds_request(address, SERVICE_TYPE.ROUTINE_CONTROL, subfunction=routine_control_type, data=data)\n resp_id = struct.unpack('!H', resp[0:2])[0] if len(resp) >= 2 else None\n if resp_id != routine_identifier_type:\n raise ValueError('invalid response routine identifier: {}'.format(hex(resp_id)))\n return resp[2:]\n\ndef request_download(address, memory_address, memory_size, memory_address_bytes=4, memory_size_bytes=4, data_format=0x00):\n data = chr(data_format)\n\n if memory_address_bytes < 1 or memory_address_bytes > 4:\n raise ValueError('invalid memory_address_bytes: {}'.format(memory_address_bytes))\n if memory_size_bytes < 1 or memory_size_bytes > 4:\n raise ValueError('invalid memory_size_bytes: {}'.format(memory_size_bytes))\n data += chr(memory_size_bytes<<4 | memory_address_bytes)\n\n if memory_address >= 1<<(memory_address_bytes*8):\n raise ValueError('invalid memory_address: {}'.format(memory_address))\n data += struct.pack('!I', memory_address)[4-memory_address_bytes:]\n if 
memory_size >= 1<<(memory_size_bytes*8):\n raise ValueError('invalid memory_size: {}'.format(memory_size))\n data += struct.pack('!I', memory_size)[4-memory_size_bytes:]\n\n resp = _uds_request(address, SERVICE_TYPE.REQUEST_DOWNLOAD, subfunction=None, data=data)\n max_num_bytes_len = resp[0] >> 4 if len(resp) > 0 else None\n if max_num_bytes_len >= 1 and max_num_bytes_len <= 4:\n max_num_bytes = struct.unpack('!I', ('\\x00'*(4-max_num_bytes_len))+resp[1:max_num_bytes_len+1])[0]\n else:\n raise ValueError('invalid max_num_bytes_len: {}'.format(max_num_bytes_len))\n\n return max_num_bytes # max number of bytes per transfer data request\n\ndef request_upload(address, memory_address, memory_size, memory_address_bytes=4, memory_size_bytes=4, data_format=0x00):\n data = chr(data_format)\n\n if memory_address_bytes < 1 or memory_address_bytes > 4:\n raise ValueError('invalid memory_address_bytes: {}'.format(memory_address_bytes))\n if memory_size_bytes < 1 or memory_size_bytes > 4:\n raise ValueError('invalid memory_size_bytes: {}'.format(memory_size_bytes))\n data += chr(memory_size_bytes<<4 | memory_address_bytes)\n\n if memory_address >= 1<<(memory_address_bytes*8):\n raise ValueError('invalid memory_address: {}'.format(memory_address))\n data += struct.pack('!I', memory_address)[4-memory_address_bytes:]\n if memory_size >= 1<<(memory_size_bytes*8):\n raise ValueError('invalid memory_size: {}'.format(memory_size))\n data += struct.pack('!I', memory_size)[4-memory_size_bytes:]\n\n resp = _uds_request(address, SERVICE_TYPE.REQUEST_UPLOAD, subfunction=None, data=data)\n max_num_bytes_len = resp[0] >> 4 if len(resp) > 0 else None\n if max_num_bytes_len >= 1 and max_num_bytes_len <= 4:\n max_num_bytes = struct.unpack('!I', ('\\x00'*(4-max_num_bytes_len))+resp[1:max_num_bytes_len+1])[0]\n else:\n raise ValueError('invalid max_num_bytes_len: {}'.format(max_num_bytes_len))\n\n return max_num_bytes # max number of bytes per transfer data request\n\ndef transfer_data(address, block_sequence_count, data=''):\n data = chr(block_sequence_count)+data\n resp = _uds_request(address, SERVICE_TYPE.TRANSFER_DATA, subfunction=None, data=data)\n resp_id = resp[0] if len(resp) > 0 else None\n if resp_id != block_sequence_count:\n raise ValueError('invalid block_sequence_count: {}'.format(resp_id))\n return resp[1:]\n\ndef request_transfer_exit(address):\n _uds_request(address, SERVICE_TYPE.REQUEST_TRANSFER_EXIT, subfunction=None)\n\ndef tesla_key_from_seed(seed):\n tesla_key = 53\n return ''.join(chr(a ^ tesla_key) for a in seed)\n\ndef tesla_radar_security_access_algorithm(seed):\n # k4 = 4 bits\n k4 = seed >> 5 & 8 | seed >> 0xB & 4 | seed >> 0x18 & 1 | seed >> 1 & 2\n if DEBUG: print(\"k4=\",hex(k4))\n if DEBUG: print(\"seed&0x20000=\",hex(seed&0x20000))\n\n # k32 = 32 bits\n if seed & 0x20000 == 0:\n k32 = (seed & ~(0xff << k4 & 0xFFFFFFFF)) << 0x20 - k4 & 0xFFFFFFFF | seed >> k4 & 0xFFFFFFFF\n else:\n k32 = (~(0xff << k4 & 0xFFFFFFFF) << 0x20 - k4 & seed & 0xFFFFFFFF) >> 0x20 - k4 & 0xFFFFFFFF | seed << k4 & 0xFFFFFFFF\n if DEBUG: print(\"k32=\",hex(k32))\n\n # k2 = 2 bits\n k2 = seed >> 4 & 2 | seed >> 0x1F\n if DEBUG: print(\"k2=\",hex(k2))\n if k2 == 0:\n return k32 | seed\n if k2 == 1:\n return k32 & seed\n if k2 == 2:\n return k32 ^ seed\n return k32\n\nif __name__ == \"__main__\":\n from panda import Panda\n panda = Panda()\n bus = 1 \n tx_addr = 0x641 # tesla bosch radar RCM addr\n tx_queue = Queue()\n rx_queue = Queue()\n can_reader_t = threading.Thread(target=_isotp_thread, args=(panda, bus, tx_addr, 
tx_queue, rx_queue))\n can_reader_t.daemon = True\n can_reader_t.start()\n\n print(\"tester present ...\")\n tester_present(tx_addr)\n print(\"extended diagnostic session ...\")\n diagnostic_session_control(tx_addr, SESSION_TYPE.EXTENDED_DIAGNOSTIC)\n print(\"reading VIN from radar...\")\n vin = read_data_by_identifier(tx_addr, DATA_IDENTIFIER_TYPE.VIN)\n print(\"VIN: {} [{}]\".format(vin, hexlify(vin)))\n print(\"security access: request seed ...\")\n TESLA_ACCESS_LEVEL = 0x11\n while True:\n #for i in range(0,0x7F):\n # TESLA_ACCESS_LEVEL = i*2 + 1\n try:\n seedh = security_access(tx_addr, TESLA_ACCESS_LEVEL)\n seed = struct.unpack('>L',seedh)[0]\n except NegativeResponseError as e:\n if e.error_code == 0x37:\n print(\"sleep ... (required time delay not expired)\")\n time.sleep(1)\n continue\n raise\n break\n print(\"seed: {}\".format(hexlify(seedh)))\n print(\"security access: send key ...\")\n key = tesla_radar_security_access_algorithm(seed)\n key = struct.pack('!L',key)\n print(\"key: {}\".format(hexlify(key)))\n security_access(tx_addr, TESLA_ACCESS_LEVEL + 1 , key)\n\n #print(\"programming session ...\")\n #diagnostic_session_control(tx_addr, SESSION_TYPE.PROGRAMMING)\n\n print(\"write data by id: set VIN ...\")\n write_data_by_identifier(tx_addr, DATA_IDENTIFIER_TYPE.VIN, '5YJSA1S13EFP52303')\n\n print(\"write data by id: set secondary VIN ...\")\n write_data_by_identifier(tx_addr, 0xF199, '5YJSA1S13EFP52303')\n\n print(\"reading new VIN from radar...\")\n vin = read_data_by_identifier(tx_addr, DATA_IDENTIFIER_TYPE.VIN)\n print(\"new VIN: {} [{}]\".format(vin, hexlify(vin)))\n\n print(\"reading new secondary VIN from radar...\")\n vin = read_data_by_identifier(tx_addr, 0xF199)\n print(\"new secondary VIN: {} [{}]\".format(vin, hexlify(vin)))\n\n print(\"resetting ecu... (soft)\")\n ecu_reset(tx_addr,RESET_TYPE.SOFT)\n\n print(\"Done! VIN programming complete...\")\n\n\n", "id": "5221302", "language": "Python", "matching_score": 2.3436975479125977, "max_stars_count": 9, "path": "selfdrive/car/tesla/radar_tools/programRadarVin.py" }, { "content": "DEBUG = 1\r\n\r\ndef security_access_algorithm(seed):\r\n # k4 = 4 bits\r\n k4 = seed >> 5 & 8 | seed >> 0xB & 4 | seed >> 0x18 & 1 | seed >> 1 & 2\r\n if DEBUG: print(\"k4=\",hex(k4))\r\n if DEBUG: print(\"seed&0x20000=\",hex(seed&0x20000))\r\n\r\n # k32 = 32 bits\r\n if seed & 0x20000 == 0:\r\n k32 = (seed & ~(0xff << k4 & 0xFFFFFFFF)) << 0x20 - k4 & 0xFFFFFFFF | seed >> k4 & 0xFFFFFFFF\r\n else:\r\n k32 = (~(0xff << k4 & 0xFFFFFFFF) << 0x20 - k4 & seed & 0xFFFFFFFF) >> 0x20 - k4 & 0xFFFFFFFF | seed << k4 & 0xFFFFFFFF\r\n if DEBUG: print(\"k32=\",hex(k32))\r\n\r\n # k2 = 2 bits\r\n k2 = seed >> 4 & 2 | seed >> 0x1F\r\n if DEBUG: print(\"k2=\",hex(k2))\r\n if k2 == 0:\r\n return k32 | seed\r\n if k2 == 1:\r\n return k32 & seed\r\n if k2 == 2:\r\n return k32 ^ seed\r\n return k32\r\n\r\nif __name__== \"__main__\":\r\n seed = 0x01234567\r\n key = security_access_algorithm(seed)\r\n print(\"seed=\" , hex(seed))\r\n print(\" key=\" , hex(key))\r\n", "id": "6332310", "language": "Python", "matching_score": 0.48740968108177185, "max_stars_count": 9, "path": "selfdrive/car/tesla/radar_tools/sec_access.py" } ]
1.976173
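The programRadarVin.py record above frames UDS requests as ISO-TP messages on CAN: payloads shorter than 8 bytes go out as a single frame whose first byte is the length, longer payloads as a first frame carrying 0x1000 | size followed by 0x2N consecutive frames. The sketch below is an illustrative Python 3 restatement of just that framing (the record itself is Python 2 and drives panda hardware); flow-control handling is omitted and the example request bytes are arbitrary.

import struct

def isotp_frames(payload: bytes) -> list:
    # Single frame: [length][payload], padded to 8 bytes with zeros.
    if len(payload) < 8:
        return [(bytes([len(payload)]) + payload).ljust(8, b"\x00")]
    # First frame: big-endian (0x1000 | total length) plus the first 6 payload bytes.
    frames = [struct.pack("!H", 0x1000 | len(payload)) + payload[:6]]
    idx, pos = 1, 6
    while pos < len(payload):
        # Consecutive frames: 0x2N sequence nibble plus up to 7 payload bytes.
        frames.append((bytes([0x20 | (idx & 0xF)]) + payload[pos:pos + 7]).ljust(8, b"\x00"))
        idx += 1
        pos += 7
    return frames

# ReadDataByIdentifier (0x22) for the VIN identifier 0xF190, as in the record's read_data_by_identifier()
request = bytes([0x22]) + struct.pack("!H", 0xF190)
for frame in isotp_frames(request):
    print(frame.hex())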
cvsmith
[ { "content": "# Brainfuck interpreter in python\n\nclass Brainfuck:\n\n def __init__(self):\n self.guide = self.make_guide()\n \n def make_guide(self):\n guide = dict()\n\n guide['>'] = self.inc_ptr\n guide['<'] = self.dec_ptr\n guide['+'] = self.inc_byte\n guide['-'] = self.dec_byte\n guide['.'] = self.output\n guide[','] = self.input\n guide['['] = self.jump_forward\n guide[']'] = self.jump_back\n\n return guide\n\n def inc_ptr(self):\n if self.arr_ptr == len(self.arr) - 1:\n self.arr.append(0)\n self.arr_ptr += 1\n\n def dec_ptr(self):\n if self.arr_ptr == 0:\n self.arr.insert(0, 0)\n else:\n self.arr_ptr -= 1\n\n def inc_byte(self):\n self.arr[self.arr_ptr] += 1\n\n def dec_byte(self):\n self.arr[self.arr_ptr] -= 1\n\n def output(self):\n print chr(self.arr[self.arr_ptr]),\n\n def input(self):\n self.arr[self.arr_ptr] = int(raw_input(\">\"))\n\n def jump_forward(self):\n depth = 1\n if self.arr[self.arr_ptr] == 0:\n while depth != 0:\n self.cmd_ptr += 1\n cmd = self.code[self.cmd_ptr]\n if cmd == '[':\n depth += 1\n elif cmd == ']':\n depth -= 1\n\n def jump_back(self):\n depth = 1\n if self.arr[self.arr_ptr] != 0:\n while depth != 0:\n self.cmd_ptr -= 1\n cmd = self.code[self.cmd_ptr]\n if cmd == ']':\n depth += 1\n elif cmd == '[':\n depth -= 1\n\n def run(self, code, trace=False):\n self.arr = [0]\n self.arr_ptr = 0\n self.cmd_ptr = 0\n self.code = code\n while self.cmd_ptr < len(self.code):\n cmd = self.code[self.cmd_ptr]\n if trace: print self.arr, self.arr_ptr, cmd\n self.guide[cmd]()\n self.cmd_ptr += 1\n\n return self.arr\n\nbrainfuck = Brainfuck()\n\n# Takes input, moves it two cells right\nprint brainfuck.run(\",>>[-]<<[->>+<<]\", trace=True)\n\n# Hello World!\nprint brainfuck.run(\"++++++++[>++++[>++>+++>+++>+<<<<-]>+>+>->>+[<]<-]>>.>---.+++++++..+++.>>.<-.<.+++.------.--------.>>+.>++.\",\n trace=False)\n", "id": "6538831", "language": "Python", "matching_score": 0.003355672350153327, "max_stars_count": 0, "path": "brainfuck.py" }, { "content": "# ScalePairs.py\n\n# Chromatic definitions of different scale types\n# Guide:\n# C:1, Db:2, D:3, Eb:4, E:5, F:6, Gb:7, G:8, Ab:9, A:10, Bb:11, B:12 \nscalePairs = []\n# Copied from NodeBeat's \"NodeBeat Classic\"\nscalePairs += [(\"KosBeat\\nClassic\", [1, 3, 6, 8, 10])]\nscalePairs += [(\"Major\", [1, 3, 5, 6, 8, 10, 12])]\nscalePairs += [(\"Major\\nPentatonic\", [1, 3, 5, 8, 10])]\nscalePairs += [(\"Minor\\nPentatonic\", [1, 4, 6, 8, 11])]\nscalePairs += [(\"Natural\\nMinor\", [1, 3, 4, 6, 8, 9, 11])]\nscalePairs += [(\"Harmonic\\nMinor\", [1, 3, 4, 6, 8, 10, 12])]\nscalePairs += [(\"Melodic\\nMinor\", [1,3,4,6, 8, 8, 10, 12 ])]\nscalePairs += [(\"Chromatic\", [1,2,3,4,5,6,7,8,9,10,11,12])]\nscalePairs += [(\"Whole\\nTone\", [1,3,5,7,9,11])]\nscalePairs += [(\"Blues\", [1, 4, 6, 7, 8, 11])]\nscalePairs += [(\"Bebop\", [1, 3, 5, 6, 8, 10, 11, 12])]\nscalePairs += [(\"Algerian\", [1, 3, 4, 7, 8, 9, 12])]\nscalePairs += [(\"Spanish\", [1, 2, 5, 6, 8, 9, 11])]\nscalePairs += [(\"Arabic\", [1, 2, 5, 6, 8, 9, 12])]\nscalePairs += [(\"Hungarian\", [1, 3, 4, 7, 8, 9, 12])]\nscalePairs += [(\"Egyptian\", [1, 3, 4, 7, 8, 9, 11])]\nscalePairs += [(\"Inuit\", [1, 3, 5, 8])]\nscalePairs += [(\"Japanese\", [1, 3, 4, 8, 9])]", "id": "62696", "language": "Python", "matching_score": 2.1792609691619873, "max_stars_count": 0, "path": "ScalePairs.py" }, { "content": "# KosBeat.py\n# Or: TchaiKosbie, SchostaKosbie, ProKosbiev \n# <NAME> + cvsmith + Lecture 1, Recitation 
A\n\n################################################################################\n# Imports\n################################################################################\n\nfrom Tkinter import *\nfrom eventBasedAnimationClass import EventBasedAnimationClass\nfrom ScalePairs import scalePairs\nfrom KeyPairs import keyPairs\nimport os, pygame.mixer, time, copy\n\n################################################################################\n# Main class\n################################################################################\n\nclass KosBeat(EventBasedAnimationClass):\n# Run KosBeat game\n def initAnimation(self):\n # Set values at the start of each animation\n \n # No nodes on the screen at the start\n self.nodeList = []\n self.connectionList = []\n self.undoList = []\n\n # Setup audio\n # Numbers taken from \"A tutorial for sound in PyGame\":\n # http://www.pygame.org/wiki/tutorials \n # pygame.mixer.init(22050,-16,2,2048)\n pygame.mixer.init(22050,-16,2,2048)\n\n # Default audio settings\n # Keys defined chromatically, starting with C:1, Db:2, D:3,...B:12\n self.key = 1 # C\n self.scale = \"KosBeat\\nClassic\" # or major, minor, minorPentatonic\n self.activeOctave = 4\n self.beatsPerMinute = 120\n (self.minBPM, self.maxBPM) = (40, 200)\n self.beatsPerSecond = self.beatsPerMinute / 60.0 # 60s per min\n self.beatsPerMeasure = 4\n self.subdivisionsPerBeat = 2\n self.channel = 0\n self.activeDegree = 1\n\n self.nodeRadius = 12\n self.menuMargin = 40\n \n self.activeType = \"Node\"\n\n self.drawRings = False\n\n # Menu options for menu at top of screen\n self.makeMenuList()\n\n self.isPlaying = False\n\n (self.showScaleOption, self.showKeyOption) = (False, False)\n self.showStartScreen = True\n\n self.infiniteLoop = False\n (self.minLoops, self.maxLoops) = (1, 10)\n self.numberOfLoops = 2\n self.currentNumberOfLoops = 0\n\n def makeMenuList(self):\n # Return list of menu nodes and players for the top of the screen\n self.menuList = [MenuPlayer(3, \"#e74c3c\"), MenuPlayer(4, \"#2ecc71\"), \n MenuPlayer(5, \"#3498db\"), MenuPlayer(0, \"#95a5a6\"),\n MenuNode(1, \"#e74c3c\"), MenuNode(2, \"#e67e22\"), \n MenuNode(3, \"#f1c40f\"), MenuNode(4, \"#2ecc71\"),\n MenuNode(5, \"#3498db\"), MenuNode(6, \"#9b59b6\"), \n MenuNode(7, \"#34495e\")]\n\n # Initially select the first menuNode\n self.menuList[4].isSelected = True\n self.activeColor = self.menuList[4].color\n\n def isPointInNode(self, x, y):\n # If click is on an existing node, return True\n # Go through list in reverse to select topmost nodes first\n for node in self.nodeList[::-1]:\n # If click is within node's radius\n if self.isPointInRadius(x, y, node.x, node.y, node.r):\n node.isSelected = True\n return True\n\n @staticmethod\n def getNumberFromPair(pairs, name):\n # Return numerical entry corresponding to given name in list of pairs\n for pair in pairs:\n if pair[0] == name:\n return pair[1]\n\n @staticmethod\n def getNameFromPair(pairs, number):\n # Return name corresponding to given number in list of pairs\n for pair in pairs:\n if pair[1] == number:\n return pair[0]\n\n def isPointInMenu(self, x, y):\n # Return True if given point is within the menu\n numberOfOptions = len(self.menuList)\n if ((y < self.menuMargin * 2) and \n (abs(x-self.width/2) < self.menuMargin * (numberOfOptions+1)/2)):\n return True\n return False\n\n def isPointInSettings(self, x, y):\n # Return True if given point is within the settings\n return y > self.height - self.menuMargin * 2\n\n def isPointInRadius(self, x0, y0, x1, y1, r):\n # Return 
True (x0, y0) is within r of (x1, y1)\n return (abs(x0 - x1) <= r) and (abs(y0 - y1) <= r)\n\n def clearListSelection(self, itemsList):\n # Set all items in itemsList to unselected\n for item in itemsList:\n item.isSelected = False\n\n def makeNodeMenuSelection(self, x, y):\n # Select menuNode if click is within menu\n for option in self.menuList:\n if self.isPointInRadius(x, y, option.x, option.y, option.r):\n self.clearListSelection(self.menuList)\n option.isSelected = True\n self.activeColor = option.color\n if type(option) == MenuNode:\n self.activeType = \"Node\"\n self.activeDegree = option.degree\n else:\n self.activeType = \"Player\"\n self.activeOctave = option.octave\n\n def makeSettingsSelection(self, x, y):\n # Change the setting corresponding to the click\n margin = self.menuMargin\n # Toggle showKeyOption\n if x < margin * 2: self.showKeyOption = not self.showKeyOption\n else: self.showKeyOption = False\n # Toggle showScaleOption\n if margin * 2 < x < margin * 4: \n self.showScaleOption = not self.showScaleOption\n else: self.showScaleOption = False\n # Move tempoSlider\n if margin * 6.5 <= x <= margin * 15.5:\n self.setTempoSlider(x, y)\n # Move loopSlider\n if margin * 18 <= x <= margin * 22:\n self.setLoopSlider(x, y)\n # Toggle infinite loop\n if margin * 23 <= x <= margin * 25:\n self.infiniteLoop = not self.infiniteLoop\n # Toggle drawRings\n if x > self.width - margin * 2: self.drawRings = not self.drawRings\n\n def makeSingleConnection(self, player, node):\n # Make single connection between player and node\n for ringIndex in xrange(len(player.ringList)):\n # If node is within one of the player's rings\n ringRadius = player.ringList[ringIndex]\n if (abs(node.x - player.x) < ringRadius and\n abs(node.y - player.y) < ringRadius):\n # Create new Connection with sound's properties\n newConnection = Connection(player.x, player.y, \n node.x, node.y,\n self.key, self.scale,\n node.degree, player.octave,\n ringIndex, self.channel)\n self.connectionList += [newConnection]\n self.channel += 1\n pygame.mixer.set_num_channels(self.channel + 1)\n break\n\n def makeConnections(self):\n # Make connections between nodes and players, allowing players to play nodes\n # Clear existing connection list\n self.connectionList = []\n self.channel = 0\n listLength = len(self.nodeList)\n # Go through every pair of nodes and players in nodeList\n for originalIndex in xrange(listLength):\n for otherIndex in xrange(originalIndex + 1, listLength):\n # If original is a Player and other is a Node\n if (type(self.nodeList[originalIndex]) == Player and\n type(self.nodeList[otherIndex]) == Node):\n player = self.nodeList[originalIndex]\n node = self.nodeList[otherIndex]\n # If original is a Node and other is a Player\n elif (type(self.nodeList[originalIndex]) == Node and\n type(self.nodeList[otherIndex]) == Player):\n player = self.nodeList[otherIndex]\n node = self.nodeList[originalIndex]\n else:\n # Skip connection for current pair if they are the same type\n continue\n self.makeSingleConnection(player, node)\n\n def isPointOnCanvas(self, x, y):\n # Return True if point is on canvas\n return (0 <= x <= self.width) and (0 <= y <= self.height)\n\n def leftMouseMoved(self, event):\n # Left mouse moved for click and drag\n # Move a selected node if the mouse is within the screen\n (x, y) = (event.x, event.y)\n\n if (self.isPointOnCanvas(x, y) and (not self.isPointInMenu(x, y)) and \n (not self.isPointInSettings(x, y)) and \n (not(self.width-self.menuMargin*2 < x < self.width and\n 2 < y < 
self.menuMargin*2))):\n for node in self.nodeList:\n if node.isSelected:\n (node.x, node.y) = (x, y)\n self.makeConnections()\n self.redrawAll()\n elif self.isPointInSettings(x, y):\n if self.menuMargin * 6.5 <= x <= self.menuMargin * 15.5:\n self.setTempoSlider(x, y)\n elif self.menuMargin * 18 <= x <= self.menuMargin * 22:\n self.setLoopSlider(x, y)\n\n def selectScaleAtPoint(self, x, y):\n # Set self.scale to scale at point in scale option menu\n if self.menuMargin * 2 < x < self.menuMargin * 4:\n dy = self.height - self.menuMargin * 2 - y\n if dy > 0:\n newScaleIndex = dy / self.menuMargin\n if newScaleIndex < len(scalePairs):\n self.scale = scalePairs[newScaleIndex][0]\n self.makeConnections()\n self.redrawAll()\n else:\n self.showScaleOption = False\n else:\n self.showScaleOption = False\n\n def selectKeyAtPoint(self, x, y):\n # Set self.key to key at point in key option menu\n if x < self.menuMargin * 2:\n dy = self.height - self.menuMargin * 2 - y\n if dy > 0:\n newKey = (dy / self.menuMargin) + 1\n if newKey <= len(keyPairs):\n self.key = newKey\n self.makeConnections()\n else:\n self.showKeyOption = False\n else:\n self.showKeyOption = False\n\n def reinitializeNodes(self):\n # Remake nodeList after rhythm change\n newNodeList = []\n for node in self.nodeList:\n (x, y, r) = (node.x, node.y, node.r)\n if type(node) == Player:\n new = Player(x, y, r, node.octave, node.color,\n self.beatsPerMeasure * self.subdivisionsPerBeat)\n else:\n # Otherwise, add new node\n new = Node(x, y, r, self.activeDegree, self.activeColor)\n self.nodeList += [new]\n\n def setLoopSlider(self, x, y):\n # Set self.numberOfLoops and move loopSlider accordingly\n margin = self.menuMargin\n if (self.height - margin - self.nodeRadius < y < \n self.height - margin + self.nodeRadius):\n self.numberOfLoops = ((x - margin * 18) * \n (self.maxLoops - self.minLoops) /\n (margin * 22 - margin * 18) + self.minLoops)\n self.redrawAll()\n\n def setTempoSlider(self, x, y):\n # Set self.beatsPerMinute and move tempo slider accordingly\n margin = self.menuMargin\n if (self.height - margin - self.nodeRadius < y < \n self.height - margin + self.nodeRadius):\n self.beatsPerMinute = ((x - margin * 6.5) * \n (self.maxBPM - self.minBPM) /\n (margin * 15.5 - margin * 6.5) + self.minBPM)\n self.beatsPerSecond = self.beatsPerMinute / 60.0\n self.makeConnections()\n self.redrawAll()\n\n def onMousePressed(self, event):\n # Add new node at location mouse pressed or play existing node\n \n if self.isPlaying:\n return None\n if self.showStartScreen:\n return None\n self.clearListSelection(self.nodeList)\n (x, y, r) = (event.x, event.y, self.nodeRadius)\n if self.isPointInMenu(x, y):\n self.showKeyOption = False\n self.showScaleOption = False\n self.makeNodeMenuSelection(x, y)\n # Clear button\n elif (self.width-self.menuMargin*2 < x < self.width and\n 2 < y < self.menuMargin*2): \n self.nodeList = []\n self.makeConnections()\n elif self.isPointInSettings(x, y):\n self.makeSettingsSelection(x, y)\n elif self.showKeyOption:\n self.selectKeyAtPoint(x, y)\n elif self.showScaleOption:\n self.selectScaleAtPoint(x, y)\n \n # Select node if click is on node, otherwise make new one\n else:\n self.showKeyOption = False\n self.showScaleOption = False\n if not self.isPointInNode(x, y):\n\n # Otherwise, add new Player or Node\n if self.activeType == \"Player\":\n new = Player(x, y, r, self.activeOctave, self.activeColor,\n self.beatsPerMeasure * self.subdivisionsPerBeat)\n else:\n # Otherwise, add new node\n new = Node(x, y, r, self.activeDegree, 
self.activeColor)\n self.nodeList += [new]\n\n # Make connections between nodes and players\n self.makeConnections()\n\n def deleteSelectedNode(self):\n # Delete the selected node in nodeList, if one is selected\n for i in xrange(len(self.nodeList)):\n if self.nodeList[i].isSelected:\n self.nodeList.pop(i)\n self.clearListSelection(self.nodeList)\n self.makeConnections()\n break\n\n def onKeyPressed(self, event):\n # Handle key presses\n # Play/Pause\n if event.keysym == \"space\":\n if self.showStartScreen:\n self.showStartScreen = False\n return\n if not self.isPlaying:\n self.isPlaying = True\n else:\n self.isPlaying = False\n\n if self.isPlaying:\n if self.infiniteLoop:\n text = (\"KosBeat - Playing Infinite Loop...Press 'space' to stop.\")\n self.root.wm_title(text)\n else:\n text = \"KosBeat - Playing %s Loops...\" % self.numberOfLoops\n self.root.wm_title(text)\n self.playLoop()\n\n # Delete\n elif (event.keysym == \"BackSpace\") or (event.keysym == \"Delete\"):\n self.deleteSelectedNode()\n\n # Clear screen\n elif event.char == \"c\":\n self.nodeList = []\n self.makeConnections()\n\n def drawItemsInLists(self, *args):\n # Draw items from lists\n for itemsList in args:\n for item in itemsList:\n item.draw(self.canvas)\n # Draw rings if rings are set to display and item is a player\n if (self.drawRings) and type(item) == Player:\n item.drawRings(self.canvas)\n\n def drawNodeSelectionMenu(self):\n # Draw Node selection menu along top of screen\n \n menuMargin = self.menuMargin\n # Start left of center\n cx = (self.width / 2) - (len(self.menuList) / 2) * menuMargin\n # Vertical margin\n cy = menuMargin\n # Circle radius\n r = self.nodeRadius\n\n # Draw menu box\n self.canvas.create_rectangle(cx-menuMargin, cy-menuMargin+2, \n cx+menuMargin*len(self.menuList), \n cy+menuMargin, fill=\"#ecf0f1\")\n # Draw each node option\n for menuNode in self.menuList:\n (menuNode.x, menuNode.y, menuNode.r) = (cx, cy, r)\n menuNode.draw(self.canvas)\n cx += menuMargin\n\n def drawRingOption(self):\n # Draw ring toggle option in bottom right\n margin = self.menuMargin * 2\n self.canvas.create_rectangle(self.width-margin, self.height-margin,\n self.width, self.height, fill=\"#ecf0f1\")\n message = \"Hide Rings\" if self.drawRings else \"Show Rings\"\n self.canvas.create_text(self.width-margin/2, self.height-margin/2,\n text=message, font=(\"Helvetica\", 10, \"bold\"))\n\n def drawScaleOption(self):\n # Draw scale selections in bottom left\n margin = self.menuMargin\n message = \"Scale:\\n%s\" % (self.scale)\n\n self.canvas.create_rectangle(margin*2, self.height-margin*2,\n margin*4, self.height, fill=\"#ecf0f1\") \n self.canvas.create_text(margin*3, self.height-margin, text=message,\n font=(\"Helvetica\", 10, \"bold\"))\n\n if self.showScaleOption:\n\n y = self.height - margin * 2 - margin/2\n\n for pair in scalePairs:\n\n scaleName = pair[0]\n self.canvas.create_rectangle(margin*2, y-margin/2, \n margin*4, y+margin/2,\n fill=\"#ecf0f1\")\n self.canvas.create_text(margin*3, y, text=scaleName)\n y -= margin\n\n def drawKeyOption(self):\n # Draw key selections in bottom left\n margin = self.menuMargin\n message = \"Key: %s\" % (KosBeat.getNameFromPair(keyPairs, self.key))\n\n self.canvas.create_rectangle(2, self.height-margin*2,\n margin*2, self.height, fill=\"#ecf0f1\")\n self.canvas.create_text(margin, self.height-margin,text=message,\n font=(\"Helvetica\", 10, \"bold\"))\n\n if self.showKeyOption:\n\n y = self.height - margin * 2 - margin/2\n for pair in keyPairs:\n keyName = pair[0]\n 
self.canvas.create_rectangle(2, y-margin/2, \n margin*2, y+margin/2,\n fill=\"#ecf0f1\")\n self.canvas.create_text(margin, y, text=keyName)\n y -= margin\n\n def drawLoopSlider(self):\n # Draw loop slider along bottom of screen\n margin = self.menuMargin\n if not self.infiniteLoop: message = \"Loops: %s\" % (self.numberOfLoops)\n else: message = \"Loops: \"\n lineY = self.height-margin\n self.canvas.create_rectangle(margin*16, self.height-margin*2,\n self.width-margin*2, self.height, \n fill=\"#ecf0f1\")\n self.canvas.create_text(margin*17, self.height-margin, text=message,\n font=(\"Helvetica\", 10, \"bold\"))\n\n lineStart = margin * 18\n lineEnd = margin * 22\n\n self.canvas.create_text(lineStart, self.height-margin*0.5, \n text=self.minLoops, font=(\"Helvetica\", 10, \"bold\"))\n self.canvas.create_text(lineEnd, self.height-margin*0.5,\n text=self.maxLoops, font=(\"Helvetica\", 10, \"bold\"))\n\n self.canvas.create_line(lineStart, lineY,\n lineEnd, self.height-margin)\n if not self.infiniteLoop:\n sliderX = (lineStart + (self.numberOfLoops - self.minLoops) * \n (lineEnd - lineStart) / (self.maxLoops - self.minLoops))\n\n self.canvas.create_oval(sliderX-self.nodeRadius, \n lineY-self.nodeRadius, sliderX+self.nodeRadius,\n lineY+self.nodeRadius, fill=\"#ecf0f1\")\n\n def drawTempoSlider(self):\n # Draw tempo slider along bottom of screen\n margin = self.menuMargin\n message = \"Beats Per\\nMinute: %i\" % (self.beatsPerMinute)\n lineY = self.height-margin\n self.canvas.create_rectangle(margin*4, self.height-margin*2,\n margin*16, self.height, fill=\"#ecf0f1\")\n self.canvas.create_text(margin*5.25, self.height-margin, text=message,\n font=(\"Helvetica\", 10, \"bold\"))\n \n lineStart = margin * 6.5\n lineEnd = margin * 15.5\n\n self.canvas.create_text(lineStart, self.height-margin*0.5, \n text=self.minBPM, font=(\"Helvetica\", 10, \"bold\"))\n self.canvas.create_text(lineEnd, self.height-margin*0.5,\n text=self.maxBPM, font=(\"Helvetica\", 10, \"bold\"))\n \n self.canvas.create_line(lineStart, lineY,\n lineEnd, self.height-margin)\n sliderX = (lineStart + (self.beatsPerMinute-self.minBPM) * \n (lineEnd - lineStart) / (self.maxBPM - self.minBPM))\n\n self.canvas.create_oval(sliderX-self.nodeRadius, \n lineY-self.nodeRadius, sliderX+self.nodeRadius,\n lineY+self.nodeRadius, fill=\"#ecf0f1\")\n\n def drawInfiniteLoopOption(self):\n # Draw checkbox for infinite loop\n margin = self.menuMargin\n if self.infiniteLoop: message = \"Disable\\nInfinite Loop\"\n else: message = \"Enable\\nInfinite Loop\"\n\n self.canvas.create_text(margin*23.5, self.height-margin,\n text=message, font= \"Helvetica 10 bold\")\n\n def drawSettings(self):\n # Draw settings along bottom of screen\n # Audio:\n # Key\n self.drawKeyOption()\n # Scale type\n self.drawScaleOption()\n # Rhythm:\n # Tempo\n self.drawTempoSlider()\n # Loops:\n self.drawLoopSlider()\n self.drawInfiniteLoopOption()\n #Display:\n # Show rings\n self.drawRingOption()\n pass\n \n def writeTitle(self):\n # Write title\n title = \"KosBeat\"\n self.canvas.create_text(10, 2, text=title, anchor=NW,\n fill=\"#2c3e50\", \n font=(\"Helvetica\", 64, \"bold\"))\n\n def writeOverview(self):\n # Write overview\n text = \"A Geometric Music Generator\"\n self.canvas.create_text(10, 94, text=text, anchor=NW, fill=\"#16a085\",\n font=(\"Helvetica\", 36, \"italic\"))\n\n def writeInstructions(self):\n # Write instructions\n text = \"\"\"Squares are players. 
Circles are notes.\nWhen players and notes are near each other, the players play the notes.\nA player plays the notes closest to itself first.\nEach player and note combination produces a different sound.\n\nClick on the type of player or note you want at the top of the screen,\nthen click to place it on the screen. You may drag existing players and notes\nand delete selected ones using \"Backspace\" or \"Delete\".\n\nClick on settings at the bottom to customize your composition's properties.\n\nWhen you are ready to hear your composition, press the space bar.\nYou cannot edit while audio is playing.\nDo not click on the screen while audio is playing.\nYour loop will stop on its own after the indicated number of loops.\nOr, if you enabled infinite looping, you can press the space bar, and \nafter completing one more loop, the audio will stop.\n\"\"\"\n self.canvas.create_text(10, 150, anchor=NW, fill=\"#2c3e50\", text=text,\n font=(\"Helvetica\", 20))\n\n def drawStartPrompt(self):\n # Prompt user to press space to begin\n text = \"Press the space bar to begin!\"\n self.canvas.create_text(self.width/2, self.height-self.menuMargin*1.5, \n text=text, fill=\"#16a085\",\n font=(\"Helvetica\", 36, \"italic\"))\n\n def drawStartScreen(self):\n # Draw start screen at beginning of game\n self.writeTitle()\n self.writeOverview()\n self.writeInstructions()\n self.drawStartPrompt()\n\n def drawClearButton(self):\n # Draw clear button in upper right corner\n text = \"Clear Screen\"\n self.canvas.create_rectangle(self.width-self.menuMargin*2, 2,\n self.width, self.menuMargin*2, \n fill=\"#ecf0f1\")\n self.canvas.create_text(self.width-self.menuMargin, \n self.menuMargin,\n text=text, font=(\"Helvetica\", 9, \"bold\"))\n\n def redrawAll(self):\n # Redraw everything on canvas\n if self.showStartScreen:\n self.drawStartScreen()\n return None\n if self.isPlaying:\n if self.infiniteLoop:\n text = \"KosBeat - Playing Infinite Loop...Press the space bar to stop.\"\n self.root.wm_title(text)\n else:\n text = \"KosBeat - Playing %s Loops...\" % self.numberOfLoops\n self.root.wm_title(text)\n self.playLoop()\n else:\n self.root.wm_title(\"KosBeat - Editing\")\n self.canvas.delete(ALL)\n # Draw Nodes, Players, and Connections\n self.drawItemsInLists(self.nodeList, self.connectionList)\n self.drawNodeSelectionMenu()\n self.drawSettings()\n self.drawClearButton()\n \n def clearPlayedConnections(self):\n # Reset all connections to unplayed\n for connection in self.connectionList: connection.played = False\n\n def playLoop(self):\n # Play the audio loop on command\n startTime = time.time()\n\n while self.isPlaying:\n totalTime = time.time() - startTime\n subdivision = int(totalTime * self.beatsPerSecond * \n self.subdivisionsPerBeat)\n\n for connection in self.connectionList:\n if connection.subdivision == subdivision:\n # Highlight rings\n connection.play()\n\n if subdivision == self.beatsPerMeasure * self.subdivisionsPerBeat:\n # When the end of one measure has been reached\n self.currentNumberOfLoops += 1\n if (not self.infiniteLoop and \n self.currentNumberOfLoops >= self.numberOfLoops):\n self.currentNumberOfLoops = 0\n self.isPlaying = False\n self.clearPlayedConnections()\n break\n\n################################################################################\n# Node class\n################################################################################\n\nclass Node(object):\n# Holds node's location and sound info\n def __init__(self, x, y, r, degree, color):\n # Initialize Node object\n # Sound info\n 
self.degree = degree\n \n # Location info\n (self.x, self.y, self.r) = (x, y, r)\n\n # Color info\n self.color = color\n\n self.isSelected = False\n\n def draw(self, canvas):\n # Draw Node\n # Draw selection circle if selected\n if self.isSelected:\n selectionColor = \"light blue\"\n canvas.create_oval(self.x-self.r*3/2, self.y-self.r*3/2, \n self.x+self.r*3/2, self.y+self.r*3/2,\n fill=selectionColor)\n\n (x, y, r, color) = (self.x, self.y, self.r, self.color)\n canvas.create_oval(x-r, y-r, x+r, y+r, fill=color)\n\nclass Player(object):\n# Player class\n def __init__(self, x, y, r, octave, color, numRings):\n # Initialize Player object\n (self.x, self.y, self.r) = (x, y, r)\n self.octave = octave\n self.color = color\n self.ringList = []\n self.makeRings(numRings)\n\n self.isSelected = False\n\n def draw(self, canvas):\n # Draw Player object\n # Draw selection box if selected\n if self.isSelected:\n selectionColor = \"light blue\"\n canvas.create_rectangle(self.x-self.r*3/2, self.y-self.r*3/2, \n self.x+self.r*3/2, self.y+self.r*3/2,\n fill=selectionColor)\n\n canvas.create_rectangle(self.x-self.r, self.y-self.r, \n self.x+self.r, self.y+self.r, \n fill=self.color)\n\n def drawRings(self, canvas):\n # Draw Player's rings\n for r in self.ringList:\n canvas.create_oval(self.x-r, self.y-r, self.x+r, self.y+r)\n\n def makeRings(self, numRings):\n # Make numRings rings surrounding each player, 1 for each subdivision\n for radius in xrange(1, numRings + 1):\n # Each new radius's size is increased by a factor of 3\n self.ringList += [self.r * radius * 3]\n\nclass Connection(object):\n# Connection objects play nodes from players\n def __init__(self, x0, y0, x1, y1, \n key, scale, degree, octave, subdivision, channel):\n # Initialize connections\n (self.x0, self.y0) = (x0, y0)\n (self.x1, self.y1) = (x1, y1)\n self.key = key\n self.scale = scale\n self.degree = degree\n self.octave = octave\n self.subdivision = subdivision\n\n self.numericalName = self.getNumericalName()\n self.path = os.path.join(\"data\", self.numericalName + \".wav\")\n self.sound = pygame.mixer.Sound(self.path)\n self.channel = pygame.mixer.Channel(channel)\n\n self.played = False\n\n def getNumericalName(self):\n # Return note name from its key, scale, degree, and octave\n # Create name\n currentScale = KosBeat.getNumberFromPair(scalePairs, self.scale)\n # Wrap around for shorter scales at higher degrees\n self.degree = self.degree % len(currentScale) - 1\n # Get numerical note from scale\n currentNumber = currentScale[self.degree]\n # Transpose degree to node's key (If key is 1 for C, no shift)\n currentNumber += self.key - 1\n # Wrap around again after transposition\n # 12 is max note number. 
12 remains 12, 13 becomes 1, 14 becomes 2, etc.\n if currentNumber > 12: currentNumber %= 12 \n\n numericalName = str(self.octave) + os.sep + str(currentNumber)\n\n return numericalName\n \n def draw(self, canvas):\n # Draw Connection\n canvas.create_line(self.x0, self.y0, self.x1, self.y1)\n\n def play(self):\n # Play Connection's\n if not self.played:\n self.channel.play(self.sound)\n self.played = True\n\n################################################################################\n# Menu classes\n################################################################################\n\nclass MenuNode(object):\n# Holds menu node's properties\n def __init__(self, degree, color):\n # Initialize MenuNode \n self.degree = degree\n self.color = color\n self.isSelected = False\n\n def draw(self, canvas):\n # Draw MenuNode\n # Draw selection circle if selected\n if self.isSelected:\n selectionColor = \"light blue\"\n canvas.create_oval(self.x-self.r*3/2, self.y-self.r*3/2, \n self.x+self.r*3/2, self.y+self.r*3/2,\n fill=selectionColor)\n\n canvas.create_oval(self.x-self.r, self.y-self.r, \n self.x+self.r, self.y+self.r, \n fill=self.color)\n\nclass MenuPlayer(object):\n# Holds menu player's properties\n def __init__(self, octave, color):\n # Initialize MenuPlayer object\n self.octave = octave\n self.color = color\n self.isSelected = False\n\n def draw(self, canvas):\n # Draw MenuNode\n # Draw selection box if selected\n if self.isSelected:\n selectionColor = \"light blue\"\n canvas.create_rectangle(self.x-self.r*3/2, self.y-self.r*3/2, \n self.x+self.r*3/2, self.y+self.r*3/2,\n fill=selectionColor)\n\n canvas.create_rectangle(self.x-self.r, self.y-self.r, \n self.x+self.r, self.y+self.r, \n fill=self.color)\n\nKosBeat(1065, 805).run()", "id": "4816849", "language": "Python", "matching_score": 2.7971785068511963, "max_stars_count": 0, "path": "KosBeat.py" }, { "content": "# KeyPairs.py\n\n# Pairs key name to corresponding numerical identifier\nkeyPairs = [(\"C\",1),(\"Db/C#\",2),(\"D\",3),(\"Eb/D#\",4),(\"E\",5),(\"F\",6),\n (\"Gb/F#\",7),(\"G\",8),(\"Ab/G#\",9),(\"A\",10),(\"Bb/A#\",11),(\"B\",12)]\n\n\n", "id": "12746999", "language": "Python", "matching_score": 0.28445857763290405, "max_stars_count": 0, "path": "KeyPairs.py" } ]
1.23186
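In the KosBeat.py record above, Connection.getNumericalName turns a (key, scale, degree, octave) combination into a sample file name: it looks the degree up in the chromatic scale definition, transposes by key - 1, and wraps values above 12 back into the 1..12 range. Below is a small Python 3 restatement of that arithmetic for illustration; the major-pentatonic list is copied from the ScalePairs.py record, and the example key, degree, and octave values are arbitrary.

import os

MAJOR_PENTATONIC = [1, 3, 5, 8, 10]  # chromatic numbers, C:1 ... B:12 (from ScalePairs.py)

def sample_name(key, scale, degree, octave):
    index = degree % len(scale) - 1   # wrap high degrees onto shorter scales
    note = scale[index] + key - 1     # transpose from C into the chosen key
    if note > 12:
        note %= 12                    # 13 wraps to 1, 14 to 2, ...
    return str(octave) + os.sep + str(note)

# degree 1 of D major pentatonic (key 3) is D (3); prints "4/3" on POSIX
print(sample_name(key=3, scale=MAJOR_PENTATONIC, degree=1, octave=4))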
6392-creators
[ { "content": "#!/usr/bin/python3\n# coding=utf-8\n# Copyright(C) Team #6392 Dimension Creators\n\nimport numpy as np\nimport cv2\nimport time\n\ncap = cv2.VideoCapture(0)\n\nmin_hue, min_sat, min_val = 35, 43, 30\nmax_hue, max_sat, max_val = 77, 255, 255\nkernel = np.ones((7, 7), np.uint8)\n\nif not cap.isOpened():\n print(\"No Cameras Detected!\")\n # frame = cv2.imread(\"C:\\\\Users\\\\Leo\\\\Desktop\\\\image.bmp\", flags=cv2.IMREAD_UNCHANGED)\n # hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)\n # binary_img = cv2.inRange(\n # hsv, (min_hue, min_sat, min_val), (max_hue, max_sat, max_val))\n # for i in range(0, 5):\n # binary_img = cv2.morphologyEx(binary_img, cv2.MORPH_OPEN, kernel)\n # for i in range(0, 5):\n # binary_img = cv2.morphologyEx(binary_img, cv2.MORPH_CLOSE, kernel)\n # a, contours, b = cv2.findContours(\n # binary_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n # cv2.drawContours(frame, contours, -1, (204, 1195, 49), 5)\n # while True:\n # cv2.namedWindow('binary_img')\n # cv2.namedWindow('original_img')\n # cv2.imshow('binary_img', binary_img)\n # cv2.imshow('original_img', frame)\n\nelse:\n while True:\n ret, frame = cap.read()\n t1 = time.time_ns()\n if not ret:\n break\n else:\n hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)\n binary_img = cv2.inRange(\n hsv, (min_hue, min_sat, min_val), (max_hue, max_sat, max_val))\n\n for i in range(0, 5):\n binary_img = cv2.morphologyEx(\n binary_img, cv2.MORPH_OPEN, kernel)\n\n for i in range(0, 5):\n binary_img = cv2.morphologyEx(\n binary_img, cv2.MORPH_CLOSE, kernel)\n\n a, contours, b = cv2.findContours(\n image=binary_img, mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_SIMPLE)\n cv2.drawContours(frame, contours, contourIdx=-1,\n thickness=5, color=(204, 195, 49))\n\n t2 = time.time_ns()\n fps = \"FPS: \" + str(int(1/((t2 - t1) * (10**-9))))\n cv2.putText(img=frame, text=fps, org=(\n 0, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.5, color=(218, 178, 217))\n\n cv2.imshow('binary_img', binary_img)\n cv2.imshow('color_img', frame)\n\n if cv2.waitKey(1) and 0xFF == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()\n", "id": "11677346", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "localtest.py" }, { "content": "from cscore import *\nimport cv2\nimport numpy as np\n\ncamWidth = 1280\ncamHeight = 960\n\ncs = CameraServer.getInstance()\ncs.enableLogging()\n\ncamera = cs.startAutomaticCapture()\ncamera.setResolution(camWidth, camHeight)\n\nsink = cs.getVideo()\n\nwhile True:\n time, input_img \n\n if time == 0: # There is an error\n continue", "id": "11421625", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "vision.py" } ]
0
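The localtest.py record above isolates a green target by thresholding in HSV and cleaning the mask with morphological open/close before extracting contours. The following is a camera-free Python 3 sketch of that pipeline on a synthetic frame; the HSV bounds are the ones in the record, while the test image and rectangle are invented, and OpenCV plus NumPy are assumed to be installed.

import cv2
import numpy as np

frame = np.zeros((240, 320, 3), dtype=np.uint8)
cv2.rectangle(frame, (100, 80), (220, 160), (0, 200, 0), thickness=-1)  # solid green target (BGR)

hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, (35, 43, 30), (77, 255, 255))   # green range used in the record

kernel = np.ones((7, 7), np.uint8)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)   # remove speckle noise
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)  # fill small holes

# cv2.findContours returns 3 values on OpenCV 3.x and 2 on 4.x; [-2] picks the contours in both
contours = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]
print("contours found:", len(contours))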
fiftysol
[ { "content": "import os\nimport re\nimport sys\nfrom typing import Dict, Set\n\n\ngithub_link = (\n\t\"https://raw.githubusercontent.com/\"\n\t\"{owner}/{repo}/{branch}/{file}.lua\"\n)\ngist_link = (\n\t\"https://gist.githubusercontent.com/\"\n\t\"{owner}/{repo}/raw/{branch}/{file}.lua\"\n)\nrequire_exp = r\"(require\\s*\\(?\\s*[\\\"'])(.+?)([\\\"'])\"\n\n\ndef scan(path: str, files: Dict[str, str]) -> Set[str]:\n\tif not os.path.isfile(path):\n\t\traise FileNotFoundError(f\"Could not find file {path}\")\n\n\tdirname = os.path.dirname(path)\n\twith open(path, \"r\") as file:\n\t\tcontent = file.read()\n\n\tdependencies = set()\n\tfor prefix, required, suffix in re.findall(require_exp, content):\n\t\tdependency = os.path.normpath(os.path.join(dirname, required))\n\t\tdep = dependency.replace(\"\\\\\", \"\\\\\\\\\")\n\t\tcontent = content.replace(\n\t\t\tf\"{prefix}{required}{suffix}\",\n\t\t\tf\"{prefix}{dep}{suffix}\",\n\t\t\t1\n\t\t)\n\n\t\tdependencies.add(dependency)\n\t\tdependencies = dependencies.union(scan(dependency, files))\n\n\tfiles[path] = content\n\treturn dependencies\n\n\ndef link(entry: str) -> str:\n\tentry = os.path.normpath(entry)\n\toutput = [\"--[[ COMPUTER GENERATED FILE: DO NOT MODIFY DIRECTLY ]]--\"]\n\n\t# Append a require mockup, so we can use it inside the script\n\twith open(\"./mockup-require.lua\", \"r\") as file:\n\t\toutput.extend(line.rstrip() for line in file.readlines())\n\n\tfiles: Dict[str, str] = {}\n\tdependencies = scan(entry, files)\n\tdependencies.add(entry)\n\tfor path in dependencies:\n\t\t# For every dependency, we append their file and use __registerFile()\n\t\t# from basic-require.lua; to be able to use it from require()\n\t\toutput.append(\n\t\t\t'__registerFile(\"{}\", {}, function()'\n\t\t\t.format(path.replace(\"\\\\\", \"\\\\\\\\\"), len(output) + 1)\n\t\t)\n\t\toutput.extend(files.pop(path).split(\"\\n\"))\n\t\toutput.append('end)')\n\n\t# Add a call to run init.lua\n\tentry = entry.replace(\"\\\\\", \"\\\\\\\\\")\n\toutput.extend((\n\t\tf'local done, result = pcall(require, \"{entry}\")',\n\t\t'if not done then',\n\t\t'\terror(__errorMessage(result))',\n\t\t'end'\n\t))\n\t# Remove trailing whitespace and join all lines\n\treturn \"\\n\".join(line.rstrip() for line in output)\n\n\nentry, dest = \"./release/init.lua\", \"./dist.lua\"\nfor i in range(1, len(sys.argv), 2):\n\tif sys.argv[i] == \"--src\":\n\t\tentry = sys.argv[i + 1]\n\telif sys.argv[i] == \"--dest\":\n\t\tdest = sys.argv[i + 1]\n\nwith open(dest, \"w\") as file:\n\tfile.write(link(entry))\n", "id": "2076385", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "link.py" } ]
0
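link.py in the fiftysol record above bundles a Lua project into one file by rewriting every require path relative to the requiring file and registering each module for a mocked-up require. The snippet below exercises just the path-rewriting regex from that record on a made-up Lua line; the directory name "release" for the scanned file is hypothetical.

import os
import re

require_exp = r"(require\s*\(?\s*[\"'])(.+?)([\"'])"   # same pattern as in the record

line = 'local utils = require("../shared/utils")'
dirname = "release"  # hypothetical directory of the file being scanned

for prefix, required, suffix in re.findall(require_exp, line):
    resolved = os.path.normpath(os.path.join(dirname, required))
    line = line.replace(prefix + required + suffix,
                        prefix + resolved.replace("\\", "\\\\") + suffix, 1)

print(line)  # on POSIX: local utils = require("shared/utils")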
rldejournett1
[ { "content": "import importlib\nimport itertools\nimport json\n\nfrom great_expectations.marshmallow__shade import ValidationError\n\n\nclass GreatExpectationsError(Exception):\n def __init__(self, message):\n self.message = message\n super().__init__(message)\n\n\nclass GreatExpectationsValidationError(ValidationError, GreatExpectationsError):\n def __init__(self, message, validation_error=None):\n self.message = message\n self.messages = None\n if validation_error is not None:\n self.messages = validation_error.messages\n\n def __str__(self) -> str:\n if self.message is None:\n return self.messages\n return self.message\n\n\nclass SuiteEditNotebookCustomTemplateModuleNotFoundError(ModuleNotFoundError):\n def __init__(self, custom_module):\n message = f\"The custom module '{custom_module}' could not be found\"\n super().__init__(message)\n\n\nclass DataContextError(GreatExpectationsError):\n pass\n\n\nclass CheckpointError(DataContextError):\n pass\n\n\nclass CheckpointNotFoundError(CheckpointError):\n pass\n\n\nclass StoreBackendError(DataContextError):\n pass\n\n\nclass UnavailableMetricError(GreatExpectationsError):\n pass\n\n\nclass ParserError(GreatExpectationsError):\n pass\n\n\nclass InvalidConfigurationYamlError(DataContextError):\n pass\n\n\nclass InvalidTopLevelConfigKeyError(GreatExpectationsError):\n pass\n\n\nclass MissingTopLevelConfigKeyError(GreatExpectationsValidationError):\n pass\n\n\nclass InvalidBaseYamlConfigError(GreatExpectationsValidationError):\n def __init__(self, message, validation_error=None, field_name=None):\n if validation_error is not None:\n if (\n validation_error\n and validation_error.messages\n and isinstance(validation_error.messages, dict)\n and all([key is None for key in validation_error.messages.keys()])\n ):\n validation_error.messages = list(\n itertools.chain.from_iterable(validation_error.messages.values())\n )\n super().__init__(message=message, validation_error=validation_error)\n self.field_name = field_name\n\n\nclass InvalidDataContextConfigError(InvalidBaseYamlConfigError):\n pass\n\n\nclass InvalidCheckpointConfigError(InvalidBaseYamlConfigError):\n pass\n\n\nclass InvalidBatchKwargsError(GreatExpectationsError):\n pass\n\n\nclass InvalidBatchSpecError(GreatExpectationsError):\n pass\n\n\nclass InvalidBatchIdError(GreatExpectationsError):\n pass\n\n\nclass InvalidDataContextKeyError(DataContextError):\n pass\n\n\nclass UnsupportedConfigVersionError(DataContextError):\n pass\n\n\nclass EvaluationParameterError(GreatExpectationsError):\n pass\n\n\nclass ProfilerError(GreatExpectationsError):\n pass\n\n\nclass InvalidConfigError(DataContextError):\n def __init__(self, message):\n self.message = message\n super().__init__(self.message)\n\n\nclass MissingConfigVariableError(InvalidConfigError):\n def __init__(self, message, missing_config_variable=None):\n if not missing_config_variable:\n missing_config_variable = []\n self.message = message\n self.missing_config_variable = missing_config_variable\n super().__init__(self.message)\n\n\nclass AmbiguousDataAssetNameError(DataContextError):\n def __init__(self, message, candidates=None):\n self.message = message\n self.candidates = candidates\n super().__init__(self.message)\n\n\nclass StoreConfigurationError(DataContextError):\n pass\n\n\nclass InvalidExpectationKwargsError(GreatExpectationsError):\n pass\n\n\nclass InvalidExpectationConfigurationError(GreatExpectationsError):\n pass\n\n\nclass InvalidValidationResultError(GreatExpectationsError):\n pass\n\n\nclass 
GreatExpectationsTypeError(TypeError):\n pass\n\n\nclass StoreError(DataContextError):\n pass\n\n\nclass InvalidKeyError(StoreError):\n pass\n\n\nclass InvalidCacheValueError(GreatExpectationsError):\n def __init__(self, result_dict):\n template = \"\"\"\\\nInvalid result values were found when trying to instantiate an ExpectationValidationResult.\n- Invalid result values are likely caused by inconsistent cache values.\n- Great Expectations enables caching by default.\n- Please ensure that caching behavior is consistent between the underlying Dataset (e.g. Spark) and Great Expectations.\nResult: {}\n\"\"\"\n self.message = template.format(json.dumps(result_dict, indent=2))\n super().__init__(self.message)\n\n\nclass ConfigNotFoundError(DataContextError):\n \"\"\"The great_expectations dir could not be found.\"\"\"\n\n def __init__(self):\n self.message = \"\"\"Error: No great_expectations directory was found here!\n - Please check that you are in the correct directory or have specified the correct directory.\n - If you have never run Great Expectations in this project, please run `great_expectations init` to get started.\n\"\"\"\n super().__init__(self.message)\n\n\nclass PluginModuleNotFoundError(GreatExpectationsError):\n \"\"\"A module import failed.\"\"\"\n\n def __init__(self, module_name):\n template = \"\"\"\\\nNo module named `{}` could be found in your plugins directory.\n - Please verify your plugins directory is configured correctly.\n - Please verify you have a module named `{}` in your plugins directory.\n\"\"\"\n self.message = template.format(module_name, module_name)\n\n colored_template = \"<red>\" + template + \"</red>\"\n module_snippet = \"</red><yellow>\" + module_name + \"</yellow><red>\"\n self.cli_colored_message = colored_template.format(\n module_snippet, module_snippet\n )\n super().__init__(self.message)\n\n\nclass PluginClassNotFoundError(DataContextError, AttributeError):\n \"\"\"A module import failed.\"\"\"\n\n def __init__(self, module_name, class_name):\n class_name_changes = {\n \"FixedLengthTupleFilesystemStoreBackend\": \"TupleFilesystemStoreBackend\",\n \"FixedLengthTupleS3StoreBackend\": \"TupleS3StoreBackend\",\n \"FixedLengthTupleGCSStoreBackend\": \"TupleGCSStoreBackend\",\n \"InMemoryEvaluationParameterStore\": \"EvaluationParameterStore\",\n \"DatabricksTableGenerator\": \"DatabricksTableBatchKwargsGenerator\",\n \"GlobReaderGenerator\": \"GlobReaderBatchKwargsGenerator\",\n \"SubdirReaderGenerator\": \"SubdirReaderBatchKwargsGenerator\",\n \"QueryGenerator\": \"QueryBatchKwargsGenerator\",\n \"TableGenerator\": \"TableBatchKwargsGenerator\",\n \"S3Generator\": \"S3GlobReaderBatchKwargsGenerator\",\n \"ExtractAndStoreEvaluationParamsAction\": \"StoreEvaluationParametersAction\",\n \"StoreAction\": \"StoreValidationResultAction\",\n }\n\n if class_name_changes.get(class_name):\n template = \"\"\"The module: `{}` does not contain the class: `{}`.\n The class name `{}` has changed to `{}`.\"\"\"\n self.message = template.format(\n module_name, class_name, class_name, class_name_changes.get(class_name)\n )\n else:\n template = \"\"\"The module: `{}` does not contain the class: `{}`.\n - Please verify that the class named `{}` exists.\"\"\"\n self.message = template.format(module_name, class_name, class_name)\n\n colored_template = \"<red>\" + template + \"</red>\"\n module_snippet = \"</red><yellow>\" + module_name + \"</yellow><red>\"\n class_snippet = \"</red><yellow>\" + class_name + \"</yellow><red>\"\n if class_name_changes.get(class_name):\n 
new_class_snippet = (\n \"</red><yellow>\" + class_name_changes.get(class_name) + \"</yellow><red>\"\n )\n self.cli_colored_message = colored_template.format(\n module_snippet, class_snippet, class_snippet, new_class_snippet\n )\n else:\n self.cli_colored_message = colored_template.format(\n module_snippet,\n class_snippet,\n class_snippet,\n )\n super().__init__(self.message)\n\n\nclass ClassInstantiationError(GreatExpectationsError):\n def __init__(self, module_name, package_name, class_name):\n module_spec = importlib.util.find_spec(module_name, package=package_name)\n if not module_spec:\n if not package_name:\n package_name = \"\"\n self.message = f\"\"\"No module named \"{package_name + module_name}\" could be found in the repository. \\\nPlease make sure that the file, corresponding to this package and module, exists and that dynamic loading of code \\\nmodules, templates, and assets is supported in your execution environment. This error is unrecoverable.\n \"\"\"\n else:\n self.message = f\"\"\"The module \"{module_name}\" exists; however, the system is unable to create an instance \\\nof the class \"{class_name}\", searched for inside this module. Please make sure that the class named \"{class_name}\" is \\\nproperly defined inside its intended module and declared correctly by the calling entity. This error is unrecoverable.\n \"\"\"\n super().__init__(self.message)\n\n\nclass ExpectationSuiteNotFoundError(GreatExpectationsError):\n def __init__(self, data_asset_name):\n self.data_asset_name = data_asset_name\n self.message = (\n \"No expectation suite found for data_asset_name %s\" % data_asset_name\n )\n super().__init__(self.message)\n\n\nclass BatchKwargsError(DataContextError):\n def __init__(self, message, batch_kwargs=None):\n self.message = message\n self.batch_kwargs = batch_kwargs\n super().__init__(self.message)\n\n\nclass BatchDefinitionError(DataContextError):\n def __init__(self, message):\n self.message = message\n super().__init__(self.message)\n\n\nclass BatchSpecError(DataContextError):\n def __init__(self, message):\n self.message = message\n super().__init__(self.message)\n\n\nclass DatasourceError(DataContextError):\n def __init__(self, datasource_name, message):\n self.message = \"Cannot initialize datasource {}, error: {}\".format(\n datasource_name,\n message,\n )\n super().__init__(self.message)\n\n\nclass DatasourceConfigurationError(DatasourceError):\n pass\n\n\nclass DatasourceInitializationError(DatasourceError):\n pass\n\n\nclass DatasourceKeyPairAuthBadPassphraseError(DatasourceInitializationError):\n pass\n\n\nclass InvalidConfigValueTypeError(DataContextError):\n pass\n\n\nclass DataConnectorError(DataContextError):\n def __init__(self, message):\n self.message = message\n super().__init__(self.message)\n\n\nclass ExecutionEngineError(DataContextError):\n def __init__(self, message):\n self.message = message\n super().__init__(self.message)\n\n\nclass PartitionQueryError(DataContextError):\n def __init__(self, message):\n self.message = message\n super().__init__(self.message)\n\n\nclass SorterError(DataContextError):\n def __init__(self, message):\n self.message = message\n super().__init__(self.message)\n", "id": "7399241", "language": "Python", "matching_score": 1.9746594429016113, "max_stars_count": 2, "path": "great_expectations/exceptions/exceptions.py" }, { "content": "import copy\nimport logging\nfrom typing import Optional\n\nimport requests\n\nimport great_expectations.exceptions as ge_exceptions\nfrom great_expectations.core.batch import 
BatchRequest\nfrom great_expectations.core.util import nested_update\nfrom great_expectations.data_context.types.base import CheckpointConfig\n\nlogger = logging.getLogger(__name__)\n\n\ndef send_slack_notification(query, slack_webhook):\n session = requests.Session()\n\n try:\n response = session.post(url=slack_webhook, json=query)\n except requests.ConnectionError:\n logger.warning(\n \"Failed to connect to Slack webhook at {url} \"\n \"after {max_retries} retries.\".format(url=slack_webhook, max_retries=10)\n )\n except Exception as e:\n logger.error(str(e))\n else:\n if response.status_code != 200:\n logger.warning(\n \"Request to Slack webhook at {url} \"\n \"returned error {status_code}: {text}\".format(\n url=slack_webhook,\n status_code=response.status_code,\n text=response.text,\n )\n )\n else:\n return \"Slack notification succeeded.\"\n\n\ndef send_opsgenie_alert(query, suite_name, settings):\n \"\"\"Creates an alert in Opsgenie.\"\"\"\n if settings[\"region\"] != None:\n url = \"https://api.{region}.opsgenie.com/v2/alerts\".format(\n region=settings[\"region\"]\n ) # accomodate for Europeans\n else:\n url = \"https://api.opsgenie.com/v2/alerts\"\n\n headers = {\n \"Authorization\": \"GenieKey {api_key}\".format(api_key=settings[\"api_key\"])\n }\n payload = {\n \"message\": \"Great Expectations suite {suite_name} failed\".format(\n suite_name=suite_name\n ),\n \"description\": query,\n \"priority\": settings[\"priority\"], # allow this to be modified in settings\n }\n\n session = requests.Session()\n\n try:\n response = session.post(url, headers=headers, json=payload)\n except requests.ConnectionError:\n logger.warning(\"Failed to connect to Opsgenie\")\n except Exception as e:\n logger.error(str(e))\n else:\n if response.status_code != 202:\n logger.warning(\n \"Request to Opsgenie API at {url} \"\n \"returned error {status_code}: {text}\".format(\n url=url,\n status_code=response.status_code,\n text=response.text,\n )\n )\n else:\n return \"success\"\n return \"error\"\n\n\ndef send_microsoft_teams_notifications(query, microsoft_teams_webhook):\n session = requests.Session()\n try:\n response = session.post(url=microsoft_teams_webhook, json=query)\n except requests.ConnectionError:\n logger.warning(\n \"Failed to connect to Microsoft Teams webhook at {url} \"\n \"after {max_retries} retries.\".format(\n url=microsoft_teams_webhook, max_retries=10\n )\n )\n except Exception as e:\n logger.error(str(e))\n else:\n if response.status_code != 200:\n logger.warning(\n \"Request to Microsoft Teams webhook at {url} \"\n \"returned error {status_code}: {text}\".format(\n url=microsoft_teams_webhook,\n status_code=response.status_code,\n text=response.text,\n )\n )\n return\n else:\n return \"Microsoft Teams notification succeeded.\"\n\n\ndef send_webhook_notifications(query, webhook, target_platform):\n session = requests.Session()\n try:\n response = session.post(url=webhook, json=query)\n except requests.ConnectionError:\n logger.warning(\n \"Failed to connect to {target_platform} webhook at {url} \"\n \"after {max_retries} retries.\".format(\n url=webhook,\n max_retries=10,\n target_platform=target_platform,\n )\n )\n except Exception as e:\n logger.error(str(e))\n else:\n if response.status_code != 200:\n logger.warning(\n \"Request to {target_platform} webhook at {url} \"\n \"returned error {status_code}: {text}\".format(\n url=webhook,\n status_code=response.status_code,\n target_platform=target_platform,\n text=response.text,\n )\n )\n else:\n return \"{target_platform} 
notification succeeded.\".format(\n target_platform=target_platform\n )\n\n\ndef get_runtime_batch_request(\n substituted_runtime_config: CheckpointConfig,\n validation_batch_request: Optional[dict] = None,\n) -> BatchRequest:\n if substituted_runtime_config.batch_request is None:\n return (\n validation_batch_request\n if validation_batch_request is None\n else BatchRequest(**validation_batch_request)\n )\n\n if validation_batch_request is None:\n return BatchRequest(**substituted_runtime_config.batch_request)\n\n runtime_batch_request_dict: dict = copy.deepcopy(validation_batch_request)\n for key, val in runtime_batch_request_dict.items():\n if (\n val is not None\n and substituted_runtime_config.batch_request.get(key) is not None\n ):\n raise ge_exceptions.CheckpointError(\n f'BatchRequest attribute \"{key}\" was specified in both validation and top-level CheckpointConfig.'\n )\n runtime_batch_request_dict.update(substituted_runtime_config.batch_request)\n return BatchRequest(**runtime_batch_request_dict)\n\n\ndef get_substituted_validation_dict(\n substituted_runtime_config: CheckpointConfig, validation_dict: dict\n) -> dict:\n substituted_validation_dict = {\n \"batch_request\": get_runtime_batch_request(\n substituted_runtime_config=substituted_runtime_config,\n validation_batch_request=validation_dict.get(\"batch_request\"),\n ),\n \"expectation_suite_name\": validation_dict.get(\"expectation_suite_name\")\n or substituted_runtime_config.expectation_suite_name,\n \"action_list\": CheckpointConfig.get_updated_action_list(\n base_action_list=substituted_runtime_config.action_list,\n other_action_list=validation_dict.get(\"action_list\", {}),\n ),\n \"evaluation_parameters\": nested_update(\n substituted_runtime_config.evaluation_parameters,\n validation_dict.get(\"evaluation_parameters\", {}),\n ),\n \"runtime_configuration\": nested_update(\n substituted_runtime_config.runtime_configuration,\n validation_dict.get(\"runtime_configuration\", {}),\n ),\n }\n if validation_dict.get(\"name\") is not None:\n substituted_validation_dict[\"name\"] = validation_dict[\"name\"]\n validate_validation_dict(substituted_validation_dict)\n return substituted_validation_dict\n\n\ndef validate_validation_dict(validation_dict: dict):\n if validation_dict.get(\"batch_request\") is None:\n raise ge_exceptions.CheckpointError(\"validation batch_request cannot be None\")\n if not validation_dict.get(\"expectation_suite_name\"):\n raise ge_exceptions.CheckpointError(\n \"validation expectation_suite_name must be specified\"\n )\n if not validation_dict.get(\"action_list\"):\n raise ge_exceptions.CheckpointError(\"validation action_list cannot be empty\")\n", "id": "678810", "language": "Python", "matching_score": 2.299384832382202, "max_stars_count": 2, "path": "great_expectations/checkpoint/util.py" }, { "content": "import datetime\nimport json\nimport locale\nimport os\nimport random\nimport shutil\nimport threading\nfrom types import ModuleType\nfrom typing import Optional\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom freezegun import freeze_time\nfrom ruamel.yaml import YAML\n\nimport great_expectations as ge\nfrom great_expectations import DataContext\nfrom great_expectations.core import ExpectationConfiguration, expectationSuiteSchema\nfrom great_expectations.core.expectation_suite import ExpectationSuite\nfrom great_expectations.core.expectation_validation_result import (\n ExpectationValidationResult,\n)\nfrom great_expectations.data_context.types.base import CheckpointConfig\nfrom 
great_expectations.data_context.types.resource_identifiers import (\n ConfigurationIdentifier,\n ExpectationSuiteIdentifier,\n)\nfrom great_expectations.data_context.util import (\n file_relative_path,\n instantiate_class_from_config,\n)\nfrom great_expectations.dataset.pandas_dataset import PandasDataset\nfrom great_expectations.datasource import SqlAlchemyDatasource\nfrom great_expectations.datasource.new_datasource import Datasource\nfrom great_expectations.execution_engine import SqlAlchemyExecutionEngine\nfrom great_expectations.util import import_library_module\n\nfrom .test_utils import expectationSuiteValidationResultSchema, get_dataset\n\nyaml = YAML()\n###\n#\n# NOTE: THESE TESTS ARE WRITTEN WITH THE en_US.UTF-8 LOCALE AS DEFAULT FOR STRING FORMATTING\n#\n###\n\nlocale.setlocale(locale.LC_ALL, \"en_US.UTF-8\")\n\n\nclass LockingConnectionCheck:\n def __init__(self, sa, connection_string):\n self.lock = threading.Lock()\n self.sa = sa\n self.connection_string = connection_string\n self._is_valid = None\n\n def is_valid(self):\n with self.lock:\n if self._is_valid is None:\n try:\n engine = self.sa.create_engine(self.connection_string)\n conn = engine.connect()\n conn.close()\n self._is_valid = True\n except (ImportError, self.sa.exc.SQLAlchemyError) as e:\n print(f\"{str(e)}\")\n self._is_valid = False\n return self._is_valid\n\n\ndef pytest_configure(config):\n config.addinivalue_line(\n \"markers\",\n \"smoketest: mark test as smoketest--it does not have useful assertions but may produce side effects \"\n \"that require manual inspection.\",\n )\n config.addinivalue_line(\n \"markers\",\n \"rendered_output: produces rendered output that should be manually reviewed.\",\n )\n config.addinivalue_line(\n \"markers\",\n \"aws_integration: runs aws integration test that may be very slow and requires credentials\",\n )\n\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--no-spark\",\n action=\"store_true\",\n help=\"If set, suppress all tests against the spark test suite\",\n )\n parser.addoption(\n \"--no-sqlalchemy\",\n action=\"store_true\",\n help=\"If set, suppress all tests using sqlalchemy\",\n )\n parser.addoption(\n \"--no-postgresql\",\n action=\"store_true\",\n help=\"If set, suppress all tests against postgresql\",\n )\n parser.addoption(\n \"--mysql\",\n action=\"store_true\",\n help=\"If set, execute tests against mysql\",\n )\n parser.addoption(\n \"--mssql\",\n action=\"store_true\",\n help=\"If set, execute tests against mssql\",\n )\n parser.addoption(\n \"--aws-integration\",\n action=\"store_true\",\n help=\"If set, run aws integration tests\",\n )\n\n\ndef build_test_backends_list(metafunc):\n test_backends = [\"PandasDataset\"]\n no_spark = metafunc.config.getoption(\"--no-spark\")\n if not no_spark:\n try:\n import pyspark\n from pyspark.sql import SparkSession\n except ImportError:\n raise ValueError(\"spark tests are requested, but pyspark is not installed\")\n test_backends += [\"SparkDFDataset\"]\n no_sqlalchemy = metafunc.config.getoption(\"--no-sqlalchemy\")\n if not no_sqlalchemy:\n test_backends += [\"sqlite\"]\n\n sa: Optional[ModuleType] = import_library_module(module_name=\"sqlalchemy\")\n\n no_postgresql = metafunc.config.getoption(\"--no-postgresql\")\n if not (sa is None or no_postgresql):\n ###\n # NOTE: 20190918 - JPC: Since I've had to relearn this a few times, a note here.\n # SQLALCHEMY coerces postgres DOUBLE_PRECISION to float, which loses precision\n # round trip compared to NUMERIC, which stays as a python DECIMAL\n\n # Be sure to 
ensure that tests (and users!) understand that subtlety,\n # which can be important for distributional expectations, for example.\n ###\n connection_string = \"postgresql://postgres@localhost/test_ci\"\n checker = LockingConnectionCheck(sa, connection_string)\n if checker.is_valid() is True:\n test_backends += [\"postgresql\"]\n else:\n raise ValueError(\n f\"backend-specific tests are requested, but unable to connect to the database at \"\n f\"{connection_string}\"\n )\n mysql = metafunc.config.getoption(\"--mysql\")\n if sa and mysql:\n try:\n engine = sa.create_engine(\"mysql+pymysql://root@localhost/test_ci\")\n conn = engine.connect()\n conn.close()\n except (ImportError, sa.exc.SQLAlchemyError):\n raise ImportError(\n \"mysql tests are requested, but unable to connect to the mysql database at \"\n \"'mysql+pymysql://root@localhost/test_ci'\"\n )\n test_backends += [\"mysql\"]\n mssql = metafunc.config.getoption(\"--mssql\")\n if sa and mssql:\n try:\n engine = sa.create_engine(\n \"mssql+pyodbc://sa:ReallyStrongPwd1234%^&*@localhost:1433/test_ci?driver=ODBC Driver 17 for SQL Server&charset=utf8&autocommit=true\",\n # echo=True,\n )\n conn = engine.connect()\n conn.close()\n except (ImportError, sa.exc.SQLAlchemyError):\n raise ImportError(\n \"mssql tests are requested, but unable to connect to the mssql database at \"\n \"'mssql+pyodbc://sa:ReallyStrongPwd1234%^&*@localhost:1433/test_ci?driver=ODBC Driver 17 for SQL Server&charset=utf8&autocommit=true'\",\n )\n test_backends += [\"mssql\"]\n return test_backends\n\n\ndef build_test_backends_list_cfe(metafunc):\n test_backends = [\"pandas\"]\n no_spark = metafunc.config.getoption(\"--no-spark\")\n if not no_spark:\n try:\n import pyspark\n from pyspark.sql import SparkSession\n except ImportError:\n raise ValueError(\"spark tests are requested, but pyspark is not installed\")\n test_backends += [\"spark\"]\n no_sqlalchemy = metafunc.config.getoption(\"--no-sqlalchemy\")\n if not no_sqlalchemy:\n test_backends += [\"sqlite\"]\n\n sa: Optional[ModuleType] = import_library_module(module_name=\"sqlalchemy\")\n\n no_postgresql = metafunc.config.getoption(\"--no-postgresql\")\n if not (sa is None or no_postgresql):\n ###\n # NOTE: 20190918 - JPC: Since I've had to relearn this a few times, a note here.\n # SQLALCHEMY coerces postgres DOUBLE_PRECISION to float, which loses precision\n # round trip compared to NUMERIC, which stays as a python DECIMAL\n\n # Be sure to ensure that tests (and users!) 
understand that subtlety,\n # which can be important for distributional expectations, for example.\n ###\n connection_string = \"postgresql://postgres@localhost/test_ci\"\n checker = LockingConnectionCheck(sa, connection_string)\n if checker.is_valid() is True:\n test_backends += [\"postgresql\"]\n else:\n raise ValueError(\n f\"backend-specific tests are requested, but unable to connect to the database at \"\n f\"{connection_string}\"\n )\n mysql = metafunc.config.getoption(\"--mysql\")\n if sa and mysql:\n try:\n engine = sa.create_engine(\"mysql+pymysql://root@localhost/test_ci\")\n conn = engine.connect()\n conn.close()\n except (ImportError, sa.exc.SQLAlchemyError):\n raise ImportError(\n \"mysql tests are requested, but unable to connect to the mysql database at \"\n \"'mysql+pymysql://root@localhost/test_ci'\"\n )\n test_backends += [\"mysql\"]\n mssql = metafunc.config.getoption(\"--mssql\")\n if sa and mssql:\n try:\n engine = sa.create_engine(\n \"mssql+pyodbc://sa:ReallyStrongPwd1234%^&*@localhost:1433/test_ci?driver=ODBC Driver 17 for SQL Server&charset=utf8&autocommit=true\",\n # echo=True,\n )\n conn = engine.connect()\n conn.close()\n except (ImportError, sa.exc.SQLAlchemyError):\n raise ImportError(\n \"mssql tests are requested, but unable to connect to the mssql database at \"\n \"'mssql+pyodbc://sa:ReallyStrongPwd1234%^&*@localhost:1433/test_ci?driver=ODBC Driver 17 for SQL Server&charset=utf8&autocommit=true'\",\n )\n test_backends += [\"mssql\"]\n return test_backends\n\n\ndef pytest_generate_tests(metafunc):\n test_backends = build_test_backends_list(metafunc)\n if \"test_backend\" in metafunc.fixturenames:\n metafunc.parametrize(\"test_backend\", test_backends, scope=\"module\")\n if \"test_backends\" in metafunc.fixturenames:\n metafunc.parametrize(\"test_backends\", [test_backends], scope=\"module\")\n\n\ndef pytest_collection_modifyitems(config, items):\n if config.getoption(\"--aws-integration\"):\n # --aws-integration given in cli: do not skip aws-integration tests\n return\n skip_aws_integration = pytest.mark.skip(\n reason=\"need --aws-integration option to run\"\n )\n for item in items:\n if \"aws_integration\" in item.keywords:\n item.add_marker(skip_aws_integration)\n\n\[email protected](autouse=True)\ndef no_usage_stats(monkeypatch):\n # Do not generate usage stats from test runs\n monkeypatch.setenv(\"GE_USAGE_STATS\", \"False\")\n\n\[email protected]\ndef sa(test_backends):\n if (\n \"postgresql\" not in test_backends\n and \"sqlite\" not in test_backends\n and \"mysql\" not in test_backends\n and \"mssql\" not in test_backends\n ):\n pytest.skip(\"No recognized sqlalchemy backend selected.\")\n else:\n try:\n import sqlalchemy as sa\n\n return sa\n except ImportError:\n raise ValueError(\"SQL Database tests require sqlalchemy to be installed.\")\n\n\[email protected]\ndef spark_session(test_backends):\n if \"SparkDFDataset\" not in test_backends:\n pytest.skip(\"No spark backend selected.\")\n try:\n import pyspark\n from pyspark.sql import SparkSession\n\n return SparkSession.builder.getOrCreate()\n except ImportError:\n raise ValueError(\"spark tests are requested, but pyspark is not installed\")\n\n\[email protected]\ndef empty_expectation_suite():\n expectation_suite = {\n \"expectation_suite_name\": \"default\",\n \"meta\": {},\n \"expectations\": [],\n }\n return expectation_suite\n\n\[email protected]\ndef basic_expectation_suite():\n expectation_suite = ExpectationSuite(\n expectation_suite_name=\"default\",\n meta={},\n expectations=[\n 
ExpectationConfiguration(\n expectation_type=\"expect_column_to_exist\",\n kwargs={\"column\": \"infinities\"},\n ),\n ExpectationConfiguration(\n expectation_type=\"expect_column_to_exist\", kwargs={\"column\": \"nulls\"}\n ),\n ExpectationConfiguration(\n expectation_type=\"expect_column_to_exist\", kwargs={\"column\": \"naturals\"}\n ),\n ExpectationConfiguration(\n expectation_type=\"expect_column_values_to_be_unique\",\n kwargs={\"column\": \"naturals\"},\n ),\n ],\n )\n return expectation_suite\n\n\[email protected]\ndef file_data_asset(tmp_path):\n tmp_path = str(tmp_path)\n path = os.path.join(tmp_path, \"file_data_asset.txt\")\n with open(path, \"w+\") as file:\n file.write(json.dumps([0, 1, 2, 3, 4]))\n\n return ge.data_asset.FileDataAsset(file_path=path)\n\n\[email protected]\ndef numeric_high_card_dict():\n data = {\n \"norm_0_1\": [\n 0.7225866251125405,\n -0.5951819764073379,\n -0.2679313226299394,\n -0.22503289285616823,\n 0.1432092195399402,\n 1.1874676802669433,\n 1.2766412196640815,\n 0.15197071140718296,\n -0.08787273509474242,\n -0.14524643717509128,\n -1.236408169492396,\n -0.1595432263317598,\n 1.0856768114741797,\n 0.5082788229519655,\n 0.26419244684748955,\n -0.2532308428977167,\n -0.6362679196021943,\n -3.134120304969242,\n -1.8990888524318292,\n 0.15701781863102648,\n -0.775788419966582,\n -0.7400872167978756,\n -0.10578357492485335,\n 0.30287010067847436,\n -1.2127058770179304,\n -0.6750567678010801,\n 0.3341434318919877,\n 1.8336516507046157,\n 1.105410842250908,\n -0.7711783703442725,\n -0.20834347267477862,\n -0.06315849766945486,\n 0.003016997583954831,\n -1.0500016329150343,\n -0.9168020284223636,\n 0.306128397266698,\n 1.0980602112281863,\n -0.10465519493772572,\n 0.4557797534454941,\n -0.2524452955086468,\n -1.6176089110359837,\n 0.46251282530754667,\n 0.45751208998354903,\n 0.4222844954971609,\n 0.9651098606162691,\n -0.1364401431697167,\n -0.4988616288584964,\n -0.29549238375582904,\n 0.6950204582392359,\n 0.2975369992016046,\n -1.0159498719807218,\n 1.3704532401348395,\n 1.1210419577766673,\n 1.2051869452003332,\n 0.10749349867353084,\n -3.1876892257116562,\n 1.316240976262548,\n -1.3777452919511493,\n -1.0666211985935259,\n 1.605446695828751,\n -0.39682821266996865,\n -0.2828059717857655,\n 1.30488698803017,\n -2.116606225467923,\n -0.2026680301462151,\n -0.05504008273574069,\n -0.028520163428411835,\n 0.4424105678123449,\n -0.3427628263418371,\n 0.23805293411919937,\n -0.7515414823259695,\n -0.1272505897548366,\n 1.803348436304099,\n -2.0178252709022124,\n 0.4860300090112474,\n 1.2304054166426217,\n 0.7228668982068365,\n 1.7400607500575112,\n 0.3480274098246697,\n -0.3887978895385282,\n -1.6511926233909175,\n 0.14517929503564567,\n -1.1599010576123796,\n -0.016133552438119002,\n 0.47157644883706273,\n 0.27657785075518254,\n 1.4464286976282463,\n -1.2605489185634533,\n -1.2548765025615338,\n 0.0755319579826929,\n 1.0476733637516833,\n -0.7038690219524807,\n -0.9580696842862921,\n -0.18135657098008018,\n -0.18163993379314564,\n 0.4092798531146971,\n -2.049808182546896,\n -1.2447062617916826,\n -1.6681140306283337,\n 1.0709944517933483,\n -0.7059385234342846,\n -0.8033587669003331,\n -1.8152275905903312,\n 0.11729996097670137,\n 2.2994900038012376,\n -0.1291192451734159,\n -0.6731565869164164,\n -0.06690994571366346,\n -0.40330072968473235,\n -0.23927186025094221,\n 2.7756216937096676,\n 0.06441299443146056,\n -0.5095247173507204,\n -0.5228853558871007,\n 0.806629654091097,\n -2.110096084114651,\n -0.1233374136509439,\n -1.021178519845751,\n 
0.058906278340351045,\n -0.26316852406211017,\n -1.2990807244026237,\n -0.1937986598084067,\n 0.3909222793445317,\n 0.578027315076297,\n -0.11837271520846208,\n -1.134297652720464,\n 0.496915417153268,\n -0.5315184110418045,\n 0.5284176849952198,\n -1.6810338988102331,\n 0.41220454054009154,\n 1.0554031136792,\n -1.4222775023918832,\n -1.1664353586956209,\n 0.018952180522661358,\n -0.04620616876577671,\n -0.8446292647938418,\n -0.6889432180332509,\n -0.16012081070647954,\n 0.5680940644754282,\n -1.9792941921407943,\n 0.35441842206114726,\n 0.12433268557499534,\n 0.25366905921805377,\n 0.6262297786892028,\n 1.327981424671081,\n 1.774834324890265,\n -0.9725604763128438,\n 0.42824027889428,\n 0.19725541390327114,\n 1.4640606982992412,\n 1.6484993842838995,\n 0.009848260786412894,\n -2.318740403198263,\n -0.4125245127403577,\n -0.15500831770388285,\n 1.010740123094443,\n 0.7509498708766653,\n -0.021415407776108144,\n 0.6466776546788641,\n -1.421096837521404,\n 0.5632248951325018,\n -1.230539161899903,\n -0.26766333435961503,\n -1.7208241092827994,\n -1.068122926814994,\n -1.6339248620455546,\n 0.07225436117508208,\n -1.2018233250224348,\n -0.07213000691963527,\n -1.0080992229563746,\n -1.151378048476321,\n -0.2660104149809121,\n 1.6307779136408695,\n 0.8394822016824073,\n -0.23362802143120032,\n -0.36799502320054384,\n 0.35359852278856263,\n 0.5830948999779656,\n -0.730683771776052,\n 1.4715728371820667,\n -1.0668090648998136,\n -1.025762014881618,\n 0.21056106958224155,\n -0.5141254207774576,\n -0.1592942838690149,\n 0.7688711617969363,\n -2.464535892598544,\n -0.33306989349452987,\n 0.9457207224940593,\n 0.36108072442574435,\n -0.6490066877470516,\n -0.8714147266896871,\n 0.6567118414749348,\n -0.18543305444915045,\n 0.11156511615955596,\n 0.7299392157186994,\n -0.9902398239693843,\n -1.3231344439063761,\n -1.1402773433114928,\n 0.3696183719476138,\n -1.0512718152423168,\n -0.6093518314203102,\n 0.0010622538704462257,\n -0.17676306948277776,\n -0.6291120128576891,\n 1.6390197341434742,\n -0.8105788162716191,\n -2.0105672384392204,\n -0.7909143328024505,\n -0.10510684692203587,\n -0.013384480496840259,\n 0.37683659744804815,\n -0.15123337965442354,\n 1.8427651248902048,\n 1.0371006855495906,\n 0.29198928612503655,\n -1.7455852392709181,\n 1.0854545339796853,\n 1.8156620972829793,\n 1.2399563224061596,\n 1.1196530775769857,\n 0.4349954478175989,\n 0.11093680938321168,\n 0.9945934589378227,\n -0.5779739742428905,\n 1.0398502505219054,\n -0.09401160691650227,\n 0.22793239636661505,\n -1.8664992140331715,\n -0.16104499274010126,\n -0.8497511318264537,\n -0.005035074822415585,\n -1.7956896952184151,\n 1.8304783101189757,\n 0.19094408763231646,\n 1.3353023874309002,\n 0.5889134606052353,\n -0.48487660139277866,\n 0.4817014755127622,\n 1.5981632863770983,\n 2.1416849775567943,\n -0.5524061711669017,\n 0.3364804821524787,\n -0.8609687548167294,\n 0.24548635047971906,\n -0.1281468603588133,\n -0.03871410517044196,\n -0.2678174852638268,\n 0.41800607312114096,\n -0.2503930647517959,\n 0.8432391494945226,\n -0.5684563173706987,\n -0.6737077809046504,\n 2.0559579098493606,\n -0.29098826888414253,\n -0.08572747304559661,\n -0.301857666880195,\n -0.3446199959065524,\n 0.7391340848217359,\n -0.3087136212446006,\n 0.5245553707204758,\n -3.063281336805349,\n 0.47471623010413705,\n 0.3733427291759615,\n -0.26216851429591426,\n -0.5433523111756248,\n 0.3305385199964823,\n -1.4866150542941634,\n -0.4699911958560942,\n 0.7312367186673805,\n -0.22346998944216903,\n -0.4102860865811592,\n -0.3003478250288424,\n 
-0.3436168605845268,\n 0.9456524589400904,\n -0.03710285453384255,\n 0.10330609878001526,\n 0.6919858329179392,\n 0.8673477607085118,\n 0.380742577915601,\n 0.5785785515837437,\n -0.011421905830097267,\n 0.587187810965595,\n -1.172536467775141,\n -0.532086162097372,\n -0.34440413367820183,\n -1.404900386188497,\n -0.1916375229779241,\n 1.6910999461291834,\n -0.6070351182769795,\n -0.8371447893868493,\n 0.8853944070432224,\n 1.4062946075925473,\n -0.4575973141608374,\n 1.1458755768004445,\n 0.2619874618238163,\n 1.7105876844856704,\n -1.3938976454537522,\n -0.11403217166441704,\n -1.0354305240085717,\n -0.4285770475062154,\n 0.10326635421187867,\n 0.6911853442971228,\n 0.6293835213179542,\n -0.819693698713199,\n -0.7378190403744175,\n -1.495947672573938,\n -1.2406693914431872,\n -1.0486341638186725,\n -1.3715759883075953,\n 3.585407817418151,\n -0.8007079372574223,\n -1.527336776754733,\n -0.4716571043072485,\n -0.6967311271405545,\n 1.0003347462169225,\n -0.30569565002022697,\n 0.3646134876772732,\n 0.49083033603832493,\n 0.07754580794955847,\n -0.13467337850920083,\n 0.02134473458605164,\n 0.5025183900540823,\n -0.940929087894874,\n 1.441600637127558,\n -0.0857298131221344,\n -0.575175243519591,\n 0.42622029657630595,\n -0.3239674701415489,\n 0.22648849821602596,\n -0.6636465305318631,\n 0.30415000329164754,\n -0.6170241274574016,\n 0.07578674772163065,\n 0.2952841441615124,\n 0.8120317689468056,\n -0.46861353019671337,\n 0.04718559572470416,\n -0.3105660017232523,\n -0.28898463203535724,\n 0.9575298065734561,\n -0.1977556031830993,\n 0.009658232624257272,\n 1.1432743259603295,\n -1.8989396918936858,\n 0.20787070770386357,\n 1.4256750543782999,\n -0.03838329973778874,\n -0.9051229357470373,\n -1.2002277085489457,\n 2.405569956130733,\n 1.895817948326675,\n -0.8260858325924574,\n 0.5759061866255807,\n 2.7022875569683342,\n 1.0591327405967745,\n 0.21449833798124354,\n 0.19970388388081273,\n 0.018242139911433558,\n -0.630960146999549,\n -2.389646042147776,\n 0.5424304992480339,\n -1.2159551561948718,\n -1.6851632640204128,\n -0.4812221268109694,\n 0.6217652794219579,\n -0.380139431677482,\n -0.2643524783321051,\n 0.5106648694993016,\n -0.895602157034141,\n -0.20559568725141816,\n 1.5449271875734911,\n 1.544075783565114,\n 0.17877619857826843,\n 1.9729717339967108,\n 0.8302033109816261,\n -0.39118561199170965,\n -0.4428357598297098,\n -0.02550407946753186,\n -1.0202977138210447,\n 2.6604654314300835,\n 1.9163029269361842,\n 0.34697436596877657,\n -0.8078124769022497,\n -1.3876596649099957,\n 0.44707250163663864,\n -0.6752837232272447,\n -0.851291770954755,\n 0.7599767868730256,\n 0.8134109401706875,\n -1.6766750539980289,\n -0.06051832829232975,\n -0.4652931327216134,\n -0.9249124398287735,\n 1.9022739762222731,\n 1.7632300613807597,\n 1.675335012283785,\n 0.47529854476887495,\n -0.7892463423254658,\n 0.3910120652706098,\n 0.5812432547936405,\n 0.2693084649672777,\n -0.08138564925779349,\n 0.9150619269526952,\n -0.8637356349272142,\n -0.14137853834901817,\n -0.20192754829896423,\n 0.04718228147088756,\n -0.9743600144318,\n -0.9936290943927825,\n 0.3544612180477054,\n 0.6839546770735121,\n 1.5089070357620178,\n 1.301167565172228,\n -1.5396145667672985,\n 0.42854366341485456,\n -1.5876582617301032,\n -0.0316985879141714,\n 0.3144220016570915,\n -0.05054766725644431,\n 0.2934139006870167,\n 0.11396170275994542,\n -0.6472140129693643,\n 1.6556030742445431,\n 1.0319410208453506,\n 0.3292217603989991,\n -0.058758121958605435,\n -0.19917171648476298,\n -0.5192866115874029,\n 0.1997510689920335,\n 
-1.3675686656161756,\n -1.7761517497832053,\n -0.11260276070167097,\n 0.9717892642758689,\n 0.0840815981843948,\n -0.40211265381258554,\n 0.27384496844034517,\n -1.0403875081272367,\n 1.2884781173493884,\n -1.8066239592554476,\n 1.1136979156298865,\n -0.06223155785690416,\n 1.3930381289015936,\n 0.4586305673655182,\n 1.3159249757827194,\n -0.5369892835955705,\n 0.17827408233621184,\n 0.22693934439969682,\n 0.8216240002114816,\n -1.0422409752281838,\n 0.3329686606709231,\n -1.5128804353968217,\n 1.0323052869815534,\n 1.1640486934424354,\n 1.6450118078345612,\n -0.6717687395070293,\n -0.08135119186406627,\n 1.2746921873544188,\n -0.8255794145095643,\n 0.7123504776564864,\n 0.6953336934741682,\n 2.191382322698439,\n 1.4155790749261592,\n 2.4681081786912866,\n -2.2904357033803815,\n -0.8375155191566624,\n 1.1040106662196736,\n 0.7084133268872015,\n -3.401968681942055,\n 0.23237090512844757,\n 1.1199436238058174,\n 0.6333916486592628,\n -0.6012340913121055,\n -0.3693951838866523,\n -1.7742670566875682,\n -0.36431378282545124,\n -0.4042586409194551,\n -0.04648644034604476,\n 1.5138191613743486,\n -0.2053670782251071,\n 1.8679122383251414,\n 0.8355881018692999,\n -0.5369705129279005,\n -0.7909355080370954,\n 2.1080036780007987,\n 0.019537331188020687,\n -1.4672982688640615,\n -1.486842866467901,\n -1.1036839537574874,\n 1.0800858540685894,\n -0.2313974176207594,\n 0.47763272078271807,\n -1.9196070490691473,\n -0.8193535127855751,\n -0.6853651905832031,\n -0.18272370464882973,\n -0.33413577684633056,\n 2.2261342671906106,\n 1.6853726343573683,\n 0.8563421109235769,\n 1.0468799885096596,\n 0.12189082561416206,\n -1.3596466927672854,\n -0.7607432068282968,\n 0.7061728288620306,\n -0.4384478018639071,\n 0.8620104661898899,\n 1.04258758121448,\n -1.1464159128515612,\n 0.9617945424413628,\n 0.04987102831355013,\n -0.8472878887606543,\n 0.32986774370339184,\n 1.278319839581162,\n -0.4040926804592034,\n -0.6691567800662129,\n 0.9415431940597389,\n 0.3974846022291844,\n -0.8425204662387112,\n -1.506166868030291,\n -0.04248497940038203,\n 0.26434168799067986,\n -1.5698380163561454,\n -0.6651727917714935,\n 1.2400220571204048,\n -0.1251830593977037,\n 0.6156254221302833,\n 0.43585628657139575,\n -1.6014619037611209,\n 1.9152323656075512,\n -0.8847911114213622,\n 1.359854519784993,\n -0.5554989575409871,\n 0.25064804193232354,\n 0.7976616257678464,\n 0.37834567410982123,\n -0.6300374359617635,\n -1.0613465068052854,\n -0.866474302027355,\n 1.2458556977164312,\n 0.577814049080149,\n 2.069400463823993,\n 0.9068690176961165,\n -0.5031387968484738,\n -0.3640749863516844,\n -1.041502465417534,\n 0.6732994659644133,\n -0.006355018868252906,\n -0.3650517541386253,\n 1.0975063446734974,\n -2.203726812834859,\n 1.060685913143899,\n -0.4618706570892267,\n 0.06475263817517128,\n -0.19326357638969882,\n -0.01812119454736379,\n 0.1337618009668529,\n 1.1838276997792907,\n 0.4273677345455913,\n -0.4912341608307858,\n 0.2349993979417651,\n 0.9566260826411601,\n -0.7948243131958422,\n -0.6168334352331588,\n 0.3369425926447926,\n 0.8547756445246633,\n 0.2666330662219728,\n 2.431868771129661,\n 1.0089732701876513,\n -0.1162341515974066,\n -1.1746306816795218,\n -0.08227639025627424,\n 0.794676385688044,\n 0.15005011094018297,\n -0.8763821573601055,\n -1.0811684990769739,\n 0.6311588092267179,\n 0.026124278982220386,\n 0.8306502001533514,\n 1.0856487813261877,\n -0.018702855899823106,\n -0.07338137135247896,\n -0.8435746484744243,\n -0.18091216366556986,\n 0.2295807891528797,\n -1.0689295774443397,\n -1.5621175533013612,\n 
1.3314045672598216,\n 0.6211561903553582,\n 1.0479302317100871,\n -1.1509436982013124,\n 0.447985084931758,\n 0.19917261474342404,\n 0.3582887259341301,\n 0.9953552868908098,\n 0.8948165434511316,\n 0.4949033431999123,\n -0.23004847985703908,\n 0.6411581535557106,\n -1.1589671573242186,\n -0.13691519182560624,\n -0.8849560872785238,\n 0.6629182075027006,\n 2.2608150731789696,\n 2.2823614453180294,\n -1.2291376923498247,\n -0.9267975556981378,\n 0.2597417839242135,\n -0.7667310491821938,\n 0.10503294084132372,\n 2.960320355577672,\n -1.0645098483081497,\n -1.2888339889815872,\n -0.6564570556444346,\n 0.4742489396354781,\n 0.8879606773334898,\n -0.6477585196839569,\n -0.7309497810668936,\n 1.7025953934976548,\n 0.1789174966941155,\n -0.4839093362740933,\n -0.8917713440107442,\n 1.4521776747175792,\n -0.1676974219641624,\n -0.500672037099228,\n -0.2947747621553442,\n 0.929636971325952,\n -0.7614935150071248,\n 1.6886298813725842,\n -0.8136217834373227,\n 1.2030997228178093,\n 1.382267485738376,\n 2.594387458306705,\n -0.7703668776292266,\n -0.7642584795112598,\n 1.3356598324609947,\n -0.5745269784148925,\n -2.212092904499444,\n -1.727975556661197,\n -0.18543087256023608,\n -0.10167435635752538,\n 1.3480966068787303,\n 0.0142803272337873,\n -0.480077631815393,\n -0.32270216749876185,\n -1.7884435311074431,\n -0.5695640948971382,\n -0.22859087912027687,\n -0.08783386938029487,\n -0.18151955278624396,\n 0.2031493507095467,\n 0.06444304447669409,\n -0.4339138073294572,\n 0.236563959074551,\n -0.2937958719187449,\n 0.1611232843821199,\n -0.6574871644742827,\n 1.3141902865107886,\n 0.6093649138398077,\n 0.056674985715912514,\n -1.828714441504608,\n -0.46768482587669535,\n 0.6489735384886999,\n 0.5035677725398181,\n -0.887590772676158,\n -0.3222316759913631,\n -0.35172770495027483,\n -0.4329205472963193,\n -0.8449916868048998,\n 0.38282765028957993,\n 1.3171924061732359,\n 0.2956667124648384,\n 0.5390909497681301,\n -0.7591989862253667,\n -1.1520792974885883,\n -0.39344757869384944,\n 0.6192677330177175,\n -0.05578834574542242,\n 0.593015990282657,\n 0.9374465229256678,\n 0.647772562443425,\n 1.1071167572595217,\n -1.3015016617832518,\n 1.267300472456379,\n -0.5807673178649629,\n 0.9343468385348384,\n -0.28554893036513673,\n 0.4487573993840033,\n 0.6749018890520516,\n -1.20482985206765,\n 0.17291806504654686,\n -0.4124576407610529,\n -0.9203236505429044,\n -0.7461342369802754,\n -0.19694162321688435,\n 0.46556512963300906,\n 0.5198366004764268,\n -1.7222561645076129,\n -0.7078891617994071,\n -1.1653209054214695,\n 1.5560964971092122,\n 0.3335520152642012,\n 0.008390825910327906,\n 0.11336719644324977,\n 0.3158913817073965,\n 0.4704483453862008,\n -0.5700583482495889,\n -1.276634964816531,\n -1.7880560933777756,\n -0.26514994709973827,\n 0.6194447367446946,\n -0.654762456435761,\n 1.0621929196158544,\n 0.4454719444987052,\n -0.9323145612076791,\n 1.3197357985874438,\n -0.8792938558447049,\n -0.2470423905508279,\n 0.5128954444799875,\n -0.09202044992462606,\n -1.3082892596744382,\n -0.34428948138804927,\n 0.012422196356164879,\n 1.4626152292162142,\n 0.34678216997159833,\n 0.409462409138861,\n 0.32838364873801185,\n 1.8776849459782967,\n 1.6816627852133539,\n -0.24894138693568296,\n 0.7150105850753732,\n 0.22929306929129853,\n -0.21434910504054566,\n 1.3339497173912471,\n -1.2497042452057836,\n -0.04487255356399775,\n -0.6486304639082145,\n -0.8048044333264733,\n -1.8090170501469942,\n 1.481689285694336,\n -1.4772553200884717,\n -0.36792462539303805,\n -1.103508260812736,\n -0.2135236993720317,\n 
0.40889179796540165,\n 1.993585196733386,\n 0.43879096427562897,\n -0.44512875171982147,\n -1.1780830020629518,\n -1.666001035275436,\n -0.2977294957665528,\n 1.7299614542270356,\n 0.9882265798853356,\n 2.2412430815464597,\n 0.5801434875813244,\n -0.739190619909163,\n -1.2663490594895201,\n 0.5735521649879137,\n 1.2105709455012765,\n 1.9112159951415644,\n -2.259218931706201,\n -0.563310876529377,\n -2.4119185903750493,\n 0.9662624485722368,\n -0.22788851242764951,\n 0.9198283887420099,\n 0.7855927065251492,\n -0.7459868094792474,\n 0.10543289218409971,\n 0.6401750224618271,\n -0.0077375118689326705,\n -0.11647036625911977,\n -0.4722391874001602,\n -0.2718425102733572,\n -0.8796746964457087,\n 0.6112903638894259,\n 0.5347851929096421,\n -0.4749419210717794,\n 1.0633720764557604,\n -0.2590556665572949,\n 2.590182301241823,\n 1.4524061372706638,\n -0.8503733047335056,\n 0.5609357391481067,\n -1.5661825434426477,\n 0.8019667474525984,\n 1.2716795425969496,\n 0.20011166646917924,\n -0.7105405282282679,\n -0.5593129072748189,\n -1.2401371010520867,\n -0.7002520937780202,\n -2.236596391787529,\n -1.8130090502823886,\n -0.23990633860801777,\n 1.7428780878151378,\n 1.4661206538178901,\n -0.8678567353744017,\n 0.2957423562639015,\n 0.13935419069962593,\n 1.399598845123674,\n 0.059729544605779575,\n -0.9607778026198247,\n 0.18474907798482051,\n 1.0117193651915666,\n -0.9173540069396245,\n 0.8934765521365161,\n -0.665655291396948,\n -0.32955768273493324,\n 0.3062873812209283,\n 0.177342106982554,\n 0.3595522704599547,\n -1.5964209653110262,\n 0.6705899137346863,\n -1.1034642863469553,\n -1.0029562484065524,\n 0.10622956543479244,\n 0.4261871936541378,\n 0.7777501694354336,\n -0.806235923997437,\n -0.8272801398172428,\n -1.2783440745845536,\n 0.5982979227669168,\n -0.28214494859284556,\n 1.101560367699546,\n -0.14008021262664466,\n -0.38717961692054237,\n 0.9962925044431369,\n -0.7391490127960976,\n -0.06294945881724459,\n 0.7283671247384875,\n -0.8458895297768138,\n 0.22808829204347086,\n 0.43685668023014523,\n 0.9204095286935638,\n -0.028241645704951284,\n 0.15951784765135396,\n 0.8068984900818966,\n -0.34387965576978663,\n 0.573828962760762,\n -0.13374515460012618,\n -0.5552788325377814,\n 0.5644705833909952,\n -0.7500532220469983,\n 0.33436674493862256,\n -0.8595435026628129,\n -0.38943898244735853,\n 0.6401502590131951,\n -1.2968645995363652,\n 0.5861622311675501,\n 0.2311759458689689,\n 0.10962292708600496,\n -0.26025023584932205,\n -0.5398478003611565,\n -1.0514168636922954,\n 1.2689172189127857,\n 1.7029909647408918,\n -0.02325431623491577,\n -0.3064675950620902,\n -1.5816446841009473,\n 0.6874254059433739,\n 0.7755967316475798,\n 1.4119333324396597,\n 0.14198739135512406,\n 0.2927714469848192,\n -0.7239793888399496,\n 0.3506448783535265,\n -0.7568480706640158,\n -1.2158508387501554,\n 0.22197589131086445,\n -0.5621415304506887,\n -1.2381112050191665,\n -1.917208333033256,\n -0.3321665793941188,\n -0.5916951886991071,\n -1.244826507645294,\n -0.29767661008214463,\n 0.8590635852032509,\n -1.8579290298421591,\n -1.0470546224962876,\n -2.540080936704841,\n 0.5458326769958273,\n 0.042222128206941614,\n 0.6080450228346708,\n 0.6542717901662132,\n -1.7292955132690793,\n -0.4793123354077725,\n 0.7341767020417185,\n -1.3322222208234826,\n -0.5076389542432337,\n 0.684399163420284,\n 0.3948487980667425,\n -1.7919279627150193,\n 1.582925890933478,\n 0.8341846456063038,\n 0.11776890377042544,\n 1.7471239793853526,\n 1.2269451783893597,\n 0.4235463733287474,\n 1.5908284320029056,\n -1.635191535538596,\n 
0.04419903330064594,\n -1.264385360373252,\n 0.5370192519783876,\n 1.2368603501240771,\n -0.9241079150337286,\n -0.3428051342915208,\n 0.0882286441353256,\n -2.210824604513402,\n -1.9000343283757128,\n 0.4633735273417207,\n -0.32534396967175094,\n 0.026187836765356437,\n 0.18253601230609245,\n 0.8519745761039671,\n -0.028225375482784816,\n -0.5114197447067229,\n -1.2428743809444227,\n 0.2879711400745508,\n 1.2857130031108321,\n 0.5296743558975853,\n -0.8440551904275335,\n -1.3776032491368861,\n 1.8164028526343798,\n -1.1422045767986222,\n -1.8675179752970443,\n 0.6969635320800454,\n 0.9444010906414336,\n -1.28197913481747,\n -0.06259132322304235,\n -0.4518754825442558,\n 0.9183188639099813,\n -0.2916931407869574,\n -1.1464007469977915,\n -0.4475136941593681,\n 0.44385573868752803,\n 2.1606711638680762,\n -1.4813603018181851,\n -0.5647618024870872,\n -1.474746204557383,\n -2.9067748098220485,\n 0.06132111635940877,\n -0.09663310829361334,\n -1.087053744976143,\n -1.774855117659402,\n 0.8130120568830074,\n -0.5179279676199186,\n -0.32549430825787784,\n -1.1995838271705979,\n 0.8587480835176114,\n -0.02095126282663596,\n 0.6677898019388228,\n -1.1891003375304232,\n -2.1125937754631305,\n -0.047765192715672734,\n 0.09812525010300294,\n -1.034992359189106,\n 1.0213451864081846,\n 1.0788796513160641,\n -1.444469239557739,\n 0.28341828947950637,\n -2.4556013891966737,\n 1.7126080715698266,\n -0.5943068899412715,\n 1.0897594994215383,\n -0.16345461884651272,\n 0.7027032523865234,\n 2.2851158088542562,\n 0.5038100496225458,\n -0.16724173993999966,\n -0.6747457076421414,\n 0.42254684460738184,\n 1.277203836895222,\n -0.34438446183574595,\n 0.38956738377878264,\n -0.26884968654334923,\n -0.02148772950361766,\n 0.02044885235644607,\n -1.3873669828232345,\n 0.19995968746809226,\n -1.5826859815811556,\n -0.20385119370067947,\n 0.5724329589281247,\n -1.330307658319185,\n 0.7756101314358208,\n -0.4989071461473931,\n 0.5388161769427321,\n -0.9811085284266614,\n 2.335331094403556,\n -0.5588657325211347,\n -1.2850853695283377,\n 0.40092993245913744,\n -1.9675685522110529,\n 0.9378938542456674,\n -0.18645815013912917,\n -0.6828273180353106,\n -1.840122530632185,\n -1.2581798109361761,\n 0.2867275394896832,\n ],\n }\n return data\n\n\[email protected]\ndef numeric_high_card_dataset(test_backend, numeric_high_card_dict):\n schemas = {\n \"pandas\": {\n \"norm_0_1\": \"float64\",\n },\n \"postgresql\": {\n # \"norm_0_1\": \"DOUBLE_PRECISION\",\n \"norm_0_1\": \"NUMERIC\",\n },\n \"sqlite\": {\n \"norm_0_1\": \"FLOAT\",\n },\n \"mysql\": {\n \"norm_0_1\": \"DOUBLE\",\n },\n \"mssql\": {\n \"norm_0_1\": \"FLOAT\",\n },\n \"spark\": {\n \"norm_0_1\": \"FloatType\",\n },\n }\n return get_dataset(test_backend, numeric_high_card_dict, schemas=schemas)\n\n\[email protected]\ndef datetime_dataset(test_backend):\n data = {\n \"datetime\": [\n str(datetime.datetime(2020, 2, 4, 22, 12, 5, 943152)),\n str(datetime.datetime(2020, 2, 5, 22, 12, 5, 943152)),\n str(datetime.datetime(2020, 2, 6, 22, 12, 5, 943152)),\n str(datetime.datetime(2020, 2, 7, 22, 12, 5, 943152)),\n str(datetime.datetime(2020, 2, 8, 22, 12, 5, 943152)),\n str(datetime.datetime(2020, 2, 9, 22, 12, 5, 943152)),\n str(datetime.datetime(2020, 2, 10, 22, 12, 5, 943152)),\n str(datetime.datetime(2020, 2, 11, 22, 12, 5, 943152)),\n str(datetime.datetime(2020, 2, 12, 22, 12, 5, 943152)),\n str(datetime.datetime(2020, 2, 13, 22, 12, 5, 943152)),\n ]\n }\n\n schemas = {\n \"pandas\": {\n \"datetime\": \"datetime64\",\n },\n \"postgresql\": {\n \"datetime\": 
\"TIMESTAMP\",\n },\n \"sqlite\": {\n \"datetime\": \"TIMESTAMP\",\n },\n \"mysql\": {\n \"datetime\": \"TIMESTAMP\",\n },\n \"mssql\": {\n \"datetime\": \"DATETIME\",\n },\n \"spark\": {\n \"datetime\": \"TimestampType\",\n },\n }\n return get_dataset(test_backend, data, schemas=schemas)\n\n\[email protected]\ndef non_numeric_low_card_dataset(test_backend):\n \"\"\"Provide dataset fixtures that have special values and/or are otherwise useful outside\n the standard json testing framework\"\"\"\n\n data = {\n \"lowcardnonnum\": [\n \"a\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n \"b\",\n ]\n }\n schemas = {\n \"pandas\": {\n \"lowcardnonnum\": \"str\",\n },\n \"postgresql\": {\n \"lowcardnonnum\": \"TEXT\",\n },\n \"sqlite\": {\n \"lowcardnonnum\": \"VARCHAR\",\n },\n \"mysql\": {\n \"lowcardnonnum\": \"TEXT\",\n },\n \"mssql\": {\n \"lowcardnonnum\": \"VARCHAR\",\n },\n \"spark\": {\n \"lowcardnonnum\": \"StringType\",\n },\n }\n return get_dataset(test_backend, data, schemas=schemas)\n\n\[email protected]\ndef non_numeric_high_card_dataset(test_backend):\n \"\"\"Provide dataset fixtures that have special values and/or are otherwise useful outside\n the standard json testing framework\"\"\"\n\n data = {\n \"highcardnonnum\": [\n \"CZVYSnQhHhoti8mQ66XbDuIjE5FMeIHb\",\n \"cPWAg2MJjh8fkRRU1B9aD8vWq3P8KTxJ\",\n \"4tehKwWiCDpuOmTPRYYqTqM7TvEa8Zi7\",\n \"ZvlAnCGiGfkKgQoNrhnnyrjmU7sLsUZz\",\n \"AaqMhdYukVdexTk6LlWvzXYXTp5upPuf\",\n \"ZSKmXUB35K14khHGyjYtuCHuI8yeM7yR\",\n \"F1cwKp4HsCN2s2kXQGR5RUa3WAcibCq2\",\n \"coaX8bSHoVZ8FP8SuQ57SFbrvpRHcibq\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>cKymU5Bvnh0MK5R\",\n \"<KEY>hP8y24HzDQOdt9oysgFyx\",\n \"TePy034aBKlNeAmcJmKJ4p1yF7EUYEOg\",\n \"cIfDv6ieTAobe84P84InzDKrJrccmqbq\",\n \"m1979gfI6lVF9ijJA245bchYFd1EaMap\",\n \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\",\n \"7wcR161jyKYhFLEZkhFqSXLwXW46I5x8\",\n \"IpmNsUFgbbVnL0ljJZOBHnTV0FKARwSn\",\n \"hsA4btHJg6Gq1jwOuOc3pl2UPB5QUwZg\",\n \"vwZyG0jGUys3HQdUiOocIbzhUdUugwKX\",\n \"rTc9h94WjOXN5Wg40DyatFEFfp9mgWj6\",\n \"p1f20s14ZJGUTIBUNeBmJEkWKlwoyqjA\",\n \"VzgAIYNKHA0APN0oZtzMAfmbCzJenswy\",\n \"IO7BqR3iS136YMlLCEo6W3jKNOVJIlLG\",\n \"eTEyhiRuyEcTnHThi1W6yi1mxUjq8TEp\",\n \"4OHPKQgk3sPPYpKWcEWUtNZ0jv00UuPU\",\n \"ZJCstyyUvTR2gwSM6FLgkXYDwG54qo8u\",\n \"nGQsvDAzuL5Yc2XpqoG5P7RhpiTpJp8H\",\n \"NfX4KfEompMbbKloFq8NQpdXtk5PjaPe\",\n \"CP22IFHDX1maoSjTEdtBfrMHWQKACGDB\",\n \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\",\n \"hGwZQW7ao9HqNV2xAovuMBdyafNDE8q6\",\n \"OJmDHbqP1wzarsaSwCphsqvdy5SnTQMT\",\n \"JQbXIcgwUhttfPIGB7VGGfL2KiElabrO\",\n \"eTTNDggfPpRC22SEVNo9W0BPEWO4Cr57\",\n \"GW2JuUJmuCebia7RUiCNl2BTjukIzZWj\",\n \"oVFAvQEKmRTLBqdCuPoJNvzPvQ7UArWC\",\n 
\"zeMHFFKLr5j4DIFxRQ7jHWCMClrP3LmJ\",\n \"eECcArV5TZRftL6ZWaUDO6D2l3HiZj1Y\",\n \"xLNJXaCkOLrD6E0kgGaFOFwctNXjrd77\",\n \"1f8KOCkOvehXYvN8PKv1Ch6dzOjRAr01\",\n \"uVF6HJgjVmoipK1sEpVOFJYuv2TXXsOG\",\n \"agIk8H2nFa0K27IFr0VM2RNp6saihYI3\",\n \"cAUnysbb8SBLSTr0H7cA1fmnpaL80e0N\",\n \"fM1IzD5USx4lMYi6bqPCEZjd2aP7G9vv\",\n \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\",\n \"i65d8jqET5FsVw9t5BwAvBjkEJI6eUMj\",\n \"HbT1b7DQL7n7ZEt2FsKHIggycT1XIYd8\",\n \"938eC0iGMSqZNlqgDNG9YVE7t4izO2Ev\",\n \"PyZetp4izgE4ymPcUXyImF5mm7I6zbta\",\n \"FaXA6YSUrvSnW7quAimLqQMNrU1Dxyjs\",\n \"PisVMvI9RsqQw21B7qYcKkRo5c8C2AKd\",\n \"eSQIxFqyYVf55UMzMEZrotPO74i3Sh03\",\n \"2b74DhJ6YFHrAkrjK4tvvKkYUKll44bR\",\n \"3svDRnrELyAsC69Phpnl2Os89856tFBJ\",\n \"ZcSGN9YYNHnHjUp0SktWoZI7JDmvRTTN\",\n \"m9eDkZ5oZEOFP3HUfaZEirecv2UhQ1B1\",\n \"wZTwJmMX5Q58DhDdmScdigTSyUUC04sO\",\n \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\",\n \"KAuFgcmRKQPIIqGMAQQPfjyC1VXt40vs\",\n \"0S4iueoqKNjvS55O57BdY3DbfwhIDwKc\",\n \"ywbQfOLkvXEUzZISZp1cpwCenrrNPjfF\",\n \"Mayxk8JkV3Z6aROtnsKyqwVK5exiJa8i\",\n \"pXqIRP5fQzbDtj1xFqgJey6dyFOJ1YiU\",\n \"6Ba6RSM56x4MIaJ2wChQ3trBVOw1SWGM\",\n \"puqzOpRJyNVAwH2vLjVCL3uuggxO5aoB\",\n \"jOI4E43wA3lYBWbV0nMxqix885Tye1Pf\",\n \"YgTTYpRDrxU1dMKZeVHYzYNovH2mWGB7\",\n \"24yYfUg1ATvfI1PW79ytsEqHWJHI69wQ\",\n \"mS2AVcLFp6i36sX7yAUrdfM0g0RB2X4D\",\n \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\",\n \"ItvI4l02oAIZEd5cPtDf4OnyBazji0PL\",\n \"DW4oLNP49MNNENFoFf7jDTI04xdvCiWg\",\n \"vrOZrkAS9MCGOqzhCv4cmr5AGddVBShU\",\n \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\",\n \"R74JT4EEhh3Xeu5tbx8bZFkXZRhx6HUn\",\n \"bd9yxS6b1QrKXuT4irY4kpjSyLmKZmx6\",\n \"UMdFQNSiJZtLK3jxBETZrINDKcRqRd0c\",\n \"He7xIY2BMNZ7vSO47KfKoYskVJeeedI7\",\n \"G8PqO0ADoKfDPsMT1K0uOrYf1AtwlTSR\",\n \"hqfmEBNCA7qgntcQVqB7beBt0hB7eaxF\",\n \"mlYdlfei13P6JrT7ZbSZdsudhE24aPYr\",\n \"gUTUoH9LycaItbwLZkK9qf0xbRDgOMN4\",\n \"xw3AuIPyHYq59Qbo5QkQnECSqd2UCvLo\",\n \"kbfzRyRqGZ9WvmTdYKDjyds6EK4fYCyx\",\n \"7AOZ3o2egl6aU1zOrS8CVwXYZMI8NTPg\",\n \"Wkh43H7t95kRb9oOMjTSqC7163SrI4rU\",\n \"x586wCHsLsOaXl3F9cYeaROwdFc2pbU1\",\n \"oOd7GdoPn4qqfAeFj2Z3ddyFdmkuPznh\",\n \"suns0vGgaMzasYpwDEEof2Ktovy0o4os\",\n \"of6W1csCTCBMBXli4a6cEmGZ9EFIOFRC\",\n \"mmTiWVje9SotwPgmRxrGrNeI9DssAaCj\",\n \"pIX0vhOzql5c6Z6NpLbzc8MvYiONyT54\",\n \"nvyCo3MkIK4tS6rkuL4Yw1RgGKwhm4c2\",\n \"prQGAOvQbB8fQIrp8xaLXmGwcxDcCnqt\",\n \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\",\n \"mty9rQJBeTsBQ7ra8vWRbBaWulzhWRSG\",\n \"JL38Vw7yERPC4gBplBaixlbpDg8V7gC6\",\n \"MylTvGl5L1tzosEcgGCQPjIRN6bCUwtI\",\n \"hmr0LNyYObqe5sURs408IhRb50Lnek5K\",\n \"CZVYSnQhHhoti8mQ66XbDuIjE5FMeIHb\",\n \"cPWAg2MJjh8fkRRU1B9aD8vWq3P8KTxJ\",\n \"4tehKwWiCDpuOmTPRYYqTqM7TvEa8Zi7\",\n \"ZvlAnCGiGfkKgQoNrhnnyrjmU7sLsUZz\",\n \"AaqMhdYukVdexTk6LlWvzXYXTp5upPuf\",\n \"ZSKmXUB35K14khHGyjYtuCHuI8yeM7yR\",\n \"F1cwKp4HsCN2s2kXQGR5RUa3WAcibCq2\",\n \"coaX8bSHoVZ8FP8SuQ57SFbrvpRHcibq\",\n \"3IzmbSJF525qtn7O4AvfKONnz7eFgnyU\",\n \"gLCtw7435gaR532PNFVCtvk14lNJpZXv\",\n \"hNyjMYZkVlOKRjhg8cKymU5Bvnh0MK5R\",\n \"IqKC2auGTNehP8y24HzDQOdt9oysgFyx\",\n \"TePy034aBKlNeAmcJmKJ4p1yF7EUYEOg\",\n \"cIfDv6ieTAobe84P84InzDKrJrccmqbq\",\n \"m1979gfI6lVF9ijJA245bchYFd1EaMap\",\n \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\",\n \"7wcR161jyKYhFLEZkhFqSXLwXW46I5x8\",\n \"IpmNsUFgbbVnL0ljJZOBHnTV0FKARwSn\",\n \"hsA4btHJg6Gq1jwOuOc3pl2UPB5QUwZg\",\n \"vwZyG0jGUys3HQdUiOocIbzhUdUugwKX\",\n \"rTc9h94WjOXN5Wg40DyatFEFfp9mgWj6\",\n \"p1f20s14ZJGUTIBUNeBmJEkWKlwoyqjA\",\n \"VzgAIYNKHA0APN0oZtzMAfmbCzJenswy\",\n \"IO7BqR3iS136YMlLCEo6W3jKNOVJIlLG\",\n \"eTEyhiRuyEcTnHThi1W6yi1mxUjq8TEp\",\n 
\"4OHPKQgk3sPPYpKWcEWUtNZ0jv00UuPU\",\n \"ZJCstyyUvTR2gwSM6FLgkXYDwG54qo8u\",\n \"nGQsvDAzuL5Yc2XpqoG5P7RhpiTpJp8H\",\n \"NfX4KfEompMbbKloFq8NQpdXtk5PjaPe\",\n \"CP22IFHDX1maoSjTEdtBfrMHWQKACGDB\",\n \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\",\n \"hGwZQW7ao9HqNV2xAovuMBdyafNDE8q6\",\n \"OJmDHbqP1wzarsaSwCphsqvdy5SnTQMT\",\n \"<KEY>\",\n \"eTTNDggfPpRC22SEVNo9W0BPEWO4Cr57\",\n \"GW2JuUJmuCebia7RUiCNl2BTjukIzZWj\",\n \"oVFAvQEKmRTLBqdCuPoJNvzPvQ7UArWC\",\n \"zeMHFFKLr5j4DIFxRQ7jHWCMClrP3LmJ\",\n \"eECcArV5TZRftL6ZWaUDO6D2l3HiZj1Y\",\n \"xLNJXaCkOLrD6E0kgGaFOFwctNXjrd77\",\n \"1f8KOCkOvehXYvN8PKv1Ch6dzOjRAr01\",\n \"uVF6HJgjVmoipK1sEpVOFJYuv2TXXsOG\",\n \"agIk8H2nFa0K27IFr0VM2RNp6saihYI3\",\n \"cAUnysbb8SBLSTr0H7cA1fmnpaL80e0N\",\n \"fM1IzD5USx4lMYi6bqPCEZjd2aP7G9vv\",\n \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\",\n \"i65d8jqET5FsVw9t5BwAvBjkEJI6eUMj\",\n \"HbT1b7DQL7n7ZEt2FsKHIggycT1XIYd8\",\n \"938eC0iGMSqZNlqgDNG9YVE7t4izO2Ev\",\n \"PyZetp4izgE4ymPcUXyImF5mm7I6zbta\",\n \"FaXA6YSUrvSnW7quAimLqQMNrU1Dxyjs\",\n \"PisVMvI9RsqQw21B7qYcKkRo5c8C2AKd\",\n \"<KEY>5UMzMEZrotPO74i3Sh03\",\n \"<KEY>\",\n \"3svDRnrELyAsC69Phpnl2Os89856tFBJ\",\n \"ZcSGN9YYNHnHjUp0SktWoZI7JDmvRTTN\",\n \"m9eDkZ5oZEOFP3HUfaZEirecv2UhQ1B1\",\n \"wZTwJmMX5Q58DhDdmScdigTSyUUC04sO\",\n \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\",\n \"KAuFgcmRKQPIIqGMAQQPfjyC1VXt40vs\",\n \"0S4iueoqKNjvS55O57BdY3DbfwhIDwKc\",\n \"ywbQfOLkvXEUzZISZp1cpwCenrrNPjfF\",\n \"Mayxk8JkV3Z6aROtnsKyqwVK5exiJa8i\",\n \"pXqIRP5fQzbDtj1xFqgJey6dyFOJ1YiU\",\n \"6Ba6RSM56x4MIaJ2wChQ3trBVOw1SWGM\",\n \"puqzOpRJyNVAwH2vLjVCL3uuggxO5aoB\",\n \"jOI4E43wA3lYBWbV0nMxqix885Tye1Pf\",\n \"YgTTYpRDrxU1dMKZeVHYzYNovH2mWGB7\",\n \"24yYfUg1ATvfI1PW79ytsEqHWJHI69wQ\",\n \"mS2AVcLFp6i36sX7yAUrdfM0g0RB2X4D\",\n \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\",\n \"ItvI4l02oAIZEd5cPtDf4OnyBazji0PL\",\n \"DW4oLNP49MNNENFoFf7jDTI04xdvCiWg\",\n \"vrOZrkAS9MCGOqzhCv4cmr5AGddVBShU\",\n \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\",\n \"R74JT4EEhh3Xeu5tbx8bZFkXZRhx6HUn\",\n \"bd9yxS6b1QrKXuT4irY4kpjSyLmKZmx6\",\n \"UMdFQNSiJZtLK3jxBETZrINDKcRqRd0c\",\n \"He7xIY2BMNZ7vSO47KfKoYskVJeeedI7\",\n \"G8PqO0ADoKfDPsMT1K0uOrYf1AtwlTSR\",\n \"hqfmEBNCA7qgntcQVqB7beBt0hB7eaxF\",\n \"mlYdlfei13P6JrT7ZbSZdsudhE24aPYr\",\n \"gUTUoH9LycaItbwLZkK9qf0xbRDgOMN4\",\n \"xw3AuIPyHYq59Qbo5QkQnECSqd2UCvLo\",\n \"kbfzRyRqGZ9WvmTdYKDjyds6EK4fYCyx\",\n \"7AOZ3o2egl6aU1zOrS8CVwXYZMI8NTPg\",\n \"Wkh43H7t95kRb9oOMjTSqC7163SrI4rU\",\n \"x586wCHsLsOaXl3F9cYeaROwdFc2pbU1\",\n \"oOd7GdoPn4qqfAeFj2Z3ddyFdmkuPznh\",\n \"suns0vGgaMzasYpwDEEof2Ktovy0o4os\",\n \"of6W1csCTCBMBXli4a6cEmGZ9EFIOFRC\",\n \"mmTiWVje9SotwPgmRxrGrNeI9DssAaCj\",\n \"pIX0vhOzql5c6Z6NpLbzc8MvYiONyT54\",\n \"nvyCo3MkIK4tS6rkuL4Yw1RgGKwhm4c2\",\n \"prQGAOvQbB8fQIrp8xaLXmGwcxDcCnqt\",\n \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\",\n \"mty9rQJBeTsBQ7ra8vWRbBaWulzhWRSG\",\n \"JL38Vw7yERPC4gBplBaixlbpDg8V7gC6\",\n \"MylTvGl5L1tzosEcgGCQPjIRN6bCUwtI\",\n \"hmr0LNyYObqe5sURs408IhRb50Lnek5K\",\n ],\n # Built from highcardnonnum using the following:\n # vals = pd.Series(data[\"highcardnonnum\"])\n # sample_vals = vals.sample(n=10, random_state=42)\n # weights = np.random.RandomState(42).rand(10)\n # weights = weights / np.sum(weights)\n # new_vals = sample_vals.sample(n=200, weights=weights, replace=True, random_state=11)\n \"medcardnonnum\": [\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n 
\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\",\n \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\",\n \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\",\n \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\",\n \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\",\n \"<KEY>\",\n \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>AUrdfM0g0RB2X4D\",\n \"<KEY>\",\n \"<KEY>\",\n \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\",\n \"<KEY>\",\n \"<KEY>J\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\",\n \"NfX4KfEompMbbKloFq8NQpdXtk5PjaPe\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"NfX4KfEompMbbKloFq8NQpdXtk5PjaPe\",\n \"<KEY>\",\n \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\",\n \"<KEY>J\",\n \"<KEY>J\",\n \"<KEY>JWN4vvgcFa6MWv8cTeVk\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>gP\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"<KEY>\",\n \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\",\n \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\",\n ],\n }\n schemas = {\n \"pandas\": {\n \"highcardnonnum\": \"str\",\n \"medcardnonnum\": \"str\",\n },\n \"postgresql\": {\n \"highcardnonnum\": \"TEXT\",\n \"medcardnonnum\": \"TEXT\",\n },\n \"sqlite\": {\n \"highcardnonnum\": \"VARCHAR\",\n \"medcardnonnum\": \"VARCHAR\",\n },\n \"mysql\": {\n \"highcardnonnum\": \"TEXT\",\n \"medcardnonnum\": \"TEXT\",\n },\n \"mssql\": {\n \"highcardnonnum\": \"VARCHAR\",\n \"medcardnonnum\": \"VARCHAR\",\n },\n \"spark\": {\n \"highcardnonnum\": \"StringType\",\n \"medcardnonnum\": \"StringType\",\n },\n }\n return get_dataset(test_backend, data, schemas=schemas)\n\n\[email protected]\ndef periodic_table_of_elements():\n data = [\n \"Hydrogen\",\n \"Helium\",\n 
\"Lithium\",\n \"Beryllium\",\n \"Boron\",\n \"Carbon\",\n \"Nitrogen\",\n \"Oxygen\",\n \"Fluorine\",\n \"Neon\",\n \"Sodium\",\n \"Magnesium\",\n \"Aluminum\",\n \"Silicon\",\n \"Phosphorus\",\n \"Sulfur\",\n \"Chlorine\",\n \"Argon\",\n \"Potassium\",\n \"Calcium\",\n \"Scandium\",\n \"Titanium\",\n \"Vanadium\",\n \"Chromium\",\n \"Manganese\",\n \"Iron\",\n \"Cobalt\",\n \"Nickel\",\n \"Copper\",\n \"Zinc\",\n \"Gallium\",\n \"Germanium\",\n \"Arsenic\",\n \"Selenium\",\n \"Bromine\",\n \"Krypton\",\n \"Rubidium\",\n \"Strontium\",\n \"Yttrium\",\n \"Zirconium\",\n \"Niobium\",\n \"Molybdenum\",\n \"Technetium\",\n \"Ruthenium\",\n \"Rhodium\",\n \"Palladium\",\n \"Silver\",\n \"Cadmium\",\n \"Indium\",\n \"Tin\",\n \"Antimony\",\n \"Tellurium\",\n \"Iodine\",\n \"Xenon\",\n \"Cesium\",\n \"Barium\",\n \"Lanthanum\",\n \"Cerium\",\n \"Praseodymium\",\n \"Neodymium\",\n \"Promethium\",\n \"Samarium\",\n \"Europium\",\n \"Gadolinium\",\n \"Terbium\",\n \"Dysprosium\",\n \"Holmium\",\n \"Erbium\",\n \"Thulium\",\n \"Ytterbium\",\n \"Lutetium\",\n \"Hafnium\",\n \"Tantalum\",\n \"Tungsten\",\n \"Rhenium\",\n \"Osmium\",\n \"Iridium\",\n \"Platinum\",\n \"Gold\",\n \"Mercury\",\n \"Thallium\",\n \"Lead\",\n \"Bismuth\",\n \"Polonium\",\n \"Astatine\",\n \"Radon\",\n \"Francium\",\n \"Radium\",\n \"Actinium\",\n \"Thorium\",\n \"Protactinium\",\n \"Uranium\",\n \"Neptunium\",\n \"Plutonium\",\n \"Americium\",\n \"Curium\",\n \"Berkelium\",\n \"Californium\",\n \"Einsteinium\",\n \"Fermium\",\n \"Mendelevium\",\n \"Nobelium\",\n \"Lawrencium\",\n \"Rutherfordium\",\n \"Dubnium\",\n \"Seaborgium\",\n \"Bohrium\",\n \"Hassium\",\n \"Meitnerium\",\n \"Darmstadtium\",\n \"Roentgenium\",\n \"Copernicium\",\n \"Nihomium\",\n \"Flerovium\",\n \"Moscovium\",\n \"Livermorium\",\n \"Tennessine\",\n \"Oganesson\",\n ]\n return data\n\n\ndef dataset_sample_data(test_backend):\n # No infinities for mysql\n if test_backend == \"mysql\":\n data = {\n # \"infinities\": [-np.inf, -10, -np.pi, 0, np.pi, 10/2.2, np.inf],\n \"nulls\": [np.nan, None, 0, 1.1, 2.2, 3.3, None],\n \"naturals\": [1, 2, 3, 4, 5, 6, 7],\n }\n else:\n data = {\n \"infinities\": [-np.inf, -10, -np.pi, 0, np.pi, 10 / 2.2, np.inf],\n \"nulls\": [np.nan, None, 0, 1.1, 2.2, 3.3, None],\n \"naturals\": [1, 2, 3, 4, 5, 6, 7],\n }\n schemas = {\n \"pandas\": {\"infinities\": \"float64\", \"nulls\": \"float64\", \"naturals\": \"float64\"},\n \"postgresql\": {\n \"infinities\": \"DOUBLE_PRECISION\",\n \"nulls\": \"DOUBLE_PRECISION\",\n \"naturals\": \"NUMERIC\",\n },\n \"sqlite\": {\"infinities\": \"FLOAT\", \"nulls\": \"FLOAT\", \"naturals\": \"FLOAT\"},\n \"mysql\": {\"nulls\": \"DOUBLE\", \"naturals\": \"DOUBLE\"},\n \"mssql\": {\"infinities\": \"FLOAT\", \"nulls\": \"FLOAT\", \"naturals\": \"FLOAT\"},\n \"spark\": {\n \"infinities\": \"FloatType\",\n \"nulls\": \"FloatType\",\n \"naturals\": \"FloatType\",\n },\n }\n return data, schemas\n\n\[email protected]\ndef dataset(test_backend):\n \"\"\"Provide dataset fixtures that have special values and/or are otherwise useful outside\n the standard json testing framework\"\"\"\n data, schemas = dataset_sample_data(test_backend)\n return get_dataset(test_backend, data, schemas=schemas)\n\n\[email protected]\ndef pandas_dataset():\n test_backend = \"PandasDataset\"\n data, schemas = dataset_sample_data(test_backend)\n return get_dataset(test_backend, data, schemas=schemas)\n\n\[email protected]\ndef sqlalchemy_dataset(test_backends):\n \"\"\"Provide dataset fixtures that have special values and/or 
are otherwise useful outside\n the standard json testing framework\"\"\"\n if \"postgresql\" in test_backends:\n backend = \"postgresql\"\n elif \"sqlite\" in test_backends:\n backend = \"sqlite\"\n else:\n return\n\n data = {\n \"infinities\": [-np.inf, -10, -np.pi, 0, np.pi, 10 / 2.2, np.inf],\n \"nulls\": [np.nan, None, 0, 1.1, 2.2, 3.3, None],\n \"naturals\": [1, 2, 3, 4, 5, 6, 7],\n }\n schemas = {\n \"postgresql\": {\n \"infinities\": \"DOUBLE_PRECISION\",\n \"nulls\": \"DOUBLE_PRECISION\",\n \"naturals\": \"DOUBLE_PRECISION\",\n },\n \"sqlite\": {\"infinities\": \"FLOAT\", \"nulls\": \"FLOAT\", \"naturals\": \"FLOAT\"},\n }\n return get_dataset(backend, data, schemas=schemas, profiler=None)\n\n\[email protected]\ndef sqlitedb_engine(test_backend):\n if test_backend == \"sqlite\":\n import sqlalchemy as sa\n\n try:\n import sqlalchemy as sa\n\n return sa.create_engine(\"sqlite://\")\n except ImportError:\n raise ValueError(\"sqlite tests require sqlalchemy to be installed\")\n else:\n pytest.skip(\"Skipping test designed for sqlite on non-sqlite backend.\")\n\n\[email protected]\ndef postgresql_engine(test_backend):\n if test_backend == \"postgresql\":\n try:\n import sqlalchemy as sa\n\n engine = sa.create_engine(\n \"postgresql://postgres@localhost/test_ci\"\n ).connect()\n yield engine\n engine.close()\n except ImportError:\n raise ValueError(\"SQL Database tests require sqlalchemy to be installed.\")\n else:\n pytest.skip(\"Skipping test designed for postgresql on non-postgresql backend.\")\n\n\[email protected]\ndef empty_data_context(tmp_path_factory) -> DataContext:\n project_path = str(tmp_path_factory.mktemp(\"empty_data_context\"))\n context = ge.data_context.DataContext.create(project_path)\n context_path = os.path.join(project_path, \"great_expectations\")\n asset_config_path = os.path.join(context_path, \"expectations\")\n os.makedirs(asset_config_path, exist_ok=True)\n return context\n\n\[email protected]\ndef titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store(\n tmp_path_factory,\n):\n project_path = str(tmp_path_factory.mktemp(\"titanic_data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n os.makedirs(os.path.join(context_path, \"expectations\"), exist_ok=True)\n data_path = os.path.join(context_path, \"../data/titanic\")\n os.makedirs(os.path.join(data_path), exist_ok=True)\n shutil.copy(\n file_relative_path(\n __file__, \"./test_fixtures/great_expectations_v013_no_datasource.yml\"\n ),\n str(os.path.join(context_path, \"great_expectations.yml\")),\n )\n shutil.copy(\n file_relative_path(__file__, \"./test_sets/Titanic.csv\"),\n str(os.path.join(context_path, \"../data/titanic/Titanic_19120414_1313.csv\")),\n )\n shutil.copy(\n file_relative_path(__file__, \"./test_sets/Titanic.csv\"),\n str(os.path.join(context_path, \"../data/titanic/Titanic_1911.csv\")),\n )\n shutil.copy(\n file_relative_path(__file__, \"./test_sets/Titanic.csv\"),\n str(os.path.join(context_path, \"../data/titanic/Titanic_1912.csv\")),\n )\n context = ge.data_context.DataContext(context_path)\n\n datasource_config = f\"\"\"\n class_name: Datasource\n\n execution_engine:\n class_name: PandasExecutionEngine\n\n data_connectors:\n my_basic_data_connector:\n class_name: InferredAssetFilesystemDataConnector\n base_directory: {data_path}\n default_regex:\n pattern: (.*)\\\\.csv\n group_names:\n - data_asset_name\n \n my_special_data_connector:\n class_name: ConfiguredAssetFilesystemDataConnector\n base_directory: {data_path}\n 
glob_directive: \"*.csv\"\n\n default_regex:\n pattern: (.+)\\\\.csv\n group_names:\n - name\n assets:\n users:\n base_directory: {data_path}\n pattern: (.+)_(\\\\d+)_(\\\\d+)\\\\.csv\n group_names:\n - name\n - timestamp\n - size\n \n my_other_data_connector:\n class_name: ConfiguredAssetFilesystemDataConnector\n base_directory: {data_path}\n glob_directive: \"*.csv\"\n\n default_regex:\n pattern: (.+)\\\\.csv\n group_names:\n - name\n assets:\n users: {{}}\n \n my_runtime_data_connector:\n module_name: great_expectations.datasource.data_connector\n class_name: RuntimeDataConnector\n runtime_keys:\n - pipeline_stage_name\n - airflow_run_id\n \"\"\"\n\n context.test_yaml_config(name=\"my_datasource\", yaml_config=datasource_config)\n return context\n\n\[email protected]\ndef titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_templates(\n titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store,\n):\n context = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store\n\n # add simple template config\n simple_checkpoint_template_config = CheckpointConfig(\n name=\"my_simple_template_checkpoint\",\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template-$VAR\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n evaluation_parameters={\n \"environment\": \"$GE_ENVIRONMENT\",\n \"tolerance\": 1.0e-2,\n \"aux_param_0\": \"$MY_PARAM\",\n \"aux_param_1\": \"1 + $MY_PARAM\",\n },\n runtime_configuration={\n \"result_format\": {\n \"result_format\": \"BASIC\",\n \"partial_unexpected_count\": 20,\n }\n },\n )\n simple_checkpoint_template_config_key = ConfigurationIdentifier(\n configuration_key=simple_checkpoint_template_config.name\n )\n context.checkpoint_store.set(\n key=simple_checkpoint_template_config_key,\n value=simple_checkpoint_template_config,\n )\n\n # add nested template configs\n nested_checkpoint_template_config_1 = CheckpointConfig(\n name=\"my_nested_checkpoint_template_1\",\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template-$VAR\",\n expectation_suite_name=\"suite_from_template_1\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n evaluation_parameters={\n \"environment\": \"FOO\",\n \"tolerance\": \"FOOBOO\",\n \"aux_param_0\": \"FOOBARBOO\",\n \"aux_param_1\": \"FOOBARBOO\",\n \"template_1_key\": 456,\n },\n runtime_configuration={\n \"result_format\": \"FOOBARBOO\",\n \"partial_unexpected_count\": \"FOOBARBOO\",\n \"template_1_key\": 123,\n },\n validations=[\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource_template_1\",\n \"data_connector_name\": \"my_special_data_connector_template_1\",\n \"data_asset_name\": \"users_from_template_1\",\n \"partition_request\": {\"partition_index\": -999},\n }\n }\n ],\n )\n nested_checkpoint_template_config_1_key = ConfigurationIdentifier(\n 
configuration_key=nested_checkpoint_template_config_1.name\n )\n context.checkpoint_store.set(\n key=nested_checkpoint_template_config_1_key,\n value=nested_checkpoint_template_config_1,\n )\n\n nested_checkpoint_template_config_2 = CheckpointConfig(\n name=\"my_nested_checkpoint_template_2\",\n config_version=1,\n template_name=\"my_nested_checkpoint_template_1\",\n run_name_template=\"%Y-%M-foo-bar-template-$VAR-template-2\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"MyCustomStoreEvaluationParametersActionTemplate2\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n {\n \"name\": \"new_action_from_template_2\",\n \"action\": {\"class_name\": \"Template2SpecialAction\"},\n },\n ],\n evaluation_parameters={\n \"environment\": \"$GE_ENVIRONMENT\",\n \"tolerance\": 1.0e-2,\n \"aux_param_0\": \"$MY_PARAM\",\n \"aux_param_1\": \"1 + $MY_PARAM\",\n },\n runtime_configuration={\n \"result_format\": \"BASIC\",\n \"partial_unexpected_count\": 20,\n },\n )\n nested_checkpoint_template_config_2_key = ConfigurationIdentifier(\n configuration_key=nested_checkpoint_template_config_2.name\n )\n context.checkpoint_store.set(\n key=nested_checkpoint_template_config_2_key,\n value=nested_checkpoint_template_config_2,\n )\n\n nested_checkpoint_template_config_3 = CheckpointConfig(\n name=\"my_nested_checkpoint_template_3\",\n config_version=1,\n template_name=\"my_nested_checkpoint_template_2\",\n run_name_template=\"%Y-%M-foo-bar-template-$VAR-template-3\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"MyCustomStoreEvaluationParametersActionTemplate3\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n {\n \"name\": \"new_action_from_template_3\",\n \"action\": {\"class_name\": \"Template3SpecialAction\"},\n },\n ],\n evaluation_parameters={\n \"environment\": \"$GE_ENVIRONMENT\",\n \"tolerance\": 1.0e-2,\n \"aux_param_0\": \"$MY_PARAM\",\n \"aux_param_1\": \"1 + $MY_PARAM\",\n \"template_3_key\": 123,\n },\n runtime_configuration={\n \"result_format\": \"BASIC\",\n \"partial_unexpected_count\": 20,\n \"template_3_key\": \"bloopy!\",\n },\n )\n nested_checkpoint_template_config_3_key = ConfigurationIdentifier(\n configuration_key=nested_checkpoint_template_config_3.name\n )\n context.checkpoint_store.set(\n key=nested_checkpoint_template_config_3_key,\n value=nested_checkpoint_template_config_3,\n )\n\n # add minimal SimpleCheckpoint\n simple_checkpoint_config = CheckpointConfig(\n name=\"my_minimal_simple_checkpoint\",\n class_name=\"SimpleCheckpoint\",\n config_version=1,\n )\n simple_checkpoint_config_key = ConfigurationIdentifier(\n configuration_key=simple_checkpoint_config.name\n )\n context.checkpoint_store.set(\n key=simple_checkpoint_config_key,\n value=simple_checkpoint_config,\n )\n\n # add SimpleCheckpoint with slack webhook\n simple_checkpoint_with_slack_webhook_config = CheckpointConfig(\n name=\"my_simple_checkpoint_with_slack\",\n class_name=\"SimpleCheckpoint\",\n config_version=1,\n slack_webhook=\"https://hooks.slack.com/foo/bar\",\n )\n simple_checkpoint_with_slack_webhook_config_key = 
ConfigurationIdentifier(\n configuration_key=simple_checkpoint_with_slack_webhook_config.name\n )\n context.checkpoint_store.set(\n key=simple_checkpoint_with_slack_webhook_config_key,\n value=simple_checkpoint_with_slack_webhook_config,\n )\n\n # add SimpleCheckpoint with slack webhook and notify_with\n simple_checkpoint_with_slack_webhook_and_notify_with_all_config = CheckpointConfig(\n name=\"my_simple_checkpoint_with_slack_and_notify_with_all\",\n class_name=\"SimpleCheckpoint\",\n config_version=1,\n slack_webhook=\"https://hooks.slack.com/foo/bar\",\n notify_with=\"all\",\n )\n simple_checkpoint_with_slack_webhook_and_notify_with_all_config_key = ConfigurationIdentifier(\n configuration_key=simple_checkpoint_with_slack_webhook_and_notify_with_all_config.name\n )\n context.checkpoint_store.set(\n key=simple_checkpoint_with_slack_webhook_and_notify_with_all_config_key,\n value=simple_checkpoint_with_slack_webhook_and_notify_with_all_config,\n )\n\n # add SimpleCheckpoint with site_names\n simple_checkpoint_with_site_names_config = CheckpointConfig(\n name=\"my_simple_checkpoint_with_site_names\",\n class_name=\"SimpleCheckpoint\",\n config_version=1,\n site_names=[\"local_site\"],\n )\n simple_checkpoint_with_site_names_config_key = ConfigurationIdentifier(\n configuration_key=simple_checkpoint_with_site_names_config.name\n )\n context.checkpoint_store.set(\n key=simple_checkpoint_with_site_names_config_key,\n value=simple_checkpoint_with_site_names_config,\n )\n\n return context\n\n\[email protected]\ndef empty_data_context_with_config_variables(monkeypatch, empty_data_context):\n monkeypatch.setenv(\"FOO\", \"BAR\")\n monkeypatch.setenv(\"REPLACE_ME_ESCAPED_ENV\", \"ive_been_$--replaced\")\n root_dir = empty_data_context.root_directory\n ge_config_path = file_relative_path(\n __file__,\n \"./test_fixtures/great_expectations_basic_with_variables.yml\",\n )\n shutil.copy(ge_config_path, os.path.join(root_dir, \"great_expectations.yml\"))\n config_variables_path = file_relative_path(\n __file__,\n \"./test_fixtures/config_variables.yml\",\n )\n shutil.copy(config_variables_path, os.path.join(root_dir, \"uncommitted\"))\n return DataContext(context_root_dir=root_dir)\n\n\[email protected]\ndef empty_context_with_checkpoint(empty_data_context):\n context = empty_data_context\n root_dir = empty_data_context.root_directory\n fixture_name = \"my_checkpoint.yml\"\n fixture_path = file_relative_path(\n __file__, f\"./data_context/fixtures/contexts/{fixture_name}\"\n )\n checkpoints_file = os.path.join(root_dir, \"checkpoints\", fixture_name)\n shutil.copy(fixture_path, checkpoints_file)\n assert os.path.isfile(checkpoints_file)\n return context\n\n\[email protected]\ndef empty_context_with_checkpoint_stats_enabled(empty_data_context_stats_enabled):\n context = empty_data_context_stats_enabled\n root_dir = context.root_directory\n fixture_name = \"my_checkpoint.yml\"\n fixture_path = file_relative_path(\n __file__, f\"./data_context/fixtures/contexts/{fixture_name}\"\n )\n checkpoints_file = os.path.join(root_dir, \"checkpoints\", fixture_name)\n shutil.copy(fixture_path, checkpoints_file)\n return context\n\n\[email protected]\ndef empty_data_context_stats_enabled(tmp_path_factory, monkeypatch):\n # Reenable GE_USAGE_STATS\n monkeypatch.delenv(\"GE_USAGE_STATS\")\n project_path = str(tmp_path_factory.mktemp(\"empty_data_context\"))\n context = ge.data_context.DataContext.create(project_path)\n context_path = os.path.join(project_path, \"great_expectations\")\n asset_config_path = 
os.path.join(context_path, \"expectations\")\n os.makedirs(asset_config_path, exist_ok=True)\n return context\n\n\[email protected]\ndef titanic_data_context(tmp_path_factory):\n project_path = str(tmp_path_factory.mktemp(\"titanic_data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n os.makedirs(os.path.join(context_path, \"expectations\"), exist_ok=True)\n os.makedirs(os.path.join(context_path, \"checkpoints\"), exist_ok=True)\n data_path = os.path.join(context_path, \"../data\")\n os.makedirs(os.path.join(data_path), exist_ok=True)\n titanic_yml_path = file_relative_path(\n __file__, \"./test_fixtures/great_expectations_v013_titanic.yml\"\n )\n shutil.copy(\n titanic_yml_path, str(os.path.join(context_path, \"great_expectations.yml\"))\n )\n titanic_csv_path = file_relative_path(__file__, \"./test_sets/Titanic.csv\")\n shutil.copy(\n titanic_csv_path, str(os.path.join(context_path, \"../data/Titanic.csv\"))\n )\n return ge.data_context.DataContext(context_path)\n\n\[email protected]\ndef titanic_data_context_no_data_docs_no_checkpoint_store(tmp_path_factory):\n project_path = str(tmp_path_factory.mktemp(\"titanic_data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n os.makedirs(os.path.join(context_path, \"expectations\"), exist_ok=True)\n os.makedirs(os.path.join(context_path, \"checkpoints\"), exist_ok=True)\n data_path = os.path.join(context_path, \"../data\")\n os.makedirs(os.path.join(data_path), exist_ok=True)\n titanic_yml_path = file_relative_path(\n __file__, \"./test_fixtures/great_expectations_titanic_pre_v013_no_data_docs.yml\"\n )\n shutil.copy(\n titanic_yml_path, str(os.path.join(context_path, \"great_expectations.yml\"))\n )\n titanic_csv_path = file_relative_path(__file__, \"./test_sets/Titanic.csv\")\n shutil.copy(\n titanic_csv_path, str(os.path.join(context_path, \"../data/Titanic.csv\"))\n )\n return ge.data_context.DataContext(context_path)\n\n\[email protected]\ndef titanic_data_context_no_data_docs(tmp_path_factory):\n project_path = str(tmp_path_factory.mktemp(\"titanic_data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n os.makedirs(os.path.join(context_path, \"expectations\"), exist_ok=True)\n os.makedirs(os.path.join(context_path, \"checkpoints\"), exist_ok=True)\n data_path = os.path.join(context_path, \"../data\")\n os.makedirs(os.path.join(data_path), exist_ok=True)\n titanic_yml_path = file_relative_path(\n __file__, \"./test_fixtures/great_expectations_titanic_no_data_docs.yml\"\n )\n shutil.copy(\n titanic_yml_path, str(os.path.join(context_path, \"great_expectations.yml\"))\n )\n titanic_csv_path = file_relative_path(__file__, \"./test_sets/Titanic.csv\")\n shutil.copy(\n titanic_csv_path, str(os.path.join(context_path, \"../data/Titanic.csv\"))\n )\n return ge.data_context.DataContext(context_path)\n\n\[email protected]\ndef titanic_data_context_stats_enabled_no_config_store(tmp_path_factory, monkeypatch):\n # Reenable GE_USAGE_STATS\n monkeypatch.delenv(\"GE_USAGE_STATS\")\n project_path = str(tmp_path_factory.mktemp(\"titanic_data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n os.makedirs(os.path.join(context_path, \"expectations\"), exist_ok=True)\n os.makedirs(os.path.join(context_path, \"checkpoints\"), exist_ok=True)\n data_path = os.path.join(context_path, \"../data\")\n os.makedirs(os.path.join(data_path), exist_ok=True)\n titanic_yml_path = file_relative_path(\n __file__, 
\"./test_fixtures/great_expectations_titanic.yml\"\n )\n shutil.copy(\n titanic_yml_path, str(os.path.join(context_path, \"great_expectations.yml\"))\n )\n titanic_csv_path = file_relative_path(__file__, \"./test_sets/Titanic.csv\")\n shutil.copy(\n titanic_csv_path, str(os.path.join(context_path, \"../data/Titanic.csv\"))\n )\n return ge.data_context.DataContext(context_path)\n\n\[email protected]\ndef titanic_data_context_stats_enabled(tmp_path_factory, monkeypatch):\n # Reenable GE_USAGE_STATS\n monkeypatch.delenv(\"GE_USAGE_STATS\")\n project_path = str(tmp_path_factory.mktemp(\"titanic_data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n os.makedirs(os.path.join(context_path, \"expectations\"), exist_ok=True)\n os.makedirs(os.path.join(context_path, \"checkpoints\"), exist_ok=True)\n data_path = os.path.join(context_path, \"../data\")\n os.makedirs(os.path.join(data_path), exist_ok=True)\n titanic_yml_path = file_relative_path(\n __file__, \"./test_fixtures/great_expectations_v013_titanic.yml\"\n )\n shutil.copy(\n titanic_yml_path, str(os.path.join(context_path, \"great_expectations.yml\"))\n )\n titanic_csv_path = file_relative_path(__file__, \"./test_sets/Titanic.csv\")\n shutil.copy(\n titanic_csv_path, str(os.path.join(context_path, \"../data/Titanic.csv\"))\n )\n return ge.data_context.DataContext(context_path)\n\n\[email protected]\ndef titanic_sqlite_db(sa):\n try:\n import sqlalchemy as sa\n from sqlalchemy import create_engine\n\n titanic_db_path = file_relative_path(__file__, \"./test_sets/titanic.db\")\n engine = create_engine(\"sqlite:///{}\".format(titanic_db_path))\n assert engine.execute(\"select count(*) from titanic\").fetchall()[0] == (1313,)\n return engine\n except ImportError:\n raise ValueError(\"sqlite tests require sqlalchemy to be installed\")\n\n\[email protected]\ndef titanic_expectation_suite():\n return ExpectationSuite(\n expectation_suite_name=\"Titanic.warning\",\n meta={},\n data_asset_type=\"Dataset\",\n expectations=[\n ExpectationConfiguration(\n expectation_type=\"expect_column_to_exist\", kwargs={\"column\": \"PClass\"}\n ),\n ExpectationConfiguration(\n expectation_type=\"expect_column_values_to_not_be_null\",\n kwargs={\"column\": \"Name\"},\n ),\n ],\n )\n\n\[email protected]\ndef empty_sqlite_db(sa):\n \"\"\"An empty in-memory sqlite db that always gets run.\"\"\"\n try:\n import sqlalchemy as sa\n from sqlalchemy import create_engine\n\n engine = create_engine(\"sqlite://\")\n assert engine.execute(\"select 1\").fetchall()[0] == (1,)\n return engine\n except ImportError:\n raise ValueError(\"sqlite tests require sqlalchemy to be installed\")\n\n\[email protected]\n@freeze_time(\"09/26/2019 13:42:41\")\ndef site_builder_data_context_with_html_store_titanic_random(\n tmp_path_factory, filesystem_csv_3\n):\n base_dir = str(tmp_path_factory.mktemp(\"project_dir\"))\n project_dir = os.path.join(base_dir, \"project_path\")\n os.mkdir(project_dir)\n\n os.makedirs(os.path.join(project_dir, \"data\"))\n os.makedirs(os.path.join(project_dir, \"data/titanic\"))\n shutil.copy(\n file_relative_path(__file__, \"./test_sets/Titanic.csv\"),\n str(os.path.join(project_dir, \"data/titanic/Titanic.csv\")),\n )\n\n os.makedirs(os.path.join(project_dir, \"data/random\"))\n shutil.copy(\n os.path.join(filesystem_csv_3, \"f1.csv\"),\n str(os.path.join(project_dir, \"data/random/f1.csv\")),\n )\n shutil.copy(\n os.path.join(filesystem_csv_3, \"f2.csv\"),\n str(os.path.join(project_dir, \"data/random/f2.csv\")),\n )\n 
ge.data_context.DataContext.create(project_dir)\n shutil.copy(\n file_relative_path(\n __file__, \"./test_fixtures/great_expectations_site_builder.yml\"\n ),\n str(os.path.join(project_dir, \"great_expectations\", \"great_expectations.yml\")),\n )\n context = ge.data_context.DataContext(\n context_root_dir=os.path.join(project_dir, \"great_expectations\")\n )\n\n context.add_datasource(\n \"titanic\",\n class_name=\"PandasDatasource\",\n batch_kwargs_generators={\n \"subdir_reader\": {\n \"class_name\": \"SubdirReaderBatchKwargsGenerator\",\n \"base_directory\": os.path.join(project_dir, \"data/titanic/\"),\n }\n },\n )\n context.add_datasource(\n \"random\",\n class_name=\"PandasDatasource\",\n batch_kwargs_generators={\n \"subdir_reader\": {\n \"class_name\": \"SubdirReaderBatchKwargsGenerator\",\n \"base_directory\": os.path.join(project_dir, \"data/random/\"),\n }\n },\n )\n\n context.profile_datasource(\"titanic\")\n context.profile_datasource(\"random\")\n context.profile_datasource(context.list_datasources()[0][\"name\"])\n\n context._project_config.anonymous_usage_statistics = {\n \"enabled\": True,\n \"data_context_id\": \"f43d4897-385f-4366-82b0-1a8eda2bf79c\",\n }\n\n return context\n\n\[email protected]\n@freeze_time(\"09/26/2019 13:42:41\")\ndef site_builder_data_context_v013_with_html_store_titanic_random(\n tmp_path_factory, filesystem_csv_3\n):\n base_dir = str(tmp_path_factory.mktemp(\"project_dir\"))\n project_dir = os.path.join(base_dir, \"project_path\")\n os.mkdir(project_dir)\n\n os.makedirs(os.path.join(project_dir, \"data\"))\n os.makedirs(os.path.join(project_dir, \"data/titanic\"))\n shutil.copy(\n file_relative_path(__file__, \"./test_sets/Titanic.csv\"),\n str(os.path.join(project_dir, \"data/titanic/Titanic.csv\")),\n )\n\n os.makedirs(os.path.join(project_dir, \"data/random\"))\n shutil.copy(\n os.path.join(filesystem_csv_3, \"f1.csv\"),\n str(os.path.join(project_dir, \"data/random/f1.csv\")),\n )\n shutil.copy(\n os.path.join(filesystem_csv_3, \"f2.csv\"),\n str(os.path.join(project_dir, \"data/random/f2.csv\")),\n )\n ge.data_context.DataContext.create(project_dir)\n shutil.copy(\n file_relative_path(\n __file__, \"./test_fixtures/great_expectations_v013_site_builder.yml\"\n ),\n str(os.path.join(project_dir, \"great_expectations\", \"great_expectations.yml\")),\n )\n context = ge.data_context.DataContext(\n context_root_dir=os.path.join(project_dir, \"great_expectations\")\n )\n\n context.add_datasource(\n \"titanic\",\n class_name=\"PandasDatasource\",\n batch_kwargs_generators={\n \"subdir_reader\": {\n \"class_name\": \"SubdirReaderBatchKwargsGenerator\",\n \"base_directory\": os.path.join(project_dir, \"data/titanic/\"),\n }\n },\n )\n context.add_datasource(\n \"random\",\n class_name=\"PandasDatasource\",\n batch_kwargs_generators={\n \"subdir_reader\": {\n \"class_name\": \"SubdirReaderBatchKwargsGenerator\",\n \"base_directory\": os.path.join(project_dir, \"data/random/\"),\n }\n },\n )\n\n context.profile_datasource(\"titanic\")\n context.profile_datasource(\"random\")\n context.profile_datasource(context.list_datasources()[0][\"name\"])\n\n context._project_config.anonymous_usage_statistics = {\n \"enabled\": True,\n \"data_context_id\": \"f43d4897-385f-4366-82b0-1a8eda2bf79c\",\n }\n\n return context\n\n\[email protected]\ndef titanic_multibatch_data_context(tmp_path_factory):\n \"\"\"\n Based on titanic_data_context, but with 2 identical batches of\n data asset \"titanic\"\n :param tmp_path_factory:\n :return:\n \"\"\"\n project_path = 
str(tmp_path_factory.mktemp(\"titanic_data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n os.makedirs(os.path.join(context_path, \"expectations\"), exist_ok=True)\n data_path = os.path.join(context_path, \"../data/titanic\")\n os.makedirs(os.path.join(data_path), exist_ok=True)\n shutil.copy(\n file_relative_path(__file__, \"./test_fixtures/great_expectations_titanic.yml\"),\n str(os.path.join(context_path, \"great_expectations.yml\")),\n )\n shutil.copy(\n file_relative_path(__file__, \"./test_sets/Titanic.csv\"),\n str(os.path.join(context_path, \"../data/titanic/Titanic_1911.csv\")),\n )\n shutil.copy(\n file_relative_path(__file__, \"./test_sets/Titanic.csv\"),\n str(os.path.join(context_path, \"../data/titanic/Titanic_1912.csv\")),\n )\n return ge.data_context.DataContext(context_path)\n\n\[email protected]\ndef v10_project_directory(tmp_path_factory):\n \"\"\"\n GE 0.10.x project for testing upgrade helper\n \"\"\"\n project_path = str(tmp_path_factory.mktemp(\"v10_project\"))\n context_root_dir = os.path.join(project_path, \"great_expectations\")\n shutil.copytree(\n file_relative_path(\n __file__, \"./test_fixtures/upgrade_helper/great_expectations_v10_project/\"\n ),\n context_root_dir,\n )\n shutil.copy(\n file_relative_path(\n __file__, \"./test_fixtures/upgrade_helper/great_expectations_v1_basic.yml\"\n ),\n os.path.join(context_root_dir, \"great_expectations.yml\"),\n )\n return context_root_dir\n\n\[email protected]\ndef v20_project_directory(tmp_path_factory):\n \"\"\"\n GE config_version: 2 project for testing upgrade helper\n \"\"\"\n project_path = str(tmp_path_factory.mktemp(\"v20_project\"))\n context_root_dir = os.path.join(project_path, \"great_expectations\")\n shutil.copytree(\n file_relative_path(\n __file__, \"./test_fixtures/upgrade_helper/great_expectations_v20_project/\"\n ),\n context_root_dir,\n )\n shutil.copy(\n file_relative_path(\n __file__, \"./test_fixtures/upgrade_helper/great_expectations_v2.yml\"\n ),\n os.path.join(context_root_dir, \"great_expectations.yml\"),\n )\n return context_root_dir\n\n\[email protected]\ndef data_context_parameterized_expectation_suite_no_checkpoint_store(tmp_path_factory):\n \"\"\"\n This data_context is *manually* created to have the config we want, vs\n created with DataContext.create()\n \"\"\"\n project_path = str(tmp_path_factory.mktemp(\"data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n asset_config_path = os.path.join(context_path, \"expectations\")\n fixture_dir = file_relative_path(__file__, \"./test_fixtures\")\n os.makedirs(\n os.path.join(asset_config_path, \"my_dag_node\"),\n exist_ok=True,\n )\n shutil.copy(\n os.path.join(fixture_dir, \"great_expectations_basic.yml\"),\n str(os.path.join(context_path, \"great_expectations.yml\")),\n )\n shutil.copy(\n os.path.join(\n fixture_dir,\n \"expectation_suites/parameterized_expectation_suite_fixture.json\",\n ),\n os.path.join(asset_config_path, \"my_dag_node/default.json\"),\n )\n os.makedirs(os.path.join(context_path, \"plugins\"), exist_ok=True)\n shutil.copy(\n os.path.join(fixture_dir, \"custom_pandas_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_pandas_dataset.py\")),\n )\n shutil.copy(\n os.path.join(fixture_dir, \"custom_sqlalchemy_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_sqlalchemy_dataset.py\")),\n )\n shutil.copy(\n os.path.join(fixture_dir, \"custom_sparkdf_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", 
\"custom_sparkdf_dataset.py\")),\n )\n return ge.data_context.DataContext(context_path)\n\n\[email protected]\ndef data_context_with_bad_datasource(tmp_path_factory):\n \"\"\"\n This data_context is *manually* created to have the config we want, vs\n created with DataContext.create()\n\n This DataContext has a connection to a datasource named my_postgres_db\n which is not a valid datasource.\n\n It is used by test_get_batch_multiple_datasources_do_not_scan_all()\n \"\"\"\n project_path = str(tmp_path_factory.mktemp(\"data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n asset_config_path = os.path.join(context_path, \"expectations\")\n fixture_dir = file_relative_path(__file__, \"./test_fixtures\")\n os.makedirs(\n os.path.join(asset_config_path, \"my_dag_node\"),\n exist_ok=True,\n )\n shutil.copy(\n os.path.join(fixture_dir, \"great_expectations_bad_datasource.yml\"),\n str(os.path.join(context_path, \"great_expectations.yml\")),\n )\n return ge.data_context.DataContext(context_path)\n\n\[email protected]\ndef data_context_parameterized_expectation_suite_no_checkpoint_store_with_usage_statistics_enabled(\n tmp_path_factory,\n):\n \"\"\"\n This data_context is *manually* created to have the config we want, vs\n created with DataContext.create()\n \"\"\"\n project_path = str(tmp_path_factory.mktemp(\"data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n asset_config_path = os.path.join(context_path, \"expectations\")\n fixture_dir = file_relative_path(__file__, \"./test_fixtures\")\n os.makedirs(\n os.path.join(asset_config_path, \"my_dag_node\"),\n exist_ok=True,\n )\n shutil.copy(\n os.path.join(\n fixture_dir, \"great_expectations_basic_with_usage_stats_enabled.yml\"\n ),\n str(os.path.join(context_path, \"great_expectations.yml\")),\n )\n shutil.copy(\n os.path.join(\n fixture_dir,\n \"expectation_suites/parameterized_expectation_suite_fixture.json\",\n ),\n os.path.join(asset_config_path, \"my_dag_node/default.json\"),\n )\n os.makedirs(os.path.join(context_path, \"plugins\"), exist_ok=True)\n shutil.copy(\n os.path.join(fixture_dir, \"custom_pandas_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_pandas_dataset.py\")),\n )\n shutil.copy(\n os.path.join(fixture_dir, \"custom_sqlalchemy_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_sqlalchemy_dataset.py\")),\n )\n shutil.copy(\n os.path.join(fixture_dir, \"custom_sparkdf_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_sparkdf_dataset.py\")),\n )\n return ge.data_context.DataContext(context_path)\n\n\[email protected]\ndef data_context_parameterized_expectation_suite(tmp_path_factory):\n \"\"\"\n This data_context is *manually* created to have the config we want, vs\n created with DataContext.create()\n \"\"\"\n project_path = str(tmp_path_factory.mktemp(\"data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n asset_config_path = os.path.join(context_path, \"expectations\")\n fixture_dir = file_relative_path(__file__, \"./test_fixtures\")\n os.makedirs(\n os.path.join(asset_config_path, \"my_dag_node\"),\n exist_ok=True,\n )\n shutil.copy(\n os.path.join(fixture_dir, \"great_expectations_v013_basic.yml\"),\n str(os.path.join(context_path, \"great_expectations.yml\")),\n )\n shutil.copy(\n os.path.join(\n fixture_dir,\n \"expectation_suites/parameterized_expectation_suite_fixture.json\",\n ),\n os.path.join(asset_config_path, \"my_dag_node/default.json\"),\n )\n 
os.makedirs(os.path.join(context_path, \"plugins\"), exist_ok=True)\n shutil.copy(\n os.path.join(fixture_dir, \"custom_pandas_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_pandas_dataset.py\")),\n )\n shutil.copy(\n os.path.join(fixture_dir, \"custom_sqlalchemy_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_sqlalchemy_dataset.py\")),\n )\n shutil.copy(\n os.path.join(fixture_dir, \"custom_sparkdf_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_sparkdf_dataset.py\")),\n )\n return ge.data_context.DataContext(context_path)\n\n\[email protected]\ndef data_context_parameterized_expectation_suite_with_usage_statistics_enabled(\n tmp_path_factory,\n):\n \"\"\"\n This data_context is *manually* created to have the config we want, vs\n created with DataContext.create()\n \"\"\"\n project_path = str(tmp_path_factory.mktemp(\"data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n asset_config_path = os.path.join(context_path, \"expectations\")\n fixture_dir = file_relative_path(__file__, \"./test_fixtures\")\n os.makedirs(\n os.path.join(asset_config_path, \"my_dag_node\"),\n exist_ok=True,\n )\n shutil.copy(\n os.path.join(\n fixture_dir, \"great_expectations_v013_basic_with_usage_stats_enabled.yml\"\n ),\n str(os.path.join(context_path, \"great_expectations.yml\")),\n )\n shutil.copy(\n os.path.join(\n fixture_dir,\n \"expectation_suites/parameterized_expectation_suite_fixture.json\",\n ),\n os.path.join(asset_config_path, \"my_dag_node/default.json\"),\n )\n os.makedirs(os.path.join(context_path, \"plugins\"), exist_ok=True)\n shutil.copy(\n os.path.join(fixture_dir, \"custom_pandas_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_pandas_dataset.py\")),\n )\n shutil.copy(\n os.path.join(fixture_dir, \"custom_sqlalchemy_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_sqlalchemy_dataset.py\")),\n )\n shutil.copy(\n os.path.join(fixture_dir, \"custom_sparkdf_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_sparkdf_dataset.py\")),\n )\n return ge.data_context.DataContext(context_path)\n\n\[email protected]\ndef data_context_with_bad_notebooks(tmp_path_factory):\n \"\"\"\n This data_context is *manually* created to have the config we want, vs\n created with DataContext.create()\n \"\"\"\n project_path = str(tmp_path_factory.mktemp(\"data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n asset_config_path = os.path.join(context_path, \"expectations\")\n fixture_dir = file_relative_path(__file__, \"./test_fixtures\")\n custom_notebook_assets_dir = \"notebook_assets\"\n\n os.makedirs(\n os.path.join(asset_config_path, \"my_dag_node\"),\n exist_ok=True,\n )\n shutil.copy(\n os.path.join(fixture_dir, \"great_expectations_basic_with_bad_notebooks.yml\"),\n str(os.path.join(context_path, \"great_expectations.yml\")),\n )\n shutil.copy(\n os.path.join(\n fixture_dir,\n \"expectation_suites/parameterized_expectation_suite_fixture.json\",\n ),\n os.path.join(asset_config_path, \"my_dag_node/default.json\"),\n )\n\n os.makedirs(os.path.join(context_path, \"plugins\"), exist_ok=True)\n shutil.copytree(\n os.path.join(fixture_dir, custom_notebook_assets_dir),\n str(os.path.join(context_path, \"plugins\", custom_notebook_assets_dir)),\n )\n return ge.data_context.DataContext(context_path)\n\n\[email protected]\ndef data_context_custom_notebooks(tmp_path_factory):\n \"\"\"\n This data_context is *manually* created to have 
the config we want, vs\n created with DataContext.create()\n \"\"\"\n project_path = str(tmp_path_factory.mktemp(\"data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n asset_config_path = os.path.join(context_path, \"expectations\")\n fixture_dir = file_relative_path(__file__, \"./test_fixtures\")\n os.makedirs(\n os.path.join(asset_config_path, \"my_dag_node\"),\n exist_ok=True,\n )\n shutil.copy(\n os.path.join(fixture_dir, \"great_expectations_custom_notebooks.yml\"),\n str(os.path.join(context_path, \"great_expectations.yml\")),\n )\n shutil.copy(\n os.path.join(\n fixture_dir,\n \"expectation_suites/parameterized_expectation_suite_fixture.json\",\n ),\n os.path.join(asset_config_path, \"my_dag_node/default.json\"),\n )\n\n os.makedirs(os.path.join(context_path, \"plugins\"), exist_ok=True)\n\n return ge.data_context.DataContext(context_path)\n\n\[email protected]\ndef data_context_simple_expectation_suite(tmp_path_factory):\n \"\"\"\n This data_context is *manually* created to have the config we want, vs\n created with DataContext.create()\n \"\"\"\n project_path = str(tmp_path_factory.mktemp(\"data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n asset_config_path = os.path.join(context_path, \"expectations\")\n fixture_dir = file_relative_path(__file__, \"./test_fixtures\")\n os.makedirs(\n os.path.join(asset_config_path, \"my_dag_node\"),\n exist_ok=True,\n )\n shutil.copy(\n os.path.join(fixture_dir, \"great_expectations_basic.yml\"),\n str(os.path.join(context_path, \"great_expectations.yml\")),\n )\n shutil.copy(\n os.path.join(\n fixture_dir,\n \"rendering_fixtures/expectations_suite_1.json\",\n ),\n os.path.join(asset_config_path, \"default.json\"),\n )\n os.makedirs(os.path.join(context_path, \"plugins\"), exist_ok=True)\n shutil.copy(\n os.path.join(fixture_dir, \"custom_pandas_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_pandas_dataset.py\")),\n )\n shutil.copy(\n os.path.join(fixture_dir, \"custom_sqlalchemy_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_sqlalchemy_dataset.py\")),\n )\n shutil.copy(\n os.path.join(fixture_dir, \"custom_sparkdf_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_sparkdf_dataset.py\")),\n )\n return ge.data_context.DataContext(context_path)\n\n\[email protected]()\ndef filesystem_csv_data_context_with_validation_operators(\n titanic_data_context_stats_enabled, filesystem_csv_2\n):\n titanic_data_context_stats_enabled.add_datasource(\n \"rad_datasource\",\n module_name=\"great_expectations.datasource\",\n class_name=\"PandasDatasource\",\n batch_kwargs_generators={\n \"subdir_reader\": {\n \"class_name\": \"SubdirReaderBatchKwargsGenerator\",\n \"base_directory\": str(filesystem_csv_2),\n }\n },\n )\n return titanic_data_context_stats_enabled\n\n\[email protected]()\ndef filesystem_csv_data_context(empty_data_context, filesystem_csv_2):\n empty_data_context.add_datasource(\n \"rad_datasource\",\n module_name=\"great_expectations.datasource\",\n class_name=\"PandasDatasource\",\n batch_kwargs_generators={\n \"subdir_reader\": {\n \"class_name\": \"SubdirReaderBatchKwargsGenerator\",\n \"base_directory\": str(filesystem_csv_2),\n }\n },\n )\n return empty_data_context\n\n\[email protected]\ndef filesystem_csv(tmp_path_factory):\n base_dir = tmp_path_factory.mktemp(\"filesystem_csv\")\n base_dir = str(base_dir)\n # Put a few files in the directory\n with open(os.path.join(base_dir, \"f1.csv\"), \"w\") as outfile:\n 
outfile.writelines([\"a,b,c\\n\"])\n with open(os.path.join(base_dir, \"f2.csv\"), \"w\") as outfile:\n outfile.writelines([\"a,b,c\\n\"])\n\n os.makedirs(os.path.join(base_dir, \"f3\"), exist_ok=True)\n with open(os.path.join(base_dir, \"f3\", \"f3_20190101.csv\"), \"w\") as outfile:\n outfile.writelines([\"a,b,c\\n\"])\n with open(os.path.join(base_dir, \"f3\", \"f3_20190102.csv\"), \"w\") as outfile:\n outfile.writelines([\"a,b,c\\n\"])\n\n return base_dir\n\n\[email protected]\ndef filesystem_csv_2(tmp_path_factory):\n base_dir = tmp_path_factory.mktemp(\"test_files\")\n base_dir = str(base_dir)\n\n # Put a file in the directory\n toy_dataset = PandasDataset({\"x\": [1, 2, 3]})\n toy_dataset.to_csv(os.path.join(base_dir, \"f1.csv\"), index=None)\n\n return base_dir\n\n\[email protected]\ndef filesystem_csv_3(tmp_path_factory):\n base_dir = tmp_path_factory.mktemp(\"test_files\")\n base_dir = str(base_dir)\n\n # Put a file in the directory\n toy_dataset = PandasDataset({\"x\": [1, 2, 3]})\n toy_dataset.to_csv(os.path.join(base_dir, \"f1.csv\"), index=None)\n\n toy_dataset_2 = PandasDataset({\"y\": [1, 2, 3]})\n toy_dataset_2.to_csv(os.path.join(base_dir, \"f2.csv\"), index=None)\n\n return base_dir\n\n\[email protected]()\ndef filesystem_csv_4(tmp_path_factory):\n base_dir = tmp_path_factory.mktemp(\"test_files\")\n base_dir = str(base_dir)\n\n # Put a file in the directory\n toy_dataset = PandasDataset(\n {\n \"x\": [1, 2, 3],\n \"y\": [1, 2, 3],\n }\n )\n toy_dataset.to_csv(os.path.join(base_dir, \"f1.csv\"), index=None)\n\n return base_dir\n\n\[email protected]\ndef titanic_profiled_evrs_1():\n with open(\n file_relative_path(\n __file__, \"./render/fixtures/BasicDatasetProfiler_evrs.json\"\n ),\n ) as infile:\n return expectationSuiteValidationResultSchema.loads(infile.read())\n\n\[email protected]\ndef titanic_profiled_name_column_evrs():\n # This is a janky way to fetch expectations matching a specific name from an EVR suite.\n # TODO: It will no longer be necessary once we implement ValidationResultSuite._group_evrs_by_column\n from great_expectations.render.renderer.renderer import Renderer\n\n with open(\n file_relative_path(\n __file__, \"./render/fixtures/BasicDatasetProfiler_evrs.json\"\n ),\n ) as infile:\n titanic_profiled_evrs_1 = expectationSuiteValidationResultSchema.load(\n json.load(infile)\n )\n\n evrs_by_column = Renderer()._group_evrs_by_column(titanic_profiled_evrs_1)\n name_column_evrs = evrs_by_column[\"Name\"]\n\n return name_column_evrs\n\n\[email protected]\ndef titanic_profiled_expectations_1():\n with open(\n file_relative_path(\n __file__, \"./render/fixtures/BasicDatasetProfiler_expectations.json\"\n ),\n ) as infile:\n return expectationSuiteSchema.load(json.load(infile))\n\n\[email protected]\ndef titanic_profiled_name_column_expectations():\n from great_expectations.render.renderer.renderer import Renderer\n\n with open(\n file_relative_path(\n __file__, \"./render/fixtures/BasicDatasetProfiler_expectations.json\"\n ),\n ) as infile:\n titanic_profiled_expectations = expectationSuiteSchema.load(json.load(infile))\n\n columns, ordered_columns = Renderer()._group_and_order_expectations_by_column(\n titanic_profiled_expectations\n )\n name_column_expectations = columns[\"Name\"]\n\n return name_column_expectations\n\n\[email protected]\ndef titanic_validation_results():\n with open(\n file_relative_path(__file__, \"./test_sets/expected_cli_results_default.json\"),\n ) as infile:\n return 
expectationSuiteValidationResultSchema.load(json.load(infile))\n\n\n# various types of evr\[email protected]\ndef evr_failed():\n return ExpectationValidationResult(\n success=False,\n result={\n \"element_count\": 1313,\n \"missing_count\": 0,\n \"missing_percent\": 0.0,\n \"unexpected_count\": 3,\n \"unexpected_percent\": 0.2284843869002285,\n \"unexpected_percent_nonmissing\": 0.2284843869002285,\n \"partial_unexpected_list\": [\n \"Daly, Mr <NAME> \",\n \"Barber, Ms \",\n \"<NAME> \",\n ],\n \"partial_unexpected_index_list\": [77, 289, 303],\n \"partial_unexpected_counts\": [\n {\"value\": \"Barber, Ms \", \"count\": 1},\n {\"value\": \"Daly, Mr <NAME> \", \"count\": 1},\n {\"value\": \"<NAME> Emily \", \"count\": 1},\n ],\n },\n exception_info={\n \"raised_exception\": False,\n \"exception_message\": None,\n \"exception_traceback\": None,\n },\n expectation_config=ExpectationConfiguration(\n expectation_type=\"expect_column_values_to_not_match_regex\",\n kwargs={\n \"column\": \"Name\",\n \"regex\": \"^\\\\s+|\\\\s+$\",\n \"result_format\": \"SUMMARY\",\n },\n ),\n )\n\n\[email protected]\ndef evr_failed_with_exception():\n return ExpectationValidationResult(\n success=False,\n exception_info={\n \"raised_exception\": True,\n \"exception_message\": \"Invalid partition object.\",\n \"exception_traceback\": 'Traceback (most recent call last):\\n File \"/great_expectations/great_expectations/data_asset/data_asset.py\", line 216, in wrapper\\n return_obj = func(self, **evaluation_args)\\n File \"/great_expectations/great_expectations/dataset/dataset.py\", line 106, in inner_wrapper\\n evaluation_result = func(self, column, *args, **kwargs)\\n File \"/great_expectations/great_expectations/dataset/dataset.py\", line 3381, in expect_column_kl_divergence_to_be_less_than\\n raise ValueError(\"Invalid partition object.\")\\nValueError: Invalid partition object.\\n',\n },\n expectation_config=ExpectationConfiguration(\n expectation_type=\"expect_column_kl_divergence_to_be_less_than\",\n kwargs={\n \"column\": \"live\",\n \"partition_object\": None,\n \"threshold\": None,\n \"result_format\": \"SUMMARY\",\n },\n meta={\"BasicDatasetProfiler\": {\"confidence\": \"very low\"}},\n ),\n )\n\n\[email protected]\ndef evr_success():\n return ExpectationValidationResult(\n success=True,\n result={\"observed_value\": 1313},\n exception_info={\n \"raised_exception\": False,\n \"exception_message\": None,\n \"exception_traceback\": None,\n },\n expectation_config=ExpectationConfiguration(\n expectation_type=\"expect_table_row_count_to_be_between\",\n kwargs={\"min_value\": 0, \"max_value\": None, \"result_format\": \"SUMMARY\"},\n ),\n )\n\n\[email protected]\ndef sqlite_view_engine(test_backends):\n # Create a small in-memory engine with two views, one of which is temporary\n if \"sqlite\" in test_backends:\n try:\n import sqlalchemy as sa\n\n sqlite_engine = sa.create_engine(\"sqlite://\")\n df = pd.DataFrame({\"a\": [1, 2, 3, 4, 5]})\n df.to_sql(\"test_table\", con=sqlite_engine)\n sqlite_engine.execute(\n \"CREATE TEMP VIEW test_temp_view AS SELECT * FROM test_table where a < 4;\"\n )\n sqlite_engine.execute(\n \"CREATE VIEW test_view AS SELECT * FROM test_table where a > 4;\"\n )\n return sqlite_engine\n except ImportError:\n sa = None\n else:\n pytest.skip(\"SqlAlchemy tests disabled; not testing views\")\n\n\[email protected]\ndef expectation_suite_identifier():\n return ExpectationSuiteIdentifier(\"my.expectation.suite.name\")\n\n\[email protected]\ndef 
basic_sqlalchemy_datasource(sqlitedb_engine):\n return SqlAlchemyDatasource(\"basic_sqlalchemy_datasource\", engine=sqlitedb_engine)\n\n\[email protected]\ndef test_cases_for_sql_data_connector_sqlite_execution_engine(sa):\n if sa is None:\n raise ValueError(\"SQL Database tests require sqlalchemy to be installed.\")\n\n db_file = file_relative_path(\n __file__,\n os.path.join(\"test_sets\", \"test_cases_for_sql_data_connector.db\"),\n )\n\n engine = sa.create_engine(f\"sqlite:////{db_file}\")\n conn = engine.connect()\n\n # Build a SqlAlchemyDataset using that database\n return SqlAlchemyExecutionEngine(\n name=\"test_sql_execution_engine\",\n engine=conn,\n )\n\n\[email protected]\ndef test_folder_connection_path_csv(tmp_path_factory):\n df1 = pd.DataFrame({\"col_1\": [1, 2, 3, 4, 5], \"col_2\": [\"a\", \"b\", \"c\", \"d\", \"e\"]})\n path = str(tmp_path_factory.mktemp(\"test_folder_connection_path_csv\"))\n df1.to_csv(path_or_buf=os.path.join(path, \"test.csv\"), index=False)\n return str(path)\n\n\[email protected]\ndef test_folder_connection_path_tsv(tmp_path_factory):\n df1 = pd.DataFrame({\"col_1\": [1, 2, 3, 4, 5], \"col_2\": [\"a\", \"b\", \"c\", \"d\", \"e\"]})\n path = str(tmp_path_factory.mktemp(\"test_folder_connection_path_tsv\"))\n df1.to_csv(path_or_buf=os.path.join(path, \"test.tsv\"), sep=\"\\t\", index=False)\n return str(path)\n\n\[email protected]\ndef test_folder_connection_path_parquet(tmp_path_factory):\n df1 = pd.DataFrame({\"col_1\": [1, 2, 3, 4, 5], \"col_2\": [\"a\", \"b\", \"c\", \"d\", \"e\"]})\n path = str(tmp_path_factory.mktemp(\"test_folder_connection_path_parquet\"))\n df1.to_parquet(path=os.path.join(path, \"test.parquet\"))\n return str(path)\n\n\[email protected]\ndef test_db_connection_string(tmp_path_factory, test_backends):\n if \"sqlite\" not in test_backends:\n pytest.skip(\"skipping fixture because sqlite not selected\")\n df1 = pd.DataFrame({\"col_1\": [1, 2, 3, 4, 5], \"col_2\": [\"a\", \"b\", \"c\", \"d\", \"e\"]})\n df2 = pd.DataFrame({\"col_1\": [0, 1, 2, 3, 4], \"col_2\": [\"b\", \"c\", \"d\", \"e\", \"f\"]})\n\n try:\n import sqlalchemy as sa\n\n basepath = str(tmp_path_factory.mktemp(\"db_context\"))\n path = os.path.join(basepath, \"test.db\")\n engine = sa.create_engine(\"sqlite:///\" + str(path))\n df1.to_sql(\"table_1\", con=engine, index=True)\n df2.to_sql(\"table_2\", con=engine, index=True, schema=\"main\")\n\n # Return a connection string to this newly-created db\n return \"sqlite:///\" + str(path)\n except ImportError:\n raise ValueError(\"SQL Database tests require sqlalchemy to be installed.\")\n\n\[email protected]\ndef test_df(tmp_path_factory):\n def generate_ascending_list_of_datetimes(\n k, start_date=datetime.date(2020, 1, 1), end_date=datetime.date(2020, 12, 31)\n ):\n start_time = datetime.datetime(\n start_date.year, start_date.month, start_date.day\n )\n days_between_dates = (end_date - start_date).total_seconds()\n\n datetime_list = [\n start_time\n + datetime.timedelta(seconds=random.randrange(days_between_dates))\n for i in range(k)\n ]\n datetime_list.sort()\n return datetime_list\n\n k = 120\n random.seed(1)\n\n timestamp_list = generate_ascending_list_of_datetimes(\n k, end_date=datetime.date(2020, 1, 31)\n )\n date_list = [datetime.date(ts.year, ts.month, ts.day) for ts in timestamp_list]\n\n batch_ids = [random.randint(0, 10) for i in range(k)]\n batch_ids.sort()\n\n session_ids = [random.randint(2, 60) for i in range(k)]\n session_ids.sort()\n session_ids = [i - random.randint(0, 2) for i in session_ids]\n\n 
events_df = pd.DataFrame(\n {\n \"id\": range(k),\n \"batch_id\": batch_ids,\n \"date\": date_list,\n \"y\": [d.year for d in date_list],\n \"m\": [d.month for d in date_list],\n \"d\": [d.day for d in date_list],\n \"timestamp\": timestamp_list,\n \"session_ids\": session_ids,\n \"event_type\": [\n random.choice([\"start\", \"stop\", \"continue\"]) for i in range(k)\n ],\n \"favorite_color\": [\n \"#\"\n + \"\".join([random.choice(list(\"0123456789ABCDEF\")) for j in range(6)])\n for i in range(k)\n ],\n }\n )\n return events_df\n\n\[email protected]\ndef test_connectable_postgresql_db(sa, test_backends, test_df):\n \"\"\"Populates a postgres DB with a `test_df` table in the `connection_test` schema to test DataConnectors against\"\"\"\n\n if \"postgresql\" not in test_backends:\n pytest.skip(\"skipping fixture because postgresql not selected\")\n\n import sqlalchemy as sa\n\n url = sa.engine.url.URL(\n drivername=\"postgresql\",\n username=\"postgres\",\n password=\"\",\n host=\"localhost\",\n port=\"5432\",\n database=\"test_ci\",\n )\n engine = sa.create_engine(url)\n\n schema_check_results = engine.execute(\n \"SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'connection_test';\"\n ).fetchall()\n if len(schema_check_results) == 0:\n engine.execute(\"CREATE SCHEMA connection_test;\")\n\n table_check_results = engine.execute(\n \"\"\"\nSELECT EXISTS (\n SELECT FROM information_schema.tables\n WHERE table_schema = 'connection_test'\n AND table_name = 'test_df'\n);\n\"\"\"\n ).fetchall()\n if table_check_results != [(True,)]:\n test_df.to_sql(\"test_df\", con=engine, index=True, schema=\"connection_test\")\n\n # Return a connection string to this newly-created db\n return engine\n\n\[email protected]\ndef data_context_with_sql_datasource_for_testing_get_batch(sa, empty_data_context):\n context = empty_data_context\n\n db_file = file_relative_path(\n __file__,\n \"test_sets/test_cases_for_sql_data_connector.db\",\n )\n\n config = yaml.load(\n f\"\"\"\nclass_name: SimpleSqlalchemyDatasource\nconnection_string: sqlite:///{db_file}\n\"\"\"\n + \"\"\"\nintrospection:\n whole_table: {}\n\n daily:\n splitter_method: _split_on_converted_datetime\n splitter_kwargs:\n column_name: date\n date_format_string: \"%Y-%m-%d\"\n\n weekly:\n splitter_method: _split_on_converted_datetime\n splitter_kwargs:\n column_name: date\n date_format_string: \"%Y-%W\"\n\n by_id_dozens:\n splitter_method: _split_on_divided_integer\n splitter_kwargs:\n column_name: id\n divisor: 12\n\"\"\",\n )\n\n try:\n context.add_datasource(\"my_sqlite_db\", **config)\n except AttributeError:\n pytest.skip(\"SQL Database tests require sqlalchemy to be installed.\")\n\n return context\n\n\[email protected]\ndef basic_datasource(tmp_path_factory):\n base_directory: str = str(\n tmp_path_factory.mktemp(\"basic_datasource_runtime_data_connector\")\n )\n\n basic_datasource: Datasource = instantiate_class_from_config(\n config=yaml.load(\n f\"\"\"\nclass_name: Datasource\n\ndata_connectors:\n test_runtime_data_connector:\n module_name: great_expectations.datasource.data_connector\n class_name: RuntimeDataConnector\n runtime_keys:\n - pipeline_stage_name\n - airflow_run_id\n - custom_key_0\n\nexecution_engine:\n class_name: PandasExecutionEngine\n\n \"\"\",\n ),\n runtime_environment={\n \"name\": \"my_datasource\",\n },\n config_defaults={\n \"module_name\": \"great_expectations.datasource\",\n },\n )\n\n return basic_datasource\n", "id": "11515780", "language": "Python", "matching_score": 4.76301383972168, 
"max_stars_count": 0, "path": "tests/conftest.py" }, { "content": "import os\n\nfrom click.testing import CliRunner\n\nfrom great_expectations.cli import cli\nfrom tests.cli.utils import (\n VALIDATION_OPERATORS_DEPRECATION_MESSAGE,\n assert_no_logging_messages_or_tracebacks,\n)\n\n\ndef test_project_check_on_missing_ge_dir_guides_user_to_fix(caplog, tmp_path_factory):\n project_dir = str(tmp_path_factory.mktemp(\"empty_dir\"))\n runner = CliRunner(mix_stderr=False)\n result = runner.invoke(\n cli, [\"project\", \"check-config\", \"-d\", project_dir], catch_exceptions=False\n )\n stdout = result.output\n assert \"Checking your config files for validity\" in stdout\n assert \"Unfortunately, your config appears to be invalid\" in stdout\n assert \"Error: No great_expectations directory was found here!\" in stdout\n assert result.exit_code == 1\n assert_no_logging_messages_or_tracebacks(caplog, result)\n\n\ndef test_project_check_on_valid_project_says_so(caplog, titanic_data_context):\n project_dir = titanic_data_context.root_directory\n runner = CliRunner(mix_stderr=False)\n result = runner.invoke(\n cli, [\"project\", \"check-config\", \"-d\", project_dir], catch_exceptions=False\n )\n assert \"Checking your config files for validity\" in result.output\n assert \"Your config file appears valid\" in result.output\n assert result.exit_code == 0\n assert_no_logging_messages_or_tracebacks(\n my_caplog=caplog,\n click_result=result,\n allowed_deprecation_message=VALIDATION_OPERATORS_DEPRECATION_MESSAGE,\n )\n\n\ndef test_project_check_on_project_with_missing_config_file_guides_user(\n caplog, titanic_data_context\n):\n project_dir = titanic_data_context.root_directory\n # Remove the config file.\n os.remove(os.path.join(project_dir, \"great_expectations.yml\"))\n\n runner = CliRunner(mix_stderr=False)\n result = runner.invoke(\n cli, [\"project\", \"check-config\", \"-d\", project_dir], catch_exceptions=False\n )\n assert result.exit_code == 1\n assert \"Checking your config files for validity\" in result.output\n assert \"Unfortunately, your config appears to be invalid\" in result.output\n assert_no_logging_messages_or_tracebacks(caplog, result)\n", "id": "9788081", "language": "Python", "matching_score": 0.4357372522354126, "max_stars_count": 2, "path": "tests/cli/test_project.py" }, { "content": "import datetime\nimport logging\nfrom typing import Any\n\nimport great_expectations.exceptions as ge_exceptions\nfrom great_expectations.core.batch import BatchDefinition\nfrom great_expectations.core.util import datetime_to_int, parse_string_to_datetime\nfrom great_expectations.datasource.data_connector.sorter import Sorter\n\nlogger = logging.getLogger(__name__)\n\n\nclass DateTimeSorter(Sorter):\n def __init__(self, name: str, orderby: str = \"asc\", datetime_format=\"%Y%m%d\"):\n super().__init__(name=name, orderby=orderby)\n\n if datetime_format and not isinstance(datetime_format, str):\n raise ge_exceptions.SorterError(\n f\"\"\"DateTime parsing formatter \"datetime_format_string\" must have string type (actual type is\n \"{str(type(datetime_format))}\").\n \"\"\"\n )\n\n self._datetime_format = datetime_format\n\n def get_partition_key(self, batch_definition: BatchDefinition) -> Any:\n partition_definition: dict = batch_definition.partition_definition\n partition_value: Any = partition_definition[self.name]\n dt: datetime.date = parse_string_to_datetime(\n datetime_string=partition_value,\n datetime_format_string=self._datetime_format,\n )\n return datetime_to_int(dt=dt)\n\n def 
__repr__(self) -> str:\n doc_fields_dict: dict = {\n \"name\": self.name,\n \"reverse\": self.reverse,\n \"type\": \"DateTimeSorter\",\n \"date_time_format\": self._datetime_format,\n }\n return str(doc_fields_dict)\n", "id": "2121792", "language": "Python", "matching_score": 1.283981204032898, "max_stars_count": 2, "path": "great_expectations/datasource/data_connector/sorter/date_time_sorter.py" }, { "content": "from freezegun import freeze_time\n\nfrom great_expectations.core.util import substitute_all_strftime_format_strings\n\n\n@freeze_time(\"11/05/1955\")\ndef test_substitute_all_strftime_format_strings():\n input_dict = {\n \"month_no\": \"%m\",\n \"just_a_string\": \"Bloopy!\",\n \"string_with_month_word\": \"Today we are in the month %B!\",\n \"number\": \"90210\",\n \"escaped_percent\": \"'%%m' is the format string for month number\",\n \"inner_dict\": {\"day_word_full\": \"%A\"},\n \"list\": [\"a\", 123, \"%a\"],\n }\n expected_output_dict = {\n \"month_no\": \"11\",\n \"just_a_string\": \"Bloopy!\",\n \"string_with_month_word\": \"Today we are in the month November!\",\n \"number\": \"90210\",\n \"escaped_percent\": \"'%m' is the format string for month number\",\n \"inner_dict\": {\"day_word_full\": \"Saturday\"},\n \"list\": [\"a\", 123, \"Sat\"],\n }\n assert substitute_all_strftime_format_strings(input_dict) == expected_output_dict\n", "id": "4972686", "language": "Python", "matching_score": 0.5325660705566406, "max_stars_count": 1, "path": "tests/core/test_util.py" }, { "content": "import importlib\nimport json\nimport logging\nimport os\nimport sys\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\ndef build_gallery(include_core=True, include_contrib_experimental=True):\n logger.info(\"Getting base registered expectations list\")\n import great_expectations\n\n core_expectations = (\n great_expectations.expectations.registry.list_registered_expectation_implementations()\n )\n\n if include_contrib_experimental:\n logger.info(\"Finding contrib modules\")\n contrib_experimental_dir = os.path.join(\n os.path.dirname(__file__),\n \"..\",\n \"..\",\n \"contrib\",\n \"experimental\",\n \"great_expectations_experimental\",\n )\n sys.path.append(contrib_experimental_dir)\n expectations_module = importlib.import_module(\n \"expectations\", \"great_expectations_experimental\"\n )\n for expectation_module in expectations_module.__all__:\n logger.debug(f\"Importing {expectation_module}\")\n importlib.import_module(\n f\"expectations.{expectation_module}\", \"great_expectations_experimental\"\n )\n metrics_module = importlib.import_module(\n \"metrics\", \"great_expectations_experimental\"\n )\n for metrics_module in metrics_module.__all__:\n logger.debug(f\"Importing {metrics_module}\")\n importlib.import_module(\n f\"metrics.{metrics_module}\", \"great_expectations_experimental\"\n )\n\n # Above imports may have added additional expectations from contrib\n all_expectations = (\n great_expectations.expectations.registry.list_registered_expectation_implementations()\n )\n\n if include_core:\n build_expectations = set(all_expectations)\n else:\n build_expectations = set(all_expectations) - set(core_expectations)\n\n logger.info(\n f\"Preparing to build gallery metadata for expectations: {build_expectations}\"\n )\n gallery_info = dict()\n for expectation in build_expectations:\n logger.debug(f\"Running diagnostics for expectation: {expectation}\")\n impl = great_expectations.expectations.registry.get_expectation_impl(\n expectation\n )\n diagnostics = 
impl().run_diagnostics()\n gallery_info[expectation] = diagnostics\n\n return gallery_info\n\n\nif __name__ == \"__main__\":\n gallery_info = build_gallery(include_core=True, include_contrib_experimental=True)\n with open(\"./expectation_library.json\", \"w\") as outfile:\n json.dump(gallery_info, outfile)\n", "id": "6667710", "language": "Python", "matching_score": 0.12156040966510773, "max_stars_count": 1, "path": "assets/scripts/build_gallery.py" }, { "content": "# Simple Pong Project in Python 3\n# By @itsbigrod\n# Part 1: Getting started\n\nimport turtle\nimport winsound\n\nwn = turtle.Screen() #window\nwn.title(\"Pong Game by @itsbigrod\")\nwn.bgcolor(\"white\") #background color\nwn.setup(width=800,height=600) #screen setup\nwn.tracer(0) #makes game faster\n\n# Score\nscore_a = 0\nscore_b = 0 \n\n# Paddle A\npaddle_a = turtle.Turtle() #turtle object \npaddle_a.speed(0) #speed of animation A\npaddle_a.shape(\"square\") #shape of paddle A\npaddle_a.shapesize(stretch_wid=5, stretch_len=1)\npaddle_a.color(\"red\") #color of paddle A\npaddle_a.penup() #picks pen up from drawing line\npaddle_a.goto(-350,0)\n\n\n\n# Paddle B\npaddle_b = turtle.Turtle() #turtle object \npaddle_b.speed(0) #speed of animation B\npaddle_b.shape(\"square\") #shape of paddle B\npaddle_b.shapesize(stretch_wid=5, stretch_len=1)\npaddle_b.color(\"red\") #color of paddle B\npaddle_b.penup() #picks pen up from drawing line\npaddle_b.goto(350,0)\n\n# Ball\nball = turtle.Turtle() #turtle object \nball.speed(0) #speed of animation ball\nball.shape(\"circle\") #shape of ball\nball.color(\"blue\") #color of ball\nball.penup() #picks pen up from drawing line\nball.goto(0,0)\n\n# ball moves by 2 pixels\nball.dx = .2 \nball.dy = .2\n\n# Pen\npen = turtle.Turtle()\npen.speed(0)\npen.color(\"black\")\npen.penup()\npen.hideturtle()\npen.goto(0,260)\npen.write(\"Player A: 0 - Player B: 0\", align=\"center\", font=(\"Courier\", 24, \"normal\"))\n\n# Functions\ndef paddle_a_up():\n y = paddle_a.ycor()\n y += 20\n paddle_a.sety(y)\n\ndef paddle_a_down():\n y = paddle_a.ycor()\n y -= 20\n paddle_a.sety(y)\n\ndef paddle_b_up():\n y = paddle_b.ycor()\n y += 20\n paddle_b.sety(y)\n\ndef paddle_b_down():\n y = paddle_b.ycor()\n y -= 20\n paddle_b.sety(y)\n\n#Keyboard binding\nwn.listen() #listen for keyboard input\nwn.onkeypress(paddle_a_up, \"w\") #calls paddle_a_up on key W\nwn.onkeypress(paddle_a_down, \"s\") #calls paddle_a_down on key S\n\nwn.onkeypress(paddle_b_up, \"Up\") #calls paddle_a_up on key up\nwn.onkeypress(paddle_b_down, \"Down\") #calls paddle_a_down on key down\n\n# Main Game Loop\nwhile True:\n wn.update()\n # Move the ball\n ball.setx(ball.xcor() + ball.dx)\n ball.sety(ball.ycor() + ball.dy)\n\n #Border checking\n if ball.ycor() > 290:\n ball.sety(290)\n ball.dy *= -1\n #winsound.PlaySound(\"ballsound.wav\", winsound.SND_ASYNC)\n\n if ball.ycor() < -290:\n ball.sety(-290) \n ball.dy *= -1\n\n if ball.xcor() > 390:\n ball.goto(0,0)\n ball.dx *= -1\n score_a += 1\n pen.clear()\n pen.write(\"Player A: {} - Player B: {}\".format(score_a, score_b), align=\"center\", font=(\"Courier\", 24, \"normal\"))\n\n\n if ball.xcor() < -390:\n ball.goto(0,0)\n ball.dx *= -1\n score_b += 1\n pen.clear()\n pen.write(\"Player A: {} - Player B: {}\".format(score_a, score_b), align=\"center\", font=(\"Courier\", 24, \"normal\"))\n\n\n # Paddle and ball collisions \n if (ball.xcor() > 340 and ball.xcor() < 350) and (ball.ycor() < paddle_b.ycor() + 40 and ball.ycor() > paddle_b.ycor() -40):\n ball.setx(340)\n ball.dx *= -1\n \n if 
(ball.xcor() < -340 and ball.xcor() > -350) and (ball.ycor() < paddle_a.ycor() + 40 and ball.ycor() > paddle_a.ycor() -40):\n ball.setx(-340)\n ball.dx *= -1", "id": "7086605", "language": "Python", "matching_score": 0.041557759046554565, "max_stars_count": 0, "path": "programs/pong.py" } ]
0.908274
Narasim
[ { "content": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Mar 6 01:29:47 2021\r\n\r\n@author: TOSHIBA\r\n\"\"\"\r\n\r\nimport random\r\nimport datetime\r\nimport tkinter as tk\r\nfrom tkinter import ttk\r\n\r\ngeneSet = \" abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!.\"\r\ncanvas = tk.Tk()\r\ncanvas.title(\"Genetic Password Guessing\")\r\ntext_label = ttk.Label(canvas, text = \"Genetic Password Guessing\")\r\ntext_label_1 = ttk.Label(canvas, text = \"Enter Your Name: \").grid(column = 0, row = 1)\r\ntext_label_2 = ttk.Label(canvas, text = \"Guessed Password: \")\r\ntext_label_3 = ttk.Label(canvas, text = \"Fitness: \")\r\ntext_label_4 = ttk.Label(canvas, text = \"Time to Guess \")\r\ntext_variable = tk.StringVar()\r\ntext_entry = ttk.Entry(canvas, width = 15, textvariable = text_variable)\r\ntext_entry.grid(column = 1, row = 1)\r\ndef change_greeting():\r\n temp = str(text_variable.get())\r\n random.seed()\r\n target = temp\r\n startTime = datetime.datetime.now()\r\n bestParent = generate_parent(len(target))\r\n bestFitness = get_fitness(bestParent, target)\r\n while True:\r\n child = mutate(bestParent)\r\n childFitness = get_fitness(child, target)\r\n if bestFitness >= childFitness:\r\n continue\r\n if childFitness >= len(bestParent):\r\n break\r\n bestFitness = childFitness\r\n bestParent = child\r\n timeDiff = datetime.datetime.now() - startTime\r\n fitness = get_fitness(bestParent, target)\r\n text_label_2.configure(text = \"Guessed Password: \" + child)\r\n text_label_3.configure(text = \"Fitness: \" + str(fitness))\r\n text_label_4.configure(text = \"Time to Guess: \" + str(timeDiff))\r\ndef generate_parent(length):\r\n genes = []\r\n genes.extend(random.sample(geneSet, length))\r\n return ''.join(genes)\r\n\r\ndef get_fitness(guess, target):\r\n sum = 0\r\n for i in range(len(guess)):\r\n if(guess[i] == target[i]):\r\n sum+=1\r\n return sum\r\ndef mutate(parent):\r\n index = random.randrange(0, len(parent))\r\n childGenes = list(parent)\r\n alternate = random.sample(geneSet, 1)\r\n childGenes[index] = alternate[0]\r\n return ''.join(childGenes)\r\n\r\nevent_button = ttk.Button(canvas, text=\"Click me and see what happens\", command = change_greeting).grid(column = 2, row = 1)\r\ntext_label.grid(column = 0, row = 0)\r\ntext_label_2.grid(column = 0, row = 2)\r\ntext_label_3.grid(column = 0, row = 3)\r\ntext_label_4.grid(column = 0, row = 4)\r\ncanvas.resizable(False, False)\r\ncanvas.mainloop()", "id": "4529796", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "GUIbased_Genetic_password_guessing.py" }, { "content": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Mar 17 16:42:26 2021\r\n\r\n@author: TOSHIBA\r\n\"\"\"\r\n\r\nimport random\r\nmaxm = 10000000\r\nminm = 1\r\nnumber = random.randint(minm, maxm)\r\nno_of_guess = 1\r\ndirection = 0\r\nguess = random.randint(minm, maxm)\r\nwhile(True):\r\n if(guess == number):\r\n print(\"Right Guess \", guess)\r\n break\r\n elif(guess>number):\r\n print(\"A bit low than\", guess)\r\n maxm = guess\r\n guess = random.randint(minm, maxm)\r\n else:\r\n print(\"A bit higher than\", guess)\r\n minm = guess\r\n guess = random.randint(minm, maxm)\r\n no_of_guess += 1\r\n \r\nprint(\"No of Guesses :\", no_of_guess) \r\n\r\n\r\n#A bit low than 8836393\r\n#A bit low than 1912762\r\n#A bit low than 1846807\r\n#A bit low than 1576197\r\n#A bit low than 1274541\r\n#A bit low than 1048948\r\n#A bit higher than 103898\r\n#A bit low than 618194\r\n#A bit higher than 248142\r\n#A bit higher than 511765\r\n#A bit 
higher than 522807\r\n#A bit low than 587180\r\n#A bit higher than 546581\r\n#A bit higher than 559721\r\n#A bit higher than 571593\r\n#A bit low than 580356\r\n#A bit higher than 573015\r\n#A bit higher than 573882\r\n#A bit low than 576523\r\n#A bit low than 574679\r\n#A bit higher than 574190\r\n#A bit low than 574364\r\n#A bit low than 574314\r\n#A bit low than 574311\r\n#A bit low than 574244\r\n#A bit higher than 574196\r\n#A bit higher than 574209\r\n#A bit low than 574222\r\n#Right Guess 574210\r\n#No of Guesses : 29", "id": "12169110", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "number_guess.py" } ]
0
ninedollarmilk
[ { "content": "print ('fixed a bug')\n", "id": "3432335", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "bug.py" } ]
0
danielSilva21
[ { "content": "from fastapi.encoders import jsonable_encoder\nimport uuid \nimport datetime\nfrom app.crud import assets as assets_crud\n\nTYPE = \"answer\"\n\nasync def get(collection, id: str):\n return await collection.find_one({\"_id\": id, \"type\": TYPE})\n\nasync def get_all(collection, survey_id):\n return await collection.find({\"asset_id\": survey_id, \"type\": TYPE}).to_list(1000)\n\nasync def create(collection, asset_id: str, user_id: str, data: dict):\n obj = {\"data\": data}\n if assets_crud.get(collection, asset_id):\n obj[\"type\"] = TYPE\n obj[\"_id\"] = uuid.uuid4().hex\n obj[\"created_at\"] = datetime.datetime.now()\n obj[\"user_id\"] = user_id\n obj[\"asset_id\"] = asset_id\n obj = jsonable_encoder(obj)\n db_answer = await collection.insert_one(obj)\n return await get(collection, db_answer.inserted_id)\n else:\n raise Exception(\"Asset does not exist\")\n\nasync def update(collection, id: str, data):\n data[\"updated_at\"] = datetime.datetime.now()\n await collection.update_one( { \"_id\": id, \"type\": TYPE }, { \"$set\": data })\n return await get(collection,id)\n \nasync def delete(collection, id: str):\n return await collection.delete_one({ \"_id\": id, \"type\": TYPE })", "id": "10578475", "language": "Python", "matching_score": 3.749218463897705, "max_stars_count": 0, "path": "surveyapp/app/app/crud/answers.py" }, { "content": "from fastapi.encoders import jsonable_encoder\nfrom app.models.surveys import AssetCreateUpdateSchema\nimport uuid \nimport datetime\n\nTYPE = \"asset\"\n\n\nasync def get(collection, id: str):\n return await collection.find_one({\"_id\": id, \"type\": TYPE})\n\nasync def get_all(collection):\n return await collection.find().to_list(1000)\n\nasync def create(collection, asset: AssetCreateUpdateSchema):\n asset = asset.__dict__\n asset[\"type\"] = TYPE\n asset[\"_id\"] = uuid.uuid4().hex\n asset[\"created_at\"] = datetime.datetime.now()\n asset = jsonable_encoder(asset)\n db_survey = await collection.insert_one(asset)\n return await get(collection, db_survey.inserted_id)\n\nasync def update(collection, id: str, data):\n data[\"updated_at\"] = datetime.datetime.now()\n await collection.update_one( { \"_id\": id, \"type\": TYPE }, { \"$set\": data })\n return await get(collection,id)\n \nasync def delete(collection, id: str):\n return await collection.delete_one({\"_id\": id, \"type\": TYPE})", "id": "2341711", "language": "Python", "matching_score": 0.6726821660995483, "max_stars_count": 0, "path": "surveyapp/app/app/crud/assets.py" }, { "content": "from re import I\nfrom flask import Blueprint, render_template, request, flash, jsonify, g,session,abort\nimport json, requests, math, os\n\nfrom werkzeug.utils import secure_filename\nfrom annotator.notification import Notification\nfrom authInterlink import authInterlink\n\n\nfrom flask import redirect\nfrom flask.helpers import url_for,make_response\nfrom flask_mail import Mail, Message\nfrom annotator import description\nfrom tests.helpers import MockUser\n\nfrom tqdm import tqdm\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urljoin, urlparse\nfrom werkzeug.utils import redirect\nfrom annotator.annotation import Annotation\nfrom annotator.document import Document\nfrom annotator.description import Description\n\nfrom cryptography.fernet import Fernet\n\nfrom datetime import date\n\nfrom flask import current_app\n\nimport urllib.parse\nfrom urllib.parse import unquote\nfrom urllib import parse\nimport math\nimport uuid\nimport secrets\n\nfrom config import settings\n\nfrom 
website.languages import getLanguagesList\n\n\nfrom flask_login import (\n LoginManager,\n current_user,\n login_required,\n login_user,\n logout_user,\n)\n\n\n\nviews = Blueprint('views',__name__,static_folder=\"static\",template_folder=\"templates\")\n\n\[email protected]('/')\ndef inicio():\n \n #Cargo los combos:\n\n vectorUrls=Description._get_uniqueValuesUrl()\n urlList=[]\n for urls in vectorUrls:\n key=urls[\"key\"]\n if(key!=\"\"):\n domain = urlparse(key).netloc\n if not (domain in urlList):\n urlList.append(domain)\n print(urlList)\n\n\n\n\n\n vectorPAs=Description._get_uniqueValues(campo=\"padministration\")\n paList=[]\n for pas in vectorPAs:\n key=pas[\"key\"]\n\n if key==\"\":\n key='Unassigned'\n\n paList.append(key)\n print(paList)\n\n\n textoABuscar=request.args.get(\"searchText\")\n padministration=request.args.get(\"padministration\")\n domain=request.args.get(\"domain\")\n\n page=request.args.get(\"page\",1)\n registroInicial=(int(page)-1)*10\n \n \n\n totalRegistros=0\n if(textoABuscar==None or textoABuscar==''):\n res= Description.search(offset=registroInicial)\n totalRegistros= Description.count()\n else:\n res= Description._get_Descriptions(textoABuscar=textoABuscar,padministration=padministration,url=domain,offset=registroInicial)\n totalRegistros= Description._get_DescriptionsCounts(textoABuscar=textoABuscar,padministration=padministration,url=domain)\n\n for itemDesc in res:\n urlMainPage = [url['url'] for url in itemDesc['urls'] if url['isMain'] == True][0]\n itemDesc['mainUrl']=urlMainPage\n\n\n\n pagesNumbers=math.ceil(totalRegistros/10)\n \n paginacion={'page':page,'pagesNumbers':pagesNumbers,'totalRegisters':totalRegistros,'searchBox':textoABuscar,'padministration':padministration,'url':domain}\n\n\n #Cargo las Notificaciones\n listNotifications=Notification._get_Notification_byModerCategory(category=\"survey\")\n #listNotifications.append(Notification._get_Notification_byModerCategory(category=\"survey\"))\n numRes=listNotifications['numRes']\n listNotifications=listNotifications['notifications']\n\n return render_template(\"home.html\",descriptions=res,urls=urlList,publicsa=paList,paginacion=paginacion,notifications=listNotifications,notificationNum=numRes)\n\n\n\n\n\n#Formulatio de carga de Pagina\[email protected](\"/buscar\",methods=[\"POST\"])\ndef buscar():\n sitio = request.form[\"nm\"]\n userNombre=request.form[\"usr\"]\n return redirect(url_for(\"views.modifica\",rutaPagina=sitio,userId=userNombre))\n\n\n\n#Formulatio de carga de Pagina\[email protected](\"/dashboard1\")\ndef dashboard1():\n return render_template(\"dashboard1.html\")\n\n\n\n\n\n#Formulatio de carga de Pagina\[email protected](\"/registrar\",methods=[\"POST\"])\ndef saveDescription():\n\n itemsDict = request.form.to_dict()\n\n title = itemsDict.pop(\"createTitle\")\n description = itemsDict.pop(\"createDescription\")\n keywords = itemsDict.pop(\"createKeywords\")\n userNombre = itemsDict.pop(\"usr\")\n\n descriptionId = itemsDict.pop(\"descriptionId\")\n\n #Obtengo el valor de la administracion publica\n try:\n publicAdmin = itemsDict.pop(\"createPA\")\n except:\n publicAdmin=\"\"\n try:\n newPA = itemsDict.pop(\"addNewPA\")\n except:\n newPA=\"\"\n if newPA!=\"\":\n publicAdmin=newPA\n\n todayDateTime=datetime.datetime.now().replace(microsecond=0).isoformat()\n\n #Obtengo el listado de urls nuevo\n\n if 'MainUrlRadio' in itemsDict.keys():\n mainPageItem=itemsDict['MainUrlRadio']\n else:\n mainPageItem='url_1'\n\n\n \n\n\n listadoUrlNuevo={}\n for key in itemsDict:\n 
if(key.startswith('url_')):\n\n isMain=False\n if(key==mainPageItem):\n isMain=True\n\n webAdress=itemsDict[key]\n langCode=itemsDict['langCode_'+key.split('_')[1]]\n langCodeSel=itemsDict['sel_'+key.split('_')[1]]\n\n if langCode=='':\n langCode=langCodeSel\n\n\n if len(langCode)>2 :\n langCode='Undefined'\n listadoUrlNuevo[webAdress]=[langCode,isMain]\n\n\n if(len(listadoUrlNuevo)==0):\n #Si el campo de lista esta vacio miro el campo url\n flash(\"It is needed to add at least one URL of description.\",\"info\")\n return jsonify({\"error\":\"It is needed to add at least one URL of description\"})\n\n #Busco si alguno de los URLS ya ha sido incluido en existe:\n existePreviamente=False\n listErrorDescriptionSameUrl=[]\n for itemUrl in listadoUrlNuevo:\n editDescripcion =Description._get_Descriptions_byURI(url=itemUrl)\n if len(editDescripcion) != 0:\n existePreviamente=True\n nombreDesc=editDescripcion[0]['title']\n textoError='Error: La descripcion '+nombreDesc+' contiene la url:'+itemUrl\n listErrorDescriptionSameUrl.append(textoError)\n \n \n \n if descriptionId=='':\n\n #Si existe una descripcion con alguna de las descripciones presentar error creacion\n if existePreviamente:\n listErroresDes = \" \" \n listErroresDes.join(listErrorDescriptionSameUrl)\n flash(\"One or some of the urls had been used in another description.\"+listErroresDes,\"info\")\n return jsonify({'Errores':listErroresDes})\n\n #Create:\n perms = {'read': ['group:__world__']}\n moderat = {}\n\n\n #Creo listados de Urls:\n urls=[]\n for itemUrlFormat in listadoUrlNuevo:\n newUrl= {\n 'createdate': todayDateTime,\n 'url': itemUrlFormat,\n 'language': listadoUrlNuevo[itemUrlFormat][0],\n 'isMain': listadoUrlNuevo[itemUrlFormat][1],\n 'email': current_user.email\n }\n urls.append(newUrl)\n\n newdescription=Description(title=title,description=description,\n keywords=keywords,moderators=moderat,\n padministration=publicAdmin,\n permissions=perms,urls=urls\n )\n \n \n if(title==\"\" or description==\"\" or publicAdmin==\"\" ):\n description=editDescripcion \n flash(\"Algunos campos de la descripción no son correctos.\",\"info\")\n return redirect('/descriptionDetail')\n\n else:\n newdescription.save(index=\"description\")\n description=newdescription\n flash(\"Record created successfully.\",\"info\")\n\n else:\n\n editDescripcion =Description._get_Descriptions_byId(id=descriptionId)[0]\n #Update: \n editDescripcion.title=title\n editDescripcion.description=description\n editDescripcion.keywords=keywords\n editDescripcion.padministration=publicAdmin\n editDescripcion.updated=todayDateTime\n\n\n listUrlUpdate=editDescripcion['urls']\n listModificado=editDescripcion['urls']\n\n #Busco Url a borrar:\n contador=0\n for itemUrl in listUrlUpdate:\n if itemUrl['url'] not in listadoUrlNuevo.keys():\n listModificado.pop(contador) \n contador=contador+1 \n \n #Actualizo el listado de links:\n for key in listadoUrlNuevo:\n\n webAdress=key\n langCode=listadoUrlNuevo[key][0]\n\n #Reviso que todos esten y los que no estan los agrego:\n\n existe=False\n for itemUrl in listModificado:\n if ( itemUrl['url'] == webAdress ):\n #Ya existe\n itemUrl['language']=langCode\n itemUrl['isMain']=listadoUrlNuevo[key][1]\n existe=True\n break\n\n if existe==False:\n #Es nuevo y Agrego\n newUrl= {\n 'createdate': todayDateTime,\n 'url': webAdress,\n 'language': langCode,\n 'isMain': listadoUrlNuevo[key][1],\n 'email': current_user.email\n }\n listModificado.append(newUrl)\n \n editDescripcion['urls']=listModificado\n\n\n #Comprobar los permisos de 
edicion del usuario:\n nroEnc=editDescripcion._get_checkPermisos_byId(email=userNombre,id=descriptionId)\n\n if(nroEnc!=0):\n editDescripcion.updateFields(index=\"description\") \n description=editDescripcion \n flash(\"Registro editado correctamente.\",\"info\")\n\n else:\n description=editDescripcion \n flash(\"No tienes permisos de moderador para editar esta descripción.\",\"info\")\n\n \n return redirect('/description/'+description['id']+'/edit')\n\n\n \n#return redirect(url_for(\"views.modifica\",rutaPagina=sitio,userId=userNombre))\n\n\"\"\" def generar_clave():\n clave= Fernet.generate_key()\n session[\"claveCript\"]=clave\n # with open(\"clave.key\",\"wb\") as archivo_clave:\n # archivo_clave.write(clave)\n\ndef cargar_clave():\n return session[\"claveCript\"]\n # try:\n # return open(\"clave.key\",\"rb\").read()\n # except:\n # return None \"\"\"\n\n\[email protected](\"/claimModeration\",methods=[\"POST\"])\ndef claimModeration():\n \n itemsDict = request.form.to_dict()\n\n firstName=itemsDict.pop(\"firstName\").title()\n lastName=itemsDict.pop(\"lastName\").title()\n userPosition=itemsDict.pop(\"userPosition\").title()\n oneUrl=itemsDict.pop(\"oneUrl\")\n userMotivations=itemsDict.pop(\"userMotivations\")\n\n userMotivations=userMotivations[0].upper()+userMotivations[1:] \n\n\n\n #This are the URI's\n urlList=[]\n for key in itemsDict:\n if(key.startswith(\"id_\")):\n urlList.append(itemsDict[key])\n\n if(len(urlList)==0): \n flash(\"It is needed to add at least one URL of description.\",\"danger\")\n return authInterlink.moderate()\n\n\n #Check if the urls of descriptions are valid:\n allUrlValid=True\n listMsgError=[]\n for key in itemsDict:\n if(key.startswith(\"id_\")):\n encontrado=Description._get_Descriptions_byId(id=itemsDict[key])\n if (len(encontrado)==0):\n allUrlValid=False\n listMsgError.append('The description for '+itemsDict[key]+' do not exist.')\n\n if(not allUrlValid):\n for itemError in listMsgError:\n flash(itemError)\n flash('Before requesting moderation privileges the descriptions must be created.')\n return authInterlink.moderate()\n else:\n\n itemsDict['email'] = current_user.email\n\n dataClaimEncoded=urllib.parse.urlencode(itemsDict)\n\n #Now will send the email:\n msg = Message('The user '+firstName+' '+lastName+' ha realizado un claim to be a moderator.', sender = '<EMAIL>', recipients = [current_user.email])\n\n sites =\" \".join(str(x) for x in urlList)\n claimInfo= \"The user {} {} who is a {} \".format(firstName,lastName,userPosition)+\"send a request to be a moderator of the following descriptions identifiers: \"\n \n\n\n #Encripto los datos del Claim:\n\n \n message = dataClaimEncoded\n\n key = settings.CRYPT_KEY\n \n # cargar_clave() \n # if key ==None:\n # generar_clave()\n # key =cargar_clave()\n\n fernet = Fernet(key) \n encMessage = fernet.encrypt(message.encode())\n print(\"original string: \", message) \n print(\"encrypted string: \", encMessage) \n\n\n\n textHref='http://127.0.0.1:5000/gui/aproveModerator?datos='+encMessage.decode('ascii')\n\n msg.html = \"\"\"<td width='700' class='esd-container-frame' align='center' valign='top'> \n <table cellpadding='0' cellspacing='0' width='100%' style='background-color: #515151; border-radius: 30px 36\n 333333333333333333333333333333333333333333333333333333333333333333333333333333333333333\n 30px 30px 30px; border-collapse: separate;' bgcolor='#515151'>\n <tbody>\n <tr>\n <td align='center' class='esd-block-text es-p20t'>\n <h1 style='color: #ffffff;'>Description moderation request</h1>\n </td>\n 
</tr>\n <tr>\n <td align='center' style='padding-right: 140px; padding-left: 140px;' class='esd-block-text es-m-p20l es-m-p20r es-p30t'>\n <p style='font-size: 16px; letter-spacing: 1px; color: #ffffff;'>\"\"\"+claimInfo+\"\"\"</p>\n <p style='font-size: 16px; letter-spacing: 1px; color: #ffffff; color: white;\n padding: 14px 25px;\n text-align: center;\n text-decoration: none;'>\"\"\"+sites+\"\"\"</p>\n </td>\n </tr>\n <tr>\n <td align='center' style='padding-right: 110px; padding-left: 110px;' class='esd-block-text es-m-p20l es-m-p20r es-p30t es-p40b'>\n <p style='font-size: 16px; letter-spacing: 1px; color: #ffffff;'>The motivations are:</p>\n <p style='font-size: 16px; letter-spacing: 1px; color: #ffffff;'>\"\"\"+userMotivations+\"\"\"</p>\n </td>\n </tr>\n <tr>\n <td align='center' style='padding-bottom: 50px; font-size: 20px; color: #ffffff;'><a target='_blank' style='background-color: #f44336;\n color: white;\n padding: 14px 25px;\n text-align: center;\n text-decoration: none;\n display: inline-block; border-radius: 5px;' href='\"\"\"+textHref+ \"\"\"'>Aproved the request.</a></td>\n </tr>\n </tbody>\n </table>\n </td>\"\"\"\n \n\n #Agrego los archivos \n\n uploaded_file = request.files['archivoIdentificacion']\n filename = secure_filename(uploaded_file.filename)\n if filename != '':\n\n file_ext = os.path.splitext(filename)[1]\n if file_ext not in current_app.config['UPLOAD_EXTENSIONS']:\n abort(400)\n\n #Guardo Archivo\n uploaded_file.save(filename)\n # Lo adjunto al email\n with current_app.open_resource(filename) as fp:\n msg.attach(filename,'application/pdf', fp.read())\n #Lo borro del disco\n os.remove(filename)\n \n\n \n mail = Mail(current_app)\n mail.send(msg)\n\n\n flash(\"The moderation request has been send.\",\"info\")\n\n return authInterlink.moderate()\n\n #return render_template(\"moderate.html\",descriptions=res,urls=urlList,publicsa=paList,paginacion=paginacion)\n\n\[email protected](\"/aproveModerator\",methods=[\"GET\",\"POST\"])\n@login_required\ndef aproveModerator():\n\n argumentos=request.args.to_dict()\n\n #Obtain Datos datos\n datosBin=argumentos.pop('datos').encode('ascii')\n\n # key = cargar_clave() \n # if key ==None:\n # generar_clave()\n # key =cargar_clave()\n \n key = settings.CRYPT_KEY\n\n fernet = Fernet(key) \n \n if unquote(datosBin)!='':\n\n argumentos = fernet.decrypt(datosBin).decode() \n argumentos =unquote(argumentos)\n\n \n listArgs2=parse.parse_qsl(argumentos)\n argumentos=dict(listArgs2)\n\n argKeys=argumentos.keys()\n email=argumentos.pop('email')\n\n\n existUrl=any(list(map(lambda x: x.startswith('url_'), argKeys))) \n\n urlList=[]\n lisDescriptions={}\n if existUrl:\n for key in argumentos:\n if(key.startswith('id_')):\n #urlList.append(argumentos[key])\n encontrado=Description._get_Descriptions_byId(id=argumentos[key])\n if (len(encontrado)!=0):\n urlList.append(encontrado[0])\n #lisDescriptions[argumentos[key]]=Description._get_Descriptions_byURI(url=argumentos[key])[0]\n \n \n \n \n today = date.today()\n endDate = today.replace(today.year + 1)\n else:\n email=''\n urlList=[]\n today = date.today()\n endDate = today.replace(today.year + 1)\n #endDate=today.strftime(\"%Y-%m-%d\")\n\n\n return render_template(\"approveClaim.html\",email=email,argumentos=urlList, now=today.strftime(\"%Y-%m-%d\"),endDate=endDate.strftime(\"%Y-%m/%d\"))\n\[email protected](\"/aprovarClaimsList\",methods=[\"POST\"])\ndef aprovarClaimsList():\n\n argumentos=request.form.to_dict()\n usuarioModerator=argumentos.pop('email')\n 
adminComment=argumentos.pop('commentBox')\n\n argumentosList=list(argumentos.values())\n \n contador=0\n nroActualizaciones=0\n listMsg=[]\n for i in range(math.ceil(len(argumentosList)/4)):\n if(i!=0):\n contador=i*4\n estado=argumentosList[contador]\n descriptionId=argumentosList[contador+1]\n initDate=argumentosList[contador+2]\n endDate=argumentosList[contador+3]\n \n\n #Agrego como moderador en la descripcion:\n descriptions=Description._get_Descriptions_byId(id=descriptionId)\n \n if len(descriptions)==1:\n if estado==\"on\":\n descripcionAct=descriptions[0]\n\n if(len(descripcionAct['moderators'])==0):\n descripcionAct['moderators']=[] \n\n descripcionAct['moderators'].append({\n \"created\": initDate,\n \"expire\": endDate,\n \"email\": usuarioModerator\n })\n descripcionAct.updateModerators(index=\"description\")\n nroActualizaciones=nroActualizaciones+1\n listMsg.append(\"The moderation of \"+descriptions[0]['title']+\" has been assigned.\")\n elif len(descriptions)==0:\n listMsg.append(\"The description could not be found (Most be created first) !.\")\n\n listActionsBody=\"\"\n for msnItem in listMsg:\n listActionsBody=listActionsBody+\"\"\"<p style='font-size: 16px; letter-spacing: 1px; color: #ffffff;'>\"\"\"+msnItem+\"\"\"</p>\"\"\" \n \n\n msg = Message('Your claim has been resolved.', sender = '<EMAIL>', recipients = [usuarioModerator])\n\n msg.html = \"\"\"<td width='700' class='esd-container-frame' align='center' valign='top'> \n <table cellpadding='0' cellspacing='0' width='100%' style='background-color: #515151; border-radius: 30px 36\n 333333333333333333333333333333333333333333333333333333333333333333333333333333333333333\n 30px 30px 30px; border-collapse: separate;' bgcolor='#515151'>\n <tbody>\n <tr>\n <td align='center' class='esd-block-text es-p20t'>\n <h2 style='color: #ffffff;'>Your claim to be a moderator has been resolved.</h2>\n \n </td>\n </tr>\n\n <tr>\n <td align='center' style='padding-right: 110px; padding-left: 110px;' class='esd-block-text es-m-p20l es-m-p20r es-p30t es-p40b'>\n \"\"\"+listActionsBody+\"\"\"\n </td>\n </tr>\n\n <tr>\n <td align='center' style='padding-right: 110px; padding-left: 110px;' class='esd-block-text es-m-p20l es-m-p20r es-p30t es-p40b'>\n <p style='font-size: 16px; letter-spacing: 1px; color: #ffffff;'>The admin comments are:</p>\n <p style='font-size: 16px; letter-spacing: 1px; color: #ffffff;'>\"\"\"+adminComment+\"\"\"</p>\n </td>\n </tr>\n\n \n \n </tbody>\n </table>\n </td>\"\"\"\n \n \n mail = Mail(current_app)\n mail.send(msg)\n\n for msnItem in listMsg:\n flash(msnItem,\"info\")\n\n\n return redirect(url_for(\"views.aproveModerator\",datos='',argumentos=argumentosList))\n \n\n\n#Formulatio de carga de Pagina\[email protected](\"/visor\",methods=[\"GET\"])\ndef visor():\n return render_template(\"prototipo.html\")\n \[email protected](\"/survey\",methods=[\"GET\"])\ndef survey():\n return render_template(\"survey.html\")\n\n\n\n\n\n#Cargo la pagina desde beautifulSoup y la muestro en pantalla\[email protected](\"/modifica/<userId>/<path:rutaPagina>\",methods=[\"GET\",\"POST\"])\ndef modifica(rutaPagina,userId):\n\n #En el caso que se tiene interes en una anotacion en particular\n argumentos=request.args.to_dict()\n anotationSel=''\n\n scriptToFocusParragraph=''\n if('annotationSel' in argumentos.keys()):\n anotationSel=argumentos.pop('annotationSel')\n session['anotationSel']=anotationSel\n \n #Obtengo el usuario de session \n #Lo pongo en la session de cookie local:\n if not current_user.is_anonymous:\n 
session['userId'] = current_user.email # setting session date\n session['username'] = current_user.email\n userId= current_user.email\n else:\n session['username'] = 'Annonymous'\n userId= 'Annonymous'\n\n print(\"La ruta de la Pagina es: \"+rutaPagina)\n print(\"El nombre de usuario es: \"+userId)\n \n \n\n headersUserAgent={\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'\n }\n\n\n #Obtengo el codigo:\n response=requests.get(rutaPagina,headers=headersUserAgent)\n resp_Contenido=response.content\n #print(resp_Contenido.decode())\n soup = BeautifulSoup(resp_Contenido,'html.parser')\n\n\n #Quitamos los scripts:\n for data in soup(['script','pre','noscript']):\n # Remove tags\n data.decompose()\n\n #print(soup.decode)\n\n try:\n headTag =soup.html.head\n except:\n headTag =soup.html\n #Inserto las librerias de css de la pagina:\n \n #1 Obtengo los archivos css\n css_files = []\n\n\n count = 0\n for css in soup.find_all(\"link\"):\n if css.attrs.get(\"href\"):\n # if the link tag has the 'href' attribute\n css_url = urljoin(rutaPagina, css.attrs.get(\"href\"))\n if \"css\" in css_url:\n count += 1\n css_files.append(css_url)\n anotationTemp = soup.new_tag('link', href=css_url,rel=\"stylesheet\")\n headTag.append(anotationTemp)\n print(\"Line{}: {}\".format(count, css_url))\n\n for a_Link in soup.find_all(\"a\"):\n if a_Link.attrs.get(\"href\"):\n hrefVal=a_Link.attrs.get(\"href\")\n if hrefVal.startswith('/'):\n newURLVal = urljoin(rutaPagina, hrefVal)\n a_Link.attrs['href']=\"/gui/modifica/<EMAIL>/\"+newURLVal.lower()\n print(a_Link)\n\n\n \n print(\"Total CSS insertados en the page:\", len(css_files))\n\n #Inserto las librerias del AnnotationJS\n #Creo los tags necesarios:\n \n anotationcss1 = soup.new_tag('link', href=\"/gui/static/lib/annotator-full.1.2.9/annotator.min.css\",rel=\"stylesheet\")\n anotationcss2 = soup.new_tag('link', href=\"/gui/static/src/css/style.css\",rel=\"stylesheet\")\n anotationcss3 = soup.new_tag('link', href=\"/gui/static/lib/css/annotator.touch.css\",rel=\"stylesheet\")\n\n fontAwesome3 = soup.new_tag('link', href=\"https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css\",rel=\"stylesheet\")\n\n if not current_user.is_anonymous:\n userName = soup.new_tag( 'meta', id='dataBackEnd', username=current_user.email, portaugmenter=settings.PORTAUGMENTER)\n \n try:\n headTag.append(userName)\n except:\n print(\"Excepcion en Username\")\n\n \n try:\n headTag.append(anotationcss1)\n except:\n print(\"Excepcion en ccs1\")\n\n try:\n headTag.append(anotationcss2)\n except:\n print(\"Excepcion en ccs1\")\n \n\n try:\n headTag.append(anotationcss3)\n except:\n print(\"Excepcion en ccs1\")\n \n try:\n headTag.append(fontAwesome3)\n except:\n print(\"Excepcion en ccs1\")\n \n\n try:\n soup.html.head=headTag\n except:\n print(\"Excepcion en ccs1\")\n\n soup = obtenerReemplazarImagenes(rutaPagina,soup)\n\n #Ingreso el script para iniciar Aplicacion Annotation\n try:\n bodyTag=soup.html.body\n except:\n print(\"Excepcion en ccs1\")\n \n jqueryScript1 = soup.new_tag('script', src=\"/gui/static/lib/jquery-1.9.1.js\")\n jqueryScript2 = soup.new_tag('script', src=\"/gui/static/lib/annotator-full.1.2.9/annotator-full.min.js\")\n jqueryScript3 = soup.new_tag('script', src=\"/gui/static/lib/jquery-i18n-master/jquery.i18n.min.js\")\n jqueryScript4 = soup.new_tag('script', src=\"/gui/static/lib/jquery.dateFormat.js\")\n jqueryScript5 = soup.new_tag('script', 
src=\"/gui/static/lib/jquery.slimscroll.js\")\n\n jqueryScript12 = soup.new_tag('script', src=\"/gui/static/locale/en/annotator.js\")\n jqueryScript13 = soup.new_tag('script', src=\"/gui/static/lib/tinymce/tinymce.min.js\")\n jqueryScript14 = soup.new_tag('script', src=\"/gui/static/src/richEditor.js\")\n\n\n\n jqueryScript6 = soup.new_tag('script', src=\"/gui/static/lib/lunr.js-0.5.7/lunr.min.js\")\n jqueryScript7 = soup.new_tag('script', src=\"/gui/static/locale/en/annotator.js\")\n jqueryScript8 = soup.new_tag('script', src=\"/gui/static/lib/annotator.touch.js\")\n jqueryScript9 = soup.new_tag('script', src=\"/gui/static/src/view_annotator.js\")\n jqueryScript10 = soup.new_tag('script', src=\"/gui/static/src/categories.js\")\n jqueryScript11 = soup.new_tag('script', src=\"/gui/static/src/search.js\")\n\n try:\n bodyTag.append(jqueryScript1)\n bodyTag.append(jqueryScript2)\n bodyTag.append(jqueryScript3)\n bodyTag.append(jqueryScript4)\n bodyTag.append(jqueryScript5)\n bodyTag.append(jqueryScript6)\n bodyTag.append(jqueryScript7)\n bodyTag.append(jqueryScript8)\n bodyTag.append(jqueryScript9)\n bodyTag.append(jqueryScript10)\n bodyTag.append(jqueryScript11)\n\n bodyTag.append(jqueryScript12)\n bodyTag.append(jqueryScript13)\n bodyTag.append(jqueryScript14)\n except:\n print(\"Excepcion en ccs1\")\n \n\n\n\n\n\n\n anotationIniScript = soup.new_tag('script')\n anotationInitScriptTemp= \"\"\" \n \n jQuery(function ($) {\n\n \n \n $.i18n.load(i18n_dict);\n // Customise the default plugin options with the third argument.\n var annotator = $('body').annotator().annotator().data('annotator');\n var propietary = '\"\"\"+userId+\"\"\"';\n annotator.addPlugin('Permissions', {\n user: propietary,\n permissions: {\n 'read': [propietary],\n 'update': [propietary],\n 'delete': [propietary],\n 'admin': [propietary]\n },\n showViewPermissionsCheckbox: true,\n showEditPermissionsCheckbox: false\n });\n\n sessionStorage.setItem('user', '\"\"\"+userId+\"\"\"'); \n\n $('body').annotator().annotator('addPlugin', 'RichEditor');\n $('body').annotator().annotator('addPlugin', 'Categories', {\n feedback: 'annotator-hl-destacat',\n question: 'annotator-hl-subratllat',\n term: 'annotator-hl-term'\n }\n );\n $('body').annotator().annotator('addPlugin', 'AnnotatorViewer');\n $('body').annotator().annotator(\"addPlugin\", \"Touch\");\n\n\n \n //let uriAdress =$(location).attr('href');\n //const uriAdressBase = uriAdress.split('#')[0];\n\n //Dejo unicamente la primera parte del uri\n uriAdressBase = '\"\"\"+rutaPagina+\"\"\"'; \n\n console.log(uriAdressBase)\n $('body').annotator().annotator('addPlugin', 'Store',{\n annotationData: {uri:uriAdressBase},\n loadFromSearch: {uri:uriAdressBase}\n }\n );\n\n \n \n //noinspection JSJQueryEfficiency\n $('body').annotator().annotator('addPlugin', 'Search');\n\n //Annotation scroll\n $('#anotacions-uoc-panel').slimscroll({height: '100%'});\n\n //$('body').annotator().annotator(\"setupPlugins\");\n\n });\n\n \"\"\"\n\n anotationIniScript.string =anotationInitScriptTemp\n\n\n try: \n bodyTag.append(anotationIniScript)\n\n #Inserto\n # Es como poner una emvoltura sobre un Tag\n bodyTag.wrap(soup.new_tag(\"div\",id=\"contenidoAnotar\"))\n soup.html.body=bodyTag\n except:\n print(\"Excepcion en ccs1\")\n \n\n \n\n headers = {'Content-Type': 'text/html',\n 'x-annotator-auth-token':generate_token()}\n\n return make_response(soup.prettify(), 200,headers) \n\ndef generate_token():\n return jwt.encode({\n 'consumerKey': settings.CONSUMER_KEY,\n 'userId': current_user.id,\n 'issuedAt': 
_now().isoformat() + 'Z',\n 'ttl': CONSUMER_TTL\n }, CONSUMER_SECRET)\n\n\n\ndef obtenerReemplazarImagenes(rutaPagina,soup):\n #De la misma forma busco todas las imagenes:\n urls = []\n for img in tqdm(soup.find_all(\"img\"), \"Extracting images\"):\n img_url = img.attrs.get(\"src\")\n if not img_url:\n # if img does not contain src attribute, just skip\n continue\n \n # make the URL absolute by joining domain with the URL that is just extracted\n img_url = urljoin(rutaPagina, img_url)\n\n try:\n pos = img_url.index(\"?\")\n img_url = img_url[:pos]\n except ValueError:\n pass\n \n # finally, if the url is valid\n if is_valid(img_url):\n urls.append(img_url)\n #print(urls)\n\n #Reemplazo las fuentes de las imagenes\n for img in soup.findAll('img'):\n for img_urlLine in urls:\n if img['src'] in img_urlLine:\n print(\"Cambia \"+img['src']+\" por: \"+img_urlLine)\n img['src']=img_urlLine\n break\n\n return soup\n\n\n\ndef is_valid(url):\n \"\"\"\n Checks whether `url` is a valid URL.\n \"\"\"\n parsed = urlparse(url)\n return bool(parsed.netloc) and bool(parsed.scheme)\n\n\n\nimport datetime\nimport jwt\n\n# Replace these with your details\n\nCONSUMER_SECRET = secrets.token_urlsafe(16)\n\n# Only change this if you're sure you know what you're doing\nCONSUMER_TTL = settings.CONSUMER_TTL#\n\[email protected](\"/token\")\ndef generate_token():\n return jwt.encode({\n 'consumerKey': settings.CRYPT_KEY,\n 'userId': 1,\n 'issuedAt': _now().isoformat() + 'Z',\n 'ttl': CONSUMER_TTL\n }, CONSUMER_SECRET)\n\ndef _now():\n return datetime.datetime.utcnow().replace(microsecond=0)", "id": "2017763", "language": "Python", "matching_score": 8.323101043701172, "max_stars_count": 0, "path": "website/views.py" }, { "content": "from flask import Blueprint, jsonify, flash\nimport requests, math\nfrom urllib.parse import urljoin, urlparse\nimport datetime\nimport uuid\nfrom flask import current_app, g\n\nfrom flask_babel import format_number,gettext,format_decimal, format_currency, format_percent\n\n\nfrom flask import Flask, render_template, redirect, request, url_for, session\nfrom flask_login import (\n LoginManager,\n current_user,\n login_required,\n login_user,\n logout_user,\n)\nfrom annotator.survey import Survey\n\n\nfrom authInterlink.user import User\n\nfrom annotator.annotation import Annotation\nfrom annotator.description import Description\nfrom annotator.notification import Notification\nfrom website.languages import getLanguagesList\n\nauthInterlink = Blueprint('authInterlink', __name__,template_folder=\"./gui/templates\")\n\n#Genero Secretos para los estados:\ntok1 = uuid.uuid4()\ntok2 = uuid.uuid4()\n\nAPP_STATE = tok1.hex\nNONCE = tok2.hex\n\[email protected](\"/login\")\ndef login():\n # get request params\n query_params = {'client_id': current_app.config[\"CLIENT_ID\"],\n 'redirect_uri': current_app.config[\"REDIRECT_URI\"],\n 'scope': \"openid email profile\",\n 'state': APP_STATE,\n 'nonce': NONCE,\n 'response_type': 'code',\n 'response_mode': 'query'}\n\n # build request_uri\n request_uri = \"{base_url}?{query_params}\".format(\n base_url=current_app.config[\"AUTH_URI\"],\n query_params=requests.compat.urlencode(query_params)\n )\n\n return redirect(request_uri)\n\n\[email protected](\"/logout\", methods=[\"GET\", \"POST\"])\n@login_required\ndef logout():\n logout_user()\n #response = redirect(config[\"end_session_endpoint\"])\n payload = {'id_token_hint': session['id_token'],\n 'post_logout_redirect_uri': \"http://127.0.0.1:5000/home\",\n 'state': APP_STATE}\n #headers = {'Content-Type': 
'application/x-www-form-urlencoded'}\n r = requests.get(\n current_app.config[\"END_SESSION_ENDPOINT\"],\n params=payload,\n )\n r.url\n r.text\n session.clear()\n #return response\n #return render_template(\"home.html\")\n return redirect(current_app.config[\"END_SESSION_ENDPOINT\"]) #Por ahora queda asi.\n\n\[email protected](\"/about\")\ndef about():\n return render_template(\"about.html\")\n \n\n\[email protected](\"/dashboard\")\n@login_required\ndef dashboard():\n\n #Cargo los combos:\n\n vectorUrls=Description._get_uniqueValuesUrl()\n urlList=[]\n for urls in vectorUrls:\n key=urls[\"key\"]\n if(key!=\"\"):\n domain = urlparse(key).netloc\n if not (domain in urlList):\n urlList.append(domain)\n print(urlList)\n\n\n\n\n\n vectorPAs=Description._get_uniqueValues(campo=\"padministration\")\n paList=[]\n for pas in vectorPAs:\n key=pas[\"key\"]\n\n if key==\"\":\n key='Unassigned'\n\n paList.append(key)\n print(paList)\n\n\n textoABuscar=request.args.get(\"searchText\")\n padministration=request.args.get(\"padministration\")\n domain=request.args.get(\"domain\")\n\n page=request.args.get(\"page\",1)\n registroInicial=(int(page)-1)*10\n \n \n\n totalRegistros=0\n if(textoABuscar==None or textoABuscar==''):\n res= Description.search(offset=registroInicial)\n totalRegistros= Description.count()\n else:\n res= Description._get_Descriptions(textoABuscar=textoABuscar,padministration=padministration,url=domain,offset=registroInicial)\n totalRegistros= Description._get_DescriptionsCounts(textoABuscar=textoABuscar,padministration=padministration,url=domain)\n\n #Cargo los números de anotaciones por categoria\n for itemDesc in res:\n \n #Obtengo los Urls:\n listUrl=[]\n for url in itemDesc['urls']:\n listUrl.append(url['url'])\n\n #Cargo datos estadisticos de las descripciones\n resCategory=Annotation.descriptionStats(Annotation,uris=listUrl)\n \n nroFeedbacks=0\n nroQuestions=0\n nroTerms=0\n\n nroFeedProgress=0\n nroFeedApproved=0\n nroQuesProgress=0\n nroQuesApproved=0\n nroTermProgress=0\n nroTermApproved=0\n \n #Obtengo la informacion estadistica:\n if(len(resCategory)>0):\n \n for itemCategory in resCategory:\n \n cateGroup=itemCategory['key']\n \n \n if(cateGroup=='feedback'):\n nroFeedbacks=itemCategory['doc_count']\n listStates=itemCategory['group_state']['buckets']\n \n for itemState in listStates:\n cateState=itemState['key']\n nroState=itemState['doc_count']\n if(cateState==0):#In Progress\n nroFeedProgress=nroState\n if(cateState==2):#In Approved\n nroFeedApproved=nroState \n\n if(cateGroup=='question'):\n nroQuestions=itemCategory['doc_count']\n listStates=itemCategory['group_state']['buckets']\n \n for itemState in listStates:\n cateState=itemState['key']\n nroState=itemState['doc_count']\n if(cateState==0):#In Progress\n nroQuesProgress=nroState\n if(cateState==2):#In Approved\n nroQuesApproved=nroState \n\n if(cateGroup=='term'):\n nroTerms=itemCategory['doc_count']\n listStates=itemCategory['group_state']['buckets']\n \n for itemState in listStates:\n cateState=itemState['key']\n nroState=itemState['doc_count']\n if(cateState==0):#In Progress\n nroTermProgress=nroState\n if(cateState==2):#In Approved\n nroTermApproved=nroState \n\n #Cargo los valores totales\n itemDesc['nroTerms']=nroTerms\n itemDesc['nroQuest']=nroQuestions\n itemDesc['nroFeeds']=nroFeedbacks \n\n #Cargo los progressBar con valores por estados.\n # Progreso Total (%) = Approved * 100 / (InProgress + Approved)\n #Feedback Progress:\n\n #Incluyo validacion de la division x / 0 (if statement) \n \n progressFeed= ( 
(nroFeedApproved * 100) / ( nroFeedProgress + nroFeedApproved ) ) if ( nroFeedProgress + nroFeedApproved ) != 0 else 0\n progressTerm= ( (nroTermApproved * 100) / ( nroTermProgress + nroTermApproved ) ) if ( nroTermProgress + nroTermApproved ) != 0 else 0\n progressQues= ( (nroQuesApproved * 100) / ( nroQuesProgress + nroQuesApproved ) ) if ( nroQuesProgress + nroQuesApproved ) != 0 else 0\n\n itemDesc['progressFeed']=progressFeed\n itemDesc['progressTerm']=progressTerm\n itemDesc['progressQues']=progressQues\n\n\n textoStats=(\"<b>Feedback (\"+str(nroFeedApproved)+\"/\"+str(nroFeedApproved+nroFeedProgress)+\")</b> : \"+str(round(progressFeed))+\"% <br>\"+\n \"<b>Terms (\"+str(nroTermApproved)+\"/\"+str(nroTermApproved+nroTermProgress)+\")</b>: \"+str(round(progressTerm))+\"% <br>\"+\n \"<b>Questions (\"+str(nroQuesApproved)+\"/\"+str(nroQuesApproved+nroQuesProgress)+\")</b>: \"+str(round(progressQues))+\"% <br>\")\n \n \n itemDesc['textoStats']=textoStats\n\n progressTotalApproved = nroFeedApproved + nroTermApproved + nroQuesApproved\n progressTotalInProgress = nroFeedProgress + nroTermProgress + nroQuesProgress\n progressTotal= ( (progressTotalApproved * 100) / ( progressTotalInProgress + progressTotalApproved ) ) if ( progressTotalInProgress + progressTotalApproved ) != 0 else 0\n\n itemDesc['progressTotal']=round(progressTotal)\n\n\n pagesNumbers=math.ceil(totalRegistros/10)\n \n paginacion={'page':page,'pagesNumbers':pagesNumbers,'totalRegisters':totalRegistros,'searchBox':textoABuscar,'padministration':padministration,'url':domain}\n\n listNotifications,numRes=cargarNotifications()\n\n return render_template(\"dashboard.html\",descriptions=res,urls=urlList,publicsa=paList,paginacion=paginacion,notifications=listNotifications,notificationNum=numRes)\n\n\[email protected](\"/moderate\")\n@login_required\ndef moderate():\n\n #Cargo los combos:\n\n vectorUrls=Description._get_uniqueValues(campo=\"url\")\n urlList=[]\n for urls in vectorUrls:\n key=urls[\"key\"]\n if(key!=\"\"):\n domain = urlparse(key).netloc\n if not (domain in urlList):\n urlList.append(domain)\n print(urlList)\n\n\n\n\n\n vectorPAs=Description._get_uniqueValues(campo=\"padministration\")\n paList=[]\n for pas in vectorPAs:\n key=pas[\"key\"]\n\n if key==\"\":\n key='Unassigned'\n\n paList.append(key)\n print(paList)\n\n\n textoABuscar=request.args.get(\"searchText\")\n padministration=request.args.get(\"padministration\")\n domain=request.args.get(\"domain\")\n\n page=request.args.get(\"page\",1)\n registroInicial=(int(page)-1)*10\n \n \n\n totalRegistros=0\n if(textoABuscar==None or textoABuscar==''):\n res= Description.search(offset=registroInicial)\n totalRegistros= Description.count()\n else:\n res= Description._get_Descriptions(textoABuscar=textoABuscar,padministration=padministration,url=domain,offset=registroInicial)\n totalRegistros= Description._get_DescriptionsCounts(textoABuscar=textoABuscar,padministration=padministration,url=domain)\n \n res=Description._get_Descript_byModerEmail(email=current_user.email)\n totalRegistros= Description._get_Descript_byModerEmailCounts(email=current_user.email)\n\n\n pagesNumbers=math.ceil(totalRegistros/10)\n \n paginacion={'page':page,'pagesNumbers':pagesNumbers,'totalRegisters':totalRegistros,'searchBox':textoABuscar,'padministration':padministration,'url':domain}\n\n\n #Cargo las Notificaciones\n listNotifications,numRes=cargarNotifications()\n\n return 
render_template(\"moderate.html\",descriptions=res,urls=urlList,publicsa=paList,paginacion=paginacion,notifications=listNotifications,notificationNum=numRes)\n\n\[email protected](\"/survey\")\n@login_required\ndef survey():\n\n textoABuscar=request.args.get(\"searchText\")\n \n page=request.args.get(\"page\",1)\n registroInicial=(int(page)-1)*10\n \n totalRegistros=0\n\n #Searchs:\n \"\"\" if(textoABuscar==None or textoABuscar==''):\n res= Survey.search(offset=registroInicial)\n totalRegistros= Survey.count()\n else:\n res= Survey._get_Surveys(textoABuscar=textoABuscar,offset=registroInicial) \"\"\"\n \n resTemp=Survey._get_all()\n res=resTemp['surveys']\n totalRegistros= resTemp['numRes']\n\n\n pagesNumbers=math.ceil(totalRegistros/10)\n \n paginacion={'page':page,'pagesNumbers':pagesNumbers,'totalRegisters':totalRegistros,'searchBox':textoABuscar}\n\n #Cargo las Notificaciones\n listNotifications,numRes=cargarNotifications()\n\n #Defino la direccion del SurveyHost\n surveyHost=current_app.config['SURVEYINTERLINK_URL']\n surveyApiVersion=current_app.config['SURVEYAPI_VERSION']\n\n return render_template(\"surveys.html\",surveys=res,paginacion=paginacion,notifications=listNotifications,notificationNum=numRes,surveyHost=surveyHost,surveyApiVersion=surveyApiVersion)\n\[email protected](\"/surveyInstantiator\",methods=[\"POST\"])\ndef surveyInstantiator():\n\n #Redirecciono al editor:\n return redirect(current_app.config['SURVEYINTERLINK_URL']+\"/assets/\"+\"instantiate\")\n\ndef obtainUsersEmail(listItemsBucket=[]):\n listUsers=[]\n for itemBucket in listItemsBucket:\n userEmail=itemBucket['key']\n if userEmail!='Anonymous' and userEmail!='Annonymous' :\n listUsers.append(userEmail)\n return listUsers\n\n\n\[email protected](\"/lauchSurvey\",methods=[\"POST\"])\ndef surveyLauchProcess():\n\n #Obtengo los valores del Survey:\n selTargetUsers=request.form.get(\"selTargetList\")\n listUsersArea=request.form.get(\"listUsersArea\")\n is_optional=request.form.get(\"is_optional\")\n ini_date=request.form.get(\"ini_date\")\n fin_date=request.form.get(\"fin_date\")\n selEvent=request.form.get(\"selEvent\")\n\n mandatory=True\n if is_optional =='on':\n mandatory=False\n\n\n #Creo la notification:\n idAsset=request.form.get('assetId')\n title=request.form.get('surveyTitle')\n description= request.form.get('surveyDesc')\n\n #Defino the users:\n listUsersEmails=[]\n if selTargetUsers==\"everybody\":\n\n listUsersWhoAnnotated=obtainUsersEmail(Annotation.currentActiveUsers())\n listUsersWhoModerate= obtainUsersEmail(Description.currentActiveUsersModerators())\n listUsersEmails=list(set(listUsersWhoAnnotated+listUsersWhoModerate))\n \n else:\n listUsersEmails= listUsersArea.split(\";\")\n \n\n\n for userEmail in listUsersEmails:\n\n email=userEmail\n target_url=current_app.config['SURVEYINTERLINK_URL']+ \"/assets/\"+idAsset+\"/view/\"\n\n newNotification=Notification(\n title=title,\n email=email,\n description=description,\n target_url=target_url,\n resolved=False,\n category=\"survey\",\n idAsset=idAsset,\n triggerEvent=selEvent,\n triggerDate=ini_date,\n isMandatory=mandatory\n )\n \n\n\n newNotification.save(index=\"notification\")\n\n\n #Se ha lanzado exitosamente el suvey:\n flash(\"The survey has been lauched.\",\"info\")\n \n #Redirecciono al editor:\n return redirect(\"/survey\")\n\n\[email protected]('/advanceSearch',)\ndef advanceSearch():\n\n\n res = Annotation.search(query={'user': current_user.email})\n \n\n\n return render_template(\"advanceSearch.html\", user=current_user, 
anotations=res)\n\[email protected]('/description/<string:descriptionId>',)\n@login_required\ndef description(descriptionId=None):\n\n description = Description._get_Descriptions_byId(id=descriptionId)[0]\n \n urlMainPage = [url['url'] for url in description['urls'] if url['isMain'] == True][0]\n\n categoria=request.args.get('category')\n\n page=request.args.get(\"page\",1)\n registroInicial=(int(page)-1)*10\n\n \n\n\n if(categoria == None or categoria=='all' ):\n categoria=''\n \n res=[]\n stats=[]\n numRes=0\n listUrlsPages=[]\n for itemUrl in description['urls']:\n url=itemUrl['url']\n listUrlsPages.append(url)\n\n # Cargo las replies de cada annotacion:\n stats=stats+Annotation.annotationStats(Annotation,uri=itemUrl['url'])\n\n res= Annotation._get_by_multiple(Annotation,textoABuscar='',estados={'InProgress':True,'Archived':True,'Approved':True},urls=listUrlsPages,category=categoria,notreply=True,page=page)\n numRes= res['numRes']\n res=res['annotations']\n\n\n dictStats={}\n for itemStat in stats:\n clave=itemStat['key']\n val=itemStat['doc_count']\n dictStats[clave]=val\n\n for itemRes in res:\n if itemRes['id'] in dictStats.keys():\n itemRes['nroReplies']=dictStats[itemRes['id']]\n else:\n itemRes['nroReplies']=0\n \n page=request.args.get(\"page\",1)\n pagesNumbers=math.ceil(numRes/10)\n \n paginacion={'page':page,'pagesNumbers':pagesNumbers,'totalRegisters':numRes}\n\n #Cargo las Notificaciones\n listNotifications,numRes=cargarNotifications()\n\n return render_template(\"description.html\", user=current_user, description=description,anotations=res,categoryLabel=categoria,paginacion=paginacion,urlMainPage=urlMainPage,notifications=listNotifications,notificationNum=numRes)\n # return 'la desc: '+category+'lauri is'+str(uri) \n\[email protected]('/description/<string:descriptionId>/<string:option>',)\n@login_required\ndef editDescription(descriptionId=None,option='Edit'):\n\n vectorPAs=Description._get_uniqueValues(campo=\"padministration\")\n paList=[]\n for pas in vectorPAs:\n key=pas[\"key\"]\n\n if key==\"\":\n key='Unassigned'\n\n paList.append(key)\n print(paList)\n\n description = Description._get_Descriptions_byId(id=descriptionId)[0] \n \n for itemUrl in description['urls']:\n if itemUrl['language']!='Undefined':\n itemUrl['langText']=getLanguagesList()[itemUrl['language']]\n else:\n itemUrl['langText']=\"Undefined\"\n\n#Cargo las Notificaciones\n listNotifications,numRes=cargarNotifications() \n return render_template(\"descriptionDetail.html\", user=current_user, description=description,option=option,publicsa=paList,notifications=listNotifications,notificationNum=numRes)\n\n\n\[email protected]('/subjectPage/<string:descriptionId>/<string:annotatorId>',)\n@login_required\ndef subjectPage(descriptionId=None,annotatorId=None):\n\n description = Description._get_Descriptions_byId(id=descriptionId)[0]\n\n urlMainPage = [url['url'] for url in description['urls'] if url['isMain'] == True][0]\n\n annotation = Annotation._get_Annotation_byId(id=annotatorId)[0]\n\n nroReplies = Annotation.count(query={ 'idReplyRoot': annotatorId ,'category':'reply' })\n replies = Annotation.search(query={ 'idReplyRoot': annotatorId ,'category':'reply' },limit=nroReplies)\n\n nroRepliesOfAnnotation=nroReplies\n #nroRepliesOfAnnotation = Annotation.count(query={ '_id': description['id'] ,'category':'reply','idReplyRoot':annotatorId })\n \n #Cargo las Notificaciones\n listNotifications,numRes=cargarNotifications()\n\n return render_template(\"subjectPage.html\", user=current_user, 
annotation=annotation,description=description,categoryLabel=annotation['category'],replies=replies,nroReplies=nroRepliesOfAnnotation,urlMainPage=urlMainPage,notifications=listNotifications,notificationNum=numRes)\n # return 'la desc: '+category+'lauri is'+str(uri) \n\[email protected]('/subjectPage/<string:descriptionId>/<string:annotatorId>/<string:option>', methods=[\"GET\", \"POST\"])\n@login_required\ndef changeAnnotation(descriptionId=None,annotatorId=None,option=None):\n\n \n if option == 'state':\n \n argumentos=request.json\n newstate=argumentos.pop('stateToChange')\n commentsChangeState=argumentos.pop('commentsChangeState')\n objtype=argumentos.pop('objtype')\n\n annotationRootId=''\n losvalores=argumentos.keys()\n if \"annotationRootId\" in losvalores:\n annotationRootId=argumentos.pop('annotationRootId')\n \n\n annotation = Annotation._get_Annotation_byId(id=annotatorId)[0]\n \n\n #Registro el cambio y quien lo hizo\n if(len(annotation['statechanges'])==0):\n annotation['statechanges']=[] \n\n #Si el estado inicial es prohibido y el estado final es prohibido\n #Se resetea el estado a in progress.\n if(annotation['state']==3 & int(newstate)==3):\n newstate=0\n \n\n annotation['statechanges'].append({\n \"initstate\": annotation['state'],\n \"endstate\": int(newstate),\n \"text\": commentsChangeState,\n \"objtype\" : objtype,\n \"date\": datetime.datetime.now().replace(microsecond=0).isoformat(),\n \"user\": current_user.email\n })\n\n annotation['state']=int(newstate)\n annotation.updateState()\n\n return jsonify(annotation)\n \n \n\n elif option=='like':\n\n argumentos=request.json\n\n\n vote=int(argumentos.pop('stateToChange'))\n\n newstate=vote\n commentsChangeState=argumentos.pop('commentsChangeState')\n objtype=argumentos.pop('objtype')\n\n annotationRootId=''\n losvalores=argumentos.keys()\n if \"annotationRootId\" in losvalores:\n annotationRootId=argumentos.pop('annotationRootId')\n\n #Verifico si anteriormente este usuario ha realizado una anotacion\n annotation = Annotation._get_Annotation_byId(id=annotatorId)[0]\n nroLikes=annotation.userAlreadyLike(email=current_user.email,id=annotatorId)\n\n #Un usuario solo puede votar una vez\n if nroLikes==0:\n\n \n\n #Registro el cambio y quien lo hizo\n if(len(annotation['statechanges'])==0):\n annotation['statechanges']=[] \n \n #Registro el cambio y quien lo hizo\n initState=0\n if(vote==1):\n initState=int(annotation['like'])\n annotation['like']=initState+1\n objtype='annotation_like'\n \n elif vote==-1:\n initState=int(annotation['dislike'])\n annotation['dislike']=initState+1\n objtype='annotation_like'\n\n #Registro el cambio de estado\n annotation['statechanges'].append({\n \"initstate\": initState,\n \"endstate\": initState+1,\n \"text\": commentsChangeState,\n \"objtype\" : objtype,\n \"date\": datetime.datetime.now().replace(microsecond=0).isoformat(),\n \"user\": current_user.email\n })\n\n\n annotation.updateLike()\n annotation.updateState()\n\n return jsonify(annotation)\n\n\n\n\[email protected](\"/descriptionDetail\")\n@login_required\ndef descriptionDetail():\n\n vectorPAs=Description._get_uniqueValues(campo=\"padministration\")\n paList=[]\n for pas in vectorPAs:\n key=pas[\"key\"]\n\n if key==\"\":\n key='Unassigned'\n\n paList.append(key)\n print(paList)\n\n res = Annotation.search(query={'user': current_user.email})\n \n\n return render_template(\"descriptionDetail.html\", user=current_user, anotations=res,publicsa=paList)\n\n\[email protected](\"/profile\")\n@login_required\ndef profile():\n\n #Cargo las 
Notificaciones\n listNotifications,numRes=cargarNotifications()\n\n return render_template(\"profile.html\", user=current_user,notifications=listNotifications,notificationNum=numRes)\n\[email protected](\"/settings\")\n@login_required\ndef settings(): \n\n results=[]\n \n anthony =gettext('Anthony')\n\n us_num=format_number(1099)\n results.append(us_num)\n \n us_num=format_currency(1099.98, 'USD')\n results.append(us_num)\n \n us_num=format_decimal(1.2346)\n results.append(us_num)\n \n #Cargo las Notificaciones\n listNotifications,numRes=cargarNotifications()\n\n return render_template(\"settings.html\", user=current_user,results=results,anthony=anthony,notifications=listNotifications,notificationNum=numRes)\n\n\[email protected](\"/oidc_callback\")\ndef callback():\n headers = {'Content-Type': 'application/x-www-form-urlencoded'}\n \n code = request.args.get(\"code\")\n\n #la pagina que se pretende ingresar es:\n paginaNext=''\n if 'next' in session.keys():\n paginaNext=session['next']\n\n if not code:\n return \"The code was not returned or is not accessible\", 403\n query_params = {'grant_type': 'authorization_code',\n 'code': code,\n 'redirect_uri': request.base_url\n }\n query_params = requests.compat.urlencode(query_params)\n exchange = requests.post(\n current_app.config[\"TOKEN_URI\"],\n headers=headers,\n data=query_params,\n auth=(current_app.config[\"CLIENT_ID\"], current_app.config[\"CLIENT_SECRET\"]),\n ).json()\n\n # Get tokens and validate\n if not exchange.get(\"token_type\"):\n return \"Unsupported token type. Should be 'Bearer'.\", 403\n access_token = exchange[\"access_token\"]\n id_token = exchange[\"id_token\"]\n\n session['id_token']=id_token\n\n #if not is_access_token_valid(access_token, config[\"issuer\"], config[\"client_id\"]):\n # return \"Access token is invalid\", 403\n\n #if not is_id_token_valid(id_token, config[\"issuer\"], config[\"client_id\"], NONCE):\n # return \"ID token is invalid\", 403\n\n # Authorization flow successful, get userinfo and login user\n userinfo_response = requests.get(current_app.config[\"USERINFO_URI\"],\n headers={'Authorization': f'Bearer {access_token}'}).json()\n\n unique_id = userinfo_response[\"sub\"]\n user_email = userinfo_response[\"email\"]\n user_name = userinfo_response[\"given_name\"]\n\n user = User(\n id_=unique_id, name=user_name, email=user_email\n )\n\n if not User.get(unique_id):\n User.create(unique_id, user_name, user_email)\n\n login_user(user)\n # g.user =user\n\n session.pop('_flashes', None)\n\n if paginaNext!=\"\":\n return redirect(paginaNext)\n else:\n return redirect(url_for(\"authInterlink.dashboard\"))\n\ndef cargarNotifications():\n #Cargo las Notificaciones\n listNotifications=Notification._get_Notification_byModerCategory(category=\"survey\")\n #listNotifications.append(Notification._get_Notification_byModerCategory(category=\"survey\"))\n numRes=listNotifications['numRes']\n listNotifications=listNotifications['notifications']\n return listNotifications,numRes\n\n", "id": "169262", "language": "Python", "matching_score": 4.824705600738525, "max_stars_count": 0, "path": "authInterlink/authInterlink.py" }, { "content": "\"\"\"\nThis module implements a Flask-based JSON API to talk with the annotation store via the\nAnnotation model.\nIt defines these routes:\n * Root\n * Index\n * Create\n * Read\n * Update\n * Delete\n * Search\n * Raw ElasticSearch search\nSee their descriptions in `root`'s definition for more detail.\n\"\"\"\nfrom __future__ import absolute_import\n\nimport csv\nimport json\nfrom 
operator import truediv\n\nfrom elasticsearch.exceptions import TransportError\nfrom flask import Blueprint, Response, session, redirect, flash\nfrom flask import current_app, g\nfrom flask import request\nfrom flask import url_for\nfrom flask_login import current_user\nfrom six import iteritems\n\nfrom annotator.atoi import atoi\nfrom annotator.annotation import Annotation\nfrom annotator.document import Document\nfrom annotator.description import Description\nfrom annotator.elasticsearch import RESULTS_MAX_SIZE\nfrom annotator.notification import Notification\nfrom annotator.survey import Survey\n\nstore = Blueprint('store', __name__)\n\nCREATE_FILTER_FIELDS = ('updated', 'created', 'consumer')#, 'id')\nUPDATE_FILTER_FIELDS = ('updated', 'created', 'user', 'consumer')\n\n\n# We define our own jsonify rather than using flask.jsonify because we wish\n# to jsonify arbitrary objects (e.g. index returns a list) rather than kwargs.\ndef jsonify(obj, *args, **kwargs):\n try:\n res = json.dumps(obj, indent=None if request.is_xhr else 2)\n except:\n res = json.dumps(obj, indent=None if False else 2)\n return Response(res, mimetype='application/json', *args, **kwargs)\n\n\[email protected]_request\ndef before_request():\n if not hasattr(g, 'annotation_class'):\n g.annotation_class = Annotation\n \n if not hasattr(g, 'notification_class'):\n g.notification_class = Notification\n \n if not hasattr(g, 'description_class'):\n g.description_class = Description\n\n user = g.auth.request_user(request)\n if user is not None:\n g.user = user\n elif not hasattr(g, 'user'):\n g.user = None\n\n\n\n\n\n\[email protected]_request\ndef after_request(response):\n print(\"LLEga al AFTER REQUEST\")\n ac = 'Access-Control-'\n rh = response.headers\n\n rh[ac + 'Allow-Origin'] = request.headers.get('origin', '*')\n rh[ac + 'Expose-Headers'] = 'Content-Length, Content-Type, Location'\n\n if request.method == 'OPTIONS':\n rh[ac + 'Allow-Headers'] = ('Content-Length, Content-Type, '\n 'X-Annotator-Auth-Token, X-Requested-With')\n rh[ac + 'Allow-Methods'] = 'GET, POST, PUT, DELETE, OPTIONS'\n rh[ac + 'Max-Age'] = '86400'\n\n return response\n\n\n# ROOT\[email protected]('/')\ndef root():\n return jsonify({\n 'message': \"Annotator Store API\",\n 'links': {\n 'annotation': {\n 'create': {\n 'method': 'POST',\n 'url': url_for('.create_annotation', _external=True),\n 'query': {\n 'refresh': {\n 'type': 'bool',\n 'desc': (\"Force an index refresh after create \"\n \"(default: true)\")\n }\n },\n 'desc': \"Create a new annotation\"\n },\n 'read': {\n 'method': 'GET',\n 'url': url_for('.read_annotation',\n docid=':id',\n _external=True),\n 'desc': \"Get an existing annotation\"\n },\n 'update': {\n 'method': 'PUT',\n 'url':\n url_for(\n '.update_annotation',\n docid=':id',\n _external=True),\n 'query': {\n 'refresh': {\n 'type': 'bool',\n 'desc': (\"Force an index refresh after update \"\n \"(default: true)\")\n }\n },\n 'desc': \"Update an existing annotation\"\n },\n 'delete': {\n 'method': 'DELETE',\n 'url': url_for('.delete_annotation',\n docid=':id',\n _external=True),\n 'desc': \"Delete an annotation\"\n }\n },\n 'search': {\n 'method': 'GET',\n 'url': url_for('.search_annotations', _external=True),\n 'desc': 'Basic search API'\n },\n 'search_raw': {\n 'method': 'GET/POST',\n 'url': url_for('.search_annotations_raw', _external=True),\n 'desc': ('Advanced search API -- direct access to '\n 'ElasticSearch. 
Uses the same API as the '\n 'ElasticSearch query endpoint.')\n }\n }\n })\n\n\n# INDEX\[email protected]('/annotations')\ndef index():\n if current_app.config.get('AUTHZ_ON'):\n # Pass the current user to do permission filtering on results\n user = g.user\n else:\n user = None\n\n annotations = g.annotation_class.search(user=user)\n return jsonify(annotations)\n\n# INDEX\[email protected]('/notifications')\ndef notificationIndex():\n if current_app.config.get('AUTHZ_ON'):\n # Pass the current user to do permission filtering on results\n user = g.user\n else:\n user = None\n\n notifications = Notification._get_all()\n return jsonify(notifications)\n\n# INDEX\[email protected]('/surveys')\ndef surveysIndex():\n if current_app.config.get('AUTHZ_ON'):\n # Pass the current user to do permission filtering on results\n user = g.user\n else:\n user = None\n\n surveys = Survey._get_all() \n return jsonify(surveys)\n\[email protected]('/completeSurvey')\ndef completeaSurvey():\n #Tengo que poner la notificacion como realizada.\n idAsset=request.args.get('assetId')\n notification=Notification._get_Notification_byAssetId(assetId=idAsset)\n\n notification=notification['notifications'][0]\n notification['resolved']=True\n\n notification.updateFieldResolve(index=\"notification\")\n\n\n\n return redirect('/dashboard')\n \[email protected]('/saveSurvey')\ndef saveSurvey():\n \n idAsset=request.args.get('assetId')\n title=request.args.get('surveyTitle')\n description= request.args.get('surveyDesc')\n #Create a new survey:\n \n\n newSurvey=Survey(title=title,\n description=description,\n idAsset=idAsset,\n isMandatory=True\n )\n \n newSurvey.save(index=\"survey\")\n \n\n return redirect(current_app.config['SURVEYINTERLINK_URL']+\"/assets/\"+idAsset+\"/edit\")\n\[email protected]('/updateSurvey')\ndef updateSurvey():\n\n #Tengo que poner la notificacion como realizada.\n idAsset=request.args.get('assetId')\n title=request.args.get('surveyTitle')\n description= request.args.get('surveyDesc')\n\n #Obtengo el survey usando el Assetid\n surveyEncontrado=Survey._get_Survey_byAssetId(idAsset=idAsset)\n\n surveyEncontrado=surveyEncontrado['surveys'][0]\n #Actualizo a new survey:\n \n surveyEncontrado['title']=title\n surveyEncontrado['description']=description\n \n \n Survey.updateFields(surveyEncontrado,index=\"survey\")\n \n\n\n return redirect('/survey')\n\n\n# CREATE\[email protected]('/notifications', methods=['POST'])\ndef create_notification():\n # Only registered users can create annotations\n if g.user is None:\n return _failed_authz_response('create annotation')\n\n if request.json is not None:\n notification = g.notification_class(\n _filter_input(\n request.json,\n CREATE_FILTER_FIELDS))\n\n notification['consumer'] = g.user.consumer.key\n if _get_annotation_user(notification) != g.user.id:\n notification['user'] = g.user.id\n \n if 'username' in session:\n notification['user'] = session['username']\n else:\n notification['user'] = current_user.email\n\n\n #print(\"El id inicial es:\"+annotation['id'])\n\n if hasattr(g, 'before_annotation_create'):\n g.before_annotation_create(notification)\n\n if hasattr(g, 'after_annotation_create'):\n notification.save(refresh=False)\n g.after_annotation_create(notification)\n \n \n\n refresh = request.args.get('refresh') != 'false'\n notification.save(refresh=refresh)\n\n #print(\"El id final es:\"+annotation['id'])\n\n #location = url_for('.read_notification', docid=notification['id'])\n\n return jsonify(notification), 201#, {'Location': location}\n else:\n return 
jsonify('No JSON payload sent. Annotation not created.',\n status=400)\n\n\n# READ\[email protected]('/notifications/<docid>')\ndef read_notification(docid):\n notification = Notification.fetch(docid,index='notification')\n if not notification:\n return jsonify('Notification not found!', status=404)\n\n failure = _check_action(notification, 'read')\n if failure:\n return failure\n\n return jsonify(notification)\n\n\n# UPDATE\[email protected]('/notifications/<docid>', methods=['POST', 'PUT'])\ndef update_notification(docid):\n notification = Notification.fetch(docid,index='notification')\n if not notification:\n return jsonify('Notification not found! No update performed.',\n status=404)\n\n failure = _check_action(notification, 'update')\n if failure:\n return failure\n\n if request.json is not None:\n updated = _filter_input(request.json, UPDATE_FILTER_FIELDS)\n updated['id'] = docid # use id from URL, regardless of what arrives in\n # JSON payload\n\n changing_permissions = (\n 'permissions' in updated and\n updated['permissions'] != notification.get('permissions', {}))\n\n if changing_permissions:\n failure = _check_action(notification,\n 'admin',\n message='permissions update')\n if failure:\n return failure\n\n notification.updateFields(updated,index='notification')\n\n if hasattr(g, 'before_notification_update'):\n g.before_notification_update(notification)\n\n refresh = request.args.get('refresh') != 'false'\n notification.save(refresh=refresh)\n\n if hasattr(g, 'after_notification_update'):\n g.after_notification_update(notification)\n\n return jsonify(notification)\n\n\n# DELETE\[email protected]('/notifications/<docid>', methods=['DELETE'])\ndef delete_notification(docid):\n notification = Notification.fetch(docid,index='notification')\n \n if not notification:\n return jsonify('Notification not found. 
No delete performed.',\n status=404)\n\n failure = _check_action(notification, 'delete')\n if failure:\n return failure\n\n if hasattr(g, 'before_notification_delete'):\n g.before_notification_delete(notification)\n\n notification.delete(index='notification')\n\n if hasattr(g, 'after_notification_delete'):\n g.after_notification_delete(notification)\n\n return '', 204\n\n\n# INDEX\[email protected]('/searchannotations', methods=[\"POST\"])\ndef annotationsIndex():\n\n params = json.loads(request.data.decode('utf-8'))\n\n\n textoABuscar=params.get(\"textoABuscar\")\n if(textoABuscar==None):\n textoABuscar=\"\"\n\n estados=params.get(\"estados\")\n\n stateInProgress=estados['InProgress']\n stateArchived=estados['Archived']\n stateApproved=estados['Approved']\n\n category=params.get(\"category\")\n\n\n descriptionId=params.get(\"descriptionId\")\n descriptionActual= Description._get_Descriptions_byId(id=descriptionId)[0]\n \n\n page=params.get(\"page\")\n if(page==None):\n page=\"1\"\n\n listUrl=[]\n for url in descriptionActual['urls']: \n listUrl.append(url['url'])\n #Realizo la busqueda:\n annotations= Annotation._get_by_multiple(Annotation,textoABuscar=textoABuscar,estados=estados,urls=listUrl,category=category,notreply=True,page=page)\n \n #nroRegistros= Annotation._get_by_multipleCounts(Annotation,textoABuscar=textoABuscar,estados=estados,url=descriptionUri,page=page)\n numRes=annotations['numRes']\n annotations=annotations['annotations']\n\n\n\n # Cargo las replies de cada annotacion:\n stats=[]\n for urlItem in descriptionActual['urls']:\n stats=stats+Annotation.annotationStats(Annotation,uri=urlItem['url'])\n\n dictStats={}\n for itemStat in stats:\n clave=itemStat['key']\n val=itemStat['doc_count']\n dictStats[clave]=val\n for itemRes in annotations:\n if itemRes['id'] in dictStats.keys():\n itemRes['nroReplies']=dictStats[itemRes['id']]\n else:\n itemRes['nroReplies']=0\n\n\n\n\n return jsonify({'annotations':annotations,'nroRegistros':numRes})\n\n\n\n\n# INDEX\[email protected]('/descriptions', methods=[\"POST\"])\ndef descriptionsIndex():\n\n params = json.loads(request.data.decode('utf-8'))\n\n\n textoABuscar=params.get(\"textoABuscar\")\n if(textoABuscar==None):\n textoABuscar=\"\"\n padministration=params.get(\"padministration\")\n if(padministration==None):\n padministration=\"\"\n domain=params.get(\"domain\")\n if(domain==None):\n domain=\"\"\n\n page=params.get(\"page\")\n if(page==None):\n page=\"1\"\n\n\n #annotations = g.annotation_class.search(user=user)\n descriptions= Description._get_by_multiple(textoABuscar=textoABuscar,padministration=padministration,urlPrefix=domain,page=page)\n \n nroRegistros=descriptions['numRes']\n descriptions=descriptions['descriptions']\n #nroRegistros= Description._get_by_multipleCounts(textoABuscar=textoABuscar,padministration=padministration,urlPrefix=domain)\n \n \n return jsonify({'descriptions':descriptions,'nroRegistros':nroRegistros})\n\n\n\[email protected]('/description/<path:urlDescription>', methods=[\"POST\"])\ndef descriptionByUrl(urlDescription):\n\n params = json.loads(request.data.decode('utf-8'))\n url=params['url']\n description = Description._get_Descriptions_byURI(url=url)\n\n\n if len(description)==0:\n return jsonify([])\n\n return jsonify(description[0])\n\n \n\n\n# CREATE\[email protected]('/annotations', methods=['POST'])\ndef create_annotation():\n # Only registered users can create annotations\n if g.user is None:\n return _failed_authz_response('create annotation')\n\n if request.json is not None:\n annotation 
= g.annotation_class(\n _filter_input(\n request.json,\n CREATE_FILTER_FIELDS))\n\n annotation['consumer'] = g.user.consumer.key\n if _get_annotation_user(annotation) != g.user.id:\n annotation['user'] = g.user.id\n annotation['user'] = session['username']\n\n #print(\"El id inicial es:\"+annotation['id'])\n\n if hasattr(g, 'before_annotation_create'):\n g.before_annotation_create(annotation)\n\n if hasattr(g, 'after_annotation_create'):\n annotation.save(refresh=False)\n g.after_annotation_create(annotation)\n \n \n\n refresh = request.args.get('refresh') != 'false'\n annotation.save(refresh=refresh)\n\n #print(\"El id final es:\"+annotation['id'])\n\n location = url_for('.read_annotation', docid=annotation['id'])\n\n return jsonify(annotation), 201, {'Location': location}\n else:\n return jsonify('No JSON payload sent. Annotation not created.',\n status=400)\n\n\n# READ\[email protected]('/annotations/<docid>')\ndef read_annotation(docid):\n annotation = g.annotation_class.fetch(docid)\n if not annotation:\n return jsonify('Annotation not found!', status=404)\n\n failure = _check_action(annotation, 'read')\n if failure:\n return failure\n\n return jsonify(annotation)\n\n\n# UPDATE\[email protected]('/annotations/<docid>', methods=['POST', 'PUT'])\ndef update_annotation(docid):\n annotation = g.annotation_class.fetch(docid)\n if not annotation:\n return jsonify('Annotation not found! No update performed.',\n status=404)\n\n failure = _check_action(annotation, 'update')\n if failure:\n return failure\n\n if request.json is not None:\n updated = _filter_input(request.json, UPDATE_FILTER_FIELDS)\n updated['id'] = docid # use id from URL, regardless of what arrives in\n # JSON payload\n\n changing_permissions = (\n 'permissions' in updated and\n updated['permissions'] != annotation.get('permissions', {}))\n\n if changing_permissions:\n failure = _check_action(annotation,\n 'admin',\n message='permissions update')\n if failure:\n return failure\n\n annotation.update(updated)\n\n if hasattr(g, 'before_annotation_update'):\n g.before_annotation_update(annotation)\n\n refresh = request.args.get('refresh') != 'false'\n annotation.save(refresh=refresh)\n\n if hasattr(g, 'after_annotation_update'):\n g.after_annotation_update(annotation)\n\n return jsonify(annotation)\n\n\n# DELETE\[email protected]('/annotations/<docid>', methods=['DELETE'])\ndef delete_annotation(docid):\n annotation = g.annotation_class.fetch(docid)\n\n if not annotation:\n return jsonify('Annotation not found. 
No delete performed.',\n status=404)\n\n failure = _check_action(annotation, 'delete')\n if failure:\n return failure\n\n if hasattr(g, 'before_annotation_delete'):\n g.before_annotation_delete(annotation)\n\n annotation.delete()\n\n if hasattr(g, 'after_annotation_delete'):\n g.after_annotation_delete(annotation)\n\n return '', 204\n\n\n# SEARCH\[email protected]('/search')\ndef search_annotations():\n params = dict(request.args.items())\n kwargs = dict()\n\n # Take limit and offset out of the parameters\n if 'offset' in params:\n kwargs['offset'] = atoi(params.pop('offset'), default=None)\n if 'limit' in params:\n kwargs['limit'] = atoi(params.pop('limit'), default=None)\n if 'sort' in params:\n kwargs['sort'] = params.pop('sort')\n if 'order' in params:\n kwargs['order'] = params.pop('order')\n kwargs['limit'] = 100000\n # All remaining parameters are considered searched fields.\n kwargs['query'] = params\n\n if current_app.config.get('AUTHZ_ON'):\n # Pass the current user to do permission filtering on results\n kwargs['user'] = g.user\n print(f' Kwargs: {kwargs}' )\n results = g.annotation_class.search(**kwargs)\n total = g.annotation_class.count(**kwargs)\n\n return jsonify({'total': total,\n 'rows': results})\n\n\n\n\n# RAW ES SEARCH\[email protected]('/search_raw', methods=['GET', 'POST'])\ndef search_annotations_raw():\n\n try:\n query, params = _build_query_raw(request)\n except ValueError:\n return jsonify('Could not parse request payload!',\n status=400)\n\n if current_app.config.get('AUTHZ_ON'):\n user = g.user\n else:\n user = None\n\n try:\n res = g.annotation_class.search_raw(query, params, raw_result=True,\n user=user)\n except TransportError as err:\n if err.status_code != 'N/A':\n status_code = err.status_code\n else:\n status_code = 500\n return jsonify(err.error,\n status=status_code)\n return jsonify(res, status=res.get('status', 200))\n\n\n# Return the current user logged.\[email protected]('/user', methods=['GET'])\ndef getUser():\n return jsonify(session['username'])\n\n\ndef _filter_input(obj, fields):\n for field in fields:\n obj.pop(field, None)\n\n return obj\n\n\n\n\n\ndef _get_annotation_user(ann):\n \"\"\"Returns the best guess at this annotation's owner user id\"\"\"\n user = ann.get('user')\n\n if not user:\n return None\n\n try:\n return user.get('id', None)\n except AttributeError:\n return user\n\n\ndef _check_action(annotation, action, message=''):\n if not g.authorize(annotation, action, g.user):\n return _failed_authz_response(message)\n\n\ndef _failed_authz_response(msg=''):\n user = g.user.id if g.user else None\n consumer = g.user.consumer.key if g.user else None\n\n if user:\n # If the user is authenticated but not authorized we send a 403.\n message = (\n \"Cannot authorize request{0}. You aren't authorized to make this \"\n \"request. (user={user}, consumer={consumer})\".format(\n ' (' + msg + ')' if msg else '', user=user, consumer=consumer))\n return jsonify(message), 403\n\n else:\n # If the user is not authenticated at all we send a 401.\n return jsonify(\"Cannot authorize request{0}. Perhaps you're not logged in \"\n \"as a user with appropriate permissions on this \"\n \"annotation? 
\"\n \"(user={user}, consumer={consumer})\".format(\n ' (' + msg + ')' if msg else '',\n user=user,\n consumer=consumer),\n status=401)\n\n\ndef _build_query_raw(request):\n query = {}\n params = {}\n\n if request.method == 'GET':\n for k, v in iteritems(request.args):\n _update_query_raw(query, params, k, v)\n\n if 'query' not in query:\n query['query'] = {'match_all': {}}\n\n elif request.method == 'POST':\n\n try:\n query = json.loads(request.json or\n request.data or\n request.form.keys()[0])\n except (ValueError, IndexError):\n raise ValueError\n\n params = request.args\n\n for o in (params, query):\n if 'from' in o:\n o['from'] = max(0, atoi(o['from']))\n if 'size' in o:\n o['size'] = min(RESULTS_MAX_SIZE, max(0, atoi(o['size'])))\n\n return query, params\n\n\ndef _update_query_raw(qo, params, k, v):\n if 'query' not in qo:\n qo['query'] = {}\n q = qo['query']\n\n if 'query_string' not in q:\n q['query_string'] = {}\n qs = q['query_string']\n\n if k == 'q':\n qs['query'] = v\n\n elif k == 'df':\n qs['default_field'] = v\n\n elif k in ('explain', 'track_scores', 'from', 'size', 'timeout',\n 'lowercase_expanded_terms', 'analyze_wildcard'):\n qo[k] = v\n\n elif k == 'fields':\n qo[k] = _csv_split(v)\n\n elif k == 'sort':\n if 'sort' not in qo:\n qo[k] = []\n\n split = _csv_split(v, ':')\n\n if len(split) == 1:\n qo[k].append(split[0])\n else:\n fld = ':'.join(split[0:-1])\n drn = split[-1]\n qo[k].append({fld: drn})\n\n elif k == 'search_type':\n params[k] = v\n\n\ndef _csv_split(s, delimiter=','):\n return [r for r in csv.reader([s], delimiter=delimiter)][0]\n", "id": "2492343", "language": "Python", "matching_score": 4.448844909667969, "max_stars_count": 0, "path": "annotator/store.py" }, { "content": "from annotator import es,authz\nimport datetime\n\nTYPE = 'description'\nMAPPING = {\n\n \"id\": {\n \"type\": \"string\",\n \"index\": \"no\"\n },\n\n \"title\": {\n \"type\": \"string\",\n \"analyzer\": \"standard\"\n },\n\n \"description\": {\n \"type\": \"string\",\n \"analyzer\": \"standard\"\n },\n\n \"keywords\": {\n \"type\": \"string\",\n \"analyzer\": \"standard\"\n },\n\n \"moderators\": {\n \"type\": \"nested\",\n \"properties\": {\n \"email\": {\"type\": \"string\"},\n \"createdat\": {\n \"type\": \"date\",\n \"format\": \"dateOptionalTime\"\n },\n \"expire\": {\n \"type\": \"date\",\n \"format\": \"dateOptionalTime\"\n }\n }\n },\n\n \n \"padministration\": {\n \"type\": \"string\",\n \"analyzer\": \"standard\"\n },\n\n \"url\": {\n \"type\": \"string\",\n \"index\": \"not_analyzed\"\n },\n \n \"urls\": {\n \"type\": \"nested\",\n \"properties\": {\n \"createdate\": {\n \"type\": \"date\",\n \"format\": \"dateOptionalTime\"\n },\n \"ismain\": {\n \"type\" : \"boolean\"},\n \"url\": {\n \"type\": \"string\",\n \"index\": \"not_analyzed\"\n },\n \"language\": {\n \"type\": \"string\",\n \"index\": \"not_analyzed\"\n },\n \"email\": {\"type\": \"string\"},\n }\n },\n\n 'created': {'type': 'date'},\n 'updated': {'type': 'date'},\n \n 'permissions': {\n 'index_name': 'permission',\n 'properties': {\n 'read': {'type': 'string'},\n 'update': {'type': 'string'},\n 'delete': {'type': 'string'},\n 'admin': {'type': 'string'}\n }\n }\n\n\n}\nMAX_ITERATIONS = 5\nPAGGINATION_SIZE = 10\n\n\nclass Description(es.Model):\n __type__ = TYPE\n __mapping__ = MAPPING\n\n\n @classmethod\n def _get_all(cls):\n \"\"\"\n Returns a list of all descriptions \n \"\"\"\n q= {\n \"sort\": [\n {\n \"updated\": {\n \"order\": \"desc\",\n \"ignore_unmapped\": True\n }\n }\n ],\n \"from\": 0,\n \"size\": 
PAGGINATION_SIZE,\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"match_all\": {}\n }\n ]\n }\n }\n }\n res = cls.es.conn.search(index=\"description\",\n doc_type=cls.__type__,\n body=q)\n return [cls(d['_source'], id=d['_id']) for d in res['hits']['hits']]\n\n @classmethod\n def _get_by_title(cls,searchText=\"\",padministration='',domain=''):\n \n\n\n q= {\n \"query\": {\n \"prefix\":{\n \"title\":searchText\n }\n }\n }\n\n \n\n res = cls.es.conn.search(index=\"description\",\n doc_type=cls.__type__,\n body=q)\n return [cls(d['_source'], id=d['_id']) for d in res['hits']['hits']]\n\n\n @classmethod\n def _get_uniqueValues(cls,campo=\"\"):\n \n\n\n q= {\n \"aggs\": {\n \"group_by_url\": {\n \"terms\": {\n \"field\": campo\n }\n }\n },\n \"size\": 0\n }\n\n \n\n res = cls.es.conn.search(index=\"description\",\n doc_type=cls.__type__,\n body=q)\n\n resultadosDistintos=res[\"aggregations\"][\"group_by_url\"][\"buckets\"]\n \n\n print(resultadosDistintos)\n\n return resultadosDistintos\n\n\n #Get users that has participated as Moderator of a Description\n @classmethod\n def currentActiveUsersModerators(cls,**kwargs):\n q={\n \"aggs\" : {\n \"moderators\" : {\n \"nested\" : {\n \"path\" : \"moderators\"\n },\n \"aggs\" : {\n \"group_by_user\": {\n \"terms\": {\n \"field\": \"moderators.email\"\n }\n }\n\n }\n }\n },\n \"size\": 0\n }\n \n print(q)\n\n \n res = cls.es.conn.search(index=\"description\",\n doc_type=cls.__type__,\n body=q)\n\n\n if(len(res['aggregations']['moderators']['group_by_user']['buckets'])>0):\n res=res['aggregations']['moderators']['group_by_user']['buckets']\n else:\n res=[]\n \n return res\n\n\n @classmethod\n def _get_uniqueValuesUrl(cls):\n \n q= {\n \"aggs\" : {\n \"urls\" : {\n \"nested\" : {\n \"path\" : \"urls\"\n },\n \"aggs\" : {\n \"group_by_url\": {\n \"terms\": {\n \"field\": \"urls.url\"\n }\n }\n\n }\n }\n },\n \"size\": 0\n }\n\n \n\n res = cls.es.conn.search(index=\"description\",\n doc_type=cls.__type__,\n body=q)\n\n resultadosDistintos=res[\"aggregations\"][\"urls\"][\"group_by_url\"][\"buckets\"]\n \n\n print(resultadosDistintos)\n\n return resultadosDistintos\n\n\n \n\n\n @classmethod\n def _get_Descriptions_byId(cls,**kwargs):\n \n q= {\n \n \"query\": {\n \"terms\": {\n \"_id\":[kwargs.pop(\"id\")]\n }\n }\n }\n\n print(q)\n\n\n res = cls.es.conn.search(index=\"description\",\n doc_type=cls.__type__,\n body=q)\n\n return [cls(d['_source'], id=d['_id']) for d in res['hits']['hits']]\n \n @classmethod\n def _get_Descriptions_byURI(cls,**kwargs):\n #Search for the description that include this url in the urls set.\n \n \n q= {\n \n \"query\": {\n \"nested\": {\n \"path\": \"urls\",\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"match\": {\n \"urls.url\": kwargs.pop(\"url\")\n }\n }\n ]\n }\n }\n }\n }\n }\n\n \n\n print(q)\n\n\n res = cls.es.conn.search(index=\"description\",\n doc_type=cls.__type__,\n body=q)\n\n return [cls(d['_source'], id=d['_id']) for d in res['hits']['hits']]\n\n\n def _get_checkPermisos_byURI(cls,**kwargs):\n\n q= {\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"match\": {\n \"url\":kwargs.pop(\"url\")\n }\n },\n {\n \"nested\": {\n \"path\": \"moderators\",\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"match\": {\n \"moderators.email\": kwargs.pop(\"email\")\n }\n }\n ]\n }\n }\n }\n }\n ]\n }\n }\n }\n\n print(q)\n\n \n res = cls.es.conn.count(index=\"description\",\n doc_type=cls.__type__,\n body=q)\n \n return res['count']\n\n def _get_checkPermisos_byId(cls,**kwargs):\n\n q= {\n \"query\": {\n \"bool\": {\n 
\"must\": [\n {\n \"match\": {\n \"_id\":kwargs.pop(\"id\")\n }\n },\n {\n \"nested\": {\n \"path\": \"moderators\",\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"match\": {\n \"moderators.email\": kwargs.pop(\"email\")\n }\n }\n ]\n }\n }\n }\n }\n ]\n }\n }\n }\n\n print(q)\n\n \n res = cls.es.conn.count(index=\"description\",\n doc_type=cls.__type__,\n body=q)\n \n return res['count']\n\n\n @classmethod\n def _get_Descript_byModerEmail(cls,**kwargs):\n \n\n q= {\n \n \"query\": {\n \"nested\": {\n \"path\": \"moderators\",\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"match\": {\n \n \"moderators.email\": kwargs.pop(\"email\") \n }\n }\n ]\n }\n }\n }\n }\n }\n\n #Parametros de busqueda:\n\n \n print(q)\n\n \n res = cls.es.conn.search(index=\"description\",\n doc_type=cls.__type__,\n body=q)\n return [cls(d['_source'], id=d['_id']) for d in res['hits']['hits']]\n\n \n @classmethod\n def _get_Descript_byModerEmailCounts(cls,**kwargs):\n \n\n q= {\n \n \"query\": {\n \"nested\": {\n \"path\": \"moderators\",\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"match\": {\n \n \"moderators.email\": kwargs.pop(\"email\") \n }\n }\n ]\n }\n }\n }\n }\n }\n\n #Parametros de busqueda:\n\n \n print(q)\n\n \n res = cls.es.conn.count(index=\"description\",\n doc_type=cls.__type__,\n body=q)\n \n return res['count']\n\n @classmethod\n def _get_DescriptionsCounts(cls,**kwargs):\n \n\n q= {\n \"query\": {\n \"bool\": {\n \"must\":[\n {\n \"prefix\":{\n \"title\":kwargs.pop(\"textoABuscar\")\n }\n },\n {\n \"prefix\":{\n \"url\":kwargs.pop(\"url\")\n }\n }\n ]\n }\n }\n }\n\n #Parametros de busqueda:\n\n i = 0\n \n for key, value in kwargs.items():\n i += 1\n\n \n seccion = {\n \"match\":{\n key: value\n }\n\n }\n\n \n q['query']['bool']['must'].append(seccion)\n\n print('_get_DescriptionsCounts') \n print(q)\n\n \n\n \n\n res = cls.es.conn.count(index=\"description\",\n doc_type=cls.__type__,\n body=q)\n return [cls(d['_source'], id=d['_id']) for d in res['hits']['hits']]\n\n @classmethod\n def _get_by_multiple(cls,**kwargs):\n\n #Base filter parameters\n page=kwargs.get(\"page\")\n initReg=(int(page)-1)*10\n q= {\n \"sort\": [\n {\n \"updated\": {\n \"order\": \"desc\",\n \"ignore_unmapped\": True\n }\n }\n ],\n \"from\": initReg,\n \"size\": PAGGINATION_SIZE\n }\n\n\n #Filter by searchBox\n textoBusqueda=kwargs.pop(\"textoABuscar\")\n if textoBusqueda=='':\n searchScope={\n \"match_all\": {}\n }\n else:\n searchScope={\n \n \"bool\": {\n \"must\":[\n {\n \"match\":{\n \"title\":textoBusqueda\n }\n }\n ]\n }\n \n }\n\n q['query']=searchScope\n\n #Filter by Public administration:\n padminitration=kwargs.pop(\"padministration\")\n\n #Filter by Public administration:\n if padminitration!='':\n q['query']['bool']['must'].append({\n \"match\":{\n \"padministration\":padminitration\n }\n })\n\n #Filter by Domain\n urlPrefix=kwargs.pop(\"urlPrefix\")\n if urlPrefix!='':\n q['query']['bool']['must'].append(\n{\n \"nested\": {\n \"path\": \"urls\",\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"prefix\": {\n \"urls.url\": \"https://\"+urlPrefix\n }\n },\n {\n \"prefix\": {\n \"url\": \"http://\"+urlPrefix\n }\n }\n ]\n }\n }\n }\n }\n )\n \n \n\n print('_get_by_multiple')\n print(q)\n\n res = cls.es.conn.search(index=\"description\",\n doc_type=cls.__type__,\n body=q)\n\n\n descriptions=[cls(d['_source'], id=d['_id']) for d in res['hits']['hits']]\n numRes=res['hits']['total']\n\n resultado={'descriptions':descriptions,'numRes':numRes}\n return resultado\n\n\n \n @classmethod\n def 
_get_by_multipleCounts(cls,**kwargs):\n \n \n \n q= {\n \"query\": {\n \"bool\": {\n \"must\":[\n {\n \"prefix\":{\n \"title\":kwargs.get(\"textoABuscar\")\n }\n }\n ]\n }\n }\n }\n\n #Parametros de busqueda:\n\n i = 0\n \n for key, value in kwargs.items():\n i += 1\n\n if(key=='url'):\n\n preUrl={\"bool\": {\n \"should\":[\n ]\n }}\n\n seccion1 = {\n \"prefix\":{\n key: 'http://'+value\n }\n\n }\n preUrl['bool']['should'].append(seccion1)\n seccion2 = {\n \"prefix\":{\n key: 'https://'+value\n }\n\n }\n preUrl['bool']['should'].append(seccion2)\n q['query']['bool']['must'].append(preUrl)\n\n else: \n if value=='Unassigned':\n value=''\n\n seccion = {\n \"match\":{\n key: value\n }\n\n }\n\n if(key!='textoABuscar' and key!='page'):\n q['query']['bool']['must'].append(seccion)\n else:\n seccion = {\n \"match\":{\n key: value\n }\n\n }\n\n if(key!='textoABuscar' and key!='page'and value!=''):\n q['query']['bool']['must'].append(seccion)\n\n print('_get_by_multipleCounts')\n print(q)\n\n res = cls.es.conn.count(index=\"description\",\n doc_type=cls.__type__,\n body=q)\n \n return res['count']\n\n\n @classmethod\n def _searchAndFilters(cls,**kwargs):\n \n page=kwargs.get(\"page\")\n initReg=(int(page)-1)*10\n q= {\n \"sort\": [\n {\n \"updated\": {\n \"order\": \"desc\",\n \"ignore_unmapped\": True\n }\n }\n ],\n \"from\": initReg,\n \"size\": PAGGINATION_SIZE,\n \"query\": {\n \"bool\": {\n \"must\":[\n {\n \"prefix\":{\n \"title\":kwargs.get(\"textoABuscar\")\n }\n }\n ]\n }\n }\n }\n\n #Parametros de busqueda:\n\n i = 0\n\n\n \"\"\" \"query\": {\n \"query_string\":{\n \"query\": \"*http*\",\n \"fields\": [\"url\"]\n \n }\n }\n } \"\"\"\n\n \n for key, value in kwargs.items():\n i += 1\n\n \n seccion = {\n \"match\":{\n key: value\n }\n\n }\n\n if(key!='textoABuscar' and key!='page' and value!=''):\n q['query']['bool']['must'].append(seccion)\n\n print('_searchAndFilters') \n print(q)\n \n\n res = cls.es.conn.search(index=\"description\",\n doc_type=cls.__type__,\n body=q)\n return [cls(d['_source'], id=d['_id']) for d in res['hits']['hits']]\n\n\n def save(self, *args, **kwargs):\n _add_default_permissions(self)\n\n # If the annotation includes document metadata look to see if we have\n # the document modeled already. If we don't we'll create a new one\n # If we do then we'll merge the supplied links into it.\n\n \n\n super(Description, self).save(*args, **kwargs)\n\n\n\n\n \n\n\n def updateFields(self, *args, **kwargs):\n #_add_default_permissions(self)\n\n # If the annotation includes document metadata look to see if we have\n # the document modeled already. If we don't we'll create a new one\n # If we do then we'll merge the supplied links into it.\n\n \n q = {\n \"doc\" : {\n \"title\":self.title,\n \"description\":self.description,\n \"keywords\":self.keywords,\n \"padministration\":self.padministration,\n \"urls\":self['urls'],\n \"updated\":datetime.datetime.now().replace(microsecond=0).isoformat()\n }\n } \n\n super(Description, self).updateFields(body=q,*args, **kwargs)\n\n\n def updateModerators(self, *args, **kwargs):\n #_add_default_permissions(self)\n\n # If the annotation includes document metadata look to see if we have\n # the document modeled already. 
If we don't we'll create a new one\n # If we do then we'll merge the supplied links into it.\n\n \n q = {\n \"doc\" : {\n \"moderators\":self['moderators'],\n \"updated\":datetime.datetime.now().replace(microsecond=0).isoformat()\n }\n } \n\n super(Description, self).updateFields(body=q,*args, **kwargs)\n\n @classmethod\n def search_raw(cls, query=None, params=None, raw_result=False,\n user=None, authorization_enabled=None,index='description'):\n \"\"\"Perform a raw Elasticsearch query\n\n Any ElasticsearchExceptions are to be caught by the caller.\n\n Keyword arguments:\n query -- Query to send to Elasticsearch\n params -- Extra keyword arguments to pass to Elasticsearch.search\n raw_result -- Return Elasticsearch's response as is\n user -- The user to filter the results for according to permissions\n authorization_enabled -- Overrides Description.es.authorization_enabled\n \"\"\"\n if query is None:\n query = {}\n if authorization_enabled is None:\n authorization_enabled = es.authorization_enabled\n if authorization_enabled:\n f = authz.permissions_filter(user)\n if not f:\n raise RuntimeError(\"Authorization filter creation failed\")\n filtered_query = {\n 'filtered': {\n 'filter': f\n }\n }\n # Insert original query (if present)\n if 'query' in query:\n filtered_query['filtered']['query'] = query['query']\n # Use the filtered query instead of the original\n query['query'] = filtered_query\n\n res = super(Description, cls).search_raw(index=index,query=query, params=params,\n raw_result=raw_result)\n return res\n\n @classmethod\n def _build_query(cls, query=None, offset=None, limit=None, sort=None, order=None):\n if query is None:\n query = {}\n else:\n query = dict(query) # shallow copy\n\n # Pop 'before' and 'after' parameters out of the query\n after = query.pop('after', None)\n before = query.pop('before', None)\n\n q = super(Description, cls)._build_query(query, offset, limit, sort, order)\n \n print(str(q))\n \n # Create range query from before and/or after\n if before is not None or after is not None:\n clauses = q['query']['bool']['must']\n\n # Remove match_all conjunction, because\n # a range clause is added\n if clauses[0] == {'match_all': {}}:\n clauses.pop(0)\n\n created_range = {'range': {'created': {}}}\n if after is not None:\n created_range['range']['created']['gte'] = after\n if before is not None:\n created_range['range']['created']['lt'] = before\n clauses.append(created_range)\n\n \n\n return q\n\n\ndef _add_default_permissions(ann):\n if 'permissions' not in ann:\n ann['permissions'] = {'read': [authz.GROUP_CONSUMER]}\n", "id": "3147526", "language": "Python", "matching_score": 0.810436487197876, "max_stars_count": 0, "path": "annotator/description.py" }, { "content": "from fastapi import APIRouter, FastAPI\nfrom fastapi.responses import RedirectResponse\nfrom fastapi.staticfiles import StaticFiles\nfrom fastapi.templating import Jinja2Templates\nfrom starlette.middleware.cors import CORSMiddleware\n\nfrom app.config import settings\nfrom app.database import (\n close_mongo_connection,\n connect_to_mongo,\n)\nfrom app.api.v1.integrable import integrablerouter\nfrom app.api.v1.custom import customrouter\n\napp = FastAPI(\n title=\"Survey interlinker API\", openapi_url=f\"/openapi.json\", docs_url=\"/docs\", root_path=settings.BASE_PATH\n)\napp.add_event_handler(\"startup\", connect_to_mongo)\napp.add_event_handler(\"shutdown\", close_mongo_connection)\n\napp.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\")\n\n# Set all CORS enabled origins\nif 
settings.BACKEND_CORS_ORIGINS:\n app.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n\nmainrouter = APIRouter()\n\n\[email protected](\"/\")\ndef main():\n return RedirectResponse(url=f\"{settings.BASE_PATH}/docs\")\n\n\[email protected](\"/healthcheck\")\ndef healthcheck():\n return True\n\n\napp.include_router(mainrouter, tags=[\"main\"])\napp.include_router(integrablerouter, tags=[\"Integrable\"])\napp.include_router(customrouter, prefix=settings.API_V1_STR, tags=[\"Custom endpoints\"])\n", "id": "9996042", "language": "Python", "matching_score": 2.028238534927368, "max_stars_count": 0, "path": "surveyapp/app/app/main.py" }, { "content": "import logging\nfrom motor.motor_asyncio import AsyncIOMotorClient, AsyncIOMotorCollection\nfrom app.config import settings\nimport os\n\nMAX_CONNECTIONS_COUNT = int(os.getenv(\"MAX_CONNECTIONS_COUNT\", 10))\nMIN_CONNECTIONS_COUNT = int(os.getenv(\"MIN_CONNECTIONS_COUNT\", 10))\n\nclass DataBase:\n client: AsyncIOMotorClient = None\n\ndb = DataBase()\n\nasync def get_collection() -> AsyncIOMotorCollection:\n return db.client[settings.MONGODB_DATABASE][settings.COLLECTION_NAME]\n\nasync def connect_to_mongo():\n logging.info(\"Connecting to database...\")\n db.client = AsyncIOMotorClient(settings.MONGODB_URL,\n maxPoolSize=MAX_CONNECTIONS_COUNT,\n minPoolSize=MIN_CONNECTIONS_COUNT)\n logging.info(\"Database connected!\")\n\n\nasync def close_mongo_connection():\n logging.info(\"Closing database connection...\")\n db.client.close()\n logging.info(\"Database closed!\")", "id": "9013551", "language": "Python", "matching_score": 0.21233676373958588, "max_stars_count": 0, "path": "surveyapp/app/app/database.py" }, { "content": "import json\n\nimport pytest\n\nfrom app import crud\nfrom app.defaults import formio\n# https://testdriven.io/blog/fastapi-crud/\n# https://github.com/testdrivenio/fastapi-crud-async/blob/master/src/tests/test_notes.py\nfrom app import database\n\nasync def get_collection_mock(payload):\n print(\"HACE ESTO\")\n return True\n\ndef test_ping(test_app):\n response = test_app.get(\"/healthcheck\")\n assert response.status_code == 200\n assert response.json() == True\n\n\ndef test_endpoints(test_app, monkeypatch):\n response = test_app.get(f\"/assets/instantiate\")\n assert response.status_code == 200\n\n id = \"bb6bae15bbb9497c90e8a91cddc35654\"\n send_payload = {\n \"description\": \"sdfsdf\",\n \"title\": \"sdfsd\"\n }\n response_payload = send_payload\n response_payload[\"_id\"] = id\n\n async def mock(payload):\n return response_payload\n\n monkeypatch.setattr(database, \"get_collection\", get_collection_mock)\n\n monkeypatch.setattr(crud, \"create\", mock)\n monkeypatch.setattr(crud, \"get\", mock)\n\n response = test_app.post(\"/api/v1/assets\", json=send_payload)\n print(response.json())\n assert response.status_code == 201\n\"\"\"\ndef test_create_asset(test_app, monkeypatch):\n test_payload = formio\n\n response_payload = test_payload\n response_payload[\"_id\"] = \"EXAMPLEID\"\n response_payload[\"name\"] = \"EXAMPLENAME\"\n\n async def mock_create(payload):\n return response_payload\n\n monkeypatch.setattr(crud, \"create\", mock_create)\n response = test_app.post(\"/api/v1/surveys/\", json=test_payload)\n\n assert response.status_code == 201\n assert response.json() == response_payload\n\n\n\ndef test_create_asset_invalid_json(test_app):\n response = test_app.post(\"/api/v1/assets/\", data=json.dumps({\"title\": \"something\"}))\n assert response.status_code == 
422\n\n response = test_app.post(\"/api/v1/assets/\", data=json.dumps({\"title\": \"1\", \"description\": \"2\"}))\n assert response.status_code == 422\n\n\ndef test_read_asset(test_app, monkeypatch):\n test_data = {\"id\": 1, \"title\": \"something\", \"description\": \"something else\"}\n\n async def mock_get(id):\n return test_data\n\n monkeypatch.setattr(crud, \"get\", mock_get)\n\n response = test_app.get(\"/api/v1/assets/1\")\n assert response.status_code == 200\n assert response.json() == test_data\n\n\ndef test_read_asset_incorrect_id(test_app, monkeypatch):\n async def mock_get(id):\n return None\n\n monkeypatch.setattr(crud, \"get\", mock_get)\n\n response = test_app.get(\"/api/v1/assets/999\")\n assert response.status_code == 404\n assert response.json()[\"detail\"] == \"Asset not found\"\n\n response = test_app.get(\"/api/v1/assets/0\")\n assert response.status_code == 422\n\n\ndef test_read_all_assets(test_app, monkeypatch):\n test_data = [\n {\"title\": \"something\", \"description\": \"something else\", \"id\": 1},\n {\"title\": \"someone\", \"description\": \"someone else\", \"id\": 2},\n ]\n\n async def mock_get_all():\n return test_data\n\n monkeypatch.setattr(crud, \"get_all\", mock_get_all)\n\n response = test_app.get(\"/api/v1/assets/\")\n assert response.status_code == 200\n assert response.json() == test_data\n\n\ndef test_update_asset(test_app, monkeypatch):\n test_update_data = {\"title\": \"someone\", \"description\": \"someone else\", \"id\": 1}\n\n async def mock_get(id):\n return True\n\n monkeypatch.setattr(crud, \"get\", mock_get)\n\n async def mock_put(id, payload):\n return 1\n\n monkeypatch.setattr(crud, \"put\", mock_put)\n\n response = test_app.put(\"/api/v1/assets/1/\", data=json.dumps(test_update_data))\n assert response.status_code == 200\n assert response.json() == test_update_data\n\n\[email protected](\n \"id, payload, status_code\",\n [\n [1, {}, 422],\n [1, {\"description\": \"bar\"}, 422],\n [999, {\"title\": \"foo\", \"description\": \"bar\"}, 404],\n [1, {\"title\": \"1\", \"description\": \"bar\"}, 422],\n [1, {\"title\": \"foo\", \"description\": \"1\"}, 422],\n [0, {\"title\": \"foo\", \"description\": \"bar\"}, 422],\n ],\n)\ndef test_update_asset_invalid(test_app, monkeypatch, id, payload, status_code):\n async def mock_get(id):\n return None\n\n monkeypatch.setattr(crud, \"get\", mock_get)\n\n response = test_app.put(f\"/api/v1/assets/{id}/\", data=json.dumps(payload),)\n assert response.status_code == status_code\n\n\ndef test_remove_asset(test_app, monkeypatch):\n test_data = {\"title\": \"something\", \"description\": \"something else\", \"id\": 1}\n\n async def mock_get(id):\n return test_data\n\n monkeypatch.setattr(crud, \"get\", mock_get)\n\n async def mock_delete(id):\n return id\n\n monkeypatch.setattr(crud, \"delete\", mock_delete)\n\n response = test_app.delete(\"/api/v1/assets/1/\")\n assert response.status_code == 200\n assert response.json() == test_data\n\n\ndef test_remove_asset_incorrect_id(test_app, monkeypatch):\n async def mock_get(id):\n return None\n\n monkeypatch.setattr(crud, \"get\", mock_get)\n\n response = test_app.delete(\"/api/v1/assets/999/\")\n assert response.status_code == 404\n assert response.json()[\"detail\"] == \"Asset not found\"\n\n response = test_app.delete(\"/api/v1/assets/0/\")\n assert response.status_code == 422\n\"\"\"\n", "id": "8809774", "language": "Python", "matching_score": 0.7735445499420166, "max_stars_count": 0, "path": "surveyapp/app/app/tests/test_main.py" }, { "content": "import 
os\nfrom flask import Flask, g, request\n\nfrom annotator import es, auth, authz, annotation, store, document\n\nfrom .helpers import MockUser, MockConsumer\n\nhere = os.path.dirname(__file__)\n\n\ndef create_app():\n app = Flask(__name__)\n app.config.from_pyfile(os.path.join(here, 'test.cfg'))\n\n es.host = app.config['ELASTICSEARCH_URL']\n es.index = app.config['ELASTICSEARCH_INDEX']\n es.authorization_enabled = app.config['AUTHZ_ON']\n\n @app.before_request\n def before_request():\n g.auth = auth.Authenticator(MockConsumer)\n g.authorize = authz.authorize\n\n app.register_blueprint(store.store, url_prefix='/api')\n\n return app\n\n\nclass TestCase(object):\n @classmethod\n def setup_class(cls):\n cls.app = create_app()\n annotation.Annotation.drop_all()\n document.Document.drop_all()\n\n def setup(self):\n annotation.Annotation.create_all()\n document.Document.create_all()\n es.conn.cluster.health(wait_for_status='yellow')\n self.cli = self.app.test_client()\n\n def teardown(self):\n annotation.Annotation.drop_all()\n document.Document.drop_all()\n", "id": "2523812", "language": "Python", "matching_score": 0.9965831637382507, "max_stars_count": 0, "path": "tests/__init__.py" }, { "content": "import secrets\nfrom typing import List, Union\nfrom pydantic import AnyHttpUrl, BaseSettings, validator\nimport os\n\n\nclass Settings(BaseSettings):\n \n ELASTICSEARCH_URL: str = os.getenv(\"ELASTICSEARCH_URL\")\n SURVEYINTERLINK_URL: str = os.getenv(\"SURVEYINTERLINK_URL\")\n SURVEYAPI_VERSION : str = os.getenv(\"SURVEYAPI_VERSION\")\n PORTAUGMENTER: str = os.getenv(\"PORTAUGMENTER\")\n HOSTAUGMENTER: str = os.getenv(\"HOSTAUGMENTER\")\n\n\n DEBUG: bool = os.getenv(\"DEBUG\")\n SECRET_KEY: str = os.getenv(\"SECRET_KEY\")\n ELASTICSEARCH_INDEX:str = os.getenv(\"ELASTICSEARCH_INDEX\")\n AUTH_ON: bool = os.getenv(\"AUTH_ON\")\n AUTHZ_ON: bool = os.getenv(\"AUTHZ_ON\")\n \n\n\n # Mail server configuration Parameters:\n MAIL_SERVER:str = os.getenv(\"MAIL_SERVER\")\n MAIL_PORT:int = os.getenv(\"MAIL_PORT\")\n \n MAIL_USERNAME:str = os.getenv(\"MAIL_USERNAME\") \n MAIL_PASSWORD:str = <PASSWORD>(\"<PASSWORD>\") \n MAIL_DEFAULT_SENDER:str = os.getenv(\"MAIL_DEFAULT_SENDER\") \n MAIL_USE_TLS: bool = os.getenv(\"MAIL_USE_TLS\")\n MAIL_USE_SSL: bool = os.getenv(\"MAIL_USE_SSL\")\n MAIL_MAX_EMAILS:str = os.getenv(\"MAIL_MAX_EMAILS\")\n MAIL_ASCII_ATTACHMENTS: bool = os.getenv(\"MAIL_ASCII_ATTACHMENTS\")\n\n MAX_CONTENT_LENGTH: int = os.getenv(\"MAX_CONTENT_LENGTH\") \n UPLOAD_EXTENSIONS: List[str] = os.getenv(\"UPLOAD_EXTENSIONS\")\n UPLOAD_PATH:str = os.getenv(\"UPLOAD_PATH\") \n USE_SESSION_FOR_NEXT: bool = os.getenv(\"USE_SESSION_FOR_NEXT\")\n\n # Swagger parameters:\n\n SWAGGER_URL:str = os.getenv(\"SWAGGER_URL\")\n API_URL:str = os.getenv(\"API_URL\")\n\n #Babel:\n BABEL_DEFAULT_LOCALE:str = os.getenv(\"BABEL_DEFAULT_LOCALE\")\n\n\n\n #Parametros de Autenticacion:\n\n CLIENT_ID:str = os.getenv(\"CLIENT_ID\")\n CLIENT_SECRET:str = os.getenv(\"CLIENT_SECRET\")\n ISSUER:str = os.getenv(\"ISSUER\")\n\n AUTH_URI:str = os.getenv(\"AUTH_URI\")\n TOKEN_URI:str = os.getenv(\"TOKEN_URI\")\n TOKEN_INTROSPECTION_URI:str = os.getenv(\"TOKEN_INTROSPECTION_URI\")\n\n REDIRECT_URI:str = os.getenv(\"REDIRECT_URI\")\n \n USERINFO_URI:str = os.getenv(\"USERINFO_URI\")\n\n END_SESSION_ENDPOINT:str = os.getenv(\"END_SESSION_ENDPOINT\")\n\n CONSUMER_KEY:str = os.getenv(\"CONSUMER_KEY\")\n CONSUMER_TTL: int = os.getenv(\"CONSUMER_TTL\")\n CRYPT_KEY:str = os.getenv(\"CRYPT_KEY\")\n\n\n\n\n class Config:\n case_sensitive = 
True\n\nsettings = Settings()", "id": "7613092", "language": "Python", "matching_score": 3.6897547245025635, "max_stars_count": 0, "path": "config.py" }, { "content": "import secrets\nfrom typing import List, Union\nfrom pydantic import AnyHttpUrl, BaseSettings, validator\nimport os\n\nmode = os.getenv(\"MODE\", \"\")\nclass Settings(BaseSettings):\n MODE: str\n MODE_SOLO: bool = mode == \"solo\"\n MODE_INTEGRATED: bool = mode == \"integrated\"\n MODE_PRODUCTION: bool = mode == \"production\"\n\n API_V1_STR: str = \"/api/v1\"\n BACKEND_CORS_ORIGINS: List[AnyHttpUrl] = []\n SECRET_KEY: str = secrets.token_urlsafe(32)\n \n PROTOCOL: str\n SERVER_NAME: str\n BASE_PATH: str\n COMPLETE_SERVER_NAME: AnyHttpUrl = os.getenv(\"PROTOCOL\") + os.getenv(\"SERVER_NAME\") + os.getenv(\"BASE_PATH\")\n\n MONGODB_URL: str\n MONGODB_DATABASE: str\n COLLECTION_NAME: str\n\n SERVICEPEDIA_URL:str = os.getenv(\"SERVICEPEDIA_URL\")\n \n class Config:\n case_sensitive = True\n\nsettings = Settings()\n", "id": "8760884", "language": "Python", "matching_score": 4.117150783538818, "max_stars_count": 0, "path": "surveyapp/app/app/config.py" }, { "content": "from fastapi.templating import Jinja2Templates\nfrom app.config import settings\n\ntemplates = Jinja2Templates(directory=\"templates\")\ndomainfo = {\n \"PROTOCOL\": settings.PROTOCOL,\n \"SERVER_NAME\": settings.SERVER_NAME ,\n \"BASE_PATH\": settings.BASE_PATH ,\n \"COMPLETE_SERVER_NAME\": settings.COMPLETE_SERVER_NAME ,\n \"SERVICEPEDIA_URL\" : settings.SERVICEPEDIA_URL\n }", "id": "645969", "language": "Python", "matching_score": 0.7215480804443359, "max_stars_count": 0, "path": "surveyapp/app/app/api/v1/common.py" }, { "content": "from pydantic import BaseModel, Field, Extra, validator\nfrom typing import Union, Optional\nimport datetime\nfrom app.config import settings\n\nclass AssetCreateUpdateSchema(BaseModel, extra=Extra.allow):\n title: Union[str, object]\n\nclass AssetSchema(BaseModel):\n id: str = Field(..., alias='_id')\n created_at: datetime.datetime\n updated_at: Optional[datetime.datetime]\n\n # extra allowed\n class Config:\n extra = Extra.allow\n allow_population_by_field_name = True\n\nclass AssetBasicDataSchema(BaseModel):\n id: str = Field(alias='_id')\n title: str = Field(alias='name')\n interlinker_name: str = \"Survey\"\n icon: str = \"https://cdn.pixabay.com/photo/2017/05/15/23/48/survey-2316468_1280.png\"\n createdTime: datetime.datetime = Field(alias='created_at')\n modifiedTime: Optional[datetime.datetime] = Field(alias='updated_at')\n viewLink: Optional[str]\n editLink: Optional[str]\n cloneLink: Optional[str]\n\n class Config:\n allow_population_by_field_name = True\n \n @validator('viewLink', always=True)\n def view_link(cls, name, values):\n asset_id = values[\"id\"]\n return settings.COMPLETE_SERVER_NAME + f\"/assets/{asset_id}/view\"\n\n @validator('editLink', always=True)\n def edit_link(cls, name, values):\n asset_id = values[\"id\"]\n return settings.COMPLETE_SERVER_NAME + f\"/assets/{asset_id}/edit\"\n \n @validator('cloneLink', always=True)\n def clone_link(cls, name, values):\n asset_id = values[\"id\"]\n return settings.COMPLETE_SERVER_NAME + f\"/assets/{asset_id}/clone\"", "id": "6515902", "language": "Python", "matching_score": 0.40706667304039, "max_stars_count": 0, "path": "surveyapp/app/app/models/surveys.py" }, { "content": "formio = {\n \"name\": \"<NAME>\",\n \"formSchema\": {\n \"components\": [\n {\n \"key\": \"age\",\n \"label\": \"age-text\",\n \"type\": \"textfield\",\n \"validate\": {\n \"required\": True\n 
}\n },\n {\n \"key\": \"submit\",\n \"label\": \"submit-text\",\n \"type\": \"button\"\n },\n ],\n },\n \"translations\": {\n \"language\": \"en\",\n \"i18n\": {\n \"en\": {\n \"submit-text\": \"Submit\",\n \"age-text\": \"Age\"\n },\n \"es\": {\n \"submit-text\": \"Enviar\",\n \"age-text\": \"Edad\"\n }\n },\n },\n \"type\": \"default\"\n}\n", "id": "7429368", "language": "Python", "matching_score": 0.15464605391025543, "max_stars_count": 0, "path": "surveyapp/app/app/defaults.py" } ]
0.996583
PouyaGhahramanian
[ { "content": "import numpy as np\nimport matplotlib.pyplot as plt\nimport sys\n\n#data_stream = 'rbf_fast'\ndata_stream_ = 1\nstreams = ['agrawal', 'hyperplane', 'led', 'rbf_slow', 'rbf_fast', 'sea']\n#data_stream = streams[data_stream_]\nprint('\\n\\t----------------------------')\nprint('\\tData Stream: === ' + streams[int(sys.argv[1])].upper() + ' === ')\nprint('\\t----------------------------')\ndata_stream = streams[int(sys.argv[1])]\n\naccuracies_1 = np.load('results/' + data_stream + '_accuracies_1.npy') * 100.0\naccuracies_2 = np.load('results/' + data_stream + '_accuracies_2.npy') * 100.0\naccuracies_3_goowe = np.load('results/' + data_stream + '_goowe_accuracies_3.npy') * 100.0\naccuracies_3_mv = np.load('results/' + data_stream + '_mv_accuracies_3.npy') * 100.0\naccuracies_3_av = np.load('results/' + data_stream + '_av_accuracies_3.npy') * 100.0\n\nprint('\\tAccuracy values:\\n\\tSource 1: {}\\n\\tSource 2: {}\\n\\tGoowe: {}\\n\\tMV: {}\\n\\tAV: {}\\n'\n .format(np.mean(accuracies_1), np.mean(accuracies_2), np.mean(accuracies_3_goowe),\n np.mean(accuracies_3_mv), np.mean(accuracies_3_av)))\n\nplt.rcParams.update({'font.size': 20})\nfig = plt.figure(figsize=(20, 12))\n#plt.ylim(ymin=0.6, ymax=0.95)\nplt.plot(accuracies_1, color = 'r', linestyle = 'dashed', label='Source Stream 1')\nplt.plot(accuracies_2, color='orange', linestyle = 'dashed', label='Source Stream 2')\nplt.plot(accuracies_3_goowe, color = 'c', label='Target Stream (Goowe)')\nplt.plot(accuracies_3_mv, color = 'b', label='Target Stream (MV)')\nplt.plot(accuracies_3_av, color = 'k', label='Target Stream (AV)')\nplt.grid(True)\nplt.legend()\nfig.savefig('figs/' + data_stream + '.png')\nplt.show()\n", "id": "11314734", "language": "Python", "matching_score": 4.183807373046875, "max_stars_count": 0, "path": "plotter.py" }, { "content": "import numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nimport random\n\n#data_stream = 'rbf_fast'\nN_STREAMS = 10\ndata_stream_ = 1\nstreams = ['agrawal', 'hyperplane', 'led', 'rbf_slow', 'rbf_fast', 'sea']\n#data_stream = streams[data_stream_]\nprint('\\n\\t----------------------------')\nprint('\\tData Stream: === ' + streams[int(sys.argv[1])].upper() + ' === ')\nprint('\\t----------------------------')\ndata_stream = streams[int(sys.argv[1])]\n\naccuracies_all = np.load('results/multi_source/' + data_stream + '_accuracies_sources.npy') * 100.0\naccuracies_goowe = np.load('results/multi_source/' + data_stream + '_accuracies_goowe.npy') * 100.0\naccuracies_mv = np.load('results/multi_source/' + data_stream + '_accuracies_mv.npy') * 100.0\naccuracies_av = np.load('results/multi_source/' + data_stream + '_accuracies_av.npy') * 100.0\n\nprint('\\tAccuracy values: ')\nfor j in range(N_STREAMS):\n print('\\tSource {}: {}'.format(j+1, np.round(np.mean(accuracies_all[j]), 3)))\nprint('\\tMV: {}\\n\\tAV: {}\\n\\tGoowe: {}\\n'.format(np.round(np.mean(accuracies_mv), 3),\n np.round(np.mean(accuracies_av)), np.round(np.mean(accuracies_goowe))))\n\nplt.rcParams.update({'font.size': 20})\nfig = plt.figure(figsize=(20, 12))\n#plt.ylim(ymin=0.6, ymax=0.95)\nfor j in range(N_STREAMS):\n r = random.random()\n b = random.random()\n g = random.random()\n color_random = (r, g, b)\n plt.plot(accuracies_all[j], color = color_random, linestyle = 'dashed', label='Source Stream {}'.format(j+1))\nplt.plot(accuracies_goowe, color = 'c', label='Target Stream (Goowe)')\nplt.plot(accuracies_mv, color = 'b', label='Target Stream (MV)')\nplt.plot(accuracies_av, color = 'k', label='Target Stream 
(AV)')\nplt.grid(True)\nplt.legend()\nfig.savefig('figs/multi_source/' + data_stream + '.png')\nplt.show()\n", "id": "11016506", "language": "Python", "matching_score": 2.1764845848083496, "max_stars_count": 0, "path": "ms_plotter.py" }, { "content": "from skmultiflow.data.file_stream import FileStream\nimport numpy as np\nfrom Goowe import Goowe\n#from skmultiflow.data import ConceptDriftStream\nfrom skmultiflow.data import HyperplaneGenerator\nimport logging\nfrom GooweMSS import GooweMS\nimport random\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n# Prepare the data stream\nstreams = []\nN_STREAMS = 10\ninstances_num = 10000\n\nfor i in range(N_STREAMS):\n stream = HyperplaneGenerator(random_state=None, n_features=10, n_drift_features=2,\n mag_change=0.1, noise_percentage=0.0, sigma_percentage=0.1)\n streams.append(stream)\n stream.prepare_for_use()\n\nstream_t = HyperplaneGenerator(random_state=None, n_features=10, n_drift_features=2,\n mag_change=0.1, noise_percentage=0.0, sigma_percentage=0.1)\nstream_t = streams[0]\nstream_t.prepare_for_use()\n\ninstances_counter = 0\n\n### Arrays for storing accuracy values for Streams\naccuracies_all = []\nfor i in range(N_STREAMS):\n accuracies = []\n accuracies_all.append(accuracies)\naccuracies_mv = []\naccuracies_av = []\naccuracies_goowe = []\naccuracies_all.append(accuracies_av)\naccuracies_all.append(accuracies_mv)\naccuracies_all.append(accuracies_goowe)\n\nnum_features = stream_t.n_features\nnum_targets = stream_t.n_targets\nnum_classes = 2\ntarget_values = [0., 1.]\nlogging.info(\"\\n\\tStreams are generated and prepared for use.\\n\\tNumber of features: {0} - Number of targets: {1} - Number of classes: {2} - Target values: {3}\"\n .format(num_features, num_targets, num_classes, target_values))\n\nN_MAX_CLASSIFIERS = 15\nN_MAX_CLASSIFIERS_TARGET = 30\nCHUNK_SIZE = 500 # User-specified\nWINDOW_SIZE = 100 # User-specified\n\n### Probability of drift in streams\np_tresholds = []\nfor i in range(N_STREAMS):\n p_treshold = 0.8\n p_tresholds.append(p_treshold)\npt_threshold = 0.8\n\n# Initialize the ensemble\ngoowes = []\nfor i in range(N_STREAMS):\n goowe = Goowe(n_max_components=N_MAX_CLASSIFIERS,\n chunk_size=CHUNK_SIZE,\n window_size=WINDOW_SIZE,\n logging = False)\n goowes.append(goowe)\n # Initialize the ensemble\n goowe.prepare_post_analysis_req(num_features, num_targets, num_classes, target_values)\n\ngoowe_t = GooweMS(goowes, num_streams = 10, n_max_components=N_MAX_CLASSIFIERS_TARGET,\n chunk_size=CHUNK_SIZE,\n window_size=WINDOW_SIZE,\n logging = False)\ngoowe_t.prepare_post_analysis_req(num_features, num_targets, num_classes, target_values)\n\n# For the first chunk, there is no prediction.\nfor i in range(N_STREAMS):\n X_init, y_init = streams[i].next_sample(CHUNK_SIZE)\n goowes[i].partial_fit(X_init, y_init)\n\nX_init, y_init = stream_t.next_sample(CHUNK_SIZE)\ngoowe_t.update(X_init, y_init, [])\n\naccuracies_tmp = np.zeros(N_STREAMS)\naccuracy_mv = 0.0\naccuracy_av = 0.0\naccuracy_goowe = 0.0\ntotals = np.zeros(N_STREAMS)\ntrue_predictions = np.zeros(N_STREAMS)\ntrue_predictions_t = 0.0\ntrue_predictions_t_mv = 0.0\ntrue_predictions_t_av = 0.0\ntrue_predictions_t_goowe = 0.0\ntotal = 0.\n\nfor i in range(CHUNK_SIZE):\n total += 1\n Xs = []\n ys = []\n for j in range(N_STREAMS):\n curr = streams[j].next_sample()\n X, y = curr[0], curr[1]\n Xs.append(X)\n ys.append(y)\n preds = goowes[j].predict(X)\n true_predictions[j] += np.sum(preds == y)\n accuracies_tmp[j] = true_predictions[j] / total\n\n curr_t = 
stream_t.next_sample()\n X_t, y_t = curr_t[0], curr_t[1]\n preds_t_mv = goowe_t.predict(X_t, ensemble_type='mv')\n preds_t_av = goowe_t.predict(X_t, ensemble_type='av')\n preds_t_goowe = goowe_t.predict(X_t, ensemble_type='goowe')\n true_predictions_t_mv += np.sum(preds_t_mv == y_t)\n true_predictions_t_av += np.sum(preds_t_av == y_t)\n true_predictions_t_goowe += np.sum(preds_t_goowe == y_t)\n accuracy_mv = true_predictions_t_mv / total\n accuracy_av = true_predictions_t_av / total\n accuracy_goowe = true_predictions_t_goowe / total\n for j in range(N_STREAMS):\n print('\\tSTREAM {} :: Data instance: {} - Accuracy: {}'.format(str(j+1), int(total), round(accuracies_tmp[j]*100.0, 3)))\n print('\\tTARGET STREAM :: Data instance: {} - Accuracies: MV: {} - AV: {} - Goowe: {}'.format(int(total),\n round(accuracy_mv*100.0, 3), round(accuracy_av*100.0, 3), round(accuracy_goowe*100.0, 3)))\n print('\\t==========================================================================')\n for j in range(N_STREAMS):\n goowes[j].partial_fit(Xs[j], ys[j])\n goowe_t.update(X_t, y_t,[])\n\n# Now, for the remaining instances, do ITTT (Interleaved Test Then Train).\nwhile(stream_t.has_more_samples() and instances_counter < instances_num):\n\n if(instances_counter % CHUNK_SIZE == 0):\n accuracies_tmp = np.zeros(N_STREAMS)\n accuracy_mv = 0.0\n accuracy_av = 0.0\n accuracy_goowe = 0.0\n totals = np.zeros(N_STREAMS)\n true_predictions = np.zeros(N_STREAMS)\n true_predictions_t = 0.0\n true_predictions_t_mv = 0.0\n true_predictions_t_av = 0.0\n true_predictions_t_goowe = 0.0\n total = 0.\n\n ### Generating drifts by generating random values for each Stream\n ps = []\n for j in range(N_STREAMS):\n p = random.random()\n ps.append(p)\n #if p > p_tresholds[j]:\n #streams[j].generate_drift()\n #logging.info('\\n\\tDrift generatoed for STREAM {}'.format(str(j+1)))\n p_t = np.random.random()\n #if p_t > pt_threshold:\n #stream_t.generate_drift()\n #logging.info('\\n\\tDrift generatoed for TARGET STREAM')\n total += 1\n Xs = []\n ys = []\n for j in range(N_STREAMS):\n curr = streams[j].next_sample()\n X, y = curr[0], curr[1]\n Xs.append(X)\n ys.append(y)\n preds = goowes[j].predict(X)\n true_predictions[j] += np.sum(preds == y)\n accuracies_tmp[j] = true_predictions[j] / total\n\n curr_t = stream_t.next_sample()\n X_t, y_t = curr_t[0], curr_t[1]\n preds_t_mv = goowe_t.predict(X_t, ensemble_type='mv')\n preds_t_av = goowe_t.predict(X_t, ensemble_type='av')\n preds_t_goowe = goowe_t.predict(X_t, ensemble_type='goowe')\n true_predictions_t_mv += np.sum(preds_t_mv == y_t)\n true_predictions_t_av += np.sum(preds_t_av == y_t)\n true_predictions_t_goowe += np.sum(preds_t_goowe == y_t)\n accuracy_mv = true_predictions_t_mv / total\n accuracy_av = true_predictions_t_av / total\n accuracy_goowe = true_predictions_t_goowe / total\n for j in range(N_STREAMS):\n print('\\tSTREAM {} :: Data instance: {} - Accuracy: {}'.format(str(j+1), int(total), round(accuracies_tmp[j]*100.0, 3)))\n print('\\tTARGET STREAM :: Data instance: {} - Accuracies: MV: {} - AV: {} - Goowe: {}'.format(int(total),\n round(accuracy_mv*100.0, 3), round(accuracy_av*100.0, 3), round(accuracy_goowe*100.0, 3)))\n print('\\tCurrent classifier indices: {}'.format(goowe_t.get_classifer_indices()))\n print('\\t==========================================================================')\n for j in range(N_STREAMS):\n goowes[j].partial_fit(Xs[j], ys[j])\n goowe_t.update(X_t, y_t, [])\n for j in range(N_STREAMS):\n accuracies_all[j].append(accuracies_tmp[j])\n 
accuracies_mv.append(accuracy_mv)\n accuracies_av.append(accuracy_av)\n accuracies_goowe.append(accuracy_goowe)\n #np.save('results/agrawal_'+ENSEMBLE_TYPE+'_accuracies_1.npy', np.asarray(accuracies_1))\n #np.save('results/agrawal_'+ENSEMBLE_TYPE+'_accuracies_2.npy', np.asarray(accuracies_2))\n #np.save('results/agrawal_'+ENSEMBLE_TYPE+'_accuracies_3.npy', np.asarray(accuracies_3))\n instances_counter += 1\nnp.save('results/multi_source/hyperplane_accuracies_sources.npy', np.asarray(accuracies_all))\nnp.save('results/multi_source/hyperplane_accuracies_mv.npy', np.asarray(accuracies_mv))\nnp.save('results/multi_source/hyperplane_accuracies_av.npy', np.asarray(accuracies_av))\nnp.save('results/multi_source/hyperplane_accuracies_goowe.npy', np.asarray(accuracies_goowe))\n# TODO: Create new goowe_3 by using components of the other two goowes with highest weights (5 from each i.e.)\n# TODO: AND update goowe_3 at each chunk (each while step)\n", "id": "532324", "language": "Python", "matching_score": 5.031919956207275, "max_stars_count": 0, "path": "ms_m_hyperplane_experiment.py" }, { "content": "from skmultiflow.data.file_stream import FileStream\nimport numpy as np\nfrom Goowe import Goowe\n#from skmultiflow.data import ConceptDriftStream\nfrom skmultiflow.data import RandomRBFGeneratorDrift\nimport logging\nfrom GooweMS import GooweMS\nimport random\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n# Prepare the data stream\n#, num_drift_centroids=5\n#DRIFT_SPEED = 0.01\n#DRIFT_TYPE = 'fast'\nDRIFT_SPEED = 0.0001\nDRIFT_TYPE = 'slow'\nENSEMBLE_TYPE = 'av'\nstream_1 = RandomRBFGeneratorDrift(model_random_state=None, sample_random_state=None, n_classes=4, n_features=20, n_centroids=50, change_speed=DRIFT_SPEED)\nstream_2 = RandomRBFGeneratorDrift(model_random_state=None, sample_random_state=None, n_classes=4, n_features=20, n_centroids=50, change_speed=DRIFT_SPEED)\nstream_3 = RandomRBFGeneratorDrift(model_random_state=None, sample_random_state=None, n_classes=4, n_features=20, n_centroids=50, change_speed=DRIFT_SPEED)\nstream_1.prepare_for_use()\nstream_2.prepare_for_use()\nstream_3.prepare_for_use()\n\ninstances_num = 10000\ninstances_counter = 0\n\n### Arrays for storing accuracy values for Streams\naccuracies_1 = []\naccuracies_2 = []\naccuracies_3_mv = []\naccuracies_3_av = []\naccuracies_3_goowe = []\n\nnum_features = stream_1.n_features\nnum_targets = stream_1.n_targets\nnum_classes = len(stream_1.target_values)\ntarget_values = stream_1.target_values\nlogging.info(\"\\n\\tStreams are generated and prepared for use.\\n\\tNumber of features: {0} - Number of targets: {1} - Number of classes: {2} - Target values: {3}\"\n .format(num_features, num_targets, num_classes, target_values))\n\nN_MAX_CLASSIFIERS = 15\nCHUNK_SIZE = 500 # User-specified\nWINDOW_SIZE = 100 # User-specified\n\n### Probability of drift in streams\np1_threshold = 0.8\np2_threshold = 0.9\np3_threshold = 0.85\n\n# Initialize the ensemble\ngoowe_1 = Goowe(n_max_components=N_MAX_CLASSIFIERS,\n chunk_size=CHUNK_SIZE,\n window_size=WINDOW_SIZE,\n logging = False)\ngoowe_1.prepare_post_analysis_req(num_features, num_targets, num_classes, target_values)\n\n# Initialize the ensemble\ngoowe_2 = Goowe(n_max_components=N_MAX_CLASSIFIERS,\n chunk_size=CHUNK_SIZE,\n window_size=WINDOW_SIZE,\n logging = False)\ngoowe_2.prepare_post_analysis_req(num_features, num_targets, num_classes, target_values)\n\n\ngoowe_3 = GooweMS(goowe_1, goowe_2, n_max_components=N_MAX_CLASSIFIERS,\n chunk_size=CHUNK_SIZE,\n 
window_size=WINDOW_SIZE,\n logging = False)\ngoowe_3.prepare_post_analysis_req(num_features, num_targets, num_classes, target_values)\n# For the first chunk, there is no prediction.\n\nX_init, y_init = stream_1.next_sample(CHUNK_SIZE)\ngoowe_1.partial_fit(X_init, y_init)\n\nX_init, y_init = stream_2.next_sample(CHUNK_SIZE)\ngoowe_2.partial_fit(X_init, y_init)\n\nX_init, y_init = stream_3.next_sample(CHUNK_SIZE)\n#a = goowe_3.predict(X_init)\n#print('==============', a)\ngoowe_3.update(X_init, y_init, 1, 1)\n# TODO: update_from(goowe_1, goowe_2) :: updates existing goowe by selecting N_MAX_CLASSIFIERS / 2 components from each of them.\n\naccuracy_1 = 0.0\ntotal_1 = 0.0\ntrue_predictions_1 = 0.0\n\naccuracy_2 = 0.0\ntotal_2 = 0.0\ntrue_predictions_2 = 0.0\n\naccuracy_3 = 0.0\ntotal_3 = 0.0\ntrue_predictions_3_mv = 0.0\ntrue_predictions_3_av = 0.0\ntrue_predictions_3_goowe = 0.0\n\ntotal = 0.\n\nfor i in range(CHUNK_SIZE):\n total += 1\n cur_1 = stream_1.next_sample()\n cur_2 = stream_2.next_sample()\n cur_3 = stream_3.next_sample()\n X_1, y_1 = cur_1[0], cur_1[1]\n X_2, y_2 = cur_2[0], cur_2[1]\n X_3, y_3 = cur_3[0], cur_3[1]\n preds_1 = goowe_1.predict(X_1)\n preds_2 = goowe_2.predict(X_2)\n preds_3_mv = goowe_3.predict(X_3, ensemble_type='mv')\n preds_3_av = goowe_3.predict(X_3, ensemble_type='av')\n preds_3_goowe = goowe_3.predict(X_3, ensemble_type='goowe')\n true_predictions_1 += np.sum(preds_1 == y_1)\n true_predictions_2 += np.sum(preds_2 == y_2)\n true_predictions_3_mv += np.sum(preds_3_mv == y_3)\n true_predictions_3_av += np.sum(preds_3_av == y_3)\n true_predictions_3_goowe += np.sum(preds_3_goowe == y_3)\n accuracy_1 = true_predictions_1 / total\n accuracy_2 = true_predictions_2 / total\n accuracy_3_mv = true_predictions_3_mv / total\n accuracy_3_av = true_predictions_3_av / total\n accuracy_3_goowe = true_predictions_3_goowe / total\n print('\\tSTREAM 1 :: Data instance: {} - Accuracy: {}'.format(int(total), round(accuracy_1*100.0, 3)))\n print('\\tSTREAM 2 :: Data instance: {} - Accuracy: {}'.format(int(total), round(accuracy_2*100.0, 3)))\n print('\\tSTREAM 3 :: Data instance: {} - Accuracies: MV: {} - AV: {} - Goowe: {}'.format(int(total),\n round(accuracy_3_mv*100.0, 3), round(accuracy_3_av*100.0, 3), round(accuracy_3_goowe*100.0, 3)))\n print('\\t==========================================================================')\n goowe_1.partial_fit(X_1, y_1)\n goowe_2.partial_fit(X_2, y_2)\n goowe_3.update(X_3, y_3, 1, 1)\n\n# Now, for the remaining instances, do ITTT (Interleaved Test Then Train).\nwhile(stream_1.has_more_samples() and stream_2.has_more_samples() and instances_counter < instances_num):\n\n if(instances_counter % CHUNK_SIZE == 0):\n accuracy_1 = 0.0\n total_1 = 0.0\n true_predictions_1 = 0.0\n accuracy_2 = 0.0\n total_2 = 0.0\n true_predictions_2 = 0.0\n accuracy_3_mv = 0.0\n accuracy_3_av = 0.0\n accuracy_3_goowe = 0.0\n total_3 = 0.0\n true_predictions_3_mv = 0.0\n true_predictions_3_av = 0.0\n true_predictions_3_goowe = 0.0\n total = 0.\n\n ### Generating drifts by generating random values for each Stream\n p1 = random.random()\n p2 = random.random()\n p3 = random.random()\n if p1 > p1_threshold:\n #stream_1.generate_drift()\n logging.info('\\n\\tDrift generatoed for STREAM 1')\n if p2 > p2_threshold:\n #stream_2.generate_drift()\n logging.info('\\n\\tDrift generatoed for STREAM 2')\n if p3 > p3_threshold:\n #stream_3.generate_drift()\n logging.info('\\n\\tDrift generatoed for STREAM 3')\n total += 1\n cur_1 = stream_1.next_sample()\n cur_2 = 
stream_2.next_sample()\n cur_3 = stream_3.next_sample()\n X_1, y_1 = cur_1[0], cur_1[1]\n X_2, y_2 = cur_2[0], cur_2[1]\n X_3, y_3 = cur_3[0], cur_3[1]\n preds_1 = goowe_1.predict(X_1)\n preds_2 = goowe_2.predict(X_2)\n preds_3_goowe = goowe_3.predict(X_3, ensemble_type='goowe')\n preds_3_mv = goowe_3.predict(X_3, ensemble_type='mv')\n preds_3_av = goowe_3.predict(X_3, ensemble_type='av')\n true_predictions_1 += np.sum(preds_1 == y_1)\n true_predictions_2 += np.sum(preds_2 == y_2)\n true_predictions_3_mv += np.sum(preds_3_mv == y_3)\n true_predictions_3_av += np.sum(preds_3_av == y_3)\n true_predictions_3_goowe += np.sum(preds_3_goowe == y_3)\n accuracy_1 = true_predictions_1 / total\n accuracy_2 = true_predictions_2 / total\n accuracy_3_mv = true_predictions_3_mv / total\n accuracy_3_av = true_predictions_3_av / total\n accuracy_3_goowe = true_predictions_3_goowe / total\n accuracies_1.append(accuracy_1)\n accuracies_2.append(accuracy_2)\n accuracies_3_mv.append(accuracy_3_mv)\n accuracies_3_av.append(accuracy_3_av)\n accuracies_3_goowe.append(accuracy_3_goowe)\n np.save('results/rbf_'+DRIFT_TYPE+'_accuracies_1.npy', np.asarray(accuracies_1))\n np.save('results/rbf_'+DRIFT_TYPE+'_accuracies_2.npy', np.asarray(accuracies_2))\n np.save('results/rbf_'+DRIFT_TYPE+'_mv_accuracies_3.npy', np.asarray(accuracies_3_mv))\n np.save('results/rbf_'+DRIFT_TYPE+'_av_accuracies_3.npy', np.asarray(accuracies_3_av))\n np.save('results/rbf_'+DRIFT_TYPE+'_goowe_accuracies_3.npy', np.asarray(accuracies_3_goowe))\n print('\\tSTREAM 1 :: Data instance: {} - Accuracy: {}'.format(int(total), round(accuracy_1*100.0, 3)))\n print('\\tSTREAM 2 :: Data instance: {} - Accuracy: {}'.format(int(total), round(accuracy_2*100.0, 3)))\n print('\\tSTREAM 3 :: Data instance: {} - Accuracies: MV: {} - AV: {} - Goowe: {}'.format(int(total),\n round(accuracy_3_mv*100.0, 3), round(accuracy_3_av*100.0, 3), round(accuracy_3_goowe*100.0, 3)))\n print('\\t==========================================================================')\n goowe_1.partial_fit(X_1, y_1) # Then train\n goowe_2.partial_fit(X_2, y_2) # Then train\n goowe_3.update(X_3, y_3, 1, 1)\n instances_counter += 1\n\n# TODO: Create new goowe_3 by using components of the other two goowes with highest weights (5 from each i.e.)\n# TODO: AND update goowe_3 at each chunk (each while step)\n", "id": "9333966", "language": "Python", "matching_score": 4.571475982666016, "max_stars_count": 0, "path": "ms_rbf_experiment.py" }, { "content": "from skmultiflow.data.file_stream import FileStream\nimport numpy as np\nfrom Goowe import Goowe\n\n\n# Prepare the data stream\nstream = FileStream('./datasets/sea_stream.csv')\nstream.prepare_for_use()\n\nnum_features = stream.n_features\nnum_targets = stream.n_targets\nprint(stream.get_target_values())\nnum_classes = len(stream.get_target_values())\ntarget_values = stream.get_target_values()\n\nN_MAX_CLASSIFIERS = 15\nCHUNK_SIZE = 500 # User-specified\nWINDOW_SIZE = 100 # User-specified\n\n# Initialize the ensemble\ngoowe = Goowe(n_max_components=N_MAX_CLASSIFIERS,\n chunk_size=CHUNK_SIZE,\n window_size=WINDOW_SIZE,\n logging = False)\ngoowe.prepare_post_analysis_req(num_features, num_targets, num_classes, target_values)\n\n# For the first chunk, there is no prediction.\n\nX_init, y_init = stream.next_sample(CHUNK_SIZE)\nprint(X_init)\nprint(y_init)\ngoowe.partial_fit(X_init, y_init)\n\naccuracy = 0.0\ntotal = 0.0\ntrue_predictions = 0.0\n\nfor i in range(CHUNK_SIZE):\n total += 1\n cur = stream.next_sample()\n X, y = cur[0], cur[1]\n preds 
= goowe.predict(X)\n true_predictions += np.sum(preds == y)\n accuracy = true_predictions / total\n print('\\tData instance: {} - Accuracy: {}'.format(total, accuracy))\n goowe.partial_fit(X, y)\n\n# Now, for the remaining instances, do ITTT (Interleaved Test Then Train).\nwhile(stream.has_more_samples()):\n total += 1\n cur = stream.next_sample()\n X, y = cur[0], cur[1]\n preds = goowe.predict(X) # Test\n true_predictions += np.sum(preds == y)\n accuracy = true_predictions / total\n print('\\tData instance: {} - Accuracy: {}'.format(int(total), round(accuracy*100.0, 3)))\n goowe.partial_fit(X, y) # Then train\n", "id": "440034", "language": "Python", "matching_score": 0.9185022711753845, "max_stars_count": 0, "path": "experiment.py" }, { "content": "\nimport numpy as np\nfrom Goowe import Goowe\n\nclass GooweMS(Goowe):\n\n # TODO: Number of Base Streams is 2.\n # Implementation for more than 2 base streams can be done\n # by changing self._goowe_1 and self._goowe_2 with a list of Goowe objects.\n '''\n def __init__(self, goowe_1, goowe_2, n_max_components: int = 10,\n chunk_size: int = 500, window_size: int = 100, logging = True, num_streams = 2):\n super().__init__(n_max_components, chunk_size, window_size, logging)\n self._num_streams = num_streams\n self._goowe_1 = goowe_1\n self._goowe_2 = goowe_2\n '''\n def __init__(self, goowes, n_max_components: int = 10,\n chunk_size: int = 500, window_size: int = 100, logging = True, num_streams = 5):\n super().__init__(n_max_components, chunk_size, window_size, logging)\n self._num_streams = num_streams\n self._goowes = goowes\n assert num_streams == len(goowes), 'Number of source Goowes is not equal to number of streams.'\n self._classifier_indices = []\n '''\n def __init__(self, n_max_components: int = 10,\n chunk_size: int = 500, window_size: int = 100, logging = True, num_streams = 2):\n super().__init__(n_max_components, chunk_size, window_size, logging)\n self._num_streams = num_streams\n self._goowe_1 = Goowe(n_max_components, chunk_size, window_size, logging)\n self._goowe_2 = Goowe(n_max_components, chunk_size, window_size, logging)\n\n def __init__(self, goowe_1, goowe_2, num_streams = 2):\n self._num_streams = num_streams\n self._goowe_1 = goowe_1\n self._goowe_2 = goowe_2\n '''\n\n def update(self, X, y, clf_nums = []):\n if(len(X) == 1):\n y_i = np.array([y])\n self._chunk_data.add_element(X, y_i)\n self._num_of_processed_instances += 1\n #self._update_classifiers(clf_num_1 = 1, clf_num_2 = 1)\n\n if(self._num_of_processed_instances % self._chunk_size == 0):\n print(\"Instance {}\".format(self._num_of_processed_instances))\n self._update_classifiers(clf_nums = [])\n\n elif(len(X) > 1):\n for i in range(len(X)):\n X_i = np.array([X[i]])\n y_i = np.array([[y[i]]])\n self._chunk_data.add_element(X_i, y_i)\n self._num_of_processed_instances += 1\n #self._update_classifiers(clf_num_1 = 1, clf_num_2 = 1)\n\n if(self._num_of_processed_instances % self._chunk_size == 0):\n print(\"Instance {}\".format(self._num_of_processed_instances))\n self._update_classifiers(clf_nums = [])\n\n else:\n print(\"Something wrong with the data...\")\n print(\"len(X) is: {}\".format(len(X)))\n raise ValueError\n\n def _update_classifiers(self, clf_nums = []):\n self._classifier_indices = []\n classifiers = []\n weights = []\n crr_clfs = []\n ps = []\n indices = []\n clfs = []\n if(clf_nums == []):\n clf_nums = np.ones(self._num_streams).tolist()\n sum_clf_nums = sum(clf_nums)\n for j in range(self._num_streams):\n 
weights.append(np.asarray(self._goowes[j].get_weights()))\n crr_clfs.append(self._goowes[j].get_number_of_current_classifiers())\n #ps.append(max(clf_nums[j] / sum_clf_nums, 1.))\n ps.append((clf_nums[j] / sum_clf_nums) * self._num_of_max_classifiers)\n indices_j = weights[j].argsort()[(-1) * round(ps[j] * crr_clfs[j]):][int(ps[j]*-1):]\n #print('vvvv', int(ps[j]*-1))\n #print('wwww', indices_j)\n #print(indices_j)\n #print(weights[j])\n indices.append(indices_j)\n clfs_j = np.asarray(self._goowes[j].get_classifiers()[indices_j])\n clfs_j = np.asarray(clfs_j[clfs_j != np.array(None)]).tolist()\n classifiers += clfs_j\n self._classifier_indices += indices_j.tolist()\n if(self._num_of_current_classifiers > 0):\n self._adjust_weights()\n #classifiers = classifiers[:self._num_of_max_classifiers]\n self._classifiers = np.asarray(classifiers)\n self._num_of_current_classifiers = len(self._classifiers)\n if(self._num_of_current_classifiers > 0):\n #print('DONE!')\n self._normalize_weights_softmax() # maybe useful. we'll see.\n #print(self.get_weights())\n #self._adjust_weights()\n #self._normalize_weights_softmax()\n if(self._Logging):\n print(\"After normalizing weights: \")\n print(self._weights)\n '''\n print('===========================')\n print(crr_clfs_1)\n print(crr_clfs_2)\n print(round(p_1 * crr_clfs_1))\n print(round(p_2 * crr_clfs_1))\n print(len(self._classifiers))\n print(self._num_of_current_classifiers)\n print(self._classifiers)\n print('===========================')\n '''\n \"\"\"\n def _update_classifiers(self, clf_num_1 = 1, clf_num_2 = 1):\n classifiers = []\n weights_1 = np.asarray(self._goowe_1.get_weights())\n weights_2 = np.asarray(self._goowe_2.get_weights())\n crr_clfs_1 = self._goowe_1.get_number_of_current_classifiers()\n crr_clfs_2 = self._goowe_2.get_number_of_current_classifiers()\n p_1 = clf_num_1 / (clf_num_1 + clf_num_2)\n p_2 = clf_num_2 / (clf_num_1 + clf_num_2)\n if(self._num_of_current_classifiers > 0):\n self._adjust_weights()\n indices_1 = weights_1.argsort()[(-1) * round(p_1 * crr_clfs_1):][::-1]\n indices_2 = weights_2.argsort()[(-1) * round(p_2 * crr_clfs_2):][::-1]\n clfs_1 = self._goowe_1.get_classifiers()[indices_1]\n clfs_2 = self._goowe_2.get_classifiers()[indices_2]\n clfs_1 = np.asarray(clfs_1[clfs_1 != np.array(None)]).tolist()\n clfs_2 = np.asarray(clfs_2[clfs_2 != np.array(None)]).tolist()\n classifiers = clfs_1 + clfs_2\n classifiers = classifiers[:self._num_of_max_classifiers]\n self._classifiers = np.asarray(classifiers)\n self._num_of_current_classifiers = len(self._classifiers)\n if(self._num_of_current_classifiers > 0):\n #print('DONE!')\n self._normalize_weights_softmax() # maybe useful. 
we'll see.\n #print(self.get_weights())\n #self._adjust_weights()\n #self._normalize_weights_softmax()\n if(self._Logging):\n print(\"After normalizing weights: \")\n print(self._weights)\n '''\n print('===========================')\n print(crr_clfs_1)\n print(crr_clfs_2)\n print(round(p_1 * crr_clfs_1))\n print(round(p_2 * crr_clfs_1))\n print(len(self._classifiers))\n print(self._num_of_current_classifiers)\n print(self._classifiers)\n print('===========================')\n '''\n \"\"\"\n def prepare_post_analysis_req(self, num_features, num_targets, num_classes, target_values, record=False):\n super().prepare_post_analysis_req(num_features, num_targets, num_classes, target_values, record=False)\n\n def _get_components_predictions_for_instance(self, inst):\n wep = super()._get_components_predictions_for_instance(inst)\n #print('++++++++++++++++++++++++++++++++++')\n #print(wep)\n #print('++++++++++++++++++++++++++++++++++')\n\n def _get_components_predictions_for_instance_2(self, inst):\n preds = np.zeros((self._num_of_current_classifiers, self._num_classes))\n for k in range(len(preds)):\n kth_comp_pred = self._classifiers[k].predict_proba(inst)\n preds[k, :] = kth_comp_pred[0]\n if(self._Logging):\n print('Component Predictions:')\n print(preds)\n '''\n print('///////////////////////////////////')\n print(preds)\n print('///////////////////////////////////')\n '''\n return preds\n\n def _adjust_weights(self):\n super()._adjust_weights()\n\n def _normalize_weights(self):\n super()._normalize_weights()\n\n def _normalize_weights_softmax(self):\n super()._normalize_weights_softmax()\n\n def _process_chunk(self):\n super()._process_chunk()\n\n def _record_truths_this_chunk(self):\n super()._record_truths_this_chunk();\n\n def _record_comp_preds_this_chunk(self):\n super()._record_comp_preds_this_chunk()\n\n def _record_weights_this_chunk(self):\n super()._record_weights_this_chunk()\n\n def fit(self, X, y, classes=None, weight=None):\n super().fit(X, y, classes=None, weight=None)\n\n def partial_fit(self, X, y, classes=None, weight=None):\n super().partial_fit(X, y, classes=None, weight=None)\n\n def predict(self, X, ensemble_type = 'mv'):\n #super().predict(X)\n if(ensemble_type == 'goowe'):\n return self.predict_goowe(X)\n elif(ensemble_type == 'mv'):\n return self.predict_mv(X)\n elif(ensemble_type == 'av'):\n return self.predict_av(X)\n else:\n raise NotImplementedError(\"For now, only the Goowe, Average Voting and \"\n \"Majority Voting methods are implemented. 
\"\n \"You can use goowe, av and mv options.\")\n\n def predict_goowe(self, X):\n predictions = []\n if(len(X) == 1):\n predictions.append(np.argmax(self.predict_proba(X)))\n elif(len(X) > 1):\n for i in range(len(X)):\n relevance_scores = self.predict_proba(X[i])\n predictions.append(np.argmax(relevance_scores))\n if(self._Logging):\n print('Ensemble Prediction:')\n print(np.array(predictions))\n return np.array(predictions)\n\n def predict_mv(self, X):\n component_probs = self.predict_proba_mv(X)\n component_preds = np.argmax(component_probs, axis = 1)\n pred = np.bincount(component_preds).argmax()\n #print(component_preds)\n #print(np.bincount(component_preds))\n #print(pred)\n return(pred)\n\n def predict_av(self, X):\n predictions = []\n if(len(X) == 1):\n predictions.append(np.argmax(self.predict_proba_av(X)))\n elif(len(X) > 1):\n for i in range(len(X)):\n relevance_scores = self.predict_proba_av(X[i])\n predictions.append(np.argmax(relevance_scores))\n if(self._Logging):\n print('Ensemble Prediction:')\n print(np.array(predictions))\n return np.array(predictions)\n\n def predict_proba_av(self, X):\n weights = np.array(self._weights)\n weights = weights[:self._num_of_current_classifiers]\n weights.fill(1./len(weights))\n components_preds = self._get_components_predictions_for_instance_2(X)\n self._chunk_comp_preds.add_element([components_preds])\n weighted_ensemble_vote = np.dot(weights, components_preds)\n self._chunk_ensm_preds.add_element(weighted_ensemble_vote)\n return(weighted_ensemble_vote)\n\n def predict_proba_mv(self, X):\n components_preds = self._get_components_predictions_for_instance_2(X)\n self._chunk_comp_preds.add_element([components_preds])\n #weighted_ensemble_vote = np.dot(weights, components_preds)\n #self._chunk_ensm_preds.add_element(weighted_ensemble_vote)\n return components_preds\n\n def predict_proba(self, X):\n #super().predict_proba(X)\n weights = np.array(self._weights)\n weights = weights[:self._num_of_current_classifiers]\n components_preds = self._get_components_predictions_for_instance_2(X)\n #print('*****************************')\n #print(components_preds)\n #print('*****************************')\n self._chunk_comp_preds.add_element([components_preds])\n weighted_ensemble_vote = np.dot(weights, components_preds)\n self._chunk_ensm_preds.add_element(weighted_ensemble_vote)\n #print('weights: ', weights)\n #print('component preds: ', components_preds)\n #print('weighted ensemble vote: ', weighted_ensemble_vote)\n return weighted_ensemble_vote\n\n def reset(self):\n super().reset()\n\n def score(self, X, y):\n super().score(X, y)\n\n def get_info(self):\n super().get_info()\n\n def get_class_type(self):\n super().get_class_type()\n\n def get_number_of_current_classifiers(self):\n super().get_number_of_current_classifiers()\n\n def get_number_of_max_classifiers(self):\n super().get_number_of_max_classifiers()\n\n def get_classifer_indices(self):\n return self._classifier_indices\n", "id": "8133307", "language": "Python", "matching_score": 5.142807960510254, "max_stars_count": 0, "path": "GooweMSS.py" }, { "content": "import numpy as np\r\nfrom skmultiflow.core.base import StreamModel\r\n#from skmultiflow.core.base import BaseEstimator\r\nfrom skmultiflow.trees import HoeffdingTree\r\nfrom skmultiflow.utils.data_structures import InstanceWindow, FastBuffer\r\n\r\n\r\nclass Goowe(StreamModel):\r\n#class Goowe(BaseEstimator):\r\n \"\"\" GOOWE (Geometrically Optimum Online Weighted Ensemble), as it is\r\n described in Bonab and Can (2017). 
Common notation in the code is\r\n as follows:\r\n K for maximum number of classifiers in the ensemble.\r\n N for data instances.\r\n A, d as they are, in the aforementioned paper.\r\n\r\n\r\n Parameters\r\n ----------\r\n n_max_components: int\r\n Ensemble size limit. Maximum number of component classifiers.\r\n chunk_size: int\r\n The amount of instances necessary for ensemble to learn concepts from.\r\n At each chunk_size many instances, some training is done.\r\n window_size: int\r\n Size of sliding window, which keeps record of the last k instances\r\n that are encountered in the data stream.\r\n \"\"\"\r\n\r\n def __init__(self, n_max_components: int = 10,\r\n chunk_size: int = 500, window_size: int = 100, logging = True):\r\n super().__init__()\r\n self._num_of_max_classifiers = n_max_components\r\n self._chunk_size = chunk_size\r\n self._Logging = logging\r\n self._num_of_current_classifiers = 0\r\n self._num_of_processed_instances = 0\r\n self._classifiers = np.empty((self._num_of_max_classifiers),\r\n dtype=object)\r\n self._weights = np.zeros((self._num_of_max_classifiers,))\r\n\r\n # What to save from current Data Chunk --> will be used for\r\n # adjusting weights, pruning purposes and so on.\r\n # Individual predictions of components, overall prediction of ensemble,\r\n # and ground truth info.\r\n self._chunk_comp_preds = FastBuffer(max_size=chunk_size)\r\n self._chunk_ensm_preds = FastBuffer(max_size=chunk_size)\r\n\r\n # chunk_data has instances in the chunk and their ground truth.\r\n # To be initialized after receiving n_features, n_targets\r\n self._chunk_data = None\r\n # self._chunk_truths = FastBuffer(max_size=chunk_size)\r\n\r\n # some external stuff that is about the data we are dealing with\r\n # but useful for recording predictions\r\n self._num_classes = None\r\n self._target_values = None # Required to correctly train HTs\r\n self._record = False # Boolean for keeping records to files\r\n\r\n # TODO: Implement Sliding Window Continuous Evaluator.\r\n # What to save at Sliding Window (last n instances) --> will be\r\n # used for continuous evaluation.\r\n # self._sliding_window_ensemble_preds =FastBuffer(max_size=window_size)\r\n # self._sliding_window_truths = FastBuffer(max_size=window_size)\r\n\r\n def prepare_post_analysis_req(self, num_features, num_targets, num_classes, target_values, record=False):\r\n # Need to get the dataset information but we do not want to\r\n # take it as an argument to the classifier itself, nor we do want to\r\n # ask it at each data instance. 
Hence we take dataset info from user\r\n # explicitly to create _chunk_data entries.\r\n #chunk_size = self._chunk_size\r\n self._chunk_data = InstanceWindow(n_features = num_features,\r\n n_targets = num_targets,\r\n max_size = self._chunk_size)\r\n #self._chunk_data = chunk_data\r\n # num_targets shows how many columns you want to predict in the data.\r\n # num classes is eqv to possible number of values that that column\r\n # can have.\r\n self._num_classes = num_classes\r\n self._target_values = target_values\r\n self._record = record\r\n\r\n if(self._record):\r\n # Create files that keeps record of:\r\n # - weights at each chunk\r\n # - individual component results for every instance\r\n # - ground truths for every instance.\r\n self._f_comp_preds = open(\"component_predictions.csv\", \"w+\")\r\n self._f_truths = open(\"ground_truths.csv\", \"w+\")\r\n self._f_weights = open(\"weights.csv\", \"w+\")\r\n\r\n self._f_comp_preds.write(str(self._chunk_size) + '\\n')\r\n\r\n self._f_comp_preds.close()\r\n self._f_truths.close()\r\n self._f_weights.close()\r\n return\r\n\r\n def _get_components_predictions_for_instance(self, inst):\r\n \"\"\" For a given data instance, takes predictions of\r\n individual components from the ensemble as a matrix.\r\n\r\n Parameters\r\n ----------\r\n inst: data instance for which votes of components are delivered.\r\n\r\n Returns\r\n ----------\r\n numpy.array\r\n A 2-d numpy array where each row corresponds to predictions of\r\n each classifier.\r\n \"\"\"\r\n preds = np.zeros((self._num_of_current_classifiers, self._num_classes))\r\n # print(np.shape(preds))\r\n for k in range(len(preds)):\r\n kth_comp_pred = self._classifiers[k].predict_proba(inst)\r\n # print(kth_comp_pred[0])\r\n # print(preds)\r\n # print(\"Component {}'s Prediction: {}\".format(k, kth_comp_pred))\r\n preds[k, :] = kth_comp_pred[0]\r\n if(self._Logging):\r\n print('Component Predictions:')\r\n print(preds)\r\n return preds\r\n\r\n def _adjust_weights(self):\r\n \"\"\" Weight adustment by solving linear least squares, as it is\r\n described in Bonab and Can (2017).\r\n \"\"\"\r\n # Prepare variables for Weight Adjustment\r\n # print('number of current classifiers: {}'.format(self._num_of_current_classifiers))\r\n A = np.zeros(shape=(self._num_of_current_classifiers,\r\n self._num_of_current_classifiers))\r\n d = np.zeros(shape=(self._num_of_current_classifiers,))\r\n\r\n # Go over all the data chunk, calculate values of (S_i x S_j) for A.\r\n # (S_i x O) for d.\r\n y_all = self._chunk_data.get_targets_matrix().astype(int)\r\n # print(y_all)\r\n for i in range(len(y_all)):\r\n class_index = y_all[i]\r\n comp_preds = self._chunk_comp_preds.get_next_element()\r\n #print(\"{} components predictions:\".format(i))\r\n #print(comp_preds)\r\n\r\n A = A + comp_preds.dot(comp_preds.T)\r\n d = d + comp_preds[0][class_index]\r\n\r\n # A and d are filled. Now, the linear system Aw=d to be solved\r\n # to get our desired weights. w is of size K.\r\n # print(\"Solving Aw=d\")\r\n # print(A)\r\n # print(d)\r\n w = np.linalg.lstsq(A, d, rcond=None)[0]\r\n\r\n # _weights has maximum size but what we found can be\r\n # smaller. 
Therefore, need to put the values of w to global weights\r\n if(self._num_of_current_classifiers < self._num_of_max_classifiers):\r\n for i in range(len(w)):\r\n self._weights[i] = w[i]\r\n else: # If full size, there is no problem.\r\n self._weights = w\r\n # print(\"After solving Aw=d weights:\")\r\n # print(self._weights)\r\n return\r\n\r\n def _normalize_weights(self):\r\n \"\"\" Normalizes the weights of the ensemble to (0, 1) range.\r\n Performs (x_i - min(x)) / (max(x) - min(x)) on the nonzero elements\r\n of the weight vector.\r\n \"\"\"\r\n min = np.amin(self._weights[:self._num_of_current_classifiers])\r\n max = np.amax(self._weights[:self._num_of_current_classifiers])\r\n\r\n if(min == max): # all weights are the same\r\n for i in range(self._num_of_current_classifiers):\r\n self._weights[i] = 1. / self._num_of_current_classifiers\r\n else:\r\n for i in range(self._num_of_current_classifiers):\r\n self._weights[i] = (self._weights[i] - min) / (max - min)\r\n return\r\n\r\n def _normalize_weights_softmax(self):\r\n \"\"\" Normalizes the weights of the ensemble to (0, 1) range.\r\n Performs (x_i - min(x)) / (max(x) - min(x)) on the nonzero elements\r\n of the weight vector.\r\n \"\"\"\r\n cur_weights = self._weights[:self._num_of_current_classifiers]\r\n self._weights[:self._num_of_current_classifiers] = np.exp(cur_weights) / sum(np.exp(cur_weights))\r\n\r\n return\r\n\r\n def _process_chunk(self):\r\n \"\"\" A subroutine that runs at the end of each chunk, allowing\r\n the components to be trained and ensemble weights to be adjusted.\r\n Until the first _process_chunk call, the ensemble is not yet ready.\r\n At first call, the first component is learned.\r\n At the rest of the calls, new components are formed, and the older ones\r\n are trained by the given chunk.\r\n If the ensemble size is reached, then the lowest weighted component is\r\n removed from the ensemble.\r\n \"\"\"\r\n new_clf = HoeffdingTree() # with default parameters for now\r\n new_clf.reset()\r\n\r\n # Save records of previous chunk\r\n if(self._record and self._num_of_current_classifiers > 0):\r\n self._record_truths_this_chunk()\r\n self._record_comp_preds_this_chunk()\r\n self._record_weights_this_chunk()\r\n\r\n # Case 1: No classifier in the ensemble yet, first chunk:\r\n if(self._num_of_current_classifiers == 0):\r\n self._classifiers[0] = new_clf\r\n self._weights[0] = 1.0 # weight is 1 for the first clf\r\n self._num_of_current_classifiers += 1\r\n else:\r\n # First, adjust the weights of the old component classifiers\r\n # according to what happened in this chunk.\r\n self._adjust_weights()\r\n # Case 2: There are classifiers in the ensemble but\r\n # the ensemble size is still not capped.\r\n if(self._num_of_current_classifiers < self._num_of_max_classifiers):\r\n # Put the new classifier to ensemble with the weight of 1\r\n self._classifiers[self._num_of_current_classifiers] = new_clf\r\n self._weights[self._num_of_current_classifiers] = float(1.0)\r\n self._num_of_current_classifiers += 1\r\n\r\n # Case 3: Ensemble size is capped. Need to replace the component\r\n # with lowest weight.\r\n else:\r\n assert (self._num_of_current_classifiers\r\n == self._num_of_max_classifiers), \"Ensemble not full.\"\r\n index_of_lowest_weight = np.argmin(self._weights)\r\n self._classifiers[index_of_lowest_weight] = new_clf\r\n self._weights[index_of_lowest_weight] = 1.0\r\n\r\n # Normalizing weigths to simplify numbers\r\n self._normalize_weights_softmax() # maybe useful. 
we'll see.\r\n if(self._Logging):\r\n print(\"After normalization weights: \")\r\n print(self._weights)\r\n # Ensemble maintenance is done. Now train all classifiers\r\n # in the ensemble from the current chunk.\r\n # Can be parallelized.\r\n data_features = self._chunk_data.get_attributes_matrix()\r\n data_truths = self._chunk_data.get_targets_matrix()\r\n data_truths = data_truths.astype(int).flatten()\r\n\r\n if(self._Logging):\r\n print(\"Starting training the components with the current chunk...\")\r\n for k in range(self._num_of_current_classifiers):\r\n print(\"Training classifier {}\".format(k))\r\n self._classifiers[k].partial_fit(data_features, data_truths,\r\n classes=self._target_values)\r\n print(\"Training the components with the current chunk completed...\")\r\n else:\r\n for k in range(self._num_of_current_classifiers):\r\n self._classifiers[k].partial_fit(data_features, data_truths, classes=self._target_values)\r\n return\r\n\r\n def _record_truths_this_chunk(self):\r\n f = open(\"ground_truths.csv\", \"ab\")\r\n\r\n data_truths = self._chunk_data.get_targets_matrix()\r\n data_truths = data_truths.astype(int).flatten()\r\n\r\n # Default behaviour is to store list of lists for savetxt.\r\n # Hence, to prevent newline after each element of list, we surround\r\n # the truth array with one more set of bracketts.\r\n np.savetxt(f, [data_truths], delimiter=\",\", fmt='%d')\r\n\r\n f.close()\r\n return\r\n\r\n def _record_comp_preds_this_chunk(self):\r\n f = open(\"component_predictions.csv\", \"a+\")\r\n np.savetxt(f, [self._num_of_current_classifiers], fmt='%d')\r\n\r\n comp_preds = np.array(self._chunk_comp_preds.get_queue())\r\n\r\n for i in range(len(comp_preds)):\r\n np.savetxt(f, comp_preds[i], delimiter=',', fmt='%1.5f')\r\n f.close()\r\n return\r\n\r\n def _record_weights_this_chunk(self):\r\n f = open(\"weights.csv\", \"a+\")\r\n np.savetxt(f, [self._num_of_current_classifiers], fmt='%d')\r\n\r\n weights = self._weights\r\n np.savetxt(f, [weights], delimiter=',', fmt='%1.5f')\r\n f.close()\r\n return\r\n\r\n # --------------------------------------------------\r\n # Overridden methods from the parent (StreamModel)\r\n # --------------------------------------------------\r\n def fit(self, X, y, classes=None, weight=None):\r\n raise NotImplementedError(\"For now, only the stream version \"\r\n \"is implemented. Use partial_fit()\")\r\n\r\n def partial_fit(self, X, y, classes=None, weight=None):\r\n # This method should work with individual instances, as well as bunch\r\n # of instances, since there can be pre-training for warm start.\r\n\r\n # If an individual instance is inputted, then just save X and y to\r\n # train from them later.\r\n if(len(X) == 1):\r\n # Save X and y to train classifiers later\r\n # y is required to be 1x1, and hence the square bracketts.\r\n y_i = np.array([y])\r\n # print(type(X))\r\n # print(type(y_i))\r\n # print(X)\r\n # print(y_i)\r\n self._chunk_data.add_element(X, y_i)\r\n\r\n # If still filling the chunk, then just add the instance to the\r\n # current data chunk, wait for it to be filled.\r\n self._num_of_processed_instances += 1\r\n\r\n # If at the end of a chunk, start training components\r\n # and adjusting weights using information in this chunk.\r\n if(self._num_of_processed_instances % self._chunk_size == 0):\r\n print(\"Instance {}\".format(self._num_of_processed_instances))\r\n self._process_chunk()\r\n elif(len(X) > 1):\r\n # Input is a chunk. 
Add them individually.\r\n for i in range(len(X)):\r\n X_i = np.array([X[i]])\r\n y_i = np.array([[y[i]]])\r\n # print(X_i)\r\n # print(y_i)\r\n self._chunk_data.add_element(X_i, y_i)\r\n self._num_of_processed_instances += 1\r\n\r\n # If at the end of a chunk, start training components\r\n # and adjusting weights using information in this chunk.\r\n if(self._num_of_processed_instances % self._chunk_size == 0):\r\n print(\"Instance {}\".format(self._num_of_processed_instances))\r\n self._process_chunk()\r\n else:\r\n print(\"Something wrong with the data...\")\r\n print(\"len(X) is: {}\".format(len(X)))\r\n return\r\n\r\n def predict(self, X):\r\n \"\"\" For a given data instance, yields the prediction values.\r\n\r\n Parameters\r\n ----------\r\n X: numpy.ndarray of shape (n_samples, n_features)\r\n Samples for which we want to predict the labels.\r\n\r\n Returns\r\n -------\r\n numpy.array\r\n Predicted labels for all instances in X.\r\n \"\"\"\r\n predictions = []\r\n if(len(X) == 1):\r\n predictions.append(np.argmax(self.predict_proba(X)))\r\n elif(len(X) > 1):\r\n # Add many predictions\r\n for i in range(len(X)):\r\n relevance_scores = self.predict_proba(X[i])\r\n predictions.append(np.argmax(relevance_scores))\r\n # print(np.argmax(relevance_scores))\r\n if(self._Logging):\r\n print('Ensemble Prediction:')\r\n print(np.array(predictions))\r\n return np.array(predictions) #, one_hot\r\n\r\n def predict_proba(self, X):\r\n \"\"\" For a given data instance, takes WEIGHTED combination\r\n of components to get relevance scores for each class.\r\n\r\n Parameters\r\n ----------\r\n X: data instance for which weighted combination is delivered.\r\n\r\n Returns\r\n ----------\r\n numpy.array\r\n A vector with number_of_classes elements where each element\r\n represents class score of corresponding class for this instance.\r\n \"\"\"\r\n weights = np.array(self._weights)\r\n\r\n # get only the useful weights\r\n weights = weights[:self._num_of_current_classifiers]\r\n components_preds = self._get_components_predictions_for_instance(X)\r\n #print('*****************************')\r\n #print(components_preds)\r\n #print('*****************************')\r\n # Save individual component predictions and ensemble prediction\r\n # for later analysis.\r\n self._chunk_comp_preds.add_element([components_preds])\r\n\r\n #print(weights)\r\n #print(components_preds)\r\n #print(self.get_classifiers())\r\n weighted_ensemble_vote = np.dot(weights, components_preds)\r\n # print(\"Weighted Ensemble vote: {}\".format(weighted_ensemble_vote))\r\n self._chunk_ensm_preds.add_element(weighted_ensemble_vote)\r\n\r\n return weighted_ensemble_vote\r\n\r\n def reset(self):\r\n pass\r\n\r\n def score(self, X, y):\r\n pass\r\n\r\n def get_info(self):\r\n return 'The Ensemble GOOWE (Bonab and Can, 2017) with' + \\\r\n ' - n_max_components: ' + str(self._num_of_max_classifiers) + \\\r\n ' - num_of_current_components: ' + str(self._num_of_current_classifiers) + \\\r\n ' - chunk_size: ' + str(self._chunk_size) + \\\r\n ' - num_dimensions_in_label_space(num_classes): ' + str(self._num_classes) + \\\r\n ' - recording: ' + str(self._record)\r\n\r\n def get_class_type(self):\r\n pass\r\n\r\n # Some getters and setters..\r\n def get_number_of_current_classifiers(self):\r\n return self._num_of_current_classifiers\r\n\r\n def get_number_of_max_classifiers(self):\r\n return self._num_of_max_classifiers\r\n\r\n # Helper methods for GooweMS\r\n def get_classifiers(self):\r\n return self._classifiers\r\n\r\n def set_classifiers(self, 
classifiers):\r\n self._classifiers = classifiers\r\n\r\n def get_weights(self):\r\n return self._weights\r\n", "id": "10560412", "language": "Python", "matching_score": 3.6433658599853516, "max_stars_count": 0, "path": "Goowe.py" }, { "content": "from skmultiflow.data.file_stream import FileStream\nfrom skmultiflow.data import SEAGenerator\nfrom skmultiflow.trees import HoeffdingTree\nfrom skmultiflow.evaluation import EvaluatePrequential\n\nfrom Goowe import Goowe\n\n\n# Prepare the data stream\nstream = FileStream('./datasets/og/covtype.csv')\n\nstream.prepare_for_use()\n\nnum_features = stream.n_features\nnum_targets = stream.n_targets\nnum_classes = stream.n_classes\ntarget_values = stream.target_values\n\nprint(\"Dataset with num_features:{}, num_targets:{}, num_classes:{}\".format(\n num_features, num_targets, num_classes))\n\n\nN_MAX_CLASSIFIERS = num_classes\nCHUNK_SIZE = 100 # User-specified\nWINDOW_SIZE = 100 # User-specified\n\n# Initialize the ensemble\ngoowe = Goowe(n_max_components=N_MAX_CLASSIFIERS,\n chunk_size=CHUNK_SIZE,\n window_size=WINDOW_SIZE)\ngoowe.prepare_post_analysis_req(num_features, num_targets,\n num_classes, target_values, record=True)\n\nht = HoeffdingTree()\n\nevaluator = EvaluatePrequential(max_samples=100000,\n max_time=1000,\n pretrain_size=CHUNK_SIZE,\n batch_size=1,\n n_wait=CHUNK_SIZE,\n show_plot=True,\n output_file=\"out.txt\",\n metrics=['accuracy', 'kappa'])\n\n# evaluator.evaluate(stream=stream, model=goowe, model_names=['GOOWE'])\nevaluator.evaluate(stream=stream, model=[goowe, ht], model_names=['GOOWE', 'HT'])\n", "id": "6246629", "language": "Python", "matching_score": 7.708332538604736, "max_stars_count": 4, "path": "eval_datasets.py" }, { "content": "from skmultiflow.data.file_stream import FileStream\nfrom skmultiflow.data import SEAGenerator\nfrom skmultiflow.trees import HoeffdingTree\nfrom skmultiflow.evaluation import EvaluatePrequential\n\nfrom Goowe import Goowe\n\n\n# Prepare the data stream\nstream = FileStream('./datasets/sea_stream.csv')\n\nstream.prepare_for_use()\nnum_features = stream.n_features\nnum_targets = stream.n_targets\nnum_classes = stream.n_classes\n\n\nN_MAX_CLASSIFIERS = 15\nCHUNK_SIZE = 500 # User-specified\nWINDOW_SIZE = 100 # User-specified\n\n# Initialize the ensemble\ngoowe = Goowe(n_max_components=N_MAX_CLASSIFIERS,\n chunk_size=CHUNK_SIZE,\n window_size=WINDOW_SIZE)\ngoowe.prepare_post_analysis_req(num_features, num_targets, num_classes)\n\nht = HoeffdingTree()\n\nevaluator = EvaluatePrequential(max_samples=100000,\n max_time=1000,\n pretrain_size=CHUNK_SIZE,\n batch_size=1,\n n_wait=CHUNK_SIZE,\n show_plot=True,\n output_file=\"out.txt\",\n metrics=['accuracy', 'kappa'])\n\nevaluator.evaluate(stream=stream, model=[goowe, ht], model_names=['GOOWE', 'HT'])\n", "id": "1029086", "language": "Python", "matching_score": 7.329626083374023, "max_stars_count": 4, "path": "evalexperiment.py" } ]
4.571476
wafferyo
[ { "content": "from tkinter import Label, Button, Entry, OptionMenu, IntVar, StringVar, Frame, Toplevel\nfrom tkinter.constants import BOTH\nimport numpy as np\nimport menu\n\nalphabet = '<KEY>'\n\n\nclass Add:\n def back_to_menu(self):\n self.gui_add_output.destroy()\n menu.gui_menu.deiconify()\n\n def compute_sum(self):\n try:\n # convert matrix_a and matrix_b to int\n for i in range(self.rows_get):\n for j in range(self.cols_get):\n self.matrix_a[i][j] = int(self.matrix_a[i][j])\n for i in range(self.rows_get):\n for j in range(self.cols_get):\n self.matrix_b[i][j] = int(self.matrix_b[i][j])\n\n # use np.add to achieve sum\n self.sum_matrix = np.add(self.matrix_a, self.matrix_b)\n\n except (TypeError, Exception):\n pass\n\n try:\n # convert sum_matrix back to str\n list_mat = [str(i) for i in self.sum_matrix]\n\n # remove square brackets\n for i in range(len(list_mat)):\n list_mat[i] = list_mat[i][1:-1]\n\n # return sum_matrix as a list of strings\n return list_mat\n\n except (NameError, TypeError, Exception):\n pass\n\n def output_matrix(self):\n # create output window\n self.gui_add_input.destroy()\n self.gui_add_output = Toplevel()\n self.gui_add_output.title(\"Add\")\n self.gui_add_output.resizable(False, False)\n\n # create output frame\n self.frame_add_output = Frame(self.gui_add_output, highlightbackground='black', highlightthickness=1)\n self.frame_add_output.pack(fill=BOTH, expand=True, padx=5, pady=5)\n\n # go back to menu button\n Button(self.frame_add_output, text=\"Back\", width=4, command=self.back_to_menu).grid(\n row=self.rows_get + self.rows_get + 10, column=1)\n\n # display matrix_a input\n Label(self.frame_add_output, text='Matrix A:', font=('arial', 10, 'bold'), underline=0).grid(row=1, column=1)\n\n for i in range(self.rows_get):\n for j in range(self.cols_get):\n Label(self.frame_add_output, text=self.matrix_a[i][j], bd=5).grid(row=i+1, column=j+2)\n\n # display matrix_b input\n Label(self.frame_add_output, text='Matrix B:', font=('arial', 10, 'bold'), underline=0)\\\n .grid(row=1, column=self.cols_get+2)\n\n for i in range(self.rows_get):\n for j in range(self.cols_get):\n Label(self.frame_add_output, text=self.matrix_b[i][j], bd=5).grid(row=i+1, column=j+self.cols_get*2+2)\n\n # finally to display the sum\n Label(self.frame_add_output, text='Sum:', font=('arial', 10, 'bold'), underline=0).grid(\n row=self.rows_get * 2,\n column=1)\n\n # compute the sum\n self.sum_matrix = self.compute_sum()\n\n # display the sum\n for i in range(len(self.sum_matrix)):\n Label(self.frame_add_output, text=self.sum_matrix[i], bd=5).grid(\n row=i + self.rows_get * 2, column=2, columnspan=5, sticky='w ')\n\n # gui stuff\n self.gui_add_output.protocol(\"WM_DELETE_WINDOW\", menu.gui_menu.destroy)\n self.gui_add_output.mainloop()\n\n def input_matrix(self):\n # create input window\n self.gui_add_menu.destroy()\n self.gui_add_input = Toplevel()\n self.gui_add_input.title(\"Add\")\n self.gui_add_input.resizable(False, False)\n\n # create input frame\n self.frame_add_input = Frame(self.gui_add_input, highlightbackground='black', highlightthickness=1)\n self.frame_add_input.pack(fill=BOTH, expand=True, padx=5, pady=5)\n\n # create matrix A entries\n Label(self.frame_add_input, text=\"Enter matrix A:\", font=('arial', 10, 'bold')).grid(row=1, column=1)\n\n text_var = []\n entries = []\n\n # convert rows and cols from IntVar to int\n self.rows_get, self.cols_get = (self.rows.get(), self.cols.get())\n\n # create the list of entries with corresponding text_vars\n for i in 
range(self.rows_get):\n # append an empty list to text_var and entries to append to later\n text_var.append([])\n entries.append([])\n\n for j in range(self.cols_get):\n # for column indications\n if i == 1:\n Label(self.frame_add_input, text=alphabet[j]).grid(row=1, column=j + 2)\n\n # append StringVar\n text_var[i].append(StringVar())\n\n # append the entry into the list\n entries[i].append(Entry(self.frame_add_input, textvariable=text_var[i][j], width=3))\n\n # display entry\n entries[i][j].grid(row=i + 2, column=j + 2)\n\n # for row indications\n Label(self.frame_add_input, text=i + 1).grid(row=i + 2, column=1, sticky='e')\n\n # do the same for matrix_b\n Label(self.frame_add_input, text=\"Enter matrix B:\", font=('arial', 10, 'bold')).grid(row=self.rows_get * 2,\n column=1)\n text_var_b = []\n entries_b = []\n\n for i in range(self.rows_get):\n text_var_b.append([])\n entries_b.append([])\n for j in range(self.cols_get):\n if i == 1:\n Label(self.frame_add_input, text=alphabet[j]).grid(row=self.rows_get * 2, column=j + 2)\n text_var_b[i].append(StringVar())\n entries_b[i].append(Entry(self.frame_add_input, textvariable=text_var_b[i][j], width=3))\n entries_b[i][j].grid(row=i + self.rows_get + 5, column=j + 2)\n Label(self.frame_add_input, text=i + 1).grid(row=i + self.rows_get + 5, column=1, sticky='e')\n\n # callback functions to convert text_vars into actual matrix elements\n def get_mat_a():\n self.matrix_a = []\n for i2 in range(self.rows_get):\n self.matrix_a.append([])\n for j2 in range(self.cols_get):\n self.matrix_a[i2].append(text_var[i2][j2].get())\n\n def get_mat_b():\n self.matrix_b = []\n for i3 in range(self.rows_get):\n self.matrix_b.append([])\n for j3 in range(self.cols_get):\n self.matrix_b[i3].append(text_var_b[i3][j3].get())\n\n def get_mat():\n try:\n get_mat_a()\n get_mat_b()\n self.output_matrix()\n except (ValueError, Exception):\n pass\n\n # button to trigger callback functions\n Button(self.frame_add_input, text=\"Enter\", width=8, command=get_mat)\\\n .grid(row=self.cols_get + self.cols_get + 10, column=1)\n\n # gui stuff\n self.gui_add_input.protocol(\"WM_DELETE_WINDOW\", menu.gui_menu.destroy)\n self.gui_add_input.mainloop()\n\n def __init__(self):\n # prototype the variables to be used\n self.gui_add_input = None\n self.frame_add_input = None\n self.rows_get, self.cols_get = None, None\n self.matrix_a, self.matrix_b = None, None\n self.gui_add_output = None\n self.frame_add_output = None\n self.sum_matrix = None\n\n # create sub-menu window and withdraw main menu window\n menu.gui_menu.withdraw()\n self.gui_add_menu = Toplevel()\n self.gui_add_menu.title(\"Add\")\n self.gui_add_menu.resizable(False, False)\n\n # create sub-menu frame\n self.frame_add_menu = Frame(self.gui_add_menu, highlightbackground='black', highlightthickness=1)\n self.frame_add_menu.pack(fill=BOTH, expand=True, padx=5, pady=5)\n\n # prompt matrix dimensions\n Label(self.frame_add_menu, text='Matrix dimensions:', font=('arial', 10, 'bold'))\\\n .grid(row=3, column=1, columnspan=1)\n\n # create var for rows\n self.rows = IntVar()\n self.rows.set(2)\n\n # create drop down for rows\n OptionMenu(self.frame_add_menu, self.rows, *range(2, 5)).grid(row=3, column=2)\n\n # \"x\"\n Label(self.frame_add_menu, text='x').grid(row=3, column=3)\n\n # create var for cols\n self.cols = IntVar()\n self.cols.set(2)\n\n # create drop down for cols\n OptionMenu(self.frame_add_menu, self.cols, *range(2, 5)).grid(row=3, column=4)\n\n # button to next window\n Button(self.frame_add_menu, text='Enter', 
padx=16, pady=5, command=self.input_matrix).grid(row=5, column=4)\n\n # gui stuff\n self.gui_add_menu.protocol(\"WM_DELETE_WINDOW\", menu.gui_menu.destroy)\n self.gui_add_menu.mainloop()\n", "id": "6420716", "language": "Python", "matching_score": 3.953235149383545, "max_stars_count": 1, "path": "add.py" }, { "content": "from tkinter import Label, Button, Entry, OptionMenu, IntVar, StringVar, Frame, Toplevel\nfrom tkinter.constants import BOTH\nimport numpy as np\nimport menu\n\nalphabet = '<KEY>'\n\n\nclass Trans:\n def back_to_menu(self):\n self.gui_trans_output.destroy()\n menu.gui_menu.deiconify()\n\n def compute_transpose(self):\n try:\n for i in range(self.rows_get):\n for j in range(self.cols_get):\n self.matrix[i][j] = int(self.matrix[i][j])\n\n list_mat = [str(i) for i in np.transpose(self.matrix)]\n\n # remove square brackets\n for i in range(len(list_mat)):\n list_mat[i] = list_mat[i][1:-1]\n return list_mat\n\n except (TypeError, Exception):\n pass\n\n def output_matrix(self):\n self.gui_trans_input.destroy()\n self.gui_trans_output = Toplevel()\n self.gui_trans_output.title(\"Transpose\")\n self.gui_trans_output.resizable(False, False)\n\n self.frame_trans_output = Frame(self.gui_trans_output, highlightbackground='black', highlightthickness=1)\n self.frame_trans_output.pack(fill=BOTH, expand=True, padx=5, pady=5)\n\n # go back to menu button\n Button(self.frame_trans_output, text=\"Back\", width=4, command=self.back_to_menu).grid(\n row=self.rows_get + self.rows_get + 10, column=1)\n\n Label(self.frame_trans_output, text='Input:', font=('arial', 10, 'bold'), underline=0).grid(row=1, column=1)\n for i in range(self.rows_get):\n for j in range(self.cols_get):\n Label(self.frame_trans_output, text=self.matrix[i][j], bd=5).grid(row=i + 1, column=j + 2)\n\n # display output\n Label(self.frame_trans_output, text='Transposed:', font=('arial', 10, 'bold'), underline=0).grid(\n row=self.rows_get * 2,\n column=1)\n\n self.transposed_matrix = self.compute_transpose()\n for i in range(self.cols_get):\n Label(self.frame_trans_output, text=self.transposed_matrix[i], bd=5).grid(\n row=i + self.rows_get * 2, column=2, columnspan=self.cols_get, sticky='w ')\n\n self.gui_trans_output.protocol(\"WM_DELETE_WINDOW\", menu.gui_menu.destroy)\n self.gui_trans_output.mainloop()\n\n def input_matrix(self):\n self.gui_trans_menu.destroy()\n self.gui_trans_input = Toplevel()\n self.gui_trans_input.title(\"Transpose\")\n self.gui_trans_input.resizable(False, False)\n\n self.frame_trans_input = Frame(self.gui_trans_input, highlightbackground='black', highlightthickness=1)\n self.frame_trans_input.pack(fill=BOTH, expand=True, padx=5, pady=5)\n\n Label(self.frame_trans_input, text=\"Enter matrix:\", font=('arial', 10, 'bold')).grid(row=1, column=1)\n\n # empty arrays for Entry and StringVars\n text_var = []\n entries = []\n\n self.rows_get, self.cols_get = (self.rows.get(), self.cols.get())\n for i in range(self.rows_get):\n # append an empty list to arrays to append to later\n text_var.append([])\n entries.append([])\n for j in range(self.cols_get):\n # for column indications\n if i == 1:\n Label(self.frame_trans_input, text=alphabet[j]).grid(row=1, column=j + 2)\n\n # append StringVar\n text_var[i].append(StringVar())\n\n # append the entry into the list\n entries[i].append(Entry(self.frame_trans_input, textvariable=text_var[i][j], width=3))\n\n # display entry\n entries[i][j].grid(row=i + 2, column=j + 2)\n\n # for row indications\n Label(self.frame_trans_input, text=i + 1).grid(row=i + 2, column=1, 
sticky='e')\n\n def get_mat():\n try:\n self.matrix = []\n for i2 in range(self.rows_get):\n self.matrix.append([])\n for j2 in range(self.cols_get):\n self.matrix[i2].append(text_var[i2][j2].get())\n self.output_matrix()\n except (ValueError, Exception):\n pass\n\n Button(self.frame_trans_input, text=\"Enter\", width=8, command=get_mat)\\\n .grid(row=self.cols_get + self.cols_get + 10, column=1)\n\n self.gui_trans_input.protocol(\"WM_DELETE_WINDOW\", menu.gui_menu.destroy)\n self.gui_trans_input.mainloop()\n\n def __init__(self):\n self.gui_trans_input = None\n self.frame_trans_input = None\n self.gui_trans_output = None\n self.frame_trans_output = None\n self.transposed_matrix = None\n self.matrix = None\n self.rows_get, self.cols_get = None, None\n\n menu.gui_menu.withdraw()\n self.gui_trans_menu = Toplevel()\n self.gui_trans_menu.title(\"Transpose\")\n self.gui_trans_menu.resizable(False, False)\n\n # fixed typo: tkinter option is 'highlightthickness' (was 'highlightthicknes')\n self.frame_trans_menu = Frame(self.gui_trans_menu, highlightbackground='black', highlightthickness=1)\n self.frame_trans_menu.pack(fill=BOTH, expand=True, padx=5, pady=5)\n\n Label(self.frame_trans_menu, text='Enter matrix dimensions:', font=('arial', 10, 'bold')).grid(row=1, column=1)\n\n # enter matrix dimensions:\n self.rows = IntVar()\n self.rows.set(2)\n OptionMenu(self.frame_trans_menu, self.rows, *range(2, 5)).grid(row=1, column=2)\n\n Label(self.frame_trans_menu, text='x').grid(row=1, column=3)\n\n self.cols = IntVar()\n self.cols.set(2)\n OptionMenu(self.frame_trans_menu, self.cols, *range(2, 5)).grid(row=1, column=4)\n\n Button(self.frame_trans_menu, text='Enter', padx=16, pady=5, command=self.input_matrix).grid(row=2, column=4)\n\n self.gui_trans_menu.protocol(\"WM_DELETE_WINDOW\", menu.gui_menu.destroy)\n self.gui_trans_menu.mainloop()\n", "id": "8289956", "language": "Python", "matching_score": 1.4961820840835571, "max_stars_count": 1, "path": "trans.py" }, { "content": "from tkinter import Label, Button, Entry, OptionMenu, IntVar, StringVar, Frame, Toplevel\nfrom tkinter.constants import BOTH\nimport numpy as np\nimport menu\n\nalphabet = '<KEY>'\n\n\nclass Multi:\n def back_to_menu(self):\n self.gui_multi_output.destroy()\n menu.gui_menu.deiconify()\n\n def compute_product(self):\n try:\n # convert matrix_a and matrix_b to int\n for i in range(self.rows_a):\n for j in range(self.cols_a):\n self.matrix_a[i][j] = int(self.matrix_a[i][j])\n\n for i in range(self.rows_b):\n for j in range(self.cols_b):\n self.matrix_b[i][j] = int(self.matrix_b[i][j])\n\n # use np.matmul to achieve product\n self.product_matrix = np.matmul(self.matrix_a, self.matrix_b)\n\n except (TypeError, Exception):\n pass\n\n try:\n # convert product_matrix back to str\n list_mat = [str(i) for i in self.product_matrix]\n\n # remove square brackets\n for i in range(len(list_mat)):\n list_mat[i] = list_mat[i][1:-1]\n\n # return product_matrix as a list of strings\n return list_mat\n\n except (NameError, TypeError, Exception):\n pass\n\n def output_matrix(self):\n # create output window\n self.gui_multi_input.destroy()\n self.gui_multi_output = Toplevel()\n self.gui_multi_output.title(\"Multiply\")\n self.gui_multi_output.resizable(False, False)\n\n # create output frame\n self.frame_multi_output = Frame(self.gui_multi_output, highlightbackground='black', highlightthickness=1)\n self.frame_multi_output.pack(fill=BOTH, expand=True, padx=5, pady=5)\n\n # go back to menu button\n Button(self.frame_multi_output, text=\"Back\", width=4, command=self.back_to_menu).grid(\n row=self.rows_a + self.rows_b + 10,\n 
column=1)\n\n # display matrix_a input\n Label(self.frame_multi_output, text='Matrix A:', font=('arial', 10, 'bold'), underline=0).grid(row=1, column=1)\n\n for i in range(self.rows_a):\n for j in range(self.cols_a):\n Label(self.frame_multi_output, text=self.matrix_a[i][j], bd=5).grid(row=i + 1, column=j + 2)\n\n # display matrix_b input\n Label(self.frame_multi_output, text='Matrix B:', font=('arial', 10, 'bold'), underline=0).grid(\n row=1, column=self.cols_a + 2)\n\n for i in range(self.rows_b):\n for j in range(self.cols_b):\n Label(self.frame_multi_output, text=self.matrix_b[i][j], bd=5).\\\n grid(row=i + 1, column=j + self.cols_a * 2 + 2)\n\n # display product\n Label(self.frame_multi_output, text='Product:', font=('arial', 10, 'bold'), underline=0).grid(\n row=self.rows_a * 2,\n column=1)\n\n # compute product\n self.product_matrix = self.compute_product()\n\n # display product\n for i in range(len(self.product_matrix)):\n Label(self.frame_multi_output, text=self.product_matrix[i], bd=5).grid(\n row=i + self.rows_a * 2, column=2, columnspan=5, sticky='w ')\n\n # gui stuff\n self.gui_multi_output.protocol(\"WM_DELETE_WINDOW\", menu.gui_menu.destroy)\n self.gui_multi_output.mainloop()\n\n def input_matrix(self):\n # create input window\n self.gui_multi_menu.destroy()\n self.gui_multi_input = Toplevel()\n self.gui_multi_input.title(\"Multiply\")\n self.gui_multi_input.resizable(False, False)\n\n # create input frame\n self.frame_multi_input = Frame(self.gui_multi_input, highlightbackground='black', highlightthickness=1)\n self.frame_multi_input.pack(fill=BOTH, expand=True, padx=5, pady=5)\n # window_dimensions = str(self.m_length.get()**3+90) + \"x\" + str(self.m_height.get())\n # print(window_dimensions)\n # window.geometry(window_dimensions)\n # self.gui_inverse_input.resizable(False, False)\n\n # create matrix A entries\n Label(self.frame_multi_input, text=\"Enter matrix A:\", font=('arial', 10, 'bold')).grid(row=1, column=1)\n # to create matrix of entry cells we need to create a 2d list of entries\n # thank god to stackoverflow peeps for that\n\n # empty arrays for Entry and StringVars\n text_var = []\n entries = []\n\n # convert rows and cols from IntVar to int\n self.rows_a, self.cols_a = (self.ma_rows.get(), self.ma_cols.get())\n\n # create the list of entries with corresponding text_vars\n for i in range(self.rows_a):\n # append an empty list to append to later\n text_var.append([])\n entries.append([])\n for j in range(self.cols_a):\n # for column indications\n if i == 1:\n Label(self.frame_multi_input, text=alphabet[j]).grid(row=1, column=j + 2)\n\n # append StringVar\n text_var[i].append(StringVar())\n\n # append the entry into the list\n entries[i].append(Entry(self.frame_multi_input, textvariable=text_var[i][j], width=3))\n\n # display entry\n entries[i][j].grid(row=i + 2, column=j + 2)\n\n # for row indications\n Label(self.frame_multi_input, text=i + 1).grid(row=i + 2, column=1, sticky='e')\n\n Label(self.frame_multi_input, text=\"Enter matrix B:\", font=('arial', 10, 'bold')).grid(row=self.rows_a * 2,\n column=1)\n\n text_var_b = []\n entries_b = []\n\n self.rows_b, self.cols_b = (self.ma_cols.get(), self.mb_cols.get())\n for i in range(self.rows_b):\n text_var_b.append([])\n entries_b.append([])\n for j in range(self.cols_b):\n if i == 1:\n Label(self.frame_multi_input, text=alphabet[j]).grid(row=self.rows_a * 2, column=j + 2)\n text_var_b[i].append(StringVar())\n entries_b[i].append(Entry(self.frame_multi_input, textvariable=text_var_b[i][j], width=3))\n 
entries_b[i][j].grid(row=i + self.rows_a + 5, column=j + 2)\n Label(self.frame_multi_input, text=i + 1).grid(row=i + self.rows_a + 5, column=1, sticky='e')\n\n # callback functions to get StringVars/convert them to strings\n # and store in matrices\n def get_mat_a():\n self.matrix_a = []\n for i2 in range(self.rows_a):\n self.matrix_a.append([])\n for j2 in range(self.cols_a):\n self.matrix_a[i2].append(text_var[i2][j2].get())\n\n def get_mat_b():\n self.matrix_b = []\n for i3 in range(self.rows_b):\n self.matrix_b.append([])\n for j3 in range(self.cols_b):\n self.matrix_b[i3].append(text_var_b[i3][j3].get())\n\n def get_mat():\n try:\n get_mat_a()\n get_mat_b()\n self.output_matrix()\n except (ValueError, Exception):\n pass\n\n # button to trigger the entire thing\n Button(self.frame_multi_input, text=\"Enter\", width=8, command=get_mat).grid(row=self.cols_a + self.cols_b + 10,\n column=1)\n\n # gui stuff\n self.gui_multi_input.protocol(\"WM_DELETE_WINDOW\", menu.gui_menu.destroy)\n self.gui_multi_input.mainloop()\n\n def __init__(self):\n # pre-declare variables\n self.product_matrix = None\n self.matrix_a, self.matrix_b = None, None\n self.matrix = None\n self.gui_multi_input = None\n self.frame_multi_input = None\n self.rows_a, self.cols_a = None, None\n self.rows_b, self.cols_b = None, None\n self.gui_multi_output = None\n self.frame_multi_output = None\n\n # create sub-menu window then withdraw main menu window\n menu.gui_menu.withdraw()\n self.gui_multi_menu = Toplevel()\n self.gui_multi_menu.title(\"Multiply\")\n self.gui_multi_menu.resizable(False, False)\n\n # create sub-menu frame\n self.frame_multi_menu = Frame(self.gui_multi_menu, highlightbackground='black', highlightthickness=1)\n self.frame_multi_menu.pack(fill=BOTH, expand=True, padx=5, pady=5)\n\n # inputs Label(self.frame_multi_menu, text='NOTE: Matrix A height and Matrix B length').grid(row=1, column=1,\n # column span =6) Label(self.frame_multi_menu, text='...are to be equal for multiplication').grid(row=2,\n # column=1, column span =6) A matrix\n\n # prompt dimensions\n Label(self.frame_multi_menu, text='Matrix A dimensions:', font=('arial', 10, 'bold')).grid(row=3, column=1,\n columnspan=1)\n Label(self.frame_multi_menu, text='Matrix B dimensions:', font=('arial', 10, 'bold')).grid(row=4, column=1,\n columnspan=1)\n\n # create var for rows\n self.ma_rows = IntVar()\n self.ma_rows.set(2)\n\n # drop down for rows\n OptionMenu(self.frame_multi_menu, self.ma_rows, *range(2, 5)).grid(row=3, column=2)\n\n # 'x'\n Label(self.frame_multi_menu, text='x').grid(row=3, column=3)\n\n # create var for cols\n self.ma_cols = IntVar()\n self.ma_cols.set(2)\n OptionMenu(self.frame_multi_menu, self.ma_cols, *range(2, 5)).grid(row=3, column=4)\n\n # B matrix\n self.mb_rows = IntVar()\n # self.mb_rows.set(self.ma_cols.get())\n Label(self.frame_multi_menu, text=\"[n]\", font=('arial', 10, 'bold'), padx=5, pady=5).grid(row=4, column=2)\n # OptionMenu(self.frame_multi_menu, self.mb_rows, *range(2, 16)).grid(row=2, column=2)\n\n Label(self.frame_multi_menu, text='x').grid(row=4, column=3)\n\n self.mb_cols = IntVar()\n self.mb_cols.set(2)\n OptionMenu(self.frame_multi_menu, self.mb_cols, *range(2, 5)).grid(row=4, column=4)\n\n # in order to move to input window\n Button(self.frame_multi_menu, text='Enter', padx=16, pady=5, command=self.input_matrix).grid(row=5, column=4)\n\n # gui stuff\n self.gui_multi_menu.protocol(\"WM_DELETE_WINDOW\", menu.gui_menu.destroy)\n self.gui_multi_menu.mainloop()\n", "id": "3065239", "language": "Python", 
"matching_score": 4.347410202026367, "max_stars_count": 1, "path": "multi.py" }, { "content": "from tkinter import Label, Button, Entry, OptionMenu, IntVar, StringVar, Frame, Toplevel\nfrom tkinter.constants import BOTH\nfrom numpy.linalg import inv\nimport menu\n\nalphabet = '<KEY>'\n\n\nclass Inverse:\n def back_to_menu(self):\n self.gui_inverse_output.destroy()\n menu.gui_menu.deiconify()\n\n def compute_inverse(self):\n # convert matrix of strings to integers\n try:\n for i in range(self.rows):\n for j in range(self.cols):\n self.matrix[i][j] = int(self.matrix[i][j])\n\n except (NameError, TypeError, Exception):\n # Label(self.frame_inverse_output, text=\"Invalid input(s)\").grid(row=1, column=2)\n pass\n\n try:\n # invert matrix then convert back to string\n self.matrix = inv(self.matrix)\n list_mat = [str(i) for i in self.matrix]\n\n # remove square brackets\n for i in range(len(list_mat)):\n list_mat[i] = list_mat[i][1:-1]\n return list_mat\n\n except (TypeError, Exception):\n Label(self.frame_inverse_output, text=\"(Your matrix is\").grid(row=1, column=self.cols * 2 + 1)\n Label(self.frame_inverse_output, text=\"not invertible!)\").grid(row=2, column=self.cols * 2 + 1)\n\n def output_matrix(self):\n # create window\n self.gui_inverse_input.destroy()\n self.gui_inverse_output = Toplevel()\n self.gui_inverse_output.title(\"Inverse\")\n self.gui_inverse_output.resizable(False, False)\n\n self.frame_inverse_output = Frame(self.gui_inverse_output, highlightbackground='black', highlightthickness=1)\n self.frame_inverse_output.pack(fill=BOTH, expand=True, padx=5, pady=5)\n\n # go back to menu button\n Button(self.frame_inverse_output, text=\"Back\", width=4, command=self.back_to_menu).grid(\n row=self.rows + 10,\n column=1)\n\n # display user input\n Label(self.frame_inverse_output, text='Input:', font=('arial', 10, 'bold'), underline=0).grid(row=1, column=1)\n for i in range(self.rows):\n for j in range(self.cols):\n Label(self.frame_inverse_output, text=self.matrix[i][j], bd=5).grid(row=i + 1, column=j + 2)\n\n # display output\n Label(self.frame_inverse_output, text='Inverted:', font=('arial', 10, 'bold'), underline=0).grid(row=1,\n column=self.cols * 2)\n\n inverse_matrix = self.compute_inverse()\n for i in range(self.rows):\n Label(self.frame_inverse_output, text=inverse_matrix[i], bd=5).grid(\n row=i + 1, column=self.cols * 2 + 1)\n\n # def disable_event():\n # pass\n\n self.gui_inverse_output.protocol(\"WM_DELETE_WINDOW\", menu.gui_menu.destroy)\n self.gui_inverse_output.mainloop()\n\n def input_matrix(self):\n self.gui_inverse_menu.destroy()\n self.gui_inverse_input = Toplevel()\n self.gui_inverse_input.title(\"Inverse\")\n self.gui_inverse_input.resizable(False, False)\n\n self.frame_inverse_input = Frame(self.gui_inverse_input, highlightbackground='black', highlightthickness=1)\n self.frame_inverse_input.pack(fill=BOTH, expand=True, padx=5, pady=5)\n # window_dimensions = str(self.m_length.get()**3+90) + \"x\" + str(self.m_height.get())\n # print(window_dimensions)\n # window.geometry(window_dimensions)\n # self.gui_inverse_input.resizable(False, False)\n\n Label(self.frame_inverse_input, text=\"Enter matrix:\", font=('arial', 10, 'bold')).grid(row=1, column=1)\n\n # to create matrix of entry cells we need to create a 2d list of entries\n # thank god to stackoverflow peeps for that\n\n # empty arrays for Entry and StringVars\n text_var = []\n entries = []\n\n self.rows, self.cols = (self.m_dimensions.get(), self.m_dimensions.get())\n for i in range(self.rows):\n # append an 
empty list to arrays to append to later\n text_var.append([])\n entries.append([])\n for j in range(self.cols):\n # for column indications\n if i == 1:\n Label(self.frame_inverse_input, text=alphabet[j]).grid(row=1, column=j + 2)\n\n # append StringVar\n text_var[i].append(StringVar())\n\n # append the entry into the list\n entries[i].append(Entry(self.frame_inverse_input, textvariable=text_var[i][j], width=3))\n\n # display entry\n entries[i][j].grid(row=i + 2, column=j + 2)\n\n # for row indications\n Label(self.frame_inverse_input, text=i + 1).grid(row=i + 2, column=1, sticky='e')\n\n # callback function to get StringVars/convert them to strings\n # and store in matrix[]\n\n def get_mat():\n try:\n self.matrix = []\n for i2 in range(self.rows):\n self.matrix.append([])\n for j2 in range(self.cols):\n self.matrix[i2].append(text_var[i2][j2].get())\n self.output_matrix()\n\n except (ValueError, Exception):\n # Label(self.frame_inverse_output, text=\"Invalid input(s)\").grid(row=1, column=2)\n Label(self.frame_inverse_output, text=\"(Your matrix is\").grid(row=1, column=self.cols * 2 + 1)\n Label(self.frame_inverse_output, text=\"not invertible!)\").grid(row=2, column=self.cols * 2 + 1)\n\n Button(self.frame_inverse_input, text=\"Enter\", width=8, command=get_mat).grid(row=self.cols + 10,\n column=1)\n\n self.gui_inverse_input.protocol(\"WM_DELETE_WINDOW\", menu.gui_menu.destroy)\n self.gui_inverse_input.mainloop()\n\n def __init__(self):\n self.gui_inverse_input, self.gui_inverse_output = None, None\n self.frame_inverse_output, self.frame_inverse_input = None, None\n self.frame_inverse_menu = None\n self.inverse_matrix = []\n self.rows, self.cols = None, None\n self.matrix = None\n\n menu.gui_menu.withdraw()\n self.gui_inverse_menu = Toplevel()\n self.gui_inverse_menu.title(\"Inverse\")\n self.gui_inverse_menu.resizable(False, False)\n\n self.frame_inverse_menu = Frame(self.gui_inverse_menu, highlightbackground='black', highlightthickness=1)\n self.frame_inverse_menu.pack(fill=BOTH, expand=True, padx=5, pady=5)\n\n Label(self.frame_inverse_menu, text='Enter matrix dimensions:', font=('arial', 10, 'bold')).grid(row=4, column=1)\n\n # enter matrix dimensions\n self.m_dimensions = IntVar()\n self.m_dimensions.set(2)\n OptionMenu(self.frame_inverse_menu, self.m_dimensions, *range(2, 5)).grid(row=4, column=2)\n\n Button(self.frame_inverse_menu, text='Enter', padx=16, pady=5, command=self.input_matrix).grid(row=5, column=2)\n\n self.gui_inverse_menu.protocol(\"WM_DELETE_WINDOW\", menu.gui_menu.destroy)\n self.gui_inverse_menu.mainloop()\n", "id": "6645954", "language": "Python", "matching_score": 1.263201117515564, "max_stars_count": 1, "path": "inverse.py" }, { "content": "from tkinter import Tk, Label, Button, Frame\nfrom tkinter.constants import BOTH\nimport inverse\nimport multi\nimport trans\nimport add\n\ngui_menu = Tk()\n# gui_menu.geometry('150x180')\ngui_menu.title('Menu')\ngui_menu.resizable(False, False)\nframe_menu = Frame(gui_menu, highlightbackground='black', highlightthickness=1)\nframe_menu.pack(fill=BOTH, expand=True, padx=5, pady=5)\n\n\nclass Menu:\n def __init__(self):\n label = Label(frame_menu, text=\"Choose Operation:\", pady=10, font=('arial', 10, 'bold'))\n\n inv = Button(frame_menu, text=\"Inverse\", padx=30, pady=5, command=inverse.Inverse)\n ad = Button(frame_menu, text=\"Add\", padx=40, pady=5, command=add.Add)\n tran = Button(frame_menu, text=\"Transpose\", padx=22, pady=5, command=trans.Trans)\n mlt = Button(frame_menu, text=\"Multiply\", padx=28, pady=5, 
command=multi.Multi)\n\n label.pack()\n inv.pack()\n mlt.pack()\n ad.pack()\n tran.pack()\n\n # def on_closing():\n # if messagebox.askokcancel(\"Quit\", \"Do you want to quit?\"):\n # gui_menu.destroy()\n # gui_menu.protocol(\"WM_DELETE_WINDOW\", on_closing)\n\n gui_menu.mainloop()\n", "id": "9380626", "language": "Python", "matching_score": 0.3144305348396301, "max_stars_count": 1, "path": "menu.py" } ]
1.496182
Warcreed
[ { "content": "# -*- coding: UTF-8 -*-\n\n# set streamer nationality\ntwitch_streamer_nationality = \"en\"\n\n# Spark-Kafka Settings\nbootstrap_server=\"10.0.100.22:2181\"\ntopic = \"twitch\"\napp_name = \"Twitch-Spark\"\ngroupId = \"spark-streaming-consumer\"\nlog_level = \"ERROR\"\nwindow = 5 # Sec\n\nkafka_params = {\n \"auto.offset.reset\": \"smallest\",\n}\n\nmessage_log = False\n\n# ElasticSearch Settings\nelastic_host=\"10.0.100.51\"\nelastic_host_port=\"9200\"\nelastic_index=\"twitch\"\nelastic_document=\"_doc\"\n\nes_write_conf = {\n\"es.nodes\" : elastic_host,\n\"es.port\" : elastic_host_port,\n\"es.resource\" : '%s/%s' % (elastic_index,elastic_document),\n\"es.input.json\" : \"yes\",\n\"mapred.reduce.tasks.speculative.execution\": \"false\",\n\"mapred.map.tasks.speculative.execution\": \"false\",\n\"es.mapping.id\": \"timestamp\"\n}\n\nmapping = {\n \"mappings\": {\n \"properties\": {\n \"timestamp\": {\n \"type\": \"date\"\n }\n }\n }\n}\n\n# sentiment_class ={\n# 'molto_positiva' : 0,\n# 'opinione_positiva' : 1,\n# 'opinione_neutra' : 3,\n# }\n", "id": "1450561", "language": "Python", "matching_score": 4.679755210876465, "max_stars_count": 8, "path": "Spark/Python/code/modules/sparkConsumerConfig.py" }, { "content": "# -*- coding: UTF-8 -*-\nimport os\nimport pyspark\nfrom pyspark import SparkContext\nfrom pyspark.streaming import StreamingContext\nfrom pyspark.streaming.kafka import KafkaUtils\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.dataframe import DataFrame\nfrom pyspark.sql import functions as F\nfrom pyspark.sql import Row\nfrom elasticsearch import Elasticsearch\n\nimport pandas as pd\nimport json\nimport sentimentAnalysis as sentiment\nimport sparkConsumerConfig as config\n\ndef get_item_structure(item): \n if config.twitch_streamer_nationality == \"en\": \n result = sentiment.get_sentiment_analyzer_en(item['message'])\n # elif twitch_streamer_nationality == \"it\":\n # result = get_sentiment_analysis_ita(item['message'])\n return {\n 'targetChannelUsername': item['targetChannelUsername'],\n 'nickname': item['nickname'],\n 'message': item['message'],\n 'sentiment': result,\n 'timestamp': int(item['timestamp'])\n }\n\ndef get_messages(key,rdd): \n message_dataframe = spark.read.json(rdd.map(lambda value: json.loads(value[1]))) \n if not message_dataframe.rdd.isEmpty(): \n analyzed_rdd = message_dataframe.rdd.map(lambda item: get_item_structure(item))\n if config.message_log:\n print(\"********************\") \n print(spark.createDataFrame(analyzed_rdd).show(20, False)) \n print(\"********************\\n\")\n elastic_rdd = analyzed_rdd.map(lambda item: json.dumps(item)).map(lambda x: ('', x))\n\n elastic_rdd.saveAsNewAPIHadoopFile(\n path='-',\n outputFormatClass=\"org.elasticsearch.hadoop.mr.EsOutputFormat\",\n keyClass=\"org.apache.hadoop.io.NullWritable\",\n valueClass=\"org.elasticsearch.hadoop.mr.LinkedMapWritable\",\n conf=config.es_write_conf) \n\ndef main():\n\n # ElasticSearch\n elastic = Elasticsearch(hosts=[config.elastic_host])\n\n response = elastic.indices.create(\n index=config.elastic_index,\n body=config.mapping,\n ignore=400\n )\n # elasticsearch index response\n if 'acknowledged' in response:\n if response['acknowledged'] == True:\n print (\"INDEX MAPPING SUCCESS FOR INDEX:\", response['index'])\n elif 'error' in response:\n print (\"ERROR:\", response['error']['root_cause'])\n print (\"TYPE:\", response['error']['type'])\n\n # Spark-Kafka\n global spark\n spark = SparkSession.builder.appName(config.app_name).getOrCreate()\n 
spark.sparkContext.setLogLevel(config.log_level)\n ssc = StreamingContext(spark.sparkContext, config.window)\n stream = KafkaUtils.createStream(ssc, config.bootstrap_server, config.groupId, {config.topic: 1}, config.kafka_params)\n\n stream.foreachRDD(get_messages)\n\n ssc.start()\n ssc.awaitTermination()\n\nif __name__ == '__main__':\n main()\n\n", "id": "9241706", "language": "Python", "matching_score": 2.2910284996032715, "max_stars_count": 8, "path": "Spark/Python/code/sparkConsumer.py" }, { "content": "# -*- coding: UTF-8 -*-\n\n# from sentita import calculate_polarity\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n\n# set your custom emote value to use for sentiment analyzer. Keep in mind to set a balanced values for each emote.\ntwitch_emotes = {\n '<3': 0.4,\n '4head': 1,\n 'babyrage': -0.7,\n 'biblethump': -0.7,\n 'blessrng': 0.3,\n 'bloodtrail': 0.7,\n 'coolstorybob': -1,\n 'residentsleeper': -1,\n 'kappa': 0.3,\n 'lul': 0.1,\n 'pogchamp': 1.5,\n 'heyguys': 1,\n 'wutface': -1.5,\n 'kreygasm': 1,\n 'seemsgood': 0.7,\n 'kappapride': 0.7,\n 'feelsgoodman': 1,\n 'notlikethis': -1\n}\n\n#Vader\nanalyzer = SentimentIntensityAnalyzer()\nanalyzer.lexicon.update(twitch_emotes)\n\ndef get_sentiment_analyzer_en(phrase): \n polarity = analyzer.polarity_scores(phrase) \n if polarity[\"compound\"] >= 0.05:\n if polarity['pos'] - polarity[\"neu\"] > 0.1:\n return 'very_positive'\n elif 0 <= abs(polarity['pos'] - polarity[\"neu\"]) <= 0.6:\n if polarity['neg'] > 0.05:\n return 'ironic'\n return 'positive_opinion'\n elif polarity[\"compound\"] <= -0.05:\n if polarity['neg'] - polarity[\"neu\"] > 0.1:\n return 'very_negative'\n elif 0 <= abs(polarity['neg'] - polarity[\"neu\"]) <= 0.6:\n if polarity['pos'] > 0.05:\n return 'ironic'\n return 'negative_opinion'\n else:\n if polarity[\"pos\"] > 0 and polarity[\"neg\"] > 0:\n return \"ironic\"\n elif polarity['neu'] - polarity[\"pos\"] < 0.4:\n return \"positive_opinion\"\n elif polarity['neu'] - polarity[\"neg\"] < 0.4:\n return 'negative_opinion'\n return 'neutral_opinion'\n\n# def get_sentiment_analyzer_ita(phrase): \n# data = [phrase]\n# results, polarities = calculate_polarity(data) \n# if abs(polarities[0] - polarities[1]) < 2:\n# return \"neutral_opinion\"\n# elif polarities[0] - polarities[1] >= 4:\n# return \"very_positive\" \n# elif polarities[0] - polarities[1] >= 2:\n# return \"positive_opinion\" \n# elif polarities[1] - polarities[0] >= 4:\n# return \"very_negative\" \n# elif polarities[1] - polarities[0] >= 2:\n# return \"negative_opinion\" ", "id": "8686945", "language": "Python", "matching_score": 0.7693607211112976, "max_stars_count": 8, "path": "Spark/Python/code/modules/sentimentAnalysis.py" } ]
2.291028
sthamma
[ { "content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\nimport pytest\n\n\nFIXTURES_DIR = os.path.join(os.path.dirname(__file__), 'fixture')\n\n\ndef test_buckets(plan_runner):\n \"Test bucket resources.\"\n _, resources = plan_runner(FIXTURES_DIR)\n assert len(resources) == 2\n assert set(r['type'] for r in resources) == set(['google_storage_bucket'])\n assert set(r['values']['name'] for r in resources) == set([\n 'bucket-a', 'bucket-b'\n ])\n assert set(r['values']['project'] for r in resources) == set([\n 'my-project'\n ])\n\n\ndef test_prefix(plan_runner):\n \"Test bucket name when prefix is set.\"\n _, resources = plan_runner(FIXTURES_DIR, prefix='foo')\n assert set(r['values']['name'] for r in resources) == set([\n 'foo-eu-bucket-a', 'foo-eu-bucket-b'\n ])\n\n\ndef test_map_values(plan_runner):\n \"Test that map values set the correct attributes on buckets.\"\n _, resources = plan_runner(FIXTURES_DIR)\n bpo = dict((r['values']['name'], r['values']['bucket_policy_only'])\n for r in resources)\n assert bpo == {'bucket-a': False, 'bucket-b': True}\n force_destroy = dict((r['values']['name'], r['values']['force_destroy'])\n for r in resources)\n assert force_destroy == {'bucket-a': True, 'bucket-b': False}\n versioning = dict((r['values']['name'], r['values']['versioning'])\n for r in resources)\n assert versioning == {\n 'bucket-a': [{'enabled': True}], 'bucket-b': [{'enabled': False}]\n }\n logging_config = dict((r['values']['name'], r['values']['logging'])\n for r in resources)\n assert logging_config == {\n 'bucket-a': [{'log_bucket': 'foo'}],\n 'bucket-b': []\n }\n retention_policies = dict((r['values']['name'], r['values']['retention_policy'])\n for r in resources)\n assert retention_policies == {\n 'bucket-a': [],\n 'bucket-b': [{'is_locked': False, 'retention_period': 5}]\n }\n for r in resources:\n assert r['values']['labels'] == {\n 'environment': 'test', 'location': 'eu',\n 'storage_class': 'multi_regional', 'name': r['values']['name']\n }\n\n\ndef test_iam_roles_only(plan_runner):\n \"Test bucket resources with only iam roles passed.\"\n _, resources = plan_runner(\n FIXTURES_DIR, iam_roles='{bucket-a = [ \"roles/storage.admin\"]}')\n assert len(resources) == 3\n\n\ndef test_iam(plan_runner):\n \"Test bucket resources with iam roles and members.\"\n iam_roles = (\n '{bucket-a = [\"roles/storage.admin\"], '\n 'bucket-b = [\"roles/storage.objectAdmin\"]}'\n )\n iam_members = '{folder-a = { \"roles/storage.admin\" = [\"user:<EMAIL>\"] }}'\n _, resources = plan_runner(\n FIXTURES_DIR, iam_roles=iam_roles, iam_members=iam_members)\n assert len(resources) == 4\n", "id": "8946488", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "tests/modules/gcs/test_plan.py" } ]
0
tutlane
[ { "content": "import tkinter as tk\nfrom pyrustic import tkmisc\nfrom pyrustic import widget\nfrom pyrustic.widget.scrollbox import Scrollbox\nfrom pyrustic.view import View\nfrom pyrustic.tkmisc import merge_cnfs\n\n\n# select button flavor\nCHECK = \"check\" # for checkbutton\nRADIO = \"radio\" # for radiobutton\n\n\n# Components\nLABEL_HEADER = \"label_header\"\nSCROLLBOX = \"scrollbox\"\nLABEL_MESSAGE = \"label_message\"\nFRAME_PANE = \"frame_pane\"\nFRAME_FOOTER = \"frame_footer\"\nBUTTON_CONTINUE = \"button_continue\"\nBUTTON_CANCEL = \"button_cancel\"\nRADIOBUTTONS = \"radiobuttons\"\nCHECKBUTTONS = \"checkbuttons\"\n\n\nclass Choice(widget.Toplevel):\n \"\"\"\n Choice is a dialog box to make the user select some items among others.\n The Choice could be implemented with either radiobuttons or checkbuttons.\n\n Example:\n\n import tkinter as tk\n from pyrustic.widget.choice import Choice\n\n def my_handler(result):\n print(result)\n\n root = tk.Tk()\n my_items = (\"first\", \"second\", \"third\")\n choice = Choice(root, title=\"Choice\", header=\"Make a choice\",\n items=my_items, handler=my_handler)\n choice.build()\n root.mainloop()\n\n \"\"\"\n\n def __init__(self,\n master=None,\n title=None,\n header=None,\n message=None,\n items=None,\n selected=None,\n flavor=\"radio\",\n handler=None,\n geometry=None,\n cnfs=None):\n \"\"\"\n PARAMETERS:\n\n - master: widget parent. Example: an instance of tk.Frame\n\n - title: title of dialog box\n\n - header: the text to show as header\n\n - message: the text to show as message\n\n - use_scrollbox: bool, set it to True to make the Dialog scrollable\n\n - items: a sequence of strings. Example: (\"banana\", \"apple\").\n\n - selected: a sequence of indexes to indicate default selection.\n Set it to None if u don't need it.\n\n - flavor: it could be either RADIO or CHECK\n for respectively radiobutton and checkbutton\n\n - handler: a callback to be executed immediately\n after closing the dialog box.\n The callback should allow one parameter, the result:\n\n - If the flavor is RADIO,\n then, result is a tuple like: (the selected index, item string).\n\n - If the flavor is CHECK,\n then, result is a sequence of tuples.\n Each tuple is like: (integer, item string),\n with integer being 1 if the button has been clicked, else 0.\n\n - geometry: str, as the dialog box is a toplevel (BODY),\n you can edit its geometry. 
Example: \"500x300\"\n\n - options: dictionary of widgets options\n The widgets keys are: BODY, LABEL_HEADER, SCROLLBOX, LABEL_MESSAGE,\n FRAME_PANE, FRAME_FOOTER, BUTTON_CONTINUE, BUTTON_CANCEL,\n RADIOBUTTONS, CHECKBUTTONS.\n\n Example: Assume that you want to set the LABEL_MESSAGE's background to black\n and the BODY's background to red:\n options = { BODY: {\"background\": \"red\"},\n LABEL_MESSAGE: {\"background\": \"black\"} }\n\n \"\"\"\n self.__cnfs = merge_cnfs(None, cnfs, components=(\"body\",\n LABEL_HEADER, SCROLLBOX, LABEL_MESSAGE,\n FRAME_PANE, FRAME_FOOTER, BUTTON_CONTINUE,\n BUTTON_CANCEL, RADIOBUTTONS, CHECKBUTTONS))\n super().__init__(master=master,\n class_=\"Choice\",\n cnf=self.__cnfs[\"body\"],\n on_build=self.__on_build,\n on_display=self.__on_display,\n on_destroy=self.__on_destroy,\n toplevel_geometry=self.__toplevel_geometry)\n self.__title = title\n self.__header = header\n self.__message = message\n self.__items = [] if not items else items\n self.__selected = selected\n self.__flavor = flavor\n self.__handler = handler\n self.__geometry = geometry\n #\n self.__result = None\n self.__closing_context = \"close\"\n self.__components = dict()\n self.__label_header = None\n self.__label_message = None\n self.__pane = None\n self.__footer = None\n self.__buttons = None\n self.__intvar = tk.IntVar()\n self.__intvars = []\n # components\n self.__components = {}\n # build\n self.__view = self.build()\n\n # ======================================\n # PROPERTIES\n # ======================================\n\n @property\n def header(self):\n return self.__header\n\n @property\n def message(self):\n return self.__message\n\n @property\n def items(self):\n return self.__items.copy()\n\n @property\n def selected(self):\n \"\"\"\n - If the flavor is RADIO,\n then, result is a tuple like: (the selected index, item string).\n Example: 3 items, the second has been selected:\n result = (1, \"Item at index 1\")\n\n - If the flavor is CHECK,\n then, result is a sequence of tuples, each positioned in\n the sequence according to its index number.\n Each tuple is like: (integer, item string),\n with integer being 1 if the button has been clicked, else 0.\n Example: 3 items, only the last 2 are checked:\n result = ( (0, \"item 1\"), (1, \"item 2\"), (1, \"item 3\") )\n \"\"\"\n return self.__result\n\n @property\n def flavor(self):\n return self.__flavor\n\n @property\n def handler(self):\n return self.__handler\n\n @property\n def components(self):\n \"\"\"\n Get the components (widgets instances) used to build this dialog.\n\n This property returns a dict. 
The keys are:\n BODY, LABEL_HEADER, SCROLLBOX, LABEL_MESSAGE,\n FRAME_PANE, FRAME_FOOTER, BUTTON_CONTINUE, BUTTON_CANCEL,\n RADIOBUTTONS, CHECKBUTTONS.\n\n Warning: radiobuttons and checkbuttons are sequences of widgets positioned\n in the sequence according to the index.\n\n Another Warning: check the presence of key before usage.\n \"\"\"\n return self.__components\n\n # ======================================\n # INTERNAL\n # ======================================\n def __on_build(self):\n self.title(self.__title)\n self.resizable(0, 0)\n #\n if self.__geometry:\n self.geometry(self.__geometry)\n self.columnconfigure(0, weight=1)\n self.rowconfigure(0, weight=0)\n self.rowconfigure(1, weight=0)\n self.rowconfigure(2, weight=2, uniform=\"a\")\n self.rowconfigure(3, weight=0, uniform=\"a\")\n # == Set Header\n if self.__header:\n label_header = tk.Label(self,\n name=LABEL_HEADER,\n text=self.__header,\n justify=tk.LEFT,\n anchor=\"w\",\n cnf=self.__cnfs[LABEL_HEADER])\n self.__components[LABEL_HEADER] = label_header\n label_header.grid(row=0, column=0, sticky=\"w\",\n padx=(5, 5), pady=(5, 5))\n label_header.config(text=self.__header)\n # == Set Message\n if self.__message:\n label_message = tk.Label(self,\n name=LABEL_MESSAGE,\n text=self.__message,\n justify=tk.LEFT,\n anchor=\"w\",\n cnf=self.__cnfs[LABEL_MESSAGE])\n self.__components[LABEL_MESSAGE] = label_message\n label_message.grid(row=1, column=0, sticky=\"w\",\n padx=(5, 5), pady=(0, 5))\n # == Scrollbox\n scrollbox = Scrollbox(self, orient=\"vertical\",\n cnfs=self.__cnfs[SCROLLBOX])\n self.__components[SCROLLBOX] = scrollbox\n scrollbox.grid(row=2, column=0, sticky=\"nswe\",\n padx=5)\n # == Footer\n self.__footer = tk.Frame(self,\n name=FRAME_FOOTER,\n cnf=self.__cnfs[FRAME_FOOTER])\n self.__components[FRAME_FOOTER] = self.__footer\n self.__footer.grid(row=3, column=0, sticky=\"swe\", pady=(30, 0))\n #\n button_continue = tk.Button(self.__footer, name=BUTTON_CONTINUE,\n text=\"Continue\",\n command=self.__on_click_continue,\n cnf=self.__cnfs[BUTTON_CONTINUE])\n self.__components[BUTTON_CONTINUE] = button_continue\n button_continue.pack(side=tk.RIGHT, padx=2, pady=2)\n #\n button_cancel = tk.Button(self.__footer, name=BUTTON_CANCEL,\n text=\"Cancel\",\n command=self.__on_click_cancel,\n cnf=self.__cnfs[BUTTON_CANCEL])\n self.__components[BUTTON_CANCEL] = button_cancel\n button_cancel.pack(side=tk.RIGHT, padx=(2, 0), pady=2)\n # install and populate check/radio buttons\n key = RADIOBUTTONS if self.__flavor == \"radio\" else CHECKBUTTONS\n self.__components[key] = []\n cache = None\n for i, choice in enumerate(self.__items):\n if not self.__flavor or self.__flavor not in (\"radio\", \"check\"):\n break\n if self.__flavor == \"radio\":\n cache = tk.Radiobutton(scrollbox.box,\n variable=self.__intvar,\n text=choice, value=i,\n cnf=self.__cnfs[RADIOBUTTONS])\n self.__components[RADIOBUTTONS].append(cache)\n elif self.__flavor == \"check\":\n tk_var = tk.IntVar()\n self.__intvars.append(tk_var)\n cache = tk.Checkbutton(scrollbox.box,\n variable=tk_var,\n onvalue=1, offvalue=0,\n text=choice,\n cnf=self.__cnfs[CHECKBUTTONS])\n self.__components[CHECKBUTTONS].append(cache)\n if cache:\n cache.pack(anchor=\"w\", expand=1)\n\n def __on_display(self):\n # fill selected items\n if self.__flavor == RADIO and self.__selected is not None:\n if isinstance(self.__selected, int) and self.__selected >= 0:\n self.__intvar.set(self.__selected)\n elif self.__flavor == CHECK and self.__selected is not None:\n if isinstance(self.__selected, tuple):\n 
for i in self.__selected:\n try:\n self.__intvars[i].set(1)\n except IndexError:\n pass\n elif isinstance(self.__selected, int):\n self.__intvars[self.__selected].set(1)\n\n def __on_destroy(self):\n if self.__closing_context == \"continue\":\n self.__result = self.__get_result()\n if self.__handler:\n self.__handler(self.__result)\n\n def __toplevel_geometry(self):\n tkmisc.center_window(self, within=self.master.winfo_toplevel())\n tkmisc.dialog_effect(self)\n\n def __on_click_continue(self):\n self.__closing_context = \"continue\"\n self.destroy()\n\n def __on_click_cancel(self):\n self.__closing_context = \"cancel\"\n self.destroy()\n\n def __get_result(self):\n result = None\n if self.__flavor == \"radio\":\n index = self.__intvar.get()\n result = (index, self.__items[index])\n elif self.__flavor == \"check\":\n cache = []\n for i, intvar in enumerate(self.__intvars):\n intvar_index = intvar.get()\n cache.append((intvar_index, self.__items[i]))\n result = tuple(cache)\n return result\n\n\nclass _ChoiceTest(View):\n def __init__(self, root):\n super().__init__()\n self._root = root\n self._body = None\n\n def _on_build(self):\n self._body = tk.Frame(self._root)\n btn_launch_check_choice = tk.Button(self._body,\n text=\"Launch checkbutton choice\",\n command=self._on_click_btn_check)\n btn_launch_check_choice.pack(side=tk.LEFT, anchor=\"nw\")\n btn_launch_radio_choice = tk.Button(self._body,\n text=\"Launch radiobutton choice\",\n command=self._on_click_btn_radio)\n btn_launch_radio_choice.pack(side=tk.LEFT, anchor=\"nw\")\n\n\n def _on_display(self):\n pass\n\n def _on_destroy(self):\n pass\n\n def _on_click_btn_check(self):\n Choice(self._body, title=\"Title\", header=\"header\", flavor=\"check\",\n message=\"message\",\n items=[\"first\", \"second\", \"third\"],\n selected=1,\n handler=self._choice_handler)\n\n def _on_click_btn_radio(self):\n tests = (\"test \"*10).split()\n Choice(self._root,\n title=\"Title\", header=\"header\", flavor=\"radio\",\n message=\"message\",\n items=[\"first\", \"second\", \"third\", *tests],\n selected=1, handler=self._choice_handler)\n\n def _choice_handler(self, data):\n print(\"Choice: {}\".format(data))\n\n\nif __name__ == \"__main__\":\n root = tk.Tk()\n root.geometry(\"500x300+0+0\")\n choice_test = _ChoiceTest(root)\n choice_test.build_pack()\n root.mainloop()\n", "id": "6753274", "language": "Python", "matching_score": 6.855579376220703, "max_stars_count": 0, "path": "pyrustic/widget/choice.py" }, { "content": "import tkinter as tk\nfrom pyrustic import widget\nfrom pyrustic import tkmisc\nfrom pyrustic.view import View\nfrom pyrustic.tkmisc import merge_cnfs\n\n\n# Components\nLABEL_HEADER = \"label_header\"\nLABEL_MESSAGE = \"label_message\"\nFRAME_FOOTER = \"frame_footer\"\nBUTTON_CANCEL = \"button_cancel\"\nBUTTON_CONFIRM = \"button_confirm\"\n\n\nclass Confirm(widget.Toplevel):\n \"\"\"\n Confirm is a dialog box to ask the user to confirm an action.\n\n Example:\n\n import tkinter as tk\n from pyrustic.widget.confirm import Confirm\n\n def my_handler(result):\n print(result)\n\n root = tk.Tk()\n confirm = Confirm(root, title=\"Confirm\", header=\"Confirmation\",\n message=\"Do you really want to continue ?\",\n handler=my_handler)\n confirm.build()\n root.mainloop()\n\n \"\"\"\n def __init__(self,\n master=None,\n title=None,\n header=None,\n message=None,\n handler=None,\n geometry=None,\n cnfs=None):\n \"\"\"\n PARAMETERS:\n\n - master: widget parent. 
Example: an instance of tk.Frame\n\n - title: title of dialog box\n\n - header: the text to show as header\n\n - message: the text to show as message\n\n - handler: a callback to be executed immediately after closing the dialog box.\n This callback should accept a boolean positional argument.\n True means Ok, confirmed.\n\n - geometry: str, as the dialog box is a toplevel (BODY),\n you can edit its geometry. Example: \"500x300\"\n\n - options: dictionary of widgets options\n The widgets keys are: BODY, LABEL_HEADER,\n LABEL_MESSAGE, FRAME_FOOTER, BUTTON_CANCEL, BUTTON_CONFIRM.\n\n Example: Assume that you want to set the LABEL_MESSAGE's background to black\n and the BODY's background to red:\n options = { BODY: {\"background\": \"red\"},\n LABEL_MESSAGE: {\"background\": \"black\"} }\n\n \"\"\"\n self.__cnfs = merge_cnfs(None, cnfs, components=(\"body\",\n LABEL_HEADER, LABEL_MESSAGE, FRAME_FOOTER,\n BUTTON_CANCEL, BUTTON_CONFIRM))\n super().__init__(master=master,\n class_=\"Confirm\",\n cnf=self.__cnfs[\"body\"],\n on_build=self.__on_build,\n on_display=self.__on_display,\n on_destroy=self.__on_destroy,\n toplevel_geometry=self.__toplevel_geometry)\n self.__title = title\n self.__header = header\n self.__message = message\n self.__handler = handler\n self.__geometry = geometry\n self.__components = {}\n self.__ok = False\n # build\n self.__view = self.build()\n\n # ====================================\n # PROPERTIES\n # ====================================\n\n @property\n def header(self):\n return self.__header\n\n @property\n def message(self):\n return self.__message\n\n @property\n def handler(self):\n return self.__handler\n\n @property\n def ok(self):\n \"\"\"\n Returns True if user confirmed, else get False\n \"\"\"\n return self.__ok\n\n @property\n def components(self):\n \"\"\"\n Get the components (widgets instances) used to build this dialog.\n\n This property returns a dict. 
The keys are:\n BODY, LABEL_HEADER,\n LABEL_MESSAGE, FRAME_FOOTER, BUTTON_CANCEL, BUTTON_CONFIRM\n\n Warning: check the presence of key before usage\n \"\"\"\n return self.__components\n\n # ====================================\n # INTERNAL\n # ====================================\n def __on_build(self):\n self.title(self.__title)\n self.resizable(0, 0)\n #\n #\n if self.__geometry:\n self.geometry(self.__geometry)\n #\n if self.__header:\n label_header = tk.Label(self,\n text=self.__header,\n anchor=\"w\",\n justify=tk.LEFT,\n name=LABEL_HEADER,\n cnf=self.__cnfs[LABEL_HEADER])\n self.__components[LABEL_HEADER] = label_header\n label_header.pack(fill=tk.X, expand=1, anchor=\"w\", pady=5, padx=5)\n #\n if self.__message:\n label_message = tk.Label(self,\n name=LABEL_MESSAGE,\n text=self.__message,\n anchor=\"w\",\n justify=tk.LEFT,\n cnf=self.__cnfs[LABEL_MESSAGE])\n self.__components[LABEL_MESSAGE] = label_message\n label_message.pack(fill=tk.BOTH,\n expand=1, padx=5, pady=(5, 10))\n\n #\n frame_footer = tk.Frame(self, cnf=self.__cnfs[FRAME_FOOTER])\n self.__components[FRAME_FOOTER] = frame_footer\n frame_footer.pack(anchor=\"e\", pady=(0, 2), padx=2)\n #\n button_confirm = tk.Button(frame_footer,\n text=\"Confirm\",\n name=BUTTON_CONFIRM,\n command=self.__on_click_confirm,\n cnf=self.__cnfs[BUTTON_CONFIRM])\n self.__components[BUTTON_CONFIRM] = button_confirm\n button_confirm.pack(side=tk.RIGHT)\n #\n button_cancel = tk.Button(frame_footer,\n text=\"Cancel\",\n name=BUTTON_CANCEL,\n command=self.__on_click_cancel,\n cnf=self.__cnfs[BUTTON_CANCEL])\n self.__components[BUTTON_CANCEL] = button_cancel\n button_cancel.pack(side=tk.RIGHT, padx=(0, 2))\n\n def __on_display(self):\n pass\n\n def __on_destroy(self):\n if self.__handler:\n self.__handler(self.__ok)\n\n def __toplevel_geometry(self):\n tkmisc.center_window(self, within=self.master.winfo_toplevel())\n tkmisc.dialog_effect(self)\n\n def __on_click_cancel(self):\n self.__ok = False\n self.destroy()\n\n def __on_click_confirm(self):\n self.__ok = True\n self.destroy()\n\n\nclass _ConfirmTest(View):\n def __init__(self, root):\n super().__init__()\n self._root = root\n self._body = None\n\n def _on_build(self):\n self._body = tk.Frame(self._root)\n btn_launch = tk.Button(self._body, text=\"Launch\",\n command=self._on_click_launch)\n btn_launch.pack()\n\n def _on_display(self):\n pass\n\n def _on_destroy(self):\n pass\n\n def _on_click_launch(self):\n confirm = Confirm(root, title=\"Confirm\",\n header=\"Confirmation\",\n message=\"Do you really want to continue ?\\nPress ok to continue\\nOr die !\")\n confirm.wait_window()\n print(\"Confirm:\", confirm.ok)\n\n\nif __name__ == \"__main__\":\n root = tk.Tk()\n root.geometry(\"500x300+0+0\")\n confirm_test = _ConfirmTest(root)\n confirm_test.build_pack()\n root.mainloop()\n", "id": "1247076", "language": "Python", "matching_score": 6.211079120635986, "max_stars_count": 0, "path": "pyrustic/widget/confirm.py" }, { "content": "import tkinter as tk\nfrom pyrustic import widget\nfrom pyrustic.view import View, CustomView\nfrom pyrustic.tkmisc import merge_cnfs\nfrom pyrustic import tkmisc\n\n\n# Components\nLABEL_HEADER = \"label_header\"\nLABEL_MESSAGE = \"label_message\"\n\n\nclass Toast(widget.Toplevel):\n \"\"\"\n A toast is a dialog box with or without decoration\n that is displayed for a given duration.\n\n Any \"click\" action on the Toast's body will close it.\n\n Example:\n import tkinter as tk\n from pyrustic.widget.toast import Toast\n\n root = tk.Tk()\n toast = Toast(root, 
header=\"My Header\", message=\"My Message\")\n toast.build()\n root.mainloop()\n\n \"\"\"\n\n def __init__(self,\n master=None,\n title=None,\n header=None,\n message=None,\n duration=1234,\n decoration=False,\n geometry=None,\n cnfs=None):\n \"\"\"\n PARAMETERS:\n\n - master: widget parent. Example: an instance of tk.Frame\n\n - title: title of dialog box\n\n - header: the text to show as header\n\n - message: the text to show as message\n\n - duration: int, in milliseconds.\n You can set None to duration to cancel the self-destroying timer\n\n - decoration: True or False to allow Window decoration\n\n - geometry: str, as the dialog box is a toplevel (BODY),\n you can edit its geometry. Example: \"500x300\"\n\n - options: dictionary of widgets options\n The widgets keys are: BODY, LABEL_HEADER, LABEL_MESSAGE.\n\n Example: Assume that you want to set the LABEL_MESSAGE's background to black\n and the BODY's background to red:\n options = { BODY: {\"background\": \"red\"},\n LABEL_MESSAGE: {\"background\": \"black\"} }\n \"\"\"\n self.__cnfs = merge_cnfs(None, cnfs, components=(\"body\",\n LABEL_HEADER, LABEL_MESSAGE))\n super().__init__(master=master,\n class_=\"Toast\",\n cnf=self.__cnfs[\"body\"],\n on_build=self.__on_build,\n on_display=self.__on_display,\n on_destroy=self.__on_destroy,\n toplevel_geometry=self.__toplevel_geometry)\n self.__title = title\n self.__header = header\n self.__message = message\n self.__duration = duration\n self.__decoration = decoration\n self.__geometry = geometry\n self.__cancel_id = None\n self.__components = {}\n self.__view = self.build()\n\n # ======================================\n # PROPERTIES\n # ======================================\n\n @property\n def header(self):\n return self.__header\n\n @property\n def message(self):\n return self.__message\n\n @property\n def duration(self):\n return self.__duration\n\n @property\n def decoration(self):\n return self.__decoration\n\n @property\n def components(self):\n \"\"\"\n Get the components (widgets instances) used to build this Toast.\n\n This property returns a dict. 
The keys are:\n BODY, LABEL_HEADER, LABEL_MESSAGE,\n \"\"\"\n return self.__components\n\n # ======================================\n # LIFECYCLE\n # ======================================\n def __on_build(self):\n if not self.__decoration:\n self.overrideredirect(1)\n if self.__geometry:\n self.geometry(self.__geometry)\n if self.__title:\n self.title(self.__title)\n self.bind(\"<Button-1>\", self.__on_click, \"+\")\n if self.__header:\n label_header = tk.Label(self,\n name=LABEL_HEADER,\n text=self.__header,\n anchor=\"w\",\n justify=tk.LEFT,\n cnf=self.__cnfs[LABEL_HEADER])\n self.__components[LABEL_HEADER] = label_header\n label_header.pack(fill=tk.X, padx=10, pady=10)\n if self.__message:\n label_message = tk.Label(self,\n name=LABEL_MESSAGE,\n text=self.__message,\n anchor=\"w\",\n justify=tk.LEFT,\n cnf=self.__cnfs[LABEL_MESSAGE])\n self.__components[LABEL_MESSAGE] = label_message\n label_message.pack(fill=tk.X, padx=10, pady=10)\n\n def __on_display(self):\n if self.__duration is not None:\n self.__cancel_id = self.after(self.__duration, self.destroy)\n\n def __on_destroy(self):\n pass\n\n def __toplevel_geometry(self):\n tkmisc.center_window(self, within=self.master.winfo_toplevel())\n tkmisc.dialog_effect(self)\n\n def __on_click(self, event):\n self.destroy()\n\n\nclass _ToastTest(View):\n\n def __init__(self, root):\n super().__init__()\n self._root = root\n self._body = None\n\n def _on_build(self):\n self._body = tk.Frame(self._root)\n btn_launch = tk.Button(self._body, text=\"launch\",\n command=self._on_click_launch)\n btn_launch.pack()\n\n def _on_display(self):\n pass\n\n def _on_destroy(self):\n pass\n\n def _on_click_launch(self):\n Toast(self._body, title=\"Toast Title\",\n header=\"Header\", message=\"This is the message\",\n duration=3000)\n\n\nif __name__ == \"__main__\":\n root = tk.Tk()\n root.geometry(\"500x300+0+0\")\n toast_test = _ToastTest(root)\n toast_test.build_pack()\n root.mainloop()\n", "id": "1985936", "language": "Python", "matching_score": 2.7979588508605957, "max_stars_count": 0, "path": "pyrustic/widget/toast.py" }, { "content": "import tkinter as tk\nfrom pyrustic.view import CustomView\nfrom pyrustic import widget\nfrom pyrustic.tkmisc import merge_cnfs\nfrom pyrustic.exception import PyrusticWidgetException\n\n\n# Components\nFRAME_NODE = \"frame_node\"\nFRAME_HEADER = \"frame_header\"\nFRAME_BOX = \"frame_box\"\n\n\nclass Tree(widget.Frame):\n \"\"\"\n Tree is the megawidget to use to display the data as a tree.\n To use Tree, you need to subclass it.\n\n pyrustic.tree.SimpleTree is a nice example to study.\n\n Scroll to the bottom of this file at the top-level script\n environment to see the usage of SimpleTree\n \"\"\"\n def __init__(self,\n master=None,\n indent=50,\n spacing=10,\n cnfs=None):\n \"\"\"\n PARAMETERS:\n\n - master: widget parent. 
Example: an instance of tk.Frame\n\n - indent: left indent\n\n - spacing: space between two nodes\n\n - options: dictionary of widgets options\n The widgets keys are: BODY, NODE_FRAME, HEADER_FRAME, and BOX_FRAME.\n Example: Assume that you want to set the NODE_FRAME's background to black\n and the BODY's background to red:\n options = {BODY: {\"background\": \"red\"},\n NODE_FRAME: {\"background\": \"black\"}}\n \"\"\"\n self.__cnfs = merge_cnfs(None, cnfs, components=(\"body\",\n FRAME_NODE, FRAME_HEADER, FRAME_BOX))\n super().__init__(master=master,\n class_=\"Tree\",\n cnf=self.__cnfs[\"body\"],\n on_build=self.__on_build,\n on_display=self.__on_display,\n on_destroy=self.__on_destroy)\n self.__indent = indent\n self.__spacing = spacing\n self.__root = None\n self.__nodes = {}\n self.__internal_count = 0\n self.__cache = None\n self.__hook = None\n self.__view = self.build()\n\n # ==============================================\n # PROPERTIES\n # ==============================================\n @property\n def indent(self):\n return self.__indent\n\n @property\n def spacing(self):\n return self.__spacing\n\n @property\n def root(self):\n return self.__root\n\n @property\n def nodes(self):\n \"\"\"\n Returns sequence of nodes. Check the method 'node()' to\n see how an individual node data structure looks like.\n \"\"\"\n return [node.copy() for key, node in self.__nodes.items()]\n\n @property\n def hook(self):\n return self.__hook\n\n @hook.setter\n def hook(self, val):\n self.__hook = val\n\n # ==============================================\n # PUBLIC METHODS\n # ==============================================\n def insert(self, parent=None, title=\"\", index=\"end\",\n data=None, container=True, expand=False):\n \"\"\"\n Insert a node.\n - parent: the node_id of the parent or None if this is the root node of the tree\n - title: string\n - index: an integer to indicate where to put the node between\n its parent's descendants.\n Put \"end\" to indicate that this node should be added at the the end\n - data: None or dictionary to contain whatever data you want. It could help later.\n - container: boolean. True, if the node should contain another node. False else.\n - expand: boolean, True if this node should be expanded from creation. 
False else.\n Returns:\n None if failed to insert the node, else returns the newly created node_id\n \"\"\"\n # a root node shouldn't have a parent and should have index \"end\"\n data = {} if data is None else data\n if not self.__nodes:\n if parent is not None or (index != \"end\"):\n return None\n # a non-root should be legal\n elif not self.__check_non_root_node_is_legal(parent, index):\n return None\n # create node and return its id\n return self.__build_node(parent, title, index, data, container, expand)\n\n def node(self, id_or_path):\n \"\"\"\n Returns a node by its node_id or its dotted path.\n A node is a dictionary of data:\n node = {\"parent\": int, \"node_id\": int, \"container\": bool,\n \"index\": int, \"expanded\": bool, \"data\": dict, \"title\": str,\n \"frame_node\": tk.Frame, \"frame_header\": tk.Frame,\n \"frame_box\": tk.Frame, \"attached\": bool, \"ghosted\": bool}\n Example of dotted path (each number in the path is a position index):\n Hub\n Africa\n America\n Asia\n China\n china node = \"0.2.0\"\n \"\"\"\n node = None\n if isinstance(id_or_path, str):\n node = self.__node_from_path(id_or_path)\n elif isinstance(id_or_path, int):\n node = self.__get_node(id_or_path)\n if node:\n return node.copy()\n\n def feed(self, node_id, *args, **kwargs):\n \"\"\"\n This method will call \"_on_feed(*args, **kwargs\").\n Use it to feed some data to the tree\n \"\"\"\n node = self.__get_node(node_id)\n if not node:\n return\n view = node.get(\"view\", None)\n if not view:\n return\n view.on_feed_node(self, self, node, *args, **kwargs)\n\n def descendants(self, node_id):\n \"\"\"\n List of descendants nodes.\n [ node, node, ...]\n\n Please check the doc of the method \"node()\" to learn more about\n the structure of a node object (a dict in fact)\n \"\"\"\n return [node.copy() for key, node in self.__nodes.items()\n if node[\"parent\"] == node_id]\n\n def expand(self, node_id):\n \"\"\"\n Expands this node. Returns True if it worked, else returns False\n \"\"\"\n node = self.__get_node(node_id)\n if node and node[\"container\"] and not node[\"expanded\"]:\n node[\"expanded\"] = True\n view = node.get(\"view\", None)\n if view:\n view.on_expand_node(self, node)\n node[\"frame_box\"].grid(row=1, column=0)\n return True\n return False\n\n def collapse(self, node_id):\n \"\"\"\n Collapses this node. Returns True if it worked, else returns False\n \"\"\"\n node = self.__get_node(node_id)\n if node and node[\"container\"] and node[\"expanded\"]:\n node[\"expanded\"] = False\n view = node.get(\"view\", None)\n if view:\n view.on_collapse_node(self, node)\n node[\"frame_box\"].grid_remove()\n return True\n return False\n\n def expanded(self, node_id):\n \"\"\"\n Returns True if this node is actually expanded, else returns False\n \"\"\"\n node = self.__get_node(node_id)\n if node:\n return node[\"expanded\"]\n return None\n\n def collexp(self, node_id):\n \"\"\"\n Useful method to toggle the state collapsed/expanded of the node\n \"\"\"\n if not self.collapse(node_id):\n self.expand(node_id)\n\n def title(self, node_id, title=None):\n \"\"\"\n Use this method to display or edit the title of a node.\n Returns this node's title if you don't set a title as argument\n \"\"\"\n node = self.__get_node(node_id)\n data = None\n if node:\n if title:\n node[\"title_stringvar\"].set(title)\n data = node[\"title_stringvar\"].get()\n return data\n\n def tag(self, node_id, data=None):\n \"\"\"\n Edits this node's data. 
Data should be None or a dict\n Returns the data\n \"\"\"\n data = {} if data is None else data\n node = self.__get_node(node_id)\n if not node:\n return\n for key, value in data.items():\n node[\"data\"][key] = value\n return node[\"data\"].copy()\n\n def untag(self, node_id, data=None):\n \"\"\"\n Edits this node's data. Data should be a sequence of keys.\n Returns the data\n \"\"\"\n node = self.__get_node(node_id)\n if node:\n for tag in data:\n try:\n del node[\"data\"][tag]\n except KeyError:\n pass\n\n def delete(self, node_id):\n \"\"\"\n Deletes this node.\n Returns True or False\n \"\"\"\n for key, node in self.__nodes.items():\n if key == node_id:\n if node[\"container\"]:\n for descendant in self.descendants(node_id):\n self.delete(descendant[\"node_id\"])\n try:\n node[\"frame_node\"].destroy()\n except Exception as e:\n pass\n if node_id > 0:\n self.node(node[\"parent\"])[\"frame_box\"].config(height=1)\n del self.__nodes[key]\n return True\n return False\n\n def clear(self, node_id):\n \"\"\"\n Deletes the descendants of this node. Returns True if all right, else False.\n \"\"\"\n cache = True\n descendants = self.descendants(node_id)\n for descendant in descendants:\n deleted = self.delete(descendant[\"node_id\"])\n cache = False if not deleted else cache\n return cache\n\n def move(self, node_id, parent_id=None, index=0):\n \"\"\"\n Moves a node to another index. Returns True if all right, else False.\n \"\"\"\n node = self.__get_node(node_id)\n if not node:\n return False\n if parent_id is None:\n parent_id = node[\"parent\"]\n parent_node = self.__get_node(parent_id)\n if parent_node is None:\n return False\n if not self.__check_non_root_node_is_legal(parent_id, index):\n return False\n if isinstance(index, int):\n if index < len([node for key, node in self.__nodes.items()\n if node[\"parent\"] == parent_id]):\n self.__relocate(parent_id, index, direction=\"+\")\n node[\"parent\"] = parent_id\n node[\"index\"] = index\n node[\"frame_node\"].grid_remove()\n node[\"frame_node\"].grid(in_=parent_node[\"frame_box\"], row=index)\n return True\n\n def walk(self, node_id):\n \"\"\"\n Walks throughout the node.\n Example:\n for node_id, descendants in tree.walk(2):\n print(node_id, len(descendants))\n \"\"\"\n for key, node in self.__nodes.items():\n if key == node_id:\n if node[\"container\"]:\n descendants = self.descendants(node_id)\n yield node_id, descendants\n for descendant in descendants:\n for a, b in self.walk(descendant[\"node_id\"]):\n if a is None:\n continue\n yield a, b\n\n def attach(self, node_id):\n \"\"\"\n Attaches (again) a detached node. Returns True if it worked, else False\n \"\"\"\n node = self.__get_node(node_id)\n if node and not node[\"attached\"]:\n node[\"frame_node\"].grid()\n node[\"attached\"] = True\n return True\n return False\n\n def detach(self, node_id):\n \"\"\"\n Detaches an attached node. 
Returns True if it worked, else False.\n The detached node won't be visible anymore.\n The detached node's descendants won't be visible anymore.\n \"\"\"\n node = self.__get_node(node_id)\n if node and node[\"attached\"]:\n node[\"frame_node\"].grid_remove()\n node[\"attached\"] = False\n return True\n return False\n\n def ghost(self, node_id):\n \"\"\"\n Hide the header frame of the node whose node_id is given.\n Note that the descendants nodes will still be visible.\n Use this method to give illusion that descendants nodes\n don't have a root at all (kind of floating without root).\n This method returns a boolean (True to indicate that all right, else False)\n \"\"\"\n node = self.__get_node(node_id)\n if node and not node[\"ghosted\"]:\n node[\"frame_header\"].grid_remove()\n node[\"frame_box\"].grid(padx=(0, 0))\n node[\"frame_node\"].grid(pady=(0, 0))\n node[\"ghosted\"] = True\n node[\"expanded\"] = True\n return True\n return False\n\n def unghost(self, node_id):\n \"\"\"\n Reveals the header frame of the node whose node_id is given.\n This method returns a boolean (True to indicate that all right, else False)\n \"\"\"\n node = self.__get_node(node_id)\n if node and node[\"ghosted\"]:\n node[\"frame_header\"].grid()\n node[\"frame_box\"].grid(padx=(self.__indent, 0))\n node[\"frame_node\"].grid(pady=(self.__spacing, 0))\n node[\"ghosted\"] = False\n return True\n return False\n\n\n # ==============================================\n # INTERNAL\n # ==============================================\n def __on_build(self):\n pass\n\n def __on_display(self):\n pass\n\n def __on_destroy(self):\n pass\n\n def __build_node(self, parent, title, index, data, container, expand):\n # Case 1: root node\n if parent is None:\n index = 0\n else:\n descendants_count = len([node for key, node in self.__nodes.items()\n if node[\"parent\"] == parent])\n # Case 2: non-root node with an index \"end\" or count root descendants\n if index == \"end\" or index == descendants_count:\n if index == \"end\":\n index = descendants_count\n # Case 3: non-root node with an existent index\n elif 0 <= index < descendants_count:\n # relocate\n self.__relocate(parent, index)\n node_id = self.__internal_count\n self.__internal_count += 1\n frame_node, frame_header, frame_box = self.__build_node_frame(parent,\n index)\n node = {\"parent\": parent, \"node_id\": node_id, \"container\": container,\n \"index\": index, \"expanded\": expand, \"data\": data, \"title\": title,\n \"frame_node\": frame_node, \"frame_header\": frame_header,\n \"frame_box\": frame_box, \"attached\": True, \"ghosted\": False}\n self.__nodes[node_id] = node\n if parent is None:\n self.__root = node\n view = self.__get_view(frame_header, node.copy())\n if view:\n node[\"view\"] = view\n view.build()\n # Silently collapse\n if not expand:\n node[\"frame_box\"].grid_remove()\n return node_id\n\n def __build_node_frame(self, parent, index):\n # node frame\n master = self if parent is None else self.__get_node(parent)[\"frame_box\"]\n master.columnconfigure(0, weight=1)\n frame_node = tk.Frame(master, class_=\"FrameNode\",\n cnf=self.__cnfs[FRAME_NODE])\n frame_node.columnconfigure(0, weight=1)\n # grid frame_node\n if parent is None:\n frame_node.grid(column=0, row=0, sticky=\"we\")\n else:\n frame_node.grid(column=0, row=index,\n sticky=\"we\", pady=(self.__spacing, 0))\n # header\n frame_header = tk.Frame(frame_node, name=FRAME_HEADER,\n cnf=self.__cnfs[FRAME_HEADER])\n frame_header.columnconfigure(0, weight=1)\n frame_header.grid(row=0, column=0, 
sticky=\"we\")\n # box\n frame_box = tk.Frame(frame_node, name=FRAME_BOX,\n cnf=self.__cnfs[FRAME_BOX])\n frame_box.grid(row=1, column=0,\n padx=(self.__indent, 0),\n sticky=\"we\")\n return frame_node, frame_header, frame_box\n\n def __relocate(self, parent, from_index, direction=\"+\"):\n \"\"\"\n direction = - or +\n \"\"\"\n if direction not in (\"+\", \"-\"):\n return\n for key, node in self.__nodes.items():\n if node[\"parent\"] == parent and node[\"index\"] >= from_index:\n if direction == \"-\":\n node[\"index\"] -= 1\n elif direction == \"+\":\n node[\"index\"] += 1\n node[\"frame_node\"].grid(column=0, row=node[\"index\"])\n\n def __check_non_root_node_is_legal(self, parent_id, index):\n # a non-root node should have a parent\n if parent_id is None:\n return False\n # a non-root node should have an existent parent\n if parent_id not in [key for key, node in self.__nodes.items()]:\n return False\n # a non-root node should have a legal index: \"end\" or an integer value\n if isinstance(index, str) and index != \"end\":\n return False\n elif isinstance(index, int):\n # a non-root node should have an index that exists or is the last index +1\n if not (0 <= index <= len([node for key, node in self.__nodes.items()\n if node[\"parent\"] == parent_id])):\n return False\n return True\n\n def __get_node(self, node_id):\n if self.__cache:\n if self.__cache[\"node_id\"] == node_id:\n return self.__cache\n for key, node in self.__nodes.items():\n if key == node_id:\n self.__cache = node\n return node\n return None\n\n def __node_from_path(self, path):\n path = path.split(\".\")\n if not path or path[0] != \"0\":\n return\n cache = 0\n del path[0]\n for index in path:\n try:\n index = int(index)\n except Exception:\n return\n descendants = self.descendants(cache)\n if not descendants:\n return\n valid_index = False\n for descendant in descendants:\n if descendant[\"index\"] == index:\n valid_index = True\n cache = descendant[\"node_id\"]\n break\n if not valid_index:\n return\n return self.__get_node(cache)\n\n def __get_view(self, body, node):\n if not self.__hook:\n return None\n hook = self.__hook()\n if not isinstance(hook, Hook):\n message = (\"The hook should be a callable\",\n \"that returns a pyrustic.widget.tree.hook.Hook\")\n raise PyrusticWidgetException(\" \".join(message))\n on_build = (lambda tree=self,\n node=node,\n frame=body,\n hook=hook:\n hook.on_build_node(tree, node, frame))\n on_display = (lambda tree=self,\n node=node,\n frame=body,\n hook=hook:\n hook.on_display_node(tree, node))\n on_destroy = (lambda tree=self,\n node=node,\n frame=body,\n hook=hook:\n hook.on_destroy_node(tree, node))\n view = CustomView(body=body, on_build=on_build,\n on_display=on_display,\n on_destroy=on_destroy)\n view.on_feed_node = hook.on_feed_node\n view.on_expand_node = hook.on_expand_node\n view.on_collapse_node = hook.on_collapse_node\n return view\n\n\nclass Hook:\n\n def on_build_node(self, tree, node, frame):\n pass\n\n def on_display_node(self, tree, node):\n pass\n\n def on_destroy_node(self, tree, node):\n pass\n\n def on_feed_node(self, tree, node, *args, **kwargs):\n pass\n\n def on_expand_node(self, tree, node):\n pass\n\n def on_collapse_node(self, tree, node):\n pass\n\n\n# ====================================\n# DEMO\n# ====================================\nclass ExampleHook(Hook):\n def __init__(self):\n self._expander_stringvar = tk.StringVar()\n self._title_stringvar = tk.StringVar()\n\n def on_build_node(self, tree, node, frame):\n node_id = node[\"node_id\"]\n title = 
node[\"title\"]\n container = node[\"container\"]\n # Header_1 - contains Expander_btn and title_label\n titlebar = tk.Frame(frame, name=\"treeTitlebar\")\n titlebar.grid(row=0, column=0, sticky=\"we\")\n titlebar.columnconfigure(1, weight=1)\n # Header_2 - is a frame\n toolbar = tk.Frame(frame, name=\"treeToolbar\")\n toolbar.grid(row=1, column=0, sticky=\"we\")\n #\n if container:\n self._expander_stringvar.set(\"-\" if node[\"expanded\"] else \"+\")\n expander_btn = tk.Button(titlebar, name=\"treeExpander\",\n textvariable=self._expander_stringvar,\n padx=0, pady=0,\n command=lambda self=self, node_id=node_id:\n tree.collexp(node_id))\n expander_btn.grid(row=0, column=0)\n self._title_stringvar.set(title)\n title_label = tk.Label(titlebar, name=\"treeTitleLabel\",\n anchor=\"w\", textvariable=self._title_stringvar)\n title_label.grid(row=0, column=1, sticky=\"we\")\n\n def on_display_node(self, tree, node):\n pass\n\n def on_destroy_node(self, tree, node):\n pass\n\n def on_collapse_node(self, tree, node):\n self._expander_stringvar.set(\"+\")\n\n def on_expand_node(self, tree, node):\n self._expander_stringvar.set(\"-\")\n\n def on_feed_node(self, tree, node, *args, **kwargs):\n pass\n\ndef _populate_example(tree):\n #\n hub_id = tree.insert(title=\"Hub\")\n africa_id = tree.insert(title=\"Africa\", parent=hub_id)\n america_id = tree.insert(title=\"America\", parent=hub_id)\n asia_id = tree.insert(title=\"Asia\", parent=hub_id)\n europe_id = tree.insert(title=\"Europe\", parent=hub_id)\n #\n france_id = tree.insert(title=\"France\", parent=europe_id)\n italy_id = tree.insert(title=\"Italy\", parent=europe_id)\n china_id = tree.insert(title=\"China\", parent=asia_id)\n japan_id = tree.insert(title=\"Japan\", parent=asia_id)\n usa_id = tree.insert(title=\"USA\", parent=america_id)\n mexico_id = tree.insert(title=\"Mexico\", parent=america_id)\n ghana_id = tree.insert(title=\"Ghana\", parent=africa_id)\n morocco_id = tree.insert(title=\"Morocco\", parent=africa_id)\n tunisia_id = tree.insert(title=\"Tunisia\", parent=africa_id)\n #\n paris_id = tree.insert(title=\"Paris\", parent=france_id, container=False)\n accra_id = tree.insert(title=\"Accra\", parent=ghana_id, container=False)\n tunis_id = tree.insert(title=\"Tunis\", parent=tunisia_id, container=False)\n rabat_id = tree.insert(title=\"Rabat\", parent=morocco_id, container=False)\n tokyo_id = tree.insert(title=\"Tokyo\", parent=japan_id, container=False)\n\nif __name__ == \"__main__\":\n from pyrustic.widget.scrollbox import Scrollbox\n\n root = tk.Tk()\n root.geometry(\"500x500+0+0\")\n scrollbox = Scrollbox(root)\n scrollbox.pack(expand=1, fill=tk.BOTH)\n tree = Tree(scrollbox.box)\n tree.pack(side=tk.LEFT, anchor=\"nw\")\n tree.hook = lambda: ExampleHook()\n _populate_example(tree)\n root.mainloop()\n", "id": "8121693", "language": "Python", "matching_score": 2.5200161933898926, "max_stars_count": 0, "path": "pyrustic/widget/tree.py" }, { "content": "import tkinter as tk\nfrom tkinter import filedialog\nfrom pyrustic import widget\nfrom pyrustic.tkmisc import merge_cnfs\nfrom pyrustic.view import View\n\n\nENTRY = \"entry\"\nBUTTON = \"button\"\nDIALOG = \"dialog\"\n\n\nclass Pathentry(widget.Frame):\n \"\"\"\n \"\"\"\n def __init__(self,\n master=None,\n browse=\"file\",\n width=17,\n title=None,\n initialdir=None,\n cnfs=None):\n \"\"\"\n - master: widget parent. 
Example: an instance of tk.Frame\n\n \"\"\"\n self.__cnfs = merge_cnfs({ENTRY: {\"width\": width}}, cnfs,\n components=(\"body\", ENTRY, BUTTON, DIALOG))\n super().__init__(master=master,\n class_=\"Pathentry\",\n cnf=self.__cnfs[\"body\"],\n on_build=self.__on_build,\n on_display=self.__on_display,\n on_destroy=self.__on_destroy)\n self.__browse = browse\n self.__title = title\n self.__initialdir = initialdir\n self.__entry = None\n self.__button = None\n self.__components = {}\n self.__string_var = tk.StringVar(value=\"\")\n # build\n self.__view = self.build()\n # ==============================================\n # PROPERTIES\n # ==============================================\n @property\n def components(self):\n \"\"\"\n \"\"\"\n return self.__components\n\n @property\n def string_var(self):\n return self.__string_var\n\n @property\n def path(self):\n return self.__path\n\n @path.setter\n def path(self, val):\n self.__path = val\n\n def __on_build(self):\n self.__entry = tk.Entry(self, textvariable=self.__string_var,\n cnf=self.__cnfs[ENTRY])\n self.__entry.pack(side=tk.LEFT, pady=0, fill=tk.X, expand=1)\n self.__components[\"entry\"] = self.__entry\n self.__button = tk.Button(self, text=\"...\",\n command=self.__on_click_button,\n cnf=self.__cnfs[BUTTON])\n self.__button.pack(side=tk.LEFT, padx=(2, 0), fill=tk.Y)\n self.__components[\"button\"] = self.__button\n\n def __on_display(self):\n pass\n\n def __on_destroy(self):\n pass\n\n def __on_click_button(self):\n if self.__browse == \"file\":\n try:\n filename = filedialog.askopenfilename(initialdir=self.__initialdir,\n title=self.__title,\n **self.__cnfs[DIALOG])\n except Exception as e:\n return\n path = None\n if not filename:\n pass\n elif isinstance(filename, str):\n path = filename\n else:\n path = \";\".join(filename)\n if path:\n self.__string_var.set(path)\n else:\n try:\n filename = filedialog.askdirectory(initialdir=self.__initialdir,\n title=self.__title,\n **self.__cnfs[DIALOG])\n except Exception as e:\n return\n path = None\n if not filename:\n pass\n elif isinstance(filename, str):\n path = filename\n else:\n path = \";\".join(filename)\n if path:\n self.__string_var.set(path)\n self.__entry.icursor(\"end\")\n\n\nif __name__ == \"__main__\":\n root = tk.Tk()\n pathentry_test = Pathentry(root, browse=\"dir\",\n extra_options={\"dialog\":\n {\"initialdir\": \"/home/alex\",\n \"title\": \"Hello\"}})\n pathentry_test.pack(fill=tk.BOTH, expand=1)\n root.mainloop()\n", "id": "2172316", "language": "Python", "matching_score": 1.211140513420105, "max_stars_count": 0, "path": "pyrustic/widget/pathentry.py" }, { "content": "import tkinter as tk\nfrom pyrustic import widget\nfrom pyrustic.tkmisc import merge_cnfs\nfrom pyrustic.view import View\n\n\n# Components\nCANVAS = \"canvas\"\nBOX = \"box\"\nHSB = \"hsb\"\nVSB = \"vsb\"\n\n# Orient\nBOTH = \"both\"\nVERTICAL = \"vertical\"\nHORIZONTAL = \"horizontal\"\n\n\nclass Scrollbox(widget.Frame):\n \"\"\"\n Scrollbox is a scrollable surface. You just need to use its property \"box\" as\n your layout's parent.\n\n Example:\n\n import tkinter as tk\n from pyrustic.widget.scrollbox import Scrollbox\n\n root = tk.Tk()\n scrollbox = Scrollbox(root)\n scrollbox.build_pack()\n # Pack 50 Label on the box\n for i in range(50):\n label = tk.Label(scrollbox.box, text=\"Label {}\".format(i))\n label.pack(anchor=tk.W)\n root.mainloop()\n\n \"\"\"\n def __init__(self,\n master=None,\n orient=VERTICAL,\n box_sticky=\"nswe\",\n resizable_box=True,\n cnfs=None):\n \"\"\"\n - master: widget parent. 
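Note that the demo at the bottom of the Pathentry module passes an "extra_options" argument that the constructor shown here does not accept; below is a usage sketch that sticks to the signature above, assuming the cnfs dict is keyed by the ENTRY/BUTTON/DIALOG constants, with the DIALOG options forwarded to the underlying tkinter file dialog.

import tkinter as tk
from pyrustic.widget.pathentry import Pathentry, DIALOG

root = tk.Tk()
pathentry = Pathentry(root, browse="dir", title="Pick a folder",
                      cnfs={DIALOG: {"mustexist": True}})  # forwarded to askdirectory
pathentry.pack(fill=tk.X, padx=10, pady=10)
# the selected path ends up in the exposed StringVar
pathentry.string_var.trace_add(
    "write", lambda *args: print(pathentry.string_var.get()))
root.mainloop()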
Example: an instance of tk.Frame\n\n - orient: could be one of: VERTICAL, HORIZONTAL, BOTH\n\n - options: dictionary of widgets options\n The widgets keys are: BODY, CANVAS, BOX, HSB, VSB\n Example: Assume that you want to set the CANVAS background to red\n options = {CANVAS: {\"background\": \"red\"}}\n \"\"\"\n self.__cnfs = merge_cnfs(None, cnfs,\n components=(\"body\", CANVAS, BOX, HSB, VSB))\n super().__init__(master=master,\n class_=\"Scrollbox\",\n cnf=self.__cnfs[\"body\"],\n on_build=self.__on_build,\n on_display=self.__on_display,\n on_destroy=self.__on_destroy)\n self.__orient = orient\n self.__box_sticky = box_sticky\n self.__resizable_box = resizable_box\n self.__canvas_options = None\n self.__canvas = None\n self.__box = None\n self.__box_id = None\n self.__vsb = None\n self.__hsb = None\n self.__hsb_under_mouse = False\n self.__is_scrollable = False\n self.__components = {}\n # build\n self.__view = self.build()\n # ==============================================\n # PROPERTIES\n # ==============================================\n\n\n @property\n def box(self):\n return self.__box\n\n @property\n def orient(self):\n return self.__orient\n\n @property\n def components(self):\n \"\"\"\n Get the components (widgets instances) used to build this scrollbox.\n\n This property returns a dict. The keys are:\n BODY, CANVAS, BOX, HSB, VSB\n\n Warning: check the presence of key before usage. Example,\n the widget linked to the HSB key may be missing because\n only VSB is used\n \"\"\"\n return self.__components\n\n # ==============================================\n # PUBLIC METHODS\n # ==============================================\n\n def xview_moveto(self, fraction):\n \"\"\"\n Calls canvas's method 'xview_moveto'\n Set:\n - 0: to scroll to left\n - 1: to scroll to right\n \"\"\"\n if self.__canvas:\n self.update_idletasks()\n self.__canvas.xview_moveto(fraction)\n\n def yview_moveto(self, fraction):\n \"\"\"\n Calls canvas's method 'yview_moveto'\n Set:\n - 0: to scroll to top\n - 1: to scroll to bottom\n \"\"\"\n if self.__canvas:\n self.update_idletasks()\n self.__canvas.yview_moveto(fraction)\n\n def box_config(self, **options):\n \"\"\"\n As the BOX is an item compared to CANVAS, some\n the options concerning the BOX can be edited only via\n CANVAS \"itemconfig\" method.\n Use this method to edit these options.\n itemconfig options are: anchor, state, height, width.\n\n Warning: these options are not the same as the arguments\n of BOX's own constructor !\n \"\"\"\n if self.__box:\n self.__canvas.itemconfig(self.__box_id, cnf=options)\n\n def clear(self):\n \"\"\"\n Clears the Scrollbox.\n This method doesn't destruct this object but BOX's children\n \"\"\"\n if self.__box:\n for x in self.__box.winfo_children():\n x.destroy()\n\n\n # ==============================================\n # PRIVATE METHODS\n # ==============================================\n\n def __on_build(self):\n self.bind(\"<Enter>\", self.__on_enter_body, \"+\")\n self.bind(\"<Leave>\", self.__on_leave_body, \"+\")\n self.bind(\"<Unmap>\", self.__on_unmap_body, \"+\")\n self.bind(\"<Destroy>\", self.__on_destroy_body, \"+\")\n self.bind_all(\"<MouseWheel>\", self.__on_mouse_wheel, \"+\")\n self.bind_all(\"<Button-4>\", self.__on_mouse_wheel, \"+\")\n self.bind_all(\"<Button-5>\", self.__on_mouse_wheel, \"+\")\n self.columnconfigure(0, weight=1, uniform=1)\n self.rowconfigure(0, weight=1, uniform=1)\n self.winfo_toplevel().bind(\"<Configure>\",\n self.__on_configure_box_canvas, \"+\")\n # canvas\n self.__canvas = 
tk.Canvas(self,\n name=CANVAS,\n width=0,\n height=0,\n cnf=self.__cnfs[CANVAS])\n self.__components[CANVAS] = self.__canvas\n self.__canvas.grid(row=0, column=0, sticky=self.__box_sticky)\n # box\n self.__box = tk.Frame(self.__canvas,\n name=BOX,\n cnf=self.__cnfs[BOX])\n self.__components[BOX] = self.__box\n self.__box_id = self.__canvas.create_window(0, 0, window=self.__box, anchor=\"nw\")\n self.__box.bind(\"<Configure>\", self.__on_configure_box_canvas, \"+\")\n # scrollbar\n self.__set_scrollbars()\n\n def __on_display(self):\n pass\n\n def __on_destroy(self):\n self.__unbind_funcs()\n\n def __on_mouse_wheel(self, event):\n if not self.__orient or not self.__is_scrollable:\n return\n # scroll down (value: 1) -> event.num = 5 or event.delta < 0\n # scroll up (value: -1) -> event.num = 4 or event.delta >= 0\n scroll = 1 if event.num == 5 or event.delta < 0 else -1\n if self.__orient in (\"horizontal\", \"x\", \"h\"):\n self.__canvas.xview_scroll(scroll, \"units\")\n elif self.__orient in (\"both\", \"vertical\", \"y\", \"v\"):\n if self.__hsb_under_mouse:\n self.__canvas.xview_scroll(scroll, \"units\")\n else:\n self.__canvas.yview_scroll(scroll, \"units\")\n\n def __set_scrollbars(self):\n if self.__orient in (\"both\", \"horizontal\", \"h\", \"x\"):\n self.__hsb = tk.Scrollbar(self, orient=\"horizontal\",\n name=HSB,\n command=self.__canvas.xview,\n cnf=self.__cnfs[HSB])\n self.__components[HSB] = self.__hsb\n self.__hsb.grid(row=1, column=0, columnspan=2, sticky=\"swe\")\n self.__canvas.config(xscrollcommand=self.__hsb.set)\n self.__bind_enter_leave_to_hsb()\n if self.__orient in (\"both\", \"vertical\", \"v\", \"y\"):\n self.__vsb = tk.Scrollbar(self, orient=\"vertical\",\n name=VSB,\n command=self.__canvas.yview,\n cnf=self.__cnfs[VSB])\n self.__components[VSB] = self.__vsb\n self.__vsb.grid(row=0, column=1, sticky=self.__box_sticky)\n self.__canvas.config(yscrollcommand=self.__vsb.set)\n\n def __bind_enter_leave_to_hsb(self):\n def enter_hsb(event):\n self.__hsb_under_mouse = True\n def leave_hsb(event):\n self.__hsb_under_mouse = False\n self.__hsb.bind('<Enter>', enter_hsb, \"+\")\n self.__hsb.bind('<Leave>', leave_hsb, \"+\")\n\n def __on_configure_box_canvas(self, event):\n if self.__box.winfo_exists():\n if self.__orient in (\"horizontal\", \"h\", \"x\"):\n if self.__resizable_box:\n self.__canvas.itemconfig(self.__box_id,\n height=self.__canvas.winfo_height())\n else:\n self.__canvas.config(height=self.__box.winfo_height())\n elif self.__orient in (\"vertical\", \"v\", \"y\"):\n if self.__resizable_box:\n self.__canvas.itemconfig(self.__box_id,\n width=self.__canvas.winfo_width())\n else:\n self.__canvas.config(width=self.__box.winfo_width())\n self.__canvas.config(scrollregion=self.__canvas.bbox(\"all\"))\n\n def __on_enter_body(self, event):\n self.__is_scrollable = True\n\n def __on_leave_body(self, event):\n self.__is_scrollable = False\n\n def __on_unmap_body(self, event):\n self.__is_scrollable = False\n\n def __on_destroy_body(self, event):\n self.__is_scrollable = False\n\n def __unbind_funcs(self):\n try:\n for val in (\"<Enter>\", \"<Leave>\",\n \"<Unmap>\", \"<Destroy>\",\n \"<MouseWheel>\", \"<Button-4>\",\n \"<Button-5>\", \"<Configure>\"):\n self.unbind(val)\n except Exception as e:\n pass\n\n\nclass _ScrollboxTest(View):\n\n def __init__(self, root):\n super().__init__()\n self._root = root\n self._body = None\n\n def _on_build(self):\n self._body = tk.Frame(self._root)\n # Pane 1\n pane_1 = tk.Frame(self._root)\n pane_1.pack(side=tk.LEFT, padx=10,\n pady=10, 
expand=1, fill=tk.BOTH)\n # Scrollbox 1\n scrollbox_1 = Scrollbox(pane_1, orient=VERTICAL)\n scrollbox_1.pack(pady=5, expand=1, fill=tk.BOTH)\n # Button 1\n command = (lambda self=self, box=scrollbox_1.box, side=tk.TOP:\n self._on_click_add(box, side))\n button_1 = tk.Button(pane_1, text=\"Add\",\n command=command)\n button_1.pack(side=tk.BOTTOM)\n # Pane 2\n pane_2 = tk.Frame(self._root)\n pane_2.pack(side=tk.LEFT, padx=10,\n pady=10, expand=1, fill=tk.BOTH)\n # Scrollbox 2\n scrollbox_2 = Scrollbox(pane_2, orient=HORIZONTAL)\n scrollbox_2.pack(pady=5, expand=1, fill=tk.BOTH)\n # Button 2\n command = (lambda self=self, box=scrollbox_2.box, side=tk.LEFT:\n self._on_click_add(box, side))\n button_2 = tk.Button(pane_2, text=\"Add\",\n command=command)\n button_2.pack(side=tk.BOTTOM)\n\n def _on_display(self):\n pass\n\n def _on_destroy(self):\n pass\n\n def _on_click_add(self, frame, side=tk.TOP):\n label = tk.Label(frame, text=\"<NAME>\")\n label.pack(side=side)\n\n\nif __name__ == \"__main__\":\n root = tk.Tk()\n scrollbox_test = _ScrollboxTest(root)\n scrollbox_test.build_pack(fill=tk.BOTH, expand=1)\n root.mainloop()\n", "id": "4558902", "language": "Python", "matching_score": 2.2179782390594482, "max_stars_count": 0, "path": "pyrustic/widget/scrollbox.py" }, { "content": "import tkinter as tk\n\n\ndef center_window(window, within=None):\n \"\"\" Center the window within another window (tk obj) or the screen (None)\"\"\"\n window.withdraw()\n window.update_idletasks()\n window.geometry(\"+0+0\")\n window.update_idletasks()\n width = window.winfo_width()\n height = window.winfo_height()\n if within is None:\n x = (window.winfo_screenwidth() - width) // 2\n y = (window.winfo_screenheight() - height) // 2\n else:\n if isinstance(within, tk.Tk):\n pass\n elif isinstance(within, tk.Toplevel):\n pass\n else:\n within = within.winfo_toplevel()\n data = formal_geometry(within)\n x = ((data[0] // 2) + data[2]) - (width//2)\n y = ((data[1] // 2) + data[3]) - (height//2)\n if (window.winfo_screenwidth() - x) < width:\n x = window.winfo_screenwidth() - width\n if (window.winfo_screenheight() - y) < height:\n y = window.winfo_screenheight() - height\n window.geometry(\"+{}+{}\".format(x, y))\n window.deiconify()\n\n\ndef align_window(window, under=None):\n window.withdraw()\n window.update_idletasks()\n width = window.winfo_width()\n height = window.winfo_height()\n under_x = under.winfo_rootx()\n under_y = under.winfo_rooty()\n x = under_x\n y = under_y + under.winfo_height()\n x = abs(x)\n y = abs(y)\n if window.winfo_screenwidth() - x < width:\n x = window.winfo_screenwidth() - width\n if window.winfo_screenheight() - y < height:\n y = window.winfo_screenheight() - height\n # align\n window.geometry(\"+{}+{}\".format(x, y))\n window.deiconify()\n\n\ndef dialog_effect(window):\n window.transient(window.master)\n window.lift()\n window.grab_set()\n window.focus_set()\n\n\ndef formal_geometry(window):\n width = window.winfo_width()\n height = window.winfo_height()\n coord_x = window.winfo_x()\n coord_y = window.winfo_y()\n return width, height, coord_x, coord_y\n\n\ndef merge_cnfs(main_cnfs, extra_cnfs, components=None):\n main_cnfs = {} if not main_cnfs else main_cnfs\n extra_cnfs = {} if not extra_cnfs else extra_cnfs\n components = {} if not components else components\n cnfs = {}\n # components\n for item in components:\n cnfs[item] = {}\n # parse extra_cnf\n for key, val in extra_cnfs.items():\n cnfs[key] = val\n # parse main_cnf\n for key, val in main_cnfs.items():\n if val is not None:\n 
cnfs[key] = val\n return cnfs\n", "id": "5855114", "language": "Python", "matching_score": 0.5616738796234131, "max_stars_count": 0, "path": "pyrustic/tkmisc.py" }, { "content": "import sys\nimport copy\nimport platform\nimport pkgutil\nimport tkinter as tk\nimport pyrustic\nfrom pyrustic.view import View, CustomView\nfrom pyrustic import tkmisc\nfrom pyrustic.jasonix import Jasonix\nfrom pyrustic.exception import PyrusticAppException\nfrom pyrustic.private.enhance_tk import EnhanceTk\n\n\nclass App:\n \"\"\"\n Pyrustic Framework's entry point.\n This class should be instantiated inside the file \"main.py\".\n \"\"\"\n def __init__(self, package):\n \"\"\"\n Create an App instance.\n package: the name of the package in which the caller is. Use __package__.\n\n It's recommended to don't write any code above this instantiation.\n \"\"\"\n self._package = package\n self._is_running = False\n self._restartable = False\n self._root = tk.Tk()\n self._theme = None\n self._theme_cache = None\n self._view = None\n self._view_cache = None\n self._center_window = False\n self._config = None\n self._gui_config = None\n self._exit_handler = None\n self._setup()\n\n # ============================================\n # PROPERTIES\n # ============================================\n @property\n def root(self):\n \"\"\"\n Get the main tk root\n \"\"\"\n return self._root\n\n @property\n def installed(self):\n data = pyrustic.dist(self._package)\n return True if data else False\n\n @property\n def config(self):\n return copy.deepcopy(self._config)\n\n @config.setter\n def config(self, val):\n \"\"\" val is dict, path or file-like object\"\"\"\n jasonix = Jasonix(val)\n self._config = jasonix.data\n if self._config:\n self._gui_config = self._config.get(\"gui\", self._gui_config)\n\n @property\n def gui_config(self):\n \"\"\"\n Setter et Getter\n \"\"\"\n return copy.deepcopy(self._gui_config)\n\n @gui_config.setter\n def gui_config(self, val):\n jasonix = Jasonix(val)\n self._gui_config = jasonix.data\n if self._gui_config:\n self._config[\"gui\"] = self._gui_config\n\n @property\n def theme(self):\n \"\"\"\n Get the theme object\n For more information about what a theme is:\n - check 'pyrustic.theme.Theme';\n - then check the beautiful theme 'pyrustic.theme.cyberpunk'\n \"\"\"\n return self._theme\n\n @theme.setter\n def theme(self, val):\n \"\"\"\n Set the theme object.\n If you set None, it will invalidate the previous theme.\n Don't forget to call the method \"restart()\" or \"start()\" to apply the change !\n Remember that \"start()\" should be called only once !\n For more information about what a theme is:\n - check \"pyrustic.theme.Theme\";\n - then check the beautiful theme \"pyrustic.theme.cyberpunk\"\n \"\"\"\n self._theme_cache = val\n\n @property\n def view(self):\n \"\"\"\n Get the view object.\n A view should implement \"pyrustic.viewable.Viewable\"\n \"\"\"\n return self._view\n\n @view.setter\n def view(self, val):\n \"\"\"\n Set a view object.\n If you set None, the previous view will be destroyed.\n The new view will destroy the previous one if there are a previous one.\n VAL can be a tkinter object or a callable (if u plan to REFRESH the app)\n \"\"\"\n self._view_cache = val\n\n @property\n def body(self):\n return self._body\n\n @body.setter\n def body(self, val):\n self._body = val\n\n @property\n def exit_handler(self):\n return self._exit_handler\n\n @exit_handler.setter\n def exit_handler(self, val):\n self._exit_handler = val\n\n # ============================================\n # 
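A worked example of merge_cnfs from the tkmisc module above: it pre-creates an empty dict per component, overlays extra_cnfs, then overlays the non-None entries of main_cnfs (whole per-component dicts are replaced, not deep-merged). The component names below are arbitrary.

from pyrustic.tkmisc import merge_cnfs

main = {"entry": {"width": 17}, "button": None}          # None entries are skipped
extra = {"body": {"background": "black"}, "entry": {"width": 30}}
print(merge_cnfs(main, extra, components=("body", "entry", "button")))
# {'body': {'background': 'black'},
#  'entry': {'width': 17},     # main_cnfs wins over extra_cnfs for the same key
#  'button': {}}               # untouched components stay empty dicts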
PUBLIC METHODS\n # ============================================\n def start(self):\n \"\"\"\n Call this method to start the app.\n It should be called once and put on the last line of the file.\n \"\"\"\n if self._is_running:\n message = \"This method shouldn't be called twice. Please use 'restart' instead\"\n raise PyrusticAppException(message)\n self._is_running = True\n self._build()\n # main loop\n try:\n self._root.mainloop()\n except KeyboardInterrupt:\n pass\n\n def refresh(self):\n \"\"\"\n Call this method to refresh the app.\n If you have submitted a new view or a new theme,\n the previous view or theme will be removed and the new one installed\n \"\"\"\n if not self._is_running:\n return False\n if not self._view_cache or not callable(self._view_cache):\n return False\n if self._theme_cache:\n self._apply_theme(self._theme_cache)\n self._theme_cache = None\n if self._view:\n self._view.destroy()\n self._body = None\n return self._install_view(self._view_cache,\n is_refresh=True)\n\n def exit(self):\n \"\"\"\n Exit, simply ;-)\n Depending on your config file, the application will close quickly or not.\n A quick exit will ignore the lifecycle of a Viewable (pyrustic.viewable).\n In others words, '_on_destroy()' methods won't be called.\n Exit quickly if you don't care clean-up but want the app to close as fast as possible.\n \"\"\"\n self._on_exit()\n\n def maximize(self):\n \"\"\"\n Maximize the window\n \"\"\"\n system = platform.system()\n if system == \"Linux\":\n self._root.attributes(\"-zoomed\", True)\n else: # for \"Darwin\" (OSX) and \"Window\"\n self._root.state(\"zoomed\")\n\n def center(self):\n \"\"\"\n Center the window\n \"\"\"\n self._center_window = True\n\n # ============================================\n # PRIVATE METHODS\n # ============================================\n\n def _setup(self):\n # gui_config\n self._set_config()\n # set default title\n self._set_default_title()\n\n def _build(self):\n # bind self._on_exit\n self._root.protocol(\"WM_DELETE_WINDOW\", self._on_exit)\n handler = (lambda event,\n root=self._root:\n self._on_exit() if event.widget is root else None)\n self._root.bind(\"<Destroy>\", handler)\n EnhanceTk(self._root)\n # apply config, set theme then install view\n self._apply_config()\n self._apply_theme(self._theme_cache)\n self._install_view(self._view_cache)\n\n def _set_config(self):\n self._set_gui_config()\n\n def _set_gui_config(self):\n gui_config_json = None\n gui_config_json_resource = \"pyrustic_data/gui.json\"\n default_gui_json_resource = \\\n \"manager/default_json/pyrustic_data/gui_default.json\"\n if self._package:\n try:\n gui_config_json = pkgutil.get_data(self._package,\n gui_config_json_resource)\n except Exception as e:\n pass\n if not gui_config_json:\n gui_config_json = pkgutil.get_data(__package__,\n default_gui_json_resource)\n jasonix = Jasonix(gui_config_json)\n self._gui_config = jasonix.data\n self._config = {\"gui\": self._gui_config}\n\n def _set_default_title(self):\n name = self._package\n if not self._package or \".\" in name:\n name = \"Application\"\n title = \"{} | built with Pyrustic\".format(name)\n self._root.title(title)\n\n def _apply_config(self):\n self._apply_gui_config()\n\n def _apply_gui_config(self):\n # app geometry\n if not self._gui_config[\"ignore_geometry\"]:\n self._root.geometry(self._gui_config[\"root_geometry\"])\n # background\n background_color = self._gui_config[\"root_background\"]\n self._root.config(background=background_color)\n # resizable width and height\n resizable_width 
= self._gui_config[\"resizable_width\"]\n resizable_height = self._gui_config[\"resizable_height\"]\n self._root.resizable(width=resizable_width, height=resizable_height)\n # maximize screen\n if self._gui_config[\"maximize_window\"]:\n self.maximize()\n\n def _apply_theme(self, theme):\n if self._theme:\n self._root.option_clear()\n self._theme = theme\n if not self._gui_config[\"allow_theme\"]:\n return\n if not self._theme:\n return\n self._theme.target(self._root)\n\n def _install_view(self, view, is_refresh=False):\n self._view = self._get_view(view)\n self._view.build()\n if not self._view.build():\n return False\n body = self._view.body\n if isinstance(body, tk.Frame):\n self._view.body.pack(in_=self._root,\n expand=1, fill=tk.BOTH)\n elif isinstance(body, tk.Toplevel):\n pass\n else:\n self._view.body.pack(in_=self._root)\n # center\n if not is_refresh and self._center_window:\n tkmisc.center_window(self._root)\n return True\n\n def _get_view(self, view):\n if callable(view):\n view = view()\n if isinstance(view, View):\n return view\n if isinstance(view, type) and issubclass(view, View):\n return view()\n if view is None:\n view = tk.Frame(self._root,\n bg=\"black\",\n width=350,\n height=200)\n return CustomView(body=view)\n\n def _on_exit(self):\n if self._exit_handler:\n val = self._exit_handler()\n if not val:\n return\n if self._view:\n if self._view.body:\n pass\n #self._root.iconify()\n #self._root.withdraw()\n self._view.destroy()\n self._view = None\n if self._root:\n self._root.destroy()\n self._root = None\n sys.exit()\n", "id": "12319694", "language": "Python", "matching_score": 4.514703273773193, "max_stars_count": 0, "path": "pyrustic/app.py" }, { "content": "import tkinter as tk\nfrom pyrustic import tkmisc\nfrom pyrustic.exception import PyrusticException\n\n\n# Constants\nNEW = \"new\"\nBUILT = \"built\"\nDISPLAYED = \"displayed\"\nDESTROYED = \"destroyed\"\n\n\nclass View:\n \"\"\"\n Subclass this if you are going to create a view.\n\n Lifecycle of a view:\n 1- you instantiate the view\n 2- '__init__()' is implicitly called\n 3- you call the method '.build()'\n 4- '_on_build()' is implicitly called\n 5- '_on_display()' is implicitly called once the widget is visible\n 6- '_on_destroy()' is implicitly called when the widget is destroyed/closed\n\n The rules to create your view is simple:\n - You need to subclass Viewable.\n - You need to implement the methods '_on_build()', and optionally\n implement '_on_display()' and '_on_destroy()'.\n - You need to set an instance variable '_body' with either a tk.Frame or tk.Toplevel\n in the method '_on_build()'\n That's all ! Of course, when you are ready to use the view, just call the 'build()' method.\n Calling the 'build()' method will return the body of the view. The one that you assigned\n to the instance variable '_body'. The same body can be retrieved with the property 'body'.\n The 'build()' method should be called once. Calling it more than once will still return\n the body object, but the view won't be built again.\n You can't re-build your same view instance after destroying its body.\n You can destroy the body directly, by calling the conventional tkinter destruction method\n on the view's body. But it's recommended to destroy the view by calling the view's method\n 'destroy()' inherited from the class Viewable.\n The difference between these two ways of destruction is that when u call the Viewable's\n 'destroy()' method, the method '_on_destroy()' will be called BEFORE the effective\n destruction of the body. 
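A sketch of the __main__.py entry point that the App class above expects; the package name "demo" and the MainView class are illustrative assumptions mirroring the project layout generated by the init command elsewhere in this repository.

# demo/__main__.py (names are illustrative)
from pyrustic.app import App
from demo.view.main_view import MainView   # a pyrustic.view.View subclass

app = App(__package__)       # pass the caller's package, as the docstring asks
app.view = MainView          # a View subclass, instance, callable or tk widget
app.center()                 # optional: center the root window
app.start()                  # last line: blocks in tk's mainloop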
If u call directly 'destroy' conventionally on the tkinter\n object (the body), the method '_on_destroy()' will be called AFTER the beginning\n of destruction of the body.\n\n By the way, you can use convenience methods \"build_pack\", \"build_grid\", \"build_place\"\n to build and pack/grid/place your widget in the master !!\n Use \"build_wait\" for toplevels if you want the app to wait till the window closes\n \"\"\"\n\n def __init__(self):\n self.__master = None\n self.__state = 0\n self.__built = False\n self.__bind_id = None\n self.__destroyed = False\n self._body = None\n\n # ==============================================\n # PROPERTIES\n # ==============================================\n\n @property\n def body(self):\n \"\"\"\n Get the body of this view.\n \"\"\"\n return self._body\n\n @property\n def state(self):\n \"\"\" Return the current state of the Viewable instance.\n States are integers, you can use these constants:\n - pyrustic.view.NEW: the state just after instantiation;\n - pyrustic.view.BUILT: the state after the call of on_body\n - pyrustic.view.DISPLAYED: the state after the call of on_display\n - pyrustic.view.DESTROYED: the state after the call of on_destroy\n \"\"\"\n return self.__state\n # ==============================================\n # PUBLIC METHODS\n # ==============================================\n\n def build(self):\n \"\"\"\n Build the view. Return the body\n \"\"\"\n return self.__build()\n\n def build_pack(self, cnf=None, **kwargs):\n cnf = {} if not cnf else cnf\n body = self.__build()\n self._check_missing_body(body)\n body.pack(cnf=cnf, **kwargs)\n\n def build_grid(self, cnf=None, **kwargs):\n cnf = {} if not cnf else cnf\n body = self.__build()\n self._check_missing_body(body)\n body.grid(cnf=cnf, **kwargs)\n\n def build_place(self, cnf=None, **kwargs):\n cnf = {} if not cnf else cnf\n body = self.__build()\n self._check_missing_body(body)\n body.place(cnf=cnf, **kwargs)\n\n def build_wait(self):\n \"\"\"\n Build the view. 
Return the body\n \"\"\"\n body = self.__build()\n body.wait_window(body)\n\n def destroy(self):\n \"\"\"\n Destroy the body of this view\n \"\"\"\n self.__build()\n self.__exec_on_destroy()\n\n\n # ==============================================\n # METHODS TO IMPLEMENT\n # ==============================================\n\n def _on_build(self):\n \"\"\"\n Put here the code that build the body of this view.\n The body is either a tk.Frame or a tk.Toplevel instance.\n \"\"\"\n pass\n\n def _on_display(self):\n \"\"\"\n Put here the code that will be executed once the body is visible.\n \"\"\"\n pass\n\n def _on_destroy(self):\n \"\"\"\n Put here the code that will be executed as clean-up.\n \"\"\"\n pass\n\n def _toplevel_geometry(self):\n \"\"\"\n If the body of this view is a toplevel and\n you need to change the geometry of this toplevel,\n override this method !\n \"\"\"\n tkmisc.center_window(self.body, self.__master.winfo_toplevel())\n tkmisc.dialog_effect(self.body)\n\n # ==============================================\n # INTERNAL METHODS\n # ==============================================\n\n def __build(self):\n if self.__built:\n return self.body\n self._on_build()\n self.__built = True\n self.__state = BUILT\n if not self.body:\n return\n try:\n self.__master = self.body.master\n except Exception:\n pass\n is_toplevel = isinstance(self.body, tk.Toplevel)\n #is_frame = isinstance(self.body, tk.Frame)\n if is_toplevel:\n #self.body.protocol(\"WM_DELETE_WINDOW\", self.__exec_on_destroy)\n self._toplevel_geometry()\n self.__bind_destroy_event()\n if self.body.winfo_viewable():\n self.__exec_on_display()\n else:\n self.__bind_id = self.body.bind(\"<Map>\",\n self.__exec_on_display,\n \"+\")\n return self.body\n\n def __exec_on_display(self, event=None):\n self._on_display()\n self.__state = DISPLAYED\n if self.__bind_id is not None:\n self.body.unbind(\"<Map>\", self.__bind_id)\n\n def __bind_destroy_event(self):\n command = (lambda event,\n widget=self.body,\n callback=self.__exec_on_destroy:\n callback() if event.widget is widget else None)\n self.body.bind(\"<Destroy>\", command, \"+\")\n\n def __exec_on_destroy(self):\n if not self.__built or self.__destroyed:\n return\n if not self.body:\n return\n window_manager = self.body.winfo_manager()\n \"\"\"\n # Hide the window first to avoid the visual\n # iterative (slow) destruction\n # of each child\n #\n if window_manager == \"wm\":\n if self.body.winfo_ismapped():\n self.body.withdraw()\n elif window_manager == \"grid\":\n if self.body.winfo_ismapped():\n self.body.grid_forget()\n elif window_manager == \"pack\":\n if self.body.winfo_ismapped():\n self.body.pack_forget()\n elif window_manager == \"place\":\n if self.body.winfo_ismapped():\n self.body.place_forget()\n \"\"\"\n try:\n self.body.destroy()\n except Exception as e:\n pass\n self.__destroyed = True\n self._on_destroy()\n self.__state = DESTROYED\n try:\n if self.__master.focus_get() is None:\n self.__master.winfo_toplevel().focus_lastfor().focus_force()\n except Exception as e:\n pass\n\n def _check_missing_body(self, body):\n if not body:\n raise PyrusticException(\"Missing body\")\n\n\nclass CustomView(View):\n def __init__(self, body=None, on_build=None,\n on_display=None,\n on_destroy=None,\n toplevel_geometry=None):\n super().__init__()\n self._body = body\n if on_build:\n self._on_build = on_build\n if on_display:\n self._on_display = on_display\n if on_destroy:\n self._on_destroy = on_destroy\n if toplevel_geometry:\n self._toplevel_geometry = toplevel_geometry\n", 
"id": "9038400", "language": "Python", "matching_score": 3.4238648414611816, "max_stars_count": 0, "path": "pyrustic/view.py" }, { "content": "import tkinter as tk\nfrom pyrustic.view import CustomView\n\n\"\"\"\nGUIDE to make your own widget (megawidget to be precise)\n========================================================\n\n1- subclass one of the classes:\n - pyrustic.widget.Frame\n - pyrustic.widget.Toplevel\n \n2- use the method 'build' at the end of your __init__\n\n3- be sure to use double underscores on your private attributes\n\n4- make sure to don't clash methods names with tk native methods:\n - check pyrustic/private/tk_frame_public_and_protected_attributes.txt\n - check pyrustic/private/tk_toplevel_public_and_protected_attributes.txt\n\"\"\"\nclass Frame(tk.Frame):\n\n def __init__(self, master, class_, cnf,\n on_build, on_display, on_destroy):\n super().__init__(master=master,\n class_=class_,\n cnf=cnf)\n self.__built = False\n self.__on_build = on_build\n self.__on_display = on_display\n self.__on_destroy = on_destroy\n self.__view = None\n\n def build(self):\n if self.__built:\n return\n self.__view = CustomView(body=self,\n on_build=self.__on_build,\n on_display=self.__on_display,\n on_destroy=self.__on_destroy)\n self.__view.build()\n self.__built = True\n return self.__view\n\n\nclass Toplevel(tk.Toplevel):\n\n def __init__(self, master, class_, cnf,\n on_build, on_display, on_destroy,\n toplevel_geometry):\n super().__init__(master=master,\n class_=class_,\n cnf=cnf)\n self.__built = False\n self.__on_build = on_build\n self.__on_display = on_display\n self.__on_destroy = on_destroy\n self.__toplevel_geometry = toplevel_geometry\n self.__view = None\n\n def build(self):\n if self.__built:\n return\n self.__view = CustomView(body=self,\n on_build=self.__on_build,\n on_display=self.__on_display,\n on_destroy=self.__on_destroy,\n toplevel_geometry=self.__toplevel_geometry)\n self.__view.build()\n self.__built = True\n return self.__view\n", "id": "2824905", "language": "Python", "matching_score": 1.8071684837341309, "max_stars_count": 0, "path": "pyrustic/widget/__init__.py" }, { "content": "\n\nclass Theme:\n \"\"\"\n A theme is a collection of styles and... others theme.\n \"\"\"\n def __init__(self):\n self._styles = []\n self._themes = []\n\n @property\n def styles(self):\n \"\"\"\n Get a list of styles that this theme contains\n \"\"\"\n return self._styles.copy()\n\n @property\n def themes(self):\n \"\"\"\n Get a list of theme that this theme contains\n \"\"\"\n return self._themes.copy()\n\n def add_style(self, style, scope=None):\n \"\"\"\n Add a style to this theme.\n The style is an instance of 'pyrustic.default_style._Style'.\n You don't have to directly subclass the private class '_Style'.\n Instead, subclass one of the public classes in the module 'pyrustic.default_style'.\n The scope here is an optional string.\n When you don't set the scope, the style will be applied as it.\n Example. If you add a Button style in your theme, this style will be\n applied to all buttons widgets. But you can restrict this effect to a scope.\n This scope could be by example \"*Canvas*Button.\", meaning all buttons\n that are living on all Canvas, are candidates for the given style.\n \"\"\"\n self._styles.append((style, scope))\n\n def add_theme(self, theme):\n \"\"\"\n Add a theme to this... theme\n \"\"\"\n self._themes.append(theme)\n self._styles.extend(theme.styles)\n\n def target(self, master):\n \"\"\"\n Set this theme to master. 
Master here should be the root widget of your app.\n You need to set the theme to master before installing others widgets on the master.\n \"\"\"\n for style, scope in self._styles:\n widget_class = style.widget_class\n for key, val in style.__dict__.copy().items():\n if val is None:\n continue\n pattern = None\n if scope is None:\n pattern = \"*{}.{}\".format(widget_class, key)\n else:\n pattern = \"{}{}\".format(scope, key)\n master.option_add(pattern, val)\n", "id": "9919815", "language": "Python", "matching_score": 0.4074629545211792, "max_stars_count": 0, "path": "pyrustic/theme.py" }, { "content": "import os\nimport os.path\nimport pkgutil\nfrom pyrustic.manager.misc import funcs\nfrom pyrustic.jasonix import Jasonix\n\n\nclass InitHandler:\n \"\"\"\n Description\n -----------\n Use this command to init your project.\n Pyrustic Manager will install a basic\n project structure in your project.\n The PROJECT_DIR is the project's root\n directory.\n The APP_DIR is the directory of your\n source code.\n The APP_PKG is simply the name of the\n root package of your source code.\n\n Usage\n -----\n - Description: Init your project\n - Command: init\n\n Example\n -------\n Assume the linked target is:\n /home/alex/demo\n This target is also your project root\n directory. And 'demo' is your project's\n name. So let's assume that your target\n is an empty directory.\n When you issue the command 'init', this\n is what the project root will look like:\n demo # target or PROJECT_ROOT\n demo # APP_PKG or APP_DIR, source here\n __main__.py # entry point\n __init__.py\n version.py # __version__ = \"0.0.1\"\n view # the demo.view package\n main_view.py # module\n pyrustic_data # folder\n hubstore.json\n gui.json # configure your GUI\n tests\n __init__.py\n setup.py\n setup.cfg # edit your project config\n pyproject.toml\n MANIFEST.in # don't worry, I take care\n\n So when you want to add a file \"my_file.txt\"\n and the module \"mod.py\" in the package\n demo.view, you issue in the manager:\n - add demo.view my_file.txt mod.py\n\n \"\"\"\n def __init__(self, target, app_pkg, args):\n self._target = target\n self._app_pkg = app_pkg\n self._process(args)\n\n def _process(self, args):\n if not self._target:\n print(\"You should link a Target first\")\n return\n if args:\n print(\"Wrong usage of this command\")\n return\n # ask for app_pkg\n self._set_app_pkg()\n # create package\n self._make_packages()\n # create folders\n self._make_folders()\n # add files\n self._add_files()\n # add json data files\n self._add_json_data_files()\n print(\"Successfully initialized !\")\n\n def _make_packages(self):\n hooking_pkg = \"{}.hooking\".format(self._app_pkg)\n packages = (self._app_pkg, \"tests\", hooking_pkg)\n for package in packages:\n funcs.build_package(self._target, package)\n app_dir = os.path.join(self._target, self._app_pkg)\n packages = (\"view\", )\n for package in packages:\n funcs.build_package(app_dir, package)\n\n def _make_folders(self):\n folders = (\"pyrustic_data\",)\n for folder in folders:\n path = os.path.join(self._target, self._app_pkg, folder)\n if os.path.exists(path):\n continue\n os.mkdir(path)\n\n def _add_files(self):\n resource_prefix = \"manager/template/\"\n # add version.py\n resource = resource_prefix + \"version_template.txt\"\n app_dir = os.path.join(self._target, self._app_pkg)\n dest_path = os.path.join(app_dir, \"version.py\")\n data = pkgutil.get_data(\"pyrustic\", resource).decode(\"utf-8\")\n self._add_file(dest_path, data)\n # add __main__.py\n resource = 
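A sketch of how Theme.target() consumes styles. The real style classes live in pyrustic.default_style (not shown here), so the ButtonStyle below is a hand-rolled stand-in: target() only reads the widget_class attribute and the non-None instance attributes, turning each into a Tk option-database entry.

import tkinter as tk
from pyrustic.theme import Theme

class ButtonStyle:                  # stand-in for a pyrustic.default_style style
    widget_class = "Button"         # class attribute: not iterated by target()
    def __init__(self):
        self.background = "black"   # instance attributes become option entries
        self.foreground = "white"

root = tk.Tk()
theme = Theme()
theme.add_style(ButtonStyle())                            # applied to every Button
theme.add_style(ButtonStyle(), scope="*Canvas*Button.")   # or restricted to a scope
theme.target(root)                  # set the theme before creating the widgets
tk.Button(root, text="Styled").pack(padx=20, pady=20)
root.mainloop()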
resource_prefix + \"main_template.txt\"\n dest_path = os.path.join(app_dir, \"__main__.py\")\n data = pkgutil.get_data(\"pyrustic\", resource).decode(\"utf-8\")\n data = data.format(app_pkg=self._app_pkg)\n self._add_file(dest_path, data)\n # add main_view.py\n resource = resource_prefix + \"main_view_template.txt\"\n dest_path = os.path.join(app_dir, \"view\", \"main_view.py\")\n data = pkgutil.get_data(\"pyrustic\", resource).decode(\"utf-8\")\n self._add_file(dest_path, data)\n # add .gitignore\n resource = resource_prefix + \"gitignore_template.txt\"\n dest_path = os.path.join(self._target, \".gitignore\")\n data = pkgutil.get_data(\"pyrustic\", resource).decode(\"utf-8\")\n self._add_file(dest_path, data)\n # add LICENSE\n resource = resource_prefix + \"license_template.txt\"\n dest_path = os.path.join(self._target, \"LICENSE\")\n data = pkgutil.get_data(\"pyrustic\", resource).decode(\"utf-8\")\n self._add_file(dest_path, data)\n # add README.md\n resource = resource_prefix + \"readme_template.txt\"\n dest_path = os.path.join(self._target, \"README.md\")\n data = pkgutil.get_data(\"pyrustic\", resource).decode(\"utf-8\")\n self._add_file(dest_path, data)\n # add MANIFEST.in\n resource = resource_prefix + \"manifest_template.txt\"\n dest_path = os.path.join(self._target, \"MANIFEST.in\")\n data = pkgutil.get_data(\"pyrustic\", resource).decode(\"utf-8\")\n data = data.format(app_pkg=self._app_pkg)\n self._add_file(dest_path, data)\n # add setup.py\n resource = resource_prefix + \"setup_py_template.txt\"\n dest_path = os.path.join(self._target, \"setup.py\")\n data = pkgutil.get_data(\"pyrustic\", resource).decode(\"utf-8\")\n self._add_file(dest_path, data)\n # add setup.cfg\n resource = resource_prefix + \"setup_cfg_template.txt\"\n dest_path = os.path.join(self._target, \"setup.cfg\")\n data = pkgutil.get_data(\"pyrustic\", resource).decode(\"utf-8\")\n data = data.format(project_name=os.path.basename(self._target),\n app_pkg=self._app_pkg)\n self._add_file(dest_path, data)\n # add pyproject.toml\n resource = resource_prefix + \"pyproject_template.txt\"\n dest_path = os.path.join(self._target, \"pyproject.toml\")\n data = pkgutil.get_data(\"pyrustic\", resource).decode(\"utf-8\")\n self._add_file(dest_path, data)\n # add pre_building_hook.py\n resource = resource_prefix + \"pre_building_hook_template.txt\"\n dest_path = os.path.join(self._target, self._app_pkg,\n \"hooking\",\n \"pre_building_hook.py\")\n data = pkgutil.get_data(\"pyrustic\", resource).decode(\"utf-8\")\n self._add_file(dest_path, data)\n # add post_building_hook.py\n resource = resource_prefix + \"post_building_hook_template.txt\"\n dest_path = os.path.join(self._target, self._app_pkg,\n \"hooking\",\n \"post_building_hook.py\")\n data = pkgutil.get_data(\"pyrustic\", resource).decode(\"utf-8\")\n self._add_file(dest_path, data)\n # add pre_publishing_hook.py\n resource = resource_prefix + \"pre_publishing_hook_template.txt\"\n dest_path = os.path.join(self._target, self._app_pkg,\n \"hooking\",\n \"pre_publishing_hook.py\")\n data = pkgutil.get_data(\"pyrustic\", resource).decode(\"utf-8\")\n self._add_file(dest_path, data)\n # add post_publishing_hook.py\n resource = resource_prefix + \"post_publishing_hook_template.txt\"\n dest_path = os.path.join(self._target, self._app_pkg,\n \"hooking\",\n \"post_publishing_hook.py\")\n data = pkgutil.get_data(\"pyrustic\", resource).decode(\"utf-8\")\n self._add_file(dest_path, data)\n\n def _add_json_data_files(self):\n local_pyrustic_data_folder = 
os.path.join(self._target,\n self._app_pkg,\n \"pyrustic_data\")\n resource_prefix = \"manager/default_json/pyrustic_data/\"\n # add dev.json\n path = os.path.join(local_pyrustic_data_folder, \"dev.json\")\n default_resource = resource_prefix + \"dev_default.json\"\n data = pkgutil.get_data(\"pyrustic\", default_resource)\n if not os.path.exists(path):\n with open(path, \"wb\") as file:\n file.write(data)\n jasonix = Jasonix(path)\n jasonix.data[\"hooking_pkg\"] = \"{}.hooking\".format(self._app_pkg)\n jasonix.save()\n # add gui.json\n path = os.path.join(local_pyrustic_data_folder, \"gui.json\")\n default_resource = resource_prefix + \"gui_default.json\"\n data = pkgutil.get_data(\"pyrustic\", default_resource)\n if not os.path.exists(path):\n with open(path, \"wb\") as file:\n file.write(data)\n # add publishing.json\n path = os.path.join(local_pyrustic_data_folder,\n \"publishing.json\")\n default_resource = resource_prefix + \"publishing_default.json\"\n data = pkgutil.get_data(\"pyrustic\", default_resource)\n if not os.path.exists(path):\n with open(path, \"wb\") as file:\n file.write(data)\n # add hubstore.json\n path = os.path.join(local_pyrustic_data_folder, \"hubstore.json\")\n default_resource = resource_prefix + \"hubstore_default.json\"\n data = pkgutil.get_data(\"pyrustic\", default_resource)\n if not os.path.exists(path):\n with open(path, \"wb\") as file:\n file.write(data)\n\n def _add_file(self, path, data):\n if os.path.exists(path):\n return\n with open(path, \"w\") as file:\n file.write(data)\n\n def _set_app_pkg(self):\n if self._app_pkg is not None:\n return\n self._app_pkg = os.path.basename(self._target)\n", "id": "9093468", "language": "Python", "matching_score": 3.498831272125244, "max_stars_count": 0, "path": "pyrustic/manager/handler/init_handler.py" }, { "content": "import os\nimport os.path\nimport shutil\nfrom pyrustic.manager import constant\nfrom pyrustic.jasonix import Jasonix\nfrom pyrustic.gurl import Gurl\nimport pyrustic\nfrom setuptools.config import read_configuration\n\n\ndef check_project_state(target):\n \"\"\" Target is the path to the project\n Return:\n 0: all right\n 1: uninitialized project\n 2: uninstalled project\n \"\"\"\n app_pkg = os.path.basename(target)\n app_dir = os.path.join(target, app_pkg)\n if not os.path.exists(app_dir):\n return 1\n data = pyrustic.dist(app_pkg)\n if not data:\n return 2\n return 0\n\n\ndef get_app_pkg(target):\n app_pkg = None\n if not target or not os.path.exists(target):\n return app_pkg\n config = setup_config(target)\n if config:\n app_pkg = config.get(\"name\", None)\n if app_pkg:\n return app_pkg\n app_pkg = os.path.basename(target)\n cache = app_pkg.split(\"-\")\n app_pkg = \"_\".join(cache)\n return app_pkg\n\n\ndef setup_config(target):\n \"\"\" Get the metadata from setup.cfg \"\"\"\n setup_cfg = os.path.join(target, \"setup.cfg\")\n if not os.path.exists(setup_cfg):\n return None\n return read_configuration(setup_cfg)[\"metadata\"]\n\n\ndef wheels_assets(target):\n dist_folder = os.path.join(target,\n \"dist\")\n if not os.path.exists(dist_folder):\n return []\n assets = []\n for item in os.listdir(dist_folder):\n _, ext = os.path.splitext(item)\n if ext != \".whl\":\n continue\n path = os.path.join(dist_folder, item)\n if not os.path.isfile(path):\n continue\n assets.append(item)\n assets = _sort_wheels_names(assets)\n assets.reverse()\n return assets\n\n\ndef copyto(src, dest):\n \"\"\"\n Please make sure that DEST doesn't exist yet !\n Copy a file or contents of directory (src) to a destination 
file or folder (dest)\n \"\"\"\n if not os.path.exists(src) or os.path.exists(dest):\n return False\n if os.path.isdir(src):\n try:\n shutil.copytree(src, dest)\n except Exception as e:\n return False\n else:\n try:\n shutil.copy2(src, dest)\n except Exception as e:\n return False\n return True\n\n\ndef moveto(src, dest):\n \"\"\"\n If the DEST exists:\n * Before moveto *\n - /home/lake (SRC)\n - /home/lake/fish.txt\n - /home/ocean (DEST)\n * Moveto *\n moveto(\"/home/lake\", \"/home/ocean\")\n * After Moveto *\n - /home/ocean\n - /home/ocean/lake\n - /home/ocean/lake/fish.txt\n Else IF the DEST doesn't exist:\n * Before moveto *\n - /home/lake (SRC)\n - /home/lake/fish.txt\n * Moveto *\n moveto(\"/home/lake\", \"/home/ocean\")\n * After Moveto *\n - /home/ocean\n - /home/ocean/fish.txt\n\n\n Move a file or directory (src) to a destination folder (dest)\n \"\"\"\n if not os.path.exists(src) or os.path.exists(dest):\n return False\n try:\n shutil.move(src, dest)\n except Exception as e:\n return False\n return True\n\n\ndef package_name_to_path(target, package_name, prefix=\"\"):\n # returns a dotted package name to a regular pathname\n # example: package_name_to_path(\"/home/proj\", \"view.lol\", prefix=\"tests.\")\n return os.path.join(target, *((prefix + package_name).split(\".\")))\n\n\ndef build_package(target, package_name, prefix=\"\"):\n \"\"\"\n Literally build a package, returns None or the string pathname\n package represented by prefix must already exist\n \"\"\"\n splitted = package_name.split(\".\")\n dir = package_name_to_path(target, prefix) if prefix else target\n for item in splitted:\n dir = os.path.join(dir, item)\n if os.path.exists(dir):\n continue\n try:\n os.mkdir(dir)\n with open(os.path.join(dir, \"__init__.py\"), \"w\") as file:\n pass\n except Exception as e:\n return None\n if not os.path.isdir(dir):\n return None\n return dir\n\n\ndef module_name_to_class(module_name):\n \"\"\"\n Convert a module name like my_module.py to a class name like MyModule\n \"\"\"\n name = os.path.splitext(module_name)[0]\n # ...\n if not \"_\" in name:\n return strictly_capitalize(name)\n else:\n splitted = name.split(\"_\")\n cache = []\n for x in splitted:\n cache.append(strictly_capitalize(x))\n return \"\".join(cache)\n\n\ndef strictly_capitalize(string):\n # I don't remember why I haven't used str.capitalize()\n return string[0].upper() + string[1:]\n\n\ndef get_root_from_package(package_name):\n \"\"\"\n Return the root from a dotted package name.\n Example the root here \"my.package.is.great\" is \"my\".\n \"\"\"\n splitted = package_name.split(\".\")\n root = None\n for x in splitted:\n if x == \"\" or x.isspace():\n continue\n root = x\n break\n return root\n\n\ndef get_manager_jasonix(readonly=True):\n jasonix = Jasonix(constant.MANAGER_SHARED_DATA_FILE,\n readonly=readonly)\n return jasonix\n\n\ndef get_sqleditor_jasonix(readonly=True):\n jasonix = Jasonix(constant.SQLEDITOR_SHARED_DATA_FILE,\n readonly=readonly)\n return jasonix\n\n\ndef get_runtest_jasonix(readonly=True):\n jasonix = Jasonix(constant.RUNTEST_SHARED_DATA_FILE,\n readonly=readonly)\n return jasonix\n\n\ndef get_hub_jasonix(readonly=True):\n jasonix = Jasonix(constant.HUB_SHARED_DATA_FILE,\n readonly=readonly)\n return jasonix\n\n\ndef create_gurl():\n headers = {\"Accept\": \"application/vnd.github.v3+json\",\n \"User-Agent\": \"Pyrustic\"}\n gurl = Gurl(headers=headers)\n return gurl\n\n\ndef get_hub_url(res):\n target = \"https://api.github.com\"\n return \"{}{}\".format(target, res)\n\n\ndef 
_sort_wheels_names(data):\n cache = list()\n for name in data:\n version = name.split(\"-\")[1]\n cache.append((version, name))\n cache.sort(key=lambda s: [int(i) for i in s[0].split('.')])\n return [name for version, name in cache]\n", "id": "4753877", "language": "Python", "matching_score": 2.214895486831665, "max_stars_count": 0, "path": "pyrustic/manager/misc/funcs.py" }, { "content": "from pyrustic.manager.misc import funcs\nimport os\nimport os.path\nimport pkgutil\n\n\nclass AddHandler:\n \"\"\"\n Description\n -----------\n Use this command to add a file, a package or a regular folder\n to the Target.\n Pyrustic Manager will assume that files with extension \".py\"\n are modules.\n Some convenient lines of code may be automatically inserted\n into modules based on the package name.\n\n Usage\n -----\n - Description: Add a file\n - Command: add <destination> <file.ext>\n\n - Description: Add multiple files\n - Command: add <destination> <file_1.ext> <file_2.ext>\n\n - Description: Add a module\n - Command: add <destination> <file.py>\n\n - Description: Add a package\n - Command: add <my.pack.age>\n\n - Description: Add a file to the Target's APP_DIR\n - Command: add ./ <file.ext>\n\n - Description: Add a module to the Target's APP_DIR\n - Command: add . <file.py>\n\n Note: The destination is either a relative path to the\n Target's APP_DIR, or a package name.\n Also, only SLASH is allowed in the path as separator.\n\n Example\n -------\n - Description: Add a module\n - Preliminary: Assume you want to add \"my_view.py\"\n to the package 'demo.view'\n - Command: add demo.view my_view.py\n\n - Description: Add a package\n - Preliminary: Assume you want to add 'my.new.package'\n - Command: add my.new.package\n\n Note: Please use simple or double quotes as delimiters if a\n string contains space\n \"\"\"\n def __init__(self, target,\n app_pkg, args):\n self._target = target\n self._app_pkg = app_pkg\n self._args = args\n self._process(target, app_pkg, args)\n\n def _process(self, target, app_pkg, args):\n if not target:\n self._print_catalog(\"missing_target\")\n return\n if not app_pkg:\n print(\"Please init the project first. 
Check 'help init'.\")\n return\n if not args:\n self._print_catalog(\"incomplete\")\n return\n # package or folder\n package_or_folder_name = args[0]\n if (package_or_folder_name == \".\"\n or (\"/\" not in package_or_folder_name\n and \"\\\\\" not in package_or_folder_name)):\n self._process_package_and_files(target, args)\n else:\n self._process_folder_and_files(target, args)\n\n def _process_package_and_files(self, target, args):\n package_name = args[0]\n if len(args) > 1:\n if not self._handle_package_part(target, package_name, verbose=False):\n return\n self._add_files_into_package(target, package_name, args[1:])\n else:\n self._handle_package_part(target, package_name)\n\n def _process_folder_and_files(self, target, args):\n folder = args[0]\n if len(args) > 1:\n if not self._handle_folder_part(target, folder, verbose=False):\n return\n self._add_files_into_folder(target, folder, args[1:])\n else:\n self._handle_folder_part(target, folder)\n\n def _handle_package_part(self, target, package_name, verbose=True):\n path = funcs.package_name_to_path(target, package_name)\n if os.path.isdir(path):\n if verbose:\n self._print_catalog(\"package_already_exists\")\n return True\n self._print_catalog(\"package_not_exists\")\n self._print_catalog(\"building_package\", package_name=package_name)\n path = funcs.build_package(target, package_name)\n if path is None:\n self._print_catalog(\"failed_to_build_package\")\n return False\n else:\n self._print_catalog(\"package_built\")\n return True\n\n def _add_files_into_package(self, target, package_name, files):\n for file in files:\n # Is module\n if file.endswith(\".py\"):\n if not self._install_module(target, package_name, file):\n return False\n # Is simple file\n else:\n if not self._install_file(target, package_name, file):\n return False\n return True\n\n def _handle_folder_part(self, target, folder, verbose=True):\n path = os.path.join(target, folder)\n path = os.path.normpath(path)\n if os.path.isdir(path):\n if verbose:\n self._print_catalog(\"folder_already_exists\")\n return True\n self._print_catalog(\"folder_not_exists\")\n self._print_catalog(\"creating_folder\", folder=folder)\n try:\n os.makedirs(path)\n except Exception as e:\n self._print_catalog(\"failed_to_create_folder\")\n return False\n else:\n self._print_catalog(\"folder_created\")\n return True\n\n def _add_files_into_folder(self, target, folder, files):\n path = os.path.join(target, folder)\n path = os.path.normpath(path)\n for filename in files:\n path = os.path.join(path, filename)\n if os.path.exists(path):\n self._print_catalog(\"file_exists\", filename=filename)\n continue\n try:\n with open(path, \"w\") as file:\n pass\n except Exception as e:\n self._print_catalog(\"file_creation_failed\", filename=filename)\n return False\n self._print_catalog(\"file_created\", filename=filename)\n return True\n\n def _install_module(self, target, package_name, filename):\n # creating file\n class_name = funcs.module_name_to_class(filename)\n path = funcs.package_name_to_path(target, package_name)\n path = os.path.join(path, filename)\n if os.path.exists(path):\n self._print_catalog(\"module_exists\", module=filename)\n return True\n resource_prefix = \"manager/template/\"\n if not self._app_pkg:\n self._print_catalog(\"module_created\", module=filename)\n return True\n with open(path, \"wb\") as file:\n if package_name.startswith(\"{}.view\".format(self._app_pkg)):\n resource = resource_prefix + \"view_template.txt\"\n data = pkgutil.get_data(\"pyrustic\", 
resource).decode(\"utf-8\")\n data = data.format(app_pkg=self._app_pkg,\n class_name=class_name)\n file.write(data)\n else:\n print(\"HEEEREE !!!!\")\n resource = resource_prefix + \"module_template.txt\"\n data = pkgutil.get_data(\"pyrustic\", resource).decode(\"utf-8\")\n data = data.format(app_pkg=self._app_pkg,\n class_name=class_name)\n file.write(data)\n self._print_catalog(\"module_created\", module=filename)\n return True\n\n def _install_file(self, target, package_name, filename):\n path = funcs.package_name_to_path(target, package_name)\n path = os.path.join(path, filename)\n if os.path.exists(path):\n self._print_catalog(\"file_exists\", filename=filename)\n return True\n try:\n with open(path, \"w\") as file:\n pass\n except Exception as e:\n self._print_catalog(\"file_creation_failed\", filename=filename)\n return False\n self._print_catalog(\"file_created\", filename=filename)\n return True\n\n def _create_file(self):\n pass\n\n def _get_data_from_template(self, path):\n data = \"\"\n with open(path, \"r\") as file:\n data = file.read()\n return data\n\n def _print_catalog(self, item, **kwargs):\n message = \"\"\n if item == \"missing_target\":\n message = \"Please link a Target first. Check 'help target'.\"\n elif item == \"incomplete\":\n message = \"Incomplete command. Check 'help add'.\"\n elif item == \"package_already_exists\":\n message = \"This package already exists !\"\n elif item == \"package_not_exists\":\n message = \"This package doesn't exist yet.\"\n elif item == \"building_package\":\n message = \"Building package '{}'\".format(kwargs[\"package_name\"])\n elif item == \"package_built\":\n message = \"Package successfully built.\"\n elif item == \"failed_to_build_package\":\n message = \"Failed to finish the package building.\"\n elif item == \"file_created\":\n message = \"File '{}' successfully created.\".format(kwargs[\"filename\"])\n elif item == \"file_creation_failed\":\n message = \"Failed to create file '{}'.\".format(kwargs[\"filename\"])\n elif item == \"file_exists\":\n message = \"The file '{}' already exists.\".format(kwargs[\"filename\"])\n elif item == \"module_exists\":\n message = \"The module '{}' already exists.\".format(kwargs[\"module\"])\n elif item == \"module_created\":\n message = \"Module '{}' successfully created.\".format(kwargs[\"module\"])\n elif item == \"folder_already_exists\":\n message = \"This folder already exists !\"\n elif item == \"folder_not_exists\":\n message = \"This folder doesn't exist yet.\"\n elif item == \"creating_folder\":\n message = \"Creating folder '{}'\".format(kwargs[\"folder\"])\n elif item == \"folder_created\":\n message = \"Folder successfully created.\"\n elif item == \"failed_to_create_folder\":\n message = \"Failed to create the folder.\"\n print(message)\n", "id": "6804853", "language": "Python", "matching_score": 4.020310878753662, "max_stars_count": 0, "path": "pyrustic/manager/handler/add_handler.py" }, { "content": "import sys\nimport subprocess\nimport os.path\n\n\nclass RunHandler:\n \"\"\"\n Description\n -----------\n Use this command to run a module located in the Target.\n Only dotted name of a module is allowed, so please ignore\n the extension \".py\".\n Running a module blocks Pyrustic Manager.\n\n Usage\n -----\n - Description: Run a module\n - Command: run <the.module.name>\n\n - Description: Run the Target\n - Command: run\n - Note: The Manager will implicitly execute __main__.py\n\n - Description: Run a module with some arguments\n - Command: run <the.module.name> <argument_1> 
<argument_2>\n\n Example\n -------\n - Description: Run the module\n - Preliminary: Assume that \"my_view.py\" is in the \"view\" package\n - Command: run view.my_view\n\n - Description: Run the module with arguments\n - Preliminary: Assume that \"my_view.py\" is in the \"view\" package\n - Command: run view.my_view argument_1 \"argument 2\"\n\n Note: Please use simple or double quotes as delimiters if a string\n contains space\n \"\"\"\n def __init__(self, target, app_pkg, args):\n self._target = target\n self._app_pkg = app_pkg\n self._process(target, app_pkg, args)\n\n def _process(self, target, app_pkg, args):\n name = None\n if len(args) == 0:\n if not target:\n self._print_catalog(\"missing_target\")\n return\n elif not app_pkg:\n self._print_catalog(\"missing_app_pkg\")\n return\n source_dir = os.path.join(target, app_pkg)\n if os.path.exists(os.path.join(source_dir, \"__main__.py\")):\n args = [\"-m\", app_pkg]\n name = \"__main__\"\n else:\n print(\"Missing entry point\")\n else:\n args = [\"-m\", *args]\n name = \" \".join(args)\n if not args:\n return\n if sys.executable:\n self._print_catalog(\"running\", module=name)\n p = subprocess.Popen([sys.executable, *args], cwd=target)\n p.communicate()\n\n def _print_catalog(self, item, **kwargs):\n message = \"\"\n if item == \"missing_target\":\n message = \"Please link a Target first. Check 'help target'.\"\n elif item == \"running\":\n message = \"Running '{}' ...\".format(kwargs[\"module\"])\n elif item == \"missing_app_pkg\":\n message = \"Please init the project first. Check 'help init'.\"\n print(message)\n", "id": "7697205", "language": "Python", "matching_score": 1.1312165260314941, "max_stars_count": 0, "path": "pyrustic/manager/handler/run_handler.py" }, { "content": "import sys\nimport time\nimport subprocess\nimport os\nimport os.path\nimport pkgutil\nfrom pyrustic.jasonix import Jasonix\nfrom pyrustic.manager.misc.lite_test_runner import LiteTestRunner\nfrom pyrustic.manager.misc.funcs import setup_config, wheels_assets\n\n\nclass BuildHandler:\n \"\"\"\n Description\n -----------\n Use this command to build a distribution package\n that could be published later with Hub.\n This command will block the Pyrustic Manager.\n The distribution package is a Wheel.\n\n Usage\n -----\n - Description: Build\n - Command: build\n \"\"\"\n\n def __init__(self, target, app_pkg):\n self._target = target\n self._app_pkg = app_pkg\n self._version = None\n self._pre_building_hook = None\n self._post_building_hook = None\n self._process(target, app_pkg)\n\n def _process(self, target, app_pkg):\n if target is None:\n print(\"Please link a Target first. Check 'help target'.\")\n return\n if not app_pkg:\n print(\"Please initialize this project first. 
Check 'help init'.\")\n return\n # version\n self._version = self._get_version()\n # get confirmation\n message = \"You are going to build '{}' v{}.\".format(app_pkg,\n self._version)\n print(message)\n if not self._ask_for_confirmation(\"\\nDo you want to continue ?\"):\n return\n print(\"\")\n # tests\n if not self._run_tests():\n return\n # build\n pre, post = self._get_hooks()\n self._pre_building_hook, self._post_building_hook = pre, post\n if not self._run_pre_building_hook():\n return\n print(\"\\nBuilding...\")\n if sys.executable:\n args = [\"setup.py\", \"--quiet\", \"sdist\", \"bdist_wheel\"]\n p = subprocess.Popen([sys.executable, *args], cwd=target)\n p.communicate()\n code = p.returncode\n if code == 0:\n self._gen_build_report()\n if not self._run_post_building_hook():\n return\n print(\"\\nSuccessfully built !\")\n return\n print(\"\\nFailed to built the project.\\nReturn code: {}\".format(code))\n\n def _run_tests(self):\n if not self._ask_for_confirmation(\"Do you want to run tests first ?\",\n \"n\"):\n return True\n print(\"Running tests...\")\n test_exist, test_success, test_result = self._test_runner()\n if not test_exist:\n print(\"There aren't Tests\\n\")\n return True\n if test_success:\n print(\"Testing passed\\n\")\n return True\n else:\n print(\"Testing failed\")\n print(test_result)\n return False\n\n def _test_runner(self):\n test_path = os.path.join(self._target, \"tests\")\n test_success = True\n test_result = None\n test_exist = False\n if os.path.exists(test_path):\n test_exist = True\n test_host = LiteTestRunner(test_path, self._target)\n test_success, test_result = test_host.run()\n return test_exist, test_success, test_result\n\n def _ask_for_confirmation(self, message, default=\"y\"): #TODO: fix, EOF error (ctrl+d)\n cache = \"Y/n\" if default == \"y\" else \"y/N\"\n user_input = input(\"{} ({}): \".format(message, cache))\n if not user_input:\n user_input = default\n if user_input.lower() == \"y\":\n return True\n return False\n\n def _run_pre_building_hook(self):\n if not self._pre_building_hook:\n return True\n args = [\"-m\", self._pre_building_hook, self._target,\n self._app_pkg, self._version]\n p = subprocess.Popen([sys.executable, *args],\n cwd=self._target)\n p.communicate()\n code = p.returncode\n print(\"\")\n if code == 0:\n print(\"pre_building_hook.py executed with success\")\n return True\n else:\n print(\"Failed to execute pre_building_hook.py\")\n return False\n\n def _run_post_building_hook(self):\n if not self._post_building_hook:\n return True\n args = [\"-m\", self._post_building_hook, self._target,\n self._app_pkg, self._version]\n p = subprocess.Popen([sys.executable, *args],\n cwd=self._target)\n p.communicate()\n code = p.returncode\n print(\"\")\n if code == 0:\n print(\"post_building_hook.py executed with success\")\n return True\n else:\n print(\"Failed to execute post_building_hook.py\")\n return False\n\n def _gen_build_report(self):\n pyrustic_data_path = os.path.join(self._target,\n self._app_pkg,\n \"pyrustic_data\")\n try:\n os.mkdir(pyrustic_data_path)\n except Exception as e:\n pass\n res = \"manager/default_json/pyrustic_data/build_report_default.json\"\n default_build_report_json = pkgutil.get_data(\"pyrustic\", res)\n build_report_json = os.path.join(pyrustic_data_path,\n \"build_report.json\")\n jasonix = Jasonix(build_report_json,\n default=default_build_report_json)\n jasonix.data[\"timestamp\"] = int(time.time())\n jasonix.data[\"target\"] = self._target\n jasonix.data[\"app_pkg\"] = self._app_pkg\n 
wheels_assets_list = wheels_assets(self._target)\n wheel_asset = None\n if wheels_assets_list:\n wheel_asset = wheels_assets_list[0]\n wheel_asset = os.path.join(self._target,\n \"dist\",\n wheel_asset)\n jasonix.data[\"app_version\"] = self._version\n jasonix.data[\"wheel_asset\"] = wheel_asset\n jasonix.save()\n\n def _get_hooks(self):\n dev_json = os.path.join(self._target,\n self._app_pkg,\n \"pyrustic_data\",\n \"dev.json\")\n if not os.path.exists(dev_json):\n return None, None\n jasonix = Jasonix(dev_json)\n hooks_package = jasonix.data.get(\"hooking_pkg\", None)\n if not hooks_package:\n return None, None\n pre_building_hook = \"{}.pre_building_hook\".format(hooks_package)\n post_building_hook = \"{}.post_building_hook\".format(hooks_package)\n return pre_building_hook, post_building_hook\n\n def _get_version(self):\n setup_config_dict = setup_config(self._target)\n version = None\n if setup_config_dict:\n version = setup_config_dict[\"version\"]\n return version\n", "id": "2365903", "language": "Python", "matching_score": 5.455840587615967, "max_stars_count": 0, "path": "pyrustic/manager/handler/build_handler.py" }, { "content": "import os\nimport os.path\nimport sys\nimport subprocess\nimport getpass\nfrom pyrustic.jasonix import Jasonix\nfrom pyrustic.manager.hubway import Catapult\nfrom pyrustic.manager.misc.funcs import create_gurl, setup_config\n\n\nclass PublishHandler:\n \"\"\"\n Description\n -----------\n Use this command to build a distribution package\n that could be published later with Hub.\n This command will block the Pyrustic Manager.\n The distribution package is a Wheel.\n\n Usage\n -----\n - Description: Build\n - Command: build\n \"\"\"\n\n def __init__(self, target, app_pkg):\n self._target = target\n self._app_pkg = app_pkg\n self._version = None\n self._pre_publishing_hook = None\n self._post_publishing_hook = None\n self._process(target, app_pkg)\n\n def _process(self, target, app_pkg):\n if target is None:\n print(\"Please link a Target first. Check 'help target'.\")\n return\n if not app_pkg:\n print(\"Please initialize this project first. 
Check 'help init'.\")\n return\n # get version\n self._version = self._get_version()\n # get confirmation\n message = \"You are going to publish '{}' v{}.\".format(app_pkg,\n self._version)\n print(message)\n if not self._ask_for_confirmation(\"\\nDo you want to continue ?\"):\n return\n print(\"\")\n # check build_report\n if not self._check_build_report():\n return\n # get publishing hooks\n cache = self._get_hooks()\n self._pre_publishing_hook, self._post_publishing_hook = cache\n # execute pre_publishing_hook.py\n if not self._run_pre_publishing_hook():\n return\n # publish\n if not self._publish():\n return\n # execute post_publishing_hook.py\n if not self._run_post_publishing_hook():\n return\n print(\"\\nSuccessfully published !\")\n\n def _check_build_report(self):\n path = os.path.join(self._target, self._app_pkg,\n \"pyrustic_data\", \"build_report.json\")\n if os.path.exists(path):\n return True\n text = (\"\\nMissing 'build_report.json' file in 'pyrustic_data' folder.\",\n \"Please build the project first with the command 'build'\")\n print(\"\\n\".join(text))\n return False\n\n def _run_pre_publishing_hook(self):\n if not self._pre_publishing_hook:\n return True\n args = [\"-m\", self._pre_publishing_hook, self._target,\n self._app_pkg, self._version]\n p = subprocess.Popen([sys.executable, *args],\n cwd=self._target)\n p.communicate()\n code = p.returncode\n if code == 0:\n print(\"pre_publishing_hook.py executed with success\")\n return True\n else:\n print(\"Failed to execute pre_publishing_hook.py\")\n return False\n\n def _run_post_publishing_hook(self):\n if not self._post_publishing_hook:\n return True\n args = [\"-m\", self._post_publishing_hook, self._target,\n self._app_pkg, self._version]\n p = subprocess.Popen([sys.executable, *args],\n cwd=self._target)\n p.communicate()\n code = p.returncode\n print(\"\")\n if code == 0:\n print(\"post_publishing_hook.py executed with success\")\n return True\n else:\n print(\"Failed to execute pre_publishing_hook.py\")\n return False\n\n def _get_hooks(self):\n dev_json = os.path.join(self._target,\n self._app_pkg,\n \"pyrustic_data\",\n \"dev.json\")\n if not os.path.exists(dev_json):\n return None, None\n jasonix = Jasonix(dev_json)\n hooks_package = jasonix.data.get(\"hooking_pkg\", None)\n if not hooks_package:\n return None, None\n pre_publishing_hook = \"{}.pre_publishing_hook\".format(hooks_package)\n post_publishing_hook = \"{}.post_publishing_hook\".format(hooks_package)\n return pre_publishing_hook, post_publishing_hook\n\n def _publish(self):\n publishing_json_path = os.path.join(self._target, self._app_pkg,\n \"pyrustic_data\",\n \"publishing.json\")\n if not os.path.exists(publishing_json_path):\n print(\"Missing 'publishing.json' in 'pyrustic_data' folder.\\nPlease init your app.\")\n return False\n jasonix = Jasonix(publishing_json_path)\n owner = jasonix.data.get(\"owner\", None)\n repo = jasonix.data.get(\"repo\", None)\n name = jasonix.data.get(\"name\", None)\n tag_name = jasonix.data.get(\"tag_name\", None)\n target_commitish = jasonix.data.get(\"target_commitish\", None)\n description = jasonix.data.get(\"description\", None)\n prerelease = jasonix.data.get(\"prerelease\", None)\n draft = jasonix.data.get(\"draft\", None)\n asset_path = jasonix.data.get(\"asset_path\", None)\n asset_name = jasonix.data.get(\"asset_name\", None)\n asset_label = jasonix.data.get(\"asset_label\", None)\n if (not name or not tag_name\n or not asset_path or not owner or not repo):\n print(\"Missing mandatory elements in 
$APP_DIR/pyrustic_data/publishing.json\")\n return False\n gurl = create_gurl()\n gurl.token = getpass.getpass(\"\\nYour Github Token: \")\n print(\"\\nProcessing...\")\n catapult = Catapult(gurl, owner, repo)\n cache = catapult.publish(name, tag_name,target_commitish,\n description, prerelease, draft,\n asset_path, asset_name, asset_label)\n meta_code = cache[\"meta_code\"]\n status_code = cache[\"status_code\"]\n status_text = cache[\"status_text\"]\n if meta_code == 0:\n return True\n if meta_code == 1:\n print(\"Failed to create release.\\n{} {}\".format(status_code,\n status_text))\n return False\n if meta_code == 2:\n print(\"Failed to upload asset.\\n{} {}\".format(status_code,\n status_text))\n return False\n print(\"Unknown error\")\n return False\n\n def _get_version(self):\n setup_config_dict = setup_config(self._target)\n version = None\n if setup_config_dict:\n version = setup_config_dict[\"version\"]\n return version\n\n def _ask_for_confirmation(self, message, default=\"y\"):\n cache = \"Y/n\" if default == \"y\" else \"y/N\"\n user_input = input(\"{} ({}): \".format(message, cache))\n if not user_input:\n user_input = default\n if user_input.lower() == \"y\":\n return True\n return False\n", "id": "6345452", "language": "Python", "matching_score": 3.7213146686553955, "max_stars_count": 0, "path": "pyrustic/manager/handler/publish_handler.py" }, { "content": "import os\nimport os.path\nfrom pyrustic.manager.misc.funcs import create_gurl\nfrom pyrustic.manager import hubway\nfrom pyrustic.jasonix import Jasonix\n\n\nclass HubHandler:\n \"\"\"\n Description\n -----------\n Use this command to build a distribution package\n that could be published later with Hub.\n This command will block the Pyrustic Manager.\n The distribution package is a Wheel.\n\n Usage\n -----\n - Description: Build\n - Command: build\n \"\"\"\n def __init__(self, target, app_pkg, args):\n self._target = target\n self._app_pkg = app_pkg\n self._args = args\n self._process(args)\n\n def _process(self, args):\n if not args:\n cache = self._default_owner_repo()\n if cache:\n text = \"{}/{}\".format(*cache)\n args = [text]\n else:\n print(\"Wrong usage. Check 'help hub'.\")\n return\n elif len(args) > 1:\n print(\"Wrong usage. Check 'help hub'.\")\n return\n owner_repo = args[0].split(\"/\")\n if len(owner_repo) != 2:\n print(\"Incorrect request. 
Check 'help hub'.\")\n return\n owner, repo = owner_repo\n print(\"https://github.com/{}/{}\\n\".format(owner, repo))\n gurl = create_gurl()\n if not self._show_repo_description(gurl, owner, repo):\n return\n if not self._show_latest_release(gurl, owner, repo):\n return\n if not self._show_latest_releases_downloads(gurl, owner, repo):\n return\n\n def _show_repo_description(self, gurl, owner, repo):\n status_code, status_text, data = hubway.repo_description(gurl,\n owner,\n repo)\n if status_code not in (200, 304):\n print(\"Failed to get the repo description\")\n print(\"{} {}\".format(status_code, status_text))\n return False\n self._show_section(\"Repository description\")\n description = data[\"description\"]\n description = \"- No description -\" if not description else description\n print(description)\n print(\"Created on {}\".format(data[\"created_at\"]))\n stargazers = data[\"stargazers_count\"]\n subscribers = data[\"subscribers_count\"]\n print(\"{} Stargazer{} and {} Subscriber{}\".format(stargazers,\n self._plural(stargazers),\n subscribers,\n self._plural(subscribers)))\n print(\"\")\n return True\n\n def _show_latest_release(self, gurl, owner, repo):\n status_code, status_text, data = hubway.latest_release(gurl,\n owner,\n repo)\n if status_code not in (200, 304):\n print(\"Failed to get the latest release info\")\n print(\"{} {}\".format(status_code, status_text))\n return False\n self._show_section(\"Latest release\")\n print(\"Tag name: {}\".format(data[\"tag_name\"]))\n print(\"Published on {}\".format(data[\"published_at\"]))\n downloads = data[\"downloads_count\"]\n print(\"{} Download{}\".format(downloads,\n self._plural(downloads)))\n print(\"\")\n return True\n\n def _show_latest_releases_downloads(self, gurl, owner, repo):\n status_code, status_text, data = \\\n hubway.latest_releases_downloads(gurl, owner, repo)\n if status_code not in (200, 304):\n print(\"Failed to get the latest ten (pre)releases info\")\n print(\"{} {}\".format(status_code, status_text))\n return False\n self._show_section(\"Latest ten (pre)releases\")\n downloads = data\n print(\"{} Download{}\".format(downloads,\n self._plural(downloads)))\n print(\"\")\n return True\n\n def _show_section(self, title):\n count = len(title)\n print(title)\n print(\"\".join([\"=\" for _ in range(count)]))\n\n def _plural(self, item):\n item = int(item)\n return \"s\" if item > 1 else \"\"\n\n def _default_owner_repo(self):\n if not self._target and not self._app_pkg:\n return None\n publishing_json_path = os.path.join(self._target, self._app_pkg,\n \"pyrustic_data\",\n \"publishing.json\")\n if not os.path.exists(publishing_json_path):\n return None\n jasonix = Jasonix(publishing_json_path)\n if not jasonix.data:\n return None\n owner = jasonix.data[\"owner\"]\n repo = jasonix.data[\"repo\"]\n if not owner or not repo:\n return None\n return owner, repo\n", "id": "9939895", "language": "Python", "matching_score": 3.6369030475616455, "max_stars_count": 0, "path": "pyrustic/manager/handler/hub_handler.py" }, { "content": "from pyrustic.gurl import dict_to_json_body\nfrom pyrustic.manager.misc import funcs\nimport urllib.parse\n\n\nclass Catapult:\n def __init__(self, gurl, owner, repo):\n self._gurl = gurl\n self._owner = owner\n self._repo = repo\n\n def publish(self, name, tag_name,\n target_commitish, description,\n prerelease, draft,\n asset_path, asset_name, asset_label):\n \"\"\"\n Return {\"meta_code\":, \"status_code\", \"status_text\", \"data\"}\n meta code:\n 0- success\n 1- failed to create release 
(check 'status_code', 'status_text')\n 2- failed to upload asset (check 'status_code', 'status_text')\n \"\"\"\n publishing_result = {\"meta_code\": None, \"status_code\": None,\n \"status_text\": None, \"data\": None}\n # == create release\n response = self._create_release(self._owner, self._repo, name, tag_name,\n target_commitish, description,\n prerelease, draft)\n code = response.code\n if code != 201:\n publishing_result[\"meta_code\"] = 1\n publishing_result[\"status_code\"] = code\n publishing_result[\"status_text\"] = response.status[1]\n return publishing_result\n # == upload asset\n upload_url = response.json[\"upload_url\"]\n response = self._upload_asset(upload_url, asset_path,\n asset_name, asset_label)\n code = response.code\n if code != 201:\n publishing_result[\"meta_code\"] = 2\n publishing_result[\"status_code\"] = code\n publishing_result[\"status_text\"] = response.status[1]\n return publishing_result\n publishing_result[\"meta_code\"] = 0\n return publishing_result\n\n def _create_release(self, owner, repo, name, tag_name, target_commitish,\n description, prerelease, draft):\n res = \"/repos/{}/{}/releases\".format(owner, repo)\n body = {\"tag_name\": tag_name, \"target_commitish\": target_commitish,\n \"name\": name, \"body\": description, \"draft\": draft,\n \"prerelease\": prerelease}\n body = dict_to_json_body(body)\n response = self._gurl.request(funcs.get_hub_url(res),\n body=body, method=\"POST\")\n return response\n\n def _upload_asset(self, upload_url, path, name, label):\n url = upload_url.replace(\"{?name,label}\", \"?{}\")\n parameters = urllib.parse.urlencode({\"label\": label, \"name\": name})\n url = url.format(parameters)\n with open(path, \"rb\") as file:\n data = file.read()\n header = {\"Content-Type\": \"application/zip\"}\n response = self._gurl.request(url, body=data, method=\"POST\",\n headers=header)\n return response\n\n\ndef repo_description(gurl, owner, repo):\n \"\"\"\n Returns: (status_code, status_text, data)\n data = {\"created_at\": date, \"description\": str,\n \"stargazers_count\": int, \"subscribers_count\": int}\n \"\"\"\n res = \"/repos/{}/{}\".format(owner, repo)\n response = gurl.request(funcs.get_hub_url(res))\n code = response.code\n json = response.json\n data = {}\n if code == 304:\n json = response.cached_response.json\n if (code in (200, 304)) and json:\n data[\"description\"] = json[\"description\"]\n date = json[\"created_at\"]\n data[\"created_at\"] = _badass_iso_8601_date_parser(date)\n data[\"stargazers_count\"] = json[\"stargazers_count\"]\n data[\"subscribers_count\"] = json[\"subscribers_count\"]\n return *response.status, data\n\n\ndef latest_release(gurl, owner, repo):\n \"\"\"\n Returns: (status_code, status_text, data}\n data = {\"tag_name\": str, \"published_at\": date,\n \"downloads_count\": int}\n \"\"\"\n res = \"/repos/{}/{}/releases/latest\".format(owner, repo)\n response = gurl.request(funcs.get_hub_url(res))\n code = response.code\n json = response.json\n data = {}\n if code == 304:\n json = response.cached_response.json\n if (code in (200, 304)) and json:\n data[\"tag_name\"] = json[\"tag_name\"]\n date = json[\"published_at\"]\n data[\"published_at\"] = _badass_iso_8601_date_parser(date)\n data[\"downloads_count\"] = _downloads_counter(json)\n return *response.status, data\n\n\ndef latest_releases_downloads(gurl, owner, repo, maxi=10):\n \"\"\"\n Returns: (status_code, status_text, data}\n data = int, downloads count\n \"\"\"\n res = \"/repos/{}/{}/releases?per_page={}\".format(owner, repo, maxi)\n 
response = gurl.request(funcs.get_hub_url(res))\n code = response.code\n json = response.json\n data = 0\n if code == 304:\n json = response.cached_response.json\n if (code in (200, 304)) and json:\n for release in json:\n data += _downloads_counter(release)\n return *response.status, data\n\n\ndef _downloads_counter(json):\n count = 0\n for asset in json[\"assets\"]:\n count += asset[\"download_count\"]\n return count\n\n\ndef _badass_iso_8601_date_parser(date):\n # YYYY-MM-DDTHH:MM:SSZ\n date = date.rstrip(\"Z\")\n date_part, time_part = date.split(\"T\")\n months = (\"Jan\", \"Feb\", \"March\", \"April\", \"May\", \"June\", \"July\",\n \"Aug\", \"Sept\", \"Oct\", \"Nov\", \"Dec\")\n year, month, day = date_part.split(\"-\")\n text = \"{} {} {} at {}\".format(day, months[int(month) - 1], year, time_part)\n return text\n", "id": "6704719", "language": "Python", "matching_score": 1.8912582397460938, "max_stars_count": 0, "path": "pyrustic/manager/hubway.py" }, { "content": "from urllib.request import Request, urlopen\nfrom urllib.error import URLError, HTTPError\nimport json\n\n\n# Headers\n_ETAG = \"ETag\"\n_LAST_MODIFIED = \"Last-Modified\"\n_AUTH = \"Authorization\"\n_IF_NONE_MATCH = \"If-None-Match\"\n_IF_MODIFIED_SINCE = \"If-Modified-Since\"\n\n\n# HTTP Status Codes\nHTTP_STATUS_CODES = {\n 200: \"Ok\",\n 201: \"Created\",\n 301: \"Moved Permanently\",\n 304: \"Not Modified\",\n 400: \"Bad Request\",\n 401: \"Unauthorized\",\n 403: \"Forbidden\",\n 404: \"Not Found\",\n 422: \"Unprocessable Entity\"\n }\n\n\nclass Gurl: # TODO write a better documentation ! ;)\n \"\"\"\n Gurl is a great suite for accessing the web!\n \"\"\"\n def __init__(self, token=None, headers=None,\n web_cache=True, response_cache=True):\n \"\"\"\n PARAMETERS:\n\n - token: Authentication token\n\n - headers: dict of headers. 
Example:\n { \"Accept\": \"application/vnd.github.v3+json\",\n \"User-Agent\": \"Mozilla/5.0\" )\n\n - web_cache: bool, set it to True to activate the web cache\n\n - response_cache: bool, set it to True to access cached responses\n\n \"\"\"\n self._token = token\n self._headers = {} if not headers else headers\n self._web_cache = _WebCache(response_cache) if web_cache else None\n\n @property\n def token(self):\n return self._token\n\n @token.setter\n def token(self, val):\n self._token = val\n\n @property\n def headers(self):\n return self._headers\n\n @headers.setter\n def headers(self, val):\n self._headers = val\n\n @property\n def web_cache(self):\n if self._web_cache is None:\n return False\n return True\n\n @web_cache.setter\n def web_cache(self, val):\n self._web_cache = _WebCache() if val else None\n\n def request(self, url, body=None, method=\"GET\", headers=None):\n \"\"\" Returns a Response object \"\"\"\n headers = {} if not headers else headers\n # request object\n req = _get_req(url, body, method)\n # add request to webcache\n if self._web_cache:\n self._web_cache.add_request(url, req)\n # set authorization\n _set_authorization(req, self._token)\n # set headers\n _set_headers(req, {**self._headers, **headers})\n # get response\n response = _get_response(req)\n # add response to webcache\n if self._web_cache:\n response = self._web_cache.add_response(url, response)\n return response\n\n\nclass Response:\n def __init__(self, native=None, error=None, cached_response=None):\n self._native = native\n self._error = error\n self._body = None\n self._json = None\n self._text = None\n self._cached_response = cached_response\n\n # ========================\n # PROPERTIES\n # ========================\n @property\n def native(self):\n return self._native\n\n @property\n def error(self):\n return self._error\n\n @property\n def error_reason(self):\n if self._error is None:\n return None\n return self._error.reason\n\n @property\n def code(self):\n code = None\n if self._native:\n code = self._native.getcode()\n elif isinstance(self._error, HTTPError):\n code = self._error.code\n return code\n\n @property\n def status(self):\n return _code_to_status(self.code)\n\n @property\n def reason(self):\n if self._native:\n return self._native.reason\n\n @property\n def headers(self):\n headers = None\n if self._native:\n headers = self._native.getheaders()\n return headers\n\n @property\n def url(self):\n url = None\n if self._native:\n url = self._native.geturl()\n return url\n\n @property\n def body(self):\n if self._body is None and self._native is not None:\n try:\n self._body = self._native.read()\n except Exception:\n pass\n except KeyboardInterrupt:\n pass\n return self._body\n\n @property\n def json(self):\n if self._json is None:\n data = _decode_data(self.body)\n self._json = _load_json(data)\n return self._json\n\n @property\n def cached_response(self):\n return self._cached_response\n\n # ========================\n # PUBLIC\n # ========================\n def show(self, include_headers=False, include_body=False):\n if self._text is not None:\n return self._text\n texts = []\n # ERROR\n if self._error is not None:\n cache = \"ERROR: {}\".format(self._error.reason)\n texts.append(cache)\n # STATUS\n status = self.code\n if status is not None:\n cache = \"STATUS: {}\".format(status)\n texts.append(cache)\n # URL\n url = self.url\n if url is not None:\n cache = \"URL: {}\".format(url)\n texts.append(cache)\n if self._native is not None:\n # REASON\n cache = \"REASON: 
{}\".format(self._native.reason)\n texts.append(cache)\n # HEADERS\n if include_headers:\n headers_text = \"\\n\".join(\"{}: {}\".format(x, y)\n for x, y in self._native.getheaders())\n cache = \"HEADERS:\\n{}\".format(headers_text)\n texts.append(cache)\n # BODY\n if include_body:\n data = _decode_data(self.body)\n cache = \"BODY:\\n{}\".format(data)\n texts.append(cache)\n self._text = \"\\n\".join(texts)\n return self._text\n\n def header(self, name, default=None):\n if self._native:\n return self._native.getheader(name, default)\n return default\n\n\ndef dict_to_json_body(data):\n data = json.dumps(data)\n return data.encode()\n\n# ========================\n# PRIVATE\n# ========================\n\n\ndef _get_req(url, body, method):\n req = Request(url, data=body, method=method)\n return req\n\n\ndef _set_authorization(req, token):\n # authorization\n if token:\n req.add_header(_AUTH, \"token {}\".format(token))\n\n\ndef _set_headers(req, headers):\n # add headers\n for key, val in headers.items():\n req.add_header(key, val)\n\n\ndef _get_response(req):\n \"\"\" Returns a Response object \"\"\"\n error = response = None\n try:\n response = urlopen(req)\n except HTTPError as e:\n error = e\n except URLError as e:\n error = e\n except Exception as e:\n error = e\n except KeyboardInterrupt:\n pass\n return Response(native=response, error=error)\n\n\ndef _decode_data(data, encoding=\"utf-8\"):\n try:\n data = data.decode(encoding)\n except Exception as e:\n data = None\n except KeyboardInterrupt:\n pass\n return data\n\n\ndef _load_json(text):\n try:\n text = json.loads(text)\n except Exception as e:\n text = None\n except KeyboardInterrupt:\n pass\n return text\n\n\ndef _code_to_status(code):\n text = None\n if code is None:\n text = \"Check your connection\"\n else:\n text = HTTP_STATUS_CODES.get(code, \"Unknown HTTP Status Code\")\n return code, text\n\n\nclass _WebCache:\n # TODO: what happens when you make a request 1, the server doesn't return\n # a body. You do a request 2, the server states you that the resources\n # hasn't changed, so technically you can count on the cached_response.\n # but... the cached_response doesn't have a body (there aren't even a cached response !)\n\n def __init__(self, cache_response):\n self._response_cache = {} if cache_response else None\n self._cache = {}\n\n def add_request(self, url, req):\n if url in self._cache:\n validator, value = self._cache[url]\n header = _IF_NONE_MATCH\n if validator == _LAST_MODIFIED:\n header = _IF_MODIFIED_SINCE\n headers = {header: value}\n _set_headers(req, headers)\n\n def add_response(self, url, response):\n etag_header = response.header(_ETAG, None)\n last_modified_header = response.header(_LAST_MODIFIED, None)\n if etag_header is not None:\n self._cache[url] = (_ETAG, etag_header)\n elif last_modified_header is not None:\n self._cache[url] = (_LAST_MODIFIED, last_modified_header)\n return self._get_response(url, response)\n\n def _get_response(self, url, response):\n if self._response_cache is None:\n pass\n elif response.code == 304:\n response = Response(native=response.native,\n error=response.error,\n cached_response=self._response_cache.get(url, None))\n elif response.body:\n self._response_cache[url] = response\n # TODO, what if response.body is None ? No cached_response. But next\n # request will state that resource hasn't changed... 
so it won't send any body !\n return response\n", "id": "1158435", "language": "Python", "matching_score": 0.691214919090271, "max_stars_count": 0, "path": "pyrustic/gurl.py" }, { "content": "import unittest\nimport os.path\nimport sys\n\n\nclass LiteTestRunner:\n def __init__(self, path, app_dir):\n self._path = path\n self._app_dir = app_dir\n\n def run(self, failfast=True):\n reloader = _Reloader()\n reloader.save_state()\n cache = self._run(failfast)\n reloader.restore_state()\n return cache\n\n def _run(self, failfast):\n if not os.path.exists(self._path):\n return False, \"This path doesn't exist\"\n test_loader = unittest.TestLoader()\n suite = test_loader.discover(self._path, top_level_dir=self._app_dir)\n result = unittest.TestResult()\n try:\n result.startTestRun()\n result.failfast = failfast\n suite.run(result)\n except Exception as e:\n return False, e\n finally:\n result.stopTestRun()\n if result.wasSuccessful():\n return True, None\n else:\n return False, self._stringify_result(result)\n\n def _stringify_result(self, result):\n data = []\n if result.errors:\n for error in result.errors:\n cache = \"{}\\n{}\\n\\n\".format(error[0], error[1])\n data.append(cache)\n if result.failures:\n for failure in result.failures:\n cache = \"{}\\n{}\\n\\n\".format(failure[0], failure[1])\n data.append(cache)\n if result.unexpectedSuccesses:\n for expected_failure in result.expectedFailures:\n cache = \"{}\\n{}\\n\\n\".format(expected_failure[0],\n expected_failure[1])\n data.append(cache)\n\n return \"\".join(data)\n\n\nclass _Reloader:\n def __init__(self):\n self._state = None\n\n def save_state(self):\n self._state = sys.modules.copy()\n\n def restore_state(self):\n for x in sys.modules.copy().keys():\n if not x in self._state:\n del sys.modules[x]\n", "id": "8924756", "language": "Python", "matching_score": 0.32914337515830994, "max_stars_count": 0, "path": "pyrustic/manager/misc/lite_test_runner.py" }, { "content": "import os\nimport os.path\nimport json\ntry:\n from pyrustic.exception import PyrusticException\nexcept ImportError:\n class PyrusticException(Exception):\n pass\n\n\nclass Jasonix:\n \"\"\"\n Jasonix allows you to play with JSON files like toys ! (really)\n \"\"\"\n def __init__(self, target, default=None, readonly=False):\n \"\"\"\n PARAMETERS:\n\n - target: dict or file-like object or a path to a json file. If target is a path\n and this path doesn't exist, a new file will be created or not according\n to the parameter \"default\n\n - default: file-like object or a path or a dict.\n\n - readonly: bool\n \"\"\"\n self._target = target\n self._default = default\n self._readonly = readonly\n #\n self._data = None\n #\n self._setup()\n\n\n # ==============================================\n # PROPERTIES\n # ==============================================\n\n @property\n def data(self):\n \"\"\"\n The dict-like representation of the JSON file\n \"\"\"\n return self._data\n\n @data.setter\n def data(self, val):\n \"\"\"\n The dict to push into JSON file\n \"\"\"\n self._data = val\n\n @property\n def target(self):\n return self._target\n\n @property\n def default(self):\n return self._default\n\n # ==============================================\n # PUBLIC METHODS\n # ==============================================\n\n def save(self):\n \"\"\"\"\n Push data into the JSON file (not the default file !) 
if 'readonly' is False\n \"\"\"\n if self._readonly:\n PyrusticException(\"Attempt to save a readonly config !\")\n self._json_dump(self._target, self._data)\n\n def reload(self):\n \"\"\"\n Reload data from JSON file\n \"\"\"\n self._setup()\n\n # ==============================================\n # PRIVATE METHODS\n # ==============================================\n def _setup(self):\n if not self._target:\n return\n data = self._json_load(self._target,\n ignore_exception=True)\n if data is None:\n if isinstance(self._target, str):\n with open(self._target, \"w\") as file:\n pass\n if not self._default:\n raise PyrusticException(\"Missing target !\")\n default_data = self._json_load(self._default)\n if default_data is None:\n message = (\"Missing target !\",\n \"And invalid default json.\")\n raise PyrusticException(\" \".join(message))\n self._data = default_data\n self.save()\n return\n self._data = data\n\n def _json_load(self, target, ignore_exception=False):\n data = None\n if isinstance(target, str):\n data = self._json_load_from_path(target,\n ignore_exception)\n elif isinstance(target, dict):\n data = target\n elif isinstance(target, bytes):\n target = target.decode(\"utf-8\")\n data = json.loads(target)\n else:\n data = self._json_load_from_file(target,\n ignore_exception)\n return data\n\n def _json_load_from_path(self, path,\n ignore_exception=False):\n data = None\n try:\n with open(path, \"r\") as file:\n data = json.load(file)\n except Exception as e:\n if not ignore_exception:\n raise e\n return data\n\n def _json_load_from_file(self, file,\n ignore_exception=False):\n data = None\n try:\n data = json.load(file)\n except Exception as e:\n if not ignore_exception:\n raise e\n return data\n\n def _json_dump(self, target, data,\n ignore_exception=False):\n if isinstance(target, str):\n self._json_dump_to_path(target, data, ignore_exception)\n else:\n self._json_dump_to_file(target, data, ignore_exception)\n\n def _json_dump_to_path(self, path, data,\n ignore_exception=False):\n try:\n with open(path, \"w\") as file:\n json.dump(data, file, indent=4, sort_keys=True)\n except Exception as e:\n if not ignore_exception:\n raise e\n\n def _json_dump_to_file(self, file, data,\n ignore_exception=False):\n try:\n json.dump(data, file, indent=4, sort_keys=True)\n except Exception as e:\n if not ignore_exception:\n raise e\n", "id": "589352", "language": "Python", "matching_score": 1.6661651134490967, "max_stars_count": 0, "path": "pyrustic/jasonix.py" }, { "content": "\"\"\"\nPyrustic exceptions\n\"\"\"\n\n\nclass PyrusticException(Exception):\n pass\n\nclass PyrusticAppException(PyrusticException):\n pass\n\n\nclass PyrusticWidgetException(PyrusticException):\n pass\n\n\nclass PyrusticTableException(PyrusticWidgetException):\n pass\n", "id": "5260689", "language": "Python", "matching_score": 0.12175214290618896, "max_stars_count": 0, "path": "pyrustic/exception.py" }, { "content": "from pyrustic.manager import constant\nfrom pyrustic.jasonix import Jasonix\nimport os.path\n\n\nclass RecentHandler:\n \"\"\"\n Description\n -----------\n Display the recent Targets.\n The command \"relink\" links again Pyrustic Manager with the\n Target at index 0 from the \"recent\" list.\n\n Usage\n -----\n - Description: List of recent Targets\n - Command: recent\n \"\"\"\n\n def __init__(self, target,\n app_pkg, args):\n self._target = target\n self._app_pkg = app_pkg\n self._process(args)\n\n def _process(self, args):\n if args:\n print(\"Wrong usage of this command\")\n return\n jasonix = 
Jasonix(constant.MANAGER_SHARED_DATA_FILE)\n recent_list = jasonix.data[\"recent\"]\n len_recent_list = len(recent_list)\n if len_recent_list == 0:\n print(\"- Empty -\")\n for i, path in enumerate(reversed(recent_list)):\n print(\"#{}\".format(i))\n print(\"Name: {}\".format(os.path.basename(path)))\n print(\"Path: {}\".format(path))\n if i < len_recent_list - 1:\n print(\"\")\n", "id": "10030932", "language": "Python", "matching_score": 3.647897243499756, "max_stars_count": 0, "path": "pyrustic/manager/handler/recent_handler.py" }, { "content": "from pyrustic.manager import constant\nfrom pyrustic.jasonix import Jasonix\nfrom pyrustic.manager.handler.link_handler import LinkHandler\nimport os.path\n\n\nclass RelinkHandler:\n \"\"\"\n Description\n -----------\n Link again the previous Target or one of recent Targets.\n\n Usage\n -----\n - Description: Link again the previous Target\n - Command: relink\n\n - Description: Link again a previous Target with its index\n - Command: relink <index>\n\n Example\n -------\n - Description: Link again a previous Target\n - Preliminary: Assume you want to link again the Target\n with index #2 (found the index with the command \"recent\")\n - Command: relink 2\n \"\"\"\n def __init__(self, target,\n app_pkg, args):\n self._target = target\n self._app_pkg = app_pkg\n self._process(args)\n\n @property\n def target(self):\n return self._target\n\n def _process(self, args):\n jasonix = Jasonix(constant.MANAGER_SHARED_DATA_FILE, readonly=True)\n path = jasonix.data[\"target\"]\n if not jasonix.data[\"recent\"]:\n print(\"- Empty -\")\n return\n if len(args) == 1:\n try:\n index = int(args[0])\n path = list(reversed(jasonix.data[\"recent\"]))[index]\n except Exception as e:\n print(\"Wrong index\")\n return\n elif len(args) > 1:\n print(\"Wrong usage of this command\")\n return\n link_handler = LinkHandler(self._target, self._app_pkg, [path])\n self._target = link_handler.target\n\n def _check_path(self, path):\n \"\"\" Returns True if the path is valid, else False \"\"\"\n if not os.path.exists(path):\n print(\"{}\".format(path))\n print(\"This path doesn't exist\")\n return False\n return True\n", "id": "10820439", "language": "Python", "matching_score": 1.319433331489563, "max_stars_count": 0, "path": "pyrustic/manager/handler/relink_handler.py" }, { "content": "import os\nimport os.path\nimport pkgutil\nfrom pyrustic.jasonix import Jasonix\nfrom pyrustic.manager import constant\n\n\ndef main():\n set_environment()\n\n\ndef set_environment():\n success = True\n # Make folders\n if not make_folders():\n success = False\n # Add shared data files\n if success and not add_default_shared_data_files():\n success = False\n # check success\n if success:\n return True\n else:\n return False\n\n\ndef make_folders():\n folders = (constant.PYRUSTIC_DATA_FOLDER,\n constant.MANAGER_SHARED_FOLDER,\n constant.MANAGER_CACHE_FOLDER,\n constant.RUNTEST_SHARED_FOLDER,\n constant.RUNTEST_CACHE_FOLDER,\n constant.SQLEDITOR_SHARED_FOLDER,\n constant.SQLEDITOR_CACHE_FOLDER,\n constant.HUB_SHARED_FOLDER,\n constant.HUB_CACHE_FOLDER)\n for path in folders:\n if not _make_folder(path):\n return False\n return True\n\n\ndef add_default_shared_data_files():\n resource_prefix = \"manager/default_json/PyrusticData/\"\n data = ((\"manager_default.json\", constant.MANAGER_SHARED_DATA_FILE),\n (\"jupitest_default.json\", constant.RUNTEST_SHARED_DATA_FILE),\n (\"rustiql_default.json\", constant.SQLEDITOR_SHARED_DATA_FILE),\n (\"hubway_default.json\", constant.HUB_SHARED_DATA_FILE))\n for 
json_name, path in data:\n data = pkgutil.get_data(\"pyrustic\",\n resource_prefix + json_name)\n if os.path.exists(path):\n return True\n try:\n Jasonix(path, default=data)\n except Exception as e:\n print(\"Failed to initialize {}\".format(json_name))\n print(e)\n return True\n\n\ndef _make_folder(path):\n if not os.path.exists(path):\n try:\n os.mkdir(path)\n except Exception as e:\n print(\"Failed to make directory '{}'\".format(path))\n print(e)\n return False\n return True\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "8900242", "language": "Python", "matching_score": 5.495182037353516, "max_stars_count": 0, "path": "pyrustic/manager/install.py" }, { "content": "import os.path\n\n\n# general constants\nUSER_AGENT = (\"User-Agent\", \"Pyrustic\")\nPYRUSTIC_DATA = \"PyrusticData\"\nPYRUSTIC_DATA_FOLDER = os.path.join(os.path.expanduser(\"~\"),\n PYRUSTIC_DATA)\n\n\n# manager constants\nMANAGER_SHARED_FOLDER = os.path.join(PYRUSTIC_DATA_FOLDER, \"manager\")\n\nMANAGER_CACHE_FOLDER = os.path.join(MANAGER_SHARED_FOLDER, \"cache\")\n\nMANAGER_SHARED_DATA_FILE = os.path.join(MANAGER_SHARED_FOLDER,\n \"manager_shared_data.json\")\n\n\n# sqleditor constants\nSQLEDITOR_SHARED_FOLDER = os.path.join(PYRUSTIC_DATA_FOLDER, \"rustiql\")\n\nSQLEDITOR_CACHE_FOLDER = os.path.join(SQLEDITOR_SHARED_FOLDER, \"cache\")\n\nSQLEDITOR_SHARED_DATA_FILE = os.path.join(SQLEDITOR_SHARED_FOLDER,\n \"rustiql_shared_data.json\")\n\n\n# runtest constants\nRUNTEST_SHARED_FOLDER = os.path.join(PYRUSTIC_DATA_FOLDER, \"jupitest\")\n\nRUNTEST_CACHE_FOLDER = os.path.join(RUNTEST_SHARED_FOLDER, \"cache\")\n\nRUNTEST_SHARED_DATA_FILE = os.path.join(RUNTEST_SHARED_FOLDER,\n \"jupitest_shared_data.json\")\n\n\n# hub constants\nHUB_SHARED_FOLDER = os.path.join(PYRUSTIC_DATA_FOLDER, \"hubway\")\n\nHUB_CACHE_FOLDER = os.path.join(HUB_SHARED_FOLDER, \"cache\")\n\nHUB_SHARED_DATA_FILE = os.path.join(HUB_SHARED_FOLDER,\n \"hubway_shared_data.json\")\n", "id": "4928546", "language": "Python", "matching_score": 0.313320517539978, "max_stars_count": 0, "path": "pyrustic/manager/constant.py" }, { "content": "from pyrustic.version import __version__\ntry:\n import importlib.metadata as dist_info\nexcept ImportError:\n import importlib_metadata as dist_info\n\n\ndef dist(name, target=None): # TODO: implement target=None or path to blah blah...\n \"\"\"\n DESCRIPTION:\n Use this function to get some info about a distribution package\n PARAM:\n name: the distribution name, example: \"wheel\"\n RETURN:\n A dict with these keys:\n name, author, author_email, description, home_page,\n maintainer, maintainer_email, version\n All values are strings.\n \"\"\"\n metadata_cache = None\n try:\n metadata_cache = dist_info.metadata(name)\n except Exception:\n pass\n keys = ((\"author\", \"Author\"),\n (\"author_email\", \"Author-email\"),\n (\"description\", \"Summary\"),\n (\"home_page\", \"Home-page\"),\n (\"maintainer\", \"Maintainer\"),\n (\"maintainer_email\", \"Maintainer-email\"),\n (\"version\", \"Version\"))\n data = None\n if metadata_cache:\n data = {\"name\": name}\n for item in keys:\n if item[1] in metadata_cache:\n data[item[0]] = metadata_cache[item[1]]\n return data\n", "id": "10565468", "language": "Python", "matching_score": 1.1689777374267578, "max_stars_count": 0, "path": "pyrustic/__init__.py" }, { "content": "from pyrustic import dist\n\n\nclass VersionHandler:\n \"\"\"\n Description\n -----------\n Use this command to check the version of Pyrustic.\n\n Usage\n -----\n - Description: Check information\n - Command: 
version\n \"\"\"\n def __init__(self):\n self._process()\n\n def _process(self):\n print(\"Pyrustic Manager\")\n print(\"Version: {}\".format(dist(\"pyrustic\")[\"version\"]))\n", "id": "11581423", "language": "Python", "matching_score": 0.24346718192100525, "max_stars_count": 0, "path": "pyrustic/manager/handler/version_handler.py" }, { "content": "import tkinter as tk\nfrom tkinter import filedialog\nimport os\nimport os.path\nfrom pyrustic import dist\nfrom pyrustic.manager.misc import funcs\n\n\nclass LinkHandler:\n \"\"\"\n Description\n -----------\n This will link your Target project to Pyrustic Manager.\n\n Usage\n -----\n - Description: Open the directory chooser\n - Command: link\n\n - Description: Link a Target\n - Command: link </path/to/target/project>\n\n \"\"\"\n def __init__(self, target, app_pkg, args):\n self._target = target\n self._app_pkg = app_pkg\n self._process(args)\n\n @property\n def target(self):\n return self._target\n\n def _process(self, args):\n path = None\n # no args, so open the folder chooser\n if not args:\n path = self._open_folder_chooser()\n # more than 1 arg isn't allowed\n elif len(args) > 1:\n print(\"Wrong usage of this command\")\n return\n # 1 arg submitted: the path\n else:\n path = args[0]\n # invalid path\n if not self._check_path(path):\n return\n # linking\n path = os.path.normpath(path)\n self._link_to(path)\n\n def _open_folder_chooser(self):\n #initialdir = os.path.expanduser(\"~\")\n initialdir = os.getcwd()\n root = tk.Tk()\n root.withdraw()\n path = filedialog.askdirectory(initialdir=initialdir,\n title=\"Select your project\")\n root.destroy()\n if not isinstance(path, str) or not path:\n return\n return path\n\n def _check_path(self, path):\n \"\"\" Returns True if the path is valid, else False \"\"\"\n if not path:\n print(\"You haven't submitted a path\")\n return False\n if not os.path.exists(path):\n print(\"This path doesn't exist\")\n return False\n return True\n\n def _link_to(self, path):\n if not self._store_target(path):\n print(\"Failed to store the Target in config\")\n return\n self._target = path\n print(\"Successfully linked !\")\n app_pkg = os.path.basename(path)\n print(\"[{}] {}\".format(app_pkg, path))\n data = funcs.check_project_state(path)\n if data == 0:\n print(\"Version: {}\".format(dist(app_pkg)[\"version\"]))\n elif data == 1:\n print(\"Not yet initialized project (check 'help init')\")\n elif data == 2:\n print(\"Not yet installed project (think about: 'pip install -e .')\")\n\n def _store_target(self, path):\n jasonix = funcs.get_manager_jasonix(False)\n recent_list = jasonix.data[\"recent\"]\n for i, item in enumerate(recent_list):\n if item == path:\n del recent_list[i]\n recent_list.append(path)\n len_recent_list = len(recent_list)\n max_items = 5\n if len_recent_list > max_items:\n for i in range(len_recent_list - max_items):\n del recent_list[0]\n jasonix.data[\"target\"] = path\n jasonix.save()\n return True\n", "id": "9809865", "language": "Python", "matching_score": 4.602692604064941, "max_stars_count": 0, "path": "pyrustic/manager/handler/link_handler.py" }, { "content": "import os.path\nfrom pyrustic import dist\nfrom pyrustic.manager.misc import funcs\n\n\nclass TargetHandler:\n \"\"\"\n Description\n -----------\n Use this command to check the currently linked Target.\n\n Usage\n -----\n - Description: Check the currently linked Target\n - Command: target\n\n Note: This command also shows the version of your project\n if the data is available.\n \"\"\"\n def __init__(self, target,\n 
app_pkg,\n args):\n self._target = target\n self._app_pkg = app_pkg\n self._process(target, app_pkg, args)\n\n @property\n def target(self):\n return self._target\n\n def _process(self, target, app_pkg, args):\n if not target:\n print(\"None\")\n return\n # args are present: invalid command\n if args:\n print(\"Wrong usage of this command\")\n return\n #\n data = funcs.check_project_state(target)\n print(\"[{}] {}\".format(os.path.basename(target), self._target))\n if data == 0:\n print(\"Version: {}\".format(dist(app_pkg)[\"version\"]))\n elif data == 1:\n print(\"Not yet initialized project (check 'help init')\")\n elif data == 2:\n print(\"Not yet installed project (think about: 'pip install -e .')\")\n", "id": "11098618", "language": "Python", "matching_score": 2.6810646057128906, "max_stars_count": 0, "path": "pyrustic/manager/handler/target_handler.py" }, { "content": "\n\nclass UnlinkHandler:\n \"\"\"\n Description\n -----------\n Use this command to unlink the currently linked Target.\n\n Usage\n -----\n - Description: Unlink the currently linked Target\n - Command: unlink\n \"\"\"\n\n def __init__(self, target, app_pkg, args):\n self._target = target\n self._app_pkg = app_pkg\n self._process(args)\n\n @property\n def target(self):\n return self._target\n\n @property\n def app_pkg(self):\n return self._app_pkg\n\n def _process(self, args):\n # args are present: invalid command\n if args:\n print(\"Wrong usage of this command\")\n return\n # valid command\n if self._target:\n self._target = None\n self._app_pkg = None\n print(\"Successfully unlinked !\")\n print(\"Target: {}\".format(self._target))\n", "id": "1403372", "language": "Python", "matching_score": 2.0057451725006104, "max_stars_count": 0, "path": "pyrustic/manager/handler/unlink_handler.py" }, { "content": "import os\nimport os.path\nimport sys\nimport signal\nimport traceback\nimport pyrustic\nfrom cmd import Cmd\nfrom pyrustic import pymisc\nfrom pyrustic.manager.misc.funcs import get_app_pkg\nfrom pyrustic.manager import install\nfrom pyrustic.manager.handler.link_handler import LinkHandler\nfrom pyrustic.manager.handler.unlink_handler import UnlinkHandler\nfrom pyrustic.manager.handler.relink_handler import RelinkHandler\nfrom pyrustic.manager.handler.target_handler import TargetHandler\nfrom pyrustic.manager.handler.recent_handler import RecentHandler\nfrom pyrustic.manager.handler.init_handler import InitHandler\nfrom pyrustic.manager.handler.run_handler import RunHandler\nfrom pyrustic.manager.handler.add_handler import AddHandler\nfrom pyrustic.manager.handler.build_handler import BuildHandler\nfrom pyrustic.manager.handler.publish_handler import PublishHandler\nfrom pyrustic.manager.handler.hub_handler import HubHandler\nfrom pyrustic.manager.handler.version_handler import VersionHandler\nfrom pyrustic.manager import constant\n\ntry:\n import readline\nexcept ImportError:\n readline = None\n\n\ndef main(argv=None, target=None):\n if not argv:\n argv = sys.argv[1:]\n pm = init_pyrustic_manager()\n # Non interactive mode\n if len(argv) > 0:\n _non_interactive_mode(pm, argv, target)\n # Enable Interactive Mode\n else:\n _interactive_mode(pm, target)\n\n\ndef exit_handler(pm):\n pm.onecmd(\"exit\")\n pm.postloop()\n sys.exit()\n\ndef init_pyrustic_manager():\n install.main()\n pm = PyrusticManager()\n # Interrupt process (typically CTRL+C or 'delete' char or 'break' key)\n signal_num = signal.SIGINT\n handler = lambda signum, frame, pm=pm: exit_handler(pm)\n signal.signal(signal_num, handler)\n return pm\n\n\n# 
decorator for all commands handlers\ndef guard(func):\n def obj(self, arg):\n cache = None\n try:\n arg = pymisc.parse_cmd(arg) if isinstance(arg, str) else arg\n cache = func(self, arg)\n except Exception as e:\n print(\"Oops... Exception occurred !\\n\")\n print(\"\".join(traceback.format_exception(*sys.exc_info())))\n return cache\n return obj\n\n\nclass PyrusticManager(Cmd):\n intro = (\"\"\"Welcome to Pyrustic Manager !\\n\"\"\"\n + \"\"\"Version: {}\\n\"\"\".format(pyrustic.__version__)\n + \"\"\"Type \"help\" or \"?\" to list commands. Type \"exit\" to leave.\\n\"\"\")\n\n prompt = \"(pyrustic) \"\n\n def __init__(self):\n super().__init__()\n self.__history_size = 420\n self.__history_file = None\n self.__target = None\n self.__app_pkg = None\n\n @property\n def target(self):\n if not self.__target:\n return None\n if not os.path.isabs(self.__target):\n self.__target = None\n return self.__target\n\n @target.setter\n def target(self, val):\n self.__target = val\n\n @property\n def app_pkg(self):\n if not self.__app_pkg:\n self.__app_pkg = get_app_pkg(self.target)\n return self.__app_pkg\n\n @app_pkg.setter\n def app_pkg(self, val):\n self.__app_pkg = val\n\n @property\n def history_size(self):\n return self.__history_size\n\n @property\n def history_file(self):\n if not self.__history_file:\n path = os.path.join(constant.MANAGER_SHARED_FOLDER,\n \"cmd_history.txt\")\n if not os.path.exists(path):\n with open(path, \"w\") as file:\n pass\n self.__history_file = path\n return self.__history_file\n\n # ===============================\n # OVERRIDING\n # ===============================\n def preloop(self):\n if readline and self.history_file:\n readline.read_history_file(self.history_file)\n\n def postloop(self):\n if readline:\n readline.set_history_length(self.history_size)\n readline.write_history_file(self.history_file)\n\n def precmd(self, line):\n if line == \"EOF\":\n line = \"\"\n print(\"\")\n return line\n\n def postcmd(self, stop, line):\n print(\"\")\n return stop\n\n def emptyline(self):\n pass\n\n # ===============================\n # COMMANDS\n # ===============================\n @guard\n def do_link(self, args):\n link_handler = LinkHandler(self.target, self.app_pkg,\n args)\n self.target = link_handler.target\n\n @guard\n def do_unlink(self, args):\n unlink_handler = UnlinkHandler(self.target,\n self.app_pkg,\n args)\n self.target = unlink_handler.target\n self.app_pkg = unlink_handler.app_pkg\n\n @guard\n def do_relink(self, args):\n relink_handler = RelinkHandler(self.target,\n self.app_pkg,\n args)\n self.target = relink_handler.target\n\n @guard\n def do_target(self, args):\n TargetHandler(self.target,\n self.app_pkg,\n args)\n\n @guard\n def do_recent(self, args):\n RecentHandler(self.target,\n self.app_pkg, args)\n\n @guard\n def do_init(self, args):\n InitHandler(self.target,\n self.app_pkg, args)\n\n @guard\n def do_run(self, args):\n RunHandler(self.target,\n self.app_pkg, args)\n\n @guard\n def do_add(self, args):\n AddHandler(self.target, self.app_pkg, args)\n\n @guard\n def do_build(self, args):\n BuildHandler(self.target, self.app_pkg)\n\n @guard\n def do_publish(self, args):\n PublishHandler(self.target, self.app_pkg)\n\n @guard\n def do_hub(self, args):\n HubHandler(self.target, self.app_pkg, args)\n\n @guard\n def do_version(self, args):\n VersionHandler()\n\n @guard\n def do_exit(self, args):\n print(\"Exiting...\")\n return True\n\n\n # ===============================\n # COMMANDS\n # ===============================\n def help_link(self):\n 
print(LinkHandler.__doc__)\n\n def help_unlink(self):\n print(UnlinkHandler.__doc__)\n\n def help_relink(self):\n print(RelinkHandler.__doc__)\n\n def help_target(self):\n print(TargetHandler.__doc__)\n\n def help_recent(self):\n print(RecentHandler.__doc__)\n\n def help_init(self):\n print(InitHandler.__doc__)\n\n def help_run(self):\n print(RunHandler.__doc__)\n\n def help_add(self):\n print(AddHandler.__doc__)\n\n def help_build(self):\n print(BuildHandler.__doc__)\n\n def help_publish(self):\n print(PublishHandler.__doc__)\n\n def help_hub(self):\n print(HubHandler.__doc__)\n\n def help_version(self):\n print(VersionHandler.__doc__)\n\n def help_exit(self):\n print(\"This command closes the program graciously.\")\n\n\ndef _interactive_mode(pm, target):\n pm.target = target\n pm.cmdloop()\n\n\ndef _non_interactive_mode(pm, data, target):\n if not target:\n target = os.getcwd()\n pm.target = target\n command = data[0]\n args = []\n handler = None\n if len(data) > 1:\n args = data[1:]\n if command == \"help\":\n handler = pm.do_help\n args = \" \".join(args)\n else:\n handler = getattr(pm, \"do_{}\".format(command))\n if handler is None:\n print(\"Failed to process your request. Type <help>\")\n handler(args)\n", "id": "3840789", "language": "Python", "matching_score": 1.934789776802063, "max_stars_count": 0, "path": "pyrustic/manager/main.py" }, { "content": "from pyrustic import pymisc\nfrom pyrustic.manager.main import main\n\n\n__all__ = [\"command\"]\n\n\ndef command(line=None, target=None):\n \"\"\"\n Param:\n - line is a string\n - target is a string\n \"\"\"\n print(\"EXECUTION\")\n args = None\n if line is not None:\n args = pymisc.parse_cmd(line)\n main(argv=args, target=target)\n", "id": "176185", "language": "Python", "matching_score": 0.41027963161468506, "max_stars_count": 0, "path": "pyrustic/manager/__init__.py" } ]
2.11032
marcelodavid
[ { "content": "# django\nfrom django.conf import settings\n\n# drf\nfrom rest_framework.routers import DefaultRouter, SimpleRouter\n\n# viewsets\nimport houm.heartbeats.views.heartbeats as heartbeats_view\nimport houm.heartbeats.views.moments as moments_view\n\nif settings.DEBUG:\n router = DefaultRouter()\nelse:\n router = SimpleRouter()\n\nrouter.register(\n r\"houmers/(?P<user_pk>[0-9]+)/heartbeats\",\n heartbeats_view.HeartbeatViewSet,\n basename=\"heartbeats\",\n)\n\nrouter.register(\n r\"houmers/(?P<user_pk>[0-9]+)/moments\",\n moments_view.MomentViewSet,\n basename=\"moments\",\n)\n\n\napp_name = \"heartbeats\"\nurlpatterns = router.urls\n", "id": "7017956", "language": "Python", "matching_score": 3.3710570335388184, "max_stars_count": 0, "path": "houm/heartbeats/urls.py" }, { "content": "# django\nfrom django.conf import settings\n\n# drf\nfrom rest_framework.routers import DefaultRouter, SimpleRouter\n\n# viewsets\nimport houm.properties.views as properties_view\n\nif settings.DEBUG:\n router = DefaultRouter()\nelse:\n router = SimpleRouter()\n\nrouter.register(\n r\"houmers/(?P<user_pk>[0-9]+)/properties\",\n properties_view.PropertyViewSet,\n basename=\"properties\",\n)\n\n\napp_name = \"properties\"\nurlpatterns = router.urls\n", "id": "7305262", "language": "Python", "matching_score": 1.3212043046951294, "max_stars_count": 0, "path": "houm/properties/urls.py" }, { "content": "\"\"\"heartbeats viewset\"\"\"\n\n# django\nfrom django.shortcuts import get_object_or_404\nfrom django.contrib.gis.measure import D\n\n# drf\nfrom rest_framework.mixins import CreateModelMixin\nfrom rest_framework.viewsets import GenericViewSet\n\n# permissions\nfrom rest_framework.permissions import IsAuthenticated\n\n# models\nfrom houm.heartbeats.models import Heartbeat\nfrom houm.users.models import User\nfrom houm.properties.models import Property\n\n# serializers\nfrom houm.heartbeats.serializers import HeartbeatModelSerializer\n\n# redis\nfrom redis import Redis\n\n# utils\nfrom datetime import datetime\nimport math\n\n\nclass HeartbeatViewSet(GenericViewSet, CreateModelMixin):\n \"\"\"Handle Heartbeats received from houmers.\n Heartbeat must be send periodicaly.\n For example each 10 seconds\n \"\"\"\n\n permission_classes = [IsAuthenticated]\n queryset = Heartbeat.objects.all()\n serializer_class = HeartbeatModelSerializer\n\n def dispatch(self, request, *args, **kwargs):\n \"\"\"Verify that the resource exits\"\"\"\n user_pk = kwargs.pop(\"user_pk\")\n self.user = get_object_or_404(User, pk=user_pk)\n return super(HeartbeatViewSet, self).dispatch(request, *args, **kwargs)\n\n def perform_create(self, serializer):\n \"\"\"Asociate user with the heartbeat and verify if houmer is into a property\"\"\"\n heartbeat = serializer.save()\n heartbeat.user = self.user\n\n # asocciate property if is near\n nearest_property = Property.objects.filter(\n location__distance_lte=(heartbeat.location, D(m=200))\n ).first()\n\n if nearest_property:\n heartbeat.property = nearest_property\n\n r = Redis(\n host=\"redis\", port=6379, db=5, encoding=\"utf-8\", decode_responses=True\n )\n\n # get last saved position\n last_location = r.hgetall(f\"user-{self.user.pk}\")\n\n # save actual position in Redis\n r.hmset(\n f\"user-{self.user.pk}\",\n {\n \"y\": heartbeat.location.y,\n \"x\": heartbeat.location.x,\n \"ts\": datetime.timestamp(heartbeat.created),\n },\n )\n\n r.close()\n if bool(last_location):\n # Haversine formula:\n # a = sin²(Δφ/2) + cos φ1 ⋅ cos φ2 ⋅ sin²(Δλ/2)\n # c = 2 ⋅ atan2( √a, √(1−a) )\n # d = R 
⋅ c\n y = float(last_location[\"y\"])\n x = float(last_location[\"x\"])\n\n R = 6371e3\n fi2 = heartbeat.location.y * math.pi / 180 # in radians\n fi1 = y * math.pi / 180\n delta_fi = (heartbeat.location.y - y) * math.pi / 180\n delta_lambda = (heartbeat.location.x - x) * math.pi / 180\n a = (\n math.sin(delta_fi / 2) ** 2\n + math.cos(fi1) * math.cos(fi2) * math.sin(delta_lambda / 2) ** 2\n )\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n d = R * c # in meters\n delta_time = abs(\n datetime.timestamp(heartbeat.created) - float(last_location[\"ts\"])\n )\n speed = (abs(d) / delta_time) * 3.6 # in km/hs\n\n heartbeat.speed = speed\n\n heartbeat.save()\n", "id": "8221811", "language": "Python", "matching_score": 4.946018218994141, "max_stars_count": 0, "path": "houm/heartbeats/views/heartbeats.py" }, { "content": "\"\"\"moments viewsets\"\"\"\n\n# django\nfrom django.shortcuts import get_object_or_404\n\n# drf\nfrom rest_framework.mixins import ListModelMixin\nfrom rest_framework.viewsets import GenericViewSet\n\n# permissions\nfrom rest_framework.permissions import IsAuthenticated\n\n# models\nfrom houm.heartbeats.models import Heartbeat\nfrom houm.users.models import User\n\n# serializer\nfrom houm.heartbeats.serializers import HearbeatsMomentModelSerializer\n\n# filters\nfrom django_filters import rest_framework as filter\nfrom houm.heartbeats.filters import MomentFilter\n\n\nclass MomentViewSet(GenericViewSet, ListModelMixin):\n \"\"\"List houmers moments where the houmer\n exceeds certain speed\"\"\"\n\n permission_classes = [IsAuthenticated]\n serializer_class = HearbeatsMomentModelSerializer\n filter_backends = [filter.DjangoFilterBackend]\n filterset_class = MomentFilter\n\n def dispatch(self, request, *args, **kwargs):\n \"\"\"Verify that the resource exits\"\"\"\n user_pk = kwargs.pop(\"user_pk\")\n self.user = get_object_or_404(User, pk=user_pk)\n return super(MomentViewSet, self).dispatch(request, *args, **kwargs)\n\n def get_queryset(self):\n queryset = Heartbeat.objects.filter(user=self.user)\n return queryset\n", "id": "1678922", "language": "Python", "matching_score": 5.41798210144043, "max_stars_count": 0, "path": "houm/heartbeats/views/moments.py" }, { "content": "\"\"\"properties viewset\"\"\"\n\n# django\nfrom django.shortcuts import get_object_or_404\nfrom django.db.models import Max, Min\n\n# drf\nfrom rest_framework.viewsets import GenericViewSet\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom rest_framework.status import HTTP_200_OK\n\n# permissions\nfrom rest_framework.permissions import IsAuthenticated\nfrom houm.properties import serializers\n\n# models\nfrom houm.properties.models import Property\nfrom houm.users.models import User\n\n# filters\nfrom django_filters import rest_framework as filter\nfrom houm.properties.filters import PropertyFilter\n\n# serializers\nfrom houm.properties.serializers import VisitedSerializer\n\n\nclass PropertyViewSet(GenericViewSet):\n \"\"\"Property Viewset.\"\"\"\n\n queryset = Property.objects.all()\n permission_classes = [IsAuthenticated]\n filter_backends = [filter.DjangoFilterBackend]\n filterset_class = PropertyFilter\n\n def dispatch(self, request, *args, **kwargs):\n \"\"\"Verify that the resource exits\"\"\"\n user_pk = kwargs.pop(\"user_pk\")\n self.user = get_object_or_404(User, pk=user_pk)\n return super(PropertyViewSet, self).dispatch(request, *args, **kwargs)\n\n @action(detail=False, methods=[\"get\"])\n def visited(self, request, *args, **kwargs):\n queryset 
= self.filter_queryset(self.get_queryset())\n data = (\n queryset.filter(heartbeat__user=self.user)\n .values(\"name\", \"heartbeat__user__username\", \"location\")\n .annotate(\n duration=Max(\"heartbeat__created\") - Min(\"heartbeat__created\"),\n start=Min(\"heartbeat__created\"),\n end=Max(\"heartbeat__created\"),\n )\n )\n\n serializer = VisitedSerializer(data=data, many=True)\n serializer.is_valid()\n\n return Response(data=serializer.data, status=HTTP_200_OK)\n", "id": "11198599", "language": "Python", "matching_score": 1.7081810235977173, "max_stars_count": 0, "path": "houm/properties/views.py" }, { "content": "from .properties import PropertiesModelSerializer\nfrom .visited import VisitedSerializer\n", "id": "12211656", "language": "Python", "matching_score": 2.1263492107391357, "max_stars_count": 0, "path": "houm/properties/serializers/__init__.py" }, { "content": "\"\"\"Properties Serializers\"\"\"\n\n# drf\nfrom rest_framework import serializers\n\n# models\nfrom houm.properties.models import Property\n\n\nclass PropertiesModelSerializer(serializers.ModelSerializer):\n \"\"\"Properties Model Serializers\"\"\"\n\n class Meta:\n model = Property\n fields = \"__all__\"\n", "id": "12255608", "language": "Python", "matching_score": 0.4273805022239685, "max_stars_count": 0, "path": "houm/properties/serializers/properties.py" }, { "content": "\"\"\"Properties Serializers\"\"\"\n\n# drf\nfrom rest_framework import serializers\nfrom rest_framework_gis.serializers import GeometryField\n\n\nclass VisitedSerializer(serializers.Serializer):\n \"\"\"For Visited Report representation\"\"\"\n\n name = serializers.CharField(read_only=True)\n username = serializers.CharField(read_only=True, source=\"heartbeat__user__username\")\n duration = serializers.DurationField(read_only=True)\n start = serializers.DateTimeField(read_only=True)\n end = serializers.DateTimeField(read_only=True)\n location = GeometryField(read_only=True)\n", "id": "8613533", "language": "Python", "matching_score": 2.2256953716278076, "max_stars_count": 0, "path": "houm/properties/serializers/visited.py" }, { "content": "\"\"\"Heartbeats Serializers\"\"\"\n\n# django\nfrom django.contrib.gis.geos import Point\n\n# drf\nfrom rest_framework import serializers\nfrom rest_framework_gis.serializers import GeometryField\n\n# models\nfrom houm.heartbeats.models import Heartbeat\n\n# serializers\nfrom houm.properties.serializers import PropertiesModelSerializer\n\n\nclass HeartbeatModelSerializer(serializers.ModelSerializer):\n \"\"\"Validate and create a Heartbeat. 
Calc params\"\"\"\n\n lat = serializers.FloatField(write_only=True)\n lng = serializers.FloatField(write_only=True)\n property = PropertiesModelSerializer(read_only=True)\n location = GeometryField(read_only=True)\n\n class Meta:\n model = Heartbeat\n fields = [\"id\", \"lat\", \"lng\", \"location\", \"property\", \"speed\"]\n read_only_fields = [\"id\", \"location\", \"property\", \"speed\"]\n\n def create(self, validated_data):\n point = Point(validated_data[\"lng\"], validated_data[\"lat\"])\n heartbeat = Heartbeat.objects.create(location=point)\n return heartbeat\n", "id": "3015586", "language": "Python", "matching_score": 4.653648853302002, "max_stars_count": 0, "path": "houm/heartbeats/serializers/heartbeats.py" }, { "content": "\"\"\"moments Serializers\"\"\"\n\n# drf\nfrom rest_framework import serializers\nfrom rest_framework_gis.serializers import GeometryField\n\n# models\nfrom houm.heartbeats.models import Heartbeat\n\n\nclass HearbeatsMomentModelSerializer(serializers.ModelSerializer):\n \"\"\"Validate and create a Heartbeat. Calc params\"\"\"\n\n location = GeometryField(read_only=True)\n\n class Meta:\n model = Heartbeat\n fields = [\"id\", \"location\", \"speed\", \"created\"]\n", "id": "372932", "language": "Python", "matching_score": 3.2284607887268066, "max_stars_count": 0, "path": "houm/heartbeats/serializers/moments.py" }, { "content": "from .heartbeats import HeartbeatModelSerializer\nfrom .moments import HearbeatsMomentModelSerializer\n", "id": "3423892", "language": "Python", "matching_score": 0.7970827221870422, "max_stars_count": 0, "path": "houm/heartbeats/serializers/__init__.py" }, { "content": "from .moments import MomentFilter\n", "id": "4739108", "language": "Python", "matching_score": 1.0041701793670654, "max_stars_count": 0, "path": "houm/heartbeats/filters/__init__.py" }, { "content": "from .properties import PropertyFilter", "id": "4373557", "language": "Python", "matching_score": 0.8411814570426941, "max_stars_count": 0, "path": "houm/properties/filters/__init__.py" }, { "content": "\"\"\"moments filters\"\"\"\n\n# django_filters\nfrom django_filters import rest_framework as filters\n\n# models\nfrom houm.heartbeats.models import Heartbeat\n\n\nclass MomentFilter(filters.FilterSet):\n \"\"\"Transaccion django filter backends\"\"\"\n\n speed__gte = filters.NumberFilter(field_name=\"speed\", lookup_expr=\"gte\")\n speed__lte = filters.NumberFilter(field_name=\"speed\", lookup_expr=\"lte\")\n fecha__date = filters.DateFilter(field_name=\"created\", lookup_expr=\"date\")\n\n class Meta:\n model = Heartbeat\n fields = [\"speed__gte\", \"speed__lte\", \"fecha__date\"]\n", "id": "11364114", "language": "Python", "matching_score": 3.4531493186950684, "max_stars_count": 0, "path": "houm/heartbeats/filters/moments.py" }, { "content": "\"\"\"properties filters\"\"\"\n\n# django_filters\nfrom django_filters import rest_framework as filters\n\n# models\nfrom houm.properties.models import Property\n\n\nclass PropertyFilter(filters.FilterSet):\n \"\"\"Transaccion django filter backends\"\"\"\n\n fecha__gte = filters.DateFilter(field_name=\"heartbeat__created\", lookup_expr=\"gte\")\n fecha__lte = filters.DateFilter(field_name=\"heartbeat__created\", lookup_expr=\"lte\")\n fecha__date = filters.DateFilter(field_name=\"heartbeat__created\", lookup_expr=\"date\")\n\n class Meta:\n model = Property\n fields = [\"fecha__gte\", \"fecha__lte\", \"fecha__date\"]\n", "id": "4666883", "language": "Python", "matching_score": 0.9626391530036926, "max_stars_count": 0, "path": 
"houm/properties/filters/properties.py" }, { "content": "# Generated by Django 3.2.11 on 2022-02-02 02:38\n\nfrom django.conf import settings\nimport django.contrib.gis.db.models.fields\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('properties', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Heartbeat',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created', models.DateTimeField(auto_now_add=True, help_text='Fecha de creacion', verbose_name='created_at')),\n ('modified', models.DateTimeField(auto_now=True, help_text='Fecha de ultima modificacion', verbose_name='modified_at')),\n ('location', django.contrib.gis.db.models.fields.PointField(blank=True, null=True, srid=4326)),\n ('velocity', models.PositiveIntegerField(default=0)),\n ('property', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='properties.property')),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'ordering': ['-created', '-modified'],\n 'get_latest_by': 'created',\n 'abstract': False,\n },\n ),\n ]\n", "id": "5969071", "language": "Python", "matching_score": 4.834532260894775, "max_stars_count": 0, "path": "houm/heartbeats/migrations/0001_initial.py" }, { "content": "# django\nfrom django.contrib.gis.db import models\n\n\nclass BModel(models.Model):\n \"\"\"\n Proxy Utilities model.\n This models is abstract model to provide timestamp info.\n All models inherit from this.\n \"\"\"\n\n created = models.DateTimeField(\n \"created_at\", auto_now_add=True, help_text=\"Fecha de creacion\"\n )\n modified = models.DateTimeField(\n \"modified_at\", auto_now=True, help_text=\"Fecha de ultima modificacion\"\n )\n\n class Meta:\n abstract = True\n get_latest_by = \"created\"\n ordering = [\"-created\", \"-modified\"]", "id": "11148614", "language": "Python", "matching_score": 2.24247670173645, "max_stars_count": 0, "path": "houm/utils/models.py" }, { "content": "# Generated by Django 3.2.11 on 2022-02-02 00:21\n\nimport datetime\nimport django.core.validators\nfrom django.db import migrations, models\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='user',\n options={'get_latest_by': 'created', 'ordering': ['-created', '-modified']},\n ),\n migrations.AddField(\n model_name='user',\n name='created',\n field=models.DateTimeField(auto_now_add=True, default=datetime.datetime(2022, 2, 2, 0, 21, 27, 78375, tzinfo=utc), help_text='Fecha de creacion', verbose_name='created_at'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='user',\n name='direction',\n field=models.CharField(blank=True, max_length=150, null=True),\n ),\n migrations.AddField(\n model_name='user',\n name='modified',\n field=models.DateTimeField(auto_now=True, help_text='Fecha de ultima modificacion', verbose_name='modified_at'),\n ),\n migrations.AddField(\n model_name='user',\n name='phone',\n field=models.CharField(blank=True, max_length=20, validators=[django.core.validators.RegexValidator(message='El formato deber ser: +5959******. 
Con un maximo de 15 digitos', regex='\\\\+?5?\\\\d{9,15}$')]),\n ),\n migrations.AddField(\n model_name='user',\n name='uid',\n field=models.CharField(db_index=True, default=2979427, error_messages={'unique': 'Ya existe un usuario con esta credencial'}, help_text='User identification number', max_length=50, unique=True, verbose_name='user id'),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='user',\n name='email',\n field=models.EmailField(error_messages={'unique': 'Ya existe un usuario con este email.'}, max_length=254, unique=True, verbose_name='email address'),\n ),\n ]\n", "id": "7581570", "language": "Python", "matching_score": 5.100217819213867, "max_stars_count": 0, "path": "houm/users/migrations/0002_auto_20220202_0021.py" }, { "content": "# django\nfrom django.contrib.auth.models import AbstractUser\nfrom django.db.models import CharField\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\nfrom django.contrib.gis.db import models\nfrom django.core.validators import RegexValidator\n\n# utils\nfrom houm.utils.models import BModel\n\n\nclass User(BModel, AbstractUser):\n \"\"\"\n Default custom user model for houm.\n If adding fields that need to be filled at user signup,\n check forms.SignupForm and forms.SocialSignupForms accordingly.\n \"\"\"\n\n #: First and last name do not cover name patterns around the globe\n name = CharField(_(\"Name of User\"), blank=True, max_length=255)\n first_name = None # type: ignore\n last_name = None # type: ignore\n\n email = models.EmailField(\n \"email address\",\n unique=True,\n error_messages={\"unique\": \"Ya existe un usuario con este email.\"},\n )\n phone_regex = RegexValidator(\n regex=r\"\\+?5?\\d{9,15}$\",\n message=\"El formato deber ser: +5959******. 
Con un maximo de 15 digitos\",\n )\n phone = models.CharField(validators=[phone_regex], max_length=20, blank=True)\n USERNAME_FIELD = \"email\"\n REQUIRED_FIELDS = [\"username\", \"uid\"]\n uid = models.CharField(\n \"user id\",\n help_text=\"User identification number\",\n max_length=50,\n unique=True,\n db_index=True,\n error_messages={\"unique\": \"Ya existe un usuario con esta credencial\"},\n )\n direction = models.CharField(max_length=150, blank=True, null=True)\n is_houmer = models.BooleanField(default=False)\n\n def get_absolute_url(self):\n \"\"\"Get url for user's detail view.\n\n Returns:\n str: URL for user detail.\n\n \"\"\"\n return reverse(\"users:detail\", kwargs={\"username\": self.username})\n", "id": "6041856", "language": "Python", "matching_score": 0.7895464897155762, "max_stars_count": 0, "path": "houm/users/models.py" }, { "content": "from django.apps import AppConfig\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass HeartbeatsConfig(AppConfig):\n name = \"houm.heartbeats\"\n verbose_name = _(\"Heartbeats\")\n", "id": "12293507", "language": "Python", "matching_score": 2.7424182891845703, "max_stars_count": 0, "path": "houm/heartbeats/apps.py" }, { "content": "from django.apps import AppConfig\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass PropertiesConfig(AppConfig):\n name = \"houm.properties\"\n verbose_name = _(\"Properties\")\n", "id": "10895699", "language": "Python", "matching_score": 0.38814985752105713, "max_stars_count": 0, "path": "houm/properties/apps.py" }, { "content": "# Generated by Django 3.2.11 on 2022-02-02 16:51\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('heartbeats', '0002_alter_heartbeat_user'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='heartbeat',\n old_name='velocity',\n new_name='speed',\n ),\n ]\n", "id": "6936348", "language": "Python", "matching_score": 1.5966840982437134, "max_stars_count": 0, "path": "houm/heartbeats/migrations/0003_rename_velocity_heartbeat_speed.py" }, { "content": "# Generated by Django 3.2.11 on 2022-02-02 01:04\n\nimport django.contrib.gis.db.models.fields\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Property',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=50)),\n ('direction', models.CharField(max_length=180)),\n ('number', models.PositiveIntegerField(blank=True, null=True)),\n ('location', django.contrib.gis.db.models.fields.PointField(blank=True, null=True, srid=4326)),\n ],\n ),\n ]\n", "id": "12762268", "language": "Python", "matching_score": 3.6294901371002197, "max_stars_count": 0, "path": "houm/properties/migrations/0001_initial.py" }, { "content": "# django\nfrom django.contrib.gis.db import models\n\n\nclass Property(models.Model):\n \"\"\"Property model\"\"\"\n\n name = models.CharField(max_length=50)\n direction = models.CharField(max_length=180)\n number = models.PositiveIntegerField(blank=True, null=True)\n location = models.PointField(blank=True, null=True)\n\n def __srt__(self):\n return f\"{self.name}({self.direction}-{self.number})\"\n", "id": "1810416", "language": "Python", "matching_score": 1.9797403812408447, "max_stars_count": 0, "path": "houm/properties/models.py" }, { "content": "# django\nfrom django.contrib.gis.db import 
models\n\n# utils\nfrom houm.utils.models import BModel\n\n# models\nfrom houm.users.models import User\nfrom houm.properties.models import Property\n\n\nclass Heartbeat(BModel):\n    \"\"\"Heartbeat Model.\n    We save Positions of houmers and calc relevant parameters\n    like if houmer is in Property and its velocity\n    \"\"\"\n\n    location = models.PointField(blank=True, null=True)\n    user = models.ForeignKey(User, on_delete=models.CASCADE, blank=True, null=True)\n    speed = models.PositiveIntegerField(default=0)\n    property = models.ForeignKey(\n        Property, on_delete=models.SET_NULL, blank=True, null=True\n    )\n\n    def __str__(self):\n        return f\"{self.location} ({self.speed} km/h)\"\n", "id": "5564327", "language": "Python", "matching_score": 2.4017484188079834, "max_stars_count": 0, "path": "houm/heartbeats/models.py" }, { "content": "# django\nfrom django.contrib import admin\nfrom django.contrib.gis.db import models\n\n# models\nfrom houm.properties.models import Property\n\n# widgets\nfrom mapwidgets.widgets import GooglePointFieldWidget\n\n\n@admin.register(Property)\nclass PropertyAdmin(admin.ModelAdmin):\n    \"\"\"Properties admin\"\"\"\n    formfield_overrides = {models.PointField: {\"widget\": GooglePointFieldWidget}}\n    list_display = (\"name\", \"direction\", \"number\", \"location\")", "id": "10339843", "language": "Python", "matching_score": 0.7463786005973816, "max_stars_count": 0, "path": "houm/properties/admin.py" } ]
2.176022
tonogaishunsuke
[ { "content": "from typing import Union\nfrom xenonpy.descriptor.base import BaseFeaturizer, BaseDescriptor\n\nfrom xenonpy.model.training import Trainer\nfrom xenonpy.datatools import Scaler\nimport torch, numpy\n\nclass NNPropDescriptor(BaseFeaturizer):\n def __init__(self, fp_calc:Union[BaseDescriptor, BaseFeaturizer], nnmdl:Trainer, scaler:Scaler):\n \n super().__init__(n_jobs=0)\n self.fp = fp_calc\n self.nn = nnmdl\n self.scaler = scaler\n self.columns = ['thermal_conductivity', 'thermal_diffusivity', 'density',\n 'static_dielectric_const', 'nematic_order_param', 'Cp', 'Cv',\n 'compress_T', 'compress_S', 'bulk_modulus_T', 'bulk_modulus_S',\n 'speed_of_sound', 'volume_expansion', 'linear_expansion']\n \n def featurize(self, x,):\n tmp_df = self.fp.transform(x)\n output = self.nn.predict(x_in=torch.tensor(tmp_df.values, dtype=torch.float)).detach().numpy()\n return pd.DataFrame(self.scaler.inverse_transform(output), index=tmp_df.index,columns=self.columns)\n \n @property\n def feature_labels(self):\n return self.columns\n ", "id": "10118961", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "xenonpy/contrib/nn_prop_descriptor/nn_prop_descriptor.py" } ]
0
sebastiangv
[ { "content": "import pytest\nfrom os.path import join, abspath, dirname\n\n\[email protected]\ndef app():\n from basic_rate.app import app\n from webtest import TestApp\n return TestApp(app)\n\n\[email protected]\ndef spanish_months_abbr():\n return ['ene', 'feb', 'mar', 'abr', 'may', 'jun',\n 'jul', 'ago', 'sep', 'oct', 'nov', 'dic']\n\n\[email protected]\ndef scraper():\n from basic_rate.scraper.bccr import BasicRate\n return BasicRate()\n\n\[email protected]\ndef table_headers():\n return [2008, 2009, 2010, 2011, 2012, 2013, \n 2014, 2015, 2016, 2017, 2018, 2019] \n\n\[email protected]\ndef data_raw():\n return [['1 ene', '', '11.50', '8.25', '8.00', '8.00', '9.20', '6.50', '7.20', '5.95', '4.45', '5.95', '5.00'],\n ['2 ene', '', '11.50', '8.25', '8.00', '8.00', '9.20', '6.50', '7.20', '5.95', '4.45', '5.95', ''],\n ['3 ene', '', '11.50', '8.25', '8.00', '8.00', '9.10', '6.50', '7.20', '5.95', '4.45', '5.95', '8.00'],\n ['1 feb', '7.25', '11.25', '8.00', '', '8.75', '8.30', '6.55', '7.20', '5.95', '4.55', '6.05', '6.25'], \n ['2 feb', '7.25', '11.25', '8.00', '', '8.75', '8.30', '6.55', '7.20', '5.95', '4.60', '6.05', '6.25']]\n\n\[email protected]\ndef data_obj():\n return [['1 ene', '', 11.50, 8.25, 8.00, 8.00, 9.20, 6.50, 7.20, 5.95, 4.45, 5.95, 5.00], \n ['2 ene', '', 11.50, 8.25, 8.00, 8.00, 9.20, 6.50, 7.20, 5.95, 4.45, 5.95, ''], \n ['3 ene', '', 11.50, 8.25, 8.00, 8.00, 9.10, 6.50, 7.20, 5.95, 4.45, 5.95, 8.00], \n ['1 feb', 7.25, 11.25, 8.00, '', 8.75, 8.30, 6.55, 7.20, 5.95, 4.55, 6.05, 6.25], \n ['2 feb', 7.25, 11.25, 8.00, '', 8.75, 8.30, 6.55, 7.20, 5.95, 4.60, 6.05, 6.25]]\n\n\[email protected]\ndef data_html():\n from bs4 import BeautifulSoup\n sample_html = join(abspath(dirname(__file__)), 'data/bccr.html')\n with open(sample_html, 'r', encoding='utf-8') as f:\n data = f.read()\n return BeautifulSoup(data, 'html.parser')\n\n\n", "id": "1728990", "language": "Python", "matching_score": 3.274611473083496, "max_stars_count": 0, "path": "tests/conftest.py" }, { "content": "import pytest\n\n\ndef test_years_match_table_headers(scraper, table_headers):\n years = [year for year in scraper.get_years()]\n assert years == table_headers\n\n\ndef test_months_abbreviations_are_in_spanish(scraper, spanish_months_abbr):\n months = [month for month in scraper.get_months_abbr()]\n assert months[0] == ''\n assert months[1:] == spanish_months_abbr\n \n \ndef test_basic_rate_table_is_retrived():\n pass\n\n\ndef test_parse_basic_rate_html_to_2d_array(scraper, data_raw, data_html):\n assert scraper.parse_basic_rate_html(data_html) == data_raw\n\n\[email protected]('year, month, avg',\n [(2019, None, '6.38'),\n (2018, None, '5.99'),\n (2008, 'ene', None),\n (2011, 'ene', '8.00'),\n (2019, 'ene', '6.50'),\n (None, 'ene', '7.43'),\n (None, 'feb', '7.28')])\ndef test_basic_rate_avg_by_year_month_or_both(scraper, data_html, year, month, avg):\n scraper.parse_basic_rate_html(data_html)\n assert scraper.get_average(year, month) == avg\n", "id": "2036582", "language": "Python", "matching_score": 0.3446139395236969, "max_stars_count": 0, "path": "tests/unit/test_scraper.py" }, { "content": "def test_index(app):\n assert app.get('/').status == '200 OK'\n", "id": "10870683", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "tests/functional/test_app.py" }, { "content": "from setuptools import setup, find_packages\n\nwith open('README.md', 'r') as f:\n readme = f.read()\n\nsetup(\n name='basic_rate',\n version='0.1.0',\n license='MIT',\n description='Retrieve basic rate avarage 
by year, month also year and month',\n long_description=readme,\n author='<NAME>',\n author_email='<EMAIL>',\n url='https://github.com/sebastiangv/basic_rate',\n packages=find_packages('src'),\n package_dir={'': 'src'},\n include_package_data=True,\n setup_requires=[\n 'pytest-runner',\n 'flake8'\n ],\n install_requires=[\n 'bottle==0.12.17',\n 'bs4',\n 'requests'\n ],\n extras_require={\n \"test\": [\n 'pytest',\n 'coverage',\n 'pytest-cov',\n 'webtest',\n ]\n }\n)\n", "id": "4751280", "language": "Python", "matching_score": 1.5407171249389648, "max_stars_count": 0, "path": "setup.py" }, { "content": "import locale\nfrom calendar import month_abbr as months\nfrom bs4 import BeautifulSoup\nfrom os import getenv\nfrom requests import (get, ReadTimeout, ConnectTimeout, RequestException,\n HTTPError, Timeout, ConnectionError, utils)\n\n\nclass BasicRate:\n\n def __init__(self):\n self.TIME_OUT = float(getenv('TIME_OUT') or 20)\n self.URL = getenv('URL') or \\\n 'https://gee.bccr.fi.cr/indicadoreseconomicos/Cuadros/frmVerCatCuadro.aspx?idioma=1&CodCuadro=%2017'\n self.data = []\n\n def get_years(self):\n return range(2008, 2020)\n\n def get_months_abbr(self):\n locale.setlocale(locale.LC_ALL, 'es_ES.UTF-8')\n return months\n\n def get_basic_rate_html(self):\n error = None\n data = []\n try:\n headers = utils.default_headers()\n headers['User-Agent'] = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'\n response = get(self.URL, timeout=self.TIME_OUT, headers=headers)\n response.raise_for_status()\n except (ConnectTimeout, HTTPError, ReadTimeout,\n Timeout, ConnectionError, RequestException) as err:\n error = str(err)\n print('ERROR: %s' % error)\n else:\n data = BeautifulSoup(response.text, 'html.parser')\n finally:\n return (data, error)\n\n def parse_basic_rate_html(self, html):\n rows = html.find('table', attrs={'id': 'Table17'}).find_all('table')\n self.data = []\n for row in rows[1:]:\n row = [r for r in row.find_all('td', attrs={'class': 'celda17'})]\n row = [e.text.strip().lower().replace(',', '.') for e in row]\n self.data.append(row)\n return self.data\n\n def get_average(self, year=None, month=None):\n if year:\n year_index = int([y for y in self.get_years()].index(year) + 1)\n self.data = [float(r[year_index])\n for r in (filter(lambda row: month in row[0], self.data)\n if month is not None else self.data) if r[year_index] != '']\n else:\n self.data = [r[1:] for r in filter(lambda row: month in row[0], self.data)]\n # Nested List Comprehension to flatten the given matrix and filter\n self.data = [float(val) for sublist in self.data for val in sublist if val != '']\n return '%.2f' % (sum(self.data) / len(self.data)) if len(self.data) > 0 else None\n\ndef main():\n pass\n\nif __name__ == \"__main__\":\n main()\n", "id": "7580000", "language": "Python", "matching_score": 1.9631977081298828, "max_stars_count": 0, "path": "src/basic_rate/scraper/bccr.py" }, { "content": "from os import getenv\nfrom bottle import default_app, Bottle, TEMPLATE_PATH, run, template, request, response, error, redirect\nfrom basic_rate.scraper.bccr import BasicRate\n\n\nclass Controller:\n\n def __init__(self):\n self.scraper = BasicRate()\n self.years = self.scraper.get_years() or None\n self.months = self.scraper.get_months_abbr() or None\n \n def index(self):\n print(\"> index\")\n return self.form_handler()\n\n def avg_handler(self):\n year = None\n error = None\n data = None\n avg = None\n month = ('' if request.forms.get('month') == '' else 
str(request.forms.get('month')))\n year = ('' if request.forms.get('year') == '' else int(request.forms.get('year')))\n print(\"> types: year %s month %s avg %s error %s\" % (type(year), type(month), type(avg), type(error)))\n if year == '' and month == '':\n return self.form_handler(error='At least one value must be selected.')\n else:\n data, error = self.scraper.get_basic_rate_html()\n data = self.scraper.parse_basic_rate_html(data)\n avg = self.scraper.get_average(year, month)\n avg = 'Not available yet' if avg is None else avg\n return self.form_handler(year=year, month=month, avg=avg, error=error)\n\n def error404_handler(self):\n response.status = 404\n return self.form_handler(error='Page Not found')\n\n def form_handler(self, year=None, month=None, avg=None, error=None):\n data = {'years': self.years,\n 'months': self.months,\n 'year': year,\n 'month': month,\n 'avg': ('Not available yet' if avg is None else avg),\n 'error': error}\n print(\"> form_handler: year %s month %s avg %s error %s\" % (year, month, avg, error))\n return template('index', data=data)\n\n", "id": "6857496", "language": "Python", "matching_score": 3.2993204593658447, "max_stars_count": 0, "path": "src/basic_rate/controller.py" }, { "content": "from os import getenv\nfrom bottle import Bottle, TEMPLATE_PATH, run\nfrom basic_rate.controller import Controller\n\napp = application = Bottle()\nTEMPLATE_PATH.insert(0, getenv('TEMPLATE_PATH') or 'basic_rate/views')\n\napp.route('/', 'GET', Controller().index)\napp.route('/', 'POST', Controller().avg_handler)\n\[email protected](404)\ndef error404(error):\n return Controller().error404_handler()\n\ndef main():\n run(app, host='0.0.0.0', port=8080, debug=True, reloader=True)\n\nif __name__ == '__main__':\n main()", "id": "6519030", "language": "Python", "matching_score": 2.170435905456543, "max_stars_count": 0, "path": "src/basic_rate/app.py" }, { "content": "from basic_rate.app import Controller\n\n\ndef test_index_controller():\n assert 'Get BCCR passive basic rate' in Controller().index()\n\n\ndef test_error404_handler():\n assert 'Page Not found' in Controller().error404_handler()", "id": "9963850", "language": "Python", "matching_score": 1.631152868270874, "max_stars_count": 0, "path": "tests/unit/test_app_controller.py" }, { "content": "from basic_rate.scraper.bccr import main\n\nmain()\n", "id": "9580463", "language": "Python", "matching_score": 0.3009999692440033, "max_stars_count": 0, "path": "src/basic_rate/scraper/__main__.py" } ]
1.631153
Irioth
[ { "content": "from setuptools import setup, find_packages\n\npackages = find_packages('.', include=['yandexcloud*', 'yandex*'])\n\nsetup(name='yandexcloud',\n version='0.119.0',\n description='The Yandex.Cloud official SDK',\n url='https://github.com/yandex-cloud/python-sdk',\n author='<NAME>',\n author_email='<EMAIL>',\n license='MIT',\n install_requires=[\n 'cryptography>=2.8',\n 'grpcio>=1.38.1',\n 'googleapis-common-protos>=1.53.0',\n 'pyjwt>=1.7.1',\n 'requests>=2.22.0',\n 'six>=1.14.0',\n ],\n classifiers=[\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n ],\n tests_require=['pytest'],\n packages=packages,\n zip_safe=False)\n", "id": "6827581", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "setup.py" } ]
0
mberingen
[ { "content": "from django.conf.urls import include, url\n\nfrom .views import OutboxListView, MailTemplateView\n\n\nurlpatterns = [\n url(r'^$', OutboxListView.as_view(), name='outbox'),\n url(r'^(?P<id>.+)/$', MailTemplateView.as_view(), name='mail'),\n]\n", "id": "5465602", "language": "Python", "matching_score": 2.6612050533294678, "max_stars_count": 0, "path": "django_outbox/urls.py" }, { "content": "from django.views.generic import TemplateView, ListView\n\nfrom .outbox import Outbox\n\n\nclass OutboxListView(ListView):\n template_name = 'django_outbox/outbox.html'\n context_object_name = 'mails'\n\n def get_queryset(self):\n return sorted(Outbox().all(), key=lambda r: r.when)\n\n\nclass MailTemplateView(TemplateView):\n template_name = 'django_outbox/mail.html'\n\n def get_context_data(self, id, **kwargs):\n context = super(MailTemplateView, self).get_context_data(**kwargs)\n mail = Outbox().get(id)\n context['mail'] = mail\n context['content_type'] = self.request.GET['content_type']\n context['content'] = mail.body[self.request.GET['content_type']]\n return context\n", "id": "2067339", "language": "Python", "matching_score": 0.36519739031791687, "max_stars_count": 0, "path": "django_outbox/views.py" }, { "content": "import re\nfrom os import path, listdir\nfrom email.parser import Parser\n\nfrom django.conf import settings\n\n\nclass Outbox(object):\n \n def __init__(self):\n self._parser = Parser()\n\n def all(self):\n try:\n return list(reversed(\n [self._message_from_file(filepath) \n for filepath in listdir(self.maildirectory)]))\n except OSError:\n return []\n\n def get(self, id):\n return self._message_from_file(id)\n\n def _message_from_file(self, filepath):\n abspath = path.join(self.maildirectory, filepath)\n with open(abspath) as f:\n message = self._parser.parse(f)\n return self._convert_message(filepath, message)\n\n def _convert_message(self, filepath, message):\n if message.is_multipart():\n body = {submessage.get_content_type(): \n self._clear_content(submessage.get_payload())\n for submessage in message.get_payload()}\n \n else:\n body = {message.get_content_type(): \n self._clear_content(message.get_payload())}\n\n return Mail(\n filepath,\n message.get('Subject'), \n message.get('From'), \n message.get('To'), \n message.get('Date'),\n message.get_content_type(),\n body)\n\n def _clear_content(self, content):\n return re.sub(r'\\n-+', '', content)\n\n @property\n def maildirectory(self):\n return settings.EMAIL_FILE_PATH\n\n\nclass Mail(object):\n\n def __init__(self, id, subject, from_address, to, when, content_type, body):\n self._id = id\n self._subject = subject\n self._from_address = from_address\n self._to = to\n self._when = when\n self._content_type = content_type\n self._body = body\n\n @property\n def id(self):\n return self._id\n\n @property\n def subject(self):\n return self._subject\n\n @property\n def body(self):\n return self._body\n\n @property\n def from_address(self):\n return self._from_address\n\n @property\n def to(self):\n return self._to\n\n @property\n def when(self):\n return self._when\n\n @property\n def content_type(self):\n return self._content_type\n", "id": "12227586", "language": "Python", "matching_score": 3.2845282554626465, "max_stars_count": 6, "path": "django_outbox/outbox.py" }, { "content": "import shutil\nimport re\nfrom os import path, listdir\nfrom time import sleep\nfrom datetime import datetime\n\nfrom django.test import TestCase\nfrom django.core import mail\nfrom django.conf import settings\nfrom django.core.mail 
import EmailMultiAlternatives\n\nfrom expecter import expect\n\nfrom django_outbox.outbox import Outbox\n\n\nclass OutboxTestMixin(object):\n\n def setUp(self):\n settings.EMAIL_BACKEND = \\\n 'django.core.mail.backends.filebased.EmailBackend'\n\n self._clearmails()\n\n self.outbox = Outbox()\n\n def _clearmails(self):\n if path.exists(settings.EMAIL_FILE_PATH):\n shutil.rmtree(settings.EMAIL_FILE_PATH)\n\n def _send_mail(self, subject='Look at Foo!'):\n sleep(1)\n\n mail.send_mail(\n subject, \n 'Here is my Foo.', \n '<EMAIL>',\n ['<EMAIL>'])\n\n def _assert_mail_data(self, mail):\n expect(mail.id).contains(datetime.strftime(datetime.now(), '%Y%m%d'))\n expect(mail.subject) == 'Look at Foo!'\n expect(mail.to) == '<EMAIL>'\n expect(mail.from_address) == '<EMAIL>'\n expect(mail.body) == {'text/plain': 'Here is my Foo.\\n'}\n expect(mail.content_type) == 'text/plain'\n\n\nclass OutboxAllTest(OutboxTestMixin, TestCase):\n\n def test_fetch_all_sent_mails(self):\n self._send_mail()\n self._send_mail('Look at Bar!')\n\n mails = self.outbox.all()\n\n expect(len(mails)) == 2\n\n def test_mail_order_is_from_the_most_recent_to_the_oldest(self):\n self._send_mail()\n self._send_mail('Look at Bar!')\n self._send_mail('Look at Qux!')\n\n mails = self.outbox.all()\n\n expect([mail.subject for mail in mails]) == [\n 'Look at Qux!', 'Look at Bar!', 'Look at Foo!']\n\n def test_mail_data(self):\n self._send_mail()\n\n mails = self.outbox.all()\n mail = mails[0]\n \n self._assert_mail_data(mail)\n\n\nclass OutboxGetTest(OutboxTestMixin, TestCase):\n\n def test_get_a_specific_email(self):\n self._send_mail()\n\n mail = self.outbox.get(self._get_mail_id())\n\n self._assert_mail_data(mail)\n\n def test_multipart_email(self):\n subject, from_email, to = 'hello', '<EMAIL>', '<EMAIL>'\n text_content = 'This is an important message.'\n html_content = '<p>This is an <strong>important</strong> message.</p>'\n msg = EmailMultiAlternatives(subject, text_content, from_email, [to])\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()\n\n mail = self.outbox.get(self._get_mail_id())\n\n expect(mail.id).contains(datetime.strftime(datetime.now(), '%Y%m%d'))\n expect(mail.subject) == subject\n expect(mail.to) == to\n expect(mail.from_address) == from_email\n expect(mail.content_type) == 'multipart/alternative'\n expect(mail.body) == {\n 'text/plain': text_content, \n 'text/html': html_content}\n\n def _get_mail_id(self):\n maildirectory = settings.EMAIL_FILE_PATH\n return path.join(maildirectory, listdir(maildirectory)[0])\n", "id": "7059447", "language": "Python", "matching_score": 2.903282880783081, "max_stars_count": 6, "path": "tests/tests/test_outbox.py" } ]
2.782244
kliem
[ { "content": "r\"\"\"\nCyclic sieving phenomenon\n\nImplementation of the Cyclic Sieving Phenomenon as described by\nReiner, Stanton, and White in [RSW2004]_.\n\nWe define the :func:`CyclicSievingPolynomial` of a finite set `S`\ntogether with cyclic action ``cyc_act`` (of order `n`) to be the\nunique polynomial ``P(q)`` of order < `n` such that the triple (`S`,\n``cyc_act``, ``P(q)``) exhibits the cyclic sieving phenomenon.\n\nAUTHORS:\n\n- <NAME>\n\nREFERENCES:\n\n.. [RSW2004] Reiner, Stanton, White - *The cyclic sieving phenomenon*,\n Journal of Combinatorial Theory A 108 (2004)\n\"\"\"\n# ****************************************************************************\n# Copyright (C) 2010 <NAME> <EMAIL>\n#\n# Distributed under the terms of the GNU General Public License (GPL)\n# https://www.gnu.org/licenses/\n# ****************************************************************************\nfrom sage.rings.integer_ring import ZZ\nfrom sage.arith.all import lcm\nfrom sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing\n\n\ndef CyclicSievingPolynomial(L, cyc_act=None, order=None, get_order=False):\n \"\"\"\n Return the unique polynomial ``p`` of degree smaller than ``order`` such\n that the triple ``(L, cyc_act, p)`` exhibits the Cyclic Sieving Phenomenon.\n\n If ``cyc_act`` is None, ``L`` is expected to contain the orbit lengths.\n\n INPUT:\n\n - ``L`` -- if ``cyc_act`` is ``None``: list of orbit sizes,\n otherwise list of objects\n\n - ``cyc_act`` -- (default:``None``) bijective function from ``L`` to ``L``\n\n - ``order`` -- (default:``None``) if set to an integer, this\n cyclic order of ``cyc_act`` is used (must be an integer multiple\n of the order of ``cyc_act``) otherwise, the order of ``cyc_action`` is\n used\n\n - ``get_order`` -- (default:``False``) if ``True``, a tuple ``[p,n]``\n is returned where ``p`` is as above, and ``n`` is the order\n\n EXAMPLES::\n\n sage: from sage.combinat.cyclic_sieving_phenomenon import CyclicSievingPolynomial\n sage: S42 = Subsets([1,2,3,4], 2)\n sage: def cyc_act(S): return Set(i.mod(4) + 1 for i in S)\n sage: cyc_act([1,3])\n {2, 4}\n sage: cyc_act([1,4])\n {1, 2}\n sage: CyclicSievingPolynomial(S42, cyc_act)\n q^3 + 2*q^2 + q + 2\n sage: CyclicSievingPolynomial(S42, cyc_act, get_order=True)\n [q^3 + 2*q^2 + q + 2, 4]\n sage: CyclicSievingPolynomial(S42, cyc_act, order=8)\n q^6 + 2*q^4 + q^2 + 2\n sage: CyclicSievingPolynomial([4,2])\n q^3 + 2*q^2 + q + 2\n\n TESTS:\n\n We check that :trac:`13997` is handled::\n\n sage: CyclicSievingPolynomial(S42, cyc_act, order=8, get_order=True)\n [q^6 + 2*q^4 + q^2 + 2, 8]\n sage: CyclicSievingPolynomial(S42, cyc_act, order=11)\n Traceback (most recent call last):\n ...\n ValueError: order is not a multiple of the order of the cyclic action\n \"\"\"\n if cyc_act:\n orbits = orbit_decomposition(L, cyc_act)\n else:\n orbits = [list(range(k)) for k in L]\n\n R = PolynomialRing(ZZ, 'q')\n q = R.gen()\n p = R.zero()\n\n orbit_sizes = {}\n for orbit in orbits:\n length = len(orbit)\n if length in orbit_sizes:\n orbit_sizes[length] += 1\n else:\n orbit_sizes[length] = 1\n\n n = lcm(list(orbit_sizes))\n\n if order:\n if order.mod(n):\n raise ValueError(\"order is not a multiple of the order\"\n \" of the cyclic action\")\n else:\n order = n\n\n for i in range(n):\n if i == 0:\n j = sum(orbit_sizes.values())\n else:\n j = sum(orbit_sizes[l] for l in orbit_sizes\n if ZZ(i).mod(n / l) == 0)\n p += j * q**i\n\n p = p(q**(order // n))\n\n if get_order:\n return [p, order]\n else:\n return p\n\n\ndef 
CyclicSievingCheck(L, cyc_act, f, order=None):\n \"\"\"\n Return whether the triple ``(L, cyc_act, f)`` exhibits\n the cyclic sieving phenomenon.\n\n If ``cyc_act`` is None, ``L`` is expected to contain the orbit lengths.\n\n INPUT:\n\n - ``L`` -- if ``cyc_act`` is ``None``: list of orbit sizes,\n otherwise list of objects\n\n - ``cyc_act`` -- (default:``None``) bijective function from ``L`` to ``L``\n\n - ``order`` -- (default:``None``) if set to an integer, this\n cyclic order of ``cyc_act`` is used (must be an integer\n multiple of the order of ``cyc_act``) otherwise, the order of\n ``cyc_action`` is used\n\n EXAMPLES::\n\n sage: from sage.combinat.cyclic_sieving_phenomenon import *\n sage: from sage.combinat.q_analogues import q_binomial\n sage: S42 = Subsets([1,2,3,4], 2)\n sage: def cyc_act(S): return Set(i.mod(4) + 1 for i in S)\n sage: cyc_act([1,3])\n {2, 4}\n sage: cyc_act([1,4])\n {1, 2}\n sage: p = q_binomial(4,2); p\n q^4 + q^3 + 2*q^2 + q + 1\n sage: CyclicSievingPolynomial( S42, cyc_act )\n q^3 + 2*q^2 + q + 2\n sage: CyclicSievingCheck( S42, cyc_act, p )\n True\n \"\"\"\n p1, n = CyclicSievingPolynomial(L, cyc_act=cyc_act, order=order,\n get_order=True)\n R = p1.parent()\n q = R.gen()\n p2 = R(f).mod(q**n - 1)\n return p1 == p2\n\n\ndef orbit_decomposition(L, cyc_act):\n \"\"\"\n Return the orbit decomposition of ``L`` by the action of ``cyc_act``.\n\n INPUT:\n\n - ``L`` -- list\n\n - ``cyc_act`` -- bijective function from ``L`` to ``L``\n\n OUTPUT:\n\n - a list of lists, the orbits under the cyc_act acting on ``L``\n\n EXAMPLES::\n\n sage: from sage.combinat.cyclic_sieving_phenomenon import *\n sage: S42 = Subsets([1,2,3,4], 2); S42\n Subsets of {1, 2, 3, 4} of size 2\n sage: def cyc_act(S): return Set(i.mod(4) + 1 for i in S)\n sage: cyc_act([1,3])\n {2, 4}\n sage: cyc_act([1,4])\n {1, 2}\n sage: orbits = orbit_decomposition(S42, cyc_act)\n sage: sorted([sorted(orb, key=sorted) for orb in orbits], key=len)\n [[{1, 3}, {2, 4}], [{1, 2}, {1, 4}, {2, 3}, {3, 4}]]\n \"\"\"\n orbits = []\n L_prime = set(L)\n while L_prime != set():\n obj = L_prime.pop()\n orbit = [obj]\n obj = cyc_act(obj)\n while obj in L_prime:\n orbit.append(obj)\n L_prime.remove(obj)\n obj = cyc_act(obj)\n orbits.append(orbit)\n return orbits\n", "id": "5169638", "language": "Python", "matching_score": 1.0587027072906494, "max_stars_count": 10, "path": "src/sage/combinat/cyclic_sieving_phenomenon.py" }, { "content": "r\"\"\"\nCoordinate Charts on Differentiable Manifolds\n\nThe class :class:`DiffChart` implements coordinate charts on a differentiable\nmanifold over a topological field `K` (in most applications, `K = \\RR` or\n`K = \\CC`).\n\nThe subclass :class:`RealDiffChart` is devoted\nto the case `K=\\RR`, for which the concept of coordinate range is meaningful.\nMoreover, :class:`RealDiffChart` is endowed with some plotting\ncapabilities (cf. method :meth:`~sage.manifolds.chart.RealChart.plot`).\n\nTransition maps between charts are implemented via the class\n:class:`DiffCoordChange`.\n\nAUTHORS:\n\n- <NAME>, <NAME> (2013-2015) : initial version\n\nREFERENCES:\n\n- Chap. 
1 of [Lee2013]_\n\n\"\"\"\n# ****************************************************************************\n# Copyright (C) 2015 <NAME> <<EMAIL>>\n# Copyright (C) 2015 <NAME> <<EMAIL>>\n#\n# Distributed under the terms of the GNU General Public License (GPL)\n# as published by the Free Software Foundation; either version 2 of\n# the License, or (at your option) any later version.\n# https://www.gnu.org/licenses/\n# ****************************************************************************\n\nfrom sage.misc.cachefunc import cached_method\nfrom sage.manifolds.chart import Chart, RealChart, CoordChange\nfrom sage.manifolds.differentiable.vectorframe import CoordFrame\n\n\nclass DiffChart(Chart):\n r\"\"\"\n Chart on a differentiable manifold.\n\n Given a differentiable manifold `M` of dimension `n` over a topological\n field `K`, a *chart* is a member `(U,\\varphi)` of the manifold's\n differentiable atlas; `U` is then an open subset of `M` and\n `\\varphi: U \\rightarrow V \\subset K^n` is a homeomorphism from\n `U` to an open subset `V` of `K^n`.\n\n The components `(x^1,\\ldots,x^n)` of `\\varphi`, defined by\n `\\varphi(p) = (x^1(p),\\ldots,x^n(p))\\in K^n` for any point `p\\in U`, are\n called the *coordinates* of the chart `(U,\\varphi)`.\n\n INPUT:\n\n - ``domain`` -- open subset `U` on which the chart is defined\n - ``coordinates`` -- (default: '' (empty string)) single string defining\n the coordinate symbols, with ``' '`` (whitespace) as a separator; each\n item has at most three fields, separated by a colon (``:``):\n\n 1. the coordinate symbol (a letter or a few letters)\n 2. (optional) the period of the coordinate if the coordinate is\n periodic; the period field must be written as ``period=T``, where\n ``T`` is the period (see examples below)\n 3. (optional) the LaTeX spelling of the coordinate; if not provided the\n coordinate symbol given in the first field will be used\n\n The order of fields 2 and 3 does not matter and each of them can be\n omitted. If it contains any LaTeX expression, the string ``coordinates``\n must be declared with the prefix 'r' (for \"raw\") to allow for a proper\n treatment of LaTeX's backslash character (see examples below).\n If no period and no LaTeX spelling are to be set for any coordinate, the\n argument ``coordinates`` can be omitted when the shortcut operator\n ``<,>`` is used to declare the chart (see examples below).\n - ``calc_method`` -- (default: ``None``) string defining the calculus\n method for computations involving coordinates of the chart; must be\n one of\n\n - ``'SR'``: Sage's default symbolic engine (Symbolic Ring)\n - ``'sympy'``: SymPy\n - ``None``: the default of\n :class:`~sage.manifolds.calculus_method.CalculusMethod` will be\n used\n - ``names`` -- (default: ``None``) unused argument, except if\n ``coordinates`` is not provided; it must then be a tuple containing\n the coordinate symbols (this is guaranteed if the shortcut operator\n ``<,>`` is used).\n - ``coord_restrictions``: Additional restrictions on the coordinates.\n A restriction can be any symbolic equality or inequality involving\n the coordinates, such as ``x > y`` or ``x^2 + y^2 != 0``. 
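As an illustration of the and/or convention described above for ``coord_restrictions``, the following minimal sketch (assuming an interactive Sage session) declares a chart on a hypothetical open subset and checks a few coordinate triples; it only recombines calls that appear in this file's own doctests, and the commented results are what the stated restrictions imply.

    # Outer list = AND, inner tuple = OR, so the restriction below reads
    # ((y != 0) or (x < 0)) and (z > 0).
    M = Manifold(3, 'R^3')              # illustrative names
    U = M.open_subset('U')
    c.<x,y,z> = U.chart(coord_restrictions=lambda x, y, z: [(y != 0, x < 0), z > 0])

    c.valid_coordinates(-1, 0, 2)       # expected True:  x < 0 and z > 0
    c.valid_coordinates(1, 0, 2)        # expected False: y == 0 and x >= 0
    c.valid_coordinates(-1, 0, -2)      # expected False: z <= 0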
The items\n of the list (or set or frozenset) ``coord_restrictions`` are combined\n with the ``and`` operator; if some restrictions are to be combined with\n the ``or`` operator instead, they have to be passed as a tuple in some\n single item of the list (or set or frozenset) ``coord_restrictions``.\n For example::\n\n coord_restrictions=[x > y, (x != 0, y != 0), z^2 < x]\n\n means ``(x > y) and ((x != 0) or (y != 0)) and (z^2 < x)``.\n If the list ``coord_restrictions`` contains only one item, this\n item can be passed as such, i.e. writing ``x > y`` instead\n of the single element list ``[x > y]``. If the chart variables have\n not been declared as variables yet, ``coord_restrictions`` must\n be ``lambda``-quoted.\n\n EXAMPLES:\n\n A chart on a complex 2-dimensional differentiable manifold::\n\n sage: M = Manifold(2, 'M', field='complex')\n sage: X = M.chart('x y'); X\n Chart (M, (x, y))\n sage: latex(X)\n \\left(M,(x, y)\\right)\n sage: type(X)\n <class 'sage.manifolds.differentiable.chart.DiffChart'>\n\n To manipulate the coordinates `(x,y)` as global variables, one has to set::\n\n sage: x,y = X[:]\n\n However, a shortcut is to use the declarator ``<x,y>`` in the left-hand\n side of the chart declaration (there is then no need to pass the string\n ``'x y'`` to ``chart()``)::\n\n sage: M = Manifold(2, 'M', field='complex')\n sage: X.<x,y> = M.chart(); X\n Chart (M, (x, y))\n\n The coordinates are then immediately accessible::\n\n sage: y\n y\n sage: x is X[0] and y is X[1]\n True\n\n The trick is performed by Sage preparser::\n\n sage: preparse(\"X.<x,y> = M.chart()\")\n \"X = M.chart(names=('x', 'y',)); (x, y,) = X._first_ngens(2)\"\n\n Note that ``x`` and ``y`` declared in ``<x,y>`` are mere Python variable\n names and do not have to coincide with the coordinate symbols;\n for instance, one may write::\n\n sage: M = Manifold(2, 'M', field='complex')\n sage: X.<x1,y1> = M.chart('x y'); X\n Chart (M, (x, y))\n\n Then ``y`` is not known as a global Python variable and the\n coordinate `y` is accessible only through the global variable ``y1``::\n\n sage: y1\n y\n sage: latex(y1)\n y\n sage: y1 is X[1]\n True\n\n However, having the name of the Python variable coincide with the\n coordinate symbol is quite convenient; so it is recommended to declare::\n\n sage: M = Manifold(2, 'M', field='complex')\n sage: X.<x,y> = M.chart()\n\n In the above example, the chart X covers entirely the manifold M::\n\n sage: X.domain()\n 2-dimensional complex manifold M\n\n Of course, one may declare a chart only on an open subset of M::\n\n sage: U = M.open_subset('U')\n sage: Y.<z1, z2> = U.chart(r'z1:\\zeta_1 z2:\\zeta_2'); Y\n Chart (U, (z1, z2))\n sage: Y.domain()\n Open subset U of the 2-dimensional complex manifold M\n\n In the above declaration, we have also specified some LaTeX writing\n of the coordinates different from the text one::\n\n sage: latex(z1)\n {\\zeta_1}\n\n Note the prefix ``r`` in front of the string ``r'z1:\\zeta_1 z2:\\zeta_2'``;\n it makes sure that the backslash character is treated as an ordinary\n character, to be passed to the LaTeX interpreter.\n\n Periodic coordinates are declared through the keyword ``period=`` in the\n coordinate field::\n\n sage: N = Manifold(2, 'N', field='complex')\n sage: XN.<Z1,Z2> = N.chart('Z1:period=1+2*I Z2')\n sage: XN.periods()\n {0: 2*I + 1}\n\n Coordinates are Sage symbolic variables (see\n :mod:`sage.symbolic.expression`)::\n\n sage: type(z1)\n <class 'sage.symbolic.expression.Expression'>\n\n In addition to the Python variable name provided 
in the operator ``<.,.>``,\n the coordinates are accessible by their indices::\n\n sage: Y[0], Y[1]\n (z1, z2)\n\n The index range is that declared during the creation of the manifold. By\n default, it starts at 0, but this can be changed via the parameter\n ``start_index``::\n\n sage: M1 = Manifold(2, 'M_1', field='complex', start_index=1)\n sage: Z.<u,v> = M1.chart()\n sage: Z[1], Z[2]\n (u, v)\n\n The full set of coordinates is obtained by means of the operator\n ``[:]``::\n\n sage: Y[:]\n (z1, z2)\n\n Each constructed chart is automatically added to the manifold's user\n atlas::\n\n sage: M.atlas()\n [Chart (M, (x, y)), Chart (U, (z1, z2))]\n\n and to the atlas of the chart's domain::\n\n sage: U.atlas()\n [Chart (U, (z1, z2))]\n\n Manifold subsets have a *default chart*, which, unless changed via the\n method\n :meth:`~sage.manifolds.manifold.TopologicalManifold.set_default_chart`,\n is the first defined chart on the subset (or on a open subset of it)::\n\n sage: M.default_chart()\n Chart (M, (x, y))\n sage: U.default_chart()\n Chart (U, (z1, z2))\n\n The default charts are not privileged charts on the manifold, but rather\n charts whose name can be skipped in the argument list of functions having\n an optional ``chart=`` argument.\n\n The action of the chart map `\\varphi` on a point is obtained by means of\n the call operator, i.e. the operator ``()``::\n\n sage: p = M.point((1+i, 2), chart=X); p\n Point on the 2-dimensional complex manifold M\n sage: X(p)\n (I + 1, 2)\n sage: X(p) == p.coord(X)\n True\n\n A vector frame is naturally associated to each chart::\n\n sage: X.frame()\n Coordinate frame (M, (∂/∂x,∂/∂y))\n sage: Y.frame()\n Coordinate frame (U, (∂/∂z1,∂/∂z2))\n\n as well as a dual frame (basis of 1-forms)::\n\n sage: X.coframe()\n Coordinate coframe (M, (dx,dy))\n sage: Y.coframe()\n Coordinate coframe (U, (dz1,dz2))\n\n .. SEEALSO::\n\n :class:`~sage.manifolds.differentiable.chart.RealDiffChart` for charts\n on differentiable manifolds over `\\RR`.\n\n \"\"\"\n def __init__(self, domain, coordinates, calc_method=None, periods=None, coord_restrictions=None):\n r\"\"\"\n Construct a chart.\n\n TESTS::\n\n sage: M = Manifold(2, 'M', field='complex')\n sage: X.<x,y> = M.chart()\n sage: X\n Chart (M, (x, y))\n sage: type(X)\n <class 'sage.manifolds.differentiable.chart.DiffChart'>\n sage: assumptions() # no assumptions on x,y set by X._init_coordinates\n []\n sage: TestSuite(X).run()\n\n \"\"\"\n super().__init__(domain, coordinates, calc_method=calc_method,\n periods=periods, coord_restrictions=coord_restrictions)\n # Construction of the coordinate frame associated to the chart:\n self._frame = CoordFrame(self)\n self._coframe = self._frame._coframe\n\n def transition_map(self, other, transformations, intersection_name=None,\n restrictions1=None, restrictions2=None):\n r\"\"\"\n Construct the transition map between the current chart,\n `(U,\\varphi)` say, and another one, `(V,\\psi)` say.\n\n If `n` is the manifold's dimension, the *transition map* is the\n map\n\n .. MATH::\n\n \\psi\\circ\\varphi^{-1}: \\varphi(U\\cap V) \\subset K^n\n \\rightarrow \\psi(U\\cap V) \\subset K^n,\n\n where `K` is the manifold's base field. 
In other words, the\n transition map expresses the coordinates `(y^1,\\ldots,y^n)` of\n `(V,\\psi)` in terms of the coordinates `(x^1,\\ldots,x^n)` of\n `(U,\\varphi)` on the open subset where the two charts intersect, i.e.\n on `U\\cap V`.\n\n By definition, the transition map `\\psi\\circ\\varphi^{-1}` must be\n of class `C^k`, where `k` is the degree of differentiability of the\n manifold (cf.\n :meth:`~sage.manifolds.differentiable.manifold.DifferentiableManifold.diff_degree`).\n\n INPUT:\n\n - ``other`` -- the chart `(V,\\psi)`\n - ``transformations`` -- tuple (or list) `(Y_1,\\ldots,Y_2)`, where\n `Y_i` is the symbolic expression of the coordinate `y^i` in terms\n of the coordinates `(x^1,\\ldots,x^n)`\n - ``intersection_name`` -- (default: ``None``) name to be given to the\n subset `U\\cap V` if the latter differs from `U` or `V`\n - ``restrictions1`` -- (default: ``None``) list of conditions on the\n coordinates of the current chart that define `U\\cap V` if the\n latter differs from `U`. ``restrictions1`` must be a list of\n of symbolic equalities or inequalities involving the\n coordinates, such as x>y or x^2+y^2 != 0. The items of the list\n ``restrictions1`` are combined with the ``and`` operator; if some\n restrictions are to be combined with the ``or`` operator instead,\n they have to be passed as a tuple in some single item of the list\n ``restrictions1``. For example, ``restrictions1`` = [x>y,\n (x!=0, y!=0), z^2<x] means (x>y) and ((x!=0) or (y!=0)) and (z^2<x).\n If the list ``restrictions1`` contains only one item, this item can\n be passed as such, i.e. writing x>y instead of the single-element\n list [x>y].\n - ``restrictions2`` -- (default: ``None``) list of conditions on the\n coordinates of the chart `(V,\\psi)` that define `U\\cap V` if the\n latter differs from `V` (see ``restrictions1`` for the syntax)\n\n OUTPUT:\n\n - The transition map `\\psi\\circ\\varphi^{-1}` defined on `U\\cap V`, as an\n instance of :class:`DiffCoordChange`.\n\n EXAMPLES:\n\n Transition map between two stereographic charts on the circle `S^1`::\n\n sage: M = Manifold(1, 'S^1')\n sage: U = M.open_subset('U') # Complement of the North pole\n sage: cU.<x> = U.chart() # Stereographic chart from the North pole\n sage: V = M.open_subset('V') # Complement of the South pole\n sage: cV.<y> = V.chart() # Stereographic chart from the South pole\n sage: M.declare_union(U,V) # S^1 is the union of U and V\n sage: trans = cU.transition_map(cV, 1/x, intersection_name='W',\n ....: restrictions1= x!=0, restrictions2 = y!=0)\n sage: trans\n Change of coordinates from Chart (W, (x,)) to Chart (W, (y,))\n sage: trans.display()\n y = 1/x\n\n The subset `W`, intersection of `U` and `V`, has been created by\n ``transition_map()``::\n\n sage: F = M.subset_family(); F\n Set {S^1, U, V, W} of open subsets of the 1-dimensional differentiable manifold S^1\n sage: W = F['W']\n sage: W is U.intersection(V)\n True\n sage: M.atlas()\n [Chart (U, (x,)), Chart (V, (y,)), Chart (W, (x,)), Chart (W, (y,))]\n\n Transition map between the polar chart and the Cartesian one on\n `\\RR^2`::\n\n sage: M = Manifold(2, 'R^2')\n sage: c_cart.<x,y> = M.chart()\n sage: U = M.open_subset('U') # the complement of the half line {y=0, x >= 0}\n sage: c_spher.<r,phi> = U.chart(r'r:(0,+oo) phi:(0,2*pi):\\phi')\n sage: trans = c_spher.transition_map(c_cart, (r*cos(phi), r*sin(phi)),\n ....: restrictions2=(y!=0, x<0))\n sage: trans\n Change of coordinates from Chart (U, (r, phi)) to Chart (U, (x, y))\n sage: trans.display()\n x = r*cos(phi)\n 
y = r*sin(phi)\n\n In this case, no new subset has been created since `U\\cap M = U`::\n\n sage: M.subset_family()\n Set {R^2, U} of open subsets of the 2-dimensional differentiable manifold R^2\n\n but a new chart has been created: `(U, (x, y))`::\n\n sage: M.atlas()\n [Chart (R^2, (x, y)), Chart (U, (r, phi)), Chart (U, (x, y))]\n\n \"\"\"\n dom1 = self.domain()\n dom2 = other.domain()\n dom = dom1.intersection(dom2, name=intersection_name)\n if dom is dom1:\n chart1 = self\n else:\n chart1 = self.restrict(dom, restrictions1)\n if dom is dom2:\n chart2 = other\n else:\n chart2 = other.restrict(dom, restrictions2)\n if not isinstance(transformations, (tuple, list)):\n transformations = [transformations]\n return DiffCoordChange(chart1, chart2, *transformations)\n\n def frame(self):\n r\"\"\"\n Return the vector frame (coordinate frame) associated with ``self``.\n\n OUTPUT:\n\n - a :class:`~sage.manifolds.differentiable.vectorframe.CoordFrame`\n representing the coordinate frame\n\n EXAMPLES:\n\n Coordinate frame associated with some chart on a 2-dimensional\n manifold::\n\n sage: M = Manifold(2, 'M')\n sage: c_xy.<x,y> = M.chart()\n sage: c_xy.frame()\n Coordinate frame (M, (∂/∂x,∂/∂y))\n sage: type(c_xy.frame())\n <class 'sage.manifolds.differentiable.vectorframe.CoordFrame'>\n\n Check that ``c_xy.frame()`` is indeed the coordinate frame associated\n with the coordinates `(x,y)`::\n\n sage: ex = c_xy.frame()[0] ; ex\n Vector field ∂/∂x on the 2-dimensional differentiable manifold M\n sage: ey = c_xy.frame()[1] ; ey\n Vector field ∂/∂y on the 2-dimensional differentiable manifold M\n sage: ex(M.scalar_field(x)).display()\n 1: M → ℝ\n (x, y) ↦ 1\n sage: ex(M.scalar_field(y)).display()\n zero: M → ℝ\n (x, y) ↦ 0\n sage: ey(M.scalar_field(x)).display()\n zero: M → ℝ\n (x, y) ↦ 0\n sage: ey(M.scalar_field(y)).display()\n 1: M → ℝ\n (x, y) ↦ 1\n\n \"\"\"\n return self._frame\n\n def coframe(self):\n r\"\"\"\n Return the coframe (basis of coordinate differentials) associated\n with ``self``.\n\n OUTPUT:\n\n - a :class:`~sage.manifolds.differentiable.vectorframe.CoordCoFrame`\n representing the coframe\n\n EXAMPLES:\n\n Coordinate coframe associated with some chart on a 2-dimensional\n manifold::\n\n sage: M = Manifold(2, 'M')\n sage: c_xy.<x,y> = M.chart()\n sage: c_xy.coframe()\n Coordinate coframe (M, (dx,dy))\n sage: type(c_xy.coframe())\n <class 'sage.manifolds.differentiable.vectorframe.CoordCoFrame'>\n\n Check that ``c_xy.coframe()`` is indeed the coordinate coframe\n associated with the coordinates `(x, y)`::\n\n sage: dx = c_xy.coframe()[0] ; dx\n 1-form dx on the 2-dimensional differentiable manifold M\n sage: dy = c_xy.coframe()[1] ; dy\n 1-form dy on the 2-dimensional differentiable manifold M\n sage: ex = c_xy.frame()[0] ; ex\n Vector field ∂/∂x on the 2-dimensional differentiable manifold M\n sage: ey = c_xy.frame()[1] ; ey\n Vector field ∂/∂y on the 2-dimensional differentiable manifold M\n sage: dx(ex).display()\n dx(∂/∂x): M → ℝ\n (x, y) ↦ 1\n sage: dx(ey).display()\n dx(∂/∂y): M → ℝ\n (x, y) ↦ 0\n sage: dy(ex).display()\n dy(∂/∂x): M → ℝ\n (x, y) ↦ 0\n sage: dy(ey).display()\n dy(∂/∂y): M → ℝ\n (x, y) ↦ 1\n\n \"\"\"\n return self._coframe\n\n def restrict(self, subset, restrictions=None):\n r\"\"\"\n Return the restriction of ``self`` to some subset.\n\n If the current chart is `(U, \\varphi)`, a *restriction* (or\n *subchart*) is a chart `(V, \\psi)` such that `V \\subset U`\n and `\\psi = \\varphi |_V`.\n\n If such subchart has not been defined yet, it is constructed 
here.\n\n The coordinates of the subchart bare the same names as the\n coordinates of the original chart.\n\n INPUT:\n\n - ``subset`` -- open subset `V` of the chart domain `U`\n - ``restrictions`` -- (default: ``None``) list of coordinate\n restrictions defining the subset `V`\n\n A restriction can be any symbolic equality or inequality involving\n the coordinates, such as ``x > y`` or ``x^2 + y^2 != 0``. The items\n of the list ``restrictions`` are combined with the ``and`` operator;\n if some restrictions are to be combined with the ``or`` operator\n instead, they have to be passed as a tuple in some single item\n of the list ``restrictions``. For example::\n\n restrictions = [x > y, (x != 0, y != 0), z^2 < x]\n\n means ``(x > y) and ((x != 0) or (y != 0)) and (z^2 < x)``.\n If the list ``restrictions`` contains only one item, this\n item can be passed as such, i.e. writing ``x > y`` instead\n of the single element list ``[x > y]``.\n\n OUTPUT:\n\n - a :class:`DiffChart` `(V, \\psi)`\n\n EXAMPLES:\n\n Coordinates on the unit open ball of `\\CC^2` as a subchart\n of the global coordinates of `\\CC^2`::\n\n sage: M = Manifold(2, 'C^2', field='complex')\n sage: X.<z1, z2> = M.chart()\n sage: B = M.open_subset('B')\n sage: X_B = X.restrict(B, abs(z1)^2 + abs(z2)^2 < 1); X_B\n Chart (B, (z1, z2))\n\n \"\"\"\n if subset == self.domain():\n return self\n if subset not in self._dom_restrict:\n resu = Chart.restrict(self, subset, restrictions=restrictions)\n # Update of superframes and subframes:\n resu._frame._superframes.update(self._frame._superframes)\n for sframe in self._frame._superframes:\n sframe._subframes.add(resu._frame)\n sframe._restrictions[subset] = resu._frame\n # The subchart frame is not a \"top frame\" in the supersets\n # (including self.domain()):\n for dom in self.domain().open_supersets():\n if resu._frame in dom._top_frames:\n # it was added by the Chart constructor invoked in\n # Chart.restrict above\n dom._top_frames.remove(resu._frame)\n return self._dom_restrict[subset]\n\n def symbolic_velocities(self, left='D', right=None):\n r\"\"\"\n Return a list of symbolic variables ready to be used by the\n user as the derivatives of the coordinate functions with respect\n to a curve parameter (i.e. 
the velocities along the curve).\n It may actually serve to denote anything else than velocities,\n with a name including the coordinate functions.\n The choice of strings provided as 'left' and 'right' arguments\n is not entirely free since it must comply with Python\n prescriptions.\n\n INPUT:\n\n - ``left`` -- (default: ``D``) string to concatenate to the left\n of each coordinate functions of the chart\n - ``right`` -- (default: ``None``) string to concatenate to the\n right of each coordinate functions of the chart\n\n OUTPUT:\n\n - a list of symbolic expressions with the desired names\n\n EXAMPLES:\n\n Symbolic derivatives of the Cartesian coordinates of the\n 3-dimensional Euclidean space::\n\n sage: R3 = Manifold(3, 'R3', start_index=1)\n sage: cart.<X,Y,Z> = R3.chart()\n sage: D = cart.symbolic_velocities(); D\n [DX, DY, DZ]\n sage: D = cart.symbolic_velocities(left='d', right=\"/dt\"); D\n Traceback (most recent call last):\n ...\n ValueError: The name \"dX/dt\" is not a valid Python\n identifier.\n sage: D = cart.symbolic_velocities(left='d', right=\"_dt\"); D\n [dX_dt, dY_dt, dZ_dt]\n sage: D = cart.symbolic_velocities(left='', right=\"'\"); D\n Traceback (most recent call last):\n ...\n ValueError: The name \"X'\" is not a valid Python\n identifier.\n sage: D = cart.symbolic_velocities(left='', right=\"_dot\"); D\n [X_dot, Y_dot, Z_dot]\n sage: R.<t> = manifolds.RealLine()\n sage: canon_chart = R.default_chart()\n sage: D = canon_chart.symbolic_velocities() ; D\n [Dt]\n\n \"\"\"\n\n from sage.symbolic.ring import var\n\n # The case len(self[:]) = 1 is treated apart due to the\n # following fact.\n # In the case of several coordinates, the argument of 'var' (as\n # implemented below after the case len(self[:]) = 1) is a list\n # of strings of the form ['Dx1', 'Dx2', ...] 
and not a unique\n # string of the form 'Dx1 Dx2 ...'.\n # Although 'var' is supposed to accept both syntaxes, the first\n # one causes an error when it contains only one argument, due to\n # line 784 of sage/symbolic/ring.pyx :\n # \"return self.symbol(name, latex_name=formatted_latex_name, domain=domain)\"\n # In this line, the first argument 'name' of 'symbol' is a list\n # and not a string if the argument of 'var' is a list of one\n # string (of the type ['Dt']), which causes error in 'symbol'.\n # This might be corrected.\n if len(self[:]) == 1:\n string_vel = left + format(self[:][0]) # will raise an error\n # in case left is not a string\n if right is not None:\n string_vel += right # will raise an error in case right\n # is not a string\n\n # If the argument of 'var' contains only one word, for\n # instance::\n # var('Dt')\n # then 'var' does not return a tuple containing one symbolic\n # expression, but the symbolic expression itself.\n # This is taken into account below in order to return a list\n # containing one symbolic expression.\n return [var(string_vel)]\n\n list_strings_velocities = [left + format(coord_func)\n for coord_func in self[:]] # will\n # raise an error in case left is not a string\n\n if right is not None:\n list_strings_velocities = [str_vel + right for str_vel\n in list_strings_velocities] # will\n # raise an error in case right is not a string\n\n return list(var(list_strings_velocities))\n\n\n\n#*****************************************************************************\n\nclass RealDiffChart(DiffChart, RealChart):\n r\"\"\"\n Chart on a differentiable manifold over `\\RR`.\n\n Given a differentiable manifold `M` of dimension `n` over `\\RR`,\n a *chart* is a member `(U,\\varphi)` of the manifold's\n differentiable atlas; `U` is then an open subset of `M` and\n `\\varphi: U \\rightarrow V \\subset \\RR^n` is a homeomorphism from\n `U` to an open subset `V` of `\\RR^n`.\n\n The components `(x^1,\\ldots,x^n)` of `\\varphi`, defined by\n `\\varphi(p) = (x^1(p),\\ldots,x^n(p))\\in \\RR^n` for any point `p\\in U`, are\n called the *coordinates* of the chart `(U,\\varphi)`.\n\n INPUT:\n\n - ``domain`` -- open subset `U` on which the chart is defined\n - ``coordinates`` -- (default: '' (empty string)) single string defining\n the coordinate symbols, with ``' '`` (whitespace) as a separator; each\n item has at most four fields, separated by a colon (``:``):\n\n 1. the coordinate symbol (a letter or a few letters)\n 2. (optional) the interval `I` defining the coordinate range: if not\n provided, the coordinate is assumed to span all `\\RR`; otherwise\n `I` must be provided in the form ``(a,b)`` (or equivalently\n ``]a,b[``); the bounds ``a`` and ``b`` can be ``+/-Infinity``,\n ``Inf``, ``infinity``, ``inf`` or ``oo``; for *singular*\n coordinates, non-open intervals such as ``[a,b]`` and ``(a,b]``\n (or equivalently ``]a,b]``) are allowed; note that the interval\n declaration must not contain any whitespace\n 3. (optional) indicator of the periodic character of the coordinate,\n either as ``period=T``, where ``T`` is the period, or as the keyword\n ``periodic`` (the value of the period is then deduced from the\n interval `I` declared in field 2; see examples below)\n 4. (optional) the LaTeX spelling of the coordinate; if not provided the\n coordinate symbol given in the first field will be used\n\n The order of fields 2 to 4 does not matter and each of them can be\n omitted. 
If it contains any LaTeX expression, the string ``coordinates``\n must be declared with the prefix 'r' (for \"raw\") to allow for a proper\n treatment of LaTeX's backslash character (see examples below).\n If interval range, no period and no LaTeX spelling are to be set for any\n coordinate, the argument ``coordinates`` can be omitted when the shortcut\n operator ``<,>`` is used to declare the chart (see examples below).\n - ``calc_method`` -- (default: ``None``) string defining the calculus\n method for computations involving coordinates of the chart; must be\n one of\n\n - ``'SR'``: Sage's default symbolic engine (Symbolic Ring)\n - ``'sympy'``: SymPy\n - ``None``: the default of\n :class:`~sage.manifolds.calculus_method.CalculusMethod` will be\n used\n - ``names`` -- (default: ``None``) unused argument, except if\n ``coordinates`` is not provided; it must then be a tuple containing\n the coordinate symbols (this is guaranteed if the shortcut operator\n ``<,>`` is used).\n - ``coord_restrictions``: Additional restrictions on the coordinates.\n A restriction can be any symbolic equality or inequality involving\n the coordinates, such as ``x > y`` or ``x^2 + y^2 != 0``. The items\n of the list (or set or frozenset) ``coord_restrictions`` are combined\n with the ``and`` operator; if some restrictions are to be combined with\n the ``or`` operator instead, they have to be passed as a tuple in some\n single item of the list (or set or frozenset) ``coord_restrictions``.\n For example::\n\n coord_restrictions=[x > y, (x != 0, y != 0), z^2 < x]\n\n means ``(x > y) and ((x != 0) or (y != 0)) and (z^2 < x)``.\n If the list ``coord_restrictions`` contains only one item, this\n item can be passed as such, i.e. writing ``x > y`` instead\n of the single element list ``[x > y]``. 
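As a compact sketch (assuming an interactive Sage session) of a coordinate declaration that combines the optional fields just listed — a range, the ``periodic`` keyword and a LaTeX name — consider the following; the manifold and coordinate names are illustrative, and the commented outputs follow the pattern of the doctests further down.

    M = Manifold(3, 'R^3')              # illustrative names
    U = M.open_subset('U')
    c.<r,th,ph> = U.chart(r'r:(0,+oo) th:(0,pi):\theta ph:(0,2*pi):periodic:\phi')

    c.coord_range()   # r: (0, +oo); th: (0, pi); ph: [0, 2*pi] (periodic)
    c.periods()       # {2: 2*pi}  (ph has index 2, since start_index defaults to 0)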
If the chart variables have\n not been declared as variables yet, ``coord_restrictions`` must\n be ``lambda``-quoted.\n\n EXAMPLES:\n\n Cartesian coordinates on `\\RR^3`::\n\n sage: M = Manifold(3, 'R^3', r'\\RR^3', start_index=1)\n sage: c_cart = M.chart('x y z'); c_cart\n Chart (R^3, (x, y, z))\n sage: type(c_cart)\n <class 'sage.manifolds.differentiable.chart.RealDiffChart'>\n\n To have the coordinates accessible as global variables, one has to set::\n\n sage: (x,y,z) = c_cart[:]\n\n However, a shortcut is to use the declarator ``<x,y,z>`` in the left-hand\n side of the chart declaration (there is then no need to pass the string\n ``'x y z'`` to ``chart()``)::\n\n sage: M = Manifold(3, 'R^3', r'\\RR^3', start_index=1)\n sage: c_cart.<x,y,z> = M.chart(); c_cart\n Chart (R^3, (x, y, z))\n\n The coordinates are then immediately accessible::\n\n sage: y\n y\n sage: y is c_cart[2]\n True\n\n The trick is performed by Sage preparser::\n\n sage: preparse(\"c_cart.<x,y,z> = M.chart()\")\n \"c_cart = M.chart(names=('x', 'y', 'z',)); (x, y, z,) = c_cart._first_ngens(3)\"\n\n Note that ``x, y, z`` declared in ``<x,y,z>`` are mere Python variable\n names and do not have to coincide with the coordinate symbols; for instance,\n one may write::\n\n sage: M = Manifold(3, 'R^3', r'\\RR^3', start_index=1)\n sage: c_cart.<x1,y1,z1> = M.chart('x y z'); c_cart\n Chart (R^3, (x, y, z))\n\n Then ``y`` is not known as a global variable and the coordinate `y`\n is accessible only through the global variable ``y1``::\n\n sage: y1\n y\n sage: y1 is c_cart[2]\n True\n\n However, having the name of the Python variable coincide with the\n coordinate symbol is quite convenient; so it is recommended to declare::\n\n sage: forget() # for doctests only\n sage: M = Manifold(3, 'R^3', r'\\RR^3', start_index=1)\n sage: c_cart.<x,y,z> = M.chart()\n\n Spherical coordinates on the subset `U` of `\\RR^3` that is the\n complement of the half-plane `\\{y=0, x\\geq 0\\}`::\n\n sage: U = M.open_subset('U')\n sage: c_spher.<r,th,ph> = U.chart(r'r:(0,+oo) th:(0,pi):\\theta ph:(0,2*pi):\\phi')\n sage: c_spher\n Chart (U, (r, th, ph))\n\n Note the prefix 'r' for the string defining the coordinates in the\n arguments of ``chart``.\n\n Coordinates are Sage symbolic variables (see\n :mod:`sage.symbolic.expression`)::\n\n sage: type(th)\n <class 'sage.symbolic.expression.Expression'>\n sage: latex(th)\n {\\theta}\n sage: assumptions(th)\n [th is real, th > 0, th < pi]\n\n Coordinate are also accessible by their indices::\n\n sage: x1 = c_spher[1]; x2 = c_spher[2]; x3 = c_spher[3]\n sage: [x1, x2, x3]\n [r, th, ph]\n sage: (x1, x2, x3) == (r, th, ph)\n True\n\n The full set of coordinates is obtained by means of the operator [:]::\n\n sage: c_cart[:]\n (x, y, z)\n sage: c_spher[:]\n (r, th, ph)\n\n Let us check that the declared coordinate ranges have been taken into\n account::\n\n sage: c_cart.coord_range()\n x: (-oo, +oo); y: (-oo, +oo); z: (-oo, +oo)\n sage: c_spher.coord_range()\n r: (0, +oo); th: (0, pi); ph: (0, 2*pi)\n sage: bool(th>0 and th<pi)\n True\n sage: assumptions() # list all current symbolic assumptions\n [x is real, y is real, z is real, r is real, r > 0, th is real,\n th > 0, th < pi, ph is real, ph > 0, ph < 2*pi]\n\n The coordinate ranges are used for simplifications::\n\n sage: simplify(abs(r)) # r has been declared to lie in the interval (0,+oo)\n r\n sage: simplify(abs(x)) # no positive range has been declared for x\n abs(x)\n\n A coordinate can be declared periodic by adding the keyword ``periodic``\n to its 
range::\n\n sage: V = M.open_subset('V')\n sage: c_spher1.<r,th,ph1> = \\\n ....: V.chart(r'r:(0,+oo) th:(0,pi):\\theta ph1:(0,2*pi):periodic:\\phi_1')\n sage: c_spher1.periods()\n {3: 2*pi}\n sage: c_spher1.coord_range()\n r: (0, +oo); th: (0, pi); ph1: [0, 2*pi] (periodic)\n\n It is equivalent to give the period as ``period=2*pi``, skipping the\n coordinate range::\n\n sage: c_spher2.<r,th,ph2> = \\\n ....: V.chart(r'r:(0,+oo) th:(0,pi):\\theta ph2:period=2*pi:\\phi_2')\n sage: c_spher2.periods()\n {3: 2*pi}\n sage: c_spher2.coord_range()\n r: (0, +oo); th: (0, pi); ph2: [0, 2*pi] (periodic)\n\n Each constructed chart is automatically added to the manifold's\n user atlas::\n\n sage: M.atlas()\n [Chart (R^3, (x, y, z)), Chart (U, (r, th, ph)),\n Chart (V, (r, th, ph1)), Chart (V, (r, th, ph2))]\n\n and to the atlas of its domain::\n\n sage: U.atlas()\n [Chart (U, (r, th, ph))]\n\n Manifold subsets have a *default chart*, which, unless changed via the\n method\n :meth:`~sage.manifolds.manifold.TopologicalManifold.set_default_chart`,\n is the first defined chart on the subset (or on a open subset of it)::\n\n sage: M.default_chart()\n Chart (R^3, (x, y, z))\n sage: U.default_chart()\n Chart (U, (r, th, ph))\n\n The default charts are not privileged charts on the manifold, but rather\n charts whose name can be skipped in the argument list of functions having\n an optional ``chart=`` argument.\n\n The action of the chart map `\\varphi` on a point is obtained by means of\n the call operator, i.e. the operator ``()``::\n\n sage: p = M.point((1,0,-2)); p\n Point on the 3-dimensional differentiable manifold R^3\n sage: c_cart(p)\n (1, 0, -2)\n sage: c_cart(p) == p.coord(c_cart)\n True\n sage: q = M.point((2,pi/2,pi/3), chart=c_spher) # point defined by its spherical coordinates\n sage: c_spher(q)\n (2, 1/2*pi, 1/3*pi)\n sage: c_spher(q) == q.coord(c_spher)\n True\n sage: a = U.point((1,pi/2,pi)) # the default coordinates on U are the spherical ones\n sage: c_spher(a)\n (1, 1/2*pi, pi)\n sage: c_spher(a) == a.coord(c_spher)\n True\n\n Cartesian coordinates on `U` as an example of chart construction with\n coordinate restrictions: since `U` is the complement of the half-plane\n `\\{y=0, x\\geq 0\\}`, we must have `y\\not=0` or `x<0` on U. 
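To tie these pieces together, here is a minimal end-to-end sketch (assuming an interactive Sage session) of two charts related by a polar-to-Cartesian transition map, together with its Jacobian data; ``transition_map`` is documented above and ``jacobian``/``jacobian_det`` further down in this file, the names are illustrative, and the commented outputs are the expected ones.

    M = Manifold(2, 'R^2')
    c_cart.<x,y> = M.chart()                      # Cartesian chart on all of R^2
    U = M.open_subset('U')                        # complement of {y = 0, x >= 0}
    c_pol.<r,ph> = U.chart(r'r:(0,+oo) ph:(0,2*pi):\phi')

    pol_to_cart = c_pol.transition_map(c_cart, (r*cos(ph), r*sin(ph)),
                                       restrictions2=(y != 0, x < 0))
    pol_to_cart.display()         # x = r*cos(ph),  y = r*sin(ph)
    pol_to_cart.jacobian()        # [cos(ph)  -r*sin(ph)]
                                  # [sin(ph)   r*cos(ph)]
    pol_to_cart.jacobian_det()    # r  (after trigonometric simplification)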
Accordingly,\n we set::\n\n sage: c_cartU.<x,y,z> = U.chart(coord_restrictions=lambda x,y,z: (y!=0, x<0))\n ....: # the tuple (y!=0, x<0) means y!=0 or x<0\n ....: # [y!=0, x<0] would have meant y!=0 AND x<0\n sage: U.atlas()\n [Chart (U, (r, th, ph)), Chart (U, (x, y, z))]\n sage: M.atlas()\n [Chart (R^3, (x, y, z)), Chart (U, (r, th, ph)),\n Chart (V, (r, th, ph1)), Chart (V, (r, th, ph2)),\n Chart (U, (x, y, z))]\n sage: c_cartU.valid_coordinates(-1,0,2)\n True\n sage: c_cartU.valid_coordinates(1,0,2)\n False\n sage: c_cart.valid_coordinates(1,0,2)\n True\n\n A vector frame is naturally associated to each chart::\n\n sage: c_cart.frame()\n Coordinate frame (R^3, (∂/∂x,∂/∂y,∂/∂z))\n sage: c_spher.frame()\n Coordinate frame (U, (∂/∂r,∂/∂th,∂/∂ph))\n\n as well as a dual frame (basis of 1-forms)::\n\n sage: c_cart.coframe()\n Coordinate coframe (R^3, (dx,dy,dz))\n sage: c_spher.coframe()\n Coordinate coframe (U, (dr,dth,dph))\n\n Chart grids can be drawn in 2D or 3D graphics thanks to the method\n :meth:`~sage.manifolds.chart.RealChart.plot`.\n\n \"\"\"\n def __init__(self, domain, coordinates, calc_method=None,\n bounds=None, periods=None, coord_restrictions=None):\n r\"\"\"\n Construct a chart on a real differentiable manifold.\n\n TESTS::\n\n sage: forget() # for doctests only\n sage: M = Manifold(2, 'M')\n sage: X.<x,y> = M.chart()\n sage: X\n Chart (M, (x, y))\n sage: type(X)\n <class 'sage.manifolds.differentiable.chart.RealDiffChart'>\n sage: assumptions() # assumptions set in X._init_coordinates\n [x is real, y is real]\n sage: TestSuite(X).run()\n\n \"\"\"\n RealChart.__init__(self, domain, coordinates, calc_method=calc_method,\n bounds=bounds, periods=periods, coord_restrictions=coord_restrictions)\n # Construction of the coordinate frame associated to the chart:\n self._frame = CoordFrame(self)\n self._coframe = self._frame._coframe\n\n\n def restrict(self, subset, restrictions=None):\n r\"\"\"\n Return the restriction of the chart to some subset.\n\n If the current chart is `(U, \\varphi)`, a *restriction* (or\n *subchart*) is a chart `(V, \\psi)` such that `V \\subset U`\n and `\\psi = \\varphi |_V`.\n\n If such subchart has not been defined yet, it is constructed here.\n\n The coordinates of the subchart bare the same names as the\n coordinates of the original chart.\n\n INPUT:\n\n - ``subset`` -- open subset `V` of the chart domain `U`\n - ``restrictions`` -- (default: ``None``) list of coordinate\n restrictions defining the subset `V`\n\n A restriction can be any symbolic equality or inequality involving\n the coordinates, such as ``x > y`` or ``x^2 + y^2 != 0``. The items\n of the list ``restrictions`` are combined with the ``and`` operator;\n if some restrictions are to be combined with the ``or`` operator\n instead, they have to be passed as a tuple in some single item\n of the list ``restrictions``. For example::\n\n restrictions = [x > y, (x != 0, y != 0), z^2 < x]\n\n means ``(x > y) and ((x != 0) or (y != 0)) and (z^2 < x)``.\n If the list ``restrictions`` contains only one item, this\n item can be passed as such, i.e. 
writing ``x > y`` instead\n of the single element list ``[x > y]``.\n\n OUTPUT:\n\n - a :class:`RealDiffChart` `(V, \\psi)`\n\n EXAMPLES:\n\n Cartesian coordinates on the unit open disc in `\\RR^2` as a subchart\n of the global Cartesian coordinates::\n\n sage: M = Manifold(2, 'R^2')\n sage: c_cart.<x,y> = M.chart() # Cartesian coordinates on R^2\n sage: D = M.open_subset('D') # the unit open disc\n sage: c_cart_D = c_cart.restrict(D, x^2+y^2<1)\n sage: p = M.point((1/2, 0))\n sage: p in D\n True\n sage: q = M.point((1, 2))\n sage: q in D\n False\n\n Cartesian coordinates on the annulus `1 < \\sqrt{x^2+y^2} < 2`::\n\n sage: A = M.open_subset('A')\n sage: c_cart_A = c_cart.restrict(A, [x^2+y^2>1, x^2+y^2<4])\n sage: p in A, q in A\n (False, False)\n sage: a = M.point((3/2,0))\n sage: a in A\n True\n\n \"\"\"\n if subset == self.domain():\n return self\n if subset not in self._dom_restrict:\n resu = RealChart.restrict(self, subset, restrictions=restrictions)\n # Update of superframes and subframes:\n resu._frame._superframes.update(self._frame._superframes)\n for sframe in self._frame._superframes:\n sframe._subframes.add(resu._frame)\n sframe._restrictions[subset] = resu._frame\n # The subchart frame is not a \"top frame\" in the supersets\n # (including self.domain()):\n for dom in self.domain().open_supersets():\n if resu._frame in dom._top_frames:\n # it was added by the Chart constructor invoked in\n # Chart.restrict above\n dom._top_frames.remove(resu._frame)\n return self._dom_restrict[subset]\n\n#******************************************************************************\n\nclass DiffCoordChange(CoordChange):\n r\"\"\"\n Transition map between two charts of a differentiable manifold.\n\n Giving two coordinate charts `(U,\\varphi)` and `(V,\\psi)` on a\n differentiable manifold `M` of dimension `n` over a topological field `K`,\n the *transition map from* `(U,\\varphi)` *to* `(V,\\psi)` is the map\n\n .. MATH::\n\n \\psi\\circ\\varphi^{-1}: \\varphi(U\\cap V) \\subset K^n\n \\rightarrow \\psi(U\\cap V) \\subset K^n,\n\n In other words, the transition map `\\psi\\circ\\varphi^{-1}` expresses the\n coordinates `(y^1,\\ldots,y^n)` of `(V,\\psi)` in terms of the coordinates\n `(x^1,\\ldots,x^n)` of `(U,\\varphi)` on the open subset where the two\n charts intersect, i.e. 
on `U\\cap V`.\n\n By definition, the transition map `\\psi\\circ\\varphi^{-1}` must be\n of class `C^k`, where `k` is the degree of differentiability of the\n manifold (cf.\n :meth:`~sage.manifolds.differentiable.manifold.DifferentiableManifold.diff_degree`).\n\n INPUT:\n\n - ``chart1`` -- chart `(U,\\varphi)`\n - ``chart2`` -- chart `(V,\\psi)`\n - ``transformations`` -- tuple (or list) `(Y_1,\\ldots,Y_2)`, where\n `Y_i` is the symbolic expression of the coordinate `y^i` in terms\n of the coordinates `(x^1,\\ldots,x^n)`\n\n EXAMPLES:\n\n Transition map on a 2-dimensional differentiable manifold::\n\n sage: M = Manifold(2, 'M')\n sage: X.<x,y> = M.chart()\n sage: Y.<u,v> = M.chart()\n sage: X_to_Y = X.transition_map(Y, [x+y, x-y])\n sage: X_to_Y\n Change of coordinates from Chart (M, (x, y)) to Chart (M, (u, v))\n sage: type(X_to_Y)\n <class 'sage.manifolds.differentiable.chart.DiffCoordChange'>\n sage: X_to_Y.display()\n u = x + y\n v = x - y\n\n \"\"\"\n def __init__(self, chart1, chart2, *transformations):\n r\"\"\"\n Construct a transition map.\n\n TESTS::\n\n sage: M = Manifold(2, 'M')\n sage: X.<x,y> = M.chart()\n sage: Y.<u,v> = M.chart()\n sage: X_to_Y = X.transition_map(Y, [x+y, x-y])\n sage: X_to_Y\n Change of coordinates from Chart (M, (x, y)) to Chart (M, (u, v))\n sage: type(X_to_Y)\n <class 'sage.manifolds.differentiable.chart.DiffCoordChange'>\n sage: TestSuite(X_to_Y).run(skip='_test_pickling')\n\n .. TODO::\n\n fix _test_pickling\n\n \"\"\"\n CoordChange.__init__(self, chart1, chart2, *transformations)\n # Jacobian matrix:\n self._jacobian = self._transf.jacobian()\n # If the two charts are on the same open subset, the Jacobian matrix is\n # added to the dictionary of changes of frame:\n if chart1.domain() == chart2.domain():\n domain = chart1.domain()\n frame1 = chart1._frame\n frame2 = chart2._frame\n vf_module = domain.vector_field_module()\n ch_basis = vf_module.automorphism()\n ch_basis.add_comp(frame1)[:, chart1] = self._jacobian\n ch_basis.add_comp(frame2)[:, chart1] = self._jacobian\n vf_module._basis_changes[(frame2, frame1)] = ch_basis\n for sdom in domain.open_supersets():\n sdom._frame_changes[(frame2, frame1)] = ch_basis\n # The inverse is computed only if it does not exist already\n # (because if it exists it may have a simpler expression than that\n # obtained from the matrix inverse)\n if (frame1, frame2) not in vf_module._basis_changes:\n ch_basis_inv = ch_basis.inverse()\n vf_module._basis_changes[(frame1, frame2)] = ch_basis_inv\n for sdom in domain.open_supersets():\n sdom._frame_changes[(frame1, frame2)] = ch_basis_inv\n\n def jacobian(self):\n r\"\"\"\n Return the Jacobian matrix of ``self``.\n\n If ``self`` corresponds to the change of coordinates\n\n .. MATH::\n\n y^i = Y^i(x^1,\\ldots,x^n)\\qquad 1\\leq i \\leq n\n\n the Jacobian matrix `J` is given by\n\n .. MATH::\n\n J_{ij} = \\frac{\\partial Y^i}{\\partial x^j}\n\n where `i` is the row index and `j` the column one.\n\n OUTPUT:\n\n - Jacobian matrix `J`, the elements `J_{ij}` of which being\n coordinate functions\n (cf. 
:class:`~sage.manifolds.chart_func.ChartFunction`)\n\n EXAMPLES:\n\n Jacobian matrix of a 2-dimensional transition map::\n\n sage: M = Manifold(2, 'M')\n sage: X.<x,y> = M.chart()\n sage: Y.<u,v> = M.chart()\n sage: X_to_Y = X.transition_map(Y, [x+y^2, 3*x-y])\n sage: X_to_Y.jacobian()\n [ 1 2*y]\n [ 3 -1]\n\n Each element of the Jacobian matrix is a coordinate function::\n\n sage: parent(X_to_Y.jacobian()[0,0])\n Ring of chart functions on Chart (M, (x, y))\n\n \"\"\"\n return self._jacobian # has been computed in __init__\n\n @cached_method\n def jacobian_det(self):\n r\"\"\"\n Return the Jacobian determinant of ``self``.\n\n The Jacobian determinant is the determinant of the Jacobian\n matrix (see :meth:`jacobian`).\n\n OUTPUT:\n\n - determinant of the Jacobian matrix `J` as a coordinate\n function\n (cf. :class:`~sage.manifolds.chart_func.ChartFunction`)\n\n EXAMPLES:\n\n Jacobian determinant of a 2-dimensional transition map::\n\n sage: M = Manifold(2, 'M')\n sage: X.<x,y> = M.chart()\n sage: Y.<u,v> = M.chart()\n sage: X_to_Y = X.transition_map(Y, [x+y^2, 3*x-y])\n sage: X_to_Y.jacobian_det()\n -6*y - 1\n sage: X_to_Y.jacobian_det() == det(X_to_Y.jacobian())\n True\n\n The Jacobian determinant is a coordinate function::\n\n sage: parent(X_to_Y.jacobian_det())\n Ring of chart functions on Chart (M, (x, y))\n\n \"\"\"\n return self._transf.jacobian_det()\n", "id": "3602528", "language": "Python", "matching_score": 2.1117987632751465, "max_stars_count": 10, "path": "src/sage/manifolds/differentiable/chart.py" }, { "content": "r\"\"\"\nBase class for polyhedra, part 0\n\nInitialization and access to Vrepresentation and Hrepresentation.\n\"\"\"\n\n# ****************************************************************************\n# Copyright (C) 2008-2012 <NAME> <<EMAIL>>\n# Copyright (C) 2011-2015 <NAME> <<EMAIL>>\n# Copyright (C) 2012-2018 <NAME>\n# Copyright (C) 2013 <NAME>\n# Copyright (C) 2014-2017 <NAME>\n# Copyright (C) 2014-2019 <NAME>\n# Copyright (C) 2015 <NAME>\n# Copyright (C) 2015-2017 <NAME>\n# Copyright (C) 2015-2017 <NAME>\n# Copyright (C) 2015-2018 <NAME>\n# Copyright (C) 2015-2020 <NAME> <labbe at math.huji.ac.il>\n# Copyright (C) 2015-2021 <NAME>\n# Copyright (C) 2016-2019 <NAME>\n# Copyright (C) 2017 <NAME>\n# Copyright (C) 2017-2018 <NAME>\n# Copyright (C) 2019 <NAME>\n# Copyright (C) 2019-2020 <NAME>\n# Copyright (C) 2019-2020 <NAME>\n# Copyright (C) 2019-2021 <NAME> <<EMAIL>>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 2 of the License, or\n# (at your option) any later version.\n# https://www.gnu.org/licenses/\n# ****************************************************************************\n\nfrom sage.misc.cachefunc import cached_method\nfrom sage.structure.element import Element\nimport sage.geometry.abc\n\nclass Polyhedron_base0(Element, sage.geometry.abc.Polyhedron):\n \"\"\"\n Initialization and basic access for polyhedra.\n\n See :class:`sage.geometry.polyhedron.base.Polyhedron_base`.\n\n TESTS::\n\n sage: from sage.geometry.polyhedron.base0 import Polyhedron_base0\n sage: P = Polyhedron(rays=[[1, 0, 0]], lines=[[0, 1, 0]])\n sage: Polyhedron_base0.Vrepresentation(P)\n (A line in the direction (0, 1, 0),\n A vertex at (0, 0, 0),\n A ray in the direction (1, 0, 0))\n sage: Polyhedron_base0.vertices.f(P)\n (A vertex at (0, 0, 0),)\n sage: Polyhedron_base0.rays.f(P)\n (A ray in the direction (1, 0, 0),)\n sage: 
Polyhedron_base0.lines.f(P)\n (A line in the direction (0, 1, 0),)\n sage: Polyhedron_base0.Hrepresentation(P)\n (An equation (0, 0, 1) x + 0 == 0, An inequality (1, 0, 0) x + 0 >= 0)\n sage: Polyhedron_base0.inequalities.f(P)\n (An inequality (1, 0, 0) x + 0 >= 0,)\n sage: Polyhedron_base0.equations.f(P)\n (An equation (0, 0, 1) x + 0 == 0,)\n sage: Polyhedron_base0.base_ring(P)\n Integer Ring\n sage: Polyhedron_base0.backend(P)\n 'ppl'\n sage: Polyhedron_base0.change_ring(P, ZZ, backend='field').backend()\n 'field'\n sage: Polyhedron_base0.base_extend(P, QQ)\n A 2-dimensional polyhedron in QQ^3 defined as the convex hull of 1 vertex, 1 ray, 1 line\n \"\"\"\n def __init__(self, parent, Vrep, Hrep, Vrep_minimal=None, Hrep_minimal=None, pref_rep=None, mutable=False, **kwds):\n \"\"\"\n Initializes the polyhedron.\n\n See :class:`sage.geometry.polyhedron.base.Polyhedron_base` for a description of the input\n data.\n\n TESTS::\n\n sage: p = Polyhedron() # indirect doctests\n\n sage: from sage.geometry.polyhedron.backend_field import Polyhedron_field\n sage: from sage.geometry.polyhedron.parent import Polyhedra_field\n sage: parent = Polyhedra_field(AA, 1, 'field')\n sage: Vrep = [[[0], [1/2], [1]], [], []]\n sage: Hrep = [[[0, 1], [1, -1]], []]\n sage: p = Polyhedron_field(parent, Vrep, Hrep,\n ....: Vrep_minimal=False, Hrep_minimal=True)\n Traceback (most recent call last):\n ...\n ValueError: if both Vrep and Hrep are provided, they must be minimal...\n\n Illustration of ``pref_rep``.\n Note that ``ppl`` doesn't support precomputed data::\n\n sage: from sage.geometry.polyhedron.backend_ppl import Polyhedron_QQ_ppl\n sage: from sage.geometry.polyhedron.parent import Polyhedra_QQ_ppl\n sage: parent = Polyhedra_QQ_ppl(QQ, 1, 'ppl')\n sage: p = Polyhedron_QQ_ppl(parent, Vrep, 'nonsense',\n ....: Vrep_minimal=True, Hrep_minimal=True, pref_rep='Vrep')\n sage: p = Polyhedron_QQ_ppl(parent, 'nonsense', Hrep,\n ....: Vrep_minimal=True, Hrep_minimal=True, pref_rep='Hrep')\n sage: p = Polyhedron_QQ_ppl(parent, 'nonsense', Hrep,\n ....: Vrep_minimal=True, Hrep_minimal=True, pref_rep='Vrepresentation')\n Traceback (most recent call last):\n ...\n ValueError: ``pref_rep`` must be one of ``(None, 'Vrep', 'Hrep')``\n\n If the backend supports precomputed data, ``pref_rep`` is ignored::\n\n sage: p = Polyhedron_field(parent, Vrep, 'nonsense',\n ....: Vrep_minimal=True, Hrep_minimal=True, pref_rep='Vrep')\n Traceback (most recent call last):\n ...\n TypeError: ..._init_Hrepresentation() takes 3 positional arguments but 9 were given\n\n The empty polyhedron is detected when the Vrepresentation is given with generator;\n see :trac:`29899`::\n\n sage: from sage.geometry.polyhedron.backend_cdd import Polyhedron_QQ_cdd\n sage: from sage.geometry.polyhedron.parent import Polyhedra_QQ_cdd\n sage: parent = Polyhedra_QQ_cdd(QQ, 0, 'cdd')\n sage: p = Polyhedron_QQ_cdd(parent, [iter([]), iter([]), iter([])], None)\n \"\"\"\n Element.__init__(self, parent=parent)\n if Vrep is not None and Hrep is not None:\n if not (Vrep_minimal is True and Hrep_minimal is True):\n raise ValueError(\"if both Vrep and Hrep are provided, they must be minimal\"\n \" and Vrep_minimal and Hrep_minimal must both be True\")\n if hasattr(self, \"_init_from_Vrepresentation_and_Hrepresentation\"):\n self._init_from_Vrepresentation_and_Hrepresentation(Vrep, Hrep)\n return\n else:\n if pref_rep is None:\n # Initialize from Hrepresentation if this seems simpler.\n Vrep = [tuple(Vrep[0]), tuple(Vrep[1]), Vrep[2]]\n Hrep = [tuple(Hrep[0]), Hrep[1]]\n 
if len(Hrep[0]) < len(Vrep[0]) + len(Vrep[1]):\n pref_rep = 'Hrep'\n else:\n pref_rep = 'Vrep'\n if pref_rep == 'Vrep':\n Hrep = None\n elif pref_rep == 'Hrep':\n Vrep = None\n else:\n raise ValueError(\"``pref_rep`` must be one of ``(None, 'Vrep', 'Hrep')``\")\n if Vrep is not None:\n vertices, rays, lines = Vrep\n\n # We build tuples out of generators now to detect the empty polyhedron.\n\n # The damage is limited:\n # The backend will have to obtain all elements from the generator anyway.\n # The generators are mainly for saving time with initializing from\n # Vrepresentation and Hrepresentation.\n # If we dispose of one of them (see above), it is wasteful to have generated it.\n\n # E.g. the dilate will be set up with new Vrepresentation and Hrepresentation\n # regardless of the backend along with the argument ``pref_rep``.\n # As we only use generators, there is no penalty to this approach\n # (and the method ``dilation`` does not have to distinguish by backend).\n\n if not isinstance(vertices, (tuple, list)):\n vertices = tuple(vertices)\n if not isinstance(rays, (tuple, list)):\n rays = tuple(rays)\n if not isinstance(lines, (tuple, list)):\n lines = tuple(lines)\n\n if vertices or rays or lines:\n self._init_from_Vrepresentation(vertices, rays, lines, **kwds)\n else:\n self._init_empty_polyhedron()\n elif Hrep is not None:\n ieqs, eqns = Hrep\n self._init_from_Hrepresentation(ieqs, eqns, **kwds)\n else:\n self._init_empty_polyhedron()\n\n def _init_from_Vrepresentation(self, vertices, rays, lines, **kwds):\n \"\"\"\n Construct polyhedron from V-representation data.\n\n INPUT:\n\n - ``vertices`` -- list of point. Each point can be specified\n as any iterable container of\n :meth:`~sage.geometry.polyhedron.base.base_ring` elements.\n\n - ``rays`` -- list of rays. Each ray can be specified as any\n iterable container of\n :meth:`~sage.geometry.polyhedron.base.base_ring` elements.\n\n - ``lines`` -- list of lines. Each line can be specified as\n any iterable container of\n :meth:`~sage.geometry.polyhedron.base.base_ring` elements.\n\n EXAMPLES::\n\n sage: p = Polyhedron()\n sage: from sage.geometry.polyhedron.base import Polyhedron_base\n sage: Polyhedron_base._init_from_Vrepresentation(p, [], [], [])\n Traceback (most recent call last):\n ...\n NotImplementedError: a derived class must implement this method\n \"\"\"\n raise NotImplementedError('a derived class must implement this method')\n\n def _init_from_Hrepresentation(self, ieqs, eqns, **kwds):\n \"\"\"\n Construct polyhedron from H-representation data.\n\n INPUT:\n\n - ``ieqs`` -- list of inequalities. Each line can be specified\n as any iterable container of\n :meth:`~sage.geometry.polyhedron.base.base_ring` elements.\n\n - ``eqns`` -- list of equalities. 
Each line can be specified\n as any iterable container of\n :meth:`~sage.geometry.polyhedron.base.base_ring` elements.\n\n EXAMPLES::\n\n sage: p = Polyhedron()\n sage: from sage.geometry.polyhedron.base import Polyhedron_base\n sage: Polyhedron_base._init_from_Hrepresentation(p, [], [])\n Traceback (most recent call last):\n ...\n NotImplementedError: a derived class must implement this method\n \"\"\"\n raise NotImplementedError('a derived class must implement this method')\n\n def _init_empty_polyhedron(self):\n \"\"\"\n Initializes an empty polyhedron.\n\n TESTS::\n\n sage: empty = Polyhedron(); empty\n The empty polyhedron in ZZ^0\n sage: empty.Vrepresentation()\n ()\n sage: empty.Hrepresentation()\n (An equation -1 == 0,)\n sage: Polyhedron(vertices = [])\n The empty polyhedron in ZZ^0\n sage: Polyhedron(vertices = [])._init_empty_polyhedron()\n sage: from sage.geometry.polyhedron.parent import Polyhedra\n sage: Polyhedra(QQ,7)()\n A 0-dimensional polyhedron in QQ^7 defined as the convex hull of 1 vertex\n \"\"\"\n self._Vrepresentation = []\n self._Hrepresentation = []\n self.parent()._make_Equation(self, [-1] + [0] * self.ambient_dim())\n self._Vrepresentation = tuple(self._Vrepresentation)\n self._Hrepresentation = tuple(self._Hrepresentation)\n\n def base_extend(self, base_ring, backend=None):\n \"\"\"\n Return a new polyhedron over a larger base ring.\n\n This method can also be used to change the backend.\n\n INPUT:\n\n - ``base_ring`` -- the new base ring\n\n - ``backend`` -- the new backend, see\n :func:`~sage.geometry.polyhedron.constructor.Polyhedron`.\n If ``None`` (the default), attempt to keep the same backend.\n Otherwise, use the same defaulting behavior\n as described there.\n\n OUTPUT:\n\n The same polyhedron, but over a larger base ring and possibly with a changed backend.\n\n EXAMPLES::\n\n sage: P = Polyhedron(vertices=[(1,0), (0,1)], rays=[(1,1)], base_ring=ZZ); P\n A 2-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices and 1 ray\n sage: P.base_extend(QQ)\n A 2-dimensional polyhedron in QQ^2 defined as the convex hull of 2 vertices and 1 ray\n sage: P.base_extend(QQ) == P\n True\n\n TESTS:\n\n Test that :trac:`22575` is fixed::\n\n sage: Q = P.base_extend(ZZ, backend='field')\n sage: Q.backend()\n 'field'\n\n \"\"\"\n new_parent = self.parent().base_extend(base_ring, backend)\n return new_parent(self, copy=True)\n\n def change_ring(self, base_ring, backend=None):\n \"\"\"\n Return the polyhedron obtained by coercing the entries of the\n vertices/lines/rays of this polyhedron into the given ring.\n\n This method can also be used to change the backend.\n\n INPUT:\n\n - ``base_ring`` -- the new base ring\n\n - ``backend`` -- the new backend or ``None`` (default), see\n :func:`~sage.geometry.polyhedron.constructor.Polyhedron`.\n If ``None`` (the default), attempt to keep the same backend.\n Otherwise, use the same defaulting behavior\n as described there.\n\n EXAMPLES::\n\n sage: P = Polyhedron(vertices=[(1,0), (0,1)], rays=[(1,1)], base_ring=QQ); P\n A 2-dimensional polyhedron in QQ^2 defined as the convex hull of 2 vertices and 1 ray\n sage: P.change_ring(ZZ)\n A 2-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices and 1 ray\n sage: P.change_ring(ZZ) == P\n True\n\n sage: P = Polyhedron(vertices=[(-1.3,0), (0,2.3)], base_ring=RDF); P.vertices()\n (A vertex at (-1.3, 0.0), A vertex at (0.0, 2.3))\n sage: P.change_ring(QQ).vertices()\n (A vertex at (-13/10, 0), A vertex at (0, 23/10))\n sage: P == P.change_ring(QQ)\n True\n 
sage: P.change_ring(ZZ)\n Traceback (most recent call last):\n ...\n TypeError: cannot change the base ring to the Integer Ring\n\n sage: P = polytopes.regular_polygon(3); P # optional - sage.rings.number_field\n A 2-dimensional polyhedron in AA^2 defined as the convex hull of 3 vertices\n sage: P.vertices() # optional - sage.rings.number_field\n (A vertex at (0.?e-16, 1.000000000000000?),\n A vertex at (0.866025403784439?, -0.500000000000000?),\n A vertex at (-0.866025403784439?, -0.500000000000000?))\n sage: P.change_ring(QQ) # optional - sage.rings.number_field\n Traceback (most recent call last):\n ...\n TypeError: cannot change the base ring to the Rational Field\n\n .. WARNING::\n\n The base ring ``RDF`` should be used with care. As it is\n not an exact ring, certain computations may break or\n silently produce wrong results, for example changing the\n base ring from an exact ring into ``RDF`` may cause a\n loss of data::\n\n sage: P = Polyhedron([[2/3,0],[6666666666666667/10^16,0]], base_ring=AA); P # optional - sage.rings.number_field\n A 1-dimensional polyhedron in AA^2 defined as the convex hull of 2 vertices\n sage: Q = P.change_ring(RDF); Q # optional - sage.rings.number_field\n A 0-dimensional polyhedron in RDF^2 defined as the convex hull of 1 vertex\n sage: P.n_vertices() == Q.n_vertices() # optional - sage.rings.number_field\n False\n \"\"\"\n from sage.categories.rings import Rings\n\n if base_ring not in Rings():\n raise ValueError(\"invalid base ring\")\n\n try:\n vertices = [[base_ring(x) for x in vertex] for vertex in self.vertices_list()]\n rays = [[base_ring(x) for x in ray] for ray in self.rays_list()]\n lines = [[base_ring(x) for x in line] for line in self.lines_list()]\n\n except (TypeError, ValueError):\n raise TypeError(\"cannot change the base ring to the {0}\".format(base_ring))\n\n new_parent = self.parent().change_ring(base_ring, backend)\n return new_parent([vertices, rays, lines], None)\n\n def is_mutable(self):\n r\"\"\"\n Return True if the polyhedron is mutable, i.e. it can be modified in place.\n\n EXAMPLES::\n\n sage: p = polytopes.cube(backend='field')\n sage: p.is_mutable()\n False\n \"\"\"\n return False\n\n def is_immutable(self):\n r\"\"\"\n Return True if the polyhedron is immutable, i.e. it cannot be modified in place.\n\n EXAMPLES::\n\n sage: p = polytopes.cube(backend='field')\n sage: p.is_immutable()\n True\n \"\"\"\n return True\n\n @cached_method\n def n_equations(self):\n \"\"\"\n Return the number of equations. The representation will\n always be minimal, so the number of equations is the\n codimension of the polyhedron in the ambient space.\n\n EXAMPLES::\n\n sage: p = Polyhedron(vertices = [[1,0,0],[0,1,0],[0,0,1]])\n sage: p.n_equations()\n 1\n \"\"\"\n return len(self.equations())\n\n @cached_method\n def n_inequalities(self):\n \"\"\"\n Return the number of inequalities. The representation will\n always be minimal, so the number of inequalities is the\n number of facets of the polyhedron in the ambient space.\n\n EXAMPLES::\n\n sage: p = Polyhedron(vertices = [[1,0,0],[0,1,0],[0,0,1]])\n sage: p.n_inequalities()\n 3\n\n sage: p = Polyhedron(vertices = [[t,t^2,t^3] for t in range(6)])\n sage: p.n_facets()\n 8\n \"\"\"\n return len(self.inequalities())\n\n n_facets = n_inequalities\n\n @cached_method\n def n_vertices(self):\n \"\"\"\n Return the number of vertices. The representation will\n always be minimal.\n\n .. WARNING::\n\n If the polyhedron has lines, return the number of vertices in\n the ``Vrepresentation``. 
As the represented polyhedron has\n no 0-dimensional faces (i.e. vertices), ``n_vertices`` corresponds\n to the number of `k`-faces, where `k` is the number of lines::\n\n sage: P = Polyhedron(rays=[[1,0,0]],lines=[[0,1,0]])\n sage: P.n_vertices()\n 1\n sage: P.faces(0)\n ()\n sage: P.f_vector()\n (1, 0, 1, 1)\n\n sage: P = Polyhedron(rays=[[1,0,0]],lines=[[0,1,0],[0,1,1]])\n sage: P.n_vertices()\n 1\n sage: P.f_vector()\n (1, 0, 0, 1, 1)\n\n EXAMPLES::\n\n sage: p = Polyhedron(vertices = [[1,0],[0,1],[1,1]], rays=[[1,1]])\n sage: p.n_vertices()\n 2\n \"\"\"\n return len(self.vertices())\n\n @cached_method\n def n_rays(self):\n \"\"\"\n Return the number of rays. The representation will\n always be minimal.\n\n EXAMPLES::\n\n sage: p = Polyhedron(vertices = [[1,0],[0,1]], rays=[[1,1]])\n sage: p.n_rays()\n 1\n \"\"\"\n return len(self.rays())\n\n @cached_method\n def n_lines(self):\n \"\"\"\n Return the number of lines. The representation will\n always be minimal.\n\n EXAMPLES::\n\n sage: p = Polyhedron(vertices = [[0,0]], rays=[[0,1],[0,-1]])\n sage: p.n_lines()\n 1\n \"\"\"\n return len(self.lines())\n\n def Hrepresentation(self, index=None):\n \"\"\"\n Return the objects of the H-representation. Each entry is\n either an inequality or a equation.\n\n INPUT:\n\n - ``index`` -- either an integer or ``None``\n\n OUTPUT:\n\n The optional argument is an index running from ``0`` to\n ``self.n_Hrepresentation()-1``. If present, the\n H-representation object at the given index will be\n returned. Without an argument, returns the list of all\n H-representation objects.\n\n EXAMPLES::\n\n sage: p = polytopes.hypercube(3, backend='field')\n sage: p.Hrepresentation(0)\n An inequality (-1, 0, 0) x + 1 >= 0\n sage: p.Hrepresentation(0) == p.Hrepresentation()[0]\n True\n \"\"\"\n if index is None:\n return self._Hrepresentation\n else:\n return self._Hrepresentation[index]\n\n def Hrepresentation_str(self, separator='\\n', latex=False, style='>=', align=None, **kwds):\n r\"\"\"\n Return a human-readable string representation of the Hrepresentation of this\n polyhedron.\n\n INPUT:\n\n - ``separator`` -- a string. Default is ``\"\\n\"``.\n\n - ``latex`` -- a boolean. Default is ``False``.\n\n - ``style`` -- either ``\"positive\"`` (making all coefficients positive)\n or ``\"<=\"``, or ``\">=\"``. Default is ``\">=\"``.\n\n - ``align`` -- a boolean or ``None''. 
Default is ``None`` in which case\n ``align`` is ``True`` if ``separator`` is the newline character.\n If set, then the lines of the output string are aligned\n by the comparison symbol by padding blanks.\n\n Keyword parameters of\n :meth:`~sage.geometry.polyhedron.representation.Hrepresentation.repr_pretty`\n are passed on:\n\n - ``prefix`` -- a string\n\n - ``indices`` -- a tuple or other iterable\n\n OUTPUT:\n\n A string.\n\n EXAMPLES::\n\n sage: P = polytopes.permutahedron(3)\n sage: print(P.Hrepresentation_str())\n x0 + x1 + x2 == 6\n x0 + x1 >= 3\n -x0 - x1 >= -5\n x1 >= 1\n -x0 >= -3\n x0 >= 1\n -x1 >= -3\n\n sage: print(P.Hrepresentation_str(style='<='))\n -x0 - x1 - x2 == -6\n -x0 - x1 <= -3\n x0 + x1 <= 5\n -x1 <= -1\n x0 <= 3\n -x0 <= -1\n x1 <= 3\n\n sage: print(P.Hrepresentation_str(style='positive'))\n x0 + x1 + x2 == 6\n x0 + x1 >= 3\n 5 >= x0 + x1\n x1 >= 1\n 3 >= x0\n x0 >= 1\n 3 >= x1\n\n sage: print(P.Hrepresentation_str(latex=True))\n \\begin{array}{rcl}\n x_{0} + x_{1} + x_{2} & = & 6 \\\\\n x_{0} + x_{1} & \\geq & 3 \\\\\n -x_{0} - x_{1} & \\geq & -5 \\\\\n x_{1} & \\geq & 1 \\\\\n -x_{0} & \\geq & -3 \\\\\n x_{0} & \\geq & 1 \\\\\n -x_{1} & \\geq & -3\n \\end{array}\n\n sage: print(P.Hrepresentation_str(align=False))\n x0 + x1 + x2 == 6\n x0 + x1 >= 3\n -x0 - x1 >= -5\n x1 >= 1\n -x0 >= -3\n x0 >= 1\n -x1 >= -3\n\n sage: c = polytopes.cube()\n sage: c.Hrepresentation_str(separator=', ', style='positive')\n '1 >= x0, 1 >= x1, 1 >= x2, 1 + x0 >= 0, 1 + x2 >= 0, 1 + x1 >= 0'\n \"\"\"\n pretty_hs = [h.repr_pretty(split=True, latex=latex, style=style, **kwds) for h in self.Hrepresentation()]\n shift = any(pretty_h[2].startswith('-') for pretty_h in pretty_hs)\n\n if align is None:\n align = separator == \"\\n\"\n if align:\n lengths = [(len(s[0]), len(s[1]), len(s[2])) for s in pretty_hs]\n from operator import itemgetter\n length_left = max(lengths, key=itemgetter(0))[0]\n length_middle = max(lengths, key=itemgetter(1))[1]\n length_right = max(lengths, key=itemgetter(2))[2]\n if shift:\n length_right += 1\n if latex:\n h_line = \"{:>\" + \"{}\".format(length_left) + \"} & {:\" + \\\n \"{}\".format(length_middle) + \"} & {:\" + \\\n \"{}\".format(length_right) + \"}\\\\\\\\\"\n else:\n h_line = \"{:>\" + \"{}\".format(length_left) \\\n + \"} {:\" + \"{}\".format(length_middle) \\\n + \"} {:\" + \"{}\".format(length_right) + \"}\"\n elif latex:\n h_line = \"{} & {} & {}\\\\\\\\\"\n else:\n h_line = \"{} {} {}\"\n\n def pad_non_minus(s):\n if align and shift and not s.startswith('-'):\n return ' ' + s\n else:\n return s\n h_list = [h_line.format(pretty_h[0], pretty_h[1], pad_non_minus(pretty_h[2]))\n for pretty_h in pretty_hs]\n pretty_print = separator.join(h_list)\n\n if not latex:\n return pretty_print\n else:\n # below we remove the 2 unnecessary backslashes at the end of pretty_print\n return \"\\\\begin{array}{rcl}\\n\" + pretty_print[:-2] + \"\\n\\\\end{array}\"\n\n def Hrep_generator(self):\n \"\"\"\n Return an iterator over the objects of the H-representation\n (inequalities or equations).\n\n EXAMPLES::\n\n sage: p = polytopes.hypercube(3)\n sage: next(p.Hrep_generator())\n An inequality (-1, 0, 0) x + 1 >= 0\n \"\"\"\n for H in self.Hrepresentation():\n yield H\n\n @cached_method\n def n_Hrepresentation(self):\n \"\"\"\n Return the number of objects that make up the\n H-representation of the polyhedron.\n\n OUTPUT:\n\n Integer.\n\n EXAMPLES::\n\n sage: p = polytopes.cross_polytope(4)\n sage: p.n_Hrepresentation()\n 16\n sage: p.n_Hrepresentation() == 
p.n_inequalities() + p.n_equations()\n True\n \"\"\"\n return len(self.Hrepresentation())\n\n def Vrepresentation(self, index=None):\n \"\"\"\n Return the objects of the V-representation. Each entry is\n either a vertex, a ray, or a line.\n\n See :mod:`sage.geometry.polyhedron.constructor` for a\n definition of vertex/ray/line.\n\n INPUT:\n\n - ``index`` -- either an integer or ``None``\n\n OUTPUT:\n\n The optional argument is an index running from ``0`` to\n ``self.n_Vrepresentation()-1``. If present, the\n V-representation object at the given index will be\n returned. Without an argument, returns the list of all\n V-representation objects.\n\n EXAMPLES::\n\n sage: p = polytopes.simplex(4, project=True)\n sage: p.Vrepresentation(0)\n A vertex at (0.7071067812, 0.4082482905, 0.2886751346, 0.2236067977)\n sage: p.Vrepresentation(0) == p.Vrepresentation() [0]\n True\n \"\"\"\n if index is None:\n return self._Vrepresentation\n else:\n return self._Vrepresentation[index]\n\n @cached_method\n def n_Vrepresentation(self):\n \"\"\"\n Return the number of objects that make up the\n V-representation of the polyhedron.\n\n OUTPUT:\n\n Integer.\n\n EXAMPLES::\n\n sage: p = polytopes.simplex(4)\n sage: p.n_Vrepresentation()\n 5\n sage: p.n_Vrepresentation() == p.n_vertices() + p.n_rays() + p.n_lines()\n True\n \"\"\"\n return len(self.Vrepresentation())\n\n def Vrep_generator(self):\n \"\"\"\n Return an iterator over the objects of the V-representation\n (vertices, rays, and lines).\n\n EXAMPLES::\n\n sage: p = polytopes.cyclic_polytope(3,4)\n sage: vg = p.Vrep_generator()\n sage: next(vg)\n A vertex at (0, 0, 0)\n sage: next(vg)\n A vertex at (1, 1, 1)\n \"\"\"\n for V in self.Vrepresentation():\n yield V\n\n def inequality_generator(self):\n \"\"\"\n Return a generator for the defining inequalities of the\n polyhedron.\n\n OUTPUT:\n\n A generator of the inequality Hrepresentation objects.\n\n EXAMPLES::\n\n sage: triangle = Polyhedron(vertices=[[1,0],[0,1],[1,1]])\n sage: for v in triangle.inequality_generator(): print(v)\n An inequality (1, 1) x - 1 >= 0\n An inequality (0, -1) x + 1 >= 0\n An inequality (-1, 0) x + 1 >= 0\n sage: [ v for v in triangle.inequality_generator() ]\n [An inequality (1, 1) x - 1 >= 0,\n An inequality (0, -1) x + 1 >= 0,\n An inequality (-1, 0) x + 1 >= 0]\n sage: [ [v.A(), v.b()] for v in triangle.inequality_generator() ]\n [[(1, 1), -1], [(0, -1), 1], [(-1, 0), 1]]\n \"\"\"\n for H in self.Hrepresentation():\n if H.is_inequality():\n yield H\n\n @cached_method\n def inequalities(self):\n \"\"\"\n Return all inequalities.\n\n OUTPUT:\n\n A tuple of inequalities.\n\n EXAMPLES::\n\n sage: p = Polyhedron(vertices = [[0,0,0],[0,0,1],[0,1,0],[1,0,0],[2,2,2]])\n sage: p.inequalities()[0:3]\n (An inequality (1, 0, 0) x + 0 >= 0,\n An inequality (0, 1, 0) x + 0 >= 0,\n An inequality (0, 0, 1) x + 0 >= 0)\n sage: p3 = Polyhedron(vertices = Permutations([1,2,3,4]))\n sage: ieqs = p3.inequalities()\n sage: ieqs[0]\n An inequality (0, 1, 1, 1) x - 6 >= 0\n sage: list(_)\n [-6, 0, 1, 1, 1]\n \"\"\"\n return tuple(self.inequality_generator())\n\n def inequalities_list(self):\n \"\"\"\n Return a list of inequalities as coefficient lists.\n\n .. 
NOTE::\n\n It is recommended to use :meth:`inequalities` or\n :meth:`inequality_generator` instead to iterate over the\n list of :class:`Inequality` objects.\n\n EXAMPLES::\n\n sage: p = Polyhedron(vertices = [[0,0,0],[0,0,1],[0,1,0],[1,0,0],[2,2,2]])\n sage: p.inequalities_list()[0:3]\n [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n sage: p3 = Polyhedron(vertices = Permutations([1,2,3,4]))\n sage: ieqs = p3.inequalities_list()\n sage: ieqs[0]\n [-6, 0, 1, 1, 1]\n sage: ieqs[-1]\n [-3, 0, 1, 0, 1]\n sage: ieqs == [list(x) for x in p3.inequality_generator()]\n True\n \"\"\"\n return [list(x) for x in self.inequality_generator()]\n\n def equation_generator(self):\n \"\"\"\n Return a generator for the linear equations satisfied by the\n polyhedron.\n\n EXAMPLES::\n\n sage: p = polytopes.regular_polygon(8,base_ring=RDF)\n sage: p3 = Polyhedron(vertices = [x+[0] for x in p.vertices()], base_ring=RDF)\n sage: next(p3.equation_generator())\n An equation (0.0, 0.0, 1.0) x + 0.0 == 0\n \"\"\"\n for H in self.Hrepresentation():\n if H.is_equation():\n yield H\n\n @cached_method\n def equations(self):\n \"\"\"\n Return all linear constraints of the polyhedron.\n\n OUTPUT:\n\n A tuple of equations.\n\n EXAMPLES::\n\n sage: test_p = Polyhedron(vertices = [[1,2,3,4],[2,1,3,4],[4,3,2,1],[3,4,1,2]])\n sage: test_p.equations()\n (An equation (1, 1, 1, 1) x - 10 == 0,)\n \"\"\"\n return tuple(self.equation_generator())\n\n def equations_list(self):\n \"\"\"\n Return the linear constraints of the polyhedron. As with\n inequalities, each constraint is given as [b -a1 -a2 ... an]\n where for variables x1, x2,..., xn, the polyhedron satisfies\n the equation b = a1*x1 + a2*x2 + ... + an*xn.\n\n .. NOTE::\n\n It is recommended to use :meth:`equations` or\n :meth:`equation_generator()` instead to iterate over the\n list of\n :class:`~sage.geometry.polyhedron.representation.Equation`\n objects.\n\n EXAMPLES::\n\n sage: test_p = Polyhedron(vertices = [[1,2,3,4],[2,1,3,4],[4,3,2,1],[3,4,1,2]])\n sage: test_p.equations_list()\n [[-10, 1, 1, 1, 1]]\n \"\"\"\n return [list(eq) for eq in self.equation_generator()]\n\n def vertices_list(self):\n \"\"\"\n Return a list of vertices of the polyhedron.\n\n .. NOTE::\n\n It is recommended to use :meth:`vertex_generator` instead to\n iterate over the list of :class:`Vertex` objects.\n\n .. WARNING::\n\n If the polyhedron has lines, return the vertices\n of the ``Vrepresentation``. However, the represented polyhedron\n has no 0-dimensional faces (i.e. vertices)::\n\n sage: P = Polyhedron(rays=[[1,0,0]],lines=[[0,1,0]])\n sage: P.vertices_list()\n [[0, 0, 0]]\n sage: P.faces(0)\n ()\n\n EXAMPLES::\n\n sage: triangle = Polyhedron(vertices=[[1,0],[0,1],[1,1]])\n sage: triangle.vertices_list()\n [[0, 1], [1, 0], [1, 1]]\n sage: a_simplex = Polyhedron(ieqs = [\n ....: [0,1,0,0,0],[0,0,1,0,0],[0,0,0,1,0],[0,0,0,0,1]\n ....: ], eqns = [[1,-1,-1,-1,-1]])\n sage: a_simplex.vertices_list()\n [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n sage: a_simplex.vertices_list() == [list(v) for v in a_simplex.vertex_generator()]\n True\n \"\"\"\n return [list(x) for x in self.vertex_generator()]\n\n def vertex_generator(self):\n \"\"\"\n Return a generator for the vertices of the polyhedron.\n\n .. WARNING::\n\n If the polyhedron has lines, return a generator for the vertices\n of the ``Vrepresentation``. However, the represented polyhedron\n has no 0-dimensional faces (i.e. 
vertices)::\n\n sage: P = Polyhedron(rays=[[1,0,0]],lines=[[0,1,0]])\n sage: list(P.vertex_generator())\n [A vertex at (0, 0, 0)]\n sage: P.faces(0)\n ()\n\n EXAMPLES::\n\n sage: triangle = Polyhedron(vertices=[[1,0],[0,1],[1,1]])\n sage: for v in triangle.vertex_generator(): print(v)\n A vertex at (0, 1)\n A vertex at (1, 0)\n A vertex at (1, 1)\n sage: v_gen = triangle.vertex_generator()\n sage: next(v_gen) # the first vertex\n A vertex at (0, 1)\n sage: next(v_gen) # the second vertex\n A vertex at (1, 0)\n sage: next(v_gen) # the third vertex\n A vertex at (1, 1)\n sage: try: next(v_gen) # there are only three vertices\n ....: except StopIteration: print(\"STOP\")\n STOP\n sage: type(v_gen)\n <... 'generator'>\n sage: [ v for v in triangle.vertex_generator() ]\n [A vertex at (0, 1), A vertex at (1, 0), A vertex at (1, 1)]\n \"\"\"\n for V in self.Vrepresentation():\n if V.is_vertex():\n yield V\n\n @cached_method\n def vertices(self):\n \"\"\"\n Return all vertices of the polyhedron.\n\n OUTPUT:\n\n A tuple of vertices.\n\n .. WARNING::\n\n If the polyhedron has lines, return the vertices\n of the ``Vrepresentation``. However, the represented polyhedron\n has no 0-dimensional faces (i.e. vertices)::\n\n sage: P = Polyhedron(rays=[[1,0,0]],lines=[[0,1,0]])\n sage: P.vertices()\n (A vertex at (0, 0, 0),)\n sage: P.faces(0)\n ()\n\n EXAMPLES::\n\n sage: triangle = Polyhedron(vertices=[[1,0],[0,1],[1,1]])\n sage: triangle.vertices()\n (A vertex at (0, 1), A vertex at (1, 0), A vertex at (1, 1))\n sage: a_simplex = Polyhedron(ieqs = [\n ....: [0,1,0,0,0],[0,0,1,0,0],[0,0,0,1,0],[0,0,0,0,1]\n ....: ], eqns = [[1,-1,-1,-1,-1]])\n sage: a_simplex.vertices()\n (A vertex at (1, 0, 0, 0), A vertex at (0, 1, 0, 0),\n A vertex at (0, 0, 1, 0), A vertex at (0, 0, 0, 1))\n \"\"\"\n return tuple(self.vertex_generator())\n\n def ray_generator(self):\n \"\"\"\n Return a generator for the rays of the polyhedron.\n\n EXAMPLES::\n\n sage: pi = Polyhedron(ieqs = [[1,1,0],[1,0,1]])\n sage: pir = pi.ray_generator()\n sage: [x.vector() for x in pir]\n [(1, 0), (0, 1)]\n \"\"\"\n for V in self.Vrepresentation():\n if V.is_ray():\n yield V\n\n @cached_method\n def rays(self):\n \"\"\"\n Return a list of rays of the polyhedron.\n\n OUTPUT:\n\n A tuple of rays.\n\n EXAMPLES::\n\n sage: p = Polyhedron(ieqs = [[0,0,0,1],[0,0,1,0],[1,1,0,0]])\n sage: p.rays()\n (A ray in the direction (1, 0, 0),\n A ray in the direction (0, 1, 0),\n A ray in the direction (0, 0, 1))\n \"\"\"\n return tuple(self.ray_generator())\n\n def rays_list(self):\n \"\"\"\n Return a list of rays as coefficient lists.\n\n .. 
NOTE::\n\n It is recommended to use :meth:`rays` or\n :meth:`ray_generator` instead to iterate over the list of\n :class:`Ray` objects.\n\n OUTPUT:\n\n A list of rays as lists of coordinates.\n\n EXAMPLES::\n\n sage: p = Polyhedron(ieqs = [[0,0,0,1],[0,0,1,0],[1,1,0,0]])\n sage: p.rays_list()\n [[1, 0, 0], [0, 1, 0], [0, 0, 1]]\n sage: p.rays_list() == [list(r) for r in p.ray_generator()]\n True\n \"\"\"\n return [list(x) for x in self.ray_generator()]\n\n def line_generator(self):\n \"\"\"\n Return a generator for the lines of the polyhedron.\n\n EXAMPLES::\n\n sage: pr = Polyhedron(rays = [[1,0],[-1,0],[0,1]], vertices = [[-1,-1]])\n sage: next(pr.line_generator()).vector()\n (1, 0)\n \"\"\"\n for V in self.Vrepresentation():\n if V.is_line():\n yield V\n\n @cached_method\n def lines(self):\n \"\"\"\n Return all lines of the polyhedron.\n\n OUTPUT:\n\n A tuple of lines.\n\n EXAMPLES::\n\n sage: p = Polyhedron(rays = [[1,0],[-1,0],[0,1],[1,1]], vertices = [[-2,-2],[2,3]])\n sage: p.lines()\n (A line in the direction (1, 0),)\n \"\"\"\n return tuple(self.line_generator())\n\n def lines_list(self):\n \"\"\"\n Return a list of lines of the polyhedron. The line data is given\n as a list of coordinates rather than as a Hrepresentation object.\n\n .. NOTE::\n\n It is recommended to use :meth:`line_generator` instead to\n iterate over the list of :class:`Line` objects.\n\n EXAMPLES::\n\n sage: p = Polyhedron(rays = [[1,0],[-1,0],[0,1],[1,1]], vertices = [[-2,-2],[2,3]])\n sage: p.lines_list()\n [[1, 0]]\n sage: p.lines_list() == [list(x) for x in p.line_generator()]\n True\n \"\"\"\n return [list(x) for x in self.line_generator()]\n\n def base_ring(self):\n \"\"\"\n Return the base ring.\n\n OUTPUT:\n\n The ring over which the polyhedron is defined. Must be a\n sub-ring of the reals to define a polyhedron, in particular\n comparison must be defined. Popular choices are\n\n * ``ZZ`` (the ring of integers, lattice polytope),\n\n * ``QQ`` (exact arithmetic using gmp),\n\n * ``RDF`` (double precision floating-point arithmetic), or\n\n * ``AA`` (real algebraic field).\n\n EXAMPLES::\n\n sage: triangle = Polyhedron(vertices = [[1,0],[0,1],[1,1]])\n sage: triangle.base_ring() == ZZ\n True\n \"\"\"\n return self.parent().base_ring()\n\n def backend(self):\n \"\"\"\n Return the backend used.\n\n OUTPUT:\n\n The name of the backend used for computations. It will be one of\n the following backends:\n\n * ``ppl`` the Parma Polyhedra Library\n\n * ``cdd`` CDD\n\n * ``normaliz`` normaliz\n\n * ``polymake`` polymake\n\n * ``field`` a generic Sage implementation\n\n EXAMPLES::\n\n sage: triangle = Polyhedron(vertices = [[1, 0], [0, 1], [1, 1]])\n sage: triangle.backend()\n 'ppl'\n sage: D = polytopes.dodecahedron()\n sage: D.backend()\n 'field'\n sage: P = Polyhedron([[1.23]])\n sage: P.backend()\n 'cdd'\n \"\"\"\n return self.parent().backend()\n", "id": "4508028", "language": "Python", "matching_score": 3.4409334659576416, "max_stars_count": 0, "path": "src/sage/geometry/polyhedron/base0.py" }, { "content": "r\"\"\"\nInterface to Macaulay2\n\n.. NOTE::\n\n You must have ``Macaulay2`` installed on your computer\n for this interface to work. Macaulay2 is not included with Sage,\n but you can obtain it from https://faculty.math.illinois.edu/Macaulay2/.\n No additional optional Sage packages are required.\n\nSage provides an interface to the Macaulay2 computational algebra\nsystem. This system provides extensive functionality for commutative\nalgebra. 
You do not have to install any optional packages.\n\nThe Macaulay2 interface offers three pieces of functionality:\n\n- ``macaulay2_console()`` -- A function that dumps you\n into an interactive command-line Macaulay2 session.\n\n- ``macaulay2.eval(expr)`` -- Evaluation of arbitrary Macaulay2\n expressions, with the result returned as a string.\n\n- ``macaulay2(expr)`` -- Creation of a Sage object that wraps a\n Macaulay2 object. This provides a Pythonic interface to Macaulay2. For\n example, if ``f = macaulay2(10)``, then ``f.gcd(25)`` returns the\n GCD of `10` and `25` computed using Macaulay2.\n\nEXAMPLES::\n\n sage: macaulay2('3/5 + 7/11') # optional - macaulay2\n 68\n --\n 55\n sage: f = macaulay2('f = i -> i^3') # optional - macaulay2\n sage: f # optional - macaulay2\n f\n sage: f(5) # optional - macaulay2\n 125\n\n sage: R = macaulay2('ZZ/5[x,y,z]') # optional - macaulay2\n sage: R # optional - macaulay2\n ZZ\n --[x...z]\n 5\n sage: x = macaulay2('x') # optional - macaulay2\n sage: y = macaulay2('y') # optional - macaulay2\n sage: (x+y)^5 # optional - macaulay2\n 5 5\n x + y\n sage: parent((x+y)^5) # optional - macaulay2\n Macaulay2\n\nThe name of the variable to which a Macaulay2 element is assigned internally\ncan be passed as an argument. This is useful for types like polynomial rings\nwhich acquire that name in Macaulay2::\n\n sage: R = macaulay2('QQ[x,y,z,w]', 'R') # optional - macaulay2\n sage: R # optional - macaulay2\n R\n\n sage: f = macaulay2('x^4 + 2*x*y^3 + x*y^2*w + x*y*z*w + x*y*w^2' # optional - macaulay2\n ....: '+ 2*x*z*w^2 + y^4 + y^3*w + 2*y^2*z*w + z^4 + w^4')\n sage: f # optional - macaulay2\n 4 3 4 4 2 3 2 2 2 4\n x + 2x*y + y + z + x*y w + y w + x*y*z*w + 2y z*w + x*y*w + 2x*z*w + w\n sage: g = f * macaulay2('x+y^5') # optional - macaulay2\n sage: print(g.factor()) # optional - macaulay2\n 4 3 4 4 2 3 2 2 2 4 5\n (x + 2x*y + y + z + x*y w + y w + x*y*z*w + 2y z*w + x*y*w + 2x*z*w + w )(y + x)\n\nUse :meth:`eval` for explicit control over what is sent to the interpreter.\nThe argument is evaluated in Macaulay2 as is::\n\n sage: macaulay2.eval('compactMatrixForm') # optional - macaulay2\n true\n sage: macaulay2.eval('compactMatrixForm = false;') # optional - macaulay2\n sage: macaulay2.eval('matrix {{1, x^2+y}}') # optional - macaulay2\n | 2 |\n | 1 x + y |\n <BLANKLINE>\n 1 2\n Matrix R <--- R\n sage: macaulay2.eval('compactMatrixForm = true;') # optional - macaulay2\n\n\nAUTHORS:\n\n- <NAME> and <NAME> (2006-02-05, during Sage coding sprint)\n- <NAME> (2006-02-09): inclusion in Sage; prompt uses regexp,\n calling of Macaulay2 functions via __call__.\n- <NAME> (2006-02-09): fixed bug in reading from file and\n improved output cleaning.\n- <NAME> (2006-02-12): added ring and ideal constructors,\n list delimiters, is_Macaulay2Element, sage_polystring,\n __floordiv__, __mod__, __iter__, __len__; stripped extra\n leading space and trailing newline from output.\n\n.. TODO::\n\n Get rid of all numbers in output, e.g., in ideal function below.\n\"\"\"\n\n# ****************************************************************************\n# Copyright (C) 2006 <NAME> <<EMAIL>>\n# <NAME> <<EMAIL>>\n# <NAME> <<EMAIL>>\n#\n# Distributed under the terms of the GNU General Public License (GPL)\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU\n# General Public License for more details.\n#\n# The full text of the GPL is available at:\n#\n# https://www.gnu.org/licenses/\n# ****************************************************************************\n\nimport os\nimport re\n\nfrom sage.interfaces.expect import (Expect, ExpectElement, ExpectFunction,\n FunctionElement)\nfrom sage.interfaces.interface import AsciiArtString\nfrom sage.misc.multireplace import multiple_replace\nfrom sage.misc.superseded import deprecated_function_alias\nfrom sage.interfaces.tab_completion import ExtraTabCompletion\nfrom sage.docs.instancedoc import instancedoc\nfrom sage.structure.global_options import GlobalOptions\n\n\ndef remove_output_labels(s):\n r\"\"\"\n Remove output labels of Macaulay2 from a string.\n\n - s: output of Macaulay2\n\n - s: string\n\n Returns: the input string with `n` symbols removed from the beginning of\n each line, where `n` is the minimal number of spaces or symbols of\n Macaulay2 output labels (looking like 'o39 = ') present on every non-empty\n line.\n\n Return type: string\n\n .. note::\n\n If ``s`` consists of several outputs and their labels have\n different width, it is possible that some strings will have leading\n spaces (or maybe even pieces of output labels). However, this\n function will try not cut any messages.\n\n EXAMPLES::\n\n sage: from sage.interfaces.macaulay2 import remove_output_labels\n sage: output = 'o1 = QQ [x, y]\\n\\no1 : PolynomialRing\\n'\n sage: remove_output_labels(output)\n 'QQ [x, y]\\n\\nPolynomialRing\\n'\n \"\"\"\n label = re.compile(r\"^o+[0-9]+ (=|:) |^ *\")\n lines = s.split(\"\\n\")\n matches = [label.match(l) for l in lines if l]\n if not matches:\n return s\n else:\n n = min(m.end() - m.start() for m in matches)\n return \"\\n\".join(l[n:] for l in lines)\n\n\nPROMPT = \"_EGAS_ : \"\nPROMPT_LOAD = \"_EGAS_LOAD_ : \"\n\n\nclass Macaulay2(ExtraTabCompletion, Expect):\n \"\"\"\n Interface to the Macaulay2 interpreter.\n \"\"\"\n def __init__(self, maxread=None, script_subdirectory=None,\n logfile=None, server=None, server_tmpdir=None, command=None):\n \"\"\"\n Initialize a Macaulay2 interface instance.\n\n We replace the standard input prompt with a strange one, so that\n we do not catch input prompts inside the documentation.\n\n We replace the standard input continuation prompt, which is\n just a bunch of spaces and cannot be automatically detected in a\n reliable way. This is necessary for allowing commands that occupy\n several strings.\n\n We also change the starting line number to make all the output\n labels to be of the same length. 
This allows correct stripping of\n the output of several commands.\n\n TESTS::\n\n sage: macaulay2 == loads(dumps(macaulay2))\n True\n \"\"\"\n if command is None:\n command = os.getenv('SAGE_MACAULAY2_COMMAND') or 'M2'\n init_str = (\n # Prompt changing commands\n 'sageLoadMode = false;'\n 'ZZ#{Standard,Core#\"private dictionary\"#\"InputPrompt\"} = '\n 'ZZ#{Standard,Core#\"private dictionary\"#\"InputContinuationPrompt\"} = ' +\n 'lineno -> if(sageLoadMode) then \"%s\" else \"%s\";' % (PROMPT_LOAD, PROMPT) +\n # Also prevent line wrapping in Macaulay2\n \"printWidth = 0;\" +\n # And make all output labels to be of the same width\n \"lineNumber = 10^9;\"\n # Assignment of internal expect variables.\n 'sageAssign = (k, v) -> (if not instance(v, Sequence) then use v; k <- v);'\n )\n command = \"%s --no-debug --no-readline --silent -e '%s'\" % (command, init_str)\n Expect.__init__(self,\n name = 'macaulay2',\n prompt = PROMPT,\n command = command,\n server = server,\n server_tmpdir = server_tmpdir,\n script_subdirectory = script_subdirectory,\n verbose_start = False,\n logfile = logfile,\n eval_using_file_cutoff=500)\n\n # Macaulay2 provides no \"clear\" function. However, Macaulay2 does provide\n # garbage collection; since expect automatically reuses variable names,\n # garbage collection in Sage properly sets up garbage collection in\n # Macaulay2.\n\n def __reduce__(self):\n \"\"\"\n Used in serializing an Macaulay2 interface.\n\n EXAMPLES::\n\n sage: rlm2, t = macaulay2.__reduce__()\n sage: rlm2(*t)\n Macaulay2\n \"\"\"\n return reduce_load_macaulay2, tuple([])\n\n def _read_in_file_command(self, filename):\n \"\"\"\n Load and *execute* the content of ``filename`` in Macaulay2.\n\n INPUT:\n\n - filename: the name of the file to be loaded and executed\n (type: string)\n\n OUTPUT:\n\n Returns Macaulay2 command loading and executing commands in\n ``filename``.\n Return type: string\n\n TESTS::\n\n sage: filename = tmp_filename()\n sage: f = open(filename, \"w\")\n sage: _ = f.write(\"sage_test = 7;\")\n sage: f.close()\n sage: macaulay2.read(filename) # indirect doctest, optional - macaulay2\n sage: macaulay2.eval(\"sage_test\") # optional - macaulay2\n 7\n sage: import os\n sage: os.unlink(filename)\n sage: macaulay2(10^10000) == 10^10000 # optional - macaulay2\n True\n \"\"\"\n # We use `input` because `load` does not echo the output values\n return 'sageLoadMode=true;input \"%s\";sageLoadMode=false;' % filename\n\n def _post_process_from_file(self, s):\n r\"\"\"\n TESTS:\n\n Check that evaluating using a file gives the same result as without (:trac:`25903`)::\n\n sage: from sage.interfaces.macaulay2 import remove_output_labels\n sage: s1 = macaulay2._eval_line_using_file('ZZ^2') # indirect doctest, optional - macaulay2\n sage: s2 = macaulay2._eval_line('ZZ^2', allow_use_file=False) # optional - macaulay2\n sage: remove_output_labels(s1) == remove_output_labels(s2) # optional - macaulay2\n True\n\n Test multiline input from file::\n\n sage: (macaulay2.eval('ZZ^2\\nZZ^3', allow_use_file=False) == # indirect doctest, optional - macaulay2\n ....: macaulay2.eval('ZZ^2\\n%sZZ^3' % (' ' * macaulay2._eval_using_file_cutoff)))\n True\n \"\"\"\n s = '\\n'.join(line for line in s.split('\\n')\n if not line.startswith(PROMPT_LOAD))\n return s\n\n def eval(self, code, strip=True, **kwds):\n \"\"\"\n Send the code x to the Macaulay2 interpreter and return the output\n as a string suitable for input back into Macaulay2, if possible.\n\n INPUT:\n\n - code -- str\n - strip -- ignored\n\n EXAMPLES::\n\n 
sage: macaulay2.eval(\"2+2\") # optional - macaulay2\n 4\n \"\"\"\n code = code.strip()\n # TODO: in some cases change toExternalString to toString??\n ans = Expect.eval(self, code, strip=strip, **kwds).strip('\\n')\n if strip:\n ans = remove_output_labels(ans)\n return AsciiArtString(ans)\n\n def restart(self):\n r\"\"\"\n Restart Macaulay2 interpreter.\n\n TESTS::\n\n sage: macaulay2.restart() # optional - macaulay2\n \"\"\"\n # If we allow restart to be called as a function, there will be\n # parasitic output\n self.eval(\"restart\")\n\n def set_seed(self, seed=None):\n r\"\"\"\n Set the seed for Macaulay2 interpreter.\n\n INPUT:\n\n - ``seed`` -- number (default: ``None``). If ``None``, it\n is set to a random number.\n\n OUTPUT: the new seed\n\n EXAMPLES::\n\n sage: m = Macaulay2() # optional - macaulay2\n sage: m.set_seed(123456) # optional - macaulay2\n 123456\n sage: [m.random(100) for _ in range(11)] # optional - macaulay2\n [8, 29, 5, 22, 4, 32, 35, 57, 3, 95, 36]\n \"\"\"\n if seed is None:\n seed = self.rand_seed()\n self.eval('setRandomSeed(%d)' % seed)\n self._seed = seed\n return seed\n\n class options(GlobalOptions):\n r\"\"\"\n Global options for Macaulay2 elements.\n\n @OPTIONS@\n\n EXAMPLES::\n\n sage: macaulay2.options.after_print = True # optional - macaulay2\n sage: A = macaulay2(matrix([[1, 2], [3, 6]])); A # optional - macaulay2\n | 1 2 |\n | 3 6 |\n <BLANKLINE>\n 2 2\n Matrix ZZ <--- ZZ\n sage: A.kernel() # optional - macaulay2\n image | 2 |\n | -1 |\n <BLANKLINE>\n 2\n ZZ-module, submodule of ZZ\n sage: macaulay2.options.after_print = False # optional - macaulay2\n \"\"\"\n NAME = 'Macaulay2'\n module = 'sage.interfaces.macaulay2'\n after_print = dict(default=False,\n description='append AfterPrint type information to '\n 'textual representations',\n checker=lambda val: isinstance(val, bool))\n\n def get(self, var):\n \"\"\"\n Get the value of the variable ``var``.\n\n INPUT:\n\n - ``var`` - string; the name of the variable in Macaulay2\n\n OUTPUT: a string of the textual representation of the variable in\n Macaulay2\n\n EXAMPLES::\n\n sage: macaulay2.set(\"a\", \"2\") # optional - macaulay2\n sage: macaulay2.get(\"a\") # optional - macaulay2\n 2\n\n Note that the following syntax is used to obtain a\n ``Macaulay2Element`` instead::\n\n sage: a = macaulay2('2'); a # optional - macaulay2\n 2\n sage: type(a) # optional - macaulay2\n <class 'sage.interfaces.macaulay2.Macaulay2Element'>\n \"\"\"\n return self.eval('print(%s)' % var, strip=False)\n\n def set(self, var, value):\n \"\"\"\n Set the variable ``var`` to the given value.\n\n INPUT:\n\n - ``var`` - string; the name of the variable in Macaulay2\n - ``value`` - a string to evaluate\n\n EXAMPLES::\n\n sage: macaulay2.set(\"a\", \"1+1\") # optional - macaulay2\n sage: macaulay2.get(\"a\") # optional - macaulay2\n 2\n\n TESTS:\n\n Check that internal expect variables do not acquire their global\n variable name and that ``use`` is invoked (:trac:`28303`)::\n\n sage: R = macaulay2('QQ[x, y]') # indirect doctest, optional - macaulay2\n sage: R.net() # optional - macaulay2\n QQ[x...y]\n sage: S = R / macaulay2('ideal {x^2 - y}') # optional - macaulay2\n sage: macaulay2.eval('class x === %s' % S.name()) # optional - macaulay2\n true\n \"\"\"\n if re.match(r'sage\\d+$', var):\n cmd = 'sageAssign(symbol %s,(%s));' % (var, value)\n else:\n cmd = '%s=(%s);' % (var,value)\n ans = Expect.eval(self, cmd, strip=False)\n if ans.find(\"stdio:\") != -1:\n raise RuntimeError(\"Error evaluating Macaulay2 code.\\nIN:%s\\nOUT:%s\" % 
(cmd, ans))\n\n def clear(self, var):\n \"\"\"\n Clear the variable named ``var``.\n\n The interface automatically clears Macaulay2 elements when they fall\n out of use, so calling this method is usually not necessary.\n\n EXAMPLES::\n\n sage: macaulay2.eval('R = QQ[x,y];') # optional - macaulay2\n sage: macaulay2.eval('net class R') # optional - macaulay2\n PolynomialRing\n sage: macaulay2.clear('R') # optional - macaulay2\n sage: macaulay2.eval('net class R') # optional - macaulay2\n Symbol\n\n TESTS:\n\n Check that only internal variables get reused by the interface::\n\n sage: all(s.startswith('sage') for s in macaulay2._available_vars) # optional - macaulay2\n True\n \"\"\"\n if re.match(r'sage\\d+$', var):\n self._available_vars.append(var)\n else:\n # this approach is also used by Macaulay2 itself in clearAll\n cmd = 'globalAssign(symbol {0},symbol {0});'.format(var)\n Expect.eval(self, cmd, strip=False)\n\n def _contains(self, v1, v2):\n \"\"\"\n EXAMPLES::\n\n sage: a = macaulay2([3,4,5]) # optional - macaulay2\n sage: 0 in a, 2 in a, 3 in a # optional - macaulay2, indirect doctest\n (True, True, False)\n sage: b = macaulay2('hashTable {\"x\" => 1, \"y\" => 2}') # optional - macaulay2\n sage: 'x' in b, '\"x\"' in b # optional - macaulay2, indirect doctest\n (False, True)\n \"\"\"\n return self.eval(\"%s#?%s\" % (v2, v1)) == self._true_symbol()\n\n def _object_class(self):\n \"\"\"\n Return the class of Macaulay2 elements.\n\n EXAMPLES::\n\n sage: macaulay2._object_class()\n <class 'sage.interfaces.macaulay2.Macaulay2Element'>\n \"\"\"\n return Macaulay2Element\n\n def _function_class(self):\n \"\"\"\n Return the class of Macaulay2 functions.\n\n EXAMPLES::\n\n sage: macaulay2._function_class()\n <class 'sage.interfaces.macaulay2.Macaulay2Function'>\n \"\"\"\n return Macaulay2Function\n\n def _function_element_class(self):\n \"\"\"\n Return the class of partially-applied Macaulay2 functions.\n\n EXAMPLES::\n\n sage: macaulay2._function_element_class()\n <class 'sage.interfaces.macaulay2.Macaulay2FunctionElement'>\n \"\"\"\n return Macaulay2FunctionElement\n\n def console(self):\n \"\"\"\n Spawn a new M2 command-line session.\n\n EXAMPLES::\n\n sage: macaulay2.console() # not tested\n Macaulay 2, version 1.1\n with packages: Classic, Core, Elimination, IntegralClosure, LLLBases, Parsing, PrimaryDecomposition, SchurRings, TangentCone\n ...\n\n \"\"\"\n macaulay2_console()\n\n def _install_hints(self):\n r\"\"\"\n\n TESTS::\n\n sage: m2 = Macaulay2(command='/wrongpath/M2')\n sage: m2('3+2')\n Traceback (most recent call last):\n ...\n TypeError: unable to start macaulay2 because the command '/wrongpath/M2 ...' failed: The command was not found or was not executable: /wrongpath/M2.\n <BLANKLINE>\n Your attempt to start Macaulay2 failed, either because you do not have\n have Macaulay2 installed, or because it is not configured correctly...\n \"\"\"\n return r\"\"\"\n Your attempt to start Macaulay2 failed, either because you do not have\n have Macaulay2 installed, or because it is not configured correctly.\n\n - Macaulay2 is not included with Sage, but you can obtain it from\n https://faculty.math.illinois.edu/Macaulay2/. No additional\n optional Sage packages are required.\n\n - If you have Macaulay2 installed, then perhaps it is not configured\n correctly. 
Sage assumes that you can start Macaulay2 with the command\n M2.\n\n - Alternatively, you can use the following command\n to point Sage to the correct command for your system.\n\n m2 = Macaulay2(command='/usr/local/bin/M2')\n\n or by setting the environment variable SAGE_MACAULAY2_COMMAND.\n \"\"\"\n\n def _left_list_delim(self):\n \"\"\"\n Returns the Macaulay2 left delimiter for lists.\n\n EXAMPLES::\n\n sage: macaulay2._left_list_delim()\n '{'\n \"\"\"\n return '{'\n\n def _right_list_delim(self):\n \"\"\"\n Returns the Macaulay2 right delimiter for lists.\n\n EXAMPLES::\n\n sage: macaulay2._right_list_delim()\n '}'\n \"\"\"\n return '}'\n\n def _true_symbol(self):\n \"\"\"\n Returns the Macaulay2 symbol for True.\n\n EXAMPLES::\n\n sage: macaulay2._true_symbol()\n 'true'\n \"\"\"\n return 'true'\n\n def _false_symbol(self):\n \"\"\"\n Returns the Macaulay2 symbol for False.\n\n EXAMPLES::\n\n sage: macaulay2._false_symbol()\n 'false'\n \"\"\"\n return 'false'\n\n def _equality_symbol(self):\n \"\"\"\n Returns the Macaulay2 symbol for equality.\n\n EXAMPLES::\n\n sage: macaulay2._false_symbol()\n 'false'\n \"\"\"\n return '=='\n\n def cputime(self, t=None):\n \"\"\"\n EXAMPLES::\n\n sage: R = macaulay2(\"QQ[x,y]\") # optional - macaulay2\n sage: x,y = R.gens() # optional - macaulay2\n sage: a = (x+y+1)^20 # optional - macaulay2\n sage: macaulay2.cputime() # optional - macaulay2; random\n 0.48393700000000001\n \"\"\"\n _t = float(self.cpuTime()._sage_())\n if t:\n return _t - t\n else:\n return _t\n\n def version(self):\n \"\"\"\n Returns the version of Macaulay2.\n\n EXAMPLES::\n\n sage: macaulay2.version() # optional - macaulay2\n (1, 1...\n \"\"\"\n s = self.eval(\"version\")\n r = re.compile(\"VERSION => (.*?)\\n\")\n s = r.search(s).groups()[0]\n return tuple(int(i) for i in s.split(\".\"))\n\n### Constructors\n\n def ideal(self, *gens):\n \"\"\"\n Return the ideal generated by gens.\n\n INPUT:\n\n - gens -- list or tuple of Macaulay2 objects (or objects that can be\n made into Macaulay2 objects via evaluation)\n\n OUTPUT:\n\n the Macaulay2 ideal generated by the given list of gens\n\n EXAMPLES::\n\n sage: R2 = macaulay2.ring('QQ', '[x, y]'); R2 # optional - macaulay2\n QQ[x...y]\n sage: I = macaulay2.ideal( ('y^2 - x^3', 'x - y') ); I # optional - macaulay2\n 3 2\n ideal (- x + y , x - y)\n sage: J = I^3; J.gb().gens().transpose() # optional - macaulay2\n {-9} | y9-3y8+3y7-y6 |\n {-7} | xy6-2xy5+xy4-y7+2y6-y5 |\n {-5} | x2y3-x2y2-2xy4+2xy3+y5-y4 |\n {-3} | x3-3x2y+3xy2-y3 |\n\n \"\"\"\n if len(gens) == 1 and isinstance(gens[0], (list, tuple)):\n gens = gens[0]\n gens2 = []\n for g in gens:\n if not isinstance(g, Macaulay2Element):\n gens2.append(self(g))\n else:\n gens2.append(g)\n return self('ideal {%s}'%(\",\".join([g.name() for g in gens2])))\n\n def ring(self, base_ring='ZZ', vars='[x]', order='Lex'):\n r\"\"\"\n Create a Macaulay2 polynomial ring.\n\n INPUT:\n\n - ``base_ring`` -- base ring (see examples below)\n - ``vars`` -- a tuple or string that defines the variable names\n - ``order`` -- string (default: 'Lex'); the monomial order\n\n OUTPUT: a Macaulay2 ring\n\n EXAMPLES:\n\n This is a ring in variables named ``a`` through ``d`` over the finite\n field of order 7, with graded reverse lex ordering::\n\n sage: R1 = macaulay2.ring('ZZ/7', '[a..d]', 'GRevLex') # optional - macaulay2\n sage: R1.describe() # optional - macaulay2\n ZZ\n --[a..d, Degrees => {4:1}, Heft => {1}, MonomialOrder => {MonomialSize => 16},\n 7 {GRevLex => {4:1} }\n {Position => Up }\n 
--------------------------------------------------------------------------------\n DegreeRank => 1]\n sage: R1.char() # optional - macaulay2\n 7\n\n This is a polynomial ring over the rational numbers::\n\n sage: R2 = macaulay2.ring('QQ', '[x, y]') # optional - macaulay2\n sage: R2.describe() # optional - macaulay2\n QQ[x..y, Degrees => {2:1}, Heft => {1}, MonomialOrder => {MonomialSize => 16},\n {Lex => 2 }\n {Position => Up }\n --------------------------------------------------------------------------------\n DegreeRank => 1]\n\n TESTS::\n\n sage: macaulay2.ring('QQ', '[a_0..a_2,b..<d,f]').vars() # optional - macaulay2\n | a_0 a_1 a_2 b c f |\n \"\"\"\n return self.new(self._macaulay2_input_ring(base_ring, vars, order))\n\n def help(self, s):\n \"\"\"\n EXAMPLES::\n\n sage: macaulay2.help(\"load\") # optional - macaulay2 - 1st call might be chatty...\n ...\n sage: macaulay2.help(\"load\") # optional - macaulay2\n load...\n ****...\n ...\n * \"input\" -- read Macaulay2 commands and echo\n * \"notify\" -- whether to notify the user when a file is loaded...\n\n TESTS:\n\n Check that help also works for Macaulay2 keywords and variables\n (:trac:`28565`)::\n\n sage: from sage.repl.interpreter import get_test_shell\n sage: shell = get_test_shell()\n sage: shell.run_cell('macaulay2.help(\"try\")') # optional - macaulay2\n try -- catch an error\n ****...\n The object \"try\" is a keyword.\n\n sage: from sage.repl.interpreter import get_test_shell\n sage: shell = get_test_shell()\n sage: shell.run_cell('macaulay2.help(\"errorDepth\")') # optional - macaulay2\n errorDepth...\n The object \"errorDepth\" is an integer.\n \"\"\"\n r = self.eval('help \"%s\"' % s)\n end = r.rfind(\"\\n\\nDIV\")\n if end != -1:\n r = r[:end]\n return AsciiArtString(r)\n\n def _tab_completion(self):\n \"\"\"\n Return a list of tab completions for Macaulay2.\n\n Returns dynamically built sorted list of commands obtained using\n Macaulay2 \"apropos\" command.\n Return type: list of strings\n\n TESTS::\n\n sage: names = macaulay2._tab_completion() # optional - macaulay2\n sage: 'ring' in names # optional - macaulay2\n True\n sage: macaulay2.eval(\"abcabc = 4\") # optional - macaulay2\n 4\n sage: names = macaulay2._tab_completion() # optional - macaulay2\n sage: \"abcabc\" in names # optional - macaulay2\n True\n \"\"\"\n # Get all the names from Macaulay2 except numbered outputs like\n # o1, o2, etc. 
and automatic Sage variable names sage0, sage1, etc.\n # It is faster to get it back as a string.\n r = macaulay2.eval(r\"\"\"\n print toString select(\n apply(apropos \"^[[:alnum:]]+$\", toString),\n s -> not match(\"^(o|sage)[0-9]+$\", s))\n \"\"\")\n # Now split this string into separate names\n r = sorted(r[1:-1].split(\", \"))\n # Macaulay2 sorts things like A, a, B, b, ...\n return r\n\n def use(self, R):\n \"\"\"\n Use the Macaulay2 ring R.\n\n EXAMPLES::\n\n sage: R = macaulay2(\"QQ[x,y]\") # optional - macaulay2\n sage: P = macaulay2(\"ZZ/7[symbol x, symbol y]\") # optional - macaulay2\n sage: macaulay2(\"x\").cls()._operator('===', P) # optional - macaulay2\n true\n sage: macaulay2.use(R) # optional - macaulay2\n sage: macaulay2(\"x\").cls()._operator('===', R) # optional - macaulay2\n true\n \"\"\"\n R = self(R)\n self.eval(\"use %s;\" % R.name(), strip=False)\n\n def new_from(self, type, value):\n \"\"\"\n Return a new ``Macaulay2Element`` of type ``type`` constructed from\n ``value``.\n\n EXAMPLES::\n\n sage: l = macaulay2.new_from(\"MutableList\", [1,2,3]) # optional - macaulay2\n sage: l # optional - macaulay2\n MutableList{...3...}\n sage: list(l) # optional - macaulay2\n [1, 2, 3]\n\n \"\"\"\n type = self(type)\n value = self(value)\n return self.new(\"new %s from %s\"%(type.name(), value.name()))\n\n def _macaulay2_input_ring(self, base_ring, vars, order='GRevLex'):\n \"\"\"\n Build a string representation of a polynomial ring which can be used as\n Macaulay2 input.\n\n TESTS::\n\n sage: R = GF(101)['x']\n sage: macaulay2._macaulay2_input_ring(R.base_ring(), R.gens(), 'Lex') # optional - macaulay2\n 'sage...[symbol x, MonomialSize=>16, MonomialOrder=>Lex]'\n \"\"\"\n if not isinstance(base_ring, str):\n base_ring = self(base_ring).name()\n\n varstr = str(vars)[1:-1].rstrip(',')\n r = re.compile(r\"(?<=,)|(?<=\\.\\.<)|(?<=\\.\\.)(?!<)\")\n varstr = \"symbol \" + r.sub(\"symbol \", varstr)\n return '%s[%s, MonomialSize=>16, MonomialOrder=>%s]' % (base_ring, varstr,\n order)\n\n\n@instancedoc\nclass Macaulay2Element(ExtraTabCompletion, ExpectElement):\n \"\"\"\n Instances of this class represent objects in Macaulay2.\n\n Using the method :meth:`sage` we can translate some of them to\n SageMath objects:\n\n .. 
automethod:: _sage_\n \"\"\"\n def _latex_(self):\n r\"\"\"\n EXAMPLES::\n\n sage: m = macaulay2('matrix {{1,2},{3,4}}') # optional - macaulay2\n sage: m # optional - macaulay2\n | 1 2 |\n | 3 4 |\n sage: latex(m) # optional - macaulay2\n \\begin{pmatrix}...1...2...3...4...\\end{pmatrix}\n \"\"\"\n s = self.tex().external_string().strip('\"').strip('$').replace('\\\\\\\\','\\\\')\n s = s.replace(r\"\\bgroup\",\"\").replace(r\"\\egroup\",\"\")\n return s\n\n def __iter__(self):\n \"\"\"\n EXAMPLES::\n\n sage: l = macaulay2([1,2,3]) # optional - macaulay2\n sage: list(iter(l)) # optional - macaulay2\n [1, 2, 3]\n \"\"\"\n for i in range(len(self)): # zero-indexed!\n yield self[i]\n\n def __str__(self):\n \"\"\"\n EXAMPLES::\n\n sage: R = macaulay2(\"QQ[x,y,z]/(x^3-y^3-z^3)\") # optional - macaulay2\n sage: x = macaulay2('x') # optional - macaulay2\n sage: y = macaulay2('y') # optional - macaulay2\n sage: str(x+y) # optional - macaulay2\n x + y\n sage: str(macaulay2(\"QQ[x,y,z]\")) # optional - macaulay2\n QQ[x...z]\n sage: str(macaulay2(\"QQ[x,y,z]/(x+y+z)\")) # optional - macaulay2\n QQ[x...z]\n -------...\n x + y + z\n \"\"\"\n P = self._check_valid()\n return P.get(self._name)\n\n def _repr_(self):\n \"\"\"\n EXAMPLES::\n\n sage: repr(macaulay2('1..25')) # optional - macaulay2\n (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,\n --------------------------------------------------------------------------------\n 23, 24, 25)\n sage: str(macaulay2('1..25')) # optional - macaulay2\n (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25)\n\n If ``AfterPrint`` is enabled, the ``repr`` contains type information,\n but the string representation does not::\n\n sage: macaulay2.options.after_print = True # optional - macaulay2\n sage: repr(macaulay2('1..25')) # optional - macaulay2\n (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,\n --------------------------------------------------------------------------------\n 23, 24, 25)\n <BLANKLINE>\n Sequence\n sage: str(macaulay2('1..25')) # optional - macaulay2\n (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25)\n sage: macaulay2.options.after_print = False # optional - macaulay2\n \"\"\"\n from sage.typeset.ascii_art import empty_ascii_art\n P = self.parent()\n if P.options.after_print:\n # In M2, the wrapped output is indented by the width of the prompt,\n # which we strip in Sage. 
We hardcode the width of the prompt to\n # 14=len('o1000000001 = '), which is tested in the doctests by the\n # output getting wrapped at 80 characters.\n width = 14 + empty_ascii_art._terminal_width()\n return P.eval('printWidth=%d;%s' % (width, self._name))\n # Otherwise manually wrap the net representation which does not display\n # AfterPrint text\n return P.eval('print(wrap(%d,\"-\",net %s))'\n % (empty_ascii_art._terminal_width(), self._name),\n strip=False)\n\n def external_string(self):\n \"\"\"\n EXAMPLES::\n\n sage: R = macaulay2(\"QQ[symbol x, symbol y]\") # optional - macaulay2\n sage: R.external_string() # optional - macaulay2\n 'QQ(monoid[x..y, Degrees => {2:1}, Heft => {1}, MonomialOrder => VerticalList{MonomialSize => 32, GRevLex => {2:1}, Position => Up}, DegreeRank => 1])'\n \"\"\"\n P = self._check_valid()\n code = 'toExternalString(%s)'%self.name()\n X = P.eval(code, strip=True)\n\n if 'stdio:' in X:\n if 'to external string' in X:\n return P.eval('%s'%self.name())\n raise RuntimeError(\"Error evaluating Macaulay2 code.\\nIN:%s\\nOUT:%s\"%(code, X))\n\n s = multiple_replace({'\\r':'', '\\n':' '}, X)\n return s\n\n def name(self, new_name=None):\n \"\"\"\n Get or change the name of this Macaulay2 element.\n\n INPUT:\n\n - ``new_name`` -- string (default: ``None``). If ``None``, return the\n name of this element; else return a new object identical to ``self``\n whose name is ``new_name``.\n\n Note that this can overwrite existing variables in the system.\n\n EXAMPLES::\n\n sage: S = macaulay2(QQ['x,y']) # optional - macaulay2\n sage: S.name() # optional - macaulay2\n 'sage...'\n sage: R = S.name(\"R\") # optional - macaulay2\n sage: R.name() # optional - macaulay2\n 'R'\n sage: R.vars().cokernel().resolution() # optional - macaulay2\n 1 2 1\n R <-- R <-- R <-- 0\n <BLANKLINE>\n 0 1 2 3\n\n The name can also be given at definition::\n\n sage: A = macaulay2(ZZ['x,y,z'], name='A') # optional - macaulay2\n sage: A.name() # optional - macaulay2\n 'A'\n sage: A^1 # optional - macaulay2\n 1\n A\n \"\"\"\n if new_name is None:\n return self._name\n if not isinstance(new_name, str):\n raise TypeError(\"new_name must be a string\")\n\n P = self.parent()\n # First release self, so that new_name becomes the initial reference to\n # its value. This is needed to change the name of a PolynomialRing.\n # NOTE: This does not work if self._name is not the initial reference.\n cmd = \"\"\"(() -> (\n m := lookup(GlobalReleaseHook, class {0});\n if m =!= null then m(symbol {0}, {0});\n {1} = {0};\n ))()\"\"\".format(self._name, new_name)\n ans = P.eval(cmd)\n if ans.find(\"stdio:\") != -1:\n raise RuntimeError(\"Error evaluating Macaulay2 code.\\n\"\n \"IN:%s\\nOUT:%s\" % (cmd, ans))\n return P._object_class()(P, new_name, is_name=True)\n\n def __len__(self):\n \"\"\"\n EXAMPLES::\n\n sage: l = macaulay2([1,2,3]) # optional - macaulay2\n sage: len(l) # optional - macaulay2\n 3\n sage: type(_) # optional - macaulay2\n <... 
'int'>\n \"\"\"\n self._check_valid()\n # we use str instead of repr to avoid wrapping\n return int(str(self.parent()(\"#%s\"%self.name())))\n\n def __getitem__(self, n):\n \"\"\"\n EXAMPLES::\n\n sage: l = macaulay2([1,2,3]) # optional - macaulay2\n sage: l[0] # optional - macaulay2\n 1\n \"\"\"\n self._check_valid()\n n = self.parent()(n)\n return self.parent().new('%s # %s'%(self.name(), n.name()))\n\n def __setitem__(self, index, value):\n \"\"\"\n EXAMPLES::\n\n sage: l = macaulay2.new_from(\"MutableList\", [1,2,3]) # optional - macaulay2\n sage: l[0] = 4 # optional - macaulay2\n sage: list(l) # optional - macaulay2\n [4, 2, 3]\n\n \"\"\"\n P = self.parent()\n index = P(index)\n value = P(value)\n res = P.eval(\"%s # %s = %s\"%(self.name(), index.name(), value.name()))\n if \"assignment attempted to element of immutable list\" in res:\n raise TypeError(\"item assignment not supported\")\n\n def __call__(self, x):\n \"\"\"\n EXAMPLES::\n\n sage: R = macaulay2(\"QQ[x, y]\") # optional - macaulay2\n sage: x,y = R.gens() # optional - macaulay2\n sage: I = macaulay2.ideal(x*y, x+y) # optional - macaulay2\n sage: gb = macaulay2.gb # optional - macaulay2\n sage: gb(I) # optional - macaulay2\n GroebnerBasis[status: done; S-pairs encountered up to degree 1]\n \"\"\"\n self._check_valid()\n P = self.parent()\n r = P(x)\n return P('%s %s'%(self.name(), r.name()))\n\n def __floordiv__(self, x):\n \"\"\"\n Quotient of division of self by other. This is denoted //.\n\n EXAMPLES::\n\n sage: R.<x,y> = GF(7)[]\n\n Now make the M2 version of R, so we can coerce elements of R to M2::\n\n sage: _ = macaulay2(R) # optional - macaulay2\n sage: h = macaulay2((x^3 + 2*y^2*x)^7); h # optional - macaulay2\n 21 7 14\n x + 2x y\n sage: h1 = macaulay2(x^2 + 2*y*x) # optional - macaulay2\n sage: h2 = macaulay2(x^3 + 2*y*x) # optional - macaulay2\n sage: u = h // [h1,h2] # optional - macaulay2\n sage: h == u[0]*h1 + u[1]*h2 + (h % [h1,h2]) # optional - macaulay2\n True\n \"\"\"\n if isinstance(x, (list, tuple)):\n y = self.parent(x)\n z = self.parent().new('%s // matrix{%s}'%(self.name(), y.name()))\n return list(z.entries().flatten())\n else:\n return self.parent().new('%s // %s'%(self.name(), x.name()))\n\n def __mod__(self, x):\n \"\"\"\n Remainder of division of self by other. 
This is denoted %.\n\n EXAMPLES::\n\n sage: R.<x,y> = GF(7)[]\n\n Now make the M2 version of R, so we can coerce elements of R to M2::\n\n sage: _ = macaulay2(R) # optional - macaulay2\n sage: h = macaulay2((x^3 + 2*y^2*x)^7); h # optional - macaulay2\n 21 7 14\n x + 2x y\n sage: h1 = macaulay2(x^2 + 2*y*x) # optional - macaulay2\n sage: h2 = macaulay2(x^3 + 2*y*x) # optional - macaulay2\n sage: h % [h1,h2] # optional - macaulay2\n -3x*y\n sage: u = h // [h1,h2] # optional - macaulay2\n sage: h == u[0]*h1 + u[1]*h2 + (h % [h1,h2]) # optional - macaulay2\n True\n \"\"\"\n if isinstance(x, (list, tuple)):\n y = self.parent(x)\n return self.parent().new('%s %% matrix{%s}'%(self.name(), y.name()))\n if not isinstance(x, Macaulay2Element):\n x = self.parent(x)\n return self.parent().new('%s %% %s'%(self.name(), x.name()))\n\n def __bool__(self):\n \"\"\"\n Return whether this Macaulay2 element is not ``False`` or not ``0``.\n\n EXAMPLES::\n\n sage: a = macaulay2(0) # optional - macaulay2\n sage: a == 0 # optional - macaulay2\n True\n sage: bool(a) # optional - macaulay2\n False\n\n TESTS:\n\n Check that :trac:`28705` is fixed::\n\n sage: t = macaulay2(True); t # optional - macaulay2\n true\n sage: bool(t) # optional - macaulay2\n True\n sage: bool(macaulay2('false')) # optional - macaulay2\n False\n sage: bool(macaulay2('\"a\"')) # optional - macaulay2\n True\n \"\"\"\n P = self.parent()\n return P.eval('{0}===false or {0}==0'.format(self._name)) != 'true'\n\n __nonzero__ = __bool__\n\n def sage_polystring(self):\n \"\"\"\n If this Macaulay2 element is a polynomial, return a string\n representation of this polynomial that is suitable for\n evaluation in Python. Thus ``*`` is used for multiplication\n and ``**`` for exponentiation. This function is primarily\n used internally.\n\n EXAMPLES::\n\n sage: R = macaulay2.ring('QQ','(x,y)') # optional - macaulay2\n sage: f = macaulay2('x^3 + 3*y^11 + 5') # optional - macaulay2\n sage: print(f) # optional - macaulay2\n 3 11\n x + 3y + 5\n sage: f.sage_polystring() # optional - macaulay2\n 'x**3+3*y**11+5'\n \"\"\"\n return self.external_string().replace('^','**')\n\n def structure_sheaf(self):\n \"\"\"\n EXAMPLES::\n\n sage: S = macaulay2('QQ[a..d]') # optional - macaulay2\n sage: R = S / macaulay2('a^3 + b^3 + c^3 + d^3') # optional - macaulay2\n sage: X = R.Proj().name('X') # optional - macaulay2\n sage: X.structure_sheaf() # optional - macaulay2\n doctest:...: DeprecationWarning: The function `structure_sheaf` is deprecated. Use `self.sheaf()` instead.\n See https://trac.sagemath.org/27848 for details.\n OO\n X\n sage: X.sheaf() # optional - macaulay2\n OO\n X\n \"\"\"\n from sage.misc.superseded import deprecation\n deprecation(27848, 'The function `structure_sheaf` is deprecated. 
Use `self.sheaf()` instead.')\n return self.parent()('OO_%s'%self.name())\n\n def substitute(self, *args, **kwds):\n \"\"\"\n Note that we have to override the substitute method so that we get\n the default one from Macaulay2 instead of the one provided by Element.\n\n EXAMPLES::\n\n sage: R = macaulay2(\"QQ[x]\") # optional - macaulay2\n sage: P = macaulay2(\"ZZ/7[symbol x]\") # optional - macaulay2\n sage: x, = R.gens() # optional - macaulay2\n sage: a = x^2 + 1 # optional - macaulay2\n sage: a = a.substitute(P) # optional - macaulay2\n sage: a.sage().parent() # optional - macaulay2\n Univariate Polynomial Ring in x over Finite Field of size 7\n\n \"\"\"\n return self.__getattr__(\"substitute\")(*args, **kwds)\n\n subs = substitute\n\n def _tab_completion(self):\n \"\"\"\n Return a list of tab completions for ``self``.\n\n Returns dynamically built sorted list of commands obtained using\n Macaulay2 \"methods\" command. All returned functions can take ``self``\n as their first argument\n\n Return type: list of strings\n\n TESTS::\n\n sage: a = macaulay2(\"QQ[x,y]\") # optional - macaulay2\n sage: traits = a._tab_completion() # optional - macaulay2\n sage: \"generators\" in traits # optional - macaulay2\n True\n\n The implementation of this function does not set or change global\n variables::\n\n sage: a.dictionary()._operator('#?', '\"r\"') # optional - macaulay2\n false\n \"\"\"\n # It is possible, that these are not all possible methods, but\n # there are still plenty and at least there are no definitely\n # wrong ones...\n r = self.parent().eval(\n \"\"\"(() -> (\n currentClass := class %s;\n total := {};\n while true do (\n -- Select methods with first argument of the given class\n r := select(methods currentClass, s -> s_1 === currentClass);\n -- Get their names as strings\n r = apply(r, s -> toString s_0);\n -- Keep only alpha-numeric ones\n r = select(r, s -> match(\"^[[:alnum:]]+$\", s));\n -- Add to existing ones\n total = total | select(r, s -> not any(total, e -> e == s));\n if parent currentClass === currentClass then break;\n currentClass = parent currentClass;\n );\n print toString total\n ))()\"\"\" % self.name())\n r = sorted(r[1:-1].split(\", \"))\n return r\n\n def cls(self):\n \"\"\"\n Since class is a keyword in Python, we have to use cls to call\n Macaulay2's class. 
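For instance, a Macaulay2 list is an instance of the Macaulay2 class ``List``; a small illustrative check, assuming the optional Macaulay2 interface is available::\n\n sage: macaulay2([1,2,3]).cls() # optional - macaulay2\n List\n\n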
In Macaulay2, class corresponds to Sage's\n notion of parent.\n\n EXAMPLES::\n\n sage: macaulay2(ZZ).cls() # optional - macaulay2\n Ring\n\n \"\"\"\n return self.parent()(\"class %s\"%self.name())\n\n def after_print_text(self):\n r\"\"\"\n Obtain type information for this Macaulay2 element.\n\n This is the text that is displayed using ``AfterPrint`` in a Macaulay2\n interpreter.\n\n Macaulay2 by default includes this information in the output.\n In Sage, this behavior can optionally be enabled by setting the option\n ``after_print`` in :class:`Macaulay2.options`.\n\n EXAMPLES::\n\n sage: B = macaulay2(matrix([[1, 2], [3, 6]])).kernel(); B # optional - macaulay2\n image | 2 |\n | -1 |\n sage: B.after_print_text() # optional - macaulay2\n 2\n ZZ-module, submodule of ZZ\n \"\"\"\n return self.parent().eval('(lookup({topLevelMode,AfterPrint},' +\n 'class {0}))({0})'.format(self._name))\n\n ##########################\n #Aliases for M2 operators#\n ##########################\n def dot(self, x):\n \"\"\"\n EXAMPLES::\n\n sage: d = macaulay2.new(\"MutableHashTable\") # optional - macaulay2\n sage: d[\"k\"] = 4 # optional - macaulay2\n sage: d.dot(\"k\") # optional - macaulay2\n 4\n \"\"\"\n parent = self.parent()\n x = parent(x)\n return parent(\"%s.%s\" % (self.name(), x))\n\n def _operator(self, opstr, x):\n \"\"\"\n Returns the infix binary operation specified by opstr applied\n to self and x.\n\n EXAMPLES::\n\n sage: a = macaulay2(\"3\") # optional - macaulay2\n sage: a._operator(\"+\", a) # optional - macaulay2\n 6\n sage: a._operator(\"*\", a) # optional - macaulay2\n 9\n \"\"\"\n parent = self.parent()\n x = parent(x)\n return parent(\"%s%s%s\"%(self.name(), opstr, x.name()))\n\n def sharp(self, x):\n \"\"\"\n EXAMPLES::\n\n sage: a = macaulay2([1,2,3]) # optional - macaulay2\n sage: a.sharp(0) # optional - macaulay2\n 1\n \"\"\"\n return self._operator(\"#\", x)\n\n def starstar(self, x):\n \"\"\"\n The binary operator ``**`` in Macaulay2 is usually used for tensor\n or Cartesian power.\n\n EXAMPLES::\n\n sage: a = macaulay2([1,2]).set() # optional - macaulay2\n sage: a.starstar(a) # optional - macaulay2\n set {(1, 1), (1, 2), (2, 1), (2, 2)}\n\n \"\"\"\n return self._operator(\"**\", x)\n\n def underscore(self, x):\n \"\"\"\n EXAMPLES::\n\n sage: a = macaulay2([1,2,3]) # optional - macaulay2\n sage: a.underscore(0) # optional - macaulay2\n 1\n \"\"\"\n return self._operator(\"_\", x)\n\n ####################\n #Conversion to Sage#\n ####################\n def _sage_(self):\n r\"\"\"\n EXAMPLES::\n\n sage: macaulay2(ZZ).sage() # optional - macaulay2, indirect doctest\n Integer Ring\n sage: macaulay2(QQ).sage() # optional - macaulay2\n Rational Field\n\n sage: macaulay2(2).sage() # optional - macaulay2\n 2\n sage: macaulay2(1/2).sage() # optional - macaulay2\n 1/2\n sage: macaulay2(2/1).sage() # optional - macaulay2\n 2\n sage: _.parent() # optional - macaulay2\n Rational Field\n sage: macaulay2([1,2,3]).sage() # optional - macaulay2\n [1, 2, 3]\n\n sage: m = matrix([[1,2],[3,4]])\n sage: macaulay2(m).sage() # optional - macaulay2\n [1 2]\n [3 4]\n\n sage: D = macaulay2('hashTable {4 => 1, 2 => 3}') # optional - macaulay2\n sage: D.pairs() # optional - macaulay2\n {(4, 1), (2, 3)}\n sage: D.sage() == {4: 1, 2: 3} # optional - macaulay2\n True\n\n sage: macaulay2(QQ['x,y']).sage() # optional - macaulay2\n Multivariate Polynomial Ring in x, y over Rational Field\n sage: macaulay2(QQ['x']).sage() # optional - macaulay2\n Univariate Polynomial Ring in x over Rational Field\n sage: 
macaulay2(GF(7)['x,y']).sage() # optional - macaulay2\n Multivariate Polynomial Ring in x, y over Finite Field of size 7\n\n sage: macaulay2(GF(7)).sage() # optional - macaulay2\n Finite Field of size 7\n sage: macaulay2(GF(49, 'a')).sage() # optional - macaulay2\n Finite Field in a of size 7^2\n\n sage: R.<x,y> = QQ[]\n sage: macaulay2(x^2+y^2+1).sage() # optional - macaulay2\n x^2 + y^2 + 1\n\n sage: R = macaulay2(\"QQ[x,y]\") # optional - macaulay2\n sage: I = macaulay2(\"ideal (x,y)\") # optional - macaulay2\n sage: I.sage() # optional - macaulay2\n Ideal (x, y) of Multivariate Polynomial Ring in x, y over Rational Field\n\n sage: macaulay2(\"x = symbol x\") # optional - macaulay2\n x\n sage: macaulay2(\"QQ[x_0..x_25]\").sage() # optional - macaulay2\n Multivariate Polynomial Ring in x_0, x_1,..., x_25 over Rational Field\n\n sage: S = ZZ['x,y'].quotient('x^2-y')\n sage: macaulay2(S).sage() == S # optional - macaulay2\n True\n sage: S = GF(101)['x,y'].quotient('x^2-y')\n sage: macaulay2(S).sage() == S # optional - macaulay2\n True\n\n sage: R = GF(13)['a,b']['c,d']\n sage: macaulay2(R).sage() == R # optional - macaulay2\n True\n sage: macaulay2('a^2 + c').sage() == R('a^2 + c') # optional - macaulay2\n True\n sage: macaulay2.substitute('a', R).sage().parent() is R # optional - macaulay2\n True\n\n sage: R = macaulay2(\"QQ^2\") # optional - macaulay2\n sage: R.sage() # optional - macaulay2\n Vector space of dimension 2 over Rational Field\n\n sage: macaulay2(\"vector {4_QQ, 2}\").sage() # optional - macaulay2\n (4, 2)\n sage: _.parent() # optional - macaulay2\n Vector space of dimension 2 over Rational Field\n\n sage: m = macaulay2('\"hello\"') # optional - macaulay2\n sage: m.sage() # optional - macaulay2\n 'hello'\n\n sage: gg = macaulay2.needsPackage('\"Graphs\"') # optional - macaulay2\n sage: g = macaulay2.barbellGraph(3) # optional - macaulay2\n sage: g.sage() # optional - macaulay2\n Graph on 6 vertices\n sage: g.sage().edges(labels=False) # optional - macaulay2\n [(0, 1), (0, 2), (1, 2), (2, 3), (3, 4), (3, 5), (4, 5)]\n\n sage: d = 'digraph ({{1,2},{2,1},{3,1}}, EntryMode => \"edges\")'\n sage: g = macaulay2(d) # optional - macaulay2\n sage: g.sage() # optional - macaulay2\n Digraph on 3 vertices\n sage: g.sage().edges(labels=False) # optional - macaulay2\n [(1, 2), (2, 1), (3, 1)]\n\n Chain complexes and maps of chain complexes can be converted::\n\n sage: R = ZZ['a,b,c']\n sage: C = macaulay2(ideal(R.gens())).resolution() # optional - macaulay2\n sage: ascii_art(C.sage()) # optional - macaulay2\n [-b 0 -c] [ c]\n [ a -c 0] [ a]\n [a b c] [ 0 b a] [-b]\n 0 <-- C_0 <-------- C_1 <----------- C_2 <----- C_3 <-- 0\n sage: F = C.dot('dd') # optional - macaulay2\n sage: G = F.sage() # optional - macaulay2\n sage: G.in_degree(2) # optional - macaulay2\n [-b 0 -c]\n [ a -c 0]\n [ 0 b a]\n sage: F.underscore(2).sage() == G.in_degree(2) # optional - macaulay2\n True\n sage: (F^2).sage() # optional - macaulay2\n Chain complex morphism:\n From: Chain complex with at most 4 nonzero terms over Multivariate Polynomial Ring in a, b, c over Integer Ring\n To: Chain complex with at most 4 nonzero terms over Multivariate Polynomial Ring in a, b, c over Integer Ring\n\n Quotient rings in Macaulay2 inherit variable names from the ambient\n ring, so we mimic this behaviour in Sage::\n\n sage: R = macaulay2(\"ZZ/7[x,y]\") # optional - macaulay2\n sage: I = macaulay2(\"ideal (x^3 - y^2)\") # optional - macaulay2\n sage: (R/I).gens() # optional - macaulay2\n {x, y}\n sage: (R/I).sage().gens() # 
optional - macaulay2\n (x, y)\n\n Elements of quotient rings::\n\n sage: x, y = (R/I).gens() # optional - macaulay2\n sage: f = ((x^3 + 2*y^2*x)^7).sage(); f # optional - macaulay2\n 2*x*y^18 + y^14\n sage: f.parent() # optional - macaulay2\n Quotient of Multivariate Polynomial Ring in x, y over Finite Field of size 7 by the ideal (x^3 - y^2)\n\n \"\"\"\n repr_str = str(self)\n cls_str = str(self.cls())\n cls_cls_str = str(self.cls().cls())\n\n if repr_str == \"ZZ\":\n from sage.rings.integer_ring import ZZ\n return ZZ\n elif repr_str == \"QQ\":\n from sage.rings.rational_field import QQ\n return QQ\n\n if cls_cls_str == \"Type\":\n if cls_str == \"List\":\n return [entry._sage_() for entry in self]\n elif cls_str == \"Matrix\":\n base_ring = self.ring()._sage_()\n return self._matrix_(base_ring)\n elif cls_str == 'HashTable':\n return {x._sage_(): y._sage_() for (x, y) in self.pairs()}\n elif cls_str == \"Ideal\":\n parent = self.ring()._sage_()\n gens = self.gens().entries().flatten()._sage_()\n return parent.ideal(*gens)\n elif cls_str == \"QuotientRing\":\n #Handle the ZZ/n case\n ambient = self.ambient()\n if ambient.external_string() == 'ZZ':\n from sage.rings.integer_ring import ZZ\n from sage.rings.finite_rings.finite_field_constructor import GF\n external_string = self.external_string()\n zz, n = external_string.split(\"/\")\n\n #Note that n must be prime since it is\n #coming from Macaulay 2\n return GF(ZZ(n))\n else:\n ambient_ring = ambient._sage_()\n ideal = self.ideal()._sage_()\n return ambient_ring.quotient(ideal, names=ambient_ring.variable_names())\n elif cls_str == \"PolynomialRing\":\n from sage.rings.all import PolynomialRing\n from sage.rings.polynomial.term_order import inv_macaulay2_name_mapping\n\n #Get the base ring\n base_ring = self.coefficientRing()._sage_()\n\n #Get a string list of generators\n gens = str(self.gens().toString())[1:-1]\n\n # Check that we are dealing with default degrees, i.e. 
1's.\n if self.options().sharp(\"Degrees\").any(\"x -> x != {1}\")._sage_():\n raise ValueError(\"cannot convert Macaulay2 polynomial ring with non-default degrees to Sage\")\n #Handle the term order\n external_string = self.external_string()\n order = None\n if \"MonomialOrder\" not in external_string:\n order = \"degrevlex\"\n else:\n for order_name in inv_macaulay2_name_mapping:\n if order_name in external_string:\n order = inv_macaulay2_name_mapping[order_name]\n if len(gens) > 1 and order is None:\n raise ValueError(\"cannot convert Macaulay2's term order to a Sage term order\")\n\n return PolynomialRing(base_ring, order=order, names=gens)\n elif cls_str == \"GaloisField\":\n from sage.rings.integer_ring import ZZ\n from sage.rings.finite_rings.finite_field_constructor import GF\n gf, n = repr_str.split(\" \")\n n = ZZ(n)\n if n.is_prime():\n return GF(n)\n else:\n gen = str(self.gens())[1:-1]\n return GF(n, gen)\n elif cls_str == \"Boolean\":\n if repr_str == \"true\":\n return True\n elif repr_str == \"false\":\n return False\n elif cls_str == \"String\":\n return str(repr_str)\n elif cls_str == \"Module\":\n from sage.modules.all import FreeModule\n if self.isFreeModule()._sage_():\n ring = self.ring()._sage_()\n rank = self.rank()._sage_()\n return FreeModule(ring, rank)\n elif cls_str in (\"Graph\", \"Digraph\"):\n if cls_str == \"Graph\":\n from sage.graphs.graph import Graph\n graph_cls = Graph\n else:\n from sage.graphs.digraph import DiGraph\n graph_cls = DiGraph\n adj_mat = self.adjacencyMatrix().sage()\n g = graph_cls(adj_mat, format='adjacency_matrix')\n g.relabel(self.vertices())\n return g\n elif cls_str == \"ChainComplex\":\n from sage.homology.chain_complex import ChainComplex\n ring = self.ring()._sage_()\n dd = self.dot('dd')\n degree = dd.degree()._sage_()\n a = self.min()._sage_()\n b = self.max()._sage_()\n matrices = {i: dd.underscore(i)._matrix_(ring)\n for i in range(a, b+1)}\n return ChainComplex(matrices, degree=degree)\n elif cls_str == \"ChainComplexMap\":\n from sage.homology.chain_complex_morphism import ChainComplexMorphism\n ring = self.ring()._sage_()\n source = self.source()\n a = source.min()._sage_()\n b = source.max()._sage_()\n degree = self.degree()._sage_()\n matrices = {i: self.underscore(i)._matrix_(ring)\n for i in range(a, b+1)}\n C = source._sage_()\n # in Sage, chain complex morphisms are degree-preserving,\n # so we shift the degrees of the target\n D = self.target()._operator(' ', '[%s]' % degree)._sage_()\n return ChainComplexMorphism(matrices, C, D)\n else:\n #Handle the integers and rationals separately\n if cls_str == \"ZZ\":\n from sage.rings.integer_ring import ZZ\n return ZZ(repr_str)\n elif cls_str == \"QQ\":\n from sage.rings.rational_field import QQ\n repr_str = self.external_string()\n if \"/\" not in repr_str:\n repr_str = repr_str + \"/1\"\n return QQ(repr_str)\n\n m2_parent = self.cls()\n parent = m2_parent._sage_()\n\n if cls_cls_str in (\"PolynomialRing\", \"QuotientRing\"):\n return parent(self.external_string())\n elif cls_cls_str == \"Module\":\n entries = self.entries()._sage_()\n return parent._element_constructor_(entries)\n\n from sage.misc.sage_eval import sage_eval\n try:\n return sage_eval(repr_str)\n except Exception:\n raise NotImplementedError(\"cannot convert %s to a Sage object\"%repr_str)\n\n to_sage = deprecated_function_alias(27848, ExpectElement.sage)\n\n def _matrix_(self, R):\n r\"\"\"\n If ``self`` is a Macaulay2 matrix, return the corresponding Sage matrix\n over the Sage ring ``R``.\n\n INPUT:\n\n 
- ``R`` - ring to coerce into\n\n OUTPUT: matrix\n\n EXAMPLES::\n\n sage: A = macaulay2('matrix {{1,2},{3,4}}') # optional - macaulay2\n sage: matrix(QQ, A) # optional - macaulay2, indirect doctest\n [1 2]\n [3 4]\n\n TESTS:\n\n Check that degenerate matrix dimensions are preserved (:trac:`28591`)::\n\n sage: m = macaulay2('matrix {{},{}}') # optional - macaulay2\n sage: matrix(ZZ, m).dimensions() # optional - macaulay2\n (2, 0)\n sage: matrix(ZZ, m.transpose()).dimensions() # optional - macaulay2\n (0, 2)\n \"\"\"\n from sage.matrix.constructor import matrix\n m = matrix(R, self.entries()._sage_())\n if not m.nrows():\n return matrix(R, 0, self.numcols()._sage_())\n return m\n\n\n@instancedoc\nclass Macaulay2Function(ExpectFunction):\n \"\"\"\n TESTS::\n\n sage: gb = macaulay2.gb # optional - macaulay2\n sage: type(gb) # optional - macaulay2\n <class 'sage.interfaces.macaulay2.Macaulay2Function'>\n sage: gb._name # optional - macaulay2\n 'gb'\n \"\"\"\n\n def _instancedoc_(self):\n \"\"\"\n EXAMPLES::\n\n sage: print(macaulay2.load.__doc__) # optional - macaulay2\n nodetex,noreplace\n load...\n ****...\n ...\n * \"input\" -- read Macaulay2 commands and echo\n * \"notify\" -- whether to notify the user when a file is loaded...\n\n TESTS:\n\n Check that detex is disabled, so that the output does not get\n reformatted (:trac:`28565`)::\n\n sage: from sage.repl.interpreter import get_test_shell\n sage: shell = get_test_shell()\n sage: shell.run_cell('macaulay2.matrix?') # optional - macaulay2\n ...\n +----------------------------+\n |i1 : matrix{{1,2,3},{4,5,6}}|\n | |\n |o1 = | 1 2 3 | |\n | | 4 5 6 | |\n | |\n | 2 3 |\n |o1 : Matrix ZZ <--- ZZ |\n +----------------------------+\n ...\n \"\"\"\n r = self._parent.help(self._name)\n return AsciiArtString('nodetex,noreplace\\n' + r)\n\n def _sage_src_(self):\n \"\"\"\n EXAMPLES::\n\n sage: macaulay2.gb._sage_src_() # optional - macaulay2\n -- code for method: gb(Ideal)...\n -- code for method: gb(Matrix)...\n ...\n \"\"\"\n return self._parent.eval('code methods %s' % self._name)\n\n\n@instancedoc\nclass Macaulay2FunctionElement(FunctionElement):\n def _instancedoc_(self):\n \"\"\"\n TESTS:\n\n Since :trac:`28565`, the help output includes all documentation nodes\n that can take ``self._obj`` as first argument. This also checks that\n detex is disabled, so that the output does not get reformatted. 
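A lighter check of the same machinery, kept as a sketch since the exact help text depends on the installed Macaulay2 version; it only verifies that some documentation comes back::\n\n sage: I = macaulay2(\"ideal {4}\") # optional - macaulay2\n sage: len(I.resolution.__doc__) > 0 # optional - macaulay2\n True\n\n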
::\n\n sage: from sage.repl.interpreter import get_test_shell\n sage: shell = get_test_shell()\n sage: shell.run_cell('I = macaulay2(\"ideal {4}\")') # optional - macaulay2\n sage: shell.run_cell('I.resolution?') # optional - macaulay2\n Signature:...\n Docstring:\n resolution -- projective resolution\n ****...\n <BLANKLINE>\n resolution(Ideal) -- compute a projective resolution of...\n ****...\n | 1 4 6 4 1 |\n |o3 = R <-- R <-- R <-- R <-- R <-- 0|\n | |\n | 0 1 2 3 4 5|\n ...\n \"\"\"\n P = self._obj.parent()\n r = P.eval('help prepend({0}, select(methods {0}, m->'\n 'instance({1}, m#1)))'.format(self._name, self._obj._name))\n end = r.rfind(\"\\n\\nDIV\")\n if end != -1:\n r = r[:end]\n return AsciiArtString('nodetex,noreplace\\n' + r)\n\n def _sage_src_(self):\n \"\"\"\n EXAMPLES::\n\n sage: m = macaulay2('matrix {{4,6}}') # optional - macaulay2\n sage: m.resolution._sage_src_() # optional - macaulay2\n -- code for method: resolution(Matrix)...\n \"\"\"\n return self._obj.parent().eval(\n 'code select(methods %s, m->instance(%s, m#1))'\n % (self._name, self._obj._name))\n\n\ndef is_Macaulay2Element(x):\n \"\"\"\n EXAMPLES::\n\n sage: from sage.interfaces.macaulay2 import is_Macaulay2Element\n sage: is_Macaulay2Element(2) # optional - macaulay2\n False\n sage: is_Macaulay2Element(macaulay2(2)) # optional - macaulay2\n True\n \"\"\"\n return isinstance(x, Macaulay2Element)\n\n# An instance\nmacaulay2 = Macaulay2()\n\n\ndef macaulay2_console():\n \"\"\"\n Spawn a new M2 command-line session.\n\n EXAMPLES::\n\n sage: macaulay2_console() # not tested\n Macaulay 2, version 1.1\n with packages: Classic, Core, Elimination, IntegralClosure, LLLBases, Parsing, PrimaryDecomposition, SchurRings, TangentCone\n ...\n\n \"\"\"\n from sage.repl.rich_output.display_manager import get_display_manager\n if not get_display_manager().is_in_terminal():\n raise RuntimeError('Can use the console only in the terminal. 
Try %%macaulay2 magics instead.')\n os.system('M2')\n\n\n\ndef reduce_load_macaulay2():\n \"\"\"\n Used for reconstructing a copy of the Macaulay2 interpreter from a pickle.\n\n EXAMPLES::\n\n sage: from sage.interfaces.macaulay2 import reduce_load_macaulay2\n sage: reduce_load_macaulay2()\n Macaulay2\n \"\"\"\n return macaulay2\n\n", "id": "1481530", "language": "Python", "matching_score": 2.8692867755889893, "max_stars_count": 10, "path": "src/sage/interfaces/macaulay2.py" }, { "content": "## -*- encoding: utf-8 -*-\n\"\"\"\nThis file (./mpoly_doctest.sage) was *autogenerated* from ./mpoly.tex,\nwith sagetex.sty version 2011/05/27 v2.3.1.\nIt contains the contents of all the sageexample environments from this file.\nYou should be able to doctest this file with:\nsage -t ./mpoly_doctest.sage\nIt is always safe to delete this file; it is not used in typesetting your\ndocument.\n\nSage example in ./mpoly.tex, line 65::\n\n sage: R = PolynomialRing(QQ, 'x,y,z')\n sage: x,y,z = R.gens() # gives the tuples of indeterminates\n\nSage example in ./mpoly.tex, line 78::\n\n sage: R = PolynomialRing(QQ, 'x', 10)\n\nSage example in ./mpoly.tex, line 84::\n\n sage: x = R.gens()\n sage: sum(x[i] for i in range(5))\n x0 + x1 + x2 + x3 + x4\n\nSage example in ./mpoly.tex, line 96::\n\n sage: def test_poly(ring, deg=3):\n ....: monomials = Subsets(\n ....: flatten([(x,)*deg for x in (1,) + ring.gens()]),\n ....: deg, submultiset=True)\n ....: return add(mul(m) for m in monomials)\n\nSage example in ./mpoly.tex, line 103::\n\n sage: test_poly(QQ['x,y']) # py2\n x^3 + x^2*y + x*y^2 + y^3 + x^2 + x*y + y^2 + x + y + 1\n sage: test_poly(QQ['y,x']) # py2\n y^3 + y^2*x + y*x^2 + x^3 + y^2 + y*x + x^2 + y + x + 1\n sage: test_poly(QQ['x,y']) == test_poly(QQ['y,x']) # py2\n True\n\nSage example in ./mpoly.tex, line 127::\n\n sage: test_poly(PolynomialRing(QQ, 'x,y', order='deglex')) # py2\n x^3 + x^2*y + x*y^2 + y^3 + x^2 + x*y + y^2 + x + y + 1\n\nSage example in ./mpoly.tex, line 230::\n\n sage: R.<x,y> = InfinitePolynomialRing(ZZ, order='lex')\n sage: p = mul(x[k] - y[k] for k in range(2)); p\n x_1*x_0 - x_1*y_0 - x_0*y_1 + y_1*y_0\n sage: p + x[100]\n x_100 + x_1*x_0 - x_1*y_0 - x_0*y_1 + y_1*y_0\n\nSage example in ./mpoly.tex, line 343::\n\n sage: R.<x,y,z> = QQ[]\n sage: p = 7*y^2*x^2 + 3*y*x^2 + 2*y*z + x^3 + 6\n sage: p.lt()\n 7*x^2*y^2\n\nSage example in ./mpoly.tex, line 358::\n\n sage: p[x^2*y] == p[(2,1,0)] == p[2,1,0] == 3\n True\n\nSage example in ./mpoly.tex, line 366::\n\n sage: p(0, 3, -1)\n 0\n sage: p.subs(x = 1, z = x^2+1)\n 2*x^2*y + 7*y^2 + 5*y + 7\n\nSage example in ./mpoly.tex, line 381::\n\n sage: print(\"total={d} (in x)={dx} partial={ds}\"\\\n ....: .format(d=p.degree(), dx=p.degree(x), ds=p.degrees()))\n total=4 (in x)=3 partial=(3, 2, 1)\n\nSage example in ./mpoly.tex, line 441::\n\n sage: R.<x,y> = QQ[]; p = x^2 + y^2; q = x + y\n sage: print(\"({quo})*({q}) + ({rem}) == {p}\".format( \\\n ....: quo=p//q, q=q, rem=p%q, p=p//q*q+p%q))\n (-x + y)*(x + y) + (2*x^2) == x^2 + y^2\n sage: p.mod(q) # is NOT equivalent to p%q\n 2*y^2\n\nSage example in ./mpoly.tex, line 459::\n\n sage: R.<x,y> = QQ[exp(2*I*pi/5)][]\n sage: (x^10 + y^5).gcd(x^4 - y^2)\n x^2 + y\n sage: (x^10 + y^5).factor()\n (x^2 + y) * (x^2 + (a^3)*y) * (x^2 + (a^2)*y) * (x^2 + a*y) * (x^2 + (-a^3 - a^2 - a - 1)*y)\n\nSage example in ./mpoly.tex, line 564::\n\n sage: R.<x,y,z> = QQ[]\n sage: J = R.ideal(x^2 * y * z - 18,\n ....: x * y^3 * z - 24,\n ....: x * y * z^4 - 6)\n\nSage example in ./mpoly.tex, line 575::\n\n sage: 
J.dimension()\n 0\n\nSage example in ./mpoly.tex, line 584::\n\n sage: J.variety() # py2\n [{y: 2, z: 1, x: 3}]\n sage: J.variety() # py3\n [{z: 1, y: 2, x: 3}]\n\nSage example in ./mpoly.tex, line 596::\n\n sage: V = J.variety(QQbar)\n sage: len(V)\n 17\n\nSage example in ./mpoly.tex, line 603::\n\n sage: sorted(V, key=str)[-3:]\n [{z: 0.9324722294043558? + 0.3612416661871530?*I,\n y: -1.700434271459229? - 1.052864325754712?*I,\n x: 1.337215067329615? + 2.685489874065187?*I},\n {z: 0.9324722294043558? - 0.3612416661871530?*I,\n y: -1.700434271459229? + 1.052864325754712?*I,\n x: 1.337215067329615? - 2.685489874065187?*I},\n {z: 1, y: 2, x: 3}]\n\nSage example in ./mpoly.tex, line 619::\n\n sage: (xx, yy, zz) = QQbar['x,y,z'].gens()\n sage: sorted([pt[xx].degree() for pt in V])\n [1, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,\n 16, 16]\n\nSage example in ./mpoly.tex, line 640::\n\n sage: Set(tuple(abs(pt[i]) for i in (xx,yy,zz)) for pt in V)\n {(3, 2, 1)}\n\nSage example in ./mpoly.tex, line 662::\n\n sage: w = QQbar.zeta(17); w # primitive root of 1\n 0.9324722294043558? + 0.3612416661871530?*I\n sage: Set(pt[zz] for pt in V) == Set(w^i for i in range(17))\n True\n\nSage example in ./mpoly.tex, line 690::\n\n sage: set(pt[zz].minpoly() for pt in sorted(V, key=str)[:-1])\n {x^16 + x^15 + x^14 + x^13 + x^12 + x^11 + x^10 + x^9 + x^8 + x^7 + x^6 + x^5 + x^4 + x^3 + x^2 + x + 1}\n\nSage example in ./mpoly.tex, line 706::\n\n sage: def polar_form(z):\n ....: rho = z.abs(); rho.simplify()\n ....: theta = 2 * pi * z.rational_argument()\n ....: return (SR(rho) * exp(I*theta))\n sage: sorted([tuple(polar_form(pt[i]) for i in [xx,yy,zz])\n ....: for pt in sorted(V, key=str)[-3:]])\n [(3*e^(6/17*I*pi), 2*e^(-14/17*I*pi), e^(2/17*I*pi)),\n (3*e^(-6/17*I*pi), 2*e^(14/17*I*pi), e^(-2/17*I*pi)),\n (3, 2, 1)]\n\nSage example in ./mpoly.tex, line 733::\n\n sage: J.triangular_decomposition()\n [Ideal (z^17 - 1, y - 2*z^10, x - 3*z^3) of Multivariate\n Polynomial Ring in x, y, z over Rational Field]\n sage: J.transformed_basis()\n [z^17 - 1, -2*z^10 + y, -3/4*y^2 + x]\n\nSage example in ./mpoly.tex, line 909::\n\n sage: R.<x,y> = QQ[]\n sage: J = R.ideal(x^2 + y^2 - 1, 16*x^2*y^2 - 1)\n\nSage example in ./mpoly.tex, line 917::\n\n sage: ybar2 = R.quo(J)(y^2)\n sage: [ybar2^i for i in range(3)]\n [1, ybar^2, ybar^2 - 1/16]\n sage: ((ybar2 + 1)^2).lift()\n 3*y^2 + 15/16\n\nSage example in ./mpoly.tex, line 958::\n\n sage: u = (16*y^4 - 16*y^2 + 1).lift(J); u\n [16*y^2, -1]\n sage: u[0]*J.0 + u[1]*J.1\n 16*y^4 - 16*y^2 + 1\n\nSage example in ./mpoly.tex, line 968::\n\n sage: (y^4).mod(J)\n y^2 - 1/16\n\nSage example in ./mpoly.tex, line 978::\n\n sage: (y^4).reduce([x^2 + y^2 - 1, 16*x^2*y^2 - 1])\n y^4\n\nSage example in ./mpoly.tex, line 1047::\n\n sage: 1 in ideal(x^2+y^2-1, (x-4)^2+y^2-1)\n False\n\nSage example in ./mpoly.tex, line 1056::\n\n sage: R(1).lift(ideal(x^2+y^2-1, (x-4)^2+y^2-1, x-y))\n [-1/28*y + 1/14, 1/28*y + 1/14, -1/7*x + 1/7*y + 4/7]\n\nSage example in ./mpoly.tex, line 1079::\n\n sage: J1 = (x^2 + y^2 - 1, 16*x^2*y^2 - 1)*R\n sage: J2 = (x^2 + y^2 - 1, 4*x^2*y^2 - 1)*R\n sage: J1.radical() == J1\n True\n sage: J2.radical()\n Ideal (2*y^2 - 1, 2*x^2 - 1) of Multivariate Polynomial\n Ring in x, y over Rational Field\n sage: 2*y^2 - 1 in J2\n False\n\nSage example in ./mpoly.tex, line 1122::\n\n sage: C = ideal(x^2 + y^2 - 1); H = ideal(16*x^2*y^2 - 1)\n sage: C + H == J1\n True\n\nSage example in ./mpoly.tex, line 1151::\n\n sage: CH = 
C.intersection(H).quotient(ideal(4*x*y-1)); CH\n Ideal (4*x^3*y + 4*x*y^3 + x^2 - 4*x*y + y^2 - 1) of\n Multivariate Polynomial Ring in x, y over Rational Field\n sage: CH.gen(0).factor()\n (4*x*y + 1) * (x^2 + y^2 - 1)\n\nSage example in ./mpoly.tex, line 1161::\n\n sage: H.quotient(C) == H\n True\n\nSage example in ./mpoly.tex, line 1184::\n\n sage: [J.dimension() for J in [J1, J2, C, H, H*J2, J1+J2]]\n [0, 0, 1, 1, 1, -1]\n\nSage example in ./mpoly.tex, line 1285::\n\n sage: R.<x,y,z> = QQ[]\n sage: J = ideal(2*x+y-2*z, 2*x+2*y+z-1)\n sage: J.elimination_ideal(x)\n Ideal (y + 3*z - 1) of Multivariate Polynomial Ring in x, y, z\n over Rational Field\n sage: J.elimination_ideal([x,y])\n Ideal (0) of Multivariate Polynomial Ring in x, y, z over Rational Field\n\nSage example in ./mpoly.tex, line 1312::\n\n sage: R.<x,y> = QQ[]\n sage: J1 = ideal(x^2 + y^2 - 1, 16*x^2*y^2 - 1)\n\nSage example in ./mpoly.tex, line 1328::\n\n sage: g = J1.elimination_ideal(y).gens(); g\n [16*x^4 - 16*x^2 + 1]\n sage: SR(g[0]).solve(SR(x)) # solves by radicals\n [x == -1/2*sqrt(sqrt(3) + 2), x == 1/2*sqrt(sqrt(3) + 2),\n x == -1/2*sqrt(-sqrt(3) + 2), x == 1/2*sqrt(-sqrt(3) + 2)]\n\nSage example in ./mpoly.tex, line 1351::\n\n sage: C.elimination_ideal(y).gens()\n [0]\n sage: H.elimination_ideal(y).gens()\n [0]\n\nSage example in ./mpoly.tex, line 1431::\n\n sage: R.<x,y,t> = QQ[]\n sage: Param = R.ideal((1-t^2)-(1+t^2)*x, 2*t-(1+t^2)*y)\n\nSage example in ./mpoly.tex, line 1437::\n\n sage: Param.elimination_ideal(t).gens()\n [x^2 + y^2 - 1]\n\nSage example in ./mpoly.tex, line 1469::\n\n sage: R.<x,y,t> = QQ[]\n sage: eq = x^2 + (y-t)^2 - 1/2*(t^2+1)\n sage: fig = add((eq(t=k/5)*QQ[x,y]).plot() for k in (-15..15))\n sage: fig.show(aspect_ratio=1, xmin=-2, xmax=2, ymin=-3, ymax=3)\n\nSage example in ./mpoly.tex, line 1494::\n\n sage: env = ideal(eq, eq.derivative(t)).elimination_ideal(t)\n sage: env.gens()\n [2*x^2 - 2*y^2 - 1]\n\nSage example in ./mpoly.tex, line 1502::\n\n sage: env.change_ring(QQ[x,y]).plot((x,-2,2),(y,-3,3))\n Graphics object consisting of 1 graphics primitive\n\nSage example in ./mpoly.tex, line 1538::\n\n sage: R.<x,y,t> = QQ[]\n sage: J = (y-t*x, y-t*(1-x))*R\n sage: (x^2+y^2) - ((1-x)^2+y^2) in J\n False\n\nSage example in ./mpoly.tex, line 1553::\n\n sage: R.<x,y,t,u> = QQ[]\n sage: J = (y-t*x, y-t*(1-x), t*u-1)*R\n sage: (x^2+y^2) - ((1-x)^2+y^2) in J\n True\n\nSage example in ./mpoly.tex, line 1615::\n\n sage: R.<x,y,t> = QQ[]\n\nSage example in ./mpoly.tex, line 1618::\n\n sage: eq.derivative(t).resultant(eq, t)\n x^2 - y^2 - 1/2\n\nSage example in ./mpoly.tex, line 1636::\n\n sage: R.<x,y> = QQ[]\n sage: p = y^2 - x; q = y^2 + x\n sage: p.resultant(q, y)\n 4*x^2\n sage: ideal(p, q).elimination_ideal(y)\n Ideal (x) of Multivariate Polynomial Ring in x, y over Rational Field\n\nSage example in ./mpoly.tex, line 1665::\n\n sage: R.<x,y> = QQ[]\n sage: ((x^2 + y^2)*(x^2 + y^2 + 1)*R).dimension()\n 1\n\nSage example in ./mpoly.tex, line 1690::\n\n sage: R.<x,y> = QQ[]\n sage: J1 = (x^2 + y^2 - 1, 16*x^2*y^2 - 1)*R\n sage: J1.variety()\n []\n\nSage example in ./mpoly.tex, line 1705::\n\n sage: J1.variety(QQbar)[0:2]\n [{y: -0.9659258262890683?, x: -0.2588190451025208?},\n {y: -0.9659258262890683?, x: 0.2588190451025208?}]\n\nSage example in ./mpoly.tex, line 1759::\n\n sage: R.<x,y> = PolynomialRing(QQ, order='lex')\n sage: C = ideal(x^2+y^2-1)\n sage: D = ideal((x+y-1)*(x+y+1))\n sage: J = C + D\n\nSage example in ./mpoly.tex, line 1786::\n\n sage: J.triangular_decomposition()\n [Ideal 
(y, x^2 - 1) of Multivariate Polynomial Ring in x, y\n over Rational Field,\n Ideal (y^2 - 1, x) of Multivariate Polynomial Ring in x, y\n over Rational Field]\n\nSage example in ./mpoly.tex, line 1840::\n\n sage: D = ideal((x+2*y-1)*(x+2*y+1)); J = C + D\n sage: J.variety()\n [{y: 0, x: 1}, {y: 0, x: -1}, {y: 4/5, x: -3/5}, {y: -4/5, x: 3/5}]\n sage: [T.gens() for T in J.triangular_decomposition()]\n [[y, x^2 - 1], [25*y^2 - 16, 4*x + 3*y]]\n\nSage example in ./mpoly.tex, line 1855::\n\n sage: Jy = J.elimination_ideal(x); Jy.gens()\n [25*y^3 - 16*y]\n\nSage example in ./mpoly.tex, line 1863::\n\n sage: ys = QQ['y'](Jy.0).roots(); ys\n [(4/5, 1), (0, 1), (-4/5, 1)]\n sage: QQ['x'](J.1(y=ys[0][0])).roots()\n [(-3/5, 1), (-13/5, 1)]\n\nSage example in ./mpoly.tex, line 1882::\n\n sage: ys = CDF['y'](Jy.0).roots(); ys # abs tol 2e-15\n [(-0.8, 1), (0.0, 1), (0.8, 1)]\n sage: [CDF['x'](p(y=ys[0][0])).roots() for p in J.gens()] # abs tol 2e-15\n [[(-0.5999999999999999, 1), (0.6000000000000001, 1)], [(0.6000000000000001, 1), (2.600000000000001, 1)]]\n\nSage example in ./mpoly.tex, line 1911::\n\n sage: R.<x,y> = QQ[]; J = ideal([ x^7-(100*x-1)^2, y-x^7+1 ])\n sage: J.variety(RealField(51)) # random\n [{y: 396340.890166545, x: -14.1660266425312}]\n\nSage example in ./mpoly.tex, line 1923::\n\n sage: J.variety(AA) # py2\n [{x: 0.00999999900000035?, y: -0.999999999999990?},\n {x: 0.01000000100000035?, y: -0.999999999999990?},\n {x: 6.305568998641385?, y: 396340.8901665450?}]\n sage: J.variety(AA) # py3\n [{y: -0.999999999999990?, x: 0.00999999900000035?},\n {y: -0.999999999999990?, x: 0.01000000100000035?},\n {y: 396340.8901665450?, x: 6.305568998641385?}]\n\n\nSage example in ./mpoly.tex, line 1983::\n\n sage: len(J2.variety(QQbar)), J2.vector_space_dimension()\n (4, 8)\n\nSage example in ./mpoly.tex, line 1993::\n\n sage: J2.normal_basis()\n [x*y^3, y^3, x*y^2, y^2, x*y, y, x, 1]\n\nSage example in ./mpoly.tex, line 2187::\n\n sage: R.<x,y,z,t> = PolynomialRing(QQ, order='lex')\n\nSage example in ./mpoly.tex, line 2243::\n\n sage: ((x+y+z)^2).reduce([x-t, y-t^2, z^2-t])\n 2*z*t^2 + 2*z*t + t^4 + 2*t^3 + t^2 + t\n\nSage example in ./mpoly.tex, line 2298::\n\n sage: R.<x,y> = PolynomialRing(QQ, order='lex')\n sage: (g, h) = (x-y, x-y^2); p = x*y - x\n sage: p.reduce([g, h]) # two reductions by h\n y^3 - y^2\n sage: p.reduce([h, g]) # two reductions by g\n y^2 - y\n\nSage example in ./mpoly.tex, line 2311::\n\n sage: p - y*g + h\n 0\n\nSage example in ./mpoly.tex, line 2575::\n\n sage: R.<x,y> = PolynomialRing(QQ, order='lex')\n sage: R.ideal(x*y^4, x^2*y^3, x^4*y, x^5).basis_is_groebner()\n True\n\nSage example in ./mpoly.tex, line 2584::\n\n sage: R.ideal(x^2+y^2-1, 16*x^2*y^2-1).basis_is_groebner()\n False\n\nSage example in ./mpoly.tex, line 2610::\n\n sage: R.ideal(x^2+y^2-1, 16*x^2*y^2-1).groebner_basis()\n [x^2 + y^2 - 1, y^4 - y^2 + 1/16]\n\nSage example in ./mpoly.tex, line 2618::\n\n sage: R.ideal(16*x^2*y^2-1).groebner_basis()\n [x^2*y^2 - 1/16]\n\nSage example in ./mpoly.tex, line 2626::\n\n sage: R.ideal(x^2+y^2-1, (x+y)^2-1).groebner_basis()\n [x^2 + y^2 - 1, x*y, y^3 - y]\n\nSage example in ./mpoly.tex, line 2636::\n\n sage: R_lex.<x,y> = PolynomialRing(QQ, order='lex')\n sage: J_lex = (x*y+x+y^2+1, x^2*y+x*y^2+1)*R_lex; J_lex.gens()\n [x*y + x + y^2 + 1, x^2*y + x*y^2 + 1]\n sage: J_lex.groebner_basis()\n [x - 1/2*y^3 + y^2 + 3/2, y^4 - y^3 - 3*y - 1]\n\nSage example in ./mpoly.tex, line 2644::\n\n sage: R_invlex = PolynomialRing(QQ, 'x,y', order='invlex')\n sage: J_invlex = 
J_lex.change_ring(R_invlex); J_invlex.gens()\n [y^2 + x*y + x + 1, x*y^2 + x^2*y + 1]\n sage: J_invlex.groebner_basis()\n [y^2 + x*y + x + 1, x^2 + x - 1]\n\nSage example in ./mpoly.tex, line 2651::\n\n sage: R_drl = PolynomialRing(QQ, 'x,y', order='degrevlex')\n sage: J_drl = J_lex.change_ring(R_drl); J_drl.gens()\n [x*y + y^2 + x + 1, x^2*y + x*y^2 + 1]\n sage: J_drl.groebner_basis()\n [y^3 - 2*y^2 - 2*x - 3, x^2 + x - 1, x*y + y^2 + x + 1]\n\nSage example in ./mpoly.tex, line 2719::\n\n sage: p = (x + y)^5\n sage: J_lex.reduce(p)\n 17/2*y^3 - 12*y^2 + 4*y - 49/2\n\nSage example in ./mpoly.tex, line 2726::\n\n sage: p.reduce(J_lex.groebner_basis())\n 17/2*y^3 - 12*y^2 + 4*y - 49/2\n\nSage example in ./mpoly.tex, line 2732::\n\n sage: R_lex.quo(J_lex)(p)\n 17/2*ybar^3 - 12*ybar^2 + 4*ybar - 49/2\n\nSage example in ./mpoly.tex, line 2738::\n\n sage: R_drl.quo(J_drl)(p)\n 5*ybar^2 + 17*xbar + 4*ybar + 1\n\nSage example in ./mpoly.tex, line 2751::\n\n sage: J_lex.normal_basis()\n [y^3, y^2, y, 1]\n sage: J_invlex.normal_basis()\n [x*y, y, x, 1]\n sage: J_drl.normal_basis()\n [y^2, y, x, 1]\n\nSage example in ./mpoly.tex, line 2775::\n\n sage: ideal(16*x^2*y^2-1).dimension()\n 1\n\nSage example in ./mpoly.tex, line 2851::\n\n sage: R.<t,x,y,z> = PolynomialRing(QQ, order='lex')\n sage: J = R.ideal(t+x+y+z-1, t^2-x^2-y^2-z^2-1, t-x*y)\n sage: [u.polynomial(u.variable(0)) for u in J.groebner_basis()]\n [t + x + y + z - 1,\n (y + 1)*x + y + z - 1,\n (z - 2)*x + y*z - 2*y - 2*z + 1,\n (z - 2)*y^2 + (-2*z + 1)*y - z^2 + z - 1]\n\nSage example in ./mpoly.tex, line 2970::\n\n sage: from sage.rings.ideal import Cyclic\n sage: Cyclic(QQ['x,y,z'])\n Ideal (x + y + z, x*y + x*z + y*z, x*y*z - 1) of\n Multivariate Polynomial Ring in x, y, z over Rational Field\n\nSage example in ./mpoly.tex, line 2980::\n\n sage: def C(R, n): return Cyclic(PolynomialRing(R, 'x', n))\n\nSage example in ./mpoly.tex, line 3010::\n\n sage: p = previous_prime(2^30)\n sage: len(C(GF(p), 6).groebner_basis())\n 45\n\n\"\"\"\n\n", "id": "5751432", "language": "Python", "matching_score": 1.2258278131484985, "max_stars_count": 1742, "path": "src/sage/tests/books/computational-mathematics-with-sagemath/mpoly_doctest.py" }, { "content": "# -*- encoding: utf-8 -*-\n\"\"\"\nParsing docstrings\n\nThis module contains functions and classes that parse docstrings.\n\nAUTHORS:\n\n- <NAME> (2012-03-27) -- initial version, based on <NAME>'s code.\n\n- <NAME>(2014-08-28) -- much improved handling of tolerances\n using interval arithmetic (:trac:`16889`).\n\"\"\"\n\n# ****************************************************************************\n# Copyright (C) 2012 <NAME> <<EMAIL>>\n# <NAME> <<EMAIL>>\n# <NAME> <<EMAIL>>\n#\n# Distributed under the terms of the GNU General Public License (GPL)\n# as published by the Free Software Foundation; either version 2 of\n# the License, or (at your option) any later version.\n# https://www.gnu.org/licenses/\n# ****************************************************************************\n\nimport re\nimport doctest\nfrom collections import defaultdict\nfrom sage.repl.preparse import preparse, strip_string_literals\nfrom Cython.Build.Dependencies import strip_string_literals as cython_strip_string_literals\nfrom functools import reduce\n\n\nfrom .external import available_software\n\nfloat_regex = re.compile(r'\\s*([+-]?\\s*((\\d*\\.?\\d+)|(\\d+\\.?))([eE][+-]?\\d+)?)')\noptional_regex = re.compile(r'(arb216|arb218|py2|py3|long time|not implemented|not tested|known bug)|([^ 
a-z]\\s*optional\\s*[:-]*((\\s|\\w|[.])*))')\n# Version 4.65 of glpk prints the warning \"Long-step dual simplex will\n# be used\" frequently. When Sage uses a system installation of glpk\n# which has not been patched, we need to ignore that message.\n# See :trac:`29317`.\nglpk_simplex_warning_regex = re.compile(r'(Long-step dual simplex will be used)')\n# :trac:`31204` -- suppress warning about ld and OS version for dylib files.\nld_warning_regex = re.compile(r'.*dylib.*was built for newer macOS version.*than being linked.*')\nfind_sage_prompt = re.compile(r\"^(\\s*)sage: \", re.M)\nfind_sage_continuation = re.compile(r\"^(\\s*)\\.\\.\\.\\.:\", re.M)\nfind_python_continuation = re.compile(r\"^(\\s*)\\.\\.\\.([^\\.])\", re.M)\npython_prompt = re.compile(r\"^(\\s*)>>>\", re.M)\n# The following are used to allow ... at the beginning of output\nellipsis_tag = \"<TEMP_ELLIPSIS_TAG>\"\ncontinuation_tag = \"<TEMP_CONTINUATION_TAG>\"\nrandom_marker = re.compile('.*random', re.I)\ntolerance_pattern = re.compile(r'\\b((?:abs(?:olute)?)|(?:rel(?:ative)?))? *?tol(?:erance)?\\b( +[0-9.e+-]+)?')\nbackslash_replacer = re.compile(r\"\"\"(\\s*)sage:(.*)\\\\\\ *\n\\ *(((\\.){4}:)|((\\.){3}))?\\ *\"\"\")\n\n_RIFtol = None\n\ndef RIFtol(*args):\n \"\"\"\n Create an element of the real interval field used for doctest tolerances.\n\n It allows large numbers like 1e1000, it parses strings with spaces\n like ``RIF(\" - 1 \")`` out of the box and it carries a lot of\n precision. The latter is useful for testing libraries using\n arbitrary precision but not guaranteed rounding such as PARI. We use\n 1044 bits of precision, which should be good to deal with tolerances\n on numbers computed with 1024 bits of precision.\n\n The interval approach also means that we do not need to worry about\n rounding errors and it is also very natural to see a number with\n tolerance as an interval.\n\n EXAMPLES::\n\n sage: from sage.doctest.parsing import RIFtol\n sage: RIFtol(-1, 1)\n 0.?\n sage: RIFtol(\" - 1 \")\n -1\n sage: RIFtol(\"1e1000\")\n 1.00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000?e1000\n \"\"\"\n global _RIFtol\n if _RIFtol is None:\n try:\n # We need to import from sage.all to avoid circular imports.\n from sage.all import RealIntervalField\n except ImportError:\n from warnings import warn\n warn(\"RealIntervalField not available, ignoring all tolerance specifications in doctests\")\n def fake_RIFtol(*args):\n return 0\n _RIFtol = fake_RIFtol\n else:\n _RIFtol = RealIntervalField(1044)\n return _RIFtol(*args)\n\n# This is the correct pattern to match ISO/IEC 6429 ANSI escape sequences:\n#\nansi_escape_sequence = re.compile(r'(\\x1b[@-Z\\\\-~]|\\x1b\\[.*?[@-~]|\\x9b.*?[@-~])')\n\n\ndef remove_unicode_u(string):\n \"\"\"\n Given a string, try to remove all unicode u prefixes inside.\n\n This will help to keep the same doctest results in Python2 and Python3.\n The input string is typically the documentation of a method or function.\n This string may contain some letters u that are unicode python2 prefixes.\n The aim is to remove all of these u and only them.\n\n INPUT:\n\n - ``string`` -- either ``unicode`` or ``bytes`` (if ``bytes``, it\n will be converted to ``unicode`` assuming UTF-8)\n\n OUTPUT: ``unicode`` string\n\n EXAMPLES::\n\n sage: from 
sage.doctest.parsing import remove_unicode_u as remu\n sage: remu(\"u'you'\")\n u\"'you'\"\n sage: remu('u')\n u'u'\n sage: remu(\"[u'am', 'stram', u'gram']\")\n u\"['am', 'stram', 'gram']\"\n sage: remu('[u\"am\", \"stram\", u\"gram\"]')\n u'[\"am\", \"stram\", \"gram\"]'\n\n This deals correctly with nested quotes::\n\n sage: str = '''[u\"Singular's stuff\", u'good']'''\n sage: print(remu(str))\n [\"Singular's stuff\", 'good']\n\n TESTS:\n\n This supports python2 str type as input::\n\n sage: euro = \"'€'\"\n sage: print(remu(euro))\n '€'\n \"\"\"\n stripped, replacements = cython_strip_string_literals(string,\n \"__remove_unicode_u\")\n string = stripped.replace('u\"', '\"').replace(\"u'\", \"'\")\n for magic, literal in replacements.items():\n string = string.replace(magic, literal)\n return string\n\n\n_long_repr_re = re.compile(r'([+-]?[0-9]+)[lL]')\ndef normalize_long_repr(s):\n \"\"\"\n Simple conversion from Python 2 representation of ``long`` ints (that\n is, integers with the ``L``) suffix, to the Python 3 representation\n (same number, without the suffix, since Python 3 doesn't have a\n distinct ``long`` type).\n\n Note: This just uses a simple regular expression that can't distinguish\n representations of long objects from strings containing a long repr.\n\n EXAMPLES::\n\n sage: from sage.doctest.parsing import normalize_long_repr\n sage: normalize_long_repr('10L')\n '10'\n sage: normalize_long_repr('[10L, -10L, +10L, \"ALL\"]')\n '[10, -10, +10, \"ALL\"]'\n \"\"\"\n return _long_repr_re.sub(lambda m: m.group(1), s)\n\n\n# Collection of fixups applied in the SageOutputChecker. Each element in this\n# this list a pair of functions applied to the actual test output ('g' for\n# \"got\") and the expected test output ('w' for \"wanted\"). The first function\n# should be a simple fast test on the expected and/or actual output to\n# determine if a fixup should be applied. The second function is the actual\n# fixup, which is applied if the test function passes. 
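A hypothetical application sketch (illustrative only, not the literal checker code):\n#\n# for quick_check, fixup in _repr_fixups:\n#     if quick_check(got, want):\n#         got, want = fixup(got, want)\n#\n# 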
In most fixups only one\n# of the expected or received outputs are normalized, depending on the\n# application.\n# For example, on Python 3 we strip all u prefixes from unicode strings in the\n# expected output, because we never expect to see those on Python 3.\n_repr_fixups = [\n (lambda g, w: 'u\"' in w or \"u'\" in w,\n lambda g, w: (g, remove_unicode_u(w))),\n (lambda g, w: 'L' in w or 'l' in w,\n lambda g, w: (g, normalize_long_repr(w)))\n]\n\n\ndef parse_optional_tags(string):\n \"\"\"\n Return a set consisting of the optional tags from the following\n set that occur in a comment on the first line of the input string.\n\n - 'long time'\n - 'not implemented'\n - 'not tested'\n - 'known bug'\n - 'py2'\n - 'py3'\n - 'arb216'\n - 'arb218'\n - 'optional: PKG_NAME' -- the set will just contain 'PKG_NAME'\n\n EXAMPLES::\n\n sage: from sage.doctest.parsing import parse_optional_tags\n sage: parse_optional_tags(\"sage: magma('2 + 2')# optional: magma\")\n {'magma'}\n sage: parse_optional_tags(\"sage: #optional -- mypkg\")\n {'mypkg'}\n sage: parse_optional_tags(\"sage: print(1) # parentheses are optional here\")\n set()\n sage: parse_optional_tags(\"sage: print(1) # optional\")\n {''}\n sage: sorted(list(parse_optional_tags(\"sage: #optional -- foo bar, baz\")))\n ['bar', 'foo']\n sage: parse_optional_tags(\"sage: #optional -- foo.bar, baz\")\n {'foo.bar'}\n sage: sorted(list(parse_optional_tags(\" sage: factor(10^(10^10) + 1) # LoNg TiME, NoT TeSTED; OptioNAL -- P4cka9e\")))\n ['long time', 'not tested', 'p4cka9e']\n sage: parse_optional_tags(\" sage: raise RuntimeError # known bug\")\n {'bug'}\n sage: sorted(list(parse_optional_tags(\" sage: determine_meaning_of_life() # long time, not implemented\")))\n ['long time', 'not implemented']\n\n We don't parse inside strings::\n\n sage: parse_optional_tags(\" sage: print(' # long time')\")\n set()\n sage: parse_optional_tags(\" sage: print(' # long time') # not tested\")\n {'not tested'}\n\n UTF-8 works::\n\n sage: parse_optional_tags(\"'ěščřžýáíéďĎ'\")\n set()\n \"\"\"\n safe, literals, state = strip_string_literals(string)\n first_line = safe.split('\\n', 1)[0]\n if '#' not in first_line:\n return set()\n comment = first_line[first_line.find('#')+1:]\n comment = comment[comment.index('(')+1 : comment.rindex(')')]\n # strip_string_literals replaces comments\n comment = \"#\" + (literals[comment]).lower()\n\n tags = []\n for m in optional_regex.finditer(comment):\n cmd = m.group(1)\n if cmd == 'known bug':\n tags.append('bug') # so that such tests will be run by sage -t ... 
-only-optional=bug\n elif cmd:\n tags.append(cmd)\n else:\n tags.extend(m.group(3).split() or [\"\"])\n return set(tags)\n\n\ndef parse_tolerance(source, want):\n \"\"\"\n Return a version of ``want`` marked up with the tolerance tags\n specified in ``source``.\n\n INPUT:\n\n - ``source`` -- a string, the source of a doctest\n - ``want`` -- a string, the desired output of the doctest\n\n OUTPUT:\n\n - ``want`` if there are no tolerance tags specified; a\n :class:`MarkedOutput` version otherwise.\n\n EXAMPLES::\n\n sage: from sage.doctest.parsing import parse_tolerance\n sage: marked = parse_tolerance(\"sage: s.update(abs_tol = .0000001)\", \"\")\n sage: type(marked)\n <class 'str'>\n sage: marked = parse_tolerance(\"sage: s.update(tol = 0.1); s.rel_tol # abs tol 0.01 \", \"\")\n sage: marked.tol\n 0\n sage: marked.rel_tol\n 0\n sage: marked.abs_tol\n 0.010000000000000000000...?\n \"\"\"\n safe, literals, state = strip_string_literals(source)\n first_line = safe.split('\\n', 1)[0]\n if '#' not in first_line:\n return want\n comment = first_line[first_line.find('#')+1:]\n comment = comment[comment.index('(')+1 : comment.rindex(')')]\n # strip_string_literals replaces comments\n comment = literals[comment]\n if random_marker.search(comment):\n want = MarkedOutput(want).update(random=True)\n else:\n m = tolerance_pattern.search(comment)\n if m:\n rel_or_abs, epsilon = m.groups()\n if epsilon is None:\n epsilon = RIFtol(\"1e-15\")\n else:\n epsilon = RIFtol(epsilon)\n if rel_or_abs is None:\n want = MarkedOutput(want).update(tol=epsilon)\n elif rel_or_abs.startswith('rel'):\n want = MarkedOutput(want).update(rel_tol=epsilon)\n elif rel_or_abs.startswith('abs'):\n want = MarkedOutput(want).update(abs_tol=epsilon)\n else:\n raise RuntimeError\n return want\n\n\ndef pre_hash(s):\n \"\"\"\n Prepends a string with its length.\n\n EXAMPLES::\n\n sage: from sage.doctest.parsing import pre_hash\n sage: pre_hash(\"abc\")\n '3:abc'\n \"\"\"\n return \"%s:%s\" % (len(s), s)\n\n\ndef get_source(example):\n \"\"\"\n Return the source with the leading 'sage: ' stripped off.\n\n EXAMPLES::\n\n sage: from sage.doctest.parsing import get_source\n sage: from sage.doctest.sources import DictAsObject\n sage: example = DictAsObject({})\n sage: example.sage_source = \"2 + 2\"\n sage: example.source = \"sage: 2 + 2\"\n sage: get_source(example)\n '2 + 2'\n sage: example = DictAsObject({})\n sage: example.source = \"3 + 3\"\n sage: get_source(example)\n '3 + 3'\n \"\"\"\n return getattr(example, 'sage_source', example.source)\n\ndef reduce_hex(fingerprints):\n \"\"\"\n Return a symmetric function of the arguments as hex strings.\n\n The arguments should be 32 character strings consisting of hex\n digits: 0-9 and a-f.\n\n EXAMPLES::\n\n sage: from sage.doctest.parsing import reduce_hex\n sage: reduce_hex([\"abc\", \"12399aedf\"])\n '0000000000000000000000012399a463'\n sage: reduce_hex([\"12399aedf\",\"abc\"])\n '0000000000000000000000012399a463'\n \"\"\"\n from operator import xor\n res = reduce(xor, (int(x, 16) for x in fingerprints), 0)\n if res < 0:\n res += 1 << 128\n return \"%032x\" % res\n\n\nclass MarkedOutput(str):\n \"\"\"\n A subclass of string with context for whether another string\n matches it.\n\n EXAMPLES::\n\n sage: from sage.doctest.parsing import MarkedOutput\n sage: s = MarkedOutput(\"abc\")\n sage: s.rel_tol\n 0\n sage: s.update(rel_tol = .05)\n u'abc'\n sage: s.rel_tol\n 0.0500000000000000\n\n sage: MarkedOutput(u\"56 µs\")\n u'56 \\xb5s'\n \"\"\"\n random = False\n rel_tol = 0\n abs_tol = 0\n tol 
= 0\n def update(self, **kwds):\n \"\"\"\n EXAMPLES::\n\n sage: from sage.doctest.parsing import MarkedOutput\n sage: s = MarkedOutput(\"0.0007401\")\n sage: s.update(abs_tol = .0000001)\n u'0.0007401'\n sage: s.rel_tol\n 0\n sage: s.abs_tol\n 1.00000000000000e-7\n \"\"\"\n self.__dict__.update(kwds)\n return self\n\n def __reduce__(self):\n \"\"\"\n Pickling.\n\n EXAMPLES::\n\n sage: from sage.doctest.parsing import MarkedOutput\n sage: s = MarkedOutput(\"0.0007401\")\n sage: s.update(abs_tol = .0000001)\n u'0.0007401'\n sage: t = loads(dumps(s)) # indirect doctest\n sage: t == s\n True\n sage: t.abs_tol\n 1.00000000000000e-7\n \"\"\"\n return make_marked_output, (str(self), self.__dict__)\n\n\ndef make_marked_output(s, D):\n \"\"\"\n Auxiliary function for pickling.\n\n EXAMPLES::\n\n sage: from sage.doctest.parsing import make_marked_output\n sage: s = make_marked_output(\"0.0007401\", {'abs_tol':.0000001})\n sage: s\n u'0.0007401'\n sage: s.abs_tol\n 1.00000000000000e-7\n \"\"\"\n ans = MarkedOutput(s)\n ans.__dict__.update(D)\n return ans\n\n\nclass OriginalSource(object):\n r\"\"\"\n Context swapping out the pre-parsed source with the original for\n better reporting.\n\n EXAMPLES::\n\n sage: from sage.doctest.sources import FileDocTestSource\n sage: from sage.doctest.control import DocTestDefaults\n sage: from sage.env import SAGE_SRC\n sage: import os\n sage: filename = os.path.join(SAGE_SRC,'sage','doctest','forker.py')\n sage: FDS = FileDocTestSource(filename,DocTestDefaults())\n sage: doctests, extras = FDS.create_doctests(globals())\n sage: ex = doctests[0].examples[0]\n sage: ex.sage_source\n u'doctest_var = 42; doctest_var^2\\n'\n sage: ex.source\n u'doctest_var = Integer(42); doctest_var**Integer(2)\\n'\n sage: from sage.doctest.parsing import OriginalSource\n sage: with OriginalSource(ex):\n ....: ex.source\n u'doctest_var = 42; doctest_var^2\\n'\n \"\"\"\n def __init__(self, example):\n \"\"\"\n Swaps out the source for the sage_source of a doctest example.\n\n INPUT:\n\n - ``example`` -- a :class:`doctest.Example` instance\n\n EXAMPLES::\n\n sage: from sage.doctest.sources import FileDocTestSource\n sage: from sage.doctest.control import DocTestDefaults\n sage: from sage.env import SAGE_SRC\n sage: import os\n sage: filename = os.path.join(SAGE_SRC,'sage','doctest','forker.py')\n sage: FDS = FileDocTestSource(filename,DocTestDefaults())\n sage: doctests, extras = FDS.create_doctests(globals())\n sage: ex = doctests[0].examples[0]\n sage: from sage.doctest.parsing import OriginalSource\n sage: OriginalSource(ex)\n <sage.doctest.parsing.OriginalSource object at ...>\n \"\"\"\n self.example = example\n\n def __enter__(self):\n r\"\"\"\n EXAMPLES::\n\n sage: from sage.doctest.sources import FileDocTestSource\n sage: from sage.doctest.control import DocTestDefaults\n sage: from sage.env import SAGE_SRC\n sage: import os\n sage: filename = os.path.join(SAGE_SRC,'sage','doctest','forker.py')\n sage: FDS = FileDocTestSource(filename,DocTestDefaults())\n sage: doctests, extras = FDS.create_doctests(globals())\n sage: ex = doctests[0].examples[0]\n sage: from sage.doctest.parsing import OriginalSource\n sage: with OriginalSource(ex): # indirect doctest\n ....: ex.source\n u'doctest_var = 42; doctest_var^2\\n'\n \"\"\"\n if hasattr(self.example, 'sage_source'):\n self.old_source, self.example.source = self.example.source, self.example.sage_source\n\n def __exit__(self, *args):\n r\"\"\"\n EXAMPLES::\n\n sage: from sage.doctest.sources import FileDocTestSource\n sage: from 
sage.doctest.control import DocTestDefaults\n sage: from sage.env import SAGE_SRC\n sage: import os\n sage: filename = os.path.join(SAGE_SRC,'sage','doctest','forker.py')\n sage: FDS = FileDocTestSource(filename,DocTestDefaults())\n sage: doctests, extras = FDS.create_doctests(globals())\n sage: ex = doctests[0].examples[0]\n sage: from sage.doctest.parsing import OriginalSource\n sage: with OriginalSource(ex): # indirect doctest\n ....: ex.source\n u'doctest_var = 42; doctest_var^2\\n'\n sage: ex.source # indirect doctest\n u'doctest_var = Integer(42); doctest_var**Integer(2)\\n'\n \"\"\"\n if hasattr(self.example, 'sage_source'):\n self.example.source = self.old_source\n\n\nclass SageDocTestParser(doctest.DocTestParser):\n \"\"\"\n A version of the standard doctest parser which handles Sage's\n custom options and tolerances in floating point arithmetic.\n \"\"\"\n def __init__(self, optional_tags=(), long=False):\n r\"\"\"\n INPUT:\n\n - ``optional_tags`` -- a list or tuple of strings.\n - ``long`` -- boolean, whether to run doctests marked as taking a\n long time.\n\n EXAMPLES::\n\n sage: from sage.doctest.parsing import SageDocTestParser\n sage: DTP = SageDocTestParser(('sage','magma','guava'))\n sage: ex = DTP.parse(\"sage: 2 + 2\\n\")[1]\n sage: ex.sage_source\n '2 + 2\\n'\n sage: ex = DTP.parse(\"sage: R.<x> = ZZ[]\")[1]\n sage: ex.source\n \"R = ZZ['x']; (x,) = R._first_ngens(1)\\n\"\n\n TESTS::\n\n sage: TestSuite(DTP).run()\n \"\"\"\n self.long = long\n self.optionals = defaultdict(int) # record skipped optional tests\n if optional_tags is True: # run all optional tests\n self.optional_tags = True\n self.optional_only = False\n else:\n self.optional_tags = set(optional_tags)\n if 'sage' in self.optional_tags:\n self.optional_only = False\n self.optional_tags.remove('sage')\n else:\n self.optional_only = True\n\n def __eq__(self, other):\n \"\"\"\n Comparison.\n\n EXAMPLES::\n\n sage: from sage.doctest.parsing import SageDocTestParser\n sage: DTP = SageDocTestParser(('sage','magma','guava'), True)\n sage: DTP2 = SageDocTestParser(('sage','magma','guava'), False)\n sage: DTP == DTP2\n False\n \"\"\"\n if not isinstance(other, SageDocTestParser):\n return False\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"\n Test for non-equality.\n\n EXAMPLES::\n\n sage: from sage.doctest.parsing import SageDocTestParser\n sage: DTP = SageDocTestParser(('sage','magma','guava'), True)\n sage: DTP2 = SageDocTestParser(('sage','magma','guava'), False)\n sage: DTP != DTP2\n True\n \"\"\"\n return not (self == other)\n\n def parse(self, string, *args):\n r\"\"\"\n A Sage specialization of :class:`doctest.DocTestParser`.\n\n INPUT:\n\n - ``string`` -- the string to parse.\n - ``name`` -- optional string giving the name identifying string,\n to be used in error messages.\n\n OUTPUT:\n\n - A list consisting of strings and :class:`doctest.Example`\n instances. 
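A degenerate case (a small additional sketch): input containing no prompts at all is passed through as a single string::\n\n sage: from sage.doctest.parsing import SageDocTestParser\n sage: SageDocTestParser(('sage',)).parse(\"No examples here.\\n\")\n ['No examples here.\\n']\n\n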
There will be at least one string between\n successive examples (exactly one unless or long or optional\n tests are removed), and it will begin and end with a string.\n\n EXAMPLES::\n\n sage: from sage.doctest.parsing import SageDocTestParser\n sage: DTP = SageDocTestParser(('sage','magma','guava'))\n sage: example = 'Explanatory text::\\n\\n sage: E = magma(\"EllipticCurve([1, 1, 1, -10, -10])\") # optional: magma\\n\\nLater text'\n sage: parsed = DTP.parse(example)\n sage: parsed[0]\n 'Explanatory text::\\n\\n'\n sage: parsed[1].sage_source\n 'E = magma(\"EllipticCurve([1, 1, 1, -10, -10])\") # optional: magma\\n'\n sage: parsed[2]\n '\\nLater text'\n\n If the doctest parser is not created to accept a given\n optional argument, the corresponding examples will just be\n removed::\n\n sage: DTP2 = SageDocTestParser(('sage',))\n sage: parsed2 = DTP2.parse(example)\n sage: parsed2\n ['Explanatory text::\\n\\n', '\\nLater text']\n\n You can mark doctests as having a particular tolerance::\n\n sage: example2 = 'sage: gamma(1.6) # tol 2.0e-11\\n0.893515349287690'\n sage: ex = DTP.parse(example2)[1]\n sage: ex.sage_source\n 'gamma(1.6) # tol 2.0e-11\\n'\n sage: ex.want\n u'0.893515349287690\\n'\n sage: type(ex.want)\n <class 'sage.doctest.parsing.MarkedOutput'>\n sage: ex.want.tol\n 2.000000000000000000...?e-11\n\n You can use continuation lines::\n\n sage: s = \"sage: for i in range(4):\\n....: print(i)\\n....:\\n\"\n sage: ex = DTP2.parse(s)[1]\n sage: ex.source\n 'for i in range(Integer(4)):\\n print(i)\\n'\n\n Sage currently accepts backslashes as indicating that the end\n of the current line should be joined to the next line. This\n feature allows for breaking large integers over multiple lines\n but is not standard for Python doctesting. It's not\n guaranteed to persist, but works in Sage 5.5::\n\n sage: n = 1234\\\n ....: 5678\n sage: print(n)\n 12345678\n sage: type(n)\n <class 'sage.rings.integer.Integer'>\n\n It also works without the line continuation::\n\n sage: m = 8765\\\n 4321\n sage: print(m)\n 87654321\n\n Test that :trac:`26575` is resolved::\n\n sage: example3 = 'sage: Zp(5,4,print_mode=\"digits\")(5)\\n...00010'\n sage: parsed3 = DTP.parse(example3)\n sage: dte = parsed3[1]\n sage: dte.sage_source\n 'Zp(5,4,print_mode=\"digits\")(5)\\n'\n sage: dte.want\n '...00010\\n'\n \"\"\"\n # Hack for non-standard backslash line escapes accepted by the current\n # doctest system.\n m = backslash_replacer.search(string)\n while m is not None:\n next_prompt = find_sage_prompt.search(string,m.end())\n g = m.groups()\n if next_prompt:\n future = string[m.end():next_prompt.start()] + '\\n' + string[next_prompt.start():]\n else:\n future = string[m.end():]\n string = string[:m.start()] + g[0] + \"sage:\" + g[1] + future\n m = backslash_replacer.search(string,m.start())\n\n replace_ellipsis = not python_prompt.search(string)\n if replace_ellipsis:\n # There are no >>> prompts, so we can allow ... 
to begin the output\n # We do so by replacing ellipses with a special tag, then putting them back after parsing\n string = find_python_continuation.sub(r\"\\1\" + ellipsis_tag + r\"\\2\", string)\n string = find_sage_prompt.sub(r\"\\1>>> sage: \", string)\n string = find_sage_continuation.sub(r\"\\1...\", string)\n res = doctest.DocTestParser.parse(self, string, *args)\n filtered = []\n for item in res:\n if isinstance(item, doctest.Example):\n optional_tags = parse_optional_tags(item.source)\n if optional_tags:\n for tag in optional_tags:\n self.optionals[tag] += 1\n if (('not implemented' in optional_tags) or\n ('not tested' in optional_tags)):\n continue\n\n if 'long time' in optional_tags:\n if self.long:\n optional_tags.remove('long time')\n else:\n continue\n\n if self.optional_tags is not True:\n extra = optional_tags - self.optional_tags # set difference\n if extra:\n if not('external' in self.optional_tags\n and available_software.issuperset(extra)):\n continue\n elif self.optional_only:\n self.optionals['sage'] += 1\n continue\n if replace_ellipsis:\n item.want = item.want.replace(ellipsis_tag, \"...\")\n if item.exc_msg is not None:\n item.exc_msg = item.exc_msg.replace(ellipsis_tag, \"...\")\n item.want = parse_tolerance(item.source, item.want)\n if item.source.startswith(\"sage: \"):\n item.sage_source = item.source[6:]\n if item.sage_source.lstrip().startswith('#'):\n continue\n item.source = preparse(item.sage_source)\n filtered.append(item)\n return filtered\n\nclass SageOutputChecker(doctest.OutputChecker):\n r\"\"\"\n A modification of the doctest OutputChecker that can check\n relative and absolute tolerance of answers.\n\n EXAMPLES::\n\n sage: from sage.doctest.parsing import SageOutputChecker, MarkedOutput, SageDocTestParser\n sage: import doctest\n sage: optflag = doctest.NORMALIZE_WHITESPACE|doctest.ELLIPSIS\n sage: DTP = SageDocTestParser(('sage','magma','guava'))\n sage: OC = SageOutputChecker()\n sage: example2 = 'sage: gamma(1.6) # tol 2.0e-11\\n0.893515349287690'\n sage: ex = DTP.parse(example2)[1]\n sage: ex.sage_source\n 'gamma(1.6) # tol 2.0e-11\\n'\n sage: ex.want\n u'0.893515349287690\\n'\n sage: type(ex.want)\n <class 'sage.doctest.parsing.MarkedOutput'>\n sage: ex.want.tol\n 2.000000000000000000...?e-11\n sage: OC.check_output(ex.want, '0.893515349287690', optflag)\n True\n sage: OC.check_output(ex.want, '0.8935153492877', optflag)\n True\n sage: OC.check_output(ex.want, '0', optflag)\n False\n sage: OC.check_output(ex.want, 'x + 0.8935153492877', optflag)\n False\n \"\"\"\n def human_readable_escape_sequences(self, string):\n r\"\"\"\n Make ANSI escape sequences human readable.\n\n EXAMPLES::\n\n sage: print('This is \\x1b[1mbold\\x1b[0m text')\n This is <CSI-1m>bold<CSI-0m> text\n\n TESTS::\n\n sage: from sage.doctest.parsing import SageOutputChecker\n sage: OC = SageOutputChecker()\n sage: teststr = '-'.join([\n ....: 'bold\\x1b[1m',\n ....: 'red\\x1b[31m',\n ....: 'oscmd\\x1ba'])\n sage: OC.human_readable_escape_sequences(teststr)\n u'bold<CSI-1m>-red<CSI-31m>-oscmd<ESC-a>'\n \"\"\"\n def human_readable(match):\n ansi_escape = match.group(1)\n assert len(ansi_escape) >= 2\n if len(ansi_escape) == 2:\n return u'<ESC-'+ansi_escape[1]+u'>'\n else:\n return u'<CSI-'+ansi_escape.lstrip(u'\\x1b[\\x9b')+u'>'\n return ansi_escape_sequence.subn(human_readable, string)[0]\n\n def add_tolerance(self, wantval, want):\n \"\"\"\n Enlarge the real interval element ``wantval`` according to\n the tolerance options in ``want``.\n\n INPUT:\n\n - ``wantval`` -- a real 
interval element\n - ``want`` -- a :class:`MarkedOutput` describing the tolerance\n\n OUTPUT:\n\n - an interval element containing ``wantval``\n\n EXAMPLES::\n\n sage: from sage.doctest.parsing import MarkedOutput, SageOutputChecker\n sage: OC = SageOutputChecker()\n sage: want_tol = MarkedOutput().update(tol=0.0001)\n sage: want_abs = MarkedOutput().update(abs_tol=0.0001)\n sage: want_rel = MarkedOutput().update(rel_tol=0.0001)\n sage: OC.add_tolerance(RIF(pi.n(64)), want_tol).endpoints()\n (3.14127849432443, 3.14190681285516)\n sage: OC.add_tolerance(RIF(pi.n(64)), want_abs).endpoints()\n (3.14149265358979, 3.14169265358980)\n sage: OC.add_tolerance(RIF(pi.n(64)), want_rel).endpoints()\n (3.14127849432443, 3.14190681285516)\n sage: OC.add_tolerance(RIF(1e1000), want_tol)\n 1.000?e1000\n sage: OC.add_tolerance(RIF(1e1000), want_abs)\n 1.000000000000000?e1000\n sage: OC.add_tolerance(RIF(1e1000), want_rel)\n 1.000?e1000\n sage: OC.add_tolerance(0, want_tol)\n 0.000?\n sage: OC.add_tolerance(0, want_abs)\n 0.000?\n sage: OC.add_tolerance(0, want_rel)\n 0\n \"\"\"\n if want.tol:\n if wantval == 0:\n return RIFtol(want.tol) * RIFtol(-1,1)\n else:\n return wantval * (1 + RIFtol(want.tol) * RIFtol(-1,1))\n elif want.abs_tol:\n return wantval + RIFtol(want.abs_tol) * RIFtol(-1,1)\n elif want.rel_tol:\n return wantval * (1 + RIFtol(want.rel_tol) * RIFtol(-1,1))\n else:\n return wantval\n\n def check_output(self, want, got, optionflags):\n \"\"\"\n Checks to see if the output matches the desired output.\n\n If ``want`` is a :class:`MarkedOutput` instance, takes into account the desired tolerance.\n\n INPUT:\n\n - ``want`` -- a string or :class:`MarkedOutput`\n - ``got`` -- a string\n - ``optionflags`` -- an integer, passed down to :class:`doctest.OutputChecker`\n\n OUTPUT:\n\n - boolean, whether ``got`` matches ``want`` up to the specified tolerance.\n\n EXAMPLES::\n\n sage: from sage.doctest.parsing import MarkedOutput, SageOutputChecker\n sage: import doctest\n sage: optflag = doctest.NORMALIZE_WHITESPACE|doctest.ELLIPSIS\n sage: rndstr = MarkedOutput(\"I'm wrong!\").update(random=True)\n sage: tentol = MarkedOutput(\"10.0\").update(tol=.1)\n sage: tenabs = MarkedOutput(\"10.0\").update(abs_tol=.1)\n sage: tenrel = MarkedOutput(\"10.0\").update(rel_tol=.1)\n sage: zerotol = MarkedOutput(\"0.0\").update(tol=.1)\n sage: zeroabs = MarkedOutput(\"0.0\").update(abs_tol=.1)\n sage: zerorel = MarkedOutput(\"0.0\").update(rel_tol=.1)\n sage: zero = \"0.0\"\n sage: nf = \"9.5\"\n sage: ten = \"10.05\"\n sage: eps = \"-0.05\"\n sage: OC = SageOutputChecker()\n\n ::\n\n sage: OC.check_output(rndstr,nf,optflag)\n True\n\n sage: OC.check_output(tentol,nf,optflag)\n True\n sage: OC.check_output(tentol,ten,optflag)\n True\n sage: OC.check_output(tentol,zero,optflag)\n False\n\n sage: OC.check_output(tenabs,nf,optflag)\n False\n sage: OC.check_output(tenabs,ten,optflag)\n True\n sage: OC.check_output(tenabs,zero,optflag)\n False\n\n sage: OC.check_output(tenrel,nf,optflag)\n True\n sage: OC.check_output(tenrel,ten,optflag)\n True\n sage: OC.check_output(tenrel,zero,optflag)\n False\n\n sage: OC.check_output(zerotol,zero,optflag)\n True\n sage: OC.check_output(zerotol,eps,optflag)\n True\n sage: OC.check_output(zerotol,ten,optflag)\n False\n\n sage: OC.check_output(zeroabs,zero,optflag)\n True\n sage: OC.check_output(zeroabs,eps,optflag)\n True\n sage: OC.check_output(zeroabs,ten,optflag)\n False\n\n sage: OC.check_output(zerorel,zero,optflag)\n True\n sage: OC.check_output(zerorel,eps,optflag)\n False\n sage: 
OC.check_output(zerorel,ten,optflag)\n False\n\n More explicit tolerance checks::\n\n sage: _ = x # rel tol 1e10\n sage: raise RuntimeError # rel tol 1e10\n Traceback (most recent call last):\n ...\n RuntimeError\n sage: 1 # abs tol 2\n -0.5\n sage: print(\"0.9999\") # rel tol 1e-4\n 1.0\n sage: print(\"1.00001\") # abs tol 1e-5\n 1.0\n sage: 0 # rel tol 1\n 1\n\n Spaces before numbers or between the sign and number are ignored::\n\n sage: print(\"[ - 1, 2]\") # abs tol 1e-10\n [-1,2]\n\n Tolerance on Python 3 for string results with unicode prefix::\n\n sage: a = u'Cyrano'; a\n u'Cyrano'\n sage: b = [u'Fermat', u'Euler']; b\n [u'Fermat', u'Euler']\n sage: c = u'you'; c\n u'you'\n \"\"\"\n got = self.human_readable_escape_sequences(got)\n got = glpk_simplex_warning_regex.sub('', got)\n got = ld_warning_regex.sub('', got)\n if isinstance(want, MarkedOutput):\n if want.random:\n return True\n elif want.tol or want.rel_tol or want.abs_tol:\n # First check the doctest without the numbers\n want_str = [g[0] for g in float_regex.findall(want)]\n got_str = [g[0] for g in float_regex.findall(got)]\n if len(want_str) != len(got_str):\n return False\n starwant = float_regex.sub('*', want)\n stargot = float_regex.sub('*', got)\n if not doctest.OutputChecker.check_output(self, starwant, stargot, optionflags):\n return False\n\n # Now check the numbers\n want_values = [RIFtol(g) for g in want_str]\n want_intervals = [self.add_tolerance(v, want) for v in want_values]\n got_values = [RIFtol(g) for g in got_str]\n # The doctest is successful if the \"want\" and \"got\"\n # intervals have a non-empty intersection\n return all(a.overlaps(b) for a, b in zip(want_intervals, got_values))\n\n ok = doctest.OutputChecker.check_output(self, want, got, optionflags)\n\n if ok:\n return ok\n\n did_fixup = False\n for quick_check, fixup in _repr_fixups:\n do_fixup = quick_check(got, want)\n if do_fixup:\n got, want = fixup(got, want)\n did_fixup = True\n\n if not did_fixup:\n # Return the same result as before\n return ok\n\n return doctest.OutputChecker.check_output(self, want, got, optionflags)\n\n def output_difference(self, example, got, optionflags):\n r\"\"\"\n Report on the differences between the desired result and what\n was actually obtained.\n\n If ``want`` is a :class:`MarkedOutput` instance, takes into account the desired tolerance.\n\n INPUT:\n\n - ``example`` -- a :class:`doctest.Example` instance\n - ``got`` -- a string\n - ``optionflags`` -- an integer, passed down to :class:`doctest.OutputChecker`\n\n OUTPUT:\n\n - a string, describing how ``got`` fails to match ``example.want``\n\n EXAMPLES::\n\n sage: from sage.doctest.parsing import MarkedOutput, SageOutputChecker\n sage: import doctest\n sage: optflag = doctest.NORMALIZE_WHITESPACE|doctest.ELLIPSIS\n sage: tentol = doctest.Example('',MarkedOutput(\"10.0\\n\").update(tol=.1))\n sage: tenabs = doctest.Example('',MarkedOutput(\"10.0\\n\").update(abs_tol=.1))\n sage: tenrel = doctest.Example('',MarkedOutput(\"10.0\\n\").update(rel_tol=.1))\n sage: zerotol = doctest.Example('',MarkedOutput(\"0.0\\n\").update(tol=.1))\n sage: zeroabs = doctest.Example('',MarkedOutput(\"0.0\\n\").update(abs_tol=.1))\n sage: zerorel = doctest.Example('',MarkedOutput(\"0.0\\n\").update(rel_tol=.1))\n sage: tlist = doctest.Example('',MarkedOutput(\"[10.0, 10.0, 10.0, 10.0, 10.0, 10.0]\\n\").update(abs_tol=0.987))\n sage: zero = \"0.0\"\n sage: nf = \"9.5\"\n sage: ten = \"10.05\"\n sage: eps = \"-0.05\"\n sage: L = \"[9.9, 8.7, 10.3, 11.2, 10.8, 10.0]\"\n sage: OC = 
SageOutputChecker()\n\n ::\n\n sage: print(OC.output_difference(tenabs,nf,optflag))\n Expected:\n 10.0\n Got:\n 9.5\n Tolerance exceeded:\n 10.0 vs 9.5, tolerance 5e-1 > 1e-1\n\n sage: print(OC.output_difference(tentol,zero,optflag))\n Expected:\n 10.0\n Got:\n 0.0\n Tolerance exceeded:\n 10.0 vs 0.0, tolerance 1e0 > 1e-1\n\n sage: print(OC.output_difference(tentol,eps,optflag))\n Expected:\n 10.0\n Got:\n -0.05\n Tolerance exceeded:\n 10.0 vs -0.05, tolerance 2e0 > 1e-1\n\n sage: print(OC.output_difference(tlist,L,optflag))\n Expected:\n [10.0, 10.0, 10.0, 10.0, 10.0, 10.0]\n Got:\n [9.9, 8.7, 10.3, 11.2, 10.8, 10.0]\n Tolerance exceeded in 2 of 6:\n 10.0 vs 8.7, tolerance 2e0 > 9.87e-1\n 10.0 vs 11.2, tolerance 2e0 > 9.87e-1\n\n TESTS::\n\n sage: print(OC.output_difference(tenabs,zero,optflag))\n Expected:\n 10.0\n Got:\n 0.0\n Tolerance exceeded:\n 10.0 vs 0.0, tolerance 1e1 > 1e-1\n\n sage: print(OC.output_difference(tenrel,zero,optflag))\n Expected:\n 10.0\n Got:\n 0.0\n Tolerance exceeded:\n 10.0 vs 0.0, tolerance 1e0 > 1e-1\n\n sage: print(OC.output_difference(tenrel,eps,optflag))\n Expected:\n 10.0\n Got:\n -0.05\n Tolerance exceeded:\n 10.0 vs -0.05, tolerance 2e0 > 1e-1\n\n sage: print(OC.output_difference(zerotol,ten,optflag))\n Expected:\n 0.0\n Got:\n 10.05\n Tolerance exceeded:\n 0.0 vs 10.05, tolerance 2e1 > 1e-1\n\n sage: print(OC.output_difference(zeroabs,ten,optflag))\n Expected:\n 0.0\n Got:\n 10.05\n Tolerance exceeded:\n 0.0 vs 10.05, tolerance 2e1 > 1e-1\n\n sage: print(OC.output_difference(zerorel,eps,optflag))\n Expected:\n 0.0\n Got:\n -0.05\n Tolerance exceeded:\n 0.0 vs -0.05, tolerance +infinity > 1e-1\n\n sage: print(OC.output_difference(zerorel,ten,optflag))\n Expected:\n 0.0\n Got:\n 10.05\n Tolerance exceeded:\n 0.0 vs 10.05, tolerance +infinity > 1e-1\n \"\"\"\n got = self.human_readable_escape_sequences(got)\n want = example.want\n diff = doctest.OutputChecker.output_difference(self, example, got, optionflags)\n if isinstance(want, MarkedOutput) and (want.tol or want.abs_tol or want.rel_tol):\n if diff[-1] != \"\\n\":\n diff += \"\\n\"\n want_str = [g[0] for g in float_regex.findall(want)]\n got_str = [g[0] for g in float_regex.findall(got)]\n if len(want_str) == len(got_str):\n failures = []\n def fail(x, y, actual, desired):\n failstr = \" {} vs {}, tolerance {} > {}\".format(x, y,\n RIFtol(actual).upper().str(digits=1, no_sci=False),\n RIFtol(desired).center().str(digits=15, skip_zeroes=True, no_sci=False)\n )\n failures.append(failstr)\n\n for wstr, gstr in zip(want_str, got_str):\n w = RIFtol(wstr)\n g = RIFtol(gstr)\n if not g.overlaps(self.add_tolerance(w, want)):\n if want.tol:\n if not w:\n fail(wstr, gstr, abs(g), want.tol)\n else:\n fail(wstr, gstr, abs(1 - g/w), want.tol)\n elif want.abs_tol:\n fail(wstr, gstr, abs(g - w), want.abs_tol)\n else:\n fail(wstr, gstr, abs(1 - g/w), want.rel_tol)\n\n if failures:\n if len(want_str) == 1:\n diff += \"Tolerance exceeded:\\n\"\n else:\n diff += \"Tolerance exceeded in %s of %s:\\n\"%(len(failures), len(want_str))\n diff += \"\\n\".join(failures) + \"\\n\"\n elif \"...\" in want:\n diff += \"Note: combining tolerance (# tol) with ellipsis (...) 
is not supported\\n\"\n return diff\n", "id": "8819464", "language": "Python", "matching_score": 2.083362102508545, "max_stars_count": 0, "path": "src/sage/doctest/parsing.py" }, { "content": "# -*- coding: utf-8 -*-\nr\"\"\"\nFunctions to construct widgets, based on the old SageNB interface.\n\nThese should ensure mostly backwards compatibility with SageNB.\n\nTESTS:\n\nWe need to setup a proper test environment for widgets::\n\n sage: from ipywidgets.widgets.tests.utils import setup_test_comm\n sage: setup_test_comm()\n\nEXAMPLES::\n\n sage: from sage.repl.ipython_kernel.widgets_sagenb import text_control\n sage: text_control(\"Hello World!\")\n HTMLText(value=u'Hello World!')\n\"\"\"\n\n# ****************************************************************************\n# Copyright (C) 2017 <NAME> <<EMAIL>>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 2 of the License, or\n# (at your option) any later version.\n# https://www.gnu.org/licenses/\n# ****************************************************************************\n\nfrom ipywidgets.widgets import (IntSlider, IntRangeSlider, FloatSlider,\n FloatRangeSlider, SelectionSlider,\n Checkbox, ToggleButtons, Dropdown)\nfrom .widgets import (TransformText, TransformTextarea,\n TransformIntSlider, TransformIntRangeSlider,\n TransformFloatSlider, TransformFloatRangeSlider,\n EvalText, EvalTextarea, SageColorPicker, Grid)\nfrom ipywidgets.widgets.interaction import _get_min_max_value\nfrom collections.abc import Iterable, Sequence\nfrom numbers import Integral, Rational, Real\n\nfrom sage.structure.all import parent\nfrom sage.arith.srange import srange\nfrom sage.plot.colors import Color\nimport sage.rings.abc\n\n\nfrom .widgets import HTMLText as text_control\n\n\ndef input_box(default=None, label=None, type=None, width=80, height=1):\n \"\"\"\n A textbox widget.\n\n INPUT:\n\n - ``default`` -- initial value\n\n - ``label`` -- optional label\n\n - ``type`` -- function of one variable or ``None``. if ``type`` is\n ``str``, the value of this widget for interactive functions is\n just the text as ``str``. Otherwise, the text is evaluated using\n :func:`sage_eval`, ``type`` is called on it and the result is used\n as value. 
Except if ``type`` is ``None``, then the evaluated text\n is used as value.\n\n - ``width`` -- width of the box\n\n - ``height`` -- if ``height > 1``, create a textarea instead of a\n single-line textbox.\n\n EXAMPLES::\n\n sage: from sage.repl.ipython_kernel.all_jupyter import input_box\n\n The most basic usage is when ``type=str``::\n\n sage: w = input_box(\"4+5\", type=str, label=\"enter a string\")\n sage: w\n TransformText(value=u'4+5', description=u'enter a string', layout=Layout(max_width=u'81em'))\n sage: w.get_interact_value()\n '4+5'\n\n Without ``type``, the text is evaluated::\n\n sage: w = input_box(\"4+5\")\n sage: w\n EvalText(value=u'4+5', layout=Layout(max_width=u'81em'))\n sage: w.get_interact_value()\n 9\n\n With a different ``type``, the text is evaluated and ``type`` is\n called on it:\n\n sage: w = input_box(\"4+5\", type=float)\n sage: w\n EvalText(value=u'4+5', layout=Layout(max_width=u'81em'))\n sage: w.get_interact_value()\n 9.0\n\n Despite the keyword name, ``type`` does not need to be a type::\n\n sage: w = input_box(\"4+5\", type=sqrt)\n sage: w\n EvalText(value=u'4+5', layout=Layout(max_width=u'81em'))\n sage: w.get_interact_value()\n 3\n\n When ``height > 1``, a textarea is returned::\n\n sage: w = input_box(\"4+5\", width=100, height=1)\n sage: w\n EvalText(value=u'4+5', layout=Layout(max_width=u'101em'))\n sage: w = input_box(\"4+5\", width=100, height=5)\n sage: w\n EvalTextarea(value=u'4+5', layout=Layout(max_width=u'101em'))\n\n TESTS::\n\n sage: w = input_box(type=Color)\n Traceback (most recent call last):\n ...\n NotImplementedError: type=Color is not supported\n \"\"\"\n kwds = {}\n if type is str:\n kwds[\"transform\"] = str # Convert unicode to str\n if height > 1:\n cls = TransformTextarea\n else:\n cls = TransformText\n elif type is Color:\n # This is special-cased by SageNB (with a non-trivial\n # implementation!), but it doesn't seem to be used in practice\n # because we have a SageColorPicker widget.\n raise NotImplementedError(\"type=Color is not supported\")\n else:\n kwds[\"transform\"] = type\n if height > 1:\n cls = EvalTextarea\n else:\n cls = EvalText\n if default is not None:\n kwds[\"value\"] = str(default)\n if label is not None:\n kwds[\"description\"] = label\n w = cls(**kwds)\n w.layout.max_width = str(width+1) + \"em\"\n return w\n\n\ndef slider(vmin, vmax=None, step_size=None, default=None, label=None, display_value=True, _range=False):\n \"\"\"\n A slider widget.\n\n INPUT:\n\n For a numeric slider (select a value from a range):\n\n - ``vmin``, ``vmax`` -- minimum and maximum value\n\n - ``step_size`` -- the step size\n\n For a selection slider (select a value from a list of values):\n\n - ``vmin`` -- a list of possible values for the slider\n\n For all sliders:\n\n - ``default`` -- initial value\n\n - ``label`` -- optional label\n\n - ``display_value`` -- (boolean) if ``True``, display the current\n value.\n\n EXAMPLES::\n\n sage: from sage.repl.ipython_kernel.all_jupyter import slider\n sage: slider(5, label=\"slide me\")\n TransformIntSlider(value=5, description=u'slide me', min=5)\n sage: slider(5, 20)\n TransformIntSlider(value=5, max=20, min=5)\n sage: slider(5, 20, 0.5)\n TransformFloatSlider(value=5.0, max=20.0, min=5.0, step=0.5)\n sage: slider(5, 20, default=12)\n TransformIntSlider(value=12, max=20, min=5)\n\n The parent of the inputs determines the parent of the value::\n\n sage: w = slider(5); w\n TransformIntSlider(value=5, min=5)\n sage: parent(w.get_interact_value())\n Integer Ring\n sage: w = slider(int(5)); 
w\n IntSlider(value=5, min=5)\n sage: parent(w.get_interact_value())\n <... 'int'>\n sage: w = slider(5, 20, step_size=RDF(\"0.1\")); w\n TransformFloatSlider(value=5.0, max=20.0, min=5.0)\n sage: parent(w.get_interact_value())\n Real Double Field\n sage: w = slider(5, 20, step_size=10/3); w\n SelectionSlider(index=2, options=(5, 25/3, 35/3, 15, 55/3), value=35/3)\n sage: parent(w.get_interact_value())\n Rational Field\n\n Symbolic input is evaluated numerically::\n\n sage: w = slider(e, pi); w\n TransformFloatSlider(value=2.718281828459045, max=3.141592653589793, min=2.718281828459045)\n sage: parent(w.get_interact_value())\n Real Field with 53 bits of precision\n\n For a selection slider, the default is adjusted to one of the\n possible values::\n\n sage: slider(range(10), default=17/10)\n SelectionSlider(index=2, options=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), value=2)\n\n TESTS::\n\n sage: slider(range(5), range(5))\n Traceback (most recent call last):\n ...\n TypeError: unexpected argument 'vmax' for a selection slider\n sage: slider(range(5), step_size=2)\n Traceback (most recent call last):\n ...\n TypeError: unexpected argument 'step_size' for a selection slider\n sage: slider(5).readout\n True\n sage: slider(5, display_value=False).readout\n False\n\n Symbolic subrings work like ``SR``::\n\n sage: SCR = SR.subring(no_variables=True)\n sage: w = slider(SCR(e), SCR(pi)); w\n TransformFloatSlider(value=2.718281828459045, max=3.141592653589793, min=2.718281828459045)\n sage: parent(w.get_interact_value())\n Real Field with 53 bits of precision\n \"\"\"\n kwds = {\"readout\": display_value}\n if label:\n kwds[\"description\"] = label\n\n # If vmin is iterable, return a SelectionSlider\n if isinstance(vmin, Iterable):\n if vmax is not None:\n raise TypeError(\"unexpected argument 'vmax' for a selection slider\")\n if step_size is not None:\n raise TypeError(\"unexpected argument 'step_size' for a selection slider\")\n if _range:\n # https://github.com/ipython/ipywidgets/issues/760\n raise NotImplementedError(\"range_slider does not support a list of values\")\n options = list(vmin)\n # Find default in options\n def err(v):\n if v is default:\n return (-1, 0)\n try:\n if v == default:\n return (0, 0)\n return (0, abs(v - default))\n except Exception:\n return (1, 0)\n kwds[\"options\"] = options\n if default is not None:\n kwds[\"value\"] = min(options, key=err)\n return SelectionSlider(**kwds)\n\n if default is not None:\n kwds[\"value\"] = default\n\n # Sum all input numbers to figure out type/parent\n p = parent(sum(x for x in (vmin, vmax, step_size) if x is not None))\n\n # Change SR to RR\n if isinstance(p, sage.rings.abc.SymbolicRing):\n from sage.rings.real_mpfr import RR\n p = RR\n\n # Convert all inputs to the common parent\n if vmin is not None:\n vmin = p(vmin)\n if vmax is not None:\n vmax = p(vmax)\n if step_size is not None:\n step_size = p(step_size)\n\n def tuple_elements_p(t):\n \"Convert all entries of the tuple `t` to `p`\"\n return tuple(p(x) for x in t)\n\n zero = p()\n if isinstance(zero, Integral):\n if p is int:\n if _range:\n cls = IntRangeSlider\n else:\n cls = IntSlider\n else:\n if _range:\n kwds[\"transform\"] = tuple_elements_p\n cls = TransformIntRangeSlider\n else:\n kwds[\"transform\"] = p\n cls = TransformIntSlider\n elif isinstance(zero, Rational):\n # Rational => implement as SelectionSlider\n if _range:\n # https://github.com/ipython/ipywidgets/issues/760\n raise NotImplementedError(\"range_slider does not support rational numbers\")\n vmin, vmax, value = 
_get_min_max_value(vmin, vmax, default, step_size)\n kwds[\"value\"] = value\n kwds[\"options\"] = srange(vmin, vmax, step_size, include_endpoint=True)\n return SelectionSlider(**kwds)\n elif isinstance(zero, Real):\n if p is float:\n if _range:\n cls = FloatRangeSlider\n else:\n cls = FloatSlider\n else:\n if _range:\n kwds[\"transform\"] = tuple_elements_p\n cls = TransformFloatRangeSlider\n else:\n kwds[\"transform\"] = p\n cls = TransformFloatSlider\n else:\n raise TypeError(\"unknown parent {!r} for slider\".format(p))\n\n kwds[\"min\"] = vmin\n if vmax is not None:\n kwds[\"max\"] = vmax\n if step_size is not None:\n kwds[\"step\"] = step_size\n return cls(**kwds)\n\n\ndef range_slider(*args, **kwds):\n \"\"\"\n A slider widget to select a range of values.\n\n INPUT:\n\n - ``vmin``, ``vmax`` -- minimum and maximum value\n\n - ``step_size`` -- the step size\n\n - ``default`` -- initial value, given as a 2-tuple\n\n - ``label`` -- optional label\n\n - ``display_value`` -- (boolean) if ``True``, display the current\n value.\n\n EXAMPLES::\n\n sage: from sage.repl.ipython_kernel.all_jupyter import range_slider\n sage: range_slider(5, label=\"slide me\")\n TransformIntRangeSlider(value=(28, 76), description=u'slide me', min=5)\n sage: range_slider(5, 20)\n TransformIntRangeSlider(value=(8, 16), max=20, min=5)\n sage: range_slider(5, 20, 0.5)\n TransformFloatRangeSlider(value=(8.75, 16.25), max=20.0, min=5.0, step=0.5)\n sage: range_slider(5, 20, default=(12,15))\n TransformIntRangeSlider(value=(12, 15), max=20, min=5)\n\n The parent of the inputs determines the parent of the value::\n\n sage: w = range_slider(5); w\n TransformIntRangeSlider(value=(28, 76), min=5)\n sage: [parent(x) for x in w.get_interact_value()]\n [Integer Ring, Integer Ring]\n sage: w = range_slider(int(5)); w\n IntRangeSlider(value=(28, 76), min=5)\n sage: [parent(x) for x in w.get_interact_value()]\n [<... 'int'>, <... 
'int'>]\n sage: w = range_slider(5, 20, step_size=RDF(\"0.1\")); w\n TransformFloatRangeSlider(value=(8.75, 16.25), max=20.0, min=5.0)\n sage: [parent(x) for x in w.get_interact_value()]\n [Real Double Field, Real Double Field]\n\n Unfortunately, rational numbers are not supported::\n\n sage: w = range_slider(5, 20, step_size=10/3); w\n Traceback (most recent call last):\n ...\n NotImplementedError: range_slider does not support rational numbers\n\n TESTS::\n\n sage: range_slider(range(5))\n Traceback (most recent call last):\n ...\n NotImplementedError: range_slider does not support a list of values\n \"\"\"\n kwds[\"_range\"] = True\n return slider(*args, **kwds)\n\n\ndef checkbox(default=True, label=None):\n \"\"\"\n A checkbox widget.\n\n INPUT:\n\n - ``default`` -- (boolean) initial value\n\n - ``label`` -- optional label\n\n EXAMPLES::\n\n sage: from sage.repl.ipython_kernel.all_jupyter import checkbox\n sage: checkbox(label=\"toggle me\")\n Checkbox(value=True, description=u'toggle me')\n sage: checkbox(default=0)\n Checkbox(value=False)\n \"\"\"\n kwds = {\"value\": bool(default)}\n if label is not None:\n kwds[\"description\"] = label\n return Checkbox(**kwds)\n\n\ndef selector(values, label=None, default=None, nrows=None, ncols=None, width=None, buttons=False):\n \"\"\"\n A widget to select a value from a given list of values.\n\n This is rendered as a dropdown box (if ``buttons`` is False) or\n as a list of buttons (if ``buttons`` is True).\n\n INPUT:\n\n - ``values`` -- a list of values to choose from (see examples below\n for the accepted formats for this)\n\n - ``label`` -- optional label\n\n - ``default`` -- initial value\n\n - ``buttons`` -- (boolean) if True, display buttons instead of a\n dropdown box\n\n EXAMPLES::\n\n sage: from sage.repl.ipython_kernel.all_jupyter import selector\n sage: selector(range(5), label=\"choose one\")\n Dropdown(description=u'choose one', options=(0, 1, 2, 3, 4), value=0)\n sage: selector(range(5), buttons=True, default=4)\n ToggleButtons(index=4, options=(0, 1, 2, 3, 4), value=4)\n\n Apart from a simple list, ``values`` can be given as a list of\n 2-tuples ``(value, label)``::\n\n sage: selector([(1,\"one\"), (2,\"two\"), (3,\"three\")])\n Dropdown(options=(('one', 1), ('two', 2), ('three', 3)), value=1)\n sage: selector([(1,\"one\"), (2,\"two\"), (3,\"three\")], buttons=True)\n ToggleButtons(options=(('one', 1), ('two', 2), ('three', 3)), value=1)\n\n A dict of ``label:value`` pairs is also allowed. Since a ``dict``\n is not ordered, it is better to use an :class:`OrderedDict`::\n\n sage: from collections import OrderedDict\n sage: selector(OrderedDict(one=1, two=2, three=3))\n Dropdown(options=OrderedDict([('one', 1), ('two', 2), ('three', 3)]), value=1)\n sage: selector(OrderedDict(one=1, two=2, three=3), buttons=True)\n ToggleButtons(options=OrderedDict([('one', 1), ('two', 2), ('three', 3)]), value=1)\n\n The values can be any kind of object:\n\n sage: selector([sin(x^2), GF(29), EllipticCurve('37a1')])\n Dropdown(options=(sin(x^2), Finite Field of size 29, Elliptic Curve defined by y^2 + y = x^3 - x over Rational Field), value=sin(x^2))\n \"\"\"\n if isinstance(values, Sequence):\n values = list(values)\n if values:\n v0 = values[0]\n if isinstance(v0, tuple) and len(v0) == 2:\n # Change [(val0, lbl0), ...] 
to [(lbl0, val0), ...]\n values = [(str(lbl), val) for (val, lbl) in values]\n kwds = {\"options\": values}\n if buttons:\n cls = ToggleButtons\n elif nrows is not None or ncols is not None:\n # For compatibility with SageNB, these keywords are recognized\n # but their value is ignored\n cls = ToggleButtons\n else:\n cls = Dropdown\n if default is not None:\n kwds[\"value\"] = default\n if label is not None:\n kwds[\"description\"] = label\n return cls(**kwds)\n\n\ndef input_grid(nrows, ncols, default=None, label=None, to_value=None, width=4):\n \"\"\"\n A widget consisting of a grid (matrix) of textboxes.\n\n The values entered in the textboxes are evaluated (using\n :func:`sage_eval`). These are stored as a list of lists on the\n ``value`` attribute. The value of this widget for an interactive\n function is the result of calling ``to_value`` on this list of\n lists.\n\n INPUT:\n\n - ``nrows``, ``ncols`` -- number of rows and columns in the grid\n\n - ``default`` -- initial value (given as a list of lists, a single\n constant value or a flat list)\n\n - ``label`` -- optional label\n\n - ``to_value`` -- function to be called to get the value for\n interactive functions\n\n - ``width`` -- width of each textbox\n\n EXAMPLES::\n\n sage: from sage.repl.ipython_kernel.all_jupyter import input_grid\n sage: input_grid(2, 2, default=42, label=\"answers\")\n Grid(value=[[42, 42], [42, 42]], children=(Label(value=u'answers'), VBox(children=(EvalText(value=u'42', layout=Layout(max_width=u'5em')), EvalText(value=u'42', layout=Layout(max_width=u'5em')))), VBox(children=(EvalText(value=u'42', layout=Layout(max_width=u'5em')), EvalText(value=u'42', layout=Layout(max_width=u'5em'))))))\n sage: w = input_grid(2, 2, default=[[cos(x), sin(x)], [-sin(x), cos(x)]], to_value=matrix); w\n Grid(value=[[cos(x), sin(x)], [-sin(x), cos(x)]], children=(Label(value=u''), VBox(children=(EvalText(value=u'cos(x)', layout=Layout(max_width=u'5em')), EvalText(value=u'-sin(x)', layout=Layout(max_width=u'5em')))), VBox(children=(EvalText(value=u'sin(x)', layout=Layout(max_width=u'5em')), EvalText(value=u'cos(x)', layout=Layout(max_width=u'5em'))))))\n sage: w.get_interact_value()\n [ cos(x) sin(x)]\n [-sin(x) cos(x)]\n sage: w = input_grid(2, 2, default=[1, x, x^2, x^3], to_value=lambda x: x[1][1]); w\n Grid(value=[[1, x], [x^2, x^3]], children=(Label(value=u''), VBox(children=(EvalText(value=u'1', layout=Layout(max_width=u'5em')), EvalText(value=u'x^2', layout=Layout(max_width=u'5em')))), VBox(children=(EvalText(value=u'x', layout=Layout(max_width=u'5em')), EvalText(value=u'x^3', layout=Layout(max_width=u'5em'))))))\n sage: w.get_interact_value()\n x^3\n \"\"\"\n kwds = {\"transform\": to_value}\n if label is not None:\n kwds[\"description\"] = label\n\n # Parse default\n if not isinstance(default, list):\n # Single value\n default = [[default] * ncols] * nrows\n if all(isinstance(elt, list) for elt in default):\n # List of lists\n pass\n else:\n # Flat list\n default = [[default[i * ncols + j] for j in range(ncols)] for i in range(nrows)]\n\n def make_widget(i, j):\n return input_box(str(default[i][j]), width=width)\n\n grid = Grid(nrows, ncols, make_widget, **kwds)\n return grid\n\n\ndef color_selector(default=(0, 0, 1), label=None, widget=None, hide_box=False):\n \"\"\"\n A widget for choosing a color.\n\n INPUT:\n\n - ``default`` -- initial value\n\n - ``label`` -- optional label\n\n - ``hide_box`` -- (boolean) if True, do not show the textbox\n\n EXAMPLES::\n\n sage: from sage.repl.ipython_kernel.all_jupyter import 
color_selector\n sage: w = color_selector(\"orange\", label=\"color me\"); w\n SageColorPicker(value='#ffa500', description=u'color me')\n sage: w.get_interact_value()\n RGB color (1.0, 0.6470588235294118, 0.0)\n sage: color_selector(Color(0.1, 0.2, 0.3))\n SageColorPicker(value='#19334c')\n \"\"\"\n # widget argument is silently ignored\n kwds = {\"value\": Color(default).html_color(),\n \"concise\": hide_box}\n if label is not None:\n kwds[\"description\"] = label\n return SageColorPicker(**kwds)\n", "id": "1033436", "language": "Python", "matching_score": 1.5613449811935425, "max_stars_count": 1742, "path": "src/sage/repl/ipython_kernel/widgets_sagenb.py" }, { "content": "r\"\"\"\nModular forms for Hecke triangle groups\n\nAUTHORS:\n\n- <NAME> (2013): initial version\n\n\"\"\"\n\n#*****************************************************************************\n# Copyright (C) 2013-2014 <NAME> <<EMAIL>>\n#\n# Distributed under the terms of the GNU General Public License (GPL)\n# as published by the Free Software Foundation; either version 2 of\n# the License, or (at your option) any later version.\n# http://www.gnu.org/licenses/\n#*****************************************************************************\n\nfrom sage.rings.integer_ring import ZZ\nfrom sage.rings.rational_field import QQ\nfrom sage.rings.infinity import infinity\nfrom sage.rings.all import AlgebraicField, I\nfrom sage.rings.polynomial.polynomial_ring import is_PolynomialRing\nfrom sage.rings.power_series_ring import is_PowerSeriesRing\nfrom sage.rings.laurent_series_ring import is_LaurentSeriesRing\nfrom sage.modules.free_module_element import is_FreeModuleElement\nfrom sage.matrix.constructor import matrix\nfrom sage.modules.free_module_element import vector\nfrom sage.rings.integer import Integer\nfrom sage.structure.all import parent\n\nfrom sage.misc.cachefunc import cached_method\n\nfrom .abstract_ring import FormsRing_abstract\n\n\nclass FormsSpace_abstract(FormsRing_abstract):\n r\"\"\"\n Abstract (Hecke) forms space.\n\n This should never be called directly. 
Instead one should\n instantiate one of the derived classes of this class.\n \"\"\"\n\n from .element import FormsElement\n Element = FormsElement\n\n def __init__(self, group, base_ring, k, ep, n):\n r\"\"\"\n Abstract (Hecke) forms space.\n\n INPUT:\n\n - ``group`` -- The Hecke triangle group (default: ``HeckeTriangleGroup(3)``)\n\n - ``k`` -- The weight (default: `0`)\n\n - ``ep`` -- The epsilon (default: ``None``).\n If ``None``, then k*(n-2) has to be divisible by `2` and\n ``ep=(-1)^(k*(n-2)/2)`` is used.\n\n - ``base_ring`` -- The base_ring (default: `\\Z`).\n\n OUTPUT:\n\n The corresponding abstract (Hecke) forms space.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import ModularForms\n sage: MF = ModularForms(n=5, base_ring=ZZ, k=6, ep=-1)\n sage: MF\n ModularForms(n=5, k=6, ep=-1) over Integer Ring\n sage: MF.group()\n Hecke triangle group for n = 5\n sage: MF.base_ring()\n Integer Ring\n sage: MF.weight()\n 6\n sage: MF.ep()\n -1\n sage: MF.has_reduce_hom()\n True\n sage: MF.is_homogeneous()\n True\n \"\"\"\n\n #from space import canonical_parameters\n #(group, base_ring, k, ep, n) = canonical_parameters(group, base_ring, k, ep, n)\n\n super(FormsSpace_abstract, self).__init__(group=group, base_ring=base_ring, red_hom=True, n=n)\n #self.register_embedding(self.hom(lambda f: f.parent().graded_ring()(f), codomain=self.graded_ring()))\n\n self._weight = k\n self._ep = ep\n (self._l1,self._l2) = self.weight_parameters()\n self._module = None\n self._ambient_space = self\n\n def _repr_(self):\n r\"\"\"\n Return the string representation of ``self``.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import QuasiModularForms\n sage: QuasiModularForms(n=4, k=2, ep=-1)\n QuasiModularForms(n=4, k=2, ep=-1) over Integer Ring\n \"\"\"\n\n return \"{}Forms(n={}, k={}, ep={}) over {}\".format(self._analytic_type.analytic_space_name(), self._group.n(), self._weight, self._ep, self._base_ring)\n\n def _latex_(self):\n r\"\"\"\n Return the LaTeX representation of ``self``.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import QuasiWeakModularForms\n sage: latex(QuasiWeakModularForms())\n QM^!_{ n=3 }(0,\\ 1)(\\Bold{Z})\n \"\"\"\n\n from sage.misc.latex import latex\n return r\"{}_{{ n={} }}({},\\ {})({})\".format(self._analytic_type.latex_space_name(), self._group.n(), self._weight, self._ep, latex(self._base_ring))\n\n def _element_constructor_(self, el):\n r\"\"\"\n Return ``el`` coerced into this forms space.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.graded_ring import MeromorphicModularFormsRing\n sage: from sage.modular.modform_hecketriangle.space import ModularForms, QuasiWeakModularForms\n sage: MF = ModularForms(k=12, ep=1)\n sage: (x,y,z,d) = MF.pol_ring().gens()\n\n sage: Delta = MeromorphicModularFormsRing().Delta()\n sage: Delta.parent()\n MeromorphicModularFormsRing(n=3) over Integer Ring\n sage: MF(Delta)\n q - 24*q^2 + 252*q^3 - 1472*q^4 + O(q^5)\n sage: MF(Delta).parent() == MF\n True\n\n sage: E2 = MF.E2()\n sage: e2 = QuasiWeakModularForms(n=infinity, k=2, ep=-1)(E2)\n sage: e2\n 1 - 24*q^2 - 72*q^4 + O(q^5)\n sage: e2.parent()\n QuasiWeakModularForms(n=+Infinity, k=2, ep=-1) over Integer Ring\n sage: e2.as_ring_element()\n (-f_i + 3*E2)/2\n sage: MF(x^3)\n 1 + 720*q + 179280*q^2 + 16954560*q^3 + 396974160*q^4 + O(q^5)\n sage: MF(x^3).parent() == MF\n True\n\n sage: qexp = Delta.q_expansion(prec=2)\n sage: qexp\n q + O(q^2)\n sage: qexp.parent()\n Power Series Ring in q over Fraction 
Field of Univariate Polynomial Ring in d over Integer Ring\n sage: MF(qexp)\n q - 24*q^2 + 252*q^3 - 1472*q^4 + O(q^5)\n sage: MF(qexp) == MF(Delta)\n True\n\n sage: QF = QuasiWeakModularForms(n=8, k=10/3, ep=-1)\n sage: QF.default_prec(2)\n sage: el2 = QF.quasi_part_gens(min_exp=-1)[4]\n sage: el2.reduced_parent()\n QuasiWeakModularForms(n=8, k=10/3, ep=-1) over Integer Ring\n sage: prec = QF.required_laurent_prec(min_exp=-1)\n sage: qexp2 = el2.q_expansion(prec=prec)\n sage: qexp2\n q^-1 - 19/(64*d) - 7497/(262144*d^2)*q + 15889/(8388608*d^3)*q^2 + 543834047/(1649267441664*d^4)*q^3 + 711869853/(43980465111040*d^5)*q^4 + O(q^5)\n sage: qexp2.parent()\n Laurent Series Ring in q over Fraction Field of Univariate Polynomial Ring in d over Integer Ring\n sage: QF(qexp2)\n q^-1 - 19/(64*d) - 7497/(262144*d^2)*q + O(q^2)\n sage: QF(qexp2).reduced_parent()\n QuasiWeakModularForms(n=8, k=10/3, ep=-1) over Integer Ring\n sage: QF(qexp2) == el2\n True\n\n sage: QF = QuasiWeakModularForms(n=infinity, k=2, ep=-1)\n sage: el3 = QF.f_i() + QF.f_i()^3/QF.E4()\n sage: prec = QF.required_laurent_prec(order_1=-1)\n sage: qexp3 = el3.q_expansion(prec=prec)\n sage: qexp3\n 2 - 7/(4*d)*q + 195/(256*d^2)*q^2 - 903/(4096*d^3)*q^3 + 41987/(1048576*d^4)*q^4 - 181269/(33554432*d^5)*q^5 + O(q^6)\n sage: QF.construct_quasi_form(qexp3, check=False) == el3\n False\n sage: QF.construct_quasi_form(qexp3, order_1=-1) == el3\n True\n\n sage: MF([0,1]) == MF(Delta)\n True\n sage: MF([1,0]) == MF(x^3) - 720*MF(Delta)\n True\n\n sage: vec = MF(Delta).coordinate_vector()\n sage: vec\n (0, 1)\n sage: vec.parent()\n Vector space of dimension 2 over Fraction Field of Univariate Polynomial Ring in d over Integer Ring\n sage: vec in MF.module()\n True\n sage: MF(vec) == MF(Delta)\n True\n\n sage: subspace = MF.subspace([MF(Delta)])\n sage: subspace\n Subspace of dimension 1 of ModularForms(n=3, k=12, ep=1) over Integer Ring\n sage: subspace(MF(Delta)) == subspace(d*(x^3-y^2)) == subspace(qexp) == subspace([0,1]) == subspace(vec) == subspace.gen()\n True\n sage: subspace(MF(Delta)).parent() == subspace(d*(x^3-y^2)).parent() == subspace(qexp).parent() == subspace([0,1]).parent() == subspace(vec).parent()\n True\n sage: subspace([1]) == subspace.gen()\n True\n sage: ssvec = subspace(vec).coordinate_vector()\n sage: ssvec\n (1)\n sage: ssvec.parent()\n Vector space of dimension 1 over Fraction Field of Univariate Polynomial Ring in d over Integer Ring\n sage: ambvec = subspace(vec).ambient_coordinate_vector()\n sage: ambvec\n (0, 1)\n sage: ambvec.parent()\n Vector space of degree 2 and dimension 1 over Fraction Field of Univariate Polynomial Ring in d over Integer Ring\n Basis matrix:\n [0 1]\n sage: subspace(ambvec) == subspace(vec) and subspace(ambvec).parent() == subspace(vec).parent()\n True\n \"\"\"\n\n from .graded_ring_element import FormsRingElement\n if isinstance(el, FormsRingElement):\n if (self.hecke_n() == infinity and el.hecke_n() == ZZ(3)):\n el_f = el._reduce_d()._rat\n (x,y,z,d) = self.pol_ring().gens()\n\n num_sub = el_f.numerator().subs( x=(y**2 + 3*x)/ZZ(4), y=(9*x*y - y**3)/ZZ(8), z=(3*z - y)/ZZ(2))\n denom_sub = el_f.denominator().subs( x=(y**2 + 3*x)/ZZ(4), y=(9*x*y - y**3)/ZZ(8), z=(3*z - y)/ZZ(2))\n new_num = num_sub.numerator()*denom_sub.denominator()\n new_denom = denom_sub.numerator()*num_sub.denominator()\n\n el = self._rat_field(new_num) / self._rat_field(new_denom)\n elif self.group() == el.group():\n el = el._rat\n else:\n raise ValueError(\"{} has group {} != {}\".format(el, el.group(), 
self.group()))\n return self.element_class(self, el)\n # This assumes that the series corresponds to a _weakly\n # holomorphic_ (quasi) form. It also assumes that the form is\n # holomorphic at -1 for n=infinity (this assumption however\n # can be changed in construct_form\n # resp. construct_quasi_form))\n P = parent(el)\n if is_LaurentSeriesRing(P) or is_PowerSeriesRing(P):\n if (self.is_modular()):\n return self.construct_form(el)\n else:\n return self.construct_quasi_form(el)\n if is_FreeModuleElement(el) and (self.module() is P or self.ambient_module() is P):\n return self.element_from_ambient_coordinates(el)\n if (not self.is_ambient()) and (isinstance(el, list) or isinstance(el, tuple) or is_FreeModuleElement(el)) and len(el) == self.rank():\n try:\n return self.element_from_coordinates(el)\n except (ArithmeticError, TypeError):\n pass\n if self.ambient_module() and self.ambient_module().has_coerce_map_from(P):\n return self.element_from_ambient_coordinates(self.ambient_module()(el))\n if (isinstance(el,list) or isinstance(el, tuple)) and len(el) == self.degree():\n try:\n return self.element_from_ambient_coordinates(el)\n except (ArithmeticError, TypeError):\n pass\n\n return self.element_class(self, el)\n\n def _coerce_map_from_(self, S):\n r\"\"\"\n Return whether or not there exists a coercion from ``S`` to ``self``.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import QuasiWeakModularForms, ModularForms, CuspForms, ZeroForm\n sage: MF1 = QuasiWeakModularForms(n=4, base_ring=CC, k=0, ep=1)\n sage: MF2 = ModularForms(n=4, k=24, ep=1)\n sage: MF3 = ModularForms(n=4, k=24, ep=-1)\n sage: MF4 = CuspForms(n=4, k=0, ep=1)\n sage: MF5 = ZeroForm(n=4, k=10, ep=-1)\n sage: MF6 = QuasiWeakModularForms(n=3, k=24, ep=1)\n sage: MF7 = QuasiWeakModularForms(n=infinity, k=24, ep=1)\n sage: subspace1 = MF3.subspace([MF3.gen(0), MF3.gen(1)])\n sage: subspace2 = MF3.subspace([MF3.gen(2)])\n sage: subspace3 = MF3.subspace([MF3.gen(0), MF3.gen(0)+MF3.gen(2)])\n\n sage: MF2.has_coerce_map_from(MF3)\n False\n sage: MF1.has_coerce_map_from(MF4)\n True\n sage: MF4.has_coerce_map_from(MF5)\n True\n sage: MF4.has_coerce_map_from(ZZ)\n False\n sage: MF1.has_coerce_map_from(ZZ)\n True\n sage: MF7.has_coerce_map_from(MF6)\n True\n sage: MF7.has_coerce_map_from(MF2)\n False\n sage: MF3.has_coerce_map_from(subspace1)\n True\n sage: subspace1.has_coerce_map_from(MF3)\n False\n sage: subspace3.has_coerce_map_from(subspace1)\n False\n sage: subspace3.has_coerce_map_from(subspace2)\n True\n \"\"\"\n\n from .space import ZeroForm\n from .subspace import SubSpaceForms\n if ( isinstance(S, ZeroForm)):\n return True\n elif ( isinstance(S, SubSpaceForms)\\\n and isinstance(self, SubSpaceForms) ):\n if (self.ambient_space().has_coerce_map_from(S.ambient_space())):\n S2 = S.change_ambient_space(self.ambient_space())\n return self.module().has_coerce_map_from(S2.module())\n else:\n return False\n elif ( isinstance(S, FormsSpace_abstract)\\\n and self.graded_ring().has_coerce_map_from(S.graded_ring())\\\n and S.weight() == self._weight\\\n and S.ep() == self._ep\\\n and not isinstance(self, SubSpaceForms)):\n return True\n else:\n return self.contains_coeff_ring() \\\n and self.coeff_ring().has_coerce_map_from(S)\n\n # Since forms spaces are modules instead of rings\n # we have to manually define one().\n # one() allows to take the power 0 of an element\n @cached_method\n def one(self):\n r\"\"\"\n Return the one element from the corresponding space of constant forms.\n\n .. 
NOTE:: The one element does not lie in ``self`` in general.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import CuspForms\n sage: MF = CuspForms(k=12)\n sage: MF.Delta()^0 == MF.one()\n True\n sage: (MF.Delta()^0).parent()\n ModularForms(n=3, k=0, ep=1) over Integer Ring\n \"\"\"\n return self.extend_type(\"holo\", ring=True)(1).reduce()\n\n def is_ambient(self):\n r\"\"\"\n Return whether ``self`` is an ambient space.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import ModularForms\n sage: MF = ModularForms(k=12)\n sage: MF.is_ambient()\n True\n sage: MF.subspace([MF.gen(0)]).is_ambient()\n False\n \"\"\"\n return self._ambient_space == self\n\n def ambient_space(self):\n r\"\"\"\n Return the ambient space of self.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import ModularForms\n sage: MF = ModularForms(k=12)\n sage: MF.ambient_space()\n ModularForms(n=3, k=12, ep=1) over Integer Ring\n sage: MF.ambient_space() == MF\n True\n sage: subspace = MF.subspace([MF.gen(0)])\n sage: subspace\n Subspace of dimension 1 of ModularForms(n=3, k=12, ep=1) over Integer Ring\n sage: subspace.ambient_space() == MF\n True\n \"\"\"\n\n return self._ambient_space\n\n def module(self):\n r\"\"\"\n Return the module associated to self.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import ModularForms\n sage: MF = ModularForms(k=12)\n sage: MF.module()\n Vector space of dimension 2 over Fraction Field of Univariate Polynomial Ring in d over Integer Ring\n sage: subspace = MF.subspace([MF.gen(0)])\n sage: subspace.module()\n Vector space of degree 2 and dimension 1 over Fraction Field of Univariate Polynomial Ring in d over Integer Ring\n Basis matrix:\n [1 0]\n \"\"\"\n\n return self._module\n\n def ambient_module(self):\n r\"\"\"\n Return the module associated to the ambient space of self.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import ModularForms\n sage: MF = ModularForms(k=12)\n sage: MF.ambient_module()\n Vector space of dimension 2 over Fraction Field of Univariate Polynomial Ring in d over Integer Ring\n sage: MF.ambient_module() == MF.module()\n True\n sage: subspace = MF.subspace([MF.gen(0)])\n sage: subspace.ambient_module() == MF.module()\n True\n \"\"\"\n\n return self._ambient_space._module\n\n def subspace(self, basis):\n r\"\"\"\n Return the subspace of ``self`` generated by ``basis``.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import ModularForms\n sage: MF = ModularForms(k=24)\n sage: MF.dimension()\n 3\n sage: subspace = MF.subspace([MF.gen(0), MF.gen(1)])\n sage: subspace\n Subspace of dimension 2 of ModularForms(n=3, k=24, ep=1) over Integer Ring\n \"\"\"\n\n from .subspace import SubSpaceForms\n return SubSpaceForms(self, basis)\n\n def change_ring(self, new_base_ring):\n r\"\"\"\n Return the same space as ``self`` but over a new base ring ``new_base_ring``.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import CuspForms\n sage: CuspForms(n=5, k=24).change_ring(CC)\n CuspForms(n=5, k=24, ep=1) over Complex Field with 53 bits of precision\n \"\"\"\n\n return self.__class__.__base__(self.group(), new_base_ring, self.weight(), self.ep())\n\n def construction(self):\n r\"\"\"\n Return a functor that constructs ``self`` (used by the coercion machinery).\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import QuasiModularForms\n sage: QuasiModularForms(n=4, k=2, ep=1, 
base_ring=CC).construction()\n (QuasiModularFormsFunctor(n=4, k=2, ep=1),\n BaseFacade(Complex Field with 53 bits of precision))\n\n sage: from sage.modular.modform_hecketriangle.space import ModularForms\n sage: MF=ModularForms(k=12)\n sage: MF.subspace([MF.gen(1)]).construction()\n (FormsSubSpaceFunctor with 1 generator for the ModularFormsFunctor(n=3, k=12, ep=1), BaseFacade(Integer Ring))\n \"\"\"\n\n from .functors import FormsSubSpaceFunctor, FormsSpaceFunctor, BaseFacade\n ambient_space_functor = FormsSpaceFunctor(self._analytic_type, self._group, self._weight, self._ep)\n\n if (self.is_ambient()):\n return (ambient_space_functor, BaseFacade(self._base_ring))\n else:\n return (FormsSubSpaceFunctor(ambient_space_functor, self._basis), BaseFacade(self._base_ring))\n\n @cached_method\n def weight(self):\n r\"\"\"\n Return the weight of (elements of) ``self``.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import QuasiModularForms\n sage: QuasiModularForms(n=16, k=16/7, ep=-1).weight()\n 16/7\n \"\"\"\n\n return self._weight\n\n @cached_method\n def ep(self):\n r\"\"\"\n Return the multiplier of (elements of) ``self``.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import QuasiModularForms\n sage: QuasiModularForms(n=16, k=16/7, ep=-1).ep()\n -1\n \"\"\"\n\n return self._ep\n\n @cached_method\n def contains_coeff_ring(self):\n r\"\"\"\n Return whether ``self`` contains its coefficient ring.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import QuasiModularForms\n sage: QuasiModularForms(k=0, ep=1, n=8).contains_coeff_ring()\n True\n sage: QuasiModularForms(k=0, ep=-1, n=8).contains_coeff_ring()\n False\n \"\"\"\n\n return ((self.AT(\"holo\") <= self._analytic_type) and (self.weight()==QQ(0)) and (self.ep()==ZZ(1)))\n\n def element_from_coordinates(self, vec):\n r\"\"\"\n If ``self`` has an associated free module, then return the element of ``self``\n corresponding to the given coordinate vector ``vec``. Otherwise raise an exception.\n\n INPUT:\n\n - ``vec`` -- A coordinate vector with respect to ``self.gens()``.\n\n OUTPUT:\n\n An element of ``self`` corresponding to the coordinate vector ``vec``.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import ModularForms\n sage: MF = ModularForms(k=24)\n sage: MF.dimension()\n 3\n sage: el = MF.element_from_coordinates([1,1,1])\n sage: el\n 1 + q + q^2 + 52611612*q^3 + 39019413208*q^4 + O(q^5)\n sage: el == MF.gen(0) + MF.gen(1) + MF.gen(2)\n True\n sage: el.parent() == MF\n True\n\n sage: subspace = MF.subspace([MF.gen(0), MF.gen(1)])\n sage: el = subspace.element_from_coordinates([1,1])\n sage: el\n 1 + q + 52611660*q^3 + 39019412128*q^4 + O(q^5)\n sage: el == subspace.gen(0) + subspace.gen(1)\n True\n sage: el.parent() == subspace\n True\n \"\"\"\n\n if not self.module():\n raise ValueError(\"No free module defined for {}\".format(self))\n basis = self.gens()\n assert(len(basis) == len(vec))\n # vec = self.module()(self.module().linear_combination_of_basis(vec))\n # this also handles the trivial case (dimension 0)\n return self(sum([vec[k]*basis[k] for k in range(0, len(vec))]))\n\n def element_from_ambient_coordinates(self, vec):\n r\"\"\"\n If ``self`` has an associated free module, then return the element of ``self``\n corresponding to the given ``vec``. 
Otherwise raise an exception.\n\n INPUT:\n\n - ``vec`` -- An element of ``self.module()`` or ``self.ambient_module()``.\n\n OUTPUT:\n\n An element of ``self`` corresponding to ``vec``.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import ModularForms\n sage: MF = ModularForms(k=24)\n sage: MF.dimension()\n 3\n sage: el = MF.element_from_ambient_coordinates([1,1,1])\n sage: el == MF.element_from_coordinates([1,1,1])\n True\n sage: el.parent() == MF\n True\n\n sage: subspace = MF.subspace([MF.gen(0), MF.gen(1)])\n sage: el = subspace.element_from_ambient_coordinates([1,1,0])\n sage: el\n 1 + q + 52611660*q^3 + 39019412128*q^4 + O(q^5)\n sage: el.parent() == subspace\n True\n \"\"\"\n\n return self(self.ambient_space().element_from_coordinates(vec))\n\n def homogeneous_part(self, k, ep):\n r\"\"\"\n Since ``self`` already is a homogeneous component return ``self``\n unless the degree differs in which case a ``ValueError`` is raised.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import QuasiMeromorphicModularForms\n sage: MF = QuasiMeromorphicModularForms(n=6, k=4)\n sage: MF == MF.homogeneous_part(4,1)\n True\n sage: MF.homogeneous_part(5,1)\n Traceback (most recent call last):\n ...\n ValueError: QuasiMeromorphicModularForms(n=6, k=4, ep=1) over Integer Ring already is homogeneous with degree (4, 1) != (5, 1)!\n \"\"\"\n\n if (k==self._weight and ep==self._ep):\n return self\n else:\n raise ValueError(\"{} already is homogeneous with degree ({}, {}) != ({}, {})!\".format(self, self._weight, self._ep, k, ep))\n\n def weight_parameters(self):\n r\"\"\"\n Check whether ``self`` has a valid weight and multiplier.\n\n If not then an exception is raised. Otherwise the two weight\n parameters corresponding to the weight and multiplier of ``self``\n are returned.\n\n The weight parameters are e.g. 
used to calculate dimensions\n or precisions of Fourier expansion.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import MeromorphicModularForms\n sage: MF = MeromorphicModularForms(n=18, k=-7, ep=-1)\n sage: MF.weight_parameters()\n (-3, 17)\n sage: (MF._l1, MF._l2) == MF.weight_parameters()\n True\n sage: (k, ep) = (MF.weight(), MF.ep())\n sage: n = MF.hecke_n()\n sage: k == 4*(n*MF._l1 + MF._l2)/(n-2) + (1-ep)*n/(n-2)\n True\n\n sage: from sage.modular.modform_hecketriangle.space import ModularForms\n sage: MF = ModularForms(n=5, k=12, ep=1)\n sage: MF.weight_parameters()\n (1, 4)\n sage: (MF._l1, MF._l2) == MF.weight_parameters()\n True\n sage: (k, ep) = (MF.weight(), MF.ep())\n sage: n = MF.hecke_n()\n sage: k == 4*(n*MF._l1 + MF._l2)/(n-2) + (1-ep)*n/(n-2)\n True\n sage: MF.dimension() == MF._l1 + 1\n True\n\n sage: MF = ModularForms(n=infinity, k=8, ep=1)\n sage: MF.weight_parameters()\n (2, 0)\n sage: MF.dimension() == MF._l1 + 1\n True\n \"\"\"\n\n n = self._group.n()\n k = self._weight\n ep = self._ep\n if (n == infinity):\n num = (k-(1-ep)) / ZZ(4)\n else:\n num = (k-(1-ep)*ZZ(n)/ZZ(n-2)) * ZZ(n-2) / ZZ(4)\n if (num.is_integral()):\n num = ZZ(num)\n if (n == infinity):\n # TODO: Figure out what to do in this case\n # (l1 and l2 are no longer defined in an analog/unique way)\n #l2 = num % ZZ(2)\n #l1 = ((num-l2)/ZZ(2)).numerator()\n ## TODO: The correct generalization seems (l1,l2) = (0,num)\n l2 = ZZ(0)\n l1 = num\n else:\n l2 = num % n\n l1 = ((num-l2)/n).numerator()\n else:\n raise ValueError(\"Invalid or non-occurring weight k={}, ep={}!\".format(k,ep))\n return (l1, l2)\n\n # TODO: this only makes sense for modular forms,\n # resp. needs a big adjustment for quasi modular forms\n def aut_factor(self, gamma, t):\n r\"\"\"\n The automorphy factor of ``self``.\n\n INPUT:\n\n - ``gamma`` -- An element of the group of ``self``.\n\n - ``t`` -- An element of the upper half plane.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import ModularForms\n sage: MF = ModularForms(n=8, k=4, ep=1)\n sage: full_factor = lambda mat, t: (mat[1][0]*t+mat[1][1])**4\n sage: T = MF.group().T()\n sage: S = MF.group().S()\n sage: i = AlgebraicField()(i)\n sage: z = 1 + i/2\n\n sage: MF.aut_factor(S, z)\n 3/2*I - 7/16\n sage: MF.aut_factor(-T^(-2), z)\n 1\n sage: MF.aut_factor(MF.group().V(6), z)\n 173.2640595631...? + 343.8133289126...?*I\n sage: MF.aut_factor(S, z) == full_factor(S, z)\n True\n sage: MF.aut_factor(T, z) == full_factor(T, z)\n True\n sage: MF.aut_factor(MF.group().V(6), z) == full_factor(MF.group().V(6), z)\n True\n\n sage: MF = ModularForms(n=7, k=14/5, ep=-1)\n sage: T = MF.group().T()\n sage: S = MF.group().S()\n\n sage: MF.aut_factor(S, z)\n 1.3655215324256...? + 0.056805991182877...?*I\n sage: MF.aut_factor(-T^(-2), z)\n 1\n sage: MF.aut_factor(S, z) == MF.ep() * (z/i)^MF.weight()\n True\n sage: MF.aut_factor(MF.group().V(6), z)\n 13.23058830577...? + 15.71786610686...?*I\n \"\"\"\n\n if (gamma.is_translation()):\n return ZZ(1)\n elif (gamma.is_reflection()):\n return self._ep * (t/AlgebraicField()(I))**self._weight\n else:\n L = [v for v in gamma.word_S_T()[0]]\n aut_f = ZZ(1)\n while (len(L) > 0):\n M = L.pop(-1)\n aut_f *= self.aut_factor(M, t)\n t = M.acton(t)\n return aut_f\n\n @cached_method\n def F_simple(self, order_1=ZZ(0)):\n r\"\"\"\n Return a (the most) simple normalized element of ``self``\n corresponding to the weight parameters ``l1=self._l1`` and\n ``l2=self._l2``. 
If the element does not lie in ``self`` the\n type of its parent is extended accordingly.\n\n The main part of the element is given by the ``(l1 - order_1)``-th power\n of ``f_inf``, up to a small holomorphic correction factor.\n\n INPUT:\n\n - ``order_1`` -- An integer (default: 0) denoting the desired order at\n ``-1`` in the case ``n = infinity``.\n If ``n != infinity`` the parameter is ignored.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import WeakModularForms\n sage: MF = WeakModularForms(n=18, k=-7, ep=-1)\n sage: MF.disp_prec(1)\n sage: MF.F_simple()\n q^-3 + 16/(81*d)*q^-2 - 4775/(104976*d^2)*q^-1 - 14300/(531441*d^3) + O(q)\n sage: MF.F_simple() == MF.f_inf()^MF._l1 * MF.f_rho()^MF._l2 * MF.f_i()\n True\n\n sage: from sage.modular.modform_hecketriangle.space import CuspForms, ModularForms\n sage: MF = CuspForms(n=5, k=2, ep=-1)\n sage: MF._l1\n -1\n sage: MF.F_simple().parent()\n WeakModularForms(n=5, k=2, ep=-1) over Integer Ring\n\n sage: MF = ModularForms(n=infinity, k=8, ep=1)\n sage: MF.F_simple().reduced_parent()\n ModularForms(n=+Infinity, k=8, ep=1) over Integer Ring\n sage: MF.F_simple()\n q^2 - 16*q^3 + 120*q^4 + O(q^5)\n sage: MF.F_simple(order_1=2)\n 1 + 32*q + 480*q^2 + 4480*q^3 + 29152*q^4 + O(q^5)\n \"\"\"\n\n (x,y,z,d) = self.rat_field().gens()\n n = self.hecke_n()\n\n if (n == infinity):\n order_1 = ZZ(order_1)\n order_inf = self._l1 - order_1\n\n finf_pol = d*(x - y**2)\n rat = finf_pol**order_inf * x**order_1 * y**(ZZ(1-self._ep)/ZZ(2))\n else:\n order_inf = self._l1\n order_1 = order_inf\n\n finf_pol = d*(x**n - y**2)\n rat = finf_pol**self._l1 * x**self._l2 * y**(ZZ(1-self._ep)/ZZ(2))\n\n if (order_inf > 0 and order_1 > 0):\n new_space = self.extend_type(\"cusp\")\n elif (order_inf >=0 and order_1 >= 0):\n new_space = self.extend_type(\"holo\")\n else:\n new_space = self.extend_type(\"weak\")\n\n return new_space(rat)\n\n def Faber_pol(self, m, order_1=ZZ(0), fix_d = False, d_num_prec = None):\n r\"\"\"\n Return the ``m``'th Faber polynomial of ``self``.\n\n Namely a polynomial ``P(q)`` such that ``P(J_inv)*F_simple(order_1)``\n has a Fourier expansion of the form ``q^m + O(q^(order_inf + 1))``.\n where ``order_inf = self._l1 - order_1`` and ``d^(order_inf - m)*P(q)``\n is a monic polynomial of degree ``order_inf - m``.\n\n If ``n=infinity`` a non-trivial order of ``-1`` can be specified through the\n parameter ``order_1`` (default: 0). Otherwise it is ignored.\n\n The Faber polynomials are e.g. used to construct a basis of weakly holomorphic\n forms and to recover such forms from their initial Fourier coefficients.\n\n INPUT:\n\n - ``m`` -- An integer ``m <= order_inf = self._l1 - order_1``.\n\n - ``order_1`` -- The order at ``-1`` of F_simple (default: 0).\n This parameter is ignored if ``n != infinity``.\n\n - ``fix_d`` -- If ``False`` (default) a formal parameter is used for ``d``.\n If ``True`` then the numerical value of ``d`` is used\n (resp. 
an exact value if the group is arithmetic).\n Otherwise the given value is used for ``d``.\n\n - ``d_num_prec`` -- The precision to be used if a numerical value for ``d`` is substituted.\n Default: ``None`` in which case the default\n numerical precision of ``self.parent()`` is used.\n\n OUTPUT:\n\n The corresponding Faber polynomial ``P(q)``.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import WeakModularForms\n sage: MF = WeakModularForms(n=5, k=62/3, ep=-1)\n sage: MF.weight_parameters()\n (2, 3)\n\n sage: MF.Faber_pol(2)\n 1\n sage: MF.Faber_pol(1)\n 1/d*q - 19/(100*d)\n sage: MF.Faber_pol(0)\n 1/d^2*q^2 - 117/(200*d^2)*q + 9113/(320000*d^2)\n sage: MF.Faber_pol(-2)\n 1/d^4*q^4 - 11/(8*d^4)*q^3 + 41013/(80000*d^4)*q^2 - 2251291/(48000000*d^4)*q + 1974089431/(4915200000000*d^4)\n sage: (MF.Faber_pol(2)(MF.J_inv())*MF.F_simple()).q_expansion(prec=MF._l1+2)\n q^2 - 41/(200*d)*q^3 + O(q^4)\n sage: (MF.Faber_pol(1)(MF.J_inv())*MF.F_simple()).q_expansion(prec=MF._l1+1)\n q + O(q^3)\n sage: (MF.Faber_pol(0)(MF.J_inv())*MF.F_simple()).q_expansion(prec=MF._l1+1)\n 1 + O(q^3)\n sage: (MF.Faber_pol(-2)(MF.J_inv())*MF.F_simple()).q_expansion(prec=MF._l1+1)\n q^-2 + O(q^3)\n\n sage: MF.Faber_pol(2, fix_d=1)\n 1\n sage: MF.Faber_pol(1, fix_d=1)\n q - 19/100\n sage: MF.Faber_pol(-2, fix_d=1)\n q^4 - 11/8*q^3 + 41013/80000*q^2 - 2251291/48000000*q + 1974089431/4915200000000\n sage: (MF.Faber_pol(2, fix_d=1)(MF.J_inv())*MF.F_simple()).q_expansion(prec=MF._l1+2, fix_d=1)\n q^2 - 41/200*q^3 + O(q^4)\n sage: (MF.Faber_pol(-2)(MF.J_inv())*MF.F_simple()).q_expansion(prec=MF._l1+1, fix_d=1)\n q^-2 + O(q^3)\n\n sage: MF = WeakModularForms(n=4, k=-2, ep=1)\n sage: MF.weight_parameters()\n (-1, 3)\n\n sage: MF.Faber_pol(-1)\n 1\n sage: MF.Faber_pol(-2, fix_d=True)\n 256*q - 184\n sage: MF.Faber_pol(-3, fix_d=True)\n 65536*q^2 - 73728*q + 14364\n sage: (MF.Faber_pol(-1, fix_d=True)(MF.J_inv())*MF.F_simple()).q_expansion(prec=MF._l1+2, fix_d=True)\n q^-1 + 80 + O(q)\n sage: (MF.Faber_pol(-2, fix_d=True)(MF.J_inv())*MF.F_simple()).q_expansion(prec=MF._l1+2, fix_d=True)\n q^-2 + 400 + O(q)\n sage: (MF.Faber_pol(-3)(MF.J_inv())*MF.F_simple()).q_expansion(prec=MF._l1+2, fix_d=True)\n q^-3 + 2240 + O(q)\n\n sage: MF = WeakModularForms(n=infinity, k=14, ep=-1)\n sage: MF.Faber_pol(3)\n 1\n sage: MF.Faber_pol(2)\n 1/d*q + 3/(8*d)\n sage: MF.Faber_pol(1)\n 1/d^2*q^2 + 75/(1024*d^2)\n sage: MF.Faber_pol(0)\n 1/d^3*q^3 - 3/(8*d^3)*q^2 + 3/(512*d^3)*q + 41/(4096*d^3)\n sage: MF.Faber_pol(-1)\n 1/d^4*q^4 - 3/(4*d^4)*q^3 + 81/(1024*d^4)*q^2 + 9075/(8388608*d^4)\n sage: (MF.Faber_pol(-1)(MF.J_inv())*MF.F_simple()).q_expansion(prec=MF._l1 + 1)\n q^-1 + O(q^4)\n\n sage: MF.Faber_pol(3, order_1=-1)\n 1/d*q + 3/(4*d)\n sage: MF.Faber_pol(1, order_1=2)\n 1\n sage: MF.Faber_pol(0, order_1=2)\n 1/d*q - 3/(8*d)\n sage: MF.Faber_pol(-1, order_1=2)\n 1/d^2*q^2 - 3/(4*d^2)*q + 81/(1024*d^2)\n sage: (MF.Faber_pol(-1, order_1=2)(MF.J_inv())*MF.F_simple(order_1=2)).q_expansion(prec=MF._l1 + 1)\n q^-1 - 9075/(8388608*d^4)*q^3 + O(q^4)\n \"\"\"\n\n m = ZZ(m)\n if (self.hecke_n() == infinity):\n order_1 = ZZ(order_1)\n order_inf = self._l1 - order_1\n else:\n order_inf = self._l1\n order_1 = order_inf\n\n if (m > order_inf):\n raise ValueError(\"Invalid basis index: m = {} > {} = order_inf!\".format(m, order_inf))\n\n prec = 2*order_inf - m + 1\n d = self.get_d(fix_d=fix_d, d_num_prec=d_num_prec)\n q = self.get_q(prec=prec, fix_d=fix_d, d_num_prec=d_num_prec)\n\n simple_qexp = 
self.F_simple(order_1=order_1).q_expansion(prec=prec, fix_d=fix_d, d_num_prec=d_num_prec)\n J_qexp = self.J_inv().q_expansion(prec=order_inf - m, fix_d=fix_d, d_num_prec=d_num_prec)\n\n # The precision could be infinity, otherwise we could do this:\n #assert(temp_reminder.prec() == 1)\n temp_reminder = (1 / simple_qexp / q**(-m)).add_bigoh(1)\n\n fab_pol = q.parent()([])\n while (len(temp_reminder.coefficients()) > 0):\n temp_coeff = temp_reminder.coefficients()[0]\n temp_exp = -temp_reminder.exponents()[0]\n fab_pol += temp_coeff * (q/d)**temp_exp\n\n temp_reminder -= temp_coeff * (J_qexp/d)**temp_exp\n # The first term is zero only up to numerical errors,\n # so we manually have to remove it\n if (not d.parent().is_exact()):\n temp_reminder=temp_reminder.truncate_neg(-temp_exp+1)\n\n return fab_pol.polynomial()\n\n # very similar to Faber_pol: faber_pol(q)=Faber_pol(d*q)\n def faber_pol(self, m, order_1=ZZ(0), fix_d = False, d_num_prec = None):\n r\"\"\"\n If ``n=infinity`` a non-trivial order of ``-1`` can be specified through the\n parameter ``order_1`` (default: 0). Otherwise it is ignored.\n Return the `m`'th Faber polynomial of ``self``\n with a different normalization based on ``j_inv``\n instead of ``J_inv``.\n\n Namely a polynomial ``p(q)`` such that ``p(j_inv)*F_simple()``\n has a Fourier expansion of the form ``q^m + O(q^(order_inf + 1))``.\n where ``order_inf = self._l1 - order_1`` and ``p(q)`` is a\n monic polynomial of degree ``order_inf - m``.\n\n If ``n=infinity`` a non-trivial order of ``-1`` can be specified through the\n parameter ``order_1`` (default: 0). Otherwise it is ignored.\n\n The relation to ``Faber_pol`` is: ``faber_pol(q) = Faber_pol(d*q)``.\n\n INPUT:\n\n - ``m`` -- An integer ``m <= self._l1 - order_1``.\n\n - ``order_1`` -- The order at ``-1`` of ``F_simple`` (default: 0).\n This parameter is ignored if ``n != infinity``.\n\n - ``fix_d`` -- If ``False`` (default) a formal parameter is used for ``d``.\n If ``True`` then the numerical value of ``d`` is used\n (resp. 
an exact value if the group is arithmetic).\n Otherwise the given value is used for ``d``.\n\n - ``d_num_prec`` -- The precision to be used if a numerical value for ``d`` is substituted.\n Default: ``None`` in which case the default\n numerical precision of ``self.parent()`` is used.\n\n OUTPUT:\n\n The corresponding Faber polynomial ``p(q)``.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import WeakModularForms\n sage: MF = WeakModularForms(n=5, k=62/3, ep=-1)\n sage: MF.weight_parameters()\n (2, 3)\n\n sage: MF.faber_pol(2)\n 1\n sage: MF.faber_pol(1)\n q - 19/(100*d)\n sage: MF.faber_pol(0)\n q^2 - 117/(200*d)*q + 9113/(320000*d^2)\n sage: MF.faber_pol(-2)\n q^4 - 11/(8*d)*q^3 + 41013/(80000*d^2)*q^2 - 2251291/(48000000*d^3)*q + 1974089431/(4915200000000*d^4)\n sage: (MF.faber_pol(2)(MF.j_inv())*MF.F_simple()).q_expansion(prec=MF._l1+2)\n q^2 - 41/(200*d)*q^3 + O(q^4)\n sage: (MF.faber_pol(1)(MF.j_inv())*MF.F_simple()).q_expansion(prec=MF._l1+1)\n q + O(q^3)\n sage: (MF.faber_pol(0)(MF.j_inv())*MF.F_simple()).q_expansion(prec=MF._l1+1)\n 1 + O(q^3)\n sage: (MF.faber_pol(-2)(MF.j_inv())*MF.F_simple()).q_expansion(prec=MF._l1+1)\n q^-2 + O(q^3)\n\n sage: MF = WeakModularForms(n=4, k=-2, ep=1)\n sage: MF.weight_parameters()\n (-1, 3)\n\n sage: MF.faber_pol(-1)\n 1\n sage: MF.faber_pol(-2, fix_d=True)\n q - 184\n sage: MF.faber_pol(-3, fix_d=True)\n q^2 - 288*q + 14364\n sage: (MF.faber_pol(-1, fix_d=True)(MF.j_inv())*MF.F_simple()).q_expansion(prec=MF._l1+2, fix_d=True)\n q^-1 + 80 + O(q)\n sage: (MF.faber_pol(-2, fix_d=True)(MF.j_inv())*MF.F_simple()).q_expansion(prec=MF._l1+2, fix_d=True)\n q^-2 + 400 + O(q)\n sage: (MF.faber_pol(-3)(MF.j_inv())*MF.F_simple()).q_expansion(prec=MF._l1+2, fix_d=True)\n q^-3 + 2240 + O(q)\n\n sage: MF = WeakModularForms(n=infinity, k=14, ep=-1)\n sage: MF.faber_pol(3)\n 1\n sage: MF.faber_pol(2)\n q + 3/(8*d)\n sage: MF.faber_pol(1)\n q^2 + 75/(1024*d^2)\n sage: MF.faber_pol(0)\n q^3 - 3/(8*d)*q^2 + 3/(512*d^2)*q + 41/(4096*d^3)\n sage: MF.faber_pol(-1)\n q^4 - 3/(4*d)*q^3 + 81/(1024*d^2)*q^2 + 9075/(8388608*d^4)\n sage: (MF.faber_pol(-1)(MF.j_inv())*MF.F_simple()).q_expansion(prec=MF._l1 + 1)\n q^-1 + O(q^4)\n\n sage: MF.faber_pol(3, order_1=-1)\n q + 3/(4*d)\n sage: MF.faber_pol(1, order_1=2)\n 1\n sage: MF.faber_pol(0, order_1=2)\n q - 3/(8*d)\n sage: MF.faber_pol(-1, order_1=2)\n q^2 - 3/(4*d)*q + 81/(1024*d^2)\n sage: (MF.faber_pol(-1, order_1=2)(MF.j_inv())*MF.F_simple(order_1=2)).q_expansion(prec=MF._l1 + 1)\n q^-1 - 9075/(8388608*d^4)*q^3 + O(q^4)\n \"\"\"\n\n m = ZZ(m)\n if (self.hecke_n() == infinity):\n order_1 = ZZ(order_1)\n order_inf = self._l1 - order_1\n else:\n order_inf = self._l1\n order_1 = order_inf\n\n if (m > order_inf):\n raise ValueError(\"Invalid basis index: m = {} > {} = order_inf!\".format(m, order_inf))\n\n prec = 2*order_inf - m + 1\n d = self.get_d(fix_d=fix_d, d_num_prec=d_num_prec)\n q = self.get_q(prec=prec, fix_d=fix_d, d_num_prec=d_num_prec)\n\n simple_qexp = self.F_simple(order_1=order_1).q_expansion(prec=prec, fix_d=fix_d, d_num_prec=d_num_prec)\n j_qexp = self.j_inv().q_expansion(prec=order_inf - m, fix_d=fix_d, d_num_prec=d_num_prec)\n\n # The precision could be infinity, otherwise we could do this:\n #assert(temp_reminder.prec() == 1)\n temp_reminder = (1 / simple_qexp / q**(-m)).add_bigoh(1)\n\n fab_pol = q.parent()([])\n while (len(temp_reminder.coefficients()) > 0):\n temp_coeff = temp_reminder.coefficients()[0]\n temp_exp = -temp_reminder.exponents()[0]\n fab_pol += 
temp_coeff*q**temp_exp\n\n temp_reminder -= temp_coeff*j_qexp**temp_exp\n # The first term is zero only up to numerical errors,\n # so we manually have to remove it\n if (not d.parent().is_exact()):\n temp_reminder=temp_reminder.truncate_neg(-temp_exp+1)\n\n return fab_pol.polynomial()\n\n def F_basis_pol(self, m, order_1=ZZ(0)):\n r\"\"\"\n Returns a polynomial corresponding to the basis element of\n the corresponding space of weakly holomorphic forms of\n the same degree as ``self``. The basis element is determined\n by the property that the Fourier expansion is of the form\n ``q^m + O(q^(order_inf + 1))``, where ``order_inf = self._l1 - order_1``.\n\n If ``n=infinity`` a non-trivial order of ``-1`` can be specified through\n the parameter ``order_1`` (default: 0). Otherwise it is ignored.\n\n INPUT:\n\n - ``m`` -- An integer ``m <= self._l1``.\n\n - ``order_1`` -- The order at ``-1`` of ``F_simple`` (default: 0).\n This parameter is ignored if ``n != infinity``.\n\n OUTPUT:\n\n A polynomial in ``x,y,z,d``, corresponding to ``f_rho, f_i, E2``\n and the (possibly) transcendental parameter ``d``.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import WeakModularForms\n sage: MF = WeakModularForms(n=5, k=62/3, ep=-1)\n sage: MF.weight_parameters()\n (2, 3)\n\n sage: MF.F_basis_pol(2)\n x^13*y*d^2 - 2*x^8*y^3*d^2 + x^3*y^5*d^2\n sage: MF.F_basis_pol(1)\n (-81*x^13*y*d + 62*x^8*y^3*d + 19*x^3*y^5*d)/(-100)\n sage: MF.F_basis_pol(0)\n (141913*x^13*y + 168974*x^8*y^3 + 9113*x^3*y^5)/320000\n\n sage: MF(MF.F_basis_pol(2)).q_expansion(prec=MF._l1+2)\n q^2 - 41/(200*d)*q^3 + O(q^4)\n sage: MF(MF.F_basis_pol(1)).q_expansion(prec=MF._l1+1)\n q + O(q^3)\n sage: MF(MF.F_basis_pol(0)).q_expansion(prec=MF._l1+1)\n 1 + O(q^3)\n sage: MF(MF.F_basis_pol(-2)).q_expansion(prec=MF._l1+1)\n q^-2 + O(q^3)\n sage: MF(MF.F_basis_pol(-2)).parent()\n WeakModularForms(n=5, k=62/3, ep=-1) over Integer Ring\n\n sage: MF = WeakModularForms(n=4, k=-2, ep=1)\n sage: MF.weight_parameters()\n (-1, 3)\n\n sage: MF.F_basis_pol(-1)\n x^3/(x^4*d - y^2*d)\n sage: MF.F_basis_pol(-2)\n (9*x^7 + 23*x^3*y^2)/(32*x^8*d^2 - 64*x^4*y^2*d^2 + 32*y^4*d^2)\n\n sage: MF(MF.F_basis_pol(-1)).q_expansion(prec=MF._l1+2)\n q^-1 + 5/(16*d) + O(q)\n sage: MF(MF.F_basis_pol(-2)).q_expansion(prec=MF._l1+2)\n q^-2 + 25/(4096*d^2) + O(q)\n\n sage: MF = WeakModularForms(n=infinity, k=14, ep=-1)\n sage: MF.F_basis_pol(3)\n -y^7*d^3 + 3*x*y^5*d^3 - 3*x^2*y^3*d^3 + x^3*y*d^3\n sage: MF.F_basis_pol(2)\n (3*y^7*d^2 - 17*x*y^5*d^2 + 25*x^2*y^3*d^2 - 11*x^3*y*d^2)/(-8)\n sage: MF.F_basis_pol(1)\n (-75*y^7*d + 225*x*y^5*d - 1249*x^2*y^3*d + 1099*x^3*y*d)/1024\n sage: MF.F_basis_pol(0)\n (41*y^7 - 147*x*y^5 - 1365*x^2*y^3 - 2625*x^3*y)/(-4096)\n sage: MF.F_basis_pol(-1)\n (-9075*y^9 + 36300*x*y^7 - 718002*x^2*y^5 - 4928052*x^3*y^3 - 2769779*x^4*y)/(8388608*y^2*d - 8388608*x*d)\n\n sage: MF.F_basis_pol(3, order_1=-1)\n (-3*y^9*d^3 + 16*x*y^7*d^3 - 30*x^2*y^5*d^3 + 24*x^3*y^3*d^3 - 7*x^4*y*d^3)/(-4*x)\n sage: MF.F_basis_pol(1, order_1=2)\n -x^2*y^3*d + x^3*y*d\n sage: MF.F_basis_pol(0, order_1=2)\n (-3*x^2*y^3 - 5*x^3*y)/(-8)\n sage: MF.F_basis_pol(-1, order_1=2)\n (-81*x^2*y^5 - 606*x^3*y^3 - 337*x^4*y)/(1024*y^2*d - 1024*x*d)\n \"\"\"\n\n (x,y,z,d) = self.rat_field().gens()\n n = self._group.n()\n\n if (n ==infinity):\n order_1 = ZZ(order_1)\n order_inf = self._l1 - order_1\n finf_pol = d*(x-y**2)\n jinv_pol = x/(x-y**2)\n rat = finf_pol**order_inf * x**order_1 * y**(ZZ(1-self._ep)/ZZ(2)) * self.Faber_pol(m, order_1)(jinv_pol)\n else:\n 
order_inf = self._l1\n order_1 = order_inf\n finf_pol = d*(x**n-y**2)\n jinv_pol = x**n/(x**n-y**2)\n rat = finf_pol**order_inf * x**self._l2 * y**(ZZ(1-self._ep)/ZZ(2)) * self.Faber_pol(m)(jinv_pol)\n\n return rat\n\n def F_basis(self, m, order_1=ZZ(0)):\n r\"\"\"\n Returns a weakly holomorphic element of ``self``\n (extended if necessarily) determined by the property that\n the Fourier expansion is of the form is of the form\n ``q^m + O(q^(order_inf + 1))``, where ``order_inf = self._l1 - order_1``.\n\n In particular for all ``m <= order_inf`` these elements form\n a basis of the space of weakly holomorphic modular forms\n of the corresponding degree in case ``n!=infinity``.\n\n If ``n=infinity`` a non-trivial order of ``-1`` can be specified through\n the parameter ``order_1`` (default: 0). Otherwise it is ignored.\n\n INPUT:\n\n - ``m`` -- An integer ``m <= self._l1``.\n\n - ``order_1`` -- The order at ``-1`` of ``F_simple`` (default: 0).\n This parameter is ignored if ``n != infinity``.\n\n OUTPUT:\n\n The corresponding element in (possibly an extension of) ``self``.\n Note that the order at ``-1`` of the resulting element may be\n bigger than ``order_1`` (rare).\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import WeakModularForms, CuspForms\n sage: MF = WeakModularForms(n=5, k=62/3, ep=-1)\n sage: MF.disp_prec(MF._l1+2)\n sage: MF.weight_parameters()\n (2, 3)\n\n sage: MF.F_basis(2)\n q^2 - 41/(200*d)*q^3 + O(q^4)\n sage: MF.F_basis(1)\n q - 13071/(640000*d^2)*q^3 + O(q^4)\n sage: MF.F_basis(0)\n 1 - 277043/(192000000*d^3)*q^3 + O(q^4)\n sage: MF.F_basis(-2)\n q^-2 - 162727620113/(40960000000000000*d^5)*q^3 + O(q^4)\n sage: MF.F_basis(-2).parent() == MF\n True\n\n sage: MF = CuspForms(n=4, k=-2, ep=1)\n sage: MF.weight_parameters()\n (-1, 3)\n\n sage: MF.F_basis(-1).parent()\n WeakModularForms(n=4, k=-2, ep=1) over Integer Ring\n sage: MF.F_basis(-1).parent().disp_prec(MF._l1+2)\n sage: MF.F_basis(-1)\n q^-1 + 80 + O(q)\n sage: MF.F_basis(-2)\n q^-2 + 400 + O(q)\n\n sage: MF = WeakModularForms(n=infinity, k=14, ep=-1)\n sage: MF.F_basis(3)\n q^3 - 48*q^4 + O(q^5)\n sage: MF.F_basis(2)\n q^2 - 1152*q^4 + O(q^5)\n sage: MF.F_basis(1)\n q - 18496*q^4 + O(q^5)\n sage: MF.F_basis(0)\n 1 - 224280*q^4 + O(q^5)\n sage: MF.F_basis(-1)\n q^-1 - 2198304*q^4 + O(q^5)\n\n sage: MF.F_basis(3, order_1=-1)\n q^3 + O(q^5)\n sage: MF.F_basis(1, order_1=2)\n q - 300*q^3 - 4096*q^4 + O(q^5)\n sage: MF.F_basis(0, order_1=2)\n 1 - 24*q^2 - 2048*q^3 - 98328*q^4 + O(q^5)\n sage: MF.F_basis(-1, order_1=2)\n q^-1 - 18150*q^3 - 1327104*q^4 + O(q^5)\n \"\"\"\n\n basis_pol = self.F_basis_pol(m, order_1=order_1)\n\n if (self.hecke_n() == infinity):\n (x,y,z,d) = self.pol_ring().gens()\n if (x.divides(basis_pol.numerator()) and m > 0):\n new_space = self.extend_type(\"cusp\")\n elif (x.divides(basis_pol.denominator()) or m < 0):\n new_space = self.extend_type(\"weak\")\n else:\n new_space = self.extend_type(\"holo\")\n else:\n if (m > 0):\n new_space = self.extend_type(\"cusp\")\n elif (m >= 0):\n new_space = self.extend_type(\"holo\")\n else:\n new_space = self.extend_type(\"weak\")\n\n return new_space(basis_pol)\n\n def _canonical_min_exp(self, min_exp, order_1):\n r\"\"\"\n Return an adjusted value of ``min_exp`` and ``order_1`` corresponding\n to the analytic type of ``self``.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import CuspForms\n sage: CF = CuspForms(n=5, k=16, ep=1)\n sage: CF._canonical_min_exp(-2, 0)\n (1, 0)\n\n sage: CF = CuspForms(n=infinity, 
k=10, ep=-1)\n sage: CF._canonical_min_exp(-2, -2)\n (1, 1)\n \"\"\"\n\n min_exp = ZZ(min_exp)\n order_1 = ZZ(order_1)\n if self.is_holomorphic():\n if self.is_cuspidal():\n min_exp = max(min_exp, 1)\n order_1 = max(order_1, 1)\n else:\n min_exp = max(min_exp, 0)\n order_1 = max(order_1, 0)\n\n if (self.hecke_n() != infinity):\n order_1 = ZZ(0)\n\n return (min_exp, order_1)\n\n def quasi_part_gens(self, r=None, min_exp=0, max_exp=infinity, order_1=ZZ(0)):\n r\"\"\"\n Return a basis in ``self`` of the subspace of (quasi) weakly\n holomorphic forms which satisfy the specified properties on\n the quasi parts and the initial Fourier coefficient.\n\n INPUT:\n\n - ``r`` -- An integer or ``None`` (default), indicating\n the desired power of ``E2`` If ``r=None``\n then all possible powers (``r``) are\n choosen.\n\n - ``min_exp`` -- An integer giving a lower bound for the\n first non-trivial Fourier coefficient of the\n generators (default: 0).\n\n - ``max_exp`` -- An integer or ``infinity`` (default) giving\n an upper bound for the first non-trivial\n Fourier coefficient of the generators. If\n ``max_exp==infinity`` then no upper bound is\n assumed.\n\n - ``order_1`` -- A lower bound for the order at ``-1`` of all\n quasi parts of the basis elements (default:\n 0). If ``n!=infinity`` this parameter is\n ignored.\n\n OUTPUT:\n\n A basis in ``self`` of the subspace of forms which are modular\n after dividing by ``E2^r`` and which have a Fourier expansion\n of the form ``q^m + O(q^(m+1))`` with ``min_exp <= m <=\n max_exp`` for each quasi part (and at least the specified\n order at ``-1`` in case ``n=infinity``). Note that linear\n combinations of forms/quasi parts maybe have a higher order at\n infinity than ``max_exp``.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import QuasiWeakModularForms\n sage: QF = QuasiWeakModularForms(n=8, k=10/3, ep=-1)\n sage: QF.default_prec(1)\n sage: QF.quasi_part_gens(min_exp=-1)\n [q^-1 + O(q), 1 + O(q), q^-1 - 9/(128*d) + O(q), 1 + O(q), q^-1 - 19/(64*d) + O(q), q^-1 + 1/(64*d) + O(q)]\n\n sage: QF.quasi_part_gens(min_exp=-1, max_exp=-1)\n [q^-1 + O(q), q^-1 - 9/(128*d) + O(q), q^-1 - 19/(64*d) + O(q), q^-1 + 1/(64*d) + O(q)]\n sage: QF.quasi_part_gens(min_exp=-2, r=1)\n [q^-2 - 9/(128*d)*q^-1 - 261/(131072*d^2) + O(q), q^-1 - 9/(128*d) + O(q), 1 + O(q)]\n\n sage: from sage.modular.modform_hecketriangle.space import ModularForms\n sage: MF = ModularForms(k=36)\n sage: MF.quasi_part_gens(min_exp=2)\n [q^2 + 194184*q^4 + O(q^5), q^3 - 72*q^4 + O(q^5)]\n\n sage: from sage.modular.modform_hecketriangle.space import QuasiModularForms\n sage: MF = QuasiModularForms(n=5, k=6, ep=-1)\n sage: MF.default_prec(2)\n sage: MF.dimension()\n 3\n sage: MF.quasi_part_gens(r=0)\n [1 - 37/(200*d)*q + O(q^2)]\n sage: MF.quasi_part_gens(r=0)[0] == MF.E6()\n True\n sage: MF.quasi_part_gens(r=1)\n [1 + 33/(200*d)*q + O(q^2)]\n sage: MF.quasi_part_gens(r=1)[0] == MF.E2()*MF.E4()\n True\n sage: MF.quasi_part_gens(r=2)\n []\n sage: MF.quasi_part_gens(r=3)\n [1 - 27/(200*d)*q + O(q^2)]\n sage: MF.quasi_part_gens(r=3)[0] == MF.E2()^3\n True\n\n sage: from sage.modular.modform_hecketriangle.space import QuasiCuspForms, CuspForms\n sage: MF = QuasiCuspForms(n=5, k=18, ep=-1)\n sage: MF.default_prec(4)\n sage: MF.dimension()\n 8\n sage: MF.quasi_part_gens(r=0)\n [q - 34743/(640000*d^2)*q^3 + O(q^4), q^2 - 69/(200*d)*q^3 + O(q^4)]\n sage: MF.quasi_part_gens(r=1)\n [q - 9/(200*d)*q^2 + 37633/(640000*d^2)*q^3 + O(q^4),\n q^2 + 1/(200*d)*q^3 + O(q^4)]\n sage: 
MF.quasi_part_gens(r=2)\n [q - 1/(4*d)*q^2 - 24903/(640000*d^2)*q^3 + O(q^4)]\n sage: MF.quasi_part_gens(r=3)\n [q + 1/(10*d)*q^2 - 7263/(640000*d^2)*q^3 + O(q^4)]\n sage: MF.quasi_part_gens(r=4)\n [q - 11/(20*d)*q^2 + 53577/(640000*d^2)*q^3 + O(q^4)]\n sage: MF.quasi_part_gens(r=5)\n [q - 1/(5*d)*q^2 + 4017/(640000*d^2)*q^3 + O(q^4)]\n\n sage: MF.quasi_part_gens(r=1)[0] == MF.E2() * CuspForms(n=5, k=16, ep=1).gen(0)\n True\n sage: MF.quasi_part_gens(r=1)[1] == MF.E2() * CuspForms(n=5, k=16, ep=1).gen(1)\n True\n sage: MF.quasi_part_gens(r=3)[0] == MF.E2()^3 * MF.Delta()\n True\n\n sage: MF = QuasiCuspForms(n=infinity, k=18, ep=-1)\n sage: MF.quasi_part_gens(r=1, min_exp=-2) == MF.quasi_part_gens(r=1, min_exp=1)\n True\n sage: MF.quasi_part_gens(r=1)\n [q - 8*q^2 - 8*q^3 + 5952*q^4 + O(q^5),\n q^2 - 8*q^3 + 208*q^4 + O(q^5),\n q^3 - 16*q^4 + O(q^5)]\n\n sage: MF = QuasiWeakModularForms(n=infinity, k=4, ep=1)\n sage: MF.quasi_part_gens(r=2, min_exp=2, order_1=-2)[0] == MF.E2()^2 * MF.E4()^(-2) * MF.f_inf()^2\n True\n sage: [v.order_at(-1) for v in MF.quasi_part_gens(r=0, min_exp=2, order_1=-2)]\n [-2, -2]\n \"\"\"\n\n if (not self.is_weakly_holomorphic()):\n from warnings import warn\n warn(\"This function only determines generators of (quasi) weakly modular forms!\")\n\n (min_exp, order_1) = self._canonical_min_exp(min_exp, order_1)\n\n # For modular forms spaces the quasi parts are all zero except for r=0\n if (self.is_modular()):\n r = ZZ(r)\n if (r != 0):\n return []\n\n # The lower bounds on the powers of f_inf and E4 determine\n # how large powers of E2 we can fit in...\n n = self.hecke_n()\n if (n == infinity):\n max_numerator_weight = self._weight - 4*min_exp - 4*order_1 + 4\n else:\n max_numerator_weight = self._weight - 4*n/(n-2)*min_exp + 4\n\n # If r is not specified we gather all generators for all possible r's\n if (r is None):\n gens = []\n for rnew in range(ZZ(0), QQ(max_numerator_weight/ZZ(2)).floor() + 1):\n gens += self.quasi_part_gens(r=rnew, min_exp=min_exp, max_exp=max_exp, order_1=order_1)\n return gens\n\n r = ZZ(r)\n if (r < 0 or 2*r > max_numerator_weight):\n return []\n\n E2 = self.E2()\n ambient_weak_space = self.graded_ring().reduce_type(\"weak\", degree=(self._weight-QQ(2*r), self._ep*(-1)**r))\n order_inf = ambient_weak_space._l1 - order_1\n\n if (max_exp == infinity):\n max_exp = order_inf\n elif (max_exp < min_exp):\n return []\n else:\n max_exp = min(ZZ(max_exp), order_inf)\n\n gens = []\n for m in range(min_exp, max_exp + 1):\n gens += [ self(ambient_weak_space.F_basis(m, order_1=order_1)*E2**r) ]\n\n return gens\n\n def quasi_part_dimension(self, r=None, min_exp=0, max_exp=infinity, order_1=ZZ(0)):\n r\"\"\"\n Return the dimension of the subspace of ``self`` generated by\n ``self.quasi_part_gens(r, min_exp, max_exp, order_1)``.\n\n See :meth:`quasi_part_gens` for more details.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import QuasiModularForms, QuasiCuspForms, QuasiWeakModularForms\n sage: MF = QuasiModularForms(n=5, k=6, ep=-1)\n sage: [v.as_ring_element() for v in MF.gens()]\n [f_rho^2*f_i, f_rho^3*E2, E2^3]\n sage: MF.dimension()\n 3\n sage: MF.quasi_part_dimension(r=0)\n 1\n sage: MF.quasi_part_dimension(r=1)\n 1\n sage: MF.quasi_part_dimension(r=2)\n 0\n sage: MF.quasi_part_dimension(r=3)\n 1\n\n sage: MF = QuasiCuspForms(n=5, k=18, ep=-1)\n sage: MF.dimension()\n 8\n sage: MF.quasi_part_dimension(r=0)\n 2\n sage: MF.quasi_part_dimension(r=1)\n 2\n sage: MF.quasi_part_dimension(r=2)\n 1\n sage: 
MF.quasi_part_dimension(r=3)\n 1\n sage: MF.quasi_part_dimension(r=4)\n 1\n sage: MF.quasi_part_dimension(r=5)\n 1\n sage: MF.quasi_part_dimension(min_exp=2, max_exp=2)\n 2\n\n sage: MF = QuasiCuspForms(n=infinity, k=18, ep=-1)\n sage: MF.quasi_part_dimension(r=1, min_exp=-2)\n 3\n sage: MF.quasi_part_dimension()\n 12\n sage: MF.quasi_part_dimension(order_1=3)\n 2\n\n sage: MF = QuasiWeakModularForms(n=infinity, k=4, ep=1)\n sage: MF.quasi_part_dimension(min_exp=2, order_1=-2)\n 4\n sage: [v.order_at(-1) for v in MF.quasi_part_gens(r=0, min_exp=2, order_1=-2)]\n [-2, -2]\n \"\"\"\n\n if (not self.is_weakly_holomorphic()):\n from warnings import warn\n warn(\"This function only determines the dimension of some (quasi) weakly subspace!\")\n\n (min_exp, order_1) = self._canonical_min_exp(min_exp, order_1)\n\n # For modular forms spaces the quasi parts are all zero except for r=0\n if (self.is_modular()):\n r = ZZ(0)\n if (r != 0):\n return ZZ(0)\n\n # The lower bounds on the powers of f_inf and E4 determine\n # how large powers of E2 we can fit in...\n n = self.hecke_n()\n if (n == infinity):\n max_numerator_weight = self._weight - 4*min_exp - 4*order_1 + 4\n else:\n max_numerator_weight = self._weight - 4*n/(n-2)*min_exp + 4\n\n # If r is not specified we calculate the total dimension over all possible r's\n if (r is None):\n return sum([self.quasi_part_dimension(r=rnew, min_exp=min_exp, max_exp=max_exp, order_1=order_1) for rnew in range(ZZ(0), QQ(max_numerator_weight/ZZ(2)).floor() + 1)])\n\n r = ZZ(r)\n if (r < 0 or 2*r > max_numerator_weight):\n return ZZ(0)\n\n k = self._weight - QQ(2*r)\n ep = self._ep * (-1)**r\n if (n == infinity):\n num = (k - (1-ep)) / ZZ(4)\n l2 = order_1\n order_inf = ZZ(num) - order_1\n else:\n num = ZZ((k-(1-ep)*ZZ(n)/ZZ(n-2)) * ZZ(n-2) / ZZ(4))\n l2 = num % n\n order_inf = ((num - l2) / n).numerator()\n\n if (max_exp == infinity):\n max_exp = order_inf\n elif (max_exp < min_exp):\n return ZZ(0)\n else:\n max_exp = min(ZZ(max_exp), order_inf)\n\n return max(ZZ(0), max_exp - min_exp + 1)\n\n def construct_form(self, laurent_series, order_1=ZZ(0), check=True, rationalize=False):\n r\"\"\"\n Tries to construct an element of self with the given Fourier\n expansion. The assumption is made that the specified Fourier\n expansion corresponds to a weakly holomorphic modular form.\n\n If the precision is too low to determine the\n element an exception is raised.\n\n INPUT:\n\n - ``laurent_series`` -- A Laurent or Power series.\n\n - ``order_1`` -- A lower bound for the order at ``-1`` of the form (default: 0).\n If ``n!=infinity`` this parameter is ignored.\n\n - ``check`` -- If ``True`` (default) then the series expansion of the constructed\n form is compared against the given series.\n\n - ``rationalize`` -- If ``True`` (default: ``False``) then the series is\n `rationalized` beforehand. 
Note that in non-exact or non-arithmetic\n cases this is experimental and extremely unreliable!\n\n OUTPUT:\n\n If possible: An element of self with the same initial\n Fourier expansion as ``laurent_series``.\n\n Note: For modular spaces it is also possible to call\n ``self(laurent_series)`` instead.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import CuspForms\n sage: Delta = CuspForms(k=12).Delta()\n sage: qexp = Delta.q_expansion(prec=2)\n sage: qexp.parent()\n Power Series Ring in q over Fraction Field of Univariate Polynomial Ring in d over Integer Ring\n sage: qexp\n q + O(q^2)\n sage: CuspForms(k=12).construct_form(qexp) == Delta\n True\n\n sage: from sage.modular.modform_hecketriangle.space import WeakModularForms\n sage: J_inv = WeakModularForms(n=7).J_inv()\n sage: qexp2 = J_inv.q_expansion(prec=1)\n sage: qexp2.parent()\n Laurent Series Ring in q over Fraction Field of Univariate Polynomial Ring in d over Integer Ring\n sage: qexp2\n d*q^-1 + 151/392 + O(q)\n sage: WeakModularForms(n=7).construct_form(qexp2) == J_inv\n True\n\n sage: MF = WeakModularForms(n=5, k=62/3, ep=-1)\n sage: MF.default_prec(MF._l1+1)\n sage: d = MF.get_d()\n sage: MF.weight_parameters()\n (2, 3)\n sage: el2 = d*MF.F_basis(2) + 2*MF.F_basis(1) + MF.F_basis(-2)\n sage: qexp2 = el2.q_expansion()\n sage: qexp2.parent()\n Laurent Series Ring in q over Fraction Field of Univariate Polynomial Ring in d over Integer Ring\n sage: qexp2\n q^-2 + 2*q + d*q^2 + O(q^3)\n sage: WeakModularForms(n=5, k=62/3, ep=-1).construct_form(qexp2) == el2\n True\n\n sage: MF = WeakModularForms(n=infinity, k=-2, ep=-1)\n sage: el3 = MF.f_i()/MF.f_inf() + MF.f_i()*MF.f_inf()/MF.E4()^2\n sage: MF.quasi_part_dimension(min_exp=-1, order_1=-2)\n 3\n sage: prec = MF._l1 + 3\n sage: qexp3 = el3.q_expansion(prec)\n sage: qexp3\n q^-1 - 1/(4*d) + ((1024*d^2 - 33)/(1024*d^2))*q + O(q^2)\n sage: MF.construct_form(qexp3, order_1=-2) == el3\n True\n sage: MF.construct_form(el3.q_expansion(prec + 1), order_1=-3) == el3\n True\n\n sage: WF = WeakModularForms(n=14)\n sage: qexp = WF.J_inv().q_expansion_fixed_d(d_num_prec=1000)\n sage: qexp.parent()\n Laurent Series Ring in q over Real Field with 1000 bits of precision\n sage: WF.construct_form(qexp, rationalize=True) == WF.J_inv()\n doctest:...: UserWarning: Using an experimental rationalization of coefficients, please check the result for correctness!\n True\n \"\"\"\n\n base_ring = laurent_series.base_ring()\n if is_PolynomialRing(base_ring.base()):\n if not (self.coeff_ring().has_coerce_map_from(base_ring)):\n raise ValueError(\"The Laurent coefficients don't coerce into the coefficient ring of self!\")\n elif rationalize:\n laurent_series = self.rationalize_series(laurent_series)\n else:\n raise ValueError(\"The Laurent coefficients are not in the proper form yet. 
Try rationalize_series(laurent_series) beforehand (experimental).\")\n\n order_1 = self._canonical_min_exp(0, order_1)[1]\n order_inf = self._l1 - order_1\n\n if (laurent_series.prec() < order_inf + 1):\n raise ValueError(\"Insufficient precision: {} < {} = order_inf!\".format(laurent_series.prec(), order_inf + 1))\n\n new_series = laurent_series.add_bigoh(order_inf + 1)\n coefficients = new_series.coefficients()\n exponents = new_series.exponents()\n\n if (len(coefficients) == 0):\n return self(0)\n\n rat = sum([\\\n coefficients[j] * self.F_basis_pol(exponents[j], order_1=order_1)\\\n for j in range(ZZ(len(coefficients)))\n ])\n\n el = self(rat)\n\n if (check):\n prec = min(laurent_series.prec(), laurent_series.exponents()[-1] + 1)\n if (el.q_expansion(prec=prec) != laurent_series):\n raise ValueError(\"The Laurent series {} does not correspond to a form of {}\".format(laurent_series, self.reduce_type([\"weak\"])))\n\n return el\n\n @cached_method\n def _quasi_form_matrix(self, min_exp=0, order_1=ZZ(0), incr_prec_by=0):\n r\"\"\"\n Return a base change matrix which transforms coordinate vectors\n with respect to a certain basis into a vector corresponding to\n Laurent coefficients of a series.\n\n This is a helper function used to construct weakly holomorphic quasi\n forms based on their initial Laurent coefficients\n (see :meth:`construct_quasi_form`).\n\n INPUT:\n\n - ``min_exp`` -- An integer (default: 0), namely the lower bound for the\n order at infinity resp. the exponent of the Laurent series.\n\n - ``order_1`` -- A lower bound for the order at ``-1`` of all quasi parts of the\n subspace (default: 0). If ``n!=infinity`` this parameter is ignored.\n\n - ``incr_prec_by`` -- An integer (default: 0) which specifies how\n much the precision should be increased compared to\n the size of the corresponding basis.\n\n OUTPUT:\n\n The corresponding base change matrix.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import QuasiWeakModularForms, ModularForms, QuasiModularForms\n sage: QF = QuasiWeakModularForms(n=8, k=10/3, ep=-1)\n sage: A = QF._quasi_form_matrix(min_exp=-1)\n sage: A[3]\n (-1215/(65536*d^3), -2171/(131072*d^2), 134099/(16777216*d^3), -811/(131072*d^2), 15889/(8388608*d^3), -8851/(8388608*d^3))\n\n sage: MF = ModularForms(k=36)\n sage: MF._quasi_form_matrix(min_exp=2)\n [1 0]\n [0 1]\n\n sage: QuasiModularForms(k=2)._quasi_form_matrix()\n [1]\n\n sage: QF = QuasiWeakModularForms(n=infinity, k=-2, ep=-1)\n sage: A = QF._quasi_form_matrix(min_exp=-1, order_1=0)\n sage: A\n [ 1 1]\n [-1/(4*d) 0]\n \"\"\"\n\n (min_exp, order_1) = self._canonical_min_exp(min_exp, order_1)\n\n order_inf = self._l1 - order_1\n\n # We have to add + 1 to get a correct upper bound in all cases\n # since corresponding weak space might have a higher l1 (+1) than\n # ``self``, even if the weight is smaller\n max_exp = order_inf + 1\n\n basis = self.quasi_part_gens(min_exp=min_exp, max_exp=max_exp, order_1=order_1)\n\n column_size = len(basis)\n # a non-trivial incr_prec_by will be added in case the resulting matrix does not have full rank\n row_size = column_size + incr_prec_by\n prec = row_size + min_exp\n\n coeff_ring = self.coeff_ring()\n A = matrix(coeff_ring, row_size, 0)\n\n for gen in basis:\n A = A.augment(gen.q_expansion_vector(min_exp=min_exp, max_exp=prec-1))\n\n # So far this case never happened but potentially A could be singular!\n # In this case we want to increase the row size until A has maximal\n # rank (i.e. 
column size).\n\n # This is done up increasing the precision of everything by about 20%\n # of the column size until A has maximal rank:\n if (A.rank() < column_size):\n if (incr_prec_by == 0):\n from sage.misc.verbose import verbose\n verbose(\"Encountered a base change matrix with not-yet-maximal rank (rare, please report)!\")\n incr_prec_by += column_size//ZZ(5) + 1\n return self._quasi_form_matrix(min_exp=min_exp, order_1=order_1, incr_prec_by=incr_prec_by)\n elif (incr_prec_by == 0):\n return A\n\n # At this point the matrix has maximal rank but might be too big.\n # Since we are interested in the (exact) required size resp. precision\n # we have to decrease the (row) size as much as possible while keeping\n # maximal rank. We cannot simply choose pivots/etc since we want to\n # keep a simple correspondence to Fourier coefficients!\n\n # We start by using an initial binary search to delete some unnecessary rows:\n while (A.rank() == column_size):\n row_size = A.dimensions()[0]\n\n # to avoid infinite loops\n if (row_size == column_size):\n return A\n\n B = A\n A = A.delete_rows([r for r in range(column_size + (row_size-column_size)//2 - 1, row_size)])\n\n # Next we simply delete row by row. Note that A is still modified here...\n while (B.rank() == column_size):\n A = B\n row_size = B.dimensions()[0]\n B = B.delete_rows([row_size-1])\n\n return A\n\n def required_laurent_prec(self, min_exp=0, order_1=ZZ(0)):\n r\"\"\"\n Return an upper bound for the required precision for Laurent series to\n uniquely determine a corresponding (quasi) form in ``self`` with the given\n lower bound ``min_exp`` for the order at infinity (for each quasi part).\n\n .. NOTE::\n\n For ``n=infinity`` only the holomorphic case (``min_exp >= 0``)\n is supported (in particular a non-negative order at ``-1`` is assumed).\n\n INPUT:\n\n - ``min_exp`` -- An integer (default: 0), namely the lower bound for the\n order at infinity resp. the exponent of the Laurent series.\n\n - ``order_1`` -- A lower bound for the order at ``-1`` for all quasi parts\n (default: 0). If ``n!=infinity`` this parameter is ignored.\n\n OUTPUT:\n\n An integer, namely an upper bound for the number of required\n Laurent coefficients. The bound should be precise or at least\n pretty sharp.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import QuasiWeakModularForms, ModularForms, QuasiModularForms\n sage: QF = QuasiWeakModularForms(n=8, k=10/3, ep=-1)\n sage: QF.required_laurent_prec(min_exp=-1)\n 5\n\n sage: MF = ModularForms(k=36)\n sage: MF.required_laurent_prec(min_exp=2)\n 4\n\n sage: QuasiModularForms(k=2).required_laurent_prec()\n 1\n\n sage: QuasiWeakModularForms(n=infinity, k=2, ep=-1).required_laurent_prec(order_1=-1)\n 6\n \"\"\"\n\n (min_exp, order_1) = self._canonical_min_exp(min_exp, order_1)\n\n return self._quasi_form_matrix(min_exp=min_exp, order_1=order_1).dimensions()[0] + min_exp\n\n def construct_quasi_form(self, laurent_series, order_1=ZZ(0), check=True, rationalize=False):\n r\"\"\"\n Try to construct an element of self with the given Fourier\n expansion. The assumption is made that the specified Fourier\n expansion corresponds to a weakly holomorphic quasi modular form.\n\n If the precision is too low to determine the\n element an exception is raised.\n\n INPUT:\n\n - ``laurent_series`` -- A Laurent or Power series.\n\n - ``order_1`` -- A lower bound for the order at ``-1`` for all quasi parts of the\n form (default: 0). 
If ``n!=infinity`` this parameter is ignored.\n\n - ``check`` -- If ``True`` (default) then the series expansion of the constructed\n form is compared against the given (rationalized) series.\n\n - ``rationalize`` -- If ``True`` (default: ``False``) then the series is\n `rationalized` beforehand. Note that in non-exact or non-arithmetic\n cases this is experimental and extremely unreliable!\n\n OUTPUT:\n\n If possible: An element of self with the same initial\n Fourier expansion as ``laurent_series``.\n\n Note: For non modular spaces it is also possible to call\n ``self(laurent_series)`` instead. Also note that this function works\n much faster if a corresponding (cached) ``q_basis`` is available.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import QuasiWeakModularForms, ModularForms, QuasiModularForms, QuasiCuspForms\n sage: QF = QuasiWeakModularForms(n=8, k=10/3, ep=-1)\n sage: el = QF.quasi_part_gens(min_exp=-1)[4]\n sage: prec = QF.required_laurent_prec(min_exp=-1)\n sage: prec\n 5\n sage: qexp = el.q_expansion(prec=prec)\n sage: qexp\n q^-1 - 19/(64*d) - 7497/(262144*d^2)*q + 15889/(8388608*d^3)*q^2 + 543834047/(1649267441664*d^4)*q^3 + 711869853/(43980465111040*d^5)*q^4 + O(q^5)\n sage: qexp.parent()\n Laurent Series Ring in q over Fraction Field of Univariate Polynomial Ring in d over Integer Ring\n sage: constructed_el = QF.construct_quasi_form(qexp)\n sage: constructed_el.parent()\n QuasiWeakModularForms(n=8, k=10/3, ep=-1) over Integer Ring\n sage: el == constructed_el\n True\n\n If a q_basis is available the construction uses a different algorithm which we also check::\n\n sage: basis = QF.q_basis(min_exp=-1)\n sage: QF(qexp) == constructed_el\n True\n\n sage: MF = ModularForms(k=36)\n sage: el2 = MF.quasi_part_gens(min_exp=2)[1]\n sage: prec = MF.required_laurent_prec(min_exp=2)\n sage: prec\n 4\n sage: qexp2 = el2.q_expansion(prec=prec + 1)\n sage: qexp2\n q^3 - 1/(24*d)*q^4 + O(q^5)\n sage: qexp2.parent()\n Power Series Ring in q over Fraction Field of Univariate Polynomial Ring in d over Integer Ring\n sage: constructed_el2 = MF.construct_quasi_form(qexp2)\n sage: constructed_el2.parent()\n ModularForms(n=3, k=36, ep=1) over Integer Ring\n sage: el2 == constructed_el2\n True\n\n sage: QF = QuasiModularForms(k=2)\n sage: q = QF.get_q()\n sage: qexp3 = 1 + O(q)\n sage: QF(qexp3)\n 1 - 24*q - 72*q^2 - 96*q^3 - 168*q^4 + O(q^5)\n sage: QF(qexp3) == QF.E2()\n True\n\n sage: QF = QuasiWeakModularForms(n=infinity, k=2, ep=-1)\n sage: el4 = QF.f_i() + QF.f_i()^3/QF.E4()\n sage: prec = QF.required_laurent_prec(order_1=-1)\n sage: qexp4 = el4.q_expansion(prec=prec)\n sage: qexp4\n 2 - 7/(4*d)*q + 195/(256*d^2)*q^2 - 903/(4096*d^3)*q^3 + 41987/(1048576*d^4)*q^4 - 181269/(33554432*d^5)*q^5 + O(q^6)\n sage: QF.construct_quasi_form(qexp4, check=False) == el4\n False\n sage: QF.construct_quasi_form(qexp4, order_1=-1) == el4\n True\n\n sage: QF = QuasiCuspForms(n=8, k=22/3, ep=-1)\n sage: el = QF(QF.f_inf()*QF.E2())\n sage: qexp = el.q_expansion_fixed_d(d_num_prec=1000)\n sage: qexp.parent()\n Power Series Ring in q over Real Field with 1000 bits of precision\n sage: QF.construct_quasi_form(qexp, rationalize=True) == el\n True\n \"\"\"\n\n base_ring = laurent_series.base_ring()\n if is_PolynomialRing(base_ring.base()):\n if not (self.coeff_ring().has_coerce_map_from(base_ring)):\n raise ValueError(\"The Laurent coefficients don't coerce into the coefficient ring of self!\")\n elif rationalize:\n laurent_series = self.rationalize_series(laurent_series)\n else:\n 
raise ValueError(\"The Laurent coefficients are not in the proper form yet. Try rationalize_series(laurent_series) beforehand (experimental).\")\n\n prec = min(laurent_series.prec(), laurent_series.exponents()[-1] + 1)\n\n min_exp1 = laurent_series.exponents()[0]\n (min_exp, order_1) = self._canonical_min_exp(min_exp1, order_1)\n\n if (min_exp != min_exp1):\n raise ValueError(\"Due to the behavior at infinity the given Laurent series cannot possibly be an element of {}\".format(self))\n\n # if a q_basis is available we can construct the form much faster\n if (self.q_basis.is_in_cache(min_exp=min_exp, order_1=order_1)):\n basis = self.q_basis(min_exp=min_exp, order_1=order_1)\n size = len(basis)\n\n if (prec < min_exp + size):\n raise ValueError(\"Insufficient precision: {} < {}!\".format(laurent_series.prec(), min_exp + size))\n\n b = vector(self.coeff_ring(), [laurent_series[m] for m in range(min_exp, min_exp + len(basis))])\n\n el = self(sum([b[k]*basis[k] for k in range(0, len(basis))]))\n else:\n A = self._quasi_form_matrix(min_exp = min_exp, order_1=order_1)\n row_size = A.dimensions()[0]\n\n if (prec < min_exp + row_size):\n raise ValueError(\"Insufficient precision: {} < {}!\".format(laurent_series.prec(), min_exp + row_size))\n\n b = vector(self.coeff_ring(), [laurent_series[m] for m in range(min_exp, min_exp + row_size)])\n try:\n coord_vector = A.solve_right(b)\n except ValueError:\n raise ValueError(\"The Laurent series {} does not correspond to a (quasi) form of {}\".format(laurent_series, self.reduce_type([\"quasi\", \"weak\"])))\n\n order_inf = self._l1 - order_1\n\n # We have to add + 1 to get a correct upper bound in all cases\n # since corresponding weak space might have a higher l1 (+1) than\n # ``self``, even if the weight is smaller\n max_exp = order_inf + 1\n basis = self.quasi_part_gens(min_exp=min_exp, max_exp=max_exp, order_1=order_1)\n\n el = self(sum([coord_vector[k]*basis[k] for k in range(0, len(coord_vector))]))\n\n if (check):\n if (el.q_expansion(prec=prec) != laurent_series):\n raise ValueError(\"The Laurent series {} does not correspond to a form of {}\".format(laurent_series, self.reduce_type([\"quasi\", \"weak\"])))\n\n return el\n\n\n @cached_method\n def q_basis(self, m=None, min_exp=0, order_1=ZZ(0)):\n r\"\"\"\n Try to return a (basis) element of ``self`` with a Laurent series of the form\n ``q^m + O(q^N)``, where ``N=self.required_laurent_prec(min_exp)``.\n\n If ``m==None`` the whole basis (with varying ``m``'s) is returned if it exists.\n\n INPUT:\n\n - ``m`` -- An integer, indicating the desired initial Laurent exponent of the element.\n If ``m==None`` (default) then the whole basis is returned.\n\n - ``min_exp`` -- An integer, indicating the minimal Laurent exponent (for each quasi part)\n of the subspace of ``self`` which should be considered (default: 0).\n\n - ``order_1`` -- A lower bound for the order at ``-1`` of all quasi parts of the subspace\n (default: 0). If ``n!=infinity`` this parameter is ignored.\n\n OUTPUT:\n\n The corresponding basis (if ``m==None``) resp. the corresponding basis vector (if ``m!=None``).\n If the basis resp. 
element doesn't exist an exception is raised.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import QuasiWeakModularForms, ModularForms, QuasiModularForms\n sage: QF = QuasiWeakModularForms(n=8, k=10/3, ep=-1)\n sage: QF.default_prec(QF.required_laurent_prec(min_exp=-1))\n sage: q_basis = QF.q_basis(min_exp=-1)\n sage: q_basis\n [q^-1 + O(q^5), 1 + O(q^5), q + O(q^5), q^2 + O(q^5), q^3 + O(q^5), q^4 + O(q^5)]\n sage: QF.q_basis(m=-1, min_exp=-1)\n q^-1 + O(q^5)\n\n sage: MF = ModularForms(k=36)\n sage: MF.q_basis() == MF.gens()\n True\n\n sage: QF = QuasiModularForms(k=6)\n sage: QF.required_laurent_prec()\n 3\n sage: QF.q_basis()\n [1 - 20160*q^3 - 158760*q^4 + O(q^5), q - 60*q^3 - 248*q^4 + O(q^5), q^2 + 8*q^3 + 30*q^4 + O(q^5)]\n\n sage: QF = QuasiWeakModularForms(n=infinity, k=-2, ep=-1)\n sage: QF.q_basis(order_1=-1)\n [1 - 168*q^2 + 2304*q^3 - 19320*q^4 + O(q^5),\n q - 18*q^2 + 180*q^3 - 1316*q^4 + O(q^5)]\n \"\"\"\n\n if (not self.is_weakly_holomorphic()):\n from warnings import warn\n warn(\"This function only determines elements / a basis of (quasi) weakly modular forms!\")\n\n (min_exp, order_1) = self._canonical_min_exp(min_exp, order_1)\n order_inf = self._l1 - order_1\n\n if (m is None):\n A = self._quasi_form_matrix(min_exp=min_exp, order_1=order_1)\n\n # If A is square it should automatically be invertible (by the previous procedures)\n if (A.is_square()):\n B = A.inverse()\n\n max_exp = order_inf + 1\n basis = self.quasi_part_gens(min_exp=min_exp, max_exp=max_exp, order_1=order_1)\n\n column_len = A.dimensions()[1]\n q_basis = []\n for k in range(0, column_len):\n el = self(sum([B[l][k] * basis[l] for l in range(0, column_len)]))\n q_basis += [el]\n\n return q_basis\n else:\n raise ValueError(\"Unfortunately a q_basis doesn't exist in this case (this is rare/interesting, please report)\")\n else:\n if (m < min_exp):\n raise ValueError(\"Index out of range: m={} < {}=min_exp\".format(m, min_exp))\n\n # If the whole basis is available, then use it\n if (self.q_basis.is_in_cache(min_exp=min_exp, order_1=order_1)):\n q_basis = self.q_basis(min_exp=min_exp, order_1=order_1)\n\n column_len = len(q_basis)\n if (m >= column_len + min_exp):\n raise ValueError(\"Index out of range: m={} >= {}=dimension + min_exp\".format(m, column_len + min_exp))\n\n return q_basis[m - min_exp]\n else:\n row_len = self.required_laurent_prec(min_exp=min_exp, order_1=order_1) - min_exp\n if (m >= row_len + min_exp):\n raise ValueError(\"Index out of range: m={} >= {}=required_precision + min_exp\".format(m, row_len + min_exp))\n\n A = self._quasi_form_matrix(min_exp = min_exp, order_1=order_1)\n b = vector(self.coeff_ring(), row_len)\n b[m - min_exp] = 1\n try:\n coord_vector = A.solve_right(b)\n except ValueError:\n raise ValueError(\"Unfortunately the q_basis vector (m={}, min_exp={}) doesn't exist in this case (this is rare/interesting, please report)\".format(m, min_exp))\n\n max_exp = order_inf + 1\n basis = self.quasi_part_gens(min_exp=min_exp, max_exp=max_exp, order_1=order_1)\n\n column_len = A.dimensions()[1]\n el = self(sum([coord_vector[l] * basis[l] for l in range(0, column_len)]))\n\n return el\n\n def rationalize_series(self, laurent_series, coeff_bound = 1e-10, denom_factor = ZZ(1)):\n r\"\"\"\n Try to return a Laurent series with coefficients in ``self.coeff_ring()``\n that matches the given Laurent series.\n\n We give our best but there is absolutely no guarantee that it will work!\n\n INPUT:\n\n - ``laurent_series`` -- A Laurent series. 
If the Laurent coefficients already\n coerce into ``self.coeff_ring()`` with a formal parameter\n then the Laurent series is returned as is.\n\n Otherwise it is assumed that the series is normalized\n in the sense that the first non-trivial coefficient\n is a power of ``d`` (e.g. ``1``).\n\n - ``coeff_bound`` -- Either ``None`` resp. ``0`` or a positive real number\n (default: ``1e-10``). If specified ``coeff_bound``\n gives a lower bound for the size of the initial Laurent\n coefficients. If a coefficient is smaller it is\n assumed to be zero.\n\n For calculations with very small coefficients (less than\n ``1e-10``) ``coeff_bound`` should be set to something\n even smaller or just ``0``.\n\n Non-exact calculations often produce non-zero\n coefficients which are supposed to be zero. In those\n cases this parameter helps a lot.\n\n - ``denom_factor`` -- An integer (default: 1) whose factor might occur in\n the denominator of the given Laurent coefficients\n (in addition to naturally occurring factors).\n\n OUTPUT:\n\n A Laurent series over ``self.coeff_ring()`` corresponding to the given Laurent series.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import WeakModularForms, ModularForms, QuasiCuspForms\n sage: WF = WeakModularForms(n=14)\n sage: qexp = WF.J_inv().q_expansion_fixed_d(d_num_prec=1000)\n sage: qexp.parent()\n Laurent Series Ring in q over Real Field with 1000 bits of precision\n sage: qexp_int = WF.rationalize_series(qexp)\n sage: qexp_int.add_bigoh(3)\n d*q^-1 + 37/98 + 2587/(38416*d)*q + 899/(117649*d^2)*q^2 + O(q^3)\n sage: qexp_int == WF.J_inv().q_expansion()\n True\n sage: WF.rationalize_series(qexp_int) == qexp_int\n True\n sage: WF(qexp_int) == WF.J_inv()\n True\n\n sage: WF.rationalize_series(qexp.parent()(1))\n 1\n sage: WF.rationalize_series(qexp_int.parent()(1)).parent()\n Laurent Series Ring in q over Fraction Field of Univariate Polynomial Ring in d over Integer Ring\n\n sage: MF = ModularForms(n=infinity, k=4)\n sage: qexp = MF.E4().q_expansion_fixed_d()\n sage: qexp.parent()\n Power Series Ring in q over Rational Field\n sage: qexp_int = MF.rationalize_series(qexp)\n sage: qexp_int.parent()\n Power Series Ring in q over Fraction Field of Univariate Polynomial Ring in d over Integer Ring\n sage: qexp_int == MF.E4().q_expansion()\n True\n sage: MF.rationalize_series(qexp_int) == qexp_int\n True\n sage: MF(qexp_int) == MF.E4()\n True\n\n sage: QF = QuasiCuspForms(n=8, k=22/3, ep=-1)\n sage: el = QF(QF.f_inf()*QF.E2())\n sage: qexp = el.q_expansion_fixed_d(d_num_prec=1000)\n sage: qexp.parent()\n Power Series Ring in q over Real Field with 1000 bits of precision\n sage: qexp_int = QF.rationalize_series(qexp)\n sage: qexp_int.parent()\n Power Series Ring in q over Fraction Field of Univariate Polynomial Ring in d over Integer Ring\n sage: qexp_int == el.q_expansion()\n True\n sage: QF.rationalize_series(qexp_int) == qexp_int\n True\n sage: QF(qexp_int) == el\n True\n \"\"\"\n\n from sage.rings.all import prime_range\n from sage.misc.misc_c import prod\n from warnings import warn\n\n denom_factor = ZZ(denom_factor)\n base_ring = laurent_series.base_ring()\n series_prec = laurent_series.prec()\n\n # If the coefficients already coerce to our coefficient ring\n # and are in polynomial form we simply return the Laurent series\n if (is_PolynomialRing(base_ring.base())):\n if (self.coeff_ring().has_coerce_map_from(base_ring)):\n return laurent_series\n else:\n raise ValueError(\"The Laurent coefficients don't coerce into the coefficient ring of 
self!\")\n # Else the case that the Laurent series is exact but the group is non-arithmetic\n # shouldn't occur (except for trivial cases)\n elif (base_ring.is_exact() and not self.group().is_arithmetic()):\n prec = self.default_num_prec()\n dvalue = self.group().dvalue().n(prec)\n # For arithmetic groups the coefficients are exact though (so is d)\n elif (base_ring.is_exact()):\n prec = self.default_num_prec()\n dvalue = self.group().dvalue()\n else:\n prec = laurent_series.base_ring().prec()\n dvalue = self.group().dvalue().n(prec)\n\n # This messes up doctests! :-(\n warn(\"Using an experimental rationalization of coefficients, please check the result for correctness!\")\n\n d = self.get_d()\n q = self.get_q()\n\n if (not base_ring.is_exact() and coeff_bound):\n coeff_bound = base_ring(coeff_bound)\n num_q = laurent_series.parent().gen()\n laurent_series = sum([laurent_series[i]*num_q**i for i in range(laurent_series.exponents()[0], laurent_series.exponents()[-1]+1) if laurent_series[i].abs() > coeff_bound]).add_bigoh(series_prec)\n\n first_exp = laurent_series.exponents()[0]\n first_coeff = laurent_series[first_exp]\n d_power = (first_coeff.abs().n(prec).log()/dvalue.n(prec).log()).round()\n\n if (first_coeff < 0):\n return -self.rationalize_series(-laurent_series, coeff_bound=coeff_bound)\n elif (first_exp + d_power != 0):\n cor_factor = dvalue**(-(first_exp + d_power))\n return d**(first_exp + d_power) * self.rationalize_series(cor_factor * laurent_series, coeff_bound=coeff_bound)\n else:\n if (base_ring.is_exact() and self.group().is_arithmetic()):\n tolerance = 0\n else:\n tolerance = 10*ZZ(1).n(prec).ulp()\n\n if (first_coeff * dvalue**first_exp - ZZ(1)) > tolerance:\n raise ValueError(\"The Laurent series is not normalized correctly!\")\n\n # TODO: This is not a good enough estimate, see e.g. 
E12\n # (however for exact base rings + arithmetic groups we don't need it)\n def denominator_estimate(m):\n cor_exp = max(-first_exp, 0)\n m += cor_exp\n\n if self.group().is_arithmetic():\n return ZZ(1/dvalue)**m\n\n hecke_n = self.hecke_n()\n bad_factors = [fac for fac in Integer(m).factorial().factor() if (fac[0] % hecke_n) not in [1, hecke_n-1] and fac[0] > 2]\n bad_factorial = prod([fac[0]**fac[1] for fac in bad_factors])\n\n return ZZ(2**(6*m) * hecke_n**(2*m) * prod([ p**m for p in prime_range(m+1) if hecke_n % p == 0 and p > 2 ]) * bad_factorial)**(cor_exp + 1)\n\n def rationalize_coefficient(coeff, m):\n # TODO: figure out a correct bound for the required precision\n if (not self.group().is_arithmetic() and denominator_estimate(m).log(2).n().ceil() > prec):\n warn(\"The precision from coefficient m={} on is too low!\".format(m))\n\n rational_coeff = coeff * dvalue**m\n\n if (base_ring.is_exact() and self.group().is_arithmetic() and rational_coeff in QQ):\n rational_coeff = QQ(rational_coeff)\n else:\n int_estimate = denominator_estimate(m) * denom_factor * rational_coeff\n rational_coeff = int_estimate.round() / denominator_estimate(m) / denom_factor\n\n return rational_coeff / d**m\n\n laurent_series = sum([rationalize_coefficient(laurent_series[m], m) * q**m for m in range(first_exp, laurent_series.exponents()[-1] + 1)]).add_bigoh(series_prec)\n\n return laurent_series\n\n\n # DEFAULT METHODS (should be overwritten in concrete classes)\n\n def _an_element_(self):\n r\"\"\"\n Return an element of ``self``.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import QuasiMeromorphicModularForms\n sage: el = QuasiMeromorphicModularForms(k=2, ep=-1).an_element()\n sage: el.parent()\n QuasiMeromorphicModularForms(n=3, k=2, ep=-1) over Integer Ring\n sage: el.is_zero()\n True\n sage: el\n O(q^5)\n \"\"\"\n\n # this seems ok, so might as well leave it as is for everything\n return self(ZZ(0))\n #return self.F_simple()\n\n @cached_method\n def dimension(self):\n r\"\"\"\n Return the dimension of ``self``.\n\n .. 
NOTE::\n\n This method should be overloaded by subclasses.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import QuasiMeromorphicModularForms\n sage: QuasiMeromorphicModularForms(k=2, ep=-1).dimension()\n +Infinity\n \"\"\"\n\n return infinity\n\n def rank(self):\n r\"\"\"\n Return the rank of ``self``.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import ModularForms\n sage: MF = ModularForms(n=4, k=24, ep=-1)\n sage: MF.rank()\n 3\n sage: MF.subspace([MF.gen(0), MF.gen(2)]).rank()\n 2\n \"\"\"\n\n return self.dimension()\n\n def degree(self):\n r\"\"\"\n Return the degree of ``self``.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import ModularForms\n sage: MF = ModularForms(n=4, k=24, ep=-1)\n sage: MF.degree()\n 3\n sage: MF.subspace([MF.gen(0), MF.gen(2)]).degree() # defined in subspace.py\n 3\n \"\"\"\n\n return self.dimension()\n\n def coordinate_vector(self, v):\n r\"\"\"\n This method should be overloaded by subclasses.\n\n Return the coordinate vector of the element ``v``\n with respect to ``self.gens()``.\n\n NOTE:\n\n Elements use this method (from their parent)\n to calculate their coordinates.\n\n INPUT:\n\n - ``v`` -- An element of ``self``.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import ModularForms\n sage: MF = ModularForms(n=4, k=24, ep=-1)\n sage: MF.coordinate_vector(MF.gen(0)).parent() # defined in space.py\n Vector space of dimension 3 over Fraction Field of Univariate Polynomial Ring in d over Integer Ring\n sage: MF.coordinate_vector(MF.gen(0)) # defined in space.py\n (1, 0, 0)\n sage: subspace = MF.subspace([MF.gen(0), MF.gen(2)])\n sage: subspace.coordinate_vector(subspace.gen(0)).parent() # defined in subspace.py\n Vector space of dimension 2 over Fraction Field of Univariate Polynomial Ring in d over Integer Ring\n sage: subspace.coordinate_vector(subspace.gen(0)) # defined in subspace.py\n (1, 0)\n \"\"\"\n\n raise NotImplementedError(\"No coordinate vector is implemented yet for {}!\".format(self))\n\n @cached_method\n def ambient_coordinate_vector(self, v):\n r\"\"\"\n Return the coordinate vector of the element ``v``\n in ``self.module()`` with respect to the basis\n from ``self.ambient_space``.\n\n NOTE:\n\n Elements use this method (from their parent)\n to calculate their coordinates.\n\n INPUT:\n\n - ``v`` -- An element of ``self``.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import ModularForms\n sage: MF = ModularForms(n=4, k=24, ep=-1)\n sage: MF.ambient_coordinate_vector(MF.gen(0)).parent()\n Vector space of dimension 3 over Fraction Field of Univariate Polynomial Ring in d over Integer Ring\n sage: MF.ambient_coordinate_vector(MF.gen(0))\n (1, 0, 0)\n sage: subspace = MF.subspace([MF.gen(0), MF.gen(2)])\n sage: subspace.ambient_coordinate_vector(subspace.gen(0)).parent()\n Vector space of degree 3 and dimension 2 over Fraction Field of Univariate Polynomial Ring in d over Integer Ring\n Basis matrix:\n [1 0 0]\n [0 0 1]\n sage: subspace.ambient_coordinate_vector(subspace.gen(0))\n (1, 0, 0)\n \"\"\"\n\n return self.module()(self.ambient_space().coordinate_vector(v))\n\n def gens(self):\n r\"\"\"\n This method should be overloaded by subclasses.\n\n Return a basis of ``self``.\n\n Note that the coordinate vector of elements of ``self``\n are with respect to this basis.\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import ModularForms\n sage: ModularForms(k=12).gens() # defined in space.py\n [1 + 
196560*q^2 + 16773120*q^3 + 398034000*q^4 + O(q^5),\n q - 24*q^2 + 252*q^3 - 1472*q^4 + O(q^5)]\n \"\"\"\n\n raise NotImplementedError(\"No generators are implemented yet for {}!\".format(self))\n\n def gen(self, k=0):\n r\"\"\"\n Return the ``k``'th basis element of ``self``\n if possible (default: ``k=0``).\n\n EXAMPLES::\n\n sage: from sage.modular.modform_hecketriangle.space import ModularForms\n sage: ModularForms(k=12).gen(1).parent()\n ModularForms(n=3, k=12, ep=1) over Integer Ring\n sage: ModularForms(k=12).gen(1)\n q - 24*q^2 + 252*q^3 - 1472*q^4 + O(q^5)\n \"\"\"\n\n k = ZZ(k)\n if k>=0 and k < self.dimension():\n return self.gens()[k]\n else:\n raise ValueError(\"Invalid index: k={} does not satisfy 0 <= k <= {}!\".format(k, self.dimension()))\n", "id": "1868250", "language": "Python", "matching_score": 5.038815975189209, "max_stars_count": 1742, "path": "src/sage/modular/modform_hecketriangle/abstract_space.py" }, { "content": "\"\"\"\nGraded rings of modular forms\n\nThis module contains functions to find generators for the graded ring of\nmodular forms of given level.\n\nAUTHORS:\n\n- <NAME> (2007-08-24): first version\n- <NAME> (2021-06): implemented category and Parent/Element frameworks\n\"\"\"\n#*****************************************************************************\n# Copyright (C) 2007 <NAME>\n# 2021 <NAME>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 2 of the License, or\n# (at your option) any later version.\n# http://www.gnu.org/licenses/\n#*****************************************************************************\n\nfrom sage.structure.richcmp import richcmp_method, richcmp\nfrom sage.rings.all import Integer, QQ, ZZ\nfrom sage.misc.misc_c import prod\nfrom sage.misc.verbose import verbose\nfrom sage.misc.cachefunc import cached_method\nfrom sage.modular.arithgroup.all import Gamma0, is_CongruenceSubgroup\nfrom .constructor import ModularForms\nfrom .element import is_ModularFormElement, GradedModularFormElement\nfrom .space import is_ModularFormsSpace\nfrom random import shuffle\n\nfrom sage.rings.polynomial.multi_polynomial import MPolynomial\nfrom sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing\nfrom sage.rings.polynomial.term_order import TermOrder\nfrom sage.rings.power_series_poly import PowerSeries_poly\n\nfrom sage.structure.parent import Parent\n\nfrom sage.categories.graded_algebras import GradedAlgebras\n\nfrom sage.misc.superseded import deprecated_function_alias\n\ndef _span_of_forms_in_weight(forms, weight, prec, stop_dim=None, use_random=False):\n r\"\"\"\n Utility function. Given a nonempty list of pairs ``(k,f)``, where `k` is an\n integer and `f` is a power series, and a weight l, return all weight l\n forms obtained by multiplying together the given forms.\n\n INPUT:\n\n - ``forms`` -- list of pairs `(k, f)` with k an integer and f a power\n series (all over the same base ring)\n - ``weight`` -- an integer\n - ``prec`` -- an integer (less than or equal to the precision of all the\n forms in ``forms``) -- precision to use in power series computations.\n - ``stop_dim`` -- an integer: stop as soon as we have enough forms to span\n a submodule of this rank (a saturated one if the base ring is `\\ZZ`).\n Ignored if ``use_random`` is False.\n - ``use_random`` -- which algorithm to use. 
If True, tries random products\n of the generators of the appropriate weight until a large enough\n submodule is found (determined by ``stop_dim``). If False, just tries\n everything.\n\n Note that if the given forms do generate the whole space, then\n ``use_random=True`` will often be quicker (particularly if the weight is\n large); but if the forms don't generate, the randomized algorithm is no\n help and will actually be substantially slower, because it needs to do\n repeated echelon form calls to check if vectors are in a submodule, while\n the non-randomized algorithm just echelonizes one enormous matrix at the\n end.\n\n EXAMPLES::\n\n sage: import sage.modular.modform.ring as f\n sage: forms = [(4, 240*eisenstein_series_qexp(4,5)), (6,504*eisenstein_series_qexp(6,5))]\n sage: f._span_of_forms_in_weight(forms, 12, prec=5)\n Vector space of degree 5 and dimension 2 over Rational Field\n Basis matrix:\n [ 1 0 196560 16773120 398034000]\n [ 0 1 -24 252 -1472]\n sage: f._span_of_forms_in_weight(forms, 24, prec=5)\n Vector space of degree 5 and dimension 3 over Rational Field\n Basis matrix:\n [ 1 0 0 52416000 39007332000]\n [ 0 1 0 195660 12080128]\n [ 0 0 1 -48 1080]\n sage: ModularForms(1, 24).q_echelon_basis(prec=5)\n [\n 1 + 52416000*q^3 + 39007332000*q^4 + O(q^5),\n q + 195660*q^3 + 12080128*q^4 + O(q^5),\n q^2 - 48*q^3 + 1080*q^4 + O(q^5)\n ]\n\n Test the alternative randomized algorithm::\n\n sage: f._span_of_forms_in_weight(forms, 24, prec=5, use_random=True, stop_dim=3)\n Vector space of degree 5 and dimension 3 over Rational Field\n Basis matrix:\n [ 1 0 0 52416000 39007332000]\n [ 0 1 0 195660 12080128]\n [ 0 0 1 -48 1080]\n \"\"\"\n t = verbose('multiplying forms up to weight %s'%weight)\n # Algorithm: run through the monomials of the appropriate weight, and build\n # up the vector space they span.\n\n n = len(forms)\n R = forms[0][1].base_ring()\n V = R ** prec\n W = V.zero_submodule()\n shortforms = [f[1].truncate_powerseries(prec) for f in forms]\n\n # List of weights\n from sage.combinat.integer_vector_weighted import WeightedIntegerVectors\n wts = list(WeightedIntegerVectors(weight, [f[0] for f in forms]))\n t = verbose(\"calculated weight list\", t)\n N = len(wts)\n\n if use_random:\n if stop_dim is None:\n raise ValueError(\"stop_dim must be provided if use_random is True\")\n shuffle(wts)\n\n for c in range(N):\n w = V(prod(shortforms[i]**wts[c][i] for i in range(n)).padded_list(prec))\n if w in W:\n continue\n W = V.span(list(W.gens()) + [w])\n if stop_dim and W.rank() == stop_dim:\n if R != ZZ or W.index_in_saturation() == 1:\n verbose(\"Succeeded after %s of %s\" % (c, N), t)\n return W\n verbose(\"Nothing worked\", t)\n return W\n else:\n G = [V(prod(forms[i][1]**c[i] for i in range(n)).padded_list(prec)) for c in wts]\n t = verbose('found %s candidates' % N, t)\n W = V.span(G)\n verbose('span has dimension %s' % W.rank(), t)\n return W\n\n@richcmp_method\nclass ModularFormsRing(Parent):\n r\"\"\"\n The ring of modular forms (of weights 0 or at least 2) for a congruence\n subgroup of `{\\rm SL}_2(\\ZZ)`, with coefficients in a specified base ring.\n\n EXAMPLES::\n\n sage: ModularFormsRing(Gamma1(13))\n Ring of Modular Forms for Congruence Subgroup Gamma1(13) over Rational Field\n sage: m = ModularFormsRing(4); m\n Ring of Modular Forms for Congruence Subgroup Gamma0(4) over Rational Field\n sage: m.modular_forms_of_weight(2)\n Modular Forms space of dimension 2 for Congruence Subgroup Gamma0(4) of weight 2 over Rational Field\n sage: 
m.modular_forms_of_weight(10)\n Modular Forms space of dimension 6 for Congruence Subgroup Gamma0(4) of weight 10 over Rational Field\n sage: m == loads(dumps(m))\n True\n sage: m.generators()\n [(2, 1 + 24*q^2 + 24*q^4 + 96*q^6 + 24*q^8 + O(q^10)),\n (2, q + 4*q^3 + 6*q^5 + 8*q^7 + 13*q^9 + O(q^10))]\n sage: m.q_expansion_basis(2,10)\n [1 + 24*q^2 + 24*q^4 + 96*q^6 + 24*q^8 + O(q^10),\n q + 4*q^3 + 6*q^5 + 8*q^7 + 13*q^9 + O(q^10)]\n sage: m.q_expansion_basis(3,10)\n []\n sage: m.q_expansion_basis(10,10)\n [1 + 10560*q^6 + 3960*q^8 + O(q^10),\n q - 8056*q^7 - 30855*q^9 + O(q^10),\n q^2 - 796*q^6 - 8192*q^8 + O(q^10),\n q^3 + 66*q^7 + 832*q^9 + O(q^10),\n q^4 + 40*q^6 + 528*q^8 + O(q^10),\n q^5 + 20*q^7 + 190*q^9 + O(q^10)]\n\n Elements of modular forms ring can be initiated via multivariate polynomials (see :meth:`from_polynomial`)::\n\n sage: M = ModularFormsRing(1)\n sage: M.ngens()\n 2\n sage: E4, E6 = polygens(QQ, 'E4, E6')\n sage: M(E4)\n 1 + 240*q + 2160*q^2 + 6720*q^3 + 17520*q^4 + 30240*q^5 + O(q^6)\n sage: M(E6)\n 1 - 504*q - 16632*q^2 - 122976*q^3 - 532728*q^4 - 1575504*q^5 + O(q^6)\n sage: M((E4^3 - E6^2)/1728)\n q - 24*q^2 + 252*q^3 - 1472*q^4 + 4830*q^5 + O(q^6)\n\n \"\"\"\n\n Element = GradedModularFormElement\n\n def __init__(self, group, base_ring=QQ):\n r\"\"\"\n INPUT:\n\n - ``group`` -- a congruence subgroup of `{\\rm SL}_2(\\ZZ)`, or a\n positive integer `N` (interpreted as `\\Gamma_0(N)`)\n\n - ``base_ring`` (ring, default: `\\QQ`) -- a base ring, which should be\n `\\QQ`, `\\ZZ`, or the integers mod `p` for some prime `p`.\n\n TESTS::\n\n Check that :trac:`15037` is fixed::\n\n sage: ModularFormsRing(3.4)\n Traceback (most recent call last):\n ...\n ValueError: Group (=3.40000000000000) should be a congruence subgroup\n sage: ModularFormsRing(Gamma0(2), base_ring=PolynomialRing(ZZ,x))\n Traceback (most recent call last):\n ...\n ValueError: Base ring (=Univariate Polynomial Ring in x over Integer Ring) should be QQ, ZZ or a finite prime field\n\n ::\n\n sage: TestSuite(ModularFormsRing(1)).run()\n sage: TestSuite(ModularFormsRing(Gamma0(6))).run()\n sage: TestSuite(ModularFormsRing(Gamma1(4))).run()\n\n .. 
TODO::\n\n - Add graded modular forms over non-trivial Dirichlet character;\n - makes gen_forms returns modular forms over base rings other than `QQ`;\n - implement binary operations between two forms with different groups.\n \"\"\"\n if isinstance(group, (int, Integer)):\n group = Gamma0(group)\n elif not is_CongruenceSubgroup(group):\n raise ValueError(\"Group (=%s) should be a congruence subgroup\" % group)\n\n if base_ring != ZZ and not base_ring.is_field() and not base_ring.is_finite():\n raise ValueError(\"Base ring (=%s) should be QQ, ZZ or a finite prime field\" % base_ring)\n\n self.__group = group\n self.__cached_maxweight = ZZ(-1)\n self.__cached_gens = []\n self.__cached_cusp_maxweight = ZZ(-1)\n self.__cached_cusp_gens = []\n Parent.__init__(self, base=base_ring, category=GradedAlgebras(base_ring))\n\n def some_elements(self):\n r\"\"\"\n Return a list of generators of ``self``.\n\n EXAMPLES::\n\n sage: ModularFormsRing(1).some_elements()\n [1 + 240*q + 2160*q^2 + 6720*q^3 + 17520*q^4 + 30240*q^5 + O(q^6),\n 1 - 504*q - 16632*q^2 - 122976*q^3 - 532728*q^4 - 1575504*q^5 + O(q^6)]\n \"\"\"\n return [self(f) for f in self.gen_forms()]\n\n def group(self):\n r\"\"\"\n Return the congruence subgroup for which this is the ring of modular forms.\n\n EXAMPLES::\n\n sage: R = ModularFormsRing(Gamma1(13))\n sage: R.group() is Gamma1(13)\n True\n \"\"\"\n return self.__group\n\n def gen(self, i):\n r\"\"\"\n Return the `i`-th generator of ``self``.\n\n INPUT:\n\n - ``i`` (Integer) - correspond to the `i`-th modular form generating the ``ModularFormsRing``.\n\n OUTPUT: A ``GradedModularFormElement``\n\n EXAMPLES::\n\n sage: M = ModularFormsRing(1)\n sage: E4 = M.0; E4 # indirect doctest\n 1 + 240*q + 2160*q^2 + 6720*q^3 + 17520*q^4 + 30240*q^5 + O(q^6)\n sage: E6 = M.1; E6 # indirect doctest\n 1 - 504*q - 16632*q^2 - 122976*q^3 - 532728*q^4 - 1575504*q^5 + O(q^6)\n \"\"\"\n if self.base_ring() is not QQ:\n raise NotImplementedError(\"the base ring of the given ring of modular form should be QQ\")\n return self(self.gen_forms()[i])\n\n def ngens(self):\n r\"\"\"\n Return the number of generators of ``self``\n\n EXAMPLES::\n\n sage: ModularFormsRing(1).ngens()\n 2\n sage: ModularFormsRing(Gamma0(2)).ngens()\n 2\n sage: ModularFormsRing(Gamma1(13)).ngens() # long time\n 33\n\n .. WARNING::\n\n Computing the number of generators of a graded ring of modular form for a certain\n congruence subgroup can be very long.\n \"\"\"\n return len(self.gen_forms())\n\n def polynomial_ring(self, names, gens=None):\n r\"\"\"\n Return a polynomial ring of which ``self`` is a quotient.\n\n INPUT:\n\n - ``names`` -- a list or tuple of names (strings), or a comma separated string\n - ``gens`` (default: None) -- (list) a list of generator of ``self``. If ``gens`` is\n ``None`` then the generators returned by :meth:`~sage.modular.modform.find_generator.ModularFormsRing.gen_forms`\n is used instead.\n\n OUTPUT: A multivariate polynomial ring in the variable ``names``. 
Each variable of the\n polynomial ring correspond to a generator given in gens (following the ordering of the list).\n\n EXAMPLES::\n\n sage: M = ModularFormsRing(1)\n sage: gens = M.gen_forms()\n sage: M.polynomial_ring('E4, E6', gens)\n Multivariate Polynomial Ring in E4, E6 over Rational Field\n sage: M = ModularFormsRing(Gamma0(8))\n sage: gens = M.gen_forms()\n sage: M.polynomial_ring('g', gens)\n Multivariate Polynomial Ring in g0, g1, g2 over Rational Field\n\n The degrees of the variables are the weights of the corresponding forms::\n\n sage: M = ModularFormsRing(1)\n sage: P.<E4, E6> = M.polynomial_ring()\n sage: E4.degree()\n 4\n sage: E6.degree()\n 6\n sage: (E4*E6).degree()\n 10\n \"\"\"\n if gens is None:\n gens = self.gen_forms()\n degs = [f.weight() for f in gens]\n return PolynomialRing(self.base_ring(), len(gens), names, order=TermOrder('wdeglex', degs)) # Should we remove the deg lexicographic ordering here?\n\n\n def _generators_variables_dictionnary(self, poly_parent, gens):\n r\"\"\"\n Utility function that returns a dictionnary giving an association between\n polynomial ring generators and generators of modular forms ring.\n\n INPUT:\n\n - ``poly_parent`` -- A polynomial ring\n - ``gen`` -- list of generators of the modular forms ring\n\n TESTS::\n\n sage: M = ModularFormsRing(Gamma0(6))\n sage: P = QQ['x, y, z']\n sage: M._generators_variables_dictionnary(P, M.gen_forms())\n {z: q^2 - 2*q^3 + 3*q^4 + O(q^6),\n y: q + 5*q^3 - 2*q^4 + 6*q^5 + O(q^6),\n x: 1 + 24*q^3 + O(q^6)}\n \"\"\"\n if poly_parent.base_ring() != self.base_ring():\n raise ValueError('the base ring of `poly_parent` must be the same as the base ring of the modular forms ring')\n nb_var = poly_parent.ngens()\n nb_gens = self.ngens()\n if nb_var != nb_gens:\n raise ValueError('the number of variables (%s) must be equal to the number of generators of the modular forms ring (%s)'%(nb_var, self.ngens()))\n return {poly_parent.gen(i) : self(gens[i]) for i in range(0, nb_var)}\n\n def from_polynomial(self, polynomial, gens=None):\n r\"\"\"\n Convert the given polynomial to a graded form living in ``self``. If\n ``gens`` is ``None`` then the list of generators given by the method\n :meth:`gen_forms` will be used. Otherwise, ``gens`` should be a list of\n generators.\n\n INPUT:\n\n - ``polynomial`` -- A multivariate polynomial. The variables names of\n the polynomial should be different from ``'q'``. 
The number of\n variable of this polynomial should equal the number of generators\n - ``gens`` -- list (default: ``None``) of generators of the modular\n forms ring\n\n OUTPUT: A ``GradedModularFormElement`` given by the polynomial\n relation ``polynomial``.\n\n EXAMPLES::\n\n sage: M = ModularFormsRing(1)\n sage: x,y = polygens(QQ, 'x,y')\n sage: M.from_polynomial(x^2+y^3)\n 2 - 1032*q + 774072*q^2 - 77047584*q^3 - 11466304584*q^4 - 498052467504*q^5 + O(q^6)\n sage: M = ModularFormsRing(Gamma0(6))\n sage: M.ngens()\n 3\n sage: x,y,z = polygens(QQ, 'x,y,z')\n sage: M.from_polynomial(x+y+z)\n 1 + q + q^2 + 27*q^3 + q^4 + 6*q^5 + O(q^6)\n sage: M.0 + M.1 + M.2\n 1 + q + q^2 + 27*q^3 + q^4 + 6*q^5 + O(q^6)\n sage: P = x.parent()\n sage: M.from_polynomial(P(1/2))\n 1/2\n\n Note that the number of variables must be equal to the number of generators::\n\n sage: x, y = polygens(QQ, 'x, y')\n sage: M(x + y)\n Traceback (most recent call last):\n ...\n ValueError: the number of variables (2) must be equal to the number of generators of the modular forms ring (3)\n\n TESTS::\n\n sage: x,y = polygens(GF(7), 'x, y')\n sage: ModularFormsRing(1, GF(7))(x)\n Traceback (most recent call last):\n ...\n NotImplementedError: conversion from polynomial is not implemented if the base ring is not Q\n\n ..TODO::\n\n * add conversion for symbolic expressions?\n \"\"\"\n if not self.base_ring() == QQ: # this comes from the method gens_form\n raise NotImplementedError(\"conversion from polynomial is not implemented if the base ring is not Q\")\n if not isinstance(polynomial, MPolynomial):\n raise TypeError('`polynomial` must be a multivariate polynomial')\n if gens is None:\n gens = self.gen_forms()\n dict = self._generators_variables_dictionnary(polynomial.parent(), gens)\n if polynomial.is_constant():\n return self(polynomial.constant_coefficient())\n return polynomial.substitute(dict)\n\n def _element_constructor_(self, forms_datum):\n r\"\"\"\n The call method of self.\n\n INPUT:\n\n - ``forms_datum`` (dict, list, ModularFormElement, GradedModularFormElement, RingElement, Multivariate polynomial) - Try to coerce\n ``forms_datum`` into self.\n\n TESTS::\n\n sage: M = ModularFormsRing(1)\n sage: E4 = ModularForms(1,4).0; E6 = ModularForms(1,6).0\n sage: M([E4, E6])\n 2 - 264*q - 14472*q^2 - 116256*q^3 - 515208*q^4 - 1545264*q^5 + O(q^6)\n sage: M([E4, E4])\n 2 + 480*q + 4320*q^2 + 13440*q^3 + 35040*q^4 + 60480*q^5 + O(q^6)\n sage: M({4:E4, 6:E6})\n 2 - 264*q - 14472*q^2 - 116256*q^3 - 515208*q^4 - 1545264*q^5 + O(q^6)\n sage: M(E4)\n 1 + 240*q + 2160*q^2 + 6720*q^3 + 17520*q^4 + 30240*q^5 + O(q^6)\n sage: x,y = polygens(QQ, 'x,y')\n sage: M(x^2+x^3 + x*y + y^6)\n 4 - 2088*q + 3816216*q^2 - 2296935072*q^3 + 720715388184*q^4 - 77528994304752*q^5 + O(q^6)\n sage: f = ModularForms(3, 10).0\n sage: M(f)\n Traceback (most recent call last):\n ...\n ValueError: the group (Congruence Subgroup Gamma0(3)) and/or the base ring (Rational Field) of the given modular form is not consistant with the base space: Ring of Modular Forms for Modular Group SL(2,Z) over Rational Field\n sage: M = ModularFormsRing(1, base_ring=ZZ)\n sage: M(ModularForms(1,4).0)\n Traceback (most recent call last):\n ...\n ValueError: the group (Modular Group SL(2,Z)) and/or the base ring (Rational Field) of the given modular form is not consistant with the base space: Ring of Modular Forms for Modular Group SL(2,Z) over Integer Ring\n sage: M('x')\n Traceback (most recent call last):\n ...\n TypeError: the defining data structure should be a single 
modular form, a ring element, a list of modular forms, a multivariate polynomial or a dictionary\n sage: P.<t> = PowerSeriesRing(QQ)\n sage: e = 1 + 240*t + 2160*t^2 + 6720*t^3 + 17520*t^4 + 30240*t^5 + O(t^6)\n sage: ModularFormsRing(1)(e)\n Traceback (most recent call last):\n ...\n NotImplementedError: conversion from q-expansion not yet implemented\n \"\"\"\n if isinstance(forms_datum, (dict, list)):\n forms_dictionary = forms_datum\n elif isinstance(forms_datum, self.element_class):\n forms_dictionary = forms_datum._forms_dictionary\n elif is_ModularFormElement(forms_datum):\n if self.group().is_subgroup(forms_datum.group()) and self.base_ring().has_coerce_map_from(forms_datum.base_ring()):\n forms_dictionary = {forms_datum.weight():forms_datum}\n else:\n raise ValueError('the group (%s) and/or the base ring (%s) of the given modular form is not consistant with the base space: %s'%(forms_datum.group(), forms_datum.base_ring(), self))\n elif forms_datum in self.base_ring():\n forms_dictionary = {0:forms_datum}\n elif isinstance(forms_datum, MPolynomial):\n return self.from_polynomial(forms_datum)\n elif isinstance(forms_datum, PowerSeries_poly):\n raise NotImplementedError(\"conversion from q-expansion not yet implemented\")\n else:\n raise TypeError('the defining data structure should be a single modular form, a ring element, a list of modular forms, a multivariate polynomial or a dictionary')\n return self.element_class(self, forms_dictionary)\n\n def zero(self):\n r\"\"\"\n Return the zero element of this ring.\n\n EXAMPLES::\n\n sage: M = ModularFormsRing(1)\n sage: zer = M.zero(); zer\n 0\n sage: zer.is_zero()\n True\n sage: E4 = ModularForms(1,4).0; E4\n 1 + 240*q + 2160*q^2 + 6720*q^3 + 17520*q^4 + 30240*q^5 + O(q^6)\n sage: E4 + zer\n 1 + 240*q + 2160*q^2 + 6720*q^3 + 17520*q^4 + 30240*q^5 + O(q^6)\n sage: zer * E4\n 0\n sage: E4 * zer\n 0\n \"\"\"\n return self.element_class(self, {})\n\n def one(self):\n r\"\"\"\n Return the one element of this ring.\n\n EXAMPLES::\n\n sage: M = ModularFormsRing(1)\n sage: u = M.one(); u\n 1\n sage: u.is_one()\n True\n sage: u + u\n 2\n sage: E4 = ModularForms(1,4).0; E4\n 1 + 240*q + 2160*q^2 + 6720*q^3 + 17520*q^4 + 30240*q^5 + O(q^6)\n sage: E4 * u\n 1 + 240*q + 2160*q^2 + 6720*q^3 + 17520*q^4 + 30240*q^5 + O(q^6)\n \"\"\"\n return self.element_class(self, {0: self.base_ring().one()})\n\n def _coerce_map_from_(self, M):\n r\"\"\"\n Code to make ModularFormRing work well with coercion framework.\n\n TESTS::\n\n sage: M = ModularFormsRing(1)\n sage: E6 = ModularForms(1,6).0\n sage: D = ModularForms(1,12).1\n sage: M(D) + E6\n 2 - 282744/691*q + 122757768/691*q^2 + 11521760544/691*q^3 + 274576933512/691*q^4 + 3198130142256/691*q^5 + O(q^6)\n sage: D + M(E6)\n 2 - 282744/691*q + 122757768/691*q^2 + 11521760544/691*q^3 + 274576933512/691*q^4 + 3198130142256/691*q^5 + O(q^6)\n sage: 14 + M(D)\n 15 + 65520/691*q + 134250480/691*q^2 + 11606736960/691*q^3 + 274945048560/691*q^4 + 3199218815520/691*q^5 + O(q^6)\n sage: M(D) + 53\n 54 + 65520/691*q + 134250480/691*q^2 + 11606736960/691*q^3 + 274945048560/691*q^4 + 3199218815520/691*q^5 + O(q^6)\n \"\"\"\n if is_ModularFormsSpace(M):\n if M.group() == self.group() and self.has_coerce_map_from(M.base_ring()):\n return True\n if self.base_ring().has_coerce_map_from(M):\n return True\n return False\n\n def __richcmp__(self, other, op):\n r\"\"\"\n Compare self to other.\n\n Rings are equal if and only if their groups and base rings are.\n\n EXAMPLES::\n\n sage: ModularFormsRing(3) == 3\n False\n sage: 
ModularFormsRing(Gamma0(3)) == ModularFormsRing(Gamma0(7))\n False\n sage: ModularFormsRing(Gamma0(3)) == ModularFormsRing(Gamma0(3))\n True\n \"\"\"\n if not isinstance(other, ModularFormsRing):\n return NotImplemented\n\n return richcmp((self.group(), self.base_ring()),\n (other.group(), other.base_ring()), op)\n\n def _repr_(self):\n r\"\"\"\n String representation of self.\n\n EXAMPLES::\n\n sage: ModularFormsRing(Gamma0(13))._repr_()\n 'Ring of Modular Forms for Congruence Subgroup Gamma0(13) over Rational Field'\n sage: ModularFormsRing(Gamma1(13), base_ring=ZZ)._repr_()\n 'Ring of Modular Forms for Congruence Subgroup Gamma1(13) over Integer Ring'\n \"\"\"\n return \"Ring of Modular Forms for %s over %s\" % (self.group(), self.base_ring())\n\n def modular_forms_of_weight(self, weight):\n \"\"\"\n Return the space of modular forms on this group of the given weight.\n\n EXAMPLES::\n\n sage: R = ModularFormsRing(13)\n sage: R.modular_forms_of_weight(10)\n Modular Forms space of dimension 11 for Congruence Subgroup Gamma0(13) of weight 10 over Rational Field\n sage: ModularFormsRing(Gamma1(13)).modular_forms_of_weight(3)\n Modular Forms space of dimension 20 for Congruence Subgroup Gamma1(13) of weight 3 over Rational Field\n \"\"\"\n return ModularForms(self.group(), weight)\n\n def generators(self, maxweight=8, prec=10, start_gens=[], start_weight=2):\n r\"\"\"\n If `R` is the base ring of self, then this function calculates a set of\n modular forms which generate the `R`-algebra of all modular forms of\n weight up to ``maxweight`` with coefficients in `R`.\n\n INPUT:\n\n - ``maxweight`` (integer, default: 8) -- check up to this weight for\n generators\n\n - ``prec`` (integer, default: 10) -- return `q`-expansions to this\n precision\n\n - ``start_gens`` (list, default: ``[]``) -- list of pairs `(k, f)`, or\n triples `(k, f, F)`, where:\n\n - `k` is an integer,\n - `f` is the `q`-expansion of a modular form of weight `k`, as a power series over the base ring of self,\n - `F` (if provided) is a modular form object corresponding to F.\n\n If this list is nonempty, we find a minimal generating set containing\n these forms. If `F` is not supplied, then `f` needs to have\n sufficiently large precision (an error will be raised if this is not\n the case); otherwise, more terms will be calculated from the modular\n form object `F`.\n\n - ``start_weight`` (integer, default: 2) -- calculate the graded\n subalgebra of forms of weight at least ``start_weight``.\n\n OUTPUT:\n\n a list of pairs (k, f), where f is the q-expansion to precision\n ``prec`` of a modular form of weight k.\n\n .. SEEALSO::\n\n :meth:`gen_forms`, which does exactly the same thing, but returns\n Sage modular form objects rather than bare power series, and keeps\n track of a lifting to characteristic 0 when the base ring is a\n finite field.\n\n .. NOTE::\n\n If called with the default values of ``start_gens`` (an empty list)\n and ``start_weight`` (2), the values will be cached for re-use on\n subsequent calls to this function. (This cache is shared with\n :meth:`gen_forms`). 
If called with non-default values for these\n parameters, caching will be disabled.\n\n EXAMPLES::\n\n sage: ModularFormsRing(SL2Z).generators()\n [(4, 1 + 240*q + 2160*q^2 + 6720*q^3 + 17520*q^4 + 30240*q^5 + 60480*q^6 + 82560*q^7 + 140400*q^8 + 181680*q^9 + O(q^10)), (6, 1 - 504*q - 16632*q^2 - 122976*q^3 - 532728*q^4 - 1575504*q^5 - 4058208*q^6 - 8471232*q^7 - 17047800*q^8 - 29883672*q^9 + O(q^10))]\n sage: s = ModularFormsRing(SL2Z).generators(maxweight=5, prec=3); s\n [(4, 1 + 240*q + 2160*q^2 + O(q^3))]\n sage: s[0][1].parent()\n Power Series Ring in q over Rational Field\n\n sage: ModularFormsRing(1).generators(prec=4)\n [(4, 1 + 240*q + 2160*q^2 + 6720*q^3 + O(q^4)), (6, 1 - 504*q - 16632*q^2 - 122976*q^3 + O(q^4))]\n sage: ModularFormsRing(2).generators(prec=12)\n [(2, 1 + 24*q + 24*q^2 + 96*q^3 + 24*q^4 + 144*q^5 + 96*q^6 + 192*q^7 + 24*q^8 + 312*q^9 + 144*q^10 + 288*q^11 + O(q^12)), (4, 1 + 240*q^2 + 2160*q^4 + 6720*q^6 + 17520*q^8 + 30240*q^10 + O(q^12))]\n sage: ModularFormsRing(4).generators(maxweight=2, prec=20)\n [(2, 1 + 24*q^2 + 24*q^4 + 96*q^6 + 24*q^8 + 144*q^10 + 96*q^12 + 192*q^14 + 24*q^16 + 312*q^18 + O(q^20)), (2, q + 4*q^3 + 6*q^5 + 8*q^7 + 13*q^9 + 12*q^11 + 14*q^13 + 24*q^15 + 18*q^17 + 20*q^19 + O(q^20))]\n\n Here we see that for ``\\Gamma_0(11)`` taking a basis of forms in weights 2\n and 4 is enough to generate everything up to weight 12 (and probably\n everything else).::\n\n sage: v = ModularFormsRing(11).generators(maxweight=12)\n sage: len(v)\n 3\n sage: [k for k, _ in v]\n [2, 2, 4]\n sage: from sage.modular.dims import dimension_modular_forms\n sage: dimension_modular_forms(11,2)\n 2\n sage: dimension_modular_forms(11,4)\n 4\n\n For congruence subgroups not containing -1, we miss out some forms since we\n can't calculate weight 1 forms at present, but we can still find generators\n for the ring of forms of weight `\\ge 2`::\n\n sage: ModularFormsRing(Gamma1(4)).generators(prec=10, maxweight=10)\n [(2, 1 + 24*q^2 + 24*q^4 + 96*q^6 + 24*q^8 + O(q^10)),\n (2, q + 4*q^3 + 6*q^5 + 8*q^7 + 13*q^9 + O(q^10)),\n (3, 1 + 12*q^2 + 64*q^3 + 60*q^4 + 160*q^6 + 384*q^7 + 252*q^8 + O(q^10)),\n (3, q + 4*q^2 + 8*q^3 + 16*q^4 + 26*q^5 + 32*q^6 + 48*q^7 + 64*q^8 + 73*q^9 + O(q^10))]\n\n Using different base rings will change the generators::\n\n sage: ModularFormsRing(Gamma0(13)).generators(maxweight=12, prec=4)\n [(2, 1 + 2*q + 6*q^2 + 8*q^3 + O(q^4)), (4, 1 + O(q^4)), (4, q + O(q^4)), (4, q^2 + O(q^4)), (4, q^3 + O(q^4)), (6, 1 + O(q^4)), (6, q + O(q^4))]\n sage: ModularFormsRing(Gamma0(13),base_ring=ZZ).generators(maxweight=12, prec=4)\n [(2, 1 + 2*q + 6*q^2 + 8*q^3 + O(q^4)), (4, q + 4*q^2 + 10*q^3 + O(q^4)), (4, 2*q^2 + 5*q^3 + O(q^4)), (4, q^2 + O(q^4)), (4, -2*q^3 + O(q^4)), (6, O(q^4)), (6, O(q^4)), (12, O(q^4))]\n sage: [k for k,f in ModularFormsRing(1, QQ).generators(maxweight=12)]\n [4, 6]\n sage: [k for k,f in ModularFormsRing(1, ZZ).generators(maxweight=12)]\n [4, 6, 12]\n sage: [k for k,f in ModularFormsRing(1, Zmod(5)).generators(maxweight=12)]\n [4, 6]\n sage: [k for k,f in ModularFormsRing(1, Zmod(2)).generators(maxweight=12)]\n [4, 6, 12]\n\n An example where ``start_gens`` are specified::\n\n sage: M = ModularForms(11, 2); f = (M.0 + M.1).qexp(8)\n sage: ModularFormsRing(11).generators(start_gens = [(2, f)])\n Traceback (most recent call last):\n ...\n ValueError: Requested precision cannot be higher than precision of approximate starting generators!\n sage: f = (M.0 + M.1).qexp(10); f\n 1 + 17/5*q + 26/5*q^2 + 43/5*q^3 + 94/5*q^4 + 77/5*q^5 + 
154/5*q^6 + 86/5*q^7 + 36*q^8 + 146/5*q^9 + O(q^10)\n sage: ModularFormsRing(11).generators(start_gens = [(2, f)])\n [(2, 1 + 17/5*q + 26/5*q^2 + 43/5*q^3 + 94/5*q^4 + 77/5*q^5 + 154/5*q^6 + 86/5*q^7 + 36*q^8 + 146/5*q^9 + O(q^10)), (2, 1 + 12*q^2 + 12*q^3 + 12*q^4 + 12*q^5 + 24*q^6 + 24*q^7 + 36*q^8 + 36*q^9 + O(q^10)), (4, 1 + O(q^10))]\n \"\"\"\n sgs = []\n for x in start_gens:\n if len(x) == 2:\n if x[1].prec() < prec:\n raise ValueError(\"Requested precision cannot be higher than precision of approximate starting generators!\")\n sgs.append((x[0], x[1], None))\n else:\n sgs.append(x)\n\n G = self._find_generators(maxweight, tuple(sgs), start_weight)\n\n ret = []\n # Returned generators may be a funny mixture of precisions if start_gens has been used.\n for k, f, F in G:\n if f.prec() < prec:\n f = F.qexp(prec).change_ring(self.base_ring())\n else:\n f = f.truncate_powerseries(prec)\n ret.append((k, f))\n\n return ret\n\n\n def gen_forms(self, maxweight=8, start_gens=[], start_weight=2):\n r\"\"\"\n This function calculates a list of modular forms generating this ring\n (as an algebra over the appropriate base ring). It differs from\n :meth:`generators` only in that it returns Sage modular form objects,\n rather than bare `q`-expansions; and if the base ring is a finite\n field, the modular forms returned will be forms in characteristic 0\n with integral `q`-expansions whose reductions modulo `p` generate the\n ring of modular forms mod `p`.\n\n INPUT:\n\n - ``maxweight`` (integer, default: 8) -- calculate forms generating all\n forms up to this weight.\n\n - ``start_gens`` (list, default: ``[]``) -- a list of modular forms. If\n this list is nonempty, we find a minimal generating set containing\n these forms.\n\n - ``start_weight`` (integer, default: 2) -- calculate the graded\n subalgebra of forms of weight at least ``start_weight``.\n\n .. NOTE::\n\n If called with the default values of ``start_gens`` (an empty list)\n and ``start_weight`` (2), the values will be cached for re-use on\n subsequent calls to this function. (This cache is shared with\n :meth:`generators`). If called with non-default values for these\n parameters, caching will be disabled.\n\n EXAMPLES::\n\n sage: A = ModularFormsRing(Gamma0(11), Zmod(5)).gen_forms(); A\n [1 + 12*q^2 + 12*q^3 + 12*q^4 + 12*q^5 + O(q^6), q - 2*q^2 - q^3 + 2*q^4 + q^5 + O(q^6), q - 9*q^4 - 10*q^5 + O(q^6)]\n sage: A[0].parent()\n Modular Forms space of dimension 2 for Congruence Subgroup Gamma0(11) of weight 2 over Rational Field\n \"\"\"\n sgs = tuple( (F.weight(), None, F) for F in start_gens )\n G = self._find_generators(maxweight, sgs, start_weight)\n return [F for k,f,F in G]\n\n def _find_generators(self, maxweight, start_gens, start_weight):\n r\"\"\"\n For internal use. This function is called by :meth:`generators` and\n :meth:`gen_forms`: it returns a list of triples `(k, f, F)` where `F`\n is a modular form of weight `k` and `f` is its `q`-expansion coerced\n into the base ring of self.\n\n INPUT:\n\n - maxweight: maximum weight to try\n - start_weight: minimum weight to try\n - start_gens: a sequence of tuples of the form `(k, f, F)`, where `F` is a\n modular form of weight `k` and `f` is its `q`-expansion coerced into\n ``self.base_ring()`. 
Either (but not both) of `f` and `F` may be\n None.\n\n OUTPUT:\n\n a list of tuples, formatted as with ``start_gens``.\n\n EXAMPLES::\n\n sage: R = ModularFormsRing(Gamma1(4))\n sage: R._find_generators(8, (), 2)\n [(2, 1 + 24*q^2 + 24*q^4 + 96*q^6 + 24*q^8 + O(q^9), 1 + 24*q^2 + 24*q^4 + O(q^6)), (2, q + 4*q^3 + 6*q^5 + 8*q^7 + O(q^9), q + 4*q^3 + 6*q^5 + O(q^6)), (3, 1 + 12*q^2 + 64*q^3 + 60*q^4 + 160*q^6 + 384*q^7 + 252*q^8 + O(q^9), 1 + 12*q^2 + 64*q^3 + 60*q^4 + O(q^6)), (3, q + 4*q^2 + 8*q^3 + 16*q^4 + 26*q^5 + 32*q^6 + 48*q^7 + 64*q^8 + O(q^9), q + 4*q^2 + 8*q^3 + 16*q^4 + 26*q^5 + O(q^6))]\n \"\"\"\n default_params = (start_gens == () and start_weight == 2)\n\n if default_params and self.__cached_maxweight != -1:\n verbose(\"Already know generators up to weight %s -- using those\" % self.__cached_maxweight)\n\n if self.__cached_maxweight >= maxweight:\n return [(k, f, F) for k, f, F in self.__cached_gens if k <= maxweight]\n\n start_gens = self.__cached_gens\n start_weight = self.__cached_maxweight + 1\n\n if self.group().is_even():\n increment = 2\n else:\n increment = 1\n\n working_prec = self.modular_forms_of_weight(maxweight).sturm_bound()\n\n # parse the list of start gens\n G = []\n for x in start_gens:\n k, f, F = x\n if F is None and f.prec() < working_prec:\n raise ValueError(\"Need start gens to precision at least %s\" % working_prec)\n elif f is None or f.prec() < working_prec:\n f = F.qexp(working_prec).change_ring(self.base_ring())\n G.append((k, f, F))\n\n k = start_weight\n if increment == 2 and (k % 2) == 1:\n k += 1\n\n while k <= maxweight:\n\n if self.modular_forms_of_weight(k).dimension() == 0:\n k += increment\n continue\n\n verbose('Looking at k = %s' % k)\n M = self.modular_forms_of_weight(k)\n\n # 1. Multiply together all forms in G that give an element\n # of M.\n if G:\n F = _span_of_forms_in_weight(G, k, M.sturm_bound(), None, False)\n else:\n F = (self.base_ring() ** M.sturm_bound()).zero_submodule()\n\n # 2. If the dimension of the span of the result is equal\n # to the dimension of M, increment k.\n if F.rank() == M.dimension():\n if self.base_ring().is_field() or F.index_in_saturation() == 1:\n # TODO: Do something clever if the submodule's of the right\n # rank but not saturated -- avoid triggering needless\n # modular symbol computations.\n verbose('Nothing new in weight %s' % k)\n k += increment\n continue\n\n # 3. 
If the dimension is less, compute a basis for G, and\n # try adding basis elements of M into G.\n\n verbose(\"Known generators span a subspace of dimension %s of space of dimension %s\" % (F.dimension(), M.dimension()))\n if self.base_ring() == ZZ:\n verbose(\"saturation index is %s\" % F.index_in_saturation())\n\n t = verbose(\"Computing more modular forms at weight %s\" % k)\n kprec = M.sturm_bound()\n if self.base_ring() == QQ:\n B = M.q_echelon_basis(working_prec)\n else:\n B = M.q_integral_basis(working_prec)\n t = verbose(\"done computing forms\", t)\n V = F.ambient_module().submodule_with_basis([f.padded_list(kprec) for f in B])\n Q = V / F\n for q in Q.gens():\n try:\n qc = V.coordinates(Q.lift(q))\n except AttributeError:\n # work around a silly free module bug\n qc = V.coordinates(q.lift())\n qcZZ = [ZZ(_) for _ in qc] # lift to ZZ so we can define F\n f = sum([B[i] * qcZZ[i] for i in range(len(B))])\n F = M(f)\n G.append((k, f.change_ring(self.base_ring()), F))\n\n verbose('added %s new generators' % Q.ngens(), t)\n k += increment\n\n if default_params:\n self.__cached_maxweight = maxweight\n self.__cached_gens = G\n\n return G\n\n @cached_method\n def q_expansion_basis(self, weight, prec=None, use_random=True):\n r\"\"\"\n Calculate a basis of q-expansions for the space of modular forms of the\n given weight for this group, calculated using the ring generators given\n by ``find_generators``.\n\n INPUT:\n\n - ``weight`` (integer) -- the weight\n - ``prec`` (integer or ``None``, default: ``None``) -- power series\n precision. If ``None``, the precision defaults to the Sturm bound for\n the requested level and weight.\n - ``use_random`` (boolean, default: True) -- whether or not to use a\n randomized algorithm when building up the space of forms at the given\n weight from known generators of small weight.\n\n EXAMPLES::\n\n sage: m = ModularFormsRing(Gamma0(4))\n sage: m.q_expansion_basis(2,10)\n [1 + 24*q^2 + 24*q^4 + 96*q^6 + 24*q^8 + O(q^10),\n q + 4*q^3 + 6*q^5 + 8*q^7 + 13*q^9 + O(q^10)]\n sage: m.q_expansion_basis(3,10)\n []\n\n sage: X = ModularFormsRing(SL2Z)\n sage: X.q_expansion_basis(12, 10)\n [1 + 196560*q^2 + 16773120*q^3 + 398034000*q^4 + 4629381120*q^5 + 34417656000*q^6 + 187489935360*q^7 + 814879774800*q^8 + 2975551488000*q^9 + O(q^10),\n q - 24*q^2 + 252*q^3 - 1472*q^4 + 4830*q^5 - 6048*q^6 - 16744*q^7 + 84480*q^8 - 113643*q^9 + O(q^10)]\n\n We calculate a basis of a massive modular forms space, in two ways.\n Using this module is about twice as fast as Sage's generic code. 
::\n\n sage: A = ModularFormsRing(11).q_expansion_basis(30, prec=40) # long time (5s)\n sage: B = ModularForms(Gamma0(11), 30).q_echelon_basis(prec=40) # long time (9s)\n sage: A == B # long time\n True\n\n Check that absurdly small values of ``prec`` don't mess things up::\n\n sage: ModularFormsRing(11).q_expansion_basis(10, prec=5)\n [1 + O(q^5), q + O(q^5), q^2 + O(q^5), q^3 + O(q^5), q^4 + O(q^5), O(q^5), O(q^5), O(q^5), O(q^5), O(q^5)]\n \"\"\"\n d = self.modular_forms_of_weight(weight).dimension()\n if d == 0:\n return []\n\n if prec is None:\n prec=self.modular_forms_of_weight(weight).sturm_bound()\n\n working_prec = max(prec, self.modular_forms_of_weight(weight).sturm_bound())\n\n gen_weight = min(6, weight)\n\n while True:\n verbose(\"Trying to generate the %s-dimensional space at weight %s using generators of weight up to %s\" % (d, weight, gen_weight))\n G = self.generators(maxweight=gen_weight, prec=working_prec)\n V = _span_of_forms_in_weight(G, weight, prec=working_prec, use_random=use_random, stop_dim=d)\n if V.rank() == d and (self.base_ring().is_field() or V.index_in_saturation() == 1):\n break\n else:\n gen_weight += 1\n verbose(\"Need more generators: trying again with generators of weight up to %s\" % gen_weight)\n\n R = G[0][1].parent()\n return [R(list(x), prec=prec) for x in V.gens()]\n\n def cuspidal_ideal_generators(self, maxweight=8, prec=None):\n r\"\"\"\n Calculate generators for the ideal of cuspidal forms in this ring, as a\n module over the whole ring.\n\n EXAMPLES::\n\n sage: ModularFormsRing(Gamma0(3)).cuspidal_ideal_generators(maxweight=12)\n [(6, q - 6*q^2 + 9*q^3 + 4*q^4 + O(q^5), q - 6*q^2 + 9*q^3 + 4*q^4 + 6*q^5 + O(q^6))]\n sage: [k for k,f,F in ModularFormsRing(13, base_ring=ZZ).cuspidal_ideal_generators(maxweight=14)]\n [4, 4, 4, 6, 6, 12]\n \"\"\"\n working_prec = self.modular_forms_of_weight(maxweight).sturm_bound()\n\n if self.__cached_cusp_maxweight > -1:\n k = self.__cached_cusp_maxweight + 1\n verbose(\"Already calculated cusp gens up to weight %s -- using those\" % (k-1))\n\n # we may need to increase the precision of the cached cusp\n # generators\n G = []\n for j,f,F in self.__cached_cusp_gens:\n if f.prec() >= working_prec:\n f = F.qexp(working_prec).change_ring(self.base_ring())\n G.append( (j,f,F) )\n else:\n k = 2\n G = []\n\n\n while k <= maxweight:\n t = verbose(\"Looking for cusp generators in weight %s\" % k)\n\n kprec = self.modular_forms_of_weight(k).sturm_bound()\n\n flist = []\n\n for (j, f, F) in G:\n for g in self.q_expansion_basis(k - j, prec=kprec):\n flist.append(g*f)\n A = self.base_ring() ** kprec\n W = A.span([A(f.padded_list(kprec)) for f in flist])\n\n S = self.modular_forms_of_weight(k).cuspidal_submodule()\n if (W.rank() == S.dimension()\n and (self.base_ring().is_field() or W.index_in_saturation() == 1)):\n verbose(\"Nothing new in weight %s\" % k, t)\n k += 1\n continue\n\n t = verbose(\"Known cusp generators span a submodule of dimension %s of space of dimension %s\" % (W.rank(), S.dimension()), t)\n\n B = S.q_integral_basis(prec=working_prec)\n V = A.span([A(f.change_ring(self.base_ring()).padded_list(kprec)) for f in B])\n Q = V/W\n\n for q in Q.gens():\n try:\n qc = V.coordinates(Q.lift(q))\n except AttributeError:\n # work around a silly free module bug\n qc = V.coordinates(q.lift())\n qcZZ = [ZZ(_) for _ in qc] # lift to ZZ so we can define F\n f = sum([B[i] * qcZZ[i] for i in range(len(B))])\n F = S(f)\n G.append((k, f.change_ring(self.base_ring()), F))\n\n verbose('added %s new generators' % Q.ngens(), t)\n 
k += 1\n\n self.__cached_cusp_maxweight = maxweight\n self.__cached_cusp_gens = G\n\n if prec is None:\n return G\n elif prec <= working_prec:\n return [ (k, f.truncate_powerseries(prec), F) for k,f,F in G]\n else:\n # user wants increased precision, so we may as well cache that\n Gnew = [ (k, F.qexp(prec).change_ring(self.base_ring()), F) for k,f,F in G]\n self.__cached_cusp_gens = Gnew\n return Gnew\n\n def cuspidal_submodule_q_expansion_basis(self, weight, prec=None):\n r\"\"\"\n Calculate a basis of `q`-expansions for the space of cusp forms of\n weight ``weight`` for this group.\n\n INPUT:\n\n - ``weight`` (integer) -- the weight\n - ``prec`` (integer or None) -- precision of `q`-expansions to return\n\n ALGORITHM: Uses the method :meth:`cuspidal_ideal_generators` to\n calculate generators of the ideal of cusp forms inside this ring. Then\n multiply these up to weight ``weight`` using the generators of the\n whole modular form space returned by :meth:`q_expansion_basis`.\n\n EXAMPLES::\n\n sage: R = ModularFormsRing(Gamma0(3))\n sage: R.cuspidal_submodule_q_expansion_basis(20)\n [q - 8532*q^6 - 88442*q^7 + O(q^8), q^2 + 207*q^6 + 24516*q^7 + O(q^8), q^3 + 456*q^6 + O(q^8), q^4 - 135*q^6 - 926*q^7 + O(q^8), q^5 + 18*q^6 + 135*q^7 + O(q^8)]\n\n We compute a basis of a space of very large weight, quickly (using this\n module) and slowly (using modular symbols), and verify that the answers\n are the same. ::\n\n sage: A = R.cuspidal_submodule_q_expansion_basis(80, prec=30) # long time (1s on sage.math, 2013)\n sage: B = R.modular_forms_of_weight(80).cuspidal_submodule().q_expansion_basis(prec=30) # long time (19s on sage.math, 2013)\n sage: A == B # long time\n True\n \"\"\"\n d = self.modular_forms_of_weight(weight).cuspidal_submodule().dimension()\n if d == 0:\n return []\n\n minprec = self.modular_forms_of_weight(weight).sturm_bound()\n if prec is None:\n prec = working_prec = minprec\n else:\n working_prec = max(prec, minprec)\n\n gen_weight = min(6, weight)\n\n while True:\n verbose(\"Trying to generate the %s-dimensional cuspidal submodule at weight %s using generators of weight up to %s\" % (d, weight, gen_weight))\n G = self.cuspidal_ideal_generators(maxweight=gen_weight, prec=working_prec)\n\n flist = []\n for (j, f, F) in G:\n for g in self.q_expansion_basis(weight - j, prec=working_prec):\n flist.append(g*f)\n\n A = self.base_ring() ** working_prec\n W = A.span([A(f.padded_list(working_prec)) for f in flist])\n if W.rank() == d and (self.base_ring().is_field() or W.index_in_saturation() == 1):\n break\n else:\n gen_weight += 1\n verbose(\"Need more generators: trying again with generators of weight up to %s\" % gen_weight)\n\n R = G[0][1].parent()\n return [R(list(x), prec=prec) for x in W.gens()]\n\n\n# Deprecated functions\nfind_generators = deprecated_function_alias(31559, ModularFormsRing.generators)\nbasis_for_modform_space = deprecated_function_alias(31559, ModularFormsRing.q_expansion_basis)\n", "id": "4449888", "language": "Python", "matching_score": 2.070197105407715, "max_stars_count": 1742, "path": "src/sage/modular/modform/ring.py" }, { "content": "r\"\"\"\nVerma Modules\n\nAUTHORS:\n\n- <NAME> (2017-06-30): Initial version\n\n.. 
TODO::\n\n Implement a :class:`sage.categories.pushout.ConstructionFunctor`\n and return as the ``construction()``.\n\"\"\"\n\n# ****************************************************************************\n# Copyright (C) 2017 <NAME> <tcscrims at gmail.com>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 2 of the License, or\n# (at your option) any later version.\n# https://www.gnu.org/licenses/\n# ****************************************************************************\n\nfrom sage.misc.lazy_attribute import lazy_attribute\nfrom sage.misc.cachefunc import cached_method\nfrom sage.categories.modules import Modules\nfrom sage.categories.morphism import Morphism\nfrom sage.categories.homset import Hom, Homset\nfrom sage.monoids.indexed_free_monoid import IndexedFreeAbelianMonoid\nfrom sage.combinat.free_module import CombinatorialFreeModule\nfrom sage.modules.free_module_element import vector\nfrom sage.sets.family import Family\nfrom sage.structure.richcmp import richcmp\nfrom sage.rings.integer_ring import ZZ\nfrom sage.rings.rational_field import QQ\n\n\nclass VermaModule(CombinatorialFreeModule):\n r\"\"\"\n A Verma module.\n\n Let `\\lambda` be a weight and `\\mathfrak{g}` be a Kac--Moody Lie\n algebra with a fixed Borel subalgebra `\\mathfrak{b} = \\mathfrak{h}\n \\oplus \\mathfrak{g}^+`. The *Verma module* `M_{\\lambda}` is a\n `U(\\mathfrak{g})`-module given by\n\n .. MATH::\n\n M_{\\lambda} := U(\\mathfrak{g}) \\otimes_{U(\\mathfrak{b})} F_{\\lambda},\n\n where `F_{\\lambda}` is the `U(\\mathfrak{b})` module such that\n `h \\in U(\\mathfrak{h})` acts as multiplication by\n `\\langle \\lambda, h \\rangle` and `U\\mathfrak{g}^+) F_{\\lambda} = 0`.\n\n INPUT:\n\n - ``g`` -- a Lie algebra\n - ``weight`` -- a weight\n\n EXAMPLES::\n\n sage: L = lie_algebras.sl(QQ, 3)\n sage: La = L.cartan_type().root_system().weight_lattice().fundamental_weights()\n sage: M = L.verma_module(2*La[1] + 3*La[2])\n sage: pbw = M.pbw_basis()\n sage: E1,E2,F1,F2,H1,H2 = [pbw(g) for g in L.gens()]\n sage: v = M.highest_weight_vector()\n sage: x = F2^3 * F1 * v\n sage: x\n f[-alpha[2]]^3*f[-alpha[1]]*v[2*Lambda[1] + 3*Lambda[2]]\n sage: F1 * x\n f[-alpha[2]]^3*f[-alpha[1]]^2*v[2*Lambda[1] + 3*Lambda[2]]\n + 3*f[-alpha[2]]^2*f[-alpha[1]]*f[-alpha[1] - alpha[2]]*v[2*Lambda[1] + 3*Lambda[2]]\n sage: E1 * x\n 2*f[-alpha[2]]^3*v[2*Lambda[1] + 3*Lambda[2]]\n sage: H1 * x\n 3*f[-alpha[2]]^3*f[-alpha[1]]*v[2*Lambda[1] + 3*Lambda[2]]\n sage: H2 * x\n -2*f[-alpha[2]]^3*f[-alpha[1]]*v[2*Lambda[1] + 3*Lambda[2]]\n\n REFERENCES:\n\n - :wikipedia:`Verma_module`\n \"\"\"\n def __init__(self, g, weight, basis_key=None, prefix='f', **kwds):\n \"\"\"\n Initialize ``self``.\n\n TESTS::\n\n sage: L = lie_algebras.sl(QQ, 3)\n sage: La = L.cartan_type().root_system().weight_lattice().fundamental_weights()\n sage: M = L.verma_module(La[1] + 4*La[2])\n sage: TestSuite(M).run()\n sage: M = L.verma_module(La[1] - 2*La[2])\n sage: TestSuite(M).run()\n\n sage: L = lie_algebras.sp(QQ, 4)\n sage: La = L.cartan_type().root_system().ambient_space().fundamental_weights()\n sage: M = L.verma_module(-1/2*La[1] + 3/7*La[2])\n sage: TestSuite(M).run()\n \"\"\"\n if basis_key is not None:\n self._basis_key = basis_key\n else:\n self._basis_key = g._basis_key\n\n self._weight = weight\n\n R = g.base_ring()\n self._g = g\n self._pbw = g.pbw_basis(basis_key=self._triangular_key)\n monomials = 
IndexedFreeAbelianMonoid(g._negative_half_index_set(),\n prefix,\n sorting_key=self._monoid_key,\n **kwds)\n CombinatorialFreeModule.__init__(self, R, monomials,\n prefix='', bracket=False, latex_bracket=False,\n sorting_key=self._monomial_key,\n category=Modules(R).WithBasis().Graded())\n\n def _triangular_key(self, x):\n \"\"\"\n Return a key for sorting for the index ``x`` that respects\n the triangular decomposition by `U^-, U^0, U^+`.\n\n EXAMPLES::\n\n sage: L = lie_algebras.sl(QQ, 3)\n sage: La = L.cartan_type().root_system().weight_lattice().fundamental_weights()\n sage: M = L.verma_module(La[1])\n sage: sorted(L.basis().keys(), key=L._basis_key)\n [alpha[2], alpha[1], alpha[1] + alpha[2],\n alphacheck[1], alphacheck[2],\n -alpha[2], -alpha[1], -alpha[1] - alpha[2]]\n sage: sorted(L.basis().keys(), key=M._triangular_key)\n [-alpha[2], -alpha[1], -alpha[1] - alpha[2],\n alphacheck[1], alphacheck[2],\n alpha[2], alpha[1], alpha[1] + alpha[2]]\n\n sage: def neg_key(x):\n ....: return -L.basis().keys().index(x)\n sage: sorted(L.basis().keys(), key=neg_key)\n [-alpha[1] - alpha[2], -alpha[1], -alpha[2],\n alphacheck[2], alphacheck[1],\n alpha[1] + alpha[2], alpha[1], alpha[2]]\n sage: N = L.verma_module(La[1], basis_key=neg_key)\n sage: sorted(L.basis().keys(), key=N._triangular_key)\n [-alpha[1] - alpha[2], -alpha[1], -alpha[2],\n alphacheck[2], alphacheck[1],\n alpha[1] + alpha[2], alpha[1], alpha[2]]\n \"\"\"\n return (self._g._part_on_basis(x), self._basis_key(x))\n\n def _monoid_key(self, x):\n \"\"\"\n Return a key for comparison in the underlying monoid of ``self``.\n\n EXAMPLES::\n\n sage: L = lie_algebras.sl(QQ, 3)\n sage: La = L.cartan_type().root_system().weight_lattice().fundamental_weights()\n sage: M = L.verma_module(La[1])\n sage: monoid = M.basis().keys()\n sage: prod(monoid.gens()) # indirect doctest\n f[-alpha[2]]*f[-alpha[1]]*f[-alpha[1] - alpha[2]]\n sage: [M._monoid_key(x) for x in monoid.an_element()._sorted_items()]\n [5, 6, 7]\n\n sage: def neg_key(x):\n ....: return -L.basis().keys().index(x)\n sage: M = L.verma_module(La[1], basis_key=neg_key)\n sage: monoid = M.basis().keys()\n sage: prod(monoid.gens()) # indirect doctest\n f[-alpha[1] - alpha[2]]*f[-alpha[1]]*f[-alpha[2]]\n sage: [M._monoid_key(x) for x in monoid.an_element()._sorted_items()]\n [-7, -6, -5]\n \"\"\"\n return self._basis_key(x[0])\n\n def _monomial_key(self, x):\n \"\"\"\n Compute the key for ``x`` so that the comparison is done by\n triangular decomposition and then reverse degree lexicographic order.\n\n EXAMPLES::\n\n sage: L = lie_algebras.sl(QQ, 3)\n sage: La = L.cartan_type().root_system().weight_lattice().fundamental_weights()\n sage: M = L.verma_module(La[1])\n sage: pbw = M.pbw_basis()\n sage: f1,f2 = pbw(L.f(1)), pbw(L.f(2))\n sage: f1 * f2 * f1 * M.highest_weight_vector() # indirect doctest\n f[-alpha[2]]*f[-alpha[1]]^2*v[Lambda[1]]\n + f[-alpha[1]]*f[-alpha[1] - alpha[2]]*v[Lambda[1]]\n\n sage: def neg_key(x):\n ....: return -L.basis().keys().index(x)\n sage: M = L.verma_module(La[1], basis_key=neg_key)\n sage: f1 * f2 * f1 * M.highest_weight_vector() # indirect doctest\n f[-alpha[1]]^2*f[-alpha[2]]*v[Lambda[1]]\n - f[-alpha[1] - alpha[2]]*f[-alpha[1]]*v[Lambda[1]]\n \"\"\"\n return (-len(x), [self._triangular_key(l) for l in x.to_word_list()])\n\n def _repr_(self):\n \"\"\"\n Return a string representation of ``self``.\n\n EXAMPLES::\n\n sage: L = LieAlgebra(QQ, cartan_type=['E',6])\n sage: La = L.cartan_type().root_system().weight_lattice().fundamental_weights()\n sage: M = 
L.verma_module(2*La[1] + 3*La[2] - 5*La[5])\n sage: M\n Verma module with highest weight 2*Lambda[1] + 3*Lambda[2] - 5*Lambda[5]\n of Lie algebra of ['E', 6] in the Chevalley basis\n \"\"\"\n return \"Verma module with highest weight {} of {}\".format(self._weight, self._g)\n\n def _latex_(self):\n r\"\"\"\n Return a latex representation of ``self``.\n\n EXAMPLES::\n\n sage: L = LieAlgebra(QQ, cartan_type=['E',7])\n sage: La = L.cartan_type().root_system().weight_space().fundamental_weights()\n sage: M = L.verma_module(2*La[1] + 7*La[4] - 3/4*La[7])\n sage: latex(M)\n M_{2\\Lambda_{1} + 7\\Lambda_{4} - \\frac{3}{4}\\Lambda_{7}}\n \"\"\"\n from sage.misc.latex import latex\n return \"M_{{{}}}\".format(latex(self._weight))\n\n def _repr_generator(self, m):\n r\"\"\"\n Return a string representation of the generator indexed by ``m``.\n\n EXAMPLES::\n\n sage: L = lie_algebras.sp(QQ, 4)\n sage: La = L.cartan_type().root_system().ambient_space().fundamental_weights()\n sage: M = L.verma_module(-1/2*La[1] + 3/7*La[2])\n sage: f1, f2 = L.f(1), L.f(2)\n sage: x = M.pbw_basis()(L([f1, [f1, f2]]))\n sage: v = x * M.highest_weight_vector()\n sage: M._repr_generator(v.leading_support())\n 'f[-2*alpha[1] - alpha[2]]*v[(-1/14, 3/7)]'\n\n sage: M.highest_weight_vector()\n v[(-1/14, 3/7)]\n sage: 2 * M.highest_weight_vector()\n 2*v[(-1/14, 3/7)]\n \"\"\"\n ret = super(VermaModule, self)._repr_generator(m)\n if ret == '1':\n ret = ''\n else:\n ret += '*'\n return ret + \"v[{}]\".format(self._weight)\n\n def _latex_generator(self, m):\n r\"\"\"\n Return a latex representation of the generator indexed by ``m``.\n\n EXAMPLES::\n\n sage: L = lie_algebras.sp(QQ, 4)\n sage: La = L.cartan_type().root_system().ambient_space().fundamental_weights()\n sage: M = L.verma_module(-1/2*La[1] + 3/7*La[2])\n sage: f1, f2 = L.f(1), L.f(2)\n sage: x = M.pbw_basis()(L([f1, [f1, f2]]))\n sage: v = x * M.highest_weight_vector()\n sage: M._latex_generator(v.leading_support())\n f_{-2\\alpha_{1} - \\alpha_{2}} v_{-\\frac{1}{14}e_{0} + \\frac{3}{7}e_{1}}\n\n sage: latex(2 * M.highest_weight_vector())\n 2 v_{-\\frac{1}{14}e_{0} + \\frac{3}{7}e_{1}}\n sage: latex(M.highest_weight_vector())\n v_{-\\frac{1}{14}e_{0} + \\frac{3}{7}e_{1}}\n \"\"\"\n ret = super(VermaModule, self)._latex_generator(m)\n if ret == '1':\n ret = ''\n from sage.misc.latex import latex\n return ret + \" v_{{{}}}\".format(latex(self._weight))\n\n _repr_term = _repr_generator\n _latex_term = _latex_generator\n\n def lie_algebra(self):\n \"\"\"\n Return the underlying Lie algebra of ``self``.\n\n EXAMPLES::\n\n sage: L = lie_algebras.so(QQ, 9)\n sage: La = L.cartan_type().root_system().weight_space().fundamental_weights()\n sage: M = L.verma_module(La[3] - 1/2*La[1])\n sage: M.lie_algebra()\n Lie algebra of ['B', 4] in the Chevalley basis\n \"\"\"\n return self._g\n\n def pbw_basis(self):\n \"\"\"\n Return the PBW basis of the underlying Lie algebra\n used to define ``self``.\n\n EXAMPLES::\n\n sage: L = lie_algebras.so(QQ, 8)\n sage: La = L.cartan_type().root_system().weight_lattice().fundamental_weights()\n sage: M = L.verma_module(La[2] - 2*La[3])\n sage: M.pbw_basis()\n Universal enveloping algebra of Lie algebra of ['D', 4] in the Chevalley basis\n in the Poincare-Birkhoff-Witt basis\n \"\"\"\n return self._pbw\n\n poincare_birkhoff_witt_basis = pbw_basis\n\n @cached_method\n def highest_weight_vector(self):\n \"\"\"\n Return the highest weight vector of ``self``.\n\n EXAMPLES::\n\n sage: L = lie_algebras.sp(QQ, 6)\n sage: La = 
L.cartan_type().root_system().weight_lattice().fundamental_weights()\n sage: M = L.verma_module(La[1] - 3*La[2])\n sage: M.highest_weight_vector()\n v[Lambda[1] - 3*Lambda[2]]\n \"\"\"\n one = self.base_ring().one()\n return self._from_dict({self._indices.one(): one},\n remove_zeros=False, coerce=False)\n\n def gens(self):\n r\"\"\"\n Return the generators of ``self`` as a `U(\\mathfrak{g})`-module.\n\n EXAMPLES::\n\n sage: L = lie_algebras.sp(QQ, 6)\n sage: La = L.cartan_type().root_system().weight_lattice().fundamental_weights()\n sage: M = L.verma_module(La[1] - 3*La[2])\n sage: M.gens()\n (v[Lambda[1] - 3*Lambda[2]],)\n \"\"\"\n return (self.highest_weight_vector(),)\n\n def highest_weight(self):\n r\"\"\"\n Return the highest weight of ``self``.\n\n EXAMPLES::\n\n sage: L = lie_algebras.so(QQ, 7)\n sage: La = L.cartan_type().root_system().weight_space().fundamental_weights()\n sage: M = L.verma_module(4*La[1] - 3/2*La[2])\n sage: M.highest_weight()\n 4*Lambda[1] - 3/2*Lambda[2]\n \"\"\"\n return self._weight\n\n def degree_on_basis(self, m):\n r\"\"\"\n Return the degree (or weight) of the basis element indexed by ``m``.\n\n EXAMPLES::\n\n sage: L = lie_algebras.sl(QQ, 3)\n sage: La = L.cartan_type().root_system().weight_lattice().fundamental_weights()\n sage: M = L.verma_module(2*La[1] + 3*La[2])\n sage: v = M.highest_weight_vector()\n sage: M.degree_on_basis(v.leading_support())\n 2*Lambda[1] + 3*Lambda[2]\n\n sage: pbw = M.pbw_basis()\n sage: G = list(pbw.gens())\n sage: f1, f2 = L.f()\n sage: x = pbw(f1.bracket(f2)) * pbw(f1) * v\n sage: x.degree()\n -Lambda[1] + 3*Lambda[2]\n \"\"\"\n P = self._weight.parent()\n return self._weight + P.sum(P(e * self._g.degree_on_basis(k))\n for k,e in m.dict().items())\n\n def _coerce_map_from_(self, R):\n r\"\"\"\n Return if there is a coercion map from ``R`` to ``self``.\n\n There is a coercion map from ``R`` if and only if\n\n - there is a coercion from ``R`` into the base ring;\n - ``R`` is a Verma module over the same Lie algebra and\n there is a non-zero Verma module morphism from ``R``\n into ``self``.\n\n EXAMPLES::\n\n sage: L = lie_algebras.so(QQ, 8)\n sage: La = L.cartan_type().root_system().weight_lattice().fundamental_weights()\n sage: M = L.verma_module(La[1] + La[2])\n sage: Mp = L.verma_module(M.highest_weight().dot_action([1,2]))\n sage: Mpp = L.verma_module(M.highest_weight().dot_action([1,2]) + La[1])\n sage: M._coerce_map_from_(Mp) is not None\n True\n sage: Mp._coerce_map_from_(M)\n sage: M._coerce_map_from_(Mpp)\n sage: M._coerce_map_from_(ZZ)\n True\n \"\"\"\n if self.base_ring().has_coerce_map_from(R):\n return True\n if isinstance(R, VermaModule) and R._g is self._g:\n H = Hom(R, self)\n if H.dimension() == 1:\n return H.natural_map()\n return super(VermaModule, self)._coerce_map_from_(R)\n\n def _element_constructor_(self, x):\n r\"\"\"\n Construct an element of ``self`` from ``x``.\n\n EXAMPLES::\n\n sage: L = lie_algebras.sl(QQ, 3)\n sage: La = L.cartan_type().root_system().weight_lattice().fundamental_weights()\n sage: M = L.verma_module(La[1] + 2*La[2])\n sage: M(3)\n 3*v[Lambda[1] + 2*Lambda[2]]\n sage: pbw = M.pbw_basis()\n sage: [M(g) for g in pbw.gens()]\n [0,\n 0,\n 0,\n v[Lambda[1] + 2*Lambda[2]],\n 2*v[Lambda[1] + 2*Lambda[2]],\n f[-alpha[2]]*v[Lambda[1] + 2*Lambda[2]],\n f[-alpha[1]]*v[Lambda[1] + 2*Lambda[2]],\n f[-alpha[1] - alpha[2]]*v[Lambda[1] + 2*Lambda[2]]]\n \"\"\"\n if x in self.base_ring():\n return self._from_dict({self._indices.one(): x})\n if isinstance(x, self._pbw.element_class):\n return 
self.highest_weight_vector()._acted_upon_(x, False)\n return super(VermaModule, self)._element_constructor_(self, x)\n\n @lazy_attribute\n def _dominant_data(self):\n r\"\"\"\n Return the closest to dominant weight in the dot orbit of\n the highest weight of ``self`` and the corresponding reduced word.\n\n EXAMPLES::\n\n sage: L = lie_algebras.sl(QQ, 3)\n sage: La = L.cartan_type().root_system().weight_lattice().fundamental_weights()\n sage: M = L.verma_module(La[1] + La[2])\n sage: M._dominant_data\n (Lambda[1] + Lambda[2], [])\n sage: M = L.verma_module(M.highest_weight().dot_action([1,2]))\n sage: M._dominant_data\n (Lambda[1] + Lambda[2], [1, 2])\n sage: M = L.verma_module(-4*La[1] - La[2])\n sage: M._dominant_data\n (-Lambda[1] + 2*Lambda[2], [1, 2])\n \"\"\"\n P = self._weight.parent()\n wt, w = (self._weight + P.rho()).to_dominant_chamber(reduced_word=True)\n return (wt - P.rho(), w)\n\n def is_singular(self):\n r\"\"\"\n Return if ``self`` is a singular Verma module.\n\n A Verma module `M_{\\lambda}` is *singular* if there does not\n exist a dominant weight `\\tilde{\\lambda}` that is in the dot\n orbit of `\\lambda`. We call a Verma module *regular* otherwise.\n\n EXAMPLES::\n\n sage: L = lie_algebras.sl(QQ, 3)\n sage: La = L.cartan_type().root_system().weight_lattice().fundamental_weights()\n sage: M = L.verma_module(La[1] + La[2])\n sage: M.is_singular()\n False\n sage: M = L.verma_module(La[1] - La[2])\n sage: M.is_singular()\n True\n sage: M = L.verma_module(2*La[1] - 10*La[2])\n sage: M.is_singular()\n False\n sage: M = L.verma_module(-2*La[1] - 2*La[2])\n sage: M.is_singular()\n False\n sage: M = L.verma_module(-4*La[1] - La[2])\n sage: M.is_singular()\n True\n \"\"\"\n return not self._dominant_data[0].is_dominant()\n\n def homogeneous_component_basis(self, d):\n r\"\"\"\n Return a basis for the ``d``-th homogeneous component of ``self``.\n\n EXAMPLES::\n\n sage: L = lie_algebras.sl(QQ, 3)\n sage: P = L.cartan_type().root_system().weight_lattice()\n sage: La = P.fundamental_weights()\n sage: al = P.simple_roots()\n sage: mu = 2*La[1] + 3*La[2]\n sage: M = L.verma_module(mu)\n sage: M.homogeneous_component_basis(mu - al[2])\n [f[-alpha[2]]*v[2*Lambda[1] + 3*Lambda[2]]]\n sage: M.homogeneous_component_basis(mu - 3*al[2])\n [f[-alpha[2]]^3*v[2*Lambda[1] + 3*Lambda[2]]]\n sage: M.homogeneous_component_basis(mu - 3*al[2] - 2*al[1])\n [f[-alpha[2]]*f[-alpha[1] - alpha[2]]^2*v[2*Lambda[1] + 3*Lambda[2]],\n f[-alpha[2]]^2*f[-alpha[1]]*f[-alpha[1] - alpha[2]]*v[2*Lambda[1] + 3*Lambda[2]],\n f[-alpha[2]]^3*f[-alpha[1]]^2*v[2*Lambda[1] + 3*Lambda[2]]]\n sage: M.homogeneous_component_basis(mu - La[1])\n Family ()\n \"\"\"\n diff = _convert_wt_to_root(d - self._weight)\n if diff is None or not all(coeff <= 0 and coeff in ZZ for coeff in diff):\n return Family([])\n return sorted(self._homogeneous_component_f(diff))\n\n @cached_method\n def _homogeneous_component_f(self, d):\n r\"\"\"\n Return a basis of the PBW given by ``d`` expressed in the\n root lattice in terms of the simple roots.\n\n INPUT:\n\n - ``d`` -- the coefficients of the simple roots as a vector\n\n EXAMPLES::\n\n sage: L = lie_algebras.sl(QQ, 3)\n sage: La = L.cartan_type().root_system().weight_lattice().fundamental_weights()\n sage: M = L.verma_module(La[1] + La[2])\n sage: sorted(M._homogeneous_component_f(vector([-1,-2])), key=str)\n [f[-alpha[2]]*f[-alpha[1] - alpha[2]]*v[Lambda[1] + Lambda[2]],\n f[-alpha[2]]^2*f[-alpha[1]]*v[Lambda[1] + Lambda[2]]]\n sage: sorted(M._homogeneous_component_f(vector([-5,-4])), 
key=str)\n [f[-alpha[1]]*f[-alpha[1] - alpha[2]]^4*v[Lambda[1] + Lambda[2]],\n f[-alpha[2]]*f[-alpha[1]]^2*f[-alpha[1] - alpha[2]]^3*v[Lambda[1] + Lambda[2]],\n f[-alpha[2]]^2*f[-alpha[1]]^3*f[-alpha[1] - alpha[2]]^2*v[Lambda[1] + Lambda[2]],\n f[-alpha[2]]^3*f[-alpha[1]]^4*f[-alpha[1] - alpha[2]]*v[Lambda[1] + Lambda[2]],\n f[-alpha[2]]^4*f[-alpha[1]]^5*v[Lambda[1] + Lambda[2]]]\n \"\"\"\n if not d:\n return frozenset([self.highest_weight_vector()])\n f = {i: self._pbw(g) for i,g in enumerate(self._g.f())}\n basis = d.parent().basis() # Standard basis vectors\n ret = set()\n def degree(m):\n m = m.dict()\n if not m:\n return d.parent().zero()\n return sum(e * self._g.degree_on_basis(k) for k,e in m.items()).to_vector()\n for i in f:\n if d[i] == 0:\n continue\n for b in self._homogeneous_component_f(d + basis[i]):\n temp = f[i] * b\n ret.update([self.monomial(m) for m in temp.support() if degree(m) == d])\n return frozenset(ret)\n\n def _Hom_(self, Y, category=None, **options):\n r\"\"\"\n Return the homset from ``self`` to ``Y`` in the\n category ``category``.\n\n INPUT:\n\n - ``Y`` -- an object\n - ``category`` -- a subcategory of :class:`Crystals`() or ``None``\n\n The sole purpose of this method is to construct the homset as a\n :class:`~sage.algebras.lie_algebras.verma_module.VermaModuleHomset`.\n If ``category`` is specified and is not a subcategory of\n ``self.category()``, a ``TypeError`` is raised instead.\n\n This method is not meant to be called directly. Please use\n :func:`sage.categories.homset.Hom` instead.\n\n EXAMPLES::\n\n sage: L = lie_algebras.sl(QQ, 3)\n sage: La = L.cartan_type().root_system().weight_space().fundamental_weights()\n sage: M = L.verma_module(La[1] + La[2])\n sage: Mp = L.verma_module(3*La[1] - 3*La[2])\n sage: H = Hom(M, Mp)\n sage: type(H)\n <...VermaModuleHomset_with_category_with_equality_by_id'>\n \"\"\"\n if not (isinstance(Y, VermaModule) and self._g is Y._g):\n raise TypeError(\"{} must be a Verma module of {}\".format(Y, self._g))\n if category is not None and not category.is_subcategory(self.category()):\n raise TypeError(\"{} is not a subcategory of {}\".format(category, self.category()))\n return VermaModuleHomset(self, Y)\n\n class Element(CombinatorialFreeModule.Element):\n def _acted_upon_(self, scalar, self_on_left=False):\n \"\"\"\n Return the action of ``scalar`` on ``self``.\n\n Check that other PBW algebras have an action::\n\n sage: L = lie_algebras.sp(QQ, 6)\n sage: La = L.cartan_type().root_system().weight_lattice().fundamental_weights()\n sage: M = L.verma_module(La[1] - 3*La[2])\n sage: PBW = L.pbw_basis()\n sage: F1 = PBW(L.f(1))\n sage: F1 * M.highest_weight_vector()\n f[-alpha[1]]*v[Lambda[1] - 3*Lambda[2]]\n sage: F1.parent() is M.pbw_basis()\n False\n sage: F1 * M.highest_weight_vector()\n f[-alpha[1]]*v[Lambda[1] - 3*Lambda[2]]\n sage: E1 = PBW(L.e(1))\n sage: E1 * F1\n PBW[alpha[1]]*PBW[-alpha[1]]\n sage: E1 * F1 * M.highest_weight_vector()\n v[Lambda[1] - 3*Lambda[2]]\n sage: M.pbw_basis()(E1 * F1)\n PBW[-alpha[1]]*PBW[alpha[1]] + PBW[alphacheck[1]]\n \"\"\"\n P = self.parent()\n # Check for scalars first\n if scalar in P.base_ring():\n # Don't have this be a super call\n return CombinatorialFreeModule.Element._acted_upon_(self, scalar, self_on_left)\n\n # Check for Lie algebra elements\n try:\n scalar = P._g(scalar)\n except (ValueError, TypeError):\n pass\n\n # Check for PBW elements\n try:\n scalar = P._pbw(scalar)\n except (ValueError, TypeError):\n # Cannot be made into a PBW element, so propagate it up\n return 
CombinatorialFreeModule.Element._acted_upon_(self,\n scalar, self_on_left)\n\n # We only implement x * self, i.e., as a left module\n if self_on_left:\n return None\n\n # Lift ``self`` to the PBW basis and do multiplication there\n mc = self._monomial_coefficients\n d = {P._pbw._indices(x.dict()): mc[x] for x in mc} # Lift the index set\n ret = scalar * P._pbw._from_dict(d, remove_zeros=False, coerce=False)\n\n # Now have ``ret`` act on the highest weight vector\n d = {}\n for m in ret._monomial_coefficients:\n c = ret._monomial_coefficients[m]\n mp = {}\n for k,e in reversed(m._sorted_items()):\n part = P._g._part_on_basis(k)\n if part > 0:\n mp = None\n break\n elif part == 0:\n c *= P._g._weight_action(k, P._weight)**e\n else:\n mp[k] = e\n # This term is 0, so nothing to do\n if mp is None:\n continue\n # Convert back to an element of the indexing set\n mp = P._indices(mp)\n if mp in d:\n d[mp] += c\n else:\n d[mp] = c\n return P._from_dict(d)\n\n _lmul_ = _acted_upon_\n _rmul_ = _acted_upon_\n\n#####################################################################\n## Morphisms and Homset\n\nclass VermaModuleMorphism(Morphism):\n \"\"\"\n A morphism of Verma modules.\n \"\"\"\n def __init__(self, parent, scalar):\n \"\"\"\n Initialize ``self``.\n\n EXAMPLES::\n\n sage: L = lie_algebras.sl(QQ, 3)\n sage: La = L.cartan_type().root_system().weight_lattice().fundamental_weights()\n sage: M = L.verma_module(La[1] + La[2])\n sage: Mp = L.verma_module(M.highest_weight().dot_action([1,2]))\n sage: phi = Hom(Mp, M).natural_map()\n sage: TestSuite(phi).run()\n \"\"\"\n self._scalar = scalar\n Morphism.__init__(self, parent)\n\n def _repr_type(self):\n \"\"\"\n Return a string describing the specific type of this map,\n to be used when printing ``self``.\n\n EXAMPLES::\n\n sage: L = lie_algebras.sl(QQ, 3)\n sage: La = L.cartan_type().root_system().weight_lattice().fundamental_weights()\n sage: M = L.verma_module(La[1] + La[2])\n sage: Mp = L.verma_module(M.highest_weight().dot_action([1,2]))\n sage: phi = Hom(Mp, M).natural_map()\n sage: phi._repr_type()\n 'Verma module'\n \"\"\"\n return \"Verma module\"\n\n def _repr_defn(self):\n r\"\"\"\n Return a string describing the definition of ``self``,\n to be used when printing ``self``.\n\n EXAMPLES::\n\n sage: L = lie_algebras.sl(QQ, 3)\n sage: La = L.cartan_type().root_system().weight_lattice().fundamental_weights()\n sage: M = L.verma_module(La[1] + La[2])\n sage: Mp = L.verma_module(M.highest_weight().dot_action([1,2]))\n sage: phi = Hom(Mp, M).natural_map()\n sage: phi._repr_defn()\n 'v[-5*Lambda[1] + Lambda[2]] |--> f[-alpha[2]]^2*f[-alpha[1]]^4*v[Lambda[1]\n + Lambda[2]] + 8*f[-alpha[2]]*f[-alpha[1]]^3*f[-alpha[1] - alpha[2]]*v[Lambda[1]\n + Lambda[2]] + 12*f[-alpha[1]]^2*f[-alpha[1] - alpha[2]]^2*v[Lambda[1] + Lambda[2]]'\n\n alpha[1]]^2*f[-alpha[1] - alpha[2]]^2*v[Lambda[1] + Lambda[2]]'\n sage: psi = Hom(M, Mp).natural_map()\n sage: psi\n Verma module morphism:\n From: Verma module with highest weight Lambda[1] + Lambda[2]\n of Lie algebra of ['A', 2] in the Chevalley basis\n To: Verma module with highest weight -5*Lambda[1] + Lambda[2]\n of Lie algebra of ['A', 2] in the Chevalley basis\n Defn: v[Lambda[1] + Lambda[2]] |--> 0\n \"\"\"\n v = self.domain().highest_weight_vector()\n if not self._scalar:\n return \"{} |--> {}\".format(v, self.codomain().zero())\n return \"{} |--> {}\".format(v, self._scalar * self.parent().singular_vector())\n\n def _richcmp_(self, other, op):\n r\"\"\"\n Return whether this morphism and ``other`` satisfy 
``op``.\n\n EXAMPLES::\n\n sage: L = lie_algebras.sl(QQ, 3)\n sage: La = L.cartan_type().root_system().weight_lattice().fundamental_weights()\n sage: M = L.verma_module(La[1] + La[2])\n sage: Mp = L.verma_module(M.highest_weight().dot_action([1,2]))\n sage: H = Hom(Mp, M)\n sage: H(1) < H(2)\n True\n sage: H(2) < H(1)\n False\n sage: H.zero() == H(0)\n True\n sage: H(3) <= H(3)\n True\n \"\"\"\n return richcmp(self._scalar, other._scalar, op)\n\n def _call_(self, x):\n r\"\"\"\n Apply this morphism to ``x``.\n\n EXAMPLES::\n\n sage: L = lie_algebras.sl(QQ, 3)\n sage: La = L.cartan_type().root_system().weight_lattice().fundamental_weights()\n sage: M = L.verma_module(La[1] + La[2])\n sage: Mp = L.verma_module(M.highest_weight().dot_action([1,2]))\n sage: pbw = M.pbw_basis()\n sage: f1, f2 = pbw(L.f(1)), pbw(L.f(2))\n sage: v = Mp.highest_weight_vector()\n sage: phi = Hom(Mp, M).natural_map()\n sage: phi(f1 * v) == f1 * phi(v)\n True\n sage: phi(f2 * f1 * v) == f2 * f1 * phi(v)\n True\n sage: phi(f1 * f2 * f1 * v) == f1 * f2 * f1 * phi(v)\n True\n\n sage: Mpp = L.verma_module(M.highest_weight().dot_action([1,2]) + La[1])\n sage: psi = Hom(Mpp, M).natural_map()\n sage: v = Mpp.highest_weight_vector()\n sage: psi(v)\n 0\n \"\"\"\n if not self._scalar or self.parent().singular_vector() is None:\n return self.codomain().zero()\n mc = x.monomial_coefficients(copy=False)\n return self.codomain().linear_combination((self._on_basis(m), self._scalar * c)\n for m,c in mc.items())\n\n def _on_basis(self, m):\n \"\"\"\n Return the image of the basis element indexed by ``m``.\n\n EXAMPLES::\n\n sage: L = lie_algebras.sl(QQ, 3)\n sage: La = L.cartan_type().root_system().weight_lattice().fundamental_weights()\n sage: M = L.verma_module(La[1] + La[2])\n sage: Mp = L.verma_module(M.highest_weight().dot_action([1,2]))\n sage: pbw = M.pbw_basis()\n sage: f1, f2 = pbw(L.f(1)), pbw(L.f(2))\n sage: v = Mp.highest_weight_vector()\n sage: phi = Hom(Mp, M).natural_map()\n sage: phi._on_basis((f1 * v).leading_support()) == f1 * phi(v)\n True\n \"\"\"\n pbw = self.codomain()._pbw\n return pbw.monomial(pbw._indices(m.dict())) * self.parent().singular_vector()\n\n def _add_(self, other):\n \"\"\"\n Add ``self`` and ``other``.\n\n EXAMPLES::\n\n sage: L = lie_algebras.sl(QQ, 3)\n sage: La = L.cartan_type().root_system().weight_lattice().fundamental_weights()\n sage: M = L.verma_module(La[1] + La[2])\n sage: Mp = L.verma_module(M.highest_weight().dot_action([1,2]))\n sage: phi = Hom(Mp, M).natural_map()\n sage: (phi + 3/2 * phi)._scalar\n 5/2\n \"\"\"\n return type(self)(self.parent(), self._scalar + other._scalar)\n\n def _sub_(self, other):\n \"\"\"\n Subtract ``self`` and ``other``.\n\n EXAMPLES::\n\n sage: L = lie_algebras.sl(QQ, 3)\n sage: La = L.cartan_type().root_system().weight_lattice().fundamental_weights()\n sage: M = L.verma_module(La[1] + La[2])\n sage: Mp = L.verma_module(M.highest_weight().dot_action([1,2]))\n sage: phi = Hom(Mp, M).natural_map()\n sage: (phi - 3/2 * phi)._scalar\n -1/2\n \"\"\"\n return type(self)(self.parent(), self._scalar - other._scalar)\n\n def _acted_upon_(self, other, self_on_left):\n \"\"\"\n Return the action of ``other`` on ``self``.\n\n EXAMPLES::\n\n sage: L = lie_algebras.sl(QQ, 3)\n sage: La = L.cartan_type().root_system().weight_lattice().fundamental_weights()\n sage: M = L.verma_module(La[1] + La[2])\n sage: Mp = L.verma_module(M.highest_weight().dot_action([1,2]))\n sage: phi = Hom(Mp, M).natural_map()\n sage: phi._scalar\n 1\n sage: (0 * phi)._scalar\n 0\n sage: R.<x> = 
QQ[]\n sage: x * phi\n Traceback (most recent call last):\n ...\n TypeError: unsupported operand parent(s) for *: ...\n \"\"\"\n R = self.parent().base_ring()\n if other not in R:\n return None\n return type(self)(self.parent(), R(other) * self._scalar)\n\n def _composition_(self, right, homset):\n r\"\"\"\n Return the composition of ``self`` and ``right``.\n\n INPUT:\n\n - ``self``, ``right`` -- maps\n - homset -- a homset\n\n ASSUMPTION:\n\n The codomain of ``right`` is contained in the domain of ``self``.\n This assumption is not verified.\n\n EXAMPLES::\n\n sage: L = lie_algebras.sl(QQ, 3)\n sage: La = L.cartan_type().root_system().weight_lattice().fundamental_weights()\n sage: M = L.verma_module(La[1] + La[2])\n sage: Mp = L.verma_module(M.highest_weight().dot_action([1,2]))\n sage: Mpp = L.verma_module(M.highest_weight().dot_action([1,2]) + La[1])\n sage: phi = Hom(Mp, M).natural_map()\n sage: psi = Hom(Mpp, Mp).natural_map()\n sage: xi = phi * psi\n sage: xi._scalar\n 0\n \"\"\"\n if (isinstance(right, VermaModuleMorphism)\n and right.domain()._g is self.codomain()._g):\n return homset.element_class(homset, right._scalar * self._scalar)\n return super(VermaModuleMorphism, self)._composition_(right, homset)\n\n def is_injective(self):\n r\"\"\"\n Return if ``self`` is injective or not.\n\n A Verma module morphism `\\phi : M \\to M'` is injective if\n and only if `\\dim \\hom(M, M') = 1` and `\\phi \\neq 0`.\n\n EXAMPLES::\n\n sage: L = lie_algebras.sl(QQ, 3)\n sage: La = L.cartan_type().root_system().weight_lattice().fundamental_weights()\n sage: M = L.verma_module(La[1] + La[2])\n sage: Mp = L.verma_module(M.highest_weight().dot_action([1,2]))\n sage: Mpp = L.verma_module(M.highest_weight().dot_action([1,2]) + La[1])\n sage: phi = Hom(Mp, M).natural_map()\n sage: phi.is_injective()\n True\n sage: (0 * phi).is_injective()\n False\n sage: psi = Hom(Mpp, Mp).natural_map()\n sage: psi.is_injective()\n False\n \"\"\"\n return self.parent().singular_vector() is not None and bool(self._scalar)\n\n def is_surjective(self):\n \"\"\"\n Return if ``self`` is surjective or not.\n\n A Verma module morphism is surjective if and only if the\n domain is equal to the codomain and it is not the zero\n morphism.\n\n EXAMPLES::\n\n sage: L = lie_algebras.sl(QQ, 3)\n sage: La = L.cartan_type().root_system().weight_lattice().fundamental_weights()\n sage: M = L.verma_module(La[1] + La[2])\n sage: Mp = L.verma_module(M.highest_weight().dot_action([1,2]))\n sage: phi = Hom(M, M).natural_map()\n sage: phi.is_surjective()\n True\n sage: (0 * phi).is_surjective()\n False\n sage: psi = Hom(Mp, M).natural_map()\n sage: psi.is_surjective()\n False\n \"\"\"\n return self.domain() == self.codomain() and bool(self._scalar)\n\nclass VermaModuleHomset(Homset):\n r\"\"\"\n The set of morphisms from one Verma module to another\n considered as `U(\\mathfrak{g})`-representations.\n\n Let `M_{w \\cdot \\lambda}` and `M_{w' \\cdot \\lambda'}` be\n Verma modules, `\\cdot` is the dot action, and `\\lambda + \\rho`,\n `\\lambda' + \\rho` are dominant weights. Then we have\n\n .. MATH::\n\n \\dim \\hom(M_{w \\cdot \\lambda}, M_{w' \\cdot \\lambda'}) = 1\n\n if and only if `\\lambda = \\lambda'` and `w' \\leq w` in Bruhat\n order. 
Otherwise the homset is 0 dimensional.\n \"\"\"\n def __call__(self, x, **options):\n \"\"\"\n Construct a morphism in this homset from ``x`` if possible.\n\n EXAMPLES::\n\n sage: L = lie_algebras.sl(QQ, 3)\n sage: La = L.cartan_type().root_system().weight_lattice().fundamental_weights()\n sage: M = L.verma_module(La[1] + La[2])\n sage: Mp = L.verma_module(M.highest_weight().dot_action([1,2]))\n sage: Mpp = L.verma_module(M.highest_weight().dot_action([1,2,1]))\n sage: phi = Hom(Mp, M).natural_map()\n sage: Hom(Mpp, M)(phi)\n Verma module morphism:\n From: Verma module with highest weight -3*Lambda[1] - 3*Lambda[2]\n of Lie algebra of ['A', 2] in the Chevalley basis\n To: Verma module with highest weight Lambda[1] + Lambda[2]\n of Lie algebra of ['A', 2] in the Chevalley basis\n Defn: v[-3*Lambda[1] - 3*Lambda[2]] |-->\n f[-alpha[2]]^4*f[-alpha[1]]^4*v[Lambda[1] + Lambda[2]]\n + 8*f[-alpha[2]]^3*f[-alpha[1]]^3*f[-alpha[1] - alpha[2]]*v[Lambda[1] + Lambda[2]]\n + 12*f[-alpha[2]]^2*f[-alpha[1]]^2*f[-alpha[1] - alpha[2]]^2*v[Lambda[1] + Lambda[2]]\n\n sage: psi = Hom(Mpp, Mp).natural_map()\n sage: Hom(Mpp, M)(psi)\n Verma module morphism:\n From: Verma module with highest weight -3*Lambda[1] - 3*Lambda[2]\n of Lie algebra of ['A', 2] in the Chevalley basis\n To: Verma module with highest weight Lambda[1] + Lambda[2]\n of Lie algebra of ['A', 2] in the Chevalley basis\n Defn: v[-3*Lambda[1] - 3*Lambda[2]] |-->\n f[-alpha[2]]^4*f[-alpha[1]]^4*v[Lambda[1] + Lambda[2]]\n + 8*f[-alpha[2]]^3*f[-alpha[1]]^3*f[-alpha[1] - alpha[2]]*v[Lambda[1] + Lambda[2]]\n + 12*f[-alpha[2]]^2*f[-alpha[1]]^2*f[-alpha[1] - alpha[2]]^2*v[Lambda[1] + Lambda[2]]\n \"\"\"\n if isinstance(x, VermaModuleMorphism):\n if x.parent() is self:\n return x\n if x.parent() == self:\n x._set_parent(self) # needed due to non-uniqueness of homsets\n return x\n\n if x.domain() != self.domain():\n x = x * Hom(self.domain(), x.domain()).natural_map()\n if x.codomain() != self.codomain():\n x = Hom(x.codomain(), self.codomain()).natural_map() * x\n\n return x\n\n if x in self.base_ring():\n if self.singular_vector() is None:\n return self.zero()\n return self.element_class(self, self.base_ring()(x))\n\n return super(VermaModuleHomset, self).__call__(x, **options)\n\n def _an_element_(self):\n \"\"\"\n Return an element of ``self``.\n\n EXAMPLES::\n\n sage: L = lie_algebras.sl(QQ, 3)\n sage: La = L.cartan_type().root_system().weight_lattice().fundamental_weights()\n sage: M = L.verma_module(La[1] + La[2])\n sage: Mp = L.verma_module(M.highest_weight().dot_action([2]))\n sage: H = Hom(Mp, M)\n sage: H._an_element_()\n Verma module morphism:\n From: Verma module with highest weight 3*Lambda[1] - 3*Lambda[2]\n of Lie algebra of ['A', 2] in the Chevalley basis\n To: Verma module with highest weight Lambda[1] + Lambda[2]\n of Lie algebra of ['A', 2] in the Chevalley basis\n Defn: v[3*Lambda[1] - 3*Lambda[2]] |-->\n f[-alpha[2]]^2*v[Lambda[1] + Lambda[2]]\n \"\"\"\n return self.natural_map()\n\n @cached_method\n def singular_vector(self):\n r\"\"\"\n Return the singular vector in the codomain corresponding\n to the domain's highest weight element or ``None`` if no\n such element exists.\n\n ALGORITHM:\n\n We essentially follow the algorithm laid out in [deG2005]_.\n We use the `\\mathfrak{sl}_2` relation on\n `M_{s_i \\cdot \\lambda} \\to M_{\\lambda}`, where\n `\\langle \\lambda + \\delta, \\alpha_i^{\\vee} \\rangle = m > 0`,\n i.e., the weight `\\lambda` is `i`-dominant with respect to\n the dot action. 
From here, we construct the singular vector\n `f_i^m v_{\\lambda}`. We iterate this until we reach `\\mu`.\n\n EXAMPLES::\n\n sage: L = lie_algebras.sp(QQ, 6)\n sage: La = L.cartan_type().root_system().weight_space().fundamental_weights()\n sage: la = La[1] - La[3]\n sage: mu = la.dot_action([1,2])\n sage: M = L.verma_module(la)\n sage: Mp = L.verma_module(mu)\n sage: H = Hom(Mp, M)\n sage: H.singular_vector()\n f[-alpha[2]]*f[-alpha[1]]^3*v[Lambda[1] - Lambda[3]]\n + 3*f[-alpha[1]]^2*f[-alpha[1] - alpha[2]]*v[Lambda[1] - Lambda[3]]\n\n ::\n\n sage: L = LieAlgebra(QQ, cartan_type=['F',4])\n sage: La = L.cartan_type().root_system().weight_space().fundamental_weights()\n sage: la = La[1] + La[2] - La[3]\n sage: mu = la.dot_action([1,2,3,2])\n sage: M = L.verma_module(la)\n sage: Mp = L.verma_module(mu)\n sage: H = Hom(Mp, M)\n sage: v = H.singular_vector()\n sage: pbw = M.pbw_basis()\n sage: E = [pbw(e) for e in L.e()]\n sage: all(e * v == M.zero() for e in E)\n True\n\n When `w \\cdot \\lambda \\notin \\lambda + Q^-`, there does not\n exist a singular vector::\n\n sage: L = lie_algebras.sl(QQ, 4)\n sage: La = L.cartan_type().root_system().weight_space().fundamental_weights()\n sage: la = 3/7*La[1] - 1/2*La[3]\n sage: mu = la.dot_action([1,2])\n sage: M = L.verma_module(la)\n sage: Mp = L.verma_module(mu)\n sage: H = Hom(Mp, M)\n sage: H.singular_vector() is None\n True\n\n TESTS::\n\n sage: L = lie_algebras.sl(QQ, 3)\n sage: La = L.cartan_type().root_system().weight_space().fundamental_weights()\n sage: al = L.cartan_type().root_system().root_lattice().simple_roots()\n sage: M = L.verma_module(La[1] + La[2])\n sage: pbw = M.pbw_basis()\n sage: E = {i: pbw(L.e(i)) for i in L.cartan_type().index_set()}\n sage: all(not E[i] * Hom(L.verma_module(mu), M).singular_vector()\n ....: for i in L.cartan_type().index_set()\n ....: for mu in M.highest_weight().dot_orbit())\n True\n \"\"\"\n if self.is_endomorphism_set():\n return self.codomain().highest_weight_vector()\n if self.domain()._dominant_data[0] != self.codomain()._dominant_data[0]:\n return None\n\n from sage.combinat.root_system.coxeter_group import CoxeterGroup\n W = CoxeterGroup(self.domain()._g._cartan_type)\n wp = W.from_reduced_word(self.domain()._dominant_data[1])\n w = W.from_reduced_word(self.codomain()._dominant_data[1])\n if not w.bruhat_le(wp):\n return None\n C = self.codomain()\n pbw = C._pbw\n f = C._g.f()\n F = {i: pbw(f[i]) for i in f.keys()}\n red_word = (wp * ~w).reduced_word()\n rho = C._weight.parent().rho()\n ac = C._weight.parent().simple_coroots()\n elt = pbw.one()\n wt = C._weight\n # Construct the singular vector by iterated embeddings of Verma\n # modules (without constructing the modules themselves)\n for i in reversed(red_word):\n exp = (wt + rho).scalar(ac[i])\n if exp not in ZZ or exp < 0:\n return None\n elt = F[i]**ZZ(exp) * elt\n wt = wt.dot_action([i])\n return C.highest_weight_vector()._acted_upon_(elt, False)\n\n @cached_method\n def natural_map(self):\n \"\"\"\n Return the \"natural map\" of ``self``.\n\n EXAMPLES::\n\n sage: L = lie_algebras.sl(QQ, 3)\n sage: La = L.cartan_type().root_system().weight_lattice().fundamental_weights()\n sage: M = L.verma_module(La[1] + La[2])\n sage: Mp = L.verma_module(M.highest_weight().dot_action([2]))\n sage: H = Hom(Mp, M)\n sage: H.natural_map()\n Verma module morphism:\n From: Verma module with highest weight 3*Lambda[1] - 3*Lambda[2]\n of Lie algebra of ['A', 2] in the Chevalley basis\n To: Verma module with highest weight Lambda[1] + Lambda[2]\n of Lie algebra of 
['A', 2] in the Chevalley basis\n Defn: v[3*Lambda[1] - 3*Lambda[2]] |-->\n f[-alpha[2]]^2*v[Lambda[1] + Lambda[2]]\n\n sage: Mp = L.verma_module(La[1] + 2*La[2])\n sage: H = Hom(Mp, M)\n sage: H.natural_map()\n Verma module morphism:\n From: Verma module with highest weight Lambda[1] + 2*Lambda[2]\n of Lie algebra of ['A', 2] in the Chevalley basis\n To: Verma module with highest weight Lambda[1] + Lambda[2]\n of Lie algebra of ['A', 2] in the Chevalley basis\n Defn: v[Lambda[1] + 2*Lambda[2]] |--> 0\n \"\"\"\n if self.singular_vector() is None:\n return self.zero()\n return self.element_class(self, self.base_ring().one())\n\n @cached_method\n def zero(self):\n \"\"\"\n Return the zero morphism of ``self``.\n\n EXAMPLES::\n\n sage: L = lie_algebras.sp(QQ, 6)\n sage: La = L.cartan_type().root_system().weight_space().fundamental_weights()\n sage: M = L.verma_module(La[1] + 2/3*La[2])\n sage: Mp = L.verma_module(La[2] - La[3])\n sage: H = Hom(Mp, M)\n sage: H.zero()\n Verma module morphism:\n From: Verma module with highest weight Lambda[2] - Lambda[3]\n of Lie algebra of ['C', 3] in the Chevalley basis\n To: Verma module with highest weight Lambda[1] + 2/3*Lambda[2]\n of Lie algebra of ['C', 3] in the Chevalley basis\n Defn: v[Lambda[2] - Lambda[3]] |--> 0\n \"\"\"\n return self.element_class(self, self.base_ring().zero())\n\n def dimension(self):\n \"\"\"\n Return the dimension of ``self`` (as a vector space over\n the base ring).\n\n EXAMPLES::\n\n sage: L = lie_algebras.sl(QQ, 3)\n sage: La = L.cartan_type().root_system().weight_lattice().fundamental_weights()\n sage: M = L.verma_module(La[1] + La[2])\n sage: Mp = L.verma_module(M.highest_weight().dot_action([2]))\n sage: H = Hom(Mp, M)\n sage: H.dimension()\n 1\n\n sage: Mp = L.verma_module(La[1] + 2*La[2])\n sage: H = Hom(Mp, M)\n sage: H.dimension()\n 0\n \"\"\"\n if self.singular_vector() is None:\n return ZZ.zero()\n return ZZ.one()\n\n def basis(self):\n \"\"\"\n Return a basis of ``self``.\n\n EXAMPLES::\n\n sage: L = lie_algebras.sl(QQ, 3)\n sage: La = L.cartan_type().root_system().weight_lattice().fundamental_weights()\n sage: M = L.verma_module(La[1] + La[2])\n sage: Mp = L.verma_module(M.highest_weight().dot_action([2]))\n sage: H = Hom(Mp, M)\n sage: list(H.basis()) == [H.natural_map()]\n True\n\n sage: Mp = L.verma_module(La[1] + 2*La[2])\n sage: H = Hom(Mp, M)\n sage: H.basis()\n Family ()\n \"\"\"\n if self.singular_vector() is None:\n return Family([])\n return Family([self.natural_map()])\n\n Element = VermaModuleMorphism\n\n\ndef _convert_wt_to_root(wt):\n r\"\"\"\n Helper function to express ``wt`` as a linear combination\n of simple roots.\n\n INPUT:\n\n - ``wt`` -- an element of a weight lattice realization\n\n OUTPUT:\n\n A vector over `\\QQ` representing ``wt`` as a linear combination\n of simple roots.\n\n EXAMPLES::\n\n sage: from sage.algebras.lie_algebras.verma_module import _convert_wt_to_root\n sage: P = RootSystem(['A',3]).weight_lattice()\n sage: La = P.fundamental_weights()\n sage: [_convert_wt_to_root(al) for al in P.simple_roots()]\n [(1, 0, 0), (0, 1, 0), (0, 0, 1)]\n sage: _convert_wt_to_root(La[1] + La[2])\n (5/4, 3/2, 3/4)\n\n sage: L = RootSystem(['A',3]).ambient_space()\n sage: e = L.basis()\n sage: _convert_wt_to_root(e[0] + 3*e[3])\n sage: _convert_wt_to_root(e[0] - e[1])\n (1, 0, 0)\n sage: _convert_wt_to_root(e[0] + 2*e[1] - 3*e[2])\n (1, 3, 0)\n \"\"\"\n v = wt.to_vector().change_ring(QQ)\n al = [a.to_vector() for a in wt.parent().simple_roots()]\n b = v.parent().linear_dependence([v] + al)\n 
if len(b) != 1 or b[0] == 0:\n return None\n b = b[0] # Get the actual vector that gives the linear dependency\n # Get v as a linear combination of the simple roots\n return vector(QQ, [-x / b[0] for x in b[1:]])\n", "id": "5543717", "language": "Python", "matching_score": 2.4367880821228027, "max_stars_count": 1742, "path": "src/sage/algebras/lie_algebras/verma_module.py" }, { "content": "# -*- coding: utf-8 -*-\nr\"\"\"\nBlob Algebras\n\nAUTHORS:\n\n- <NAME> (2020-05-16): Initial version\n\"\"\"\n\n# ****************************************************************************\n# Copyright (C) 2020 <NAME> <<EMAIL>>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 2 of the License, or\n# (at your option) any later version.\n# https://www.gnu.org/licenses/\n# ****************************************************************************\n\nfrom sage.structure.parent import Parent\nfrom sage.structure.unique_representation import UniqueRepresentation\nfrom sage.structure.element import Element, get_coercion_model\nfrom sage.structure.richcmp import richcmp\n#from sage.misc.inherit_comparison import InheritComparisonClasscallMetaclass\nfrom sage.misc.cachefunc import cached_method\nfrom sage.misc.misc import powerset\nfrom sage.arith.all import binomial\nfrom sage.categories.finite_enumerated_sets import FiniteEnumeratedSets\nfrom sage.categories.algebras import Algebras\nfrom sage.combinat.diagram_algebras import (TemperleyLiebDiagrams, diagram_latex,\n TL_diagram_ascii_art)\nfrom sage.combinat.free_module import CombinatorialFreeModule\nfrom sage.combinat.dyck_word import DyckWords\n\n#@add_metaclass(InheritComparisonClasscallMetaclass)\nclass BlobDiagram(Element):\n r\"\"\"\n A blob diagram.\n\n A blob diagram consists of a perfect matching of the set\n `\\{1, \\ldots, n\\} \\sqcup \\{-1, \\ldots, -n\\}` such that the result\n is a noncrossing matching (a :class:`Temperley-Lieb diagram\n <sage.combinat.diagram_algebras.TemperleyLiebDiagram>`), divided\n into two sets of pairs: one for the pairs with blobs and one for\n those without. 
The blobed pairs must either be either the leftmost\n propagating strand or to the left of it and not nested.\n \"\"\"\n def __init__(self, parent, marked, unmarked):\n r\"\"\"\n Initialize ``self``.\n\n TESTS::\n\n sage: from sage.combinat.blob_algebra import BlobDiagrams\n sage: BD4 = BlobDiagrams(4)\n sage: B = BD4([[1,-3]], [[2,-4], [3,4], [-1,-2]])\n sage: TestSuite(B).run()\n \"\"\"\n Element.__init__(self, parent)\n self.marked = tuple(sorted([tuple(sorted(pair)) for pair in marked]))\n self.unmarked = tuple(sorted([tuple(sorted(pair)) for pair in unmarked]))\n\n def _repr_(self):\n r\"\"\"\n Return a string representation of ``self``.\n\n EXAMPLES::\n\n sage: from sage.combinat.blob_algebra import BlobDiagrams\n sage: BD4 = BlobDiagrams(4)\n sage: BD4([[1,-3]], [[2,-4], [3,4], [-1,-2]])\n ({{-3, 1}}, {{-4, 2}, {-2, -1}, {3, 4}})\n \"\"\"\n return '({{{}}}, {{{}}})'.format(', '.join('{' + repr(X)[1:-1] + '}'\n for X in self.marked),\n ', '.join('{' + repr(X)[1:-1] + '}'\n for X in self.unmarked))\n\n def __hash__(self):\n r\"\"\"\n Return the hash of ``self``.\n\n EXAMPLES::\n\n sage: from sage.combinat.blob_algebra import BlobDiagrams\n sage: BD4 = BlobDiagrams(4)\n sage: B = BD4([[1,-3]], [[2,-4], [3,4], [-1,-2]])\n sage: hash(B) in [hash(D) for D in BD4]\n True\n sage: len(set([hash(D) for D in BD4])) == len(BD4)\n True\n \"\"\"\n return hash((self.marked, self.unmarked))\n\n def _richcmp_(self, other, op):\n r\"\"\"\n Compare ``self`` to ``other`` with operation ``op``.\n\n EXAMPLES::\n\n sage: from sage.combinat.blob_algebra import BlobDiagrams\n sage: BD4 = BlobDiagrams(4)\n sage: B = BD4([[1,-3]], [[2,-4], [3,4], [-1,-2]])\n sage: any(B == D for D in BD4)\n True\n sage: B2 = BD4([], [[1,-3], [2,-4], [3,4], [-1,-2]])\n sage: B == B2\n False\n sage: B != B2\n True\n sage: sorted(BlobDiagrams(3))\n [({}, {{-3, -2}, {-1, 1}, {2, 3}}),\n ({}, {{-3, -2}, {-1, 3}, {1, 2}}),\n ({}, {{-3, 1}, {-2, -1}, {2, 3}}),\n ({}, {{-3, 3}, {-2, -1}, {1, 2}}),\n ({}, {{-3, 3}, {-2, 2}, {-1, 1}}),\n ({{-3, 1}}, {{-2, -1}, {2, 3}}),\n ({{-3, 3}}, {{-2, -1}, {1, 2}}),\n ({{-2, -1}}, {{-3, 1}, {2, 3}}),\n ({{-2, -1}}, {{-3, 3}, {1, 2}}),\n ({{-1, 1}}, {{-3, -2}, {2, 3}}),\n ({{-1, 1}}, {{-3, 3}, {-2, 2}}),\n ({{-1, 3}}, {{-3, -2}, {1, 2}}),\n ({{1, 2}}, {{-3, -2}, {-1, 3}}),\n ({{1, 2}}, {{-3, 3}, {-2, -1}}),\n ({{-3, 1}, {-2, -1}}, {{2, 3}}),\n ({{-3, 3}, {-2, -1}}, {{1, 2}}),\n ({{-3, 3}, {1, 2}}, {{-2, -1}}),\n ({{-2, -1}, {1, 2}}, {{-3, 3}}),\n ({{-1, 3}, {1, 2}}, {{-3, -2}}),\n ({{-3, 3}, {-2, -1}, {1, 2}}, {})]\n \"\"\"\n return richcmp((len(self.marked), self.marked, self.unmarked),\n (len(other.marked), other.marked, other.unmarked),\n op)\n\n def temperley_lieb_diagram(self):\n r\"\"\"\n Return the Temperley-Lieb diagram corresponding to ``self``.\n\n EXAMPLES::\n\n sage: from sage.combinat.blob_algebra import BlobDiagrams\n sage: BD4 = BlobDiagrams(4)\n sage: B = BD4([[1,-3]], [[2,-4], [3,4], [-1,-2]])\n sage: B.temperley_lieb_diagram()\n {{-4, 2}, {-3, 1}, {-2, -1}, {3, 4}}\n \"\"\"\n return self.parent()._TL_diagrams(self.marked + self.unmarked)\n\nclass BlobDiagrams(Parent, UniqueRepresentation):\n r\"\"\"\n The set of all blob diagrams.\n \"\"\"\n def __init__(self, n):\n r\"\"\"\n Initialize ``self``.\n\n TESTS::\n\n sage: from sage.combinat.blob_algebra import BlobDiagrams\n sage: BD4 = BlobDiagrams(4)\n sage: TestSuite(BD4).run()\n \"\"\"\n self._n = n\n self._TL_diagrams = TemperleyLiebDiagrams(n)\n Parent.__init__(self, category=FiniteEnumeratedSets())\n\n def _repr_(self):\n 
r\"\"\"\n Return a string representation of ``self``.\n\n EXAMPLES::\n\n sage: from sage.combinat.blob_algebra import BlobDiagrams\n sage: BlobDiagrams(4)\n Blob diagrams of order 4\n \"\"\"\n return \"Blob diagrams of order {}\".format(self._n)\n\n def cardinality(self):\n r\"\"\"\n Return the cardinality of ``self``.\n\n EXAMPLES::\n\n sage: from sage.combinat.blob_algebra import BlobDiagrams\n sage: BD4 = BlobDiagrams(4)\n sage: BD4.cardinality()\n 70\n \"\"\"\n return binomial(2*self._n, self._n)\n\n def order(self):\n r\"\"\"\n Return the order of ``self``.\n\n EXAMPLES::\n\n sage: from sage.combinat.blob_algebra import BlobDiagrams\n sage: BD4 = BlobDiagrams(4)\n sage: BD4.order()\n 4\n \"\"\"\n return self._n\n\n @cached_method\n def base_set(self):\n r\"\"\"\n Return the base set of ``self``.\n\n EXAMPLES::\n\n sage: from sage.combinat.blob_algebra import BlobDiagrams\n sage: BD4 = BlobDiagrams(4)\n sage: sorted(BD4.base_set())\n [-4, -3, -2, -1, 1, 2, 3, 4]\n \"\"\"\n return frozenset(range(1,self._n+1)).union(range(-self._n,0))\n\n def _element_constructor_(self, marked, unmarked=None):\n r\"\"\"\n Construct an element of ``self``.\n\n EXAMPLES::\n\n sage: from sage.combinat.blob_algebra import BlobDiagrams\n sage: BD4 = BlobDiagrams(4)\n sage: BD4([[1,-3]], [[-1,-2], [2,3], [-4,4]])\n ({{-3, 1}}, {{-4, 4}, {-2, -1}, {2, 3}})\n sage: BD4([[(1,-3)], ([-1,-2], (2,3), [-4,4])])\n ({{-3, 1}}, {{-4, 4}, {-2, -1}, {2, 3}})\n \"\"\"\n if unmarked is None:\n marked, unmarked = marked\n ret = self.element_class(self, marked, unmarked)\n if ret not in self:\n raise ValueError(\"not a blob diagram of order {}\".format(self._n))\n return ret\n\n def __contains__(self, X):\n r\"\"\"\n Check if ``X`` is contained in ``self``.\n\n EXAMPLES::\n\n sage: from sage.combinat.blob_algebra import BlobDiagrams\n sage: BD4 = BlobDiagrams(4)\n sage: BD4([[1,-3], [-1,-2]], [[2,-4], [3,4]]) # indirect doctest\n ({{-3, 1}, {-2, -1}}, {{-4, 2}, {3, 4}})\n sage: BD4([[1,4], [-1,-2], [-3,-4]], [[2,3]]) # indirect doctest\n ({{-4, -3}, {-2, -1}, {1, 4}}, {{2, 3}})\n\n sage: BD4([[1,-2], [-1,-3]], [[2,-4], [3,4]]) # crossing strands\n Traceback (most recent call last):\n ...\n ValueError: not a blob diagram of order 4\n sage: BD4([[1,-4], [-1,-2]], [[2,-3], [3,4]]) # crossing strands\n Traceback (most recent call last):\n ...\n ValueError: not a blob diagram of order 4\n sage: BD4([[1,-2], [-1,-3]], [[3,-4], [2,4]]) # crossing strands\n Traceback (most recent call last):\n ...\n ValueError: not a blob diagram of order 4\n sage: BD4([[1,-3], [-1,-2], [3,4]], [[2,-4]]) # trapped blob cup\n Traceback (most recent call last):\n ...\n ValueError: not a blob diagram of order 4\n sage: BD4([[-1,3], [1,2], [-3,-4]], [[-2,4]]) # trapped blob cap\n Traceback (most recent call last):\n ...\n ValueError: not a blob diagram of order 4\n sage: BD4([[1,4], [-1,-2], [-3,-4], [2,3]], []) # nested blob cup\n Traceback (most recent call last):\n ...\n ValueError: not a blob diagram of order 4\n sage: BD4([[-1,-4], [1,2], [3,4], [-2,-3]], []) # nested blob cap\n Traceback (most recent call last):\n ...\n ValueError: not a blob diagram of order 4\n sage: BD4([[3,-3]], [[1,-1],[2,-2],[4,-4]]) # trapped propagating line\n Traceback (most recent call last):\n ...\n ValueError: not a blob diagram of order 4\n \"\"\"\n if not isinstance(X, BlobDiagram):\n return False\n # Check that it is a Temperley-Lieb diagram\n TL = X.marked + X.unmarked # the TL diagram\n if TL not in self._TL_diagrams:\n return False\n # Check left escaping\n for 
x, y in X.marked:\n if x > 0: # Must be a cup\n for P in TL:\n if P[1] < 0: # P is a cap\n continue\n if P[1] < x:\n if P[0] < 0: # A propagating line to the left\n return False\n else: # Note that P[1] != x\n if 0 < P[0] < x: # A nesting line\n return False\n elif y < 0: # Must be a cap\n for P in TL:\n if P[0] > 0: # P is a cup\n continue\n if P[0] > y:\n if P[1] > 0: # A propagating line to the left\n return False\n else: # Note that P[0] != y\n if 0 > P[1] > y: # A nesting line\n return False\n else: # Must be a propagating line\n if any(P[0] < 0 and P[1] > 0 and P[1] < y for P in TL):\n return False\n return True\n\n def __iter__(self):\n r\"\"\"\n Iterate over ``self``.\n\n EXAMPLES::\n\n sage: from sage.combinat.blob_algebra import BlobDiagrams\n sage: BD3 = BlobDiagrams(3)\n sage: sorted(BD3)\n [({}, {{-3, -2}, {-1, 1}, {2, 3}}),\n ({}, {{-3, -2}, {-1, 3}, {1, 2}}),\n ({}, {{-3, 1}, {-2, -1}, {2, 3}}),\n ({}, {{-3, 3}, {-2, -1}, {1, 2}}),\n ({}, {{-3, 3}, {-2, 2}, {-1, 1}}),\n ({{-3, 1}}, {{-2, -1}, {2, 3}}),\n ({{-3, 3}}, {{-2, -1}, {1, 2}}),\n ({{-2, -1}}, {{-3, 1}, {2, 3}}),\n ({{-2, -1}}, {{-3, 3}, {1, 2}}),\n ({{-1, 1}}, {{-3, -2}, {2, 3}}),\n ({{-1, 1}}, {{-3, 3}, {-2, 2}}),\n ({{-1, 3}}, {{-3, -2}, {1, 2}}),\n ({{1, 2}}, {{-3, -2}, {-1, 3}}),\n ({{1, 2}}, {{-3, 3}, {-2, -1}}),\n ({{-3, 1}, {-2, -1}}, {{2, 3}}),\n ({{-3, 3}, {-2, -1}}, {{1, 2}}),\n ({{-3, 3}, {1, 2}}, {{-2, -1}}),\n ({{-2, -1}, {1, 2}}, {{-3, 3}}),\n ({{-1, 3}, {1, 2}}, {{-3, -2}}),\n ({{-3, 3}, {-2, -1}, {1, 2}}, {})]\n \"\"\"\n for D in DyckWords(self._n):\n markable = set()\n unmarked = []\n unpaired = []\n # Determine the pairing and which pairings are markable\n for i,d in enumerate(D):\n if i >= self._n:\n i = -2*self._n + i\n else:\n i += 1\n if d == 1:\n unpaired.append(i)\n else: # d == 0\n m = unpaired.pop()\n if not unpaired:\n markable.add((m, i))\n else:\n unmarked.append((m, i))\n for X in powerset(markable):\n yield self.element_class(self, X, unmarked + list(markable.difference(X)))\n\n Element = BlobDiagram\n\nclass BlobAlgebra(CombinatorialFreeModule):\n r\"\"\"\n The blob algebra.\n\n The *blob algebra* (also known as the Temperley-Lieb algebra of type `B`\n in [ILZ2018]_, but is a quotient of the Temperley-Lieb algebra of type `B`\n defined in [Graham1985]_) is a diagram-type algebra introduced in\n [MS1994]_ whose basis consists of :class:`Temperley-Lieb diagrams\n <sage.combinat.diagram_algebras.TemperleyLiebDiagram>`, noncrossing\n perfect matchings, that may contain blobs on strands that can be\n deformed so that the blob touches the left side (which we can think of\n as a frozen pole).\n\n The form we give here has 3 parameters, the natural one from the\n :class:`Temperley-Lieb algebra <sage.combinat.diagram_algebras.TemperleyLiebAlgebra>`,\n one for the idempotent relation, and one for a loop with a blob.\n\n INPUT:\n\n - ``k`` -- the order\n - ``q1`` -- the loop parameter\n - ``q2`` -- the idempotent parameter\n - ``q3`` -- the blob loop parameter\n\n EXAMPLES::\n\n sage: R.<q,r,s> = ZZ[]\n sage: B4 = algebras.Blob(4, q, r, s)\n sage: B = sorted(B4.basis())\n sage: B[14]\n B({{-4, -3}}, {{-2, -1}, {1, 2}, {3, 4}})\n sage: B[40]\n B({{3, 4}}, {{-4, -3}, {-2, -1}, {1, 2}})\n sage: B[14] * B[40]\n q*r*s*B({}, {{-4, -3}, {-2, -1}, {1, 2}, {3, 4}})\n\n REFERENCES:\n\n - [MS1994]_\n - [ILZ2018]_\n \"\"\"\n @staticmethod\n def __classcall_private__(cls, k, q1, q2, q3, base_ring=None, prefix='B'):\n r\"\"\"\n Normalize input to ensure a unique representation.\n\n TESTS::\n\n sage: R.<q,r,s> 
= ZZ[]\n sage: B3 = algebras.Blob(3, q, r, s)\n sage: Bp = algebras.Blob(3, q, r, s, R, prefix='B')\n sage: B3 is Bp\n True\n \"\"\"\n if base_ring is None:\n base_ring = get_coercion_model().common_parent(q1, q2, q3)\n q1 = base_ring(q1)\n q2 = base_ring(q2)\n q3 = base_ring(q3)\n return super(BlobAlgebra, cls).__classcall__(cls, k, q1, q2, q3, base_ring, prefix)\n\n def __init__(self, k, q1, q2, q3, base_ring, prefix):\n r\"\"\"\n Initialize ``self``.\n\n TESTS::\n\n sage: R.<q,r,s> = ZZ[]\n sage: B4 = algebras.Blob(4, q, r, s)\n sage: TestSuite(B4).run()\n\n sage: B3 = algebras.Blob(3, q, r, s)\n sage: B = list(B3.basis())\n sage: TestSuite(B3).run(elements=B) # long time\n \"\"\"\n self._q1 = q1\n self._q2 = q2\n self._q3 = q3\n diagrams = BlobDiagrams(k)\n cat = Algebras(base_ring.category()).FiniteDimensional().WithBasis()\n CombinatorialFreeModule.__init__(self, base_ring, diagrams, category=cat,\n prefix=prefix, bracket=False)\n\n def _ascii_art_term(self, diagram):\n r\"\"\"\n Return an ascii art representation of ``diagram``.\n\n EXAMPLES::\n\n sage: R.<q,r,s> = ZZ[]\n sage: B2 = algebras.Blob(2, q, r, s)\n sage: x = B2.an_element()\n sage: ascii_art(x) # indirect doctest\n o o o o o o\n 2* `-` + 3* `-` + 2* `0`\n .-. .0. .-.\n o o o o o o\n \"\"\"\n return TL_diagram_ascii_art(diagram.marked+diagram.unmarked, use_unicode=False,\n blobs=diagram.marked)\n\n def _unicode_art_term(self, diagram):\n r\"\"\"\n Return a unicode art representation of ``diagram``.\n\n EXAMPLES::\n\n sage: R.<q,r,s> = ZZ[]\n sage: B2 = algebras.Blob(2, q, r, s)\n sage: x = B2.an_element()\n sage: unicode_art(x) # indirect doctest\n ⚬ ⚬ ⚬ ⚬ ⚬ ⚬\n 2* ╰─╯ + 3* ╰─╯ + 2* ╰●╯\n ╭─╮ ╭●╮ ╭─╮\n ⚬ ⚬ ⚬ ⚬ ⚬ ⚬\n \"\"\"\n return TL_diagram_ascii_art(diagram.marked+diagram.unmarked, use_unicode=True,\n blobs=diagram.marked)\n\n def _latex_term(self, diagram):\n r\"\"\"\n Return a latex representation of ``diagram``.\n\n EXAMPLES::\n\n sage: R.<q,r,s> = ZZ[]\n sage: B2 = algebras.Blob(2, q, r, s)\n sage: latex(B2.an_element()) # indirect doctest\n 2\\begin{tikzpicture}[scale = 0.5,thick, baseline={(0,-1ex/2)}]\n \\tikzstyle{vertex} = [shape = circle, minimum size = 7pt, inner sep = 1pt]\n \\node[vertex] (G--2) at (1.5, -1) [shape = circle, draw] {};\n \\node[vertex] (G--1) at (0.0, -1) [shape = circle, draw] {};\n \\node[vertex] (G-1) at (0.0, 1) [shape = circle, draw] {};\n \\node[vertex] (G-2) at (1.5, 1) [shape = circle, draw] {};\n \\draw[] (G--2) .. controls +(-0.5, 0.5) and +(0.5, 0.5) .. (G--1);\n \\draw[] (G-1) .. controls +(0.5, -0.5) and +(-0.5, -0.5) .. (G-2);\n \\end{tikzpicture}\n + 3\\begin{tikzpicture}[scale = 0.5,thick, baseline={(0,-1ex/2)}]\n \\tikzstyle{vertex} = [shape = circle, minimum size = 7pt, inner sep = 1pt]\n \\node[vertex] (G--2) at (1.5, -1) [shape = circle, draw] {};\n \\node[vertex] (G--1) at (0.0, -1) [shape = circle, draw] {};\n \\node[vertex] (G-1) at (0.0, 1) [shape = circle, draw] {};\n \\node[vertex] (G-2) at (1.5, 1) [shape = circle, draw] {};\n \\draw[blue,very thick] (G--2) .. controls +(-0.5, 0.5) and +(0.5, 0.5) .. node[midway,circle,fill,scale=0.6] {} (G--1);\n \\draw[] (G-1) .. controls +(0.5, -0.5) and +(-0.5, -0.5) .. 
(G-2);\n \\end{tikzpicture}\n + 2\\begin{tikzpicture}[scale = 0.5,thick, baseline={(0,-1ex/2)}]\n \\tikzstyle{vertex} = [shape = circle, minimum size = 7pt, inner sep = 1pt]\n \\node[vertex] (G-1) at (0.0, 1) [shape = circle, draw] {};\n \\node[vertex] (G-2) at (1.5, 1) [shape = circle, draw] {};\n \\node[vertex] (G--2) at (1.5, -1) [shape = circle, draw] {};\n \\node[vertex] (G--1) at (0.0, -1) [shape = circle, draw] {};\n \\draw[blue,very thick] (G-1) .. controls +(0.5, -0.5) and +(-0.5, -0.5) .. node[midway,circle,fill,scale=0.6] {} (G-2);\n \\draw[] (G--2) .. controls +(-0.5, 0.5) and +(0.5, 0.5) .. (G--1);\n \\end{tikzpicture}\n \"\"\"\n def edge_options(P):\n if P[1] < P[0]:\n P = [P[1], P[0]]\n if tuple(P) in diagram.marked:\n return 'blue,very thick'\n return ''\n def edge_additions(P):\n if P[1] < P[0]:\n P = [P[1], P[0]]\n if tuple(P) in diagram.marked:\n return 'node[midway,circle,fill,scale=0.6] {} '\n return ''\n return diagram_latex(diagram.marked+diagram.unmarked,\n edge_options=edge_options,\n edge_additions=edge_additions)\n\n def order(self):\n r\"\"\"\n Return the order of ``self``.\n\n The order of a partition algebra is defined as half of the number\n of nodes in the diagrams.\n\n EXAMPLES::\n\n sage: R.<q,r,s> = ZZ[]\n sage: B4 = algebras.Blob(4, q, r, s)\n sage: B4.order()\n 4\n \"\"\"\n return self._indices.order()\n\n @cached_method\n def one_basis(self):\n r\"\"\"\n Return the index of the basis element `1`.\n\n EXAMPLES::\n\n sage: R.<q,r,s> = ZZ[]\n sage: B4 = algebras.Blob(4, q, r, s)\n sage: B4.one_basis()\n ({}, {{-4, 4}, {-3, 3}, {-2, 2}, {-1, 1}})\n \"\"\"\n B = self._indices\n return B.element_class(B, [], [[i, -i] for i in range(1, self.order()+1)])\n\n def product_on_basis(self, top, bot):\n r\"\"\"\n Return the product of the basis elements indexed by ``top``\n and ``bot``.\n\n EXAMPLES::\n\n sage: R.<q,r,s> = ZZ[]\n sage: B4 = algebras.Blob(4, q, r, s)\n sage: B = B4.basis()\n sage: BD = sorted(B.keys())\n sage: BD[14]\n ({{-4, -3}}, {{-2, -1}, {1, 2}, {3, 4}})\n sage: BD[40]\n ({{3, 4}}, {{-4, -3}, {-2, -1}, {1, 2}})\n sage: B4.product_on_basis(BD[14], BD[40])\n q*r*s*B({}, {{-4, -3}, {-2, -1}, {1, 2}, {3, 4}})\n sage: all(len((x*y).support()) == 1 for x in B for y in B)\n True\n \"\"\"\n ret_lists = [[], []]\n coeff = self.base_ring().one()\n top_marked = set(top.marked)\n top_unmarked = set(top.unmarked)\n bot_marked = set(bot.marked)\n bot_unmarked = set(bot.unmarked)\n\n for top_set, is_unmarked in [(top_marked, 0), (top_unmarked, 1)]:\n while top_set:\n # We are starting a new strand\n cur, stop = top_set.pop() # note that cur < stop\n unmarked = is_unmarked\n #print(top_set, unmarked, cur, stop)\n if cur > 0: # Both are anchored to the top\n ret_lists[unmarked].append((cur, stop))\n continue\n anchored = bool(stop > 0) # Possibly only stop is anchored\n\n # Follow the path from cur until we either reach stop or\n # we break out of the loop because both ends are anchored\n while anchored or cur != stop:\n #print(anchored, unmarked, cur, stop)\n cur = -cur # Move cur to the bottom diagram\n for X in bot_marked:\n if cur in X:\n if unmarked:\n unmarked = 0\n else:\n coeff *= self._q2\n prev = cur\n cur = X[1-X.index(prev)]\n bot_marked.remove(X)\n break\n for X in bot_unmarked:\n if cur in X:\n prev = cur\n cur = X[1-X.index(prev)]\n bot_unmarked.remove(X)\n break\n if cur < 0: # cur is anchored at the bottom\n if anchored:\n ret_lists[unmarked].append((stop, cur))\n break\n else:\n anchored = True\n stop, cur = cur, stop # stop is now anchored to 
the bottom\n continue\n cur = -cur # bring cur back to the top diagram\n for X in top_marked:\n if cur in X:\n if unmarked:\n unmarked = 0\n else:\n coeff *= self._q2\n prev = cur\n cur = X[1-X.index(prev)]\n top_marked.remove(X)\n break\n for X in top_unmarked:\n if cur in X:\n prev = cur\n cur = X[1-X.index(prev)]\n top_unmarked.remove(X)\n break\n if cur > 0: # cur is anchored at the top\n if anchored:\n ret_lists[unmarked].append((stop, cur))\n break\n else:\n anchored = True\n stop, cur = cur, stop # stop is now anchored to the top\n if cur == stop: # We have found a (marked) loop\n if unmarked:\n coeff *= self._q1\n else:\n coeff *= self._q3\n # Everything remaining in the bottom sets are just anchored\n # at the bottom, (i.e., are of the form {-i, -j}).\n ret_lists[0].extend(bot_marked)\n ret_lists[1].extend(bot_unmarked)\n\n if coeff == 0:\n return self.zero()\n diagram = self._indices.element_class(self._indices, ret_lists[0], ret_lists[1])\n return self._from_dict({diagram: coeff}, remove_zeros=False)\n", "id": "6524617", "language": "Python", "matching_score": 0.18915362656116486, "max_stars_count": 1742, "path": "src/sage/combinat/blob_algebra.py" }, { "content": "from . import Feature, FeatureTestResult\nfrom .join_feature import JoinFeature\n\n\nclass MIPBackend(Feature):\n r\"\"\"\n A feature describing whether a :class:`MixedIntegerLinearProgram` backend is available.\n \"\"\"\n def _is_present(self):\n r\"\"\"\n Test for the presence of a :class:`MixedIntegerLinearProgram` backend.\n\n EXAMPLES::\n\n sage: from sage.features.mip_backends import CPLEX\n sage: CPLEX()._is_present() # optional - cplex\n FeatureTestResult('cplex', True)\n \"\"\"\n try:\n from sage.numerical.mip import MixedIntegerLinearProgram\n MixedIntegerLinearProgram(solver=self.name)\n return FeatureTestResult(self, True)\n except Exception:\n return FeatureTestResult(self, False)\n\n\nclass CPLEX(MIPBackend):\n r\"\"\"\n A feature describing whether a :class:`MixedIntegerLinearProgram` backend ``CPLEX`` is available.\n \"\"\"\n def __init__(self):\n r\"\"\"\n TESTS::\n\n sage: from sage.features.mip_backends import CPLEX\n sage: CPLEX()._is_present() # optional - cplex\n FeatureTestResult('cplex', True)\n \"\"\"\n MIPBackend.__init__(self, 'cplex',\n spkg='sage_numerical_backends_cplex')\n\n\nclass Gurobi(MIPBackend):\n r\"\"\"\n A feature describing whether a :class:`MixedIntegerLinearProgram` backend ``Gurobi`` is available.\n \"\"\"\n def __init__(self):\n r\"\"\"\n TESTS::\n\n sage: from sage.features.mip_backends import Gurobi\n sage: Gurobi()._is_present() # optional - gurobi\n FeatureTestResult('gurobi', True)\n \"\"\"\n MIPBackend.__init__(self, 'gurobi',\n spkg='sage_numerical_backends_gurobi')\n\n\nclass COIN(JoinFeature):\n r\"\"\"\n A feature describing whether a :class:`MixedIntegerLinearProgram` backend ``COIN`` is available.\n \"\"\"\n def __init__(self):\n r\"\"\"\n TESTS::\n\n sage: from sage.features.mip_backends import COIN\n sage: COIN()._is_present() # optional - sage_numerical_backends_coin\n FeatureTestResult('sage_numerical_backends_coin', True)\n \"\"\"\n JoinFeature.__init__(self, 'sage_numerical_backends_coin',\n [MIPBackend('coin')],\n spkg='sage_numerical_backends_coin')\n", "id": "715831", "language": "Python", "matching_score": 2.1783807277679443, "max_stars_count": 0, "path": "src/sage/features/mip_backends.py" }, { "content": "from . 
import Executable\nfrom .join_feature import JoinFeature\n\n\nclass FourTi2Executable(Executable):\n r\"\"\"\n Feature for the 4ti2 executables.\n \"\"\"\n def __init__(self, name):\n r\"\"\"\n TESTS::\n\n sage: from sage.features.four_ti_2 import FourTi2Executable\n sage: isinstance(FourTi2Executable('hilbert'), FourTi2Executable)\n True\n \"\"\"\n from sage.env import SAGE_ENV\n Executable.__init__(self,\n name=\"4ti2-\" + name,\n executable=SAGE_ENV.get(\"FOURTITWO_\" + name.upper(), None) or name,\n spkg=\"4ti2\")\n\n\nclass FourTi2(JoinFeature):\n r\"\"\"\n A :class:`sage.features.Feature` describing the presence of the ``4ti2`` executables.\n\n EXAMPLES::\n\n sage: from sage.features.four_ti_2 import FourTi2\n sage: FourTi2().is_present() # optional - 4ti2\n FeatureTestResult('4ti2', True)\n \"\"\"\n def __init__(self):\n r\"\"\"\n TESTS::\n\n sage: from sage.features.four_ti_2 import FourTi2\n sage: isinstance(FourTi2(), FourTi2)\n True\n \"\"\"\n JoinFeature.__init__(self, '4ti2',\n [FourTi2Executable(x)\n # same list is tested in build/pkgs/4ti2/spkg-configure.m4\n for x in ('hilbert', 'markov', 'graver', 'zsolve', 'qsolve',\n 'rays', 'ppi', 'circuits', 'groebner')])\n", "id": "12110150", "language": "Python", "matching_score": 2.1558353900909424, "max_stars_count": 0, "path": "src/sage/features/four_ti_2.py" }, { "content": "# -*- coding: utf-8 -*-\nr\"\"\"\nCheck for pdflatex and equivalent programs\n\"\"\"\n# ****************************************************************************\n# Copyright (C) 2021 <NAME> <<EMAIL>>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 2 of the License, or\n# (at your option) any later version.\n# https://www.gnu.org/licenses/\n# ****************************************************************************\n\nfrom . 
import Executable, FeatureTestResult\n\nclass latex(Executable):\n r\"\"\"\n A :class:`sage.features.Feature` describing the presence of ``latex``\n\n EXAMPLES::\n\n sage: from sage.features.latex import latex\n sage: latex().is_present() # optional: latex\n FeatureTestResult('latex', True)\n \"\"\"\n def __init__(self):\n r\"\"\"\n TESTS::\n\n sage: from sage.features.latex import latex\n sage: isinstance(latex(), latex)\n True\n \"\"\"\n Executable.__init__(self, \"latex\", executable=\"latex\",\n url=\"https://www.latex-project.org/\")\n\n def is_functional(self):\n r\"\"\"\n Return whether `latex` in the path is functional.\n\n EXAMPLES:\n\n sage: from sage.features.latex import latex\n sage: latex().is_functional() # optional: latex\n FeatureTestResult('latex', True)\n \"\"\"\n lines = []\n lines.append(r\"\\documentclass{article}\")\n lines.append(r\"\\begin{document}\")\n lines.append(r\"$\\alpha+2$\")\n lines.append(r\"\\end{document}\")\n content = '\\n'.join(lines)\n\n # create a simple tex file with the content\n from sage.misc.temporary_file import tmp_filename\n base_filename_tex = tmp_filename(ext='.tex')\n with open(base_filename_tex, 'w') as f:\n f.write(content)\n import os\n base, filename_tex = os.path.split(base_filename_tex)\n\n # running latex\n from subprocess import run\n cmd = ['latex', '-interaction=nonstopmode', filename_tex]\n cmd = ' '.join(cmd)\n result = run(cmd, shell=True, cwd=base, capture_output=True, text=True)\n\n # return\n if result.returncode == 0:\n return FeatureTestResult(self, True)\n else:\n return FeatureTestResult(self, False, reason=\"Running latex on \"\n \"a sample file returned non-zero \"\n \"exit status {}\".format(result.returncode))\n\nclass pdflatex(Executable):\n r\"\"\"\n A :class:`sage.features.Feature` describing the presence of ``pdflatex``\n\n EXAMPLES::\n\n sage: from sage.features.latex import pdflatex\n sage: pdflatex().is_present() # optional: pdflatex\n FeatureTestResult('pdflatex', True)\n \"\"\"\n def __init__(self):\n r\"\"\"\n TESTS::\n\n sage: from sage.features.latex import pdflatex\n sage: isinstance(pdflatex(), pdflatex)\n True\n \"\"\"\n Executable.__init__(self, \"pdflatex\", executable=\"pdflatex\",\n url=\"https://www.latex-project.org/\")\n\nclass xelatex(Executable):\n r\"\"\"\n A :class:`sage.features.Feature` describing the presence of ``xelatex``\n\n EXAMPLES::\n\n sage: from sage.features.latex import xelatex\n sage: xelatex().is_present() # optional: xelatex\n FeatureTestResult('xelatex', True)\n \"\"\"\n def __init__(self):\n r\"\"\"\n TESTS::\n\n sage: from sage.features.latex import xelatex\n sage: isinstance(xelatex(), xelatex)\n True\n \"\"\"\n Executable.__init__(self, \"xelatex\", executable=\"xelatex\",\n url=\"https://www.latex-project.org/\")\n\nclass lualatex(Executable):\n r\"\"\"\n A :class:`sage.features.Feature` describing the presence of ``lualatex``\n\n EXAMPLES::\n\n sage: from sage.features.latex import lualatex\n sage: lualatex().is_present() # optional: lualatex\n FeatureTestResult('lualatex', True)\n \"\"\"\n def __init__(self):\n r\"\"\"\n TESTS::\n\n sage: from sage.features.latex import lualatex\n sage: isinstance(lualatex(), lualatex)\n True\n \"\"\"\n Executable.__init__(self, \"lualatex\", executable=\"lualatex\",\n url=\"https://www.latex-project.org/\")\n", "id": "10483810", "language": "Python", "matching_score": 4.232082366943359, "max_stars_count": 0, "path": "src/sage/features/latex.py" }, { "content": "# -*- coding: utf-8 -*-\nr\"\"\"\nCheck for pdf2svg\n\"\"\"\n# 
****************************************************************************\n# Copyright (C) 2021 <NAME> <<EMAIL>>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 2 of the License, or\n# (at your option) any later version.\n# https://www.gnu.org/licenses/\n# ****************************************************************************\n\nfrom . import Executable\n\nclass pdf2svg(Executable):\n r\"\"\"\n A :class:`sage.features.Feature` describing the presence of ``pdf2svg``\n\n EXAMPLES::\n\n sage: from sage.features.pdf2svg import pdf2svg\n sage: pdf2svg().is_present() # optional: pdf2svg\n FeatureTestResult('pdf2svg', True)\n \"\"\"\n def __init__(self):\n r\"\"\"\n TESTS::\n\n sage: from sage.features.pdf2svg import pdf2svg\n sage: isinstance(pdf2svg(), pdf2svg)\n True\n \"\"\"\n Executable.__init__(self, \"pdf2svg\", executable=\"pdf2svg\",\n spkg='pdf2svg',\n url=\"http://www.cityinthesky.co.uk/opensource/pdf2svg/\")\n", "id": "7420115", "language": "Python", "matching_score": 0.643677830696106, "max_stars_count": 0, "path": "src/sage/features/pdf2svg.py" }, { "content": "r\"\"\"\nCheck for pynormaliz\n\"\"\"\nfrom . import PythonModule\nfrom .join_feature import JoinFeature\n\n\nclass PyNormaliz(JoinFeature):\n r\"\"\"\n A :class:`sage.features.Feature` describing the presence of the\n Python package ``PyNormaliz``.\n\n EXAMPLES::\n\n sage: from sage.features.normaliz import PyNormaliz\n sage: PyNormaliz().is_present() # optional - pynormaliz\n FeatureTestResult('pynormaliz', True)\n \"\"\"\n def __init__(self):\n r\"\"\"\n TESTS::\n\n sage: from sage.features.normaliz import PyNormaliz\n sage: isinstance(PyNormaliz(), PyNormaliz)\n True\n \"\"\"\n JoinFeature.__init__(self, 'pynormaliz',\n [PythonModule('PyNormaliz', spkg=\"pynormaliz\")])\n", "id": "7804548", "language": "Python", "matching_score": 2.2894668579101562, "max_stars_count": 0, "path": "src/sage/features/normaliz.py" }, { "content": "from . import PythonModule\nfrom .join_feature import JoinFeature\n\n\nclass JuPyMake(JoinFeature):\n r\"\"\"\n A :class:`sage.features.Feature` describing the presence of the ``JuPyMake``\n module, a Python interface to the polymake library.\n\n EXAMPLES::\n\n sage: from sage.features.polymake import JuPyMake\n sage: JuPyMake().is_present() # optional: jupymake\n FeatureTestResult('jupymake', True)\n \"\"\"\n def __init__(self):\n r\"\"\"\n TESTS::\n\n sage: from sage.features.polymake import JuPyMake\n sage: isinstance(JuPyMake(), JuPyMake)\n True\n \"\"\"\n JoinFeature.__init__(self, \"jupymake\",\n [PythonModule(\"JuPyMake\", spkg=\"jupymake\")])\n", "id": "3127358", "language": "Python", "matching_score": 2.2971863746643066, "max_stars_count": 0, "path": "src/sage/features/polymake.py" }, { "content": "from . 
import PythonModule\nfrom .join_feature import JoinFeature\n\n\nclass Tdlib(JoinFeature):\n r\"\"\"\n A :class:`sage.features.Feature` describing the presence of the ``TDLib``.\n \"\"\"\n def __init__(self):\n r\"\"\"\n TESTS::\n\n sage: from sage.features.tdlib import Tdlib\n sage: isinstance(Tdlib(), Tdlib)\n True\n \"\"\"\n # Currently part of sagemath_standard, conditionally built.\n # Will be changed to spkg='sagemath_tdlib' later\n JoinFeature.__init__(self, 'tdlib',\n [PythonModule('sage.graphs.graph_decompositions.tdlib', spkg='tdlib')])\n", "id": "6792638", "language": "Python", "matching_score": 1.9267350435256958, "max_stars_count": 0, "path": "src/sage/features/tdlib.py" } ]
2.133817
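The sage.features.latex file in the row above verifies that a LaTeX toolchain is actually usable by compiling a tiny document and checking the exit status. Below is a minimal standalone sketch of the same idea outside of Sage; latex_is_functional is a hypothetical helper name (not part of Sage) and the check assumes a `latex` binary on the PATH.

import os
import shutil
import subprocess
import tempfile

def latex_is_functional():
    # Hypothetical standalone adaptation of latex().is_functional() above:
    # compile a trivial document and report whether latex exited cleanly.
    if shutil.which('latex') is None:
        return False
    content = '\n'.join([
        r"\documentclass{article}",
        r"\begin{document}",
        r"$\alpha+2$",
        r"\end{document}",
    ])
    with tempfile.TemporaryDirectory() as workdir:
        tex_path = os.path.join(workdir, 'probe.tex')
        with open(tex_path, 'w') as f:
            f.write(content)
        # -interaction=nonstopmode keeps latex from waiting for input on errors
        result = subprocess.run(['latex', '-interaction=nonstopmode', 'probe.tex'],
                                cwd=workdir, capture_output=True, text=True)
    return result.returncode == 0

if __name__ == '__main__':
    print(latex_is_functional())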
picleslivre
[ { "content": "import json\nimport unittest\ntry:\n from unittest import mock\nexcept ImportError:\n import mock\n\nfrom schemaprobe import JsonProbe, ensure, TestCaseMixin\n\n#------------------\n# Fixtures\n#------------------\njson_schema = '''{\n \"$schema\": \"http://json-schema.org/schema#\",\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"date\": {\"type\": \"number\"},\n \"price\": {\"type\": \"number\"},\n \"amount\": {\"type\": \"number\"},\n \"tid\": {\"type\": \"number\"},\n \"price_currency\": {\"type\": \"string\"},\n \"item\": {\"type\": \"string\"},\n \"trade_type\": {\"type\": \"string\"}\n }\n }\n}'''\n\n\njson_data = '''[{\"date\":1399339082,\n \"price\":423.188,\n \"amount\":0.01,\n \"tid\":36961108,\n \"price_currency\":\"USD\",\n \"item\":\"BTC\",\n \"trade_type\":\"bid\"}]'''\n\n\n#------------------\n# Unit tests\n#------------------\nclass JsonProbeTests(unittest.TestCase):\n @mock.patch.object(JsonProbe, '_jsonschema', None)\n def test_jsonschema_must_be_present(self):\n self.assertRaises(TypeError, lambda: JsonProbe(json_schema))\n\n def test_normalize_input(self):\n probe = JsonProbe(json_schema)\n expected = json.loads(json_schema)\n\n self.assertEqual(probe._normalize_input(json_schema), expected)\n\n def test_schema_normalized_during_init(self):\n probe = JsonProbe(json_schema)\n expected = json.loads(json_schema)\n\n self.assertEqual(probe.schema, expected)\n\n def test_valid_json_data(self):\n probe = JsonProbe(json_schema)\n\n self.assertTrue(probe.validate(json_data))\n\n def test_valid_python_data(self):\n probe = JsonProbe(json_schema)\n py_data = json.loads(json_data)\n\n self.assertTrue(probe.validate(py_data))\n\n def test_invalid_python_data(self):\n probe = JsonProbe(json_schema)\n py_data = ['foo', 'bar']\n\n self.assertFalse(probe.validate(py_data))\n\n def test_invalid_json_data(self):\n probe = JsonProbe(json_schema)\n data = json.dumps(['foo', 'bar'])\n\n self.assertFalse(probe.validate(data))\n\n\nclass EnsureDecoratorTests(unittest.TestCase):\n def test_valid_python_data(self):\n @ensure(JsonProbe(json_schema))\n def FixtureFactory():\n return json.loads(json_data)\n\n self.assertTrue(FixtureFactory())\n\n def test_invalid_python_data(self):\n @ensure(JsonProbe(json_schema))\n def FixtureFactory():\n return ['foo', 'bar']\n\n self.assertRaises(TypeError, lambda: FixtureFactory())\n\n def test_metadata_is_maintained(self):\n @ensure(JsonProbe(json_schema))\n def FixtureFactory():\n \"\"\"Docstring\"\"\"\n return json.loads(json_data)\n\n self.assertEqual(FixtureFactory.__doc__, 'Docstring')\n self.assertEqual(FixtureFactory.__name__, 'FixtureFactory')\n\n\nclass FooBar(TestCaseMixin, unittest.TestCase):\n def test_foo(self):\n self.assertSchemaIsValid(JsonProbe(json_schema),\n 'https://btc-e.com/api/2/btc_usd/trades')\n\n", "id": "1410222", "language": "Python", "matching_score": 3.7732670307159424, "max_stars_count": 0, "path": "tests.py" }, { "content": "from __future__ import unicode_literals\n\nimport sys\nimport functools\nimport json\ntry:\n import jsonschema\nexcept ImportError:\n jsonschema = None\ntry:\n import requests\nexcept ImportError:\n requests = None\n\n\n__version__ = '1.0.0.dev1'\n\n__all__ = ['ensure', 'JsonProbe']\n\n\n# --------------\n# Py2 compat\n# --------------\nPY2 = sys.version_info[0] == 2\n\nif PY2:\n string_types = (str, unicode)\nelse:\n string_types = (str,)\n# --------------\n\n\nclass JsonProbe(object):\n \"\"\"\n An instance that knows how to perform validations 
against json-schema.\n \"\"\"\n _jsonschema = jsonschema\n\n def __init__(self, schema):\n \"\"\"\n :param schema: json-schema as json-encoded text or python datastructures.\n \"\"\"\n if self._jsonschema is None:\n raise TypeError('Missing dependency `jsonschema`.')\n\n self.schema = self._normalize_input(schema)\n\n def validate(self, input):\n \"\"\"\n Validate `input` agains the given schema.\n\n :param input: json-encoded text or python datastructures.\n :returns: boolean\n \"\"\"\n data = self._normalize_input(input)\n\n try:\n jsonschema.validate(data, self.schema)\n except self._jsonschema.ValidationError:\n return False\n else:\n return True\n\n def _normalize_input(self, input):\n \"\"\"\n Always return python datastructures.\n\n :param input: json-encoded text or python datastructures.\n \"\"\"\n if isinstance(input, string_types):\n return json.loads(input)\n else:\n return input\n\n\ndef ensure(probe):\n \"\"\"\n Decorator that asserts the returned value is valid against `probe`.\n \"\"\"\n def ensure_decorator(f):\n @functools.wraps(f)\n def wrapper(*args, **kwargs):\n result = f(*args, **kwargs)\n if probe.validate(result):\n return result\n else:\n raise TypeError('Returned data does not conform with the given schema.')\n return wrapper\n return ensure_decorator\n\n\nclass TestCaseMixin(object):\n def assertSchemaIsValid(self, probe, resource_url, msg=None):\n api_sample = requests.get(resource_url)\n\n if not probe.validate(api_sample.json()):\n raise self.failureException(msg or 'Schema is invalid.')\n", "id": "9445954", "language": "Python", "matching_score": 0.9661875367164612, "max_stars_count": 0, "path": "schemaprobe.py" }, { "content": "# coding: utf-8\nimport abc\ntry:\n from queue import Queue\nexcept ImportError: # PY2\n from Queue import Queue\nimport threading\nimport multiprocessing\nimport logging\n\n\n__version__ = ('0', '11')\n__all__ = ['UnmetPrecondition', 'Filter', 'Pipe', 'Pipeline', 'precondition',\n 'filter', 'pipe']\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass UnmetPrecondition(Exception):\n pass\n\n\nclass ThreadSafeIter(object):\n \"\"\"\n Wraps an iterable for safe use in a threaded environment.\n \"\"\"\n def __init__(self, it):\n self.it = iter(it)\n self.lock = threading.Lock()\n\n def __iter__(self):\n return self\n\n def __next__(self):\n with self.lock:\n return next(self.it)\n next = __next__\n\n\ndef thread_based_prefetch(iterable, buff):\n\n def worker(job_queue, it):\n for item in it:\n job_queue.put(item)\n\n job_queue.put(None)\n\n max_threads = multiprocessing.cpu_count() * 2\n total_threads = buff if buff < max_threads else max_threads\n\n running_threads = []\n job_queue = Queue(buff)\n source_data = ThreadSafeIter(iterable)\n\n for t in range(total_threads):\n thread = threading.Thread(target=worker, args=(job_queue, source_data))\n running_threads.append(thread)\n thread.start()\n logger.debug('Spawned worker thread %s' % thread)\n\n while True:\n item = job_queue.get()\n if item is None:\n total_threads -= 1\n logger.debug('Worker thread terminated. %s remaining.' 
% total_threads)\n\n if total_threads == 0:\n return\n else:\n continue\n else:\n yield item\n\n\ndef precondition(precond):\n \"\"\"\n Runs the callable responsible for making some assertions\n about the data structure expected for the transformation.\n\n If the precondition is not achieved, a UnmetPrecondition\n exception must be raised, and then the transformation pipe\n is bypassed.\n \"\"\"\n def decorator(f):\n \"\"\"`f` can be a reference to a method or function. In\n both cases the `data` is expected to be passed as the\n first positional argument (obviously respecting the\n `self` argument when it is a method).\n \"\"\"\n def decorated(*args):\n if len(args) > 2:\n raise TypeError('%s takes only 1 argument (or 2 for instance methods)' % f.__name__)\n\n try:\n instance, data = args\n if not isinstance(instance, Pipe):\n raise TypeError('%s is not a valid pipe instance' % instance)\n\n except ValueError: # tuple unpacking error\n data = args[0]\n\n try:\n precond(data)\n except UnmetPrecondition:\n # bypass the pipe\n return data\n else:\n return f(*args)\n return decorated\n return decorator\n\n\nclass Filter(object):\n \"\"\"\n A segment of the transformation pipeline.\n\n ``transform`` method must return the transformation result.\n Sometimes a transformation process may need to fetch content\n from different endpoints, and it can be achieved through\n the ``fetch_resource`` method.\n \"\"\"\n __metaclass__ = abc.ABCMeta\n\n def feed(self, iterable):\n \"\"\"\n Feeds the filter with data.\n\n :param iterable: the data to be processed\n \"\"\"\n self._iterable_data = iterable\n\n def __iter__(self):\n \"\"\"\n Iters through all items of ``self._iterable_data``, yielding its\n data already transformed.\n\n The iterable interface is the heart of the pipeline machinery.\n \"\"\"\n for data in getattr(self, '_iterable_data', []):\n yield self.transform(data)\n\n @abc.abstractmethod\n def transform(self, data):\n \"\"\"\n Performs the desired transformation to the data.\n \"\"\"\n\n\nPipe = Filter\n\n\nclass FunctionBasedFilter(Filter):\n \"\"\"\n Wraps a function to make possible its usage as a Filter.\n \"\"\"\n def __init__(self, function):\n self.declared_function = function\n\n def transform(self, data):\n return self.declared_function(data)\n\n\nFunctionBasedPipe = FunctionBasedFilter\n\n\nclass Pipeline(object):\n \"\"\"\n Represents a chain of filters filters (duh).\n\n Accepts an arbitrary number of filters that will be executed sequentially\n in order to process the input data.\n\n :param prefetch_callable: (optional) keyword-only argument who\n receives a callable that handles data prefetching. 
Default is\n `thread_based_prefetch`.\n \"\"\"\n def __init__(self, *args, **kwargs):\n self._filters = []\n\n for _filter in args:\n # the regular case where Filter instances are passed in\n if isinstance(_filter, Filter):\n self._filters.append(_filter)\n\n # callables may be passed if they have been properly\n # decorated with `filter`.\n elif callable(_filter):\n try:\n self._filters.append(_filter._filter)\n except AttributeError:\n raise TypeError('%s is not a valid filter' % _filter.__name__)\n else:\n raise TypeError('%s is not a valid filter' % _filter.__name__)\n\n # the old way to handle keyword-only args\n prefetch_callable = kwargs.pop('prefetch_callable', None)\n if prefetch_callable:\n self._prefetch_callable = prefetch_callable\n else:\n self._prefetch_callable = thread_based_prefetch\n\n def run(self, data, rewrap=False, prefetch=0):\n \"\"\"\n Wires the pipeline and returns a lazy object of\n the transformed data.\n\n :param data: must be an iterable, where a full document\n must be returned for each loop\n\n :param rewrap: (optional) is a bool that indicates the need to rewrap\n data in cases where iterating over it produces undesired results,\n for instance ``dict`` instances.\n\n :param prefetch: (optional) is an int defining the number of items to\n be prefetched once the pipeline starts yielding data. The\n default prefetching mechanism is based on threads, so be\n careful with CPU-bound processing pipelines.\n \"\"\"\n if rewrap:\n data = [data]\n\n for _filter in self._filters:\n _filter.feed(data)\n data = _filter\n else:\n iterable = self._prefetch_callable(data, prefetch) if prefetch else data\n for out_data in iterable:\n yield out_data\n\n\ndef filter(callable):\n \"\"\"\n Decorator that sets any callable to be used as a filter.\n\n After decorated, the original callable will have a `_filter`\n attribute containing an instance of :class:`FunctionBasedFilter`.\n\n Usage:\n\n >>> @filter\n ... def to_upper(data):\n ... 
return data.upper()\n ...\n >>> ppl = Pipeline(to_upper)\n \"\"\"\n filter_instance = FunctionBasedFilter(callable)\n setattr(callable, '_filter', filter_instance)\n return callable\n\n\npipe = filter\n\n\n", "id": "12199680", "language": "Python", "matching_score": 3.7394943237304688, "max_stars_count": 1, "path": "plumber.py" }, { "content": "# coding: utf-8\nimport sys\nimport unittest\ntry:\n from unittest import mock\nexcept ImportError: # PY2\n import mock\n\n\nPY2 = sys.version_info[0] == 2\n\n\nclass PipeTests(unittest.TestCase):\n\n def _makeOne(self, *args, **kwargs):\n from plumber import Pipe\n return Pipe(*args, **kwargs)\n\n def test_pipe_cannot_be_instantiated(self):\n data = {\n 'abstract_keyword_languages': None,\n 'acronym': 'AISS',\n }\n\n self.assertRaises(TypeError, lambda: self._makeOne(data))\n\n def test_returns_an_iterator(self):\n from plumber import Pipe\n\n class Blitz(Pipe):\n def transform(self, data):\n return 'Foo'\n\n data = {\n 'abstract_keyword_languages': None,\n 'acronym': 'AISS',\n }\n\n p = Blitz()\n p.feed(data)\n self.assertTrue(hasattr(iter(p), 'next' if PY2 else '__next__'))\n\n def test_accepts_generator_objects(self):\n from plumber import Pipe\n\n class Blitz(Pipe):\n def transform(self, data):\n return 'Foo'\n\n def make_generator():\n data = {\n 'abstract_keyword_languages': None,\n 'acronym': 'AISS',\n }\n yield data\n\n p = Blitz()\n p.feed(make_generator())\n self.assertTrue(hasattr(iter(p), 'next' if PY2 else '__next__'))\n\n def test_passing_precondition(self):\n from plumber import Pipe, precondition\n\n class Blitz(Pipe):\n @precondition(lambda x: None)\n def transform(self, data):\n return {\n 'abstract_keyword_languages': None,\n 'acronym': 'AISS',\n }\n\n data = [\n {\n 'abstract_keyword_languages': None,\n 'acronym': 'AISS',\n },\n ]\n\n p = Blitz()\n p.feed(data)\n self.assertEqual(next(iter(p)), data[0])\n\n def test_not_passing_precondition(self):\n from plumber import Pipe, precondition, UnmetPrecondition\n def precond(data):\n raise UnmetPrecondition()\n\n class Blitz(Pipe):\n @precondition(precond)\n def transform(self, data):\n \"\"\"\n This transformation is not called\n \"\"\"\n\n data = [\n {\n 'abstract_keyword_languages': None,\n 'acronym': 'AISS',\n },\n ]\n\n p = Blitz()\n p.feed(data)\n self.assertEqual(next(iter(p)), data[0])\n\n def test_pipes_receiving_arguments_during_initialization(self):\n from plumber import Pipe\n class Blitz(Pipe):\n def __init__(self, func):\n self.func = func\n\n def transform(self, data):\n \"\"\"\n This transformation is not called\n \"\"\"\n return self.func(data)\n\n data = [\n 'abstract_keyword_languages',\n 'acronym',\n ]\n\n p = Blitz(len)\n p.feed(data)\n\n for dt in p:\n self.assertIsInstance(dt, int)\n\n\nclass PipelineTests(unittest.TestCase):\n\n def _makeOneA(self):\n from plumber import Pipe\n\n class A(Pipe):\n def transform(self, data):\n data['name'] = data['name'].strip()\n return data\n\n return A\n\n def _makeOneB(self):\n from plumber import Pipe\n\n class B(Pipe):\n def transform(self, data):\n data['name'] = data['name'].upper()\n return data\n\n return B\n\n def test_run_pipeline(self):\n from plumber import Pipeline\n A = self._makeOneA()\n B = self._makeOneB()\n\n ppl = Pipeline(A(), B())\n post_data = ppl.run([{'name': ' foo '}])\n\n for pd in post_data:\n self.assertEqual(pd, {'name': 'FOO'})\n\n def test_run_pipeline_prefetching_data(self):\n from plumber import Pipeline\n A = self._makeOneA()\n B = self._makeOneB()\n\n ppl = Pipeline(A(), B())\n post_data = 
ppl.run([{'name': ' foo '}], prefetch=5)\n\n for pd in post_data:\n self.assertEqual(pd, {'name': 'FOO'})\n\n def test_run_pipeline_for_rewrapped_data(self):\n from plumber import Pipeline\n A = self._makeOneA()\n B = self._makeOneB()\n\n ppl = Pipeline(A(), B())\n post_data = ppl.run({'name': ' foo '}, rewrap=True)\n\n for pd in post_data:\n self.assertEqual(pd, {'name': 'FOO'})\n\n def test_pipes_are_run_in_right_order(self):\n from plumber import Pipeline, Pipe\n parent = mock.Mock()\n parent.a = mock.MagicMock(spec=Pipe)\n parent.a.__iter__.return_value = ['foo']\n parent.b = mock.MagicMock(spec=Pipe)\n parent.b.__iter__.return_value = ['foo']\n\n ppl = Pipeline(parent.a, parent.b)\n post_data = ppl.run([{'name': None}]) # placebo input value\n\n for pd in post_data:\n pass # do nothing\n\n parent.mock_calls[0] == mock.call.a.feed([{'name': None}])\n parent.mock_calls[1] == mock.call.b.feed(mock.ANY)\n\n def test_prefetch_callable_is_called_when_prefetch_arg_is_greater_than_zero(self):\n raw_data = [{'name': ' foo '}]\n pos_data = [{'name': 'FOO'}]\n\n pf_callable = mock.MagicMock(return_value=pos_data)\n\n from plumber import Pipeline\n A = self._makeOneA()\n B = self._makeOneB()\n\n ppl = Pipeline(A(), B(), prefetch_callable=pf_callable)\n post_data = ppl.run(raw_data, prefetch=5)\n\n for pd in post_data:\n self.assertEqual(pd, {'name': 'FOO'})\n\n pf_callable.assert_called_with(mock.ANY, 5)\n\n def test_prefetching_generators(self):\n from plumber import Pipeline\n import time\n def makeA():\n from plumber import Pipe\n\n class A(Pipe):\n def transform(self, data):\n data['name'] = data['name'].strip()\n time.sleep(0.3)\n return data\n\n return A\n\n def makeB():\n from plumber import Pipe\n\n class B(Pipe):\n def transform(self, data):\n data['name'] = data['name'].upper()\n return data\n\n return B\n\n raw_data = ({'name': ' foo '} for i in range(10))\n\n A = makeA()\n B = makeB()\n\n ppl = Pipeline(A(), B())\n\n self.assertTrue(hasattr(raw_data, 'next' if PY2 else '__next__'))\n\n post_data = ppl.run(raw_data, prefetch=2)\n\n for pd in post_data:\n self.assertEqual(pd, {'name': 'FOO'})\n\n def test_processing_custom_objects(self):\n class Foo(object):\n def __init__(self):\n self.name = u'Foo name'\n\n raw_data = Foo()\n pos_data = [{'name': 'FOO NAME'}]\n\n from plumber import Pipeline, Pipe\n class A(Pipe):\n def transform(self, data):\n return {'name': data.name}\n\n class B(Pipe):\n def transform(self, data):\n data = {k:v.upper() for k, v in data.items()}\n return data\n\n ppl = Pipeline(A(), B())\n post_data = ppl.run(raw_data, rewrap=True)\n\n for pd in post_data:\n self.assertEqual(pd, {'name': 'FOO NAME'})\n\n\nclass FunctionBasedPipesTests(unittest.TestCase):\n\n def test_pipe_decorator_adds_filter_attribute(self):\n from plumber import pipe\n @pipe\n def do_something(data):\n return data.lower()\n\n self.assertTrue(hasattr(do_something, '_filter'))\n\n def test_pipe_function_runs(self):\n from plumber import Pipeline, pipe\n @pipe\n def do_something(data):\n return data.lower()\n\n ppl = Pipeline(do_something)\n post_data = ppl.run(['FOO'])\n\n self.assertEqual(next(post_data), 'foo')\n\n def test_non_decorated_functions_fails(self):\n from plumber import Pipeline\n def do_something(data):\n return data.lower()\n\n self.assertRaises(TypeError, lambda: Pipeline(do_something))\n\n def test_pipe_function_precondition(self):\n from plumber import Pipeline, pipe, precondition\n @pipe\n @precondition(lambda x: isinstance(x, str))\n def do_something(data):\n return 
data.lower()\n\n ppl = Pipeline(do_something)\n post_data = ppl.run(['FOO'])\n\n self.assertEqual(next(post_data), 'foo')\n\n\nclass PreconditionTests(unittest.TestCase):\n\n def test_wrong_params_list(self):\n from plumber import precondition, UnmetPrecondition\n def precond(data):\n if not isinstance(data, str):\n raise UnmetPrecondition()\n\n @precondition(precond)\n def do_something(data, wrong_param):\n return data.lower()\n\n self.assertRaises(TypeError, lambda: do_something('FOO', 'Bar'))\n\n def test_wrong_params_on_instance_methods(self):\n from plumber import precondition, UnmetPrecondition\n def precond(data):\n if not isinstance(data, str):\n raise UnmetPrecondition()\n\n class Bar(object):\n @precondition(precond)\n def do_something(self, data, wrong_param):\n return data.lower()\n\n bar = Bar()\n self.assertRaises(TypeError, lambda: bar.do_something('FOO', 'Bar'))\n\n", "id": "2380527", "language": "Python", "matching_score": 1.1809241771697998, "max_stars_count": 1, "path": "tests.py" }, { "content": "#!/usr/bin/env python\nimport sys\nimport codecs\nfrom setuptools import setup, Extension\n\nimport plumber\n\ntests_require = []\nPY2 = sys.version_info[0] == 2\nif PY2:\n tests_require.append('mock')\n\n\nsetup(\n name=\"picles.plumber\",\n version='.'.join(plumber.__version__),\n description=\"Simple data transformation pipeline.\",\n long_description=codecs.open('README.rst', mode='r', encoding='utf-8').read(),\n author=\"<NAME> & contributors\",\n author_email=\"<EMAIL>\",\n license=\"BSD\",\n url=\"https://github.com/picleslivre/plumber/\",\n py_modules=[\"plumber\"],\n package_data={'': ['README.md', 'LICENSE']},\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n ],\n tests_require=tests_require,\n test_suite='tests',\n)\n", "id": "6167882", "language": "Python", "matching_score": 4.119259834289551, "max_stars_count": 1, "path": "setup.py" }, { "content": "#!/usr/bin/env python\nimport sys\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\ntests_require = []\nPY2 = sys.version_info[0] == 2\n\nif PY2:\n tests_require.append('mock')\n\nsetup(\n name=\"SolrAPI\",\n version='1.2.0',\n description=\"Python implementation of the main operation in the Solr API Rest\",\n author=\"<NAME>\",\n author_email=\"<EMAIL>\",\n license=\"BSD\",\n url=\"https://github.com/picleslivre/solrapi\",\n py_modules=['SolrAPI'],\n keywords='solr api lucene rest',\n maintainer_email='<EMAIL>',\n download_url='',\n classifiers=[\n \"Topic :: System\",\n \"Topic :: Utilities\",\n \"Programming Language :: Python\",\n \"Operating System :: POSIX :: Linux\",\n \"Development Status :: 1 - Planning\",\n \"Intended Audience :: Customer Service\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: Portuguese (Brazilian)\",\n \"Topic :: Internet :: WWW/HTTP :: Indexing/Search\",\n \"Topic :: Software Development :: Libraries :: Python Modules\"\n ],\n install_requires=[\"requests>=2.18.1\", \"lxml>=3.7.2\"],\n tests_require=tests_require,\n test_suite='tests'\n)\n", "id": "8482785", "language": "Python", "matching_score": 4.59773588180542, "max_stars_count": 0, "path": "setup.py" }, { "content": "#!/usr/bin/env python\nimport sys\nimport re\nfrom 
setuptools import setup, Extension\n\nimport schemaprobe\n\n\nversion = schemaprobe.__version__\n\ninstall_requires = [\n 'jsonschema',\n 'requests',\n]\ntests_require = install_requires[:]\n\nPY2 = sys.version_info[0] == 2\nif PY2:\n tests_require.append('mock')\n\n\nsetup(\n name=\"schemaprobe\",\n version=version,\n description=\"Platform for testing JSON-based RESTful API resources.\",\n author=\"<NAME>\",\n author_email=\"<EMAIL>\",\n license=\"BSD\",\n url=\"http://github.com/picleslivre/schemaprobe/\",\n py_modules=[\"schemaprobe\"],\n install_requires=install_requires,\n tests_require=tests_require,\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Topic :: Software Development :: Testing\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n test_suite=\"tests\",\n)\n", "id": "8216031", "language": "Python", "matching_score": 0.4851664900779724, "max_stars_count": 0, "path": "setup.py" }, { "content": "#!coding: utf-8\n\nimport requests\n\n\nclass Solr(object):\n \"\"\"\n Python implementation of the basic operation in the Solr API Rest\n \"\"\"\n\n def __init__(self, url, timeout=5):\n \"\"\"\n Create an instance of Solr class.\n\n :param url: endpoint of Solr\n :param timeout: Time for any request, default: 5 seconds\n \"\"\"\n self.url = url\n self.timeout = timeout\n\n def select(self, params, format='json'):\n \"\"\"\n Search Solr, return URL and JSON response.\n\n :param params: Dictionary parameters to Solr\n :param format: Format of return send to Solr, default=json\n :param return: Solr response\n \"\"\"\n params['wt'] = format\n\n response = requests.get(self.url + '/select?', params=params, timeout=self.timeout)\n\n return response.text\n\n def delete(self, query, commit=False):\n \"\"\"\n Delete documents matching `query` from Solr.\n\n :param query: Solr query string, see: https://wiki.apache.org/solr/SolrQuerySyntax\n :param commit: Boolean to carry out the operation\n :param return: Solr response\n \"\"\"\n params = {}\n\n if commit:\n params['commit'] = 'true'\n\n headers = {'Content-Type': 'text/xml; charset=utf-8'}\n data = '<delete><query>{0}</query></delete>'.format(query)\n\n response = requests.post(self.url + '/update?', params=params,\n headers=headers, data=data, timeout=self.timeout)\n\n return response.text\n\n def update(self, data, headers=None, commit=False):\n \"\"\"\n Post list of docs to Solr.\n\n :param commit: Boolean to carry out the operation\n :param headers: Dictionary content headers to send,\n default={'Content-Type': 'text/xml; charset=utf-8'}\n :param data: XML or JSON send to Solr, XML ex.:\n\n XML:\n <add>\n <doc>\n <field name=\"id\">XXX</field>\n <field name=\"field_name\">YYY</field>\n </doc>\n </add>\n\n JSON:\n [\n {\n \"id\":\"1\",\n \"ti\":\"This is just a test\"\n },\n {...}\n ]\n :param return: Solr response\n \"\"\"\n params = {}\n if commit:\n params['commit'] = 'true'\n\n if not headers:\n headers = {'Content-Type': 'text/xml; charset=utf-8'}\n\n response = requests.post(self.url + '/update?', params=params,\n headers=headers, data=data, timeout=self.timeout)\n\n return response.text\n\n def commit(self, waitsearcher=False):\n \"\"\"\n Commit uncommitted changes to Solr immediately, without waiting.\n\n :param waitsearcher: Boolean wait or not the Solr to execute\n :param return: Solr response\n \"\"\"\n\n data = '<commit 
waitSearcher=\"' + str(waitsearcher).lower() + '\"/>'\n headers = {'Content-Type': 'text/xml; charset=utf-8'}\n\n response = requests.post(self.url + '/update?', headers=headers,\n data=data, timeout=self.timeout)\n\n return response.text\n\n def optimize(self):\n \"\"\"\n Optimize Solr by API RESTFul.\n \"\"\"\n\n headers = {'Content-Type': 'text/xml; charset=utf-8'}\n\n response = requests.get(self.url + '/update?optimize=true',\n headers=headers, timeout=self.timeout)\n return response.text\n", "id": "5534096", "language": "Python", "matching_score": 2.549436569213867, "max_stars_count": 0, "path": "SolrAPI.py" }, { "content": "#!/usr/bin/env python\n# coding: utf-8\n\nimport unittest\n\n# compatibilizing python 2.x and 3.x\ntry:\n from mock import Mock, patch\nexcept ImportError:\n from unittest.mock import Mock, patch\n\nfrom SolrAPI import Solr\n\n\nclass TestSolr(unittest.TestCase):\n\n def setUp(self):\n self.solr = Solr('http://some.url')\n\n def tearDown(self):\n pass\n\n @patch('SolrAPI.requests')\n def test_select_method(self, mock_requests):\n mock_requests.get.return_value = response = Mock()\n response.text = '{\"responseHeader\":{\"status\":0,\"QTime\":1,\"params\":{\"q\":\"pickles\",\"wt\":\"json\"}},\"{response\": {\"numFound\": 1, \"start\": 0,\"docs\": []}}'\n response.status_code = 200\n\n self.assertEqual(self.solr.select(params={'q': 'pickles'}), '{\"responseHeader\":{\"status\":0,\"QTime\":1,\"params\":{\"q\":\"pickles\",\"wt\":\"json\"}},\"{response\": {\"numFound\": 1, \"start\": 0,\"docs\": []}}')\n\n @patch('SolrAPI.requests')\n def test_select_method_without_params(self, mock_requests):\n mock_requests.get.return_value = response = Mock()\n response.text = '{\"responseHeader\":{\"status\":0,\"QTime\":1,\"params\":{\"wt\":\"json\"}},\"response\":{\"numFound\":0,\"start\":0,\"docs\":[]}}}'\n response.status_code = 200\n\n self.assertEqual(self.solr.select({}), '{\"responseHeader\":{\"status\":0,\"QTime\":1,\"params\":{\"wt\":\"json\"}},\"response\":{\"numFound\":0,\"start\":0,\"docs\":[]}}}')\n\n @patch('SolrAPI.requests')\n def test_select_method_change_return_format(self, mock_requests):\n mock_requests.get.return_value = response = Mock()\n response.text = '<?xml version=\"1.0\" encoding=\"UTF-8\"?><response><lst name=\"responseHeader\"><int name=\"status\">0</int><int name=\"QTime\">1</int><lst name=\"params\"><str name=\"q\">pickles</str<str name=\"wt\">xml</str></lst></lst><result name=\"response\" numFound=\"0\" start=\"0\"></result></lst></response>'\n response.status_code = 200\n\n self.assertEqual(self.solr.select({'q': 'pickles'}, format='xml'), '<?xml version=\"1.0\" encoding=\"UTF-8\"?><response><lst name=\"responseHeader\"><int name=\"status\">0</int><int name=\"QTime\">1</int><lst name=\"params\"><str name=\"q\">pickles</str<str name=\"wt\">xml</str></lst></lst><result name=\"response\" numFound=\"0\" start=\"0\"></result></lst></response>')\n\nif __name__ == \"__main__\":\n unittest.main()\n", "id": "2595473", "language": "Python", "matching_score": 2.0105092525482178, "max_stars_count": 0, "path": "tests.py" } ]
2.549437
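The plumber.py module in the row above builds data-transformation pipelines out of filters, and its docstring and tests show plain functions being turned into filters with the pipe/filter decorator. A minimal usage sketch, assuming that module is importable as `plumber`:

from plumber import Pipeline, pipe

@pipe
def strip_name(data):
    # first stage: trim surrounding whitespace
    return {'name': data['name'].strip()}

@pipe
def upper_name(data):
    # second stage: upper-case the cleaned value
    return {'name': data['name'].upper()}

ppl = Pipeline(strip_name, upper_name)
for item in ppl.run([{'name': ' foo '}]):
    print(item)   # {'name': 'FOO'}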
jbustamante35
[ { "content": "\n# cell pics from http://www.cellimagelibrary.org/groups/9070\n# augmented in photoshop for effect\n\nimport numpy as np \nimport matplotlib.pyplot as plt \nimport os\n\ncwd = os.getcwd()\n\nimage_dir = os.path.join(cwd, 'image_timeseries')\n\n# Show how the paths work\n\n# need to do this list comprehension because of ds_Store\nfiles = [f for f in os.listdir(image_dir) if f.endswith('.jpg')]\n\nredperimage = []\ngreperimage = []\n\n# Show what an image array looks like. A plt.imread then look through i, k, print k\n\nfor image in files:\n\t\n\timg = plt.imread( os.path.join( image_dir, image))\n\treds = img[:,:,0]\n\tredperimage.append( np.sum(reds))\n\tgreens = img[:,:,1]\n\tgreperimage.append( np.sum(greens))\n\nredperimage = np.array(redperimage, dtype = float)\ngreperimage = np.array(greperimage, dtype = float)\n\nratio = redperimage / greperimage\n\nplt.subplot(211)\n# since this X is number of image you can write [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]\nplt.plot(range(0, len(redperimage)), redperimage, 'ro')\nplt.plot(range(0, len(greperimage)), greperimage, 'go')\n\nplt.subplot(212)\nplt.plot(range(0, len(ratio)), ratio, 'ko')\n\nplt.show()\n\n\n\t\n\n\n\n\n\n\n\n\n\t\t\n\n", "id": "4938583", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "langtest/python/tlapse/cell_timeseries.py" }, { "content": "# Convert list of tif images to jpg\n\nimport Image\n\nimport os\nfrom os.path import join as join2\n\ndir = os.getcwd()\n\ntifs = [f for f in os.listdir(dir) if f.endswith('.png')]\n\nfor i in tifs:\n\tImage.open( join2(dir, i)).save( join2(dir, i + '.jpg'), 'JPEG', quality=100)\n\n\t\n\n", "id": "11701836", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "langtest/python/tlapse/image_timeseries/convert_tif_to_JPG.py" } ]
0
T-800
[ { "content": "#! /usr/bin/python\n\nfrom PIL import Image\nimport os\nimport sys\n\n\nclass Pix():\n\n def __init__(self, x, y):\n self.coor = (x, y)\n self.pere = self\n self.taille = 1\n\n def __repr__(self):\n return str(self.coor)+\" \"+str(self.taille)\n\n\ndef photo_to_list(path):\n \"\"\"\n Ouvre l'image et renvoi la liste des pixels\n \"\"\"\n im = Image.open(path)\n im = im.convert(\"L\")\n ens = {a for a in list(im.getdata())}\n width, height = im.size\n im = list(im.getdata())\n tab = [[im[y*width+x] for x in range(width)] for y in range(height)]\n return tab\n\n\ndef create_Pix_Ens(png):\n\n ens = []\n for y in range(len(png)):\n for x in range(len(png[y])):\n if png[y][x] == 0:\n ens += [Pix(x, y)]\n # print(len(ens))\n return ens\n\n\ndef root(p):\n if p.pere != p:\n p.pere = root(p.pere)\n return p.pere\n\n\ndef find(p1, p2):\n return root(p1) == root(p2)\n\n\ndef union(p1, p2):\n if not find(p1, p2):\n rootP1 = root(p1)\n rootP2 = root(p2)\n if rootP1.taille > rootP2.taille:\n rootP2.pere = rootP1\n rootP1.taille += rootP2.taille\n else:\n rootP1.pere = rootP2\n rootP2.taille += rootP1.taille\n\n\ndef r_Voisins(p1, p2):\n if ((p1.coor[1]-p2.coor[1])**2 > int(sys.argv[2])**2):\n return 2\n elif (p1.coor[0]-p2.coor[0])**2 + (p1.coor[1]-p2.coor[1])**2 <=\\\n int(sys.argv[2])**2:\n return 1\n return 0\n\n\ndef algo(ens_Pix):\n for p1 in range(len(ens_Pix)):\n for p2 in ens_Pix[p1:]:\n tmp = r_Voisins(ens_Pix[p1], p2)\n if tmp == 2:\n break\n elif tmp == 1:\n union(ens_Pix[p1], p2)\n i = 0\n for e in ens_Pix:\n if e.taille > int(sys.argv[3]):\n # print(e)\n if e.pere == e:\n i += 1\n return i\n\n\nif __name__ == '__main__':\n if(len(sys.argv) != 4):\n print(\"Nb_args\")\n\n png = photo_to_list(sys.argv[1])\n\n print(algo(create_Pix_Ens(png)))\n\n\n\"\"\"\nRESULTAT:\n\nimage1bw.png 1 1\n11\nimage2bw.png 1 1\n11\nimage3bw.png 1 1\n9\nimage4bw.png 1 1\n7\ncharpetit.png 1 18\n30\ncells-inv.png 1 1\n117\n\"\"\"\n", "id": "1743296", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "Main.py" } ]
0
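Main.py in the row above clusters black pixels with a union-find (disjoint-set) structure: root() applies path compression, union() merges components by size, and find() tests whether two pixels share a root. A small sketch of those helpers in isolation, assuming Main.py is on the import path (its command-line code is guarded by __main__, so importing the functions is safe):

from Main import Pix, union, find

a, b, c = Pix(0, 0), Pix(0, 1), Pix(5, 5)

union(a, b)                   # merge the two singleton components (union by size)
print(find(a, b))             # True  -> same root after path compression
print(find(a, c))             # False -> c is still its own component
print(max(p.taille for p in (a, b, c)))   # 2 -> component size is stored on the root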
willmadison
[ { "content": "from yoyo import step\n\nsteps = [\n step(\n r\"CREATE TABLE users (id INT, first_name VARCHAR(30), last_name VARCHAR(30))\"\n ),\n]\n", "id": "6098394", "language": "Python", "matching_score": 1.294675588607788, "max_stars_count": 0, "path": "tests/migrations/0001.bootstrapping.py" }, { "content": "class User:\n def __init__(self, id, first_name, last_name):\n self.id = id\n self.first_name = first_name\n self.last_name = last_name", "id": "4546275", "language": "Python", "matching_score": 0.1692296713590622, "max_stars_count": 0, "path": "extraction/user.py" }, { "content": "import unittest\nfrom yoyo import read_migrations\nfrom yoyo import get_backend\nimport sqlite3\nfrom extraction import extractor\n\nSQLITE_DB_LOCATION = 'databases/test.db'\n\n\ndef create_connection():\n conn = None\n try:\n conn = sqlite3.connect(SQLITE_DB_LOCATION)\n print(sqlite3.version)\n except Exception as e:\n raise e\n\n return conn\n\n\nclass TestSqlLiteExtraction(unittest.TestCase):\n def setUp(self):\n self.backend = get_backend(f\"sqlite:///{SQLITE_DB_LOCATION}\")\n self.migrations = read_migrations('./migrations')\n\n with self.backend.lock():\n self.backend.apply_migrations(self.backend.to_apply(self.migrations))\n\n def tearDown(self):\n import os\n os.remove(SQLITE_DB_LOCATION)\n\n def test_it_should_find_users_by_last_name(self):\n conn = create_connection()\n users = extractor.find_users_by_lastname(conn, 'Madison')\n conn.close()\n self.assertEqual(5, len(users), \"Expected 5 Users with last_name = Madison\")\n users_with_short_first_names = [user for user in users if len(user.first_name) <= 5]\n self.assertEqual(2, len(users_with_short_first_names),\n \"There should only be 2 users with short (i.e. < 6 characters) first names\")\n\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "3881030", "language": "Python", "matching_score": 2.615370035171509, "max_stars_count": 0, "path": "tests/test_extractor.py" }, { "content": "from extraction.user import User\n\n\ndef find_users_by_lastname(conn, last_name):\n cursor = conn.cursor()\n parameters = (last_name,)\n cursor.execute('SELECT * FROM users WHERE last_name = ?', parameters)\n return [User(i[0], i[1], i[2]) for i in cursor.fetchall()]\n\n\n", "id": "11302838", "language": "Python", "matching_score": 1.9745547771453857, "max_stars_count": 0, "path": "extraction/extractor.py" } ]
1.634615
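extraction/extractor.py in the row above issues a parameterized SELECT ... WHERE last_name = ? and wraps each row in a User; the users table layout comes from the yoyo migration. The self-contained sketch below reproduces that query pattern against an in-memory SQLite database instead of the migrated test database; the inserted rows are invented for illustration only.

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute("CREATE TABLE users (id INT, first_name VARCHAR(30), last_name VARCHAR(30))")
conn.executemany("INSERT INTO users VALUES (?, ?, ?)",
                 [(1, 'Will', 'Madison'), (2, 'Ada', 'Lovelace')])

cursor = conn.cursor()
# placeholder binding, as in find_users_by_lastname, keeps values out of the SQL text
cursor.execute('SELECT * FROM users WHERE last_name = ?', ('Madison',))
print(cursor.fetchall())      # [(1, 'Will', 'Madison')]
conn.close()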
comicmuse
[ { "content": "from alerta import app\n", "id": "11515081", "language": "Python", "matching_score": 0, "max_stars_count": 124, "path": "slash/app/wsgi.py" } ]
0
juanluis17
[ { "content": "from base import Model\nfrom helper import *\nimport tensorflow as tf\n\n\"\"\"\nAbbreviations used in variable names:\n\tType: Entity type side informatoin\n\tProbY, RelAlias: Relation alias side information\nRecommendation: View this file with tab size 8.\n\"\"\"\n\n\nclass PCNNATT(Model):\n\n def getBatches(self, data, shuffle=True):\n \"\"\"\n Generates batches of multiple bags\n\n Parameters\n ----------\n data:\t\tData to be used for creating batches. Dataset as list of bags where each bag is a dictionary\n shuffle:\tDecides whether to shuffle the data or not.\n\n Returns\n -------\n Generator for creating batches.\n \"\"\"\n if shuffle: random.shuffle(data)\n\n def get_sent_part(sub_pos, obj_pos, sents):\n assert len(sub_pos) == len(obj_pos)\n\n part_pos = []\n for i in range(len(sub_pos)):\n sent = sents[i]\n pos1, pos2 = sub_pos[i], obj_pos[i]\n pos1, pos2 = min(pos1, pos2), max(pos1, pos2)\n if pos1 == pos2 or pos1 == 0 or pos2 == len(sent) - 1:\n pos1 = len(sent) // 4\n pos2 = pos1 + len(sent) // 2\n\n part_pos.append([pos1, pos2])\n\n return part_pos\n\n for chunk in getChunks(data, self.p.batch_size): # chunk = batch\n batch = ddict(list)\n\n num = 0\n for i, bag in enumerate(chunk):\n batch['X'] += bag['X']\n batch['Pos1'] += bag['Pos1']\n batch['Pos2'] += bag['Pos2']\n batch['PartPos'] += get_sent_part(bag['SubPos'], bag['ObjPos'], bag['X'])\n\n batch['Y'].append(bag['Y'])\n old_num = num\n num += len(bag['X'])\n\n batch['sent_num'].append([old_num, num, i])\n\n batch = dict(batch)\n\n yield batch\n\n def add_placeholders(self):\n \"\"\"\n Defines the placeholder required for the model\n \"\"\"\n\n self.input_x = tf.placeholder(tf.int32, shape=[None, None], name='input_data') # Tokens ids of sentences\n self.input_y = tf.placeholder(tf.int32, shape=[None, None], name='input_labels') # Actual relation of the bag\n self.input_pos1 = tf.placeholder(tf.int32, shape=[None, None], name='input_pos1') # Position ids wrt entity 1\n self.input_pos2 = tf.placeholder(tf.int32, shape=[None, None], name='input_pos2') # Position ids wrt entity 2\n self.part_pos = tf.placeholder(tf.int32, shape=[None, 2],\n name='input_part_pos') # Positions where sentence needs to be partitioned\n\n self.x_len = tf.placeholder(tf.int32, shape=[None], name='input_len') # Number of words in sentences in a batch\n self.sent_num = tf.placeholder(tf.int32, shape=[None, 3],\n name='sent_num') # Stores which sentences belong to which bag | [start_index, end_index, bag_number]\n self.seq_len = tf.placeholder(tf.int32, shape=(),\n name='seq_len') # Max number of tokens in sentences in a batch\n self.total_bags = tf.placeholder(tf.int32, shape=(), name='total_bags') # Total number of bags in a batch\n self.total_sents = tf.placeholder(tf.int32, shape=(),\n name='total_sents') # Total number of sentences in a batch\n\n self.dropout = tf.placeholder_with_default(self.p.dropout, shape=(),\n name='dropout') # Dropout used in GCN Layer\n\n def pad_dynamic(self, X, pos1, pos2):\n \"\"\"\n Pads each batch during runtime.\n\n Parameters\n ----------\n X:\t\tFor each sentence in the bag, list of words\n pos1:\t\tFor each sentence in the bag, list position of words with respect to subject\n pos2:\t\tFor each sentence in the bag, list position of words with respect to object\n\n Returns\n -------\n x_pad\t\tPadded words\n x_len\t\tNumber of sentences in each sentence,\n pos1_pad\tPadded position 1\n pos2_pad\tPadded position 2\n seq_len \tMaximum sentence length in the batch\n \"\"\"\n\n seq_len, max_et = 0, 
0\n x_len = np.zeros((len(X)), np.int32)\n\n for i, x in enumerate(X):\n seq_len = max(seq_len, len(x))\n x_len[i] = len(x)\n\n x_pad, _ = self.padData(X, seq_len)\n pos1_pad, _ = self.padData(pos1, seq_len)\n pos2_pad, _ = self.padData(pos2, seq_len)\n\n return x_pad, x_len, pos1_pad, pos2_pad, seq_len\n\n def create_feed_dict(self, batch, wLabels=True, split='train'):\n X, Y, pos1, pos2, sent_num, part_pos = batch['X'], batch['Y'], batch['Pos1'], batch['Pos2'], batch['sent_num'], \\\n batch['PartPos']\n\n total_sents = len(batch['X'])\n total_bags = len(batch['Y'])\n x_pad, x_len, pos1_pad, pos2_pad, seq_len = self.pad_dynamic(X, pos1, pos2)\n\n y_hot = self.getOneHot(Y, self.num_class)\n\n feed_dict = {}\n feed_dict[self.input_x] = np.array(x_pad)\n feed_dict[self.input_pos1] = np.array(pos1_pad)\n feed_dict[self.input_pos2] = np.array(pos2_pad)\n feed_dict[self.x_len] = np.array(x_len)\n feed_dict[self.seq_len] = seq_len\n feed_dict[self.total_sents] = total_sents\n feed_dict[self.total_bags] = total_bags\n feed_dict[self.sent_num] = sent_num\n feed_dict[self.part_pos] = np.array(part_pos)\n\n if wLabels: feed_dict[self.input_y] = y_hot\n\n if split != 'train':\n feed_dict[self.dropout] = 1.0\n else:\n feed_dict[self.dropout] = self.p.dropout\n\n return feed_dict\n\n def get_adj(self, edgeList, batch_size, max_nodes, max_labels):\n \"\"\"\n Stores the adjacency matrix as indices and data for feeding to TensorFlow\n\n Parameters\n ----------\n edgeList:\tList of list of edges\n batch_size:\tNumber of bags in a batch\n max_nodes:\tMaximum number of nodes in the graph\n max_labels:\tMaximum number of edge labels in the graph\n\n Returns\n -------\n adj_mat_ind \tindices of adjacency matrix\n adj_mat_data\tdata of adjacency matrix\n \"\"\"\n max_edges = 0\n for edges in edgeList:\n max_edges = max(max_edges, len(edges))\n\n adj_mat_ind = np.zeros((max_labels, batch_size, max_edges, 2), np.int64)\n adj_mat_data = np.zeros((max_labels, batch_size, max_edges), np.float32)\n\n for lbl in range(max_labels):\n for i, edges in enumerate(edgeList):\n in_ind_temp, in_data_temp = [], []\n for j, (src, dest, _, _) in enumerate(edges):\n adj_mat_ind[lbl, i, j] = (src, dest)\n adj_mat_data[lbl, i, j] = 1.0\n\n return adj_mat_ind, adj_mat_data\n\n def add_model(self):\n \"\"\"\n Creates the Computational Graph\n\n Parameters\n ----------\n\n Returns\n -------\n nn_out:\t\tLogits for each bag in the batch\n accuracy:\taccuracy for the entire batch\n \"\"\"\n\n in_wrds, in_pos1, in_pos2 = self.input_x, self.input_pos1, self.input_pos2\n\n with tf.variable_scope('Embeddings') as scope:\n embed_init = getEmbeddings(self.wrd_list, self.p.embed_dim, self.p.embed_loc)\n _wrd_embeddings = tf.get_variable('embeddings', initializer=embed_init, trainable=True,\n regularizer=self.regularizer) # Word Embeddings\n wrd_pad = tf.zeros([1, self.p.embed_dim])\n wrd_embeddings = tf.concat([wrd_pad, _wrd_embeddings], axis=0)\n\n pos1_embeddings = tf.get_variable('pos1_embeddings', [self.max_pos, self.p.pos_dim], trainable=True,\n regularizer=self.regularizer)\n pos2_embeddings = tf.get_variable('pos2_embeddings', [self.max_pos, self.p.pos_dim], trainable=True,\n regularizer=self.regularizer)\n\n wrd_embed = tf.nn.embedding_lookup(wrd_embeddings, self.input_x)\n pos1_embed = tf.nn.embedding_lookup(pos1_embeddings, self.input_pos1)\n pos2_embed = tf.nn.embedding_lookup(pos2_embeddings, self.input_pos2)\n conv_in = tf.expand_dims(tf.concat([wrd_embed, pos1_embed, pos2_embed], axis=2), axis=3)\n\n conv_in_dim = 
self.p.embed_dim + 2 * self.p.pos_dim\n\n with tf.variable_scope('Convolution') as scope:\n padding = tf.constant([[0, 0], [1, 1], [0, 0], [0, 0]])\n kernel = tf.get_variable('kernel', [self.p.filt_size, conv_in_dim, 1, self.p.num_filt],\n initializer=tf.truncated_normal_initializer(), regularizer=self.regularizer)\n biases = tf.get_variable('biases', [self.p.num_filt], initializer=tf.random_normal_initializer(),\n regularizer=self.regularizer)\n\n conv_in = tf.pad(conv_in, padding, 'CONSTANT')\n conv = tf.nn.conv2d(conv_in, kernel, strides=[1, 1, 1, 1], padding='VALID')\n convRes = tf.nn.relu(conv + biases, name=scope.name)\n convRes = tf.squeeze(convRes, 2)\n\n sent_rep = tf.reduce_max(convRes, axis=1)\n sent_rep_dim = self.p.num_filt\n\n with tf.variable_scope('Sentence_attention') as scope:\n sent_atten_q = tf.get_variable('sent_atten_q', [sent_rep_dim, 1])\n\n def getSentAtten(num):\n bag_sents = sent_rep[num[0]: num[1]]\n num_sents = num[1] - num[0]\n if self.p.inc_attn:\n sent_atten_wts = tf.nn.softmax(tf.reshape(tf.matmul(tf.tanh(bag_sents), sent_atten_q), [-1]))\n bag_rep_ = tf.reshape(tf.matmul(tf.expand_dims(sent_atten_wts, 0), bag_sents), [sent_rep_dim])\n else:\n bag_rep_ = tf.reduce_mean(bag_sents, axis=0)\n\n return bag_rep_\n\n bag_rep = tf.map_fn(getSentAtten, self.sent_num, dtype=tf.float32)\n\n with tf.variable_scope('FC1') as scope:\n w_rel = tf.get_variable('w_rel', [sent_rep_dim, self.num_class],\n initializer=tf.truncated_normal_initializer(), regularizer=self.regularizer)\n b_rel = tf.get_variable('b_rel', [self.num_class], initializer=tf.constant_initializer(0.0),\n regularizer=self.regularizer)\n nn_out = tf.nn.xw_plus_b(bag_rep, w_rel, b_rel)\n\n with tf.name_scope('Accuracy') as scope:\n prob = tf.nn.softmax(nn_out)\n y_pred = tf.argmax(prob, axis=1)\n y_actual = tf.argmax(self.input_y, axis=1)\n accuracy = tf.reduce_mean(tf.cast(tf.equal(y_pred, y_actual), tf.float32))\n\n ''' Debugging command:\n Put the below command anywhere and get the values of the tensors (Use TF like PyTorch!)\n res = debug_nn([de_out], self.create_feed_dict( next(self.getBatches(self.data['train'])) ) ); pdb.set_trace()\n '''\n\n return nn_out, accuracy\n\n def __init__(self, params):\n \"\"\"\n Constructor for the main function. 
Loads data and creates computation graph.\n\n Parameters\n ----------\n params:\t\tHyperparameters of the model\n\n Returns\n -------\n \"\"\"\n super(PCNNATT, self).__init__(params)\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(\n description='Improving Distantly-Supervised Neural Relation Extraction using Side Information')\n\n parser.add_argument('-data', dest=\"dataset\", required=True, help='Dataset to use')\n parser.add_argument('-gpu', dest=\"gpu\", default='0', help='GPU to use')\n parser.add_argument('-nGate', dest=\"wGate\", action='store_false', help='Include edgewise-gating in GCN')\n\n parser.add_argument('-pos_dim', dest=\"pos_dim\", default=16, type=int, help='Dimension of positional embeddings')\n parser.add_argument('-filt_size', dest=\"filt_size\", default=3, type=int,\n help='Size of filters used in Convolution Layer')\n parser.add_argument('-num_filt', dest=\"num_filt\", default=100, type=int,\n help='Number of filters used in Convolution Layer')\n parser.add_argument('-drop', dest=\"dropout\", default=0.8, type=float, help='Dropout for full connected layer')\n parser.add_argument('-attn', dest=\"inc_attn\", action='store_true',\n help='Include attention during instance aggregation')\n\n parser.add_argument('-lr', dest=\"lr\", default=0.001, type=float, help='Learning rate')\n parser.add_argument('-l2', dest=\"l2\", default=0.001, type=float, help='L2 regularization')\n parser.add_argument('-epoch', dest=\"max_epochs\", default=2, type=int, help='Max epochs')\n parser.add_argument('-batch', dest=\"batch_size\", default=32, type=int, help='Batch size')\n parser.add_argument('-chunk', dest=\"chunk_size\", default=1000, type=int, help='Chunk size')\n parser.add_argument('-only_eval', dest=\"only_eval\", action='store_true',\n help='Only Evaluate the pretrained model (skip training)')\n parser.add_argument('-restore', dest=\"restore\", action='store_true',\n help='Restore from the previous best saved model')\n parser.add_argument('-original', dest=\"original\", action='store_true',\n help='Si es sobre los datos originales')\n parser.add_argument('-opt', dest=\"opt\", default='adam', help='Optimizer to use for training')\n\n parser.add_argument('-eps', dest=\"eps\", default=0.00000001, type=float, help='Value of epsilon')\n parser.add_argument('-name', dest=\"name\", default='test_' + str(uuid.uuid4()), help='Name of the run')\n parser.add_argument('-seed', dest=\"seed\", default=1234, type=int, help='Seed for randomization')\n parser.add_argument('-logdir', dest=\"log_dir\", default='./log/', help='Log directory')\n parser.add_argument('-config', dest=\"config_dir\", default='./config/', help='Config directory')\n parser.add_argument('-embed_loc', dest=\"embed_loc\", default='./glove/glove.6B.50d.txt', help='Log directory')\n parser.add_argument('-embed_dim', dest=\"embed_dim\", default=50, type=int, help='Dimension of embedding')\n args = parser.parse_args()\n\n if not args.restore: args.name = args.name\n args.seed = int(time.time())\n print(args.seed)\n # Set GPU to use\n set_gpu(args.gpu)\n\n # Set seed\n tf.set_random_seed(args.seed)\n random.seed(args.seed)\n np.random.seed(args.seed)\n\n # Create model computational graph\n model = PCNNATT(args)\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n with tf.Session(config=config) as sess:\n sess.run(tf.global_variables_initializer())\n model.fit(sess)\n", "id": "2060083", "language": "Python", "matching_score": 2.2600536346435547, "max_stars_count": 1, "path": "cnnatt.py" }, 
{ "content": "import numpy as np, argparse, pickle\nimport matplotlib;\nimport os\n\ndata={}\n\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import precision_recall_curve, average_precision_score, auc\nimport pdb\n\n\ndef loadData(path):\n preds = pickle.load(open(path, 'rb'))\n y_hot = np.array(preds['y_hot'])\n logit_list = np.array(preds['logit_list'])\n y_hot_new = np.reshape(np.array([x[1:] for x in y_hot]), (-1))\n logit_list_new = np.reshape(np.array([x[1:] for x in logit_list]), (-1))\n return y_hot_new, logit_list_new\n\n\ndef plotPR(model, original=True):\n global data\n y_true, y_scores = loadData('./results/{}/{}_precision_recall.pkl'.format(model, original))\n precision, recall, threshold = precision_recall_curve(y_true, y_scores)\n area_under = auc(x=recall, y=precision)\n print('Area under the curve: {} ---> {:.3}'.format(model,area_under))\n label = '{}: {:.3}'.format(model.split('_')[0],area_under)\n data[model] = [area_under,precision,recall]\n #plt.plot(recall[:], precision[:], label=label, color=color, lw=1, markevery=0.1, ms=6)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('-original', default='pretrained_reside')\n args = parser.parse_args()\n plt.ylim([0.0, 1.01])\n plt.xlim([0.0, 1.01])\n BASE_DIR = './results'\n color = ['purple', 'darkorange', 'green', 'xkcd:azure', 'orchid', 'cornflowerblue', 'blue', 'yellow']\n\n directory = os.fsencode(BASE_DIR)\n for file in os.listdir(directory):\n filename = os.fsdecode(file)\n #if index >= len(color):\n # index = 0\n if 'original_' not in filename:\n plotPR(model=filename, original=args.original)#, color=color[index])\n # index += 1\n data = {k: v for k, v in sorted(data.items(), key=lambda item: -item[1][0])}\n\n index = 0\n for key,value in data.items():\n if index >= len(color):\n index = 0\n label = '{}: {:.3}'.format(key.split('_')[0], value[0])\n recall = value[2]\n precision = value[1]\n plt.plot(recall[:], precision[:], label=label, color=color[index], lw=1, markevery=0.1, ms=6)\n index+=1\n\n plt.xlabel('Recall', fontsize=14)\n plt.ylabel('Precision', fontsize=14)\n plt.legend(loc=\"upper right\", prop={'size': 12})\n plt.grid(True)\n plt.tight_layout()\n plt.show()\n\n plot_path = './{}_plot_pr.png'.format(args.original)\n plt.savefig(plot_path)\n print('Precision-Recall plot saved at: {}'.format(plot_path))\n", "id": "5670364", "language": "Python", "matching_score": 1.8788890838623047, "max_stars_count": 1, "path": "plot_pr_my.py" }, { "content": "import os\nimport numpy as np\n\nBASE = './results'\n\nmodels = ['reside_original', 'pcnnatt_original', 'pcnn_original', 'cnnatt_original', 'cnn_original', 'bgwa_original']\n\nrankings_original = {}\nrankings_real = {}\n\nmodels_output = []\nwith open('results/AUCs_orig.txt', 'w+') as writer_h,open('results/AUCs_manual.txt', 'w+') as writer_manuals:\n writer_manuals.write(\"{},{}\\n\".format(\"model\", \"AUC\"))\n writer_h.write(\"{},{}\\n\".format(\"model\", \"AUC\"))\n for model in models:\n precs = []\n recalls = []\n f1s = []\n aucs = []\n precs_orig = []\n recalls_orig = []\n f1s_orig = []\n aucs_orig = []\n for i in range(42):\n file_orig = ''\n file = ''\n area_original = 0\n area_real = 0\n #Creando los arreglos\n if i not in rankings_original:\n rankings_original[i] = []\n rankings_real[i] = []\n #Si es la iteracion 0 no tiene numero\n if i == 0:\n file_orig = os.path.join(BASE, model, 'True_final_values.txt')\n file = os.path.join(BASE, model, 
'False_final_values.txt')\n else:\n file_orig = os.path.join(BASE, model + '_' + str(i), 'True_final_values.txt')\n file = os.path.join(BASE, model + '_' + str(i), 'False_final_values.txt')\n try:\n with open(file, 'r') as reader:\n metrics = reader.readlines()[0].strip().split('|')\n prec = float(metrics[0].split('Prec:')[1])\n rec = float(metrics[1].split('Rec:')[1])\n f1 = float(metrics[2].split('F1:')[1])\n area_real = float(metrics[3].split('Area:')[1])\n precs.append(prec)\n recalls.append(rec)\n f1s.append(f1)\n aucs.append(area_real)\n except:\n print(\"Error 1\")\n try:\n with open(file_orig, 'r') as reader:\n metrics = reader.readlines()[0].strip().split('|')\n prec = float(metrics[0].split('Prec:')[1])\n rec = float(metrics[1].split('Rec:')[1])\n f1 = float(metrics[2].split('F1:')[1])\n area_original = float(metrics[3].split('Area:')[1])\n precs_orig.append(prec)\n recalls_orig.append(rec)\n f1s_orig.append(f1)\n aucs_orig.append(area_original)\n except:\n print(\"Error 2\")\n models_output.append([model, area_original, area_real])\n rankings_original[i].append([model,area_original])\n rankings_real[i].append([model,area_real])\n print(len(precs_orig), len(precs))\n print(len(f1s_orig), len(f1s))\n\n for value in aucs_orig:\n writer_h.write(\"{},{}\\n\".format(model.split('_')[0].upper(),value))\n\n for value in aucs:\n writer_manuals.write(\"{},{}\\n\".format(model.split('_')[0].upper(),value))\n print(\n 'Model {} --- AUC --> Cleaned: {:.3}+-{:.3} Original: {:.3}+-{:.3}'.format(model, np.mean(aucs), np.std(aucs),\n np.mean(aucs_orig),\n np.std(aucs_orig)))\n\nmodels = ['reside_original', 'pcnnatt_original', 'pcnn_original', 'cnnatt_original', 'cnn_original', 'bgwa_original']\nrankings_ord_original = {}\nrankings_ord_real = {}\n\nfor i,values in rankings_original.items():\n aux = sorted(values, key=lambda x: -x[1])\n ordered = []\n for model in models:\n for j,x in enumerate(aux):\n if model==x[0]:\n break\n ordered.append(j+1)\n rankings_ord_original[i]=ordered\n\nfor i,values in rankings_real.items():\n aux = sorted(values, key=lambda x: -x[1])\n ordered = []\n for model in models:\n for j,x in enumerate(aux):\n if model==x[0]:\n break\n ordered.append(j+1)\n rankings_ord_real[i]=ordered\n\nout = ''\n\nfor key,values_orig in rankings_ord_original.items():\n for i,value_orig in enumerate(values_orig):\n out+='{},{},'.format(value_orig,rankings_ord_real[key][i])\nprint(len(out.split(',')))\nprint(out)\n", "id": "6608805", "language": "Python", "matching_score": 1.3673174381256104, "max_stars_count": 1, "path": "auc_heuristic_manual_labels.py" } ]
1.878889
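plot_pr_my.py in the row above scores each relation-extraction model by flattening the one-hot labels and predicted scores, building a precision-recall curve, and taking the area under it with scikit-learn. The toy sketch below shows that metric computation in isolation; the label and score arrays are invented for illustration and are not model output.

import numpy as np
from sklearn.metrics import precision_recall_curve, auc

y_true = np.array([1, 0, 1, 1, 0, 0, 1, 0])
y_scores = np.array([0.9, 0.4, 0.75, 0.6, 0.3, 0.55, 0.8, 0.1])

# precision_recall_curve sweeps the decision threshold over the scores;
# auc integrates precision over recall, as in plotPR() above.
precision, recall, _ = precision_recall_curve(y_true, y_scores)
area_under = auc(x=recall, y=precision)
print('Area under the PR curve: {:.3f}'.format(area_under))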